query (string, 9–9.05k chars) | document (string, 10–222k chars) | metadata (dict) | negatives (list, length 30) | negative_scores (list, length 30) | document_score (string, 4–10 chars) | document_rank (string, 2 classes) |
---|---|---|---|---|---|---|
Records a lap time. If no lap label is specified, a single 'last lap' counter will be (re)used. To keep track of more laps, provide labels yourself. | def lap(self, lap="__lap__"):
t = time.time()
self.laps[lap] = t
return t | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def log(self, label, times, overlapping=False):\r\n self._timings.append(Timing(label, times, overlapping))",
"def log_time(label: str) -> None:\n print(label, datetime.now())",
"def lap(self):\n oldtime = self._clock() - self._starttime\n self._starttime = self._clock()\n return oldtime",
"def lap(self):\n current_time = time.perf_counter()\n ret = current_time - self._lap\n if abs(ret) != ret:\n ret = self._time_corruption\n self._lap = current_time\n return ret",
"def log_runtime(label, mean_time, std, instances):\n pass",
"def test_time_lapse(self):\n t0 = time.time()\n time.sleep(2)\n lap = time_lapse(t0)\n self.assertEqual(lap, '00:00:02')",
"def label(self, t):\n if self.labels is None:\n return None\n prev_label = None\n for l in self.labels:\n if l.time > t:\n break\n prev_label = l\n if prev_label is None:\n return None\n return prev_label.name",
"def get_lap(self, lap=\"__exit__\"):\n return self.lap[lap]",
"def record(self, time, increment):\n raise NotImplementedError(\"Abstract method not implemented.\")",
"def add_timing(self, label, secs, is_tool=False):\r\n self._timings_by_path[label] += secs\r\n if is_tool:\r\n self._tool_labels.add(label)\r\n # Check existence in case we're a clean-all. We don't want to write anything in that case.\r\n if self._path and os.path.exists(os.path.dirname(self._path)):\r\n with open(self._path, 'w') as f:\r\n for x in self.get_all():\r\n f.write('%(label)s: %(timing)s\\n' % x)",
"def run_lap():\n pass",
"def tie(self, timer_label):\n self.timer_label = timer_label",
"def timer(*args, endTimer: bool=True, lapTime: bool=True, name: AnyStr=\"\", startTimer:\n bool=True, **kwargs)->None:\n pass",
"def use_ground_truth_time_slice(gt, df_p, df_l, app):\n\n gt = gt.reset_index(drop=True)\n gt['start_time'] = gt.start_time / 1000\n gt['end_time'] = gt.end_time / 1000\n gt['pred_location'] = np.zeros(len(gt))\n gt['true_location'] = gt.wlabel\n gt['true_appliance'] = gt.slabel\n\n gt['type'] = np.zeros(len(gt))\n gt['magnitude'] = np.zeros(len(gt))\n gt['phase'] = np.zeros(len(gt))\n\n print \"\\n\"\n # Label lights and fans as \"light\" type and magnitude as the fall magnitude of the time slice\n for i in gt.index:\n appl = gt.ix[i]['slabel']\n etype = ''\n\n if appl in ['Fan', 'Light']:\n gt.ix[i, 'type'] = etype = \"light\"\n else:\n gt.ix[i, 'type'] = etype = \"power\"\n\n # Get index from the main light/power data stream\n fall_time = gt.ix[i]['end_time']\n logger.debug(\"Fall_time:: %s (%s)\", dt.datetime.fromtimestamp(fall_time), fall_time)\n if etype == \"light\":\n idx = 0\n try:\n idx = np.where(df_l.time == fall_time)[0][0]\n except IndexError, e:\n print \"-----Index not found--------\", e\n continue\n\n edge = get_magnitude_metadata(df_l, idx, \"lightpower\", lwinmin)\n if edge:\n gt.ix[i, 'magnitude'] = math.fabs(edge['magnitude'])\n print i, \"Light:: Index\", idx, \"Magnitude::\", math.fabs(edge['magnitude'])\n else:\n logger.debug(\"--------Edge not found---------\")\n continue\n\n # Get phase of the edge\n gt.ix[i, 'phase'] = get_phase(idx, df_l, \"light\")\n # print \"LIGHT:: Index\", idx, \"magnitude\", magnitude\n else:\n idx = 0\n try:\n idx = np.where(df_p.time == fall_time)[0][0]\n except IndexError, e:\n print \"-----Index not found--------\", e\n continue\n\n edge = get_magnitude_metadata(df_p, idx, \"power\", pwinmin)\n if edge:\n gt.ix[i, 'magnitude'] = math.fabs(edge['magnitude'])\n print i, \"Power:: Index\", idx, \"Magnitude::\", math.fabs(edge['magnitude'])\n else:\n logger.debug(\"--------Edge not found---------\")\n continue\n\n # Get phase of the edge\n gt.ix[i, 'phase'] = get_phase(idx, df_p, \"power\")\n\n # print \"POWER:: Index\", idx, \"magnitude\", magnitude\n\n gt.pop('wlabel')\n gt.pop('slabel')\n\n if app in [4, 7]:\n gt = gt[(gt.phase != 0) & (gt.magnitude > lthresmin)]\n else:\n gt = gt[gt.magnitude > lthresmin]\n\n gt['act_start_time'] = [dt.datetime.fromtimestamp(i)\n for i in gt['start_time']]\n gt['act_end_time'] = [dt.datetime.fromtimestamp(i)\n for i in gt['end_time']]\n gt = gt.sort(['act_start_time'])\n\n print \"\\nGround Truth Time slices:\\n\", gt\n\n return gt",
"def timeCheckpoint(start_time, name):\n\n time = clock() - start_time\n print(str.capitalize(name) + ': \\t%.3f' % time)\n return clock()",
"def lammps(step, atomnum, boxbounds, addson = ''):\r\n\r\n line = 'ITEM: TIMESTEP\\n'\r\n line += str(step) + '\\n'\r\n\r\n line += 'ITEM: NUMBER OF ATOMS\\n'\r\n line += str(atomnum) + '\\n'\r\n\r\n line += 'ITEM: BOX BOUNDS pp pp pp\\n'\r\n line += '%.6f %.6f\\n' %(boxbounds[0, 0], boxbounds[0, 1])\r\n line += '%.6f %.6f\\n' %(boxbounds[1, 0], boxbounds[1, 1])\r\n if boxbounds.shape[0] == 3:\r\n line += '%.6f %.6f\\n' %(boxbounds[2, 0], boxbounds[2, 1])\r\n else:\r\n line += '%.6f %.6f\\n' %(-0.5, 0.5)\r\n\r\n line += 'ITEM: ATOMS id type x y z %s\\n' %addson\r\n\r\n return line",
"def _record_current_time(self):\n now = time.time()\n delta = now - self._last_time\n self._last_time = now\n self._timing_recorder.append(delta)",
"def setTimepoint(self, tp):\n\t\tpass",
"def __init__(self, parent=None, seconds=True, colon=False):\r\n tkinter.Label.__init__(self, parent)\r\n\r\n self.display_seconds = seconds\r\n if self.display_seconds:\r\n self.time = time.strftime('%H:%M:%S')\r\n else:\r\n self.time = time.strftime('%I:%M %p').lstrip('0')\r\n self.display_time = self.time\r\n self.configure(text=self.display_time)\r\n\r\n if colon:\r\n self.blink_colon()\r\n\r\n self.after(200, self.tick)",
"def timelapse():\n now = arrow.now()\n pic_path = Path(f\"/home/pi/lapse_{now.format('YYYY-MM-DD')}\")\n if not pic_path.exists():\n log.info(f\"Creating pic dir: {pic_path}\")\n pic_path.mkdir(parents=True)\n os.chdir(pic_path)\n log.info(f\"Picture directory: {pic_path}\")\n\n if __debug__:\n # In __debug__ mode, just run for 3 minutes.\n end_time = now.shift(minutes=+3)\n else:\n end_time = now.shift(hours=+END_AFTER_HOURS)\n\n with picamera.PiCamera() as camera:\n camera.resolution = (1920, 1080) # Full HD resolution\n camera.rotation = ROTATE\n for filename in camera.capture_continuous(\"sl_{timestamp:%Y%j_%H%M%S}.jpg\"):\n # Using the timestamp to ensure that there are no collisions when/if there's a problem during\n # the night and the timelapse \"restarts\"\n log.info(f\"Taking pic at: {time.asctime()}\")\n if arrow.now() > end_time:\n log.info(\"Got to end time, quitting normally\")\n break\n else:\n time.sleep(WAIT_TIME)",
"def add(self, timestamp):\n self.total_count += 1\n self.times.append(timestamp)",
"def record_event(self, description, time=None, additional=None):\n if time is None:\n time = datetime.datetime.now()\n if additional is not None:\n self.history.append((time, (description, additional)))\n else:\n self.history.append((time, description))",
"def record(self, pos):\n self.lasts += (datetime.now(), pos),\n if len(self.lasts) > 10:\n self.lasts.pop(0)",
"def configure_lldp_holdtime(device, timer):\r\n try:\r\n device.configure(f'lldp holdtime {timer}')\r\n except SubCommandFailure as e:\r\n raise SubCommandFailure(\r\n \"Could not configure LLDP holdime\"\r\n \"Error: {error}\".format(error=e)\r\n )",
"def set_times(self, p, f):\n self._dot_print_time = p\n self._dot_feed_time = f",
"def set_time_next_pps(self, *args, **kwargs):\n return _uhd_swig.usrp_sink_set_time_next_pps(self, *args, **kwargs)",
"def __init__(self, parent=None, seconds=True, colon=False):\n tk.Label.__init__(self, parent)\n\n self.display_seconds = seconds\n if self.display_seconds:\n self.time = time.strftime('%I:%M:%S %p')\n else:\n self.time = time.strftime('%I:%M:%S %p').lstrip('0')\n self.display_time = self.time\n self.configure(text=self.display_time)\n\n if colon:\n self.blink_colon()\n\n self.after(200, self.tick)",
"def record_lr(self, optimizer, step):\n for idx, group in enumerate(optimizer.param_groups):\n updated_lr = group[\"lr\"]\n self.writer_.add_scalar(tag=f\"train_step/group{idx}\", scalar_value=updated_lr, global_step=step)",
"def add_labels(df, binary=True, DELAY_THRESHOLD=20, categorical=False):\n\n def delay_class(minutes):\n if minutes <= 5:\n return 0\n if 5 < minutes <= 20:\n return 1\n if 20 < minutes <= 60:\n return 2\n if 60 < minutes <= 120:\n return 3\n if 120 < minutes:\n return 4\n else:\n return None\n\n if binary and not categorical:\n # add the target label \"binary: delayed (positive) not-delayed (negative)\" based on the threshold in minutes\n df['DELAYED'] = df['DEP_DELAY'].apply(lambda x: 1 if x >= DELAY_THRESHOLD else 0)\n\n # balance the data (same number of samples for delayed / not delayed flights)\n delayed = df[df['DELAYED'] == 1].copy()\n no_delay = df[df['DELAYED'] == 0][:delayed.shape[0]].copy()\n\n # concat into one dateframe\n data = delayed.append(no_delay, ignore_index=True)\n # logging\n percentage = delayed_percentage(df, DELAY_THRESHOLD)\n print('{:.2f}% of the total flights were delayed {} minutes or more.'.format(percentage, DELAY_THRESHOLD))\n\n del delayed, no_delay, df # release some memory\n\n elif categorical:\n df['DELAY_CLASS'] = df['DEP_DELAY'].apply(lambda row: delay_class(row))\n counts = df['DELAY_CLASS'].value_counts()\n m = min(counts)\n c0 = df[df['DELAY_CLASS'] == 0][:m].copy()\n c1 = df[df['DELAY_CLASS'] == 1][:m].copy()\n c2 = df[df['DELAY_CLASS'] == 2][:m].copy()\n c3 = df[df['DELAY_CLASS'] == 3][:m].copy()\n c4 = df[df['DELAY_CLASS'] == 4][:m].copy()\n data = c0.append([c1, c2, c3, c4])\n data['DELAY_CLASS'] = data['DELAY_CLASS'].astype(int)\n del c0, c1, c2, c3, c4 # release memory\n else:\n raise('either of binary or categorical must be true')\n\n # shuffle dataframe\n data = data.sample(frac=1).reset_index(drop=True)\n\n return data",
"def log_metrics_for_packet(self, packet: \"Packet\") -> None:\n client_timestamp = packet.client_timestamp\n router_incoming_timestamp = packet.router_incoming_timestamp\n router_outgoing_timestamp = packet.router_outgoing_timestamp\n server_timestamp = packet.server_timestamp\n response_timestamp = time.time()\n if router_outgoing_timestamp is None:\n router_outgoing_timestamp = server_timestamp\n if router_incoming_timestamp is None:\n router_incoming_timestamp = router_outgoing_timestamp\n if client_timestamp is None:\n client_timestamp = router_incoming_timestamp\n client_to_router = max(0, router_incoming_timestamp - client_timestamp)\n router_processing = max(0, router_outgoing_timestamp - router_incoming_timestamp)\n router_to_server = max(0, server_timestamp - router_outgoing_timestamp)\n server_processing = max(0, response_timestamp - server_timestamp)\n e2e_time = max(0, response_timestamp - client_timestamp)\n E2E_PACKET_LATENCY.labels(packet_type=packet.type, stage=\"client_to_router\").observe(\n client_to_router\n )\n E2E_PACKET_LATENCY.labels(packet_type=packet.type, stage=\"router_processing\").observe(\n router_processing\n )\n E2E_PACKET_LATENCY.labels(packet_type=packet.type, stage=\"router_to_server\").observe(\n router_to_server\n )\n E2E_PACKET_LATENCY.labels(packet_type=packet.type, stage=\"server_processing\").observe(\n server_processing\n )\n E2E_PACKET_LATENCY.labels(packet_type=packet.type, stage=\"e2e_time\").observe(e2e_time)"
]
| [
"0.6577776",
"0.6341617",
"0.619218",
"0.600884",
"0.595028",
"0.56649774",
"0.5482535",
"0.5314698",
"0.52446914",
"0.5106381",
"0.5097195",
"0.50922316",
"0.5041162",
"0.50392014",
"0.50362635",
"0.50173616",
"0.4965538",
"0.49572885",
"0.49547583",
"0.4949091",
"0.4937117",
"0.49168217",
"0.49067983",
"0.49021825",
"0.4852798",
"0.48395717",
"0.48301274",
"0.47703806",
"0.47611547",
"0.47531024"
]
| 0.7235086 | 0 |
Get the timer for label specified by 'lap' | def get_lap(self, lap="__exit__"):
return self.lap[lap] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def lap(self, lap=\"__lap__\"):\n t = time.time()\n self.laps[lap] = t\n return t",
"def test_time_lapse(self):\n t0 = time.time()\n time.sleep(2)\n lap = time_lapse(t0)\n self.assertEqual(lap, '00:00:02')",
"def run_lap():\n pass",
"def label(self, t):\n if self.labels is None:\n return None\n prev_label = None\n for l in self.labels:\n if l.time > t:\n break\n prev_label = l\n if prev_label is None:\n return None\n return prev_label.name",
"def lap(self):\n current_time = time.perf_counter()\n ret = current_time - self._lap\n if abs(ret) != ret:\n ret = self._time_corruption\n self._lap = current_time\n return ret",
"def timer(*args, endTimer: bool=True, lapTime: bool=True, name: AnyStr=\"\", startTimer:\n bool=True, **kwargs)->None:\n pass",
"def get_sleep_timer(self):\n return self.get(COMMAND_UIC, 'GetSleepTimer')",
"def getTimeDelay(*args):\n return args[0].TimeState.TimeDelay.time_delay",
"def time_pulse_us(pin:Pin, pulse_level:int, timeout_us:int=1000000, /) -> int:",
"def tie(self, timer_label):\n self.timer_label = timer_label",
"def _get_polling(self, name):\n path_format_arguments = {\n \"scheduleName\": name,\n \"resourceGroupName\": self._resource_group_name,\n \"workspaceName\": self._workspace_name,\n }\n return AzureMLPolling(\n LROConfigurations.POLL_INTERVAL,\n path_format_arguments=path_format_arguments,\n )",
"def process_time_label(self, label):\n\n # \"HH:MM:SS\" has eight characters\n if len(label) != 8:\n raise ValueError(\"Label has invalid length (must be in HH-MM-SS format\")\n\n tokens = label.split('-')\n\n # Ensure tokens is a list of three values ('HH', 'MM', 'SS')\n if len(tokens) != 3 or not all(map(lambda x: len(x) == 2, tokens)):\n raise ValueError(\"Label be in HH-MM-SS format\")\n\n hours = int(tokens[0])\n minutes = int(tokens[1])\n seconds = int(tokens[2])\n\n if hours < 0 or hours > 23 or minutes < 0 or minutes > 59 or seconds < 0 or seconds > 59:\n raise ValueError(\"Label must be in HH-MM-SS format\")\n\n return hours, minutes, seconds",
"def timer(\n self,\n stat: str | None = None,\n *args,\n tags: Attributes = None,\n **kwargs,\n ) -> TimerProtocol:\n return _OtelTimer(self, stat, tags)",
"def getitimer(which): # real signature unknown; restored from __doc__\n pass",
"def log(self, label, times, overlapping=False):\r\n self._timings.append(Timing(label, times, overlapping))",
"def timedAnnotation(self, time):\n\n assert type(time) == float\n tgtInterval = min(\n enumerate(self.intervals), key=lambda x: abs(x[1].xmin - time))\n\n return tgtInterval.text",
"def offset_sleep(self, sleep_time):\n ran_time = time.perf_counter() - self._lap\n while sleep_time - (time.perf_counter() - self._lap) > 0:\n if sleep_time - (time.perf_counter() - self._lap) > 0.002:\n time.sleep(0.00001)\n total_time = time.perf_counter() - self._lap\n self.lap()\n return ran_time, total_time",
"def get_raw_speed_from_label(self, label):\n try:\n return self.label_to_raw_speed_mapping[label]\n except IndexError as err:\n err.args = (f'Label \"{label}\" is not a valid index of the mapping : {self.label_to_raw_speed_mapping}',)\n raise",
"def get_timer_details(id):\n\twith postgres, postgres.cursor(cursor_factory=psycopg2.extras.RealDictCursor) as cur:\n\t\tcur.execute(\"select * from mustard.timers where id=%s\", (id,))\n\t\treturn cur.fetchone()",
"def start_timer():\r\n global reps\r\n reps += 1\r\n \r\n # convert minutes to seconds\r\n work_sec = WORK_MIN * 60\r\n short_break_sec = SHORT_BREAK_MIN * 60\r\n long_break_sec = LONG_BREAK_MIN * 60\r\n \r\n # countdown session types\r\n if reps % 2 == 0:\r\n pomodoro_title.config(text=\"Break\", fg=PINK)\r\n count_down(short_break_sec)\r\n elif reps % 8 == 0:\r\n pomodoro_title.config(text=\"Break\", fg=RED)\r\n count_down(long_break_sec)\r\n else:\r\n pomodoro_title.config(text=\"Work\", fg=GREEN)\r\n count_down(work_sec)",
"def time(lancet, issue):\n issue = get_issue(lancet, issue)\n\n with taskstatus(\"Starting harvest timer\") as ts:\n lancet.timer.start(issue)\n ts.ok(\"Started harvest timer\")",
"def get_label(self, name):\n label_list = self.wls_board.get_labels()\n for label in label_list:\n if name in label.name: \n return label",
"def get_time_elapsed(check, metric_name, modifiers, global_options):\n gauge_method = check.gauge\n\n def time_elapsed(metric, sample_data, runtime_data):\n for sample, tags, hostname in sample_data:\n gauge_method(metric_name, get_timestamp() - sample.value, tags=tags, hostname=hostname)\n\n del check\n del modifiers\n del global_options\n return time_elapsed",
"def check_timer():\n end = time.time()\n time_elapsed = end - target_time[0]\n durationMSG = fg.cyan + f\"Scans Completed for {args.target} in: \" + fg.rs\n print(durationMSG, display_time(time_elapsed))",
"def _read_lap(rows):\n logging.debug(\"parsing lap\")\n tracks = []\n\n for name, row in _safe_generate(rows):\n if re.search(TOTAL, name):\n distance, calories = _read_lap_total(rows)\n logging.info(\"Parsed lap with {} tracks, Distance: {}, Calories: {}\"\n .format(len(tracks), distance, calories))\n return Lap(total_distance=distance, total_calories=calories, tracks=tracks)\n\n tracks.append(Track.from_row(row))",
"def get(input=None):\n if isinstance(input, datetime.datetime):\n return Elapsed((datetime.datetime.now() - input).total_seconds())\n if not input or isinstance(input, int):\n pid = input if input else os.getpid()\n output = os.popen(\"ps -p %s -o etime | grep -v ELAPSED\" %\n pid).read().strip()\n if output:\n return Elapsed(_parse_ps_output(output))",
"def configure_lldp_timer(device, timer):\r\n try:\r\n device.configure(f'lldp timer {timer}')\r\n except SubCommandFailure as e:\r\n raise SubCommandFailure(\r\n \"Could not configure LLDP timer\"\r\n \"Error: {error}\".format(error=e)\r\n )",
"def unconfigure_lldp_timer(device): \r\n try:\r\n device.configure('no lldp timer')\r\n except SubCommandFailure as e:\r\n raise SubCommandFailure(\r\n \"Could not unconfigure LLDP timer\"\r\n \"Error: {error}\".format(error=e)\r\n )",
"def GAME_TIME_ADVANCE(dt):",
"def timer(elapsed):\n hours, rem = divmod(elapsed, 3600)\n minutes, seconds = divmod(rem, 60)\n if int(seconds) == 0:\n return 'UNKNOWN'\n else:\n return '{:0>2}:{:0>2}:{:0>2}'.format(\n int(hours), int(minutes), int(seconds))"
]
| [
"0.66542476",
"0.5889318",
"0.5687886",
"0.56285954",
"0.5593656",
"0.55107373",
"0.524594",
"0.5210685",
"0.5123175",
"0.5055814",
"0.49799293",
"0.49091572",
"0.4891311",
"0.4878939",
"0.485451",
"0.4827149",
"0.48147845",
"0.47879618",
"0.47779948",
"0.47756517",
"0.47512257",
"0.4745337",
"0.47260758",
"0.47257754",
"0.4715603",
"0.47126645",
"0.47004542",
"0.4698733",
"0.4697849",
"0.4690418"
]
| 0.7295211 | 0 |
Use an HTTP service that only returns IP address. | def myip_http(arg=None):
# Return type if no argument for use in Lister.
if arg is None:
return 'https'
page = requests.get(arg, stream=True, timeout=3.05)
soup = BeautifulSoup(page.text, 'html.parser')
if page.status_code != 200:
raise RuntimeError(
f"[-] error: {page.reason}\n{soup.body.text}")
logger.debug('[-] got page: "%s"', page.text)
interface = ipaddress.ip_interface(str(soup).strip())
return interface | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def api_myip():\n return request.remote_addr, 200, {'Content-Type': 'text/plain'}",
"def GetExternalIp():\n h = httplib2.Http(tempfile.gettempdir(), timeout=10)\n url = 'http://whatismyip.akamai.com'\n resp, content = h.request(url, 'GET')\n if resp.status == 200:\n return content\n for provider in (UltraDNSAuth(), MyResolverInfo()):\n answer = provider.GetClientIp()\n if answer:\n return answer",
"def get(self, request):\n content = {'ip': socket.gethostbyname(socket.gethostname())}\n return Response(content)",
"def handler(value, provider, **kwargs): # pylint: disable=W0613\n\n ip = requests.get(value).text.strip()\n LOGGER.debug('external IP: %s', ip)\n return ip",
"def get_ip(self):",
"def _get_ip_resp(api_url: str):\n return get(api_url, headers={'user-agent': USER_AGENT})",
"def ip(self, request):\n ip = get_real_ip(request) or 'undefined'\n debug_logger.debug(\"IP request from : \" + ip)\n return Response({'ip': ip})",
"def public_ip_url(url, sanitize=(lambda x:x)):\n reply = requests.get(url)\n if reply.status_code == 200:\n try:\n ip = sanitize(reply.text.strip())\n ipaddress.ip_address(ip)\n print(ip)\n sys.exit()\n except Exception as e:\n print(e)",
"def getIp(self):\n raise NotImplementedError",
"def ip(self, mess, args):\n return '%s\\n' % urlgrabber.urlread('http://whatismyip.org')",
"def get_public_ip():\n public_ip = get('https://api.ipify.org').text\n return public_ip",
"def getIP():\n try:\n page = urlopen(\"http://www.whatismyip.com/automation/n09230945.asp\")\n IP = page.read()\n page.close()\n return IP\n except:\n return \"Could not retrieve the IP address.\"",
"def _open_stack_get_ip_(srv):\n addr_info = srv.addresses\n for net in addr_info.keys():\n for addr in addr_info[net]:\n ip = addr['addr']\n return ip",
"def __lookup_public_ip(self):\n\n response = requests.get('https://api.ipify.org?format=json', timeout=self.timeout)\n\n if response.status_code == 200:\n ip_data = response.json()\n if 'ip' not in ip_data.keys():\n return 'Unable to determine IP'\n else:\n return ip_data['ip']\n else:\n return 'Unable to determine IP'",
"def getPublicIpAddress() :\n f = urllib.urlopen(\"http://www.canyouseeme.org/\")\n html_doc = f.read()\n f.close()\n ipAddress = re.search('(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)',html_doc)\n\n #response = urllib.urlopen('http://api.hostip.info/get_html.php?ip=' + ipAddress.group(0) + '&position=true').read()\n return urllib.urlopen('http://api.hostip.info/get_html.php?ip=' + ipAddress.group(0)).read()",
"def obtain_public_ip():\n from urllib2 import urlopen\n my_ip = urlopen('http://ip.42.pl/raw').read()\n logger.debug('The public ip is: %s' % my_ip)\n return str(my_ip)",
"def get_ip_address(self):\n raise NotImplementedError",
"def ip_lookup(ip):\n # Create the required data dictionary for Host/Reputation\n api_data = {\n 'host': ip\n }\n response = http_request(endpoint=HOST_REPUTE_API, data=api_data)\n\n if response.get('errorNo') != 0:\n return_error('API Returned, {}:{}'.format(response.get('errorNo'), response.get('errorMsg')))\n\n return response",
"def ip():\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.connect((\"8.8.8.8\", 80))\n ip = s.getsockname()[0]\n s.close()\n return ip",
"def ipAddress():\n \n sk = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sk.connect((\"8.8.8.8\", 80))\n ip = (sk.getsockname()[0])\n sk.close()\n return str(ip)",
"def publicIP(self):\n return self.query('https://plex.tv/:/ip')",
"def gen_ip(self):\n\n try:\n self.ip = self.auth_url.split(\":\")[1].strip(\"//\")\n except Exception:\n self.ip = socket.gethostbyname(socket.gethostname())\n print \"\\t! Error obtaining ip address from cred file. Using %s\" % (self.ip)",
"def get_ip(tag,env=None,eip=False):\n api_url = 'http://api.rahulinux.io/ip?host={0}&env={1}&eip={2}'\n try:\n resp = requests.get(api_url.format(tag,env,eip))\n except requests.exceptions.RequestException as e:\n return e\n if len(resp.text) >= 30:\n return resp.text.split()\n return [ resp.text ]",
"def get_ip(request):\n\n # if neither header contain a value, just use local loopback\n ip_address = request.META.get('HTTP_X_FORWARDED_FOR',\n request.META.get('REMOTE_ADDR', '127.0.0.1'))\n if ip_address:\n # make sure we have one and only one IP\n try:\n ip_address = IP_RE.match(ip_address)\n if ip_address:\n ip_address = ip_address.group(0)\n else:\n # no IP, probably from some dirty proxy or other device\n # throw in some bogus IP\n ip_address = '10.0.0.1'\n except IndexError:\n pass\n return ip_address",
"async def get_ip(self) -> Union[IPv4Address, IPv6Address]:\n xff = await self.get_x_forwarded_for()\n if xff: return xff[0]\n ip_addr = self._request.transport.get_extra_info('peername')[0]\n return ip_address(ip_addr)",
"def dshield_ip_check(ip):\n if not is_IPv4Address(ip):\n return None\n\n headers = {'User-Agent': useragent}\n url = 'https://isc.sans.edu/api/ip/'\n response = requests.get('{0}{1}?json'.format(url, ip), headers=headers)\n return response.json()",
"def getLocalIP():\r\n try:\r\n csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\r\n csock.connect(('8.8.8.8', 80))\r\n (addr, port) = csock.getsockname()\r\n csock.close()\r\n return addr\r\n except socket.error:\r\n return \"127.0.0.1\"",
"def get_local_host_ip(self) -> str:",
"def get_global_ip() -> str:\n return urllib.request.urlopen(\"https://icanhazip.com\").read().decode().strip()",
"def get_ip():\n if not request.headers.getlist(\"X-Forwarded-For\"):\n return str(request.remote_addr)\n else:\n return str(request.headers.getlist(\"X-Forwarded-For\")[0])"
]
| [
"0.6969286",
"0.66623604",
"0.6631766",
"0.66235554",
"0.6597741",
"0.6467402",
"0.6458488",
"0.6360583",
"0.62954813",
"0.6271929",
"0.62604827",
"0.62479866",
"0.6207786",
"0.61434484",
"0.6139615",
"0.6131207",
"0.61227196",
"0.61169213",
"0.61078197",
"0.6082477",
"0.60817766",
"0.6053473",
"0.6049103",
"0.6048843",
"0.59796494",
"0.5978561",
"0.5974971",
"0.59290844",
"0.59239656",
"0.59036946"
]
| 0.7358666 | 0 |
Return list of available method ids for getting IP address. | def get_myip_methods(include_random=False):
methods = list(myip_methods.keys())
# For argparse choices, set True
if include_random:
methods.append('random')
return methods | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_ids_from_ip(self, ip):\r\n try:\r\n # Does it look like an ip address?\r\n socket.inet_aton(ip)\r\n except socket.error:\r\n return []\r\n\r\n # Find the server via ip address. First try public ip, then private\r\n results = self.list_hardware(public_ip=ip, mask=\"id\")\r\n if results:\r\n return [result['id'] for result in results]\r\n\r\n results = self.list_hardware(private_ip=ip, mask=\"id\")\r\n if results:\r\n return [result['id'] for result in results]",
"def _get_ids_from_ip(self, ip_address):\r\n try:\r\n # Does it look like an ip address?\r\n socket.inet_aton(ip_address)\r\n except socket.error:\r\n return []\r\n\r\n # Find the VS via ip address. First try public ip, then private\r\n results = self.list_instances(public_ip=ip_address, mask=\"id\")\r\n if results:\r\n return [result['id'] for result in results]\r\n\r\n results = self.list_instances(private_ip=ip_address, mask=\"id\")\r\n if results:\r\n return [result['id'] for result in results]",
"def available_methods():\n return {mc.method_id: mc for mc in MethodFactory.method_classes}",
"def service_ip_lists(self) -> pulumi.Output[Sequence[str]]:\n return pulumi.get(self, \"service_ip_lists\")",
"def IP_list(pwd):\r\n # Connect to the gmail server.\r\n mail = imaplib.IMAP4_SSL('imap.gmail.com')\r\n mail.login('[email protected]', pwd)\r\n\r\n # get mail IDs.\r\n mail.select('Inbox')\r\n typ, data = mail.search(None, '(From \"arc.pi.reg\")')\r\n mail_ids = data[0].decode()\r\n id_list = mail_ids.split()\r\n\r\n pi_ip = []\r\n\r\n # More description here.\r\n for id in id_list[::-1]:\r\n typ, msg_data = mail.fetch(id, '(BODY.PEEK[TEXT])')\r\n msg = msg_data[0][1].decode().strip()\r\n\r\n name, addr = msg.split(\" \")\r\n pi_ip.append((name, addr))\r\n\r\n return pi_ip",
"def list_methods(self):\n return list(self.methods.keys())",
"def get_all_ips_connection(self):\n return self.m_connection.all_ips",
"def get_api_server_authorized_ip_ranges(self) -> List[str]:\n return self._get_api_server_authorized_ip_ranges(enable_validation=True)",
"def get_methods():\n try:\n methods = Method.query.all()\n except OperationalError:\n return ''\n return methods",
"def interface_list() -> List[str]:\n cmd_ip = system_command('ip')\n command = f\"{cmd_ip} -o addr show up primary scope global\".split()\n result = SUDO.execute_unit(command)\n result.assert_return()\n line_list = result.stdout.splitlines()\n pattern = re.compile(r\"^\\d+[:]\\s+(\\S+)\\s+(.+)$\")\n select = lambda line: pattern.search(line).group(1)\n face_list = list(map(select, line_list))\n return face_list",
"def method_list(self):\n\t\tmethod_names = list(self.methods.keys())\n\t\tmethod_names.sort()\n\t\tmethod_list = []\n\t\tfor mn in method_names:\n\t\t\tmethod_list += [self.methods[mn]]\n\t\treturn method_list",
"def discovered_ips(self) -> Sequence[str]:\n return pulumi.get(self, \"discovered_ips\")",
"def list_methods(client: Client) -> List[str]:\n return client._client.ListMethods()",
"def get_ips():\r\n local_ips = []\r\n public_ips = []\r\n \r\n # list of iface names, 'lo0', 'eth0', etc.\r\n for iface in netifaces.interfaces():\r\n # list of ipv4 addrinfo dicts\r\n ipv4s = netifaces.ifaddresses(iface).get(netifaces.AF_INET, [])\r\n for entry in ipv4s:\r\n addr = entry.get('addr')\r\n #print(\"addr: \" + addr)\r\n if not addr:\r\n continue\r\n if not (iface.startswith('lo') or addr.startswith('127.')):\r\n public_ips.append(addr)\r\n else:\r\n local_ips.append(addr) \r\n return public_ips",
"def _get_methods(self):\n\n methods = inspect.getmembers(self, predicate=callable)\n method_list = set()\n\n for name, _ in methods:\n if (name in ('proxy', 'start', 'stop', 'part', 'join',)\n or name[0] == '_'):\n continue\n\n method_list.add(name)\n\n return method_list",
"def _list_global_ips_by_identifier(self, identifier):\r\n results = self.list_global_ips(identifier=identifier, mask='id')\r\n return [result['id'] for result in results]",
"def get_methods(self, dataset_id=None, datarun_id=None,\n ignore_errored=False, ignore_gridding_done=False):\n hyperpartitions = self.get_hyperpartitions(dataset_id=dataset_id,\n datarun_id=datarun_id,\n ignore_gridding_done=False,\n ignore_errored=False)\n methods = set(f.method for f in hyperpartitions)\n return list(methods)",
"def get_all_ip():\n sql = sqlite3.connect('data.db')\n\n cursor = sql.cursor()\n\n get_ip = \"\"\"SELECT ip FROM Status\"\"\"\n\n ip = cursor.execute(get_ip).fetchall()\n\n get_protocol = \"\"\"SELECT protocol FROM Status\"\"\"\n\n protocol = cursor.execute(get_protocol).fetchall()\n\n get_port = \"\"\"SELECT port FROM Status\"\"\"\n\n port = cursor.execute(get_port).fetchall()\n\n cursor.close()\n\n return zip(ip, protocol, port)",
"def ip_addresses(self) -> pulumi.Output[Sequence['outputs.IpMappingResponse']]:\n return pulumi.get(self, \"ip_addresses\")",
"def get_ip(self):\n json_scheme = self.gen_def_json_scheme('GetPurchasedIpAddresses')\n json_obj = self.call_method_post(method='GetPurchasedIpAddresses ', json_scheme=json_scheme)\n self.iplist = IpList()\n for ip in json_obj['Value']:\n r = Ip()\n r.ip_addr = ip['Value']\n r.resid = ip['ResourceId']\n r.serverid = ip['ServerId'] if 'None' not in str(ip['ServerId']) else None\n self.iplist.append(r)",
"def _method_lookup_table(gen: CodeGenerator, service: ProtoService) -> None:\n gen.line('static constexpr std::array<uint32_t, '\n f'{len(service.methods())}> kPwRpcMethodIds = {{')\n\n with gen.indent(4):\n for method in service.methods():\n method_id = pw_rpc.ids.calculate(method.name())\n gen.line(f'0x{method_id:08x}, // Hash of \"{method.name()}\"')\n\n gen.line('};')",
"def get_methods_detail(self, ids):\n url = '/method/set/%s/' % \";\".join(map(str, ids))\n return self.get_resource_data(url)",
"def slave_ips(self) -> 'List[str]':\n raise NotImplementedError",
"def getMethods(iface):\n return getElements(iface, IMethod).items()",
"def getBaseIP(url: str) -> list:\n \n response = requests.get(url) #get data \n\n ip_sets = response.text\n ip_list = re.findall(r'(?:\\d{1,3}\\.)+(?:\\d{1,3})', ip_sets)\n \n return ip_list",
"def ip_addresses(self):\n try:\n return socket.gethostbyaddr(self.fqdn)[-1]\n except socket.error as _:\n return ['127.0.0.1']",
"def _get_rpc_method_names(self):\n return [name for name in dir(self) if _is_rpc_call_method(getattr(self, name))]",
"def GetInstructionList():\n return [i[0] for i in ida_idp.ph_get_instruc() if i[0]]",
"def neighbors_ip(self):\n neighbors = self.neighbors()\n nei_list = []\n net_ip = self._rloc_ip_net_addr()\n if neighbors is not None:\n for nei_rec in neighbors:\n nei_ip = net_ip + hex(nei_rec.rloc16)[2:]\n nei_list.append(nei_ip)\n return nei_list",
"def get_ids(self) -> List[str]:"
]
| [
"0.656648",
"0.62675756",
"0.62597704",
"0.5974203",
"0.5927884",
"0.5880266",
"0.5875549",
"0.58529127",
"0.5826927",
"0.5816129",
"0.5808003",
"0.57877886",
"0.5781475",
"0.57750034",
"0.57587093",
"0.57474077",
"0.5737622",
"0.5725721",
"0.57247376",
"0.5709294",
"0.5704034",
"0.570372",
"0.56860155",
"0.568005",
"0.5670899",
"0.5657027",
"0.5651356",
"0.5650935",
"0.56497544",
"0.56479806"
]
| 0.6813391 | 0 |
Main script to initialize DataSource object in interactive python. Currently using ipsana.sh bash script to start this, but should look to using example Dan D. provided for IPython startup. /reg/neh/home/ddamiani/Workarea/psanadev/psmondev/psmon/src/console.py Left some code from psutils module for automatically guessing experiment and instrument based on the user and local machine from which this is started. In future should add detection of data files to avoid trying to load run data that does not exist, including checks that the code is being run on an appropriate machine. If someone tries loading a file they do not have access to, a message should be given how to get access (i.e., ask the PI of the experiment to be added, and if sure are on experiment then submit ticket to appropriate mail list for assistance). | def main():
time0 = time.time()
args = initArgs()
if not args.exp:
from PyDataSource import psutils
args.exp = psutils.active_experiment()
print('No exp provided -- {:} is the active experiment'.format(args.exp))
ds = PyDataSource.DataSource(**vars(args))
setattr(sys.modules['__main__'], args.base, ds)
if not ds.data_source.monshmserver:
ds.reload()
IPython.embed(banner1=banner(ds, base=args.base, time0=time0)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load_run(self, reload=False, *args, **kwargs):\n if 'data_source' in kwargs:\n data_source = kwargs['data_source']\n else:\n data_source = self.get_data_source(*args, **kwargs)\n \n if data_source:\n# try:\n if True:\n self.data_source = data_source\n if self.psana_cfg_dict:\n self.setOptions()\n elif self.cfg:\n # if a cfg file is specified it will be loaded\n # however, the cfg_setOptions takes precidence\n # in future may try combind the two.\n psana.setConfigFile(self.cfg)\n\n calibDir = '/reg/d/psdm/cxi/{:}/calib'.format(self.exp)\n print 'setting calibDir', self.exp, calibDir\n psana.setOption('psana.calib-dir', calibDir)\n\n print 'Loading data from ',data_source\n if self.ds and self.live:\n print 'WARNING: Currently Cannot reload live shared memory'\n print ' Need to exit python to reload'\n else:\n self.ds = psana.DataSource(data_source)\n self._no_evtData = False\n\n self._ds_run = self.ds.runs().next()\n\n _source_attrs = ['ds','events','evt']\n if self.indexed:\n self.times = self._ds_run.times()\n\n self.events = self._ds_run.events()\n self.configStore = PsanaDictify(self._ds_run.env().configStore())\n self.evrConfig = EvrDictify(self.configStore)\n self.load_epicsStore()\n\n# self.daqEventCodes = [ec.code() for ec in self.configStore.evr0.eventcodes] \n self.ievent = 0\n if not reload and self._kwargs.get('nstart'):\n for i in range(self._kwargs.get('nstart')-1),:\n self.next_event()\n \n# except:\n# print 'Failed to load data source \"{:}\"'.format(data_source)\n else:\n if len(self.runs) > 0:\n print 'WARNING: No xtc files for {:} available in {:}'.format(\n self.exp,self.xtc_dir)\n print 'Either set xtc_dir to a valid directory or restore files' \n print ' through the Data Manager:'\n pswww_portal = 'https://pswww.slac.stanford.edu/apps/portal/index'\n print pswww_portal+'.php?exper_id={:}'.format(self.exper_id)\n else:\n print 'No runs taken for this experiment'\n\n if self._reloadOnLoadRun:\n self._reloadOnLoadRun = False\n self.load_run(reload=True)",
"def __init__(self, data_source_identifier, verbose=True):\n pass",
"def main():\n parser = specify_parser()\n args = parser.parse_args()\n\n mapping = init_data(args.datafile[0])\n loaded_data = read(args.input)\n\n mount(mapping, loaded_data)",
"def main():\n import redis_helper as rh\n import input_helper as ih\n\n if rh.REDIS is None:\n connected, _ = rh.connect_to_server()\n if not connected:\n raise Exception('Unable to connect to {}'.format(rh.REDIS_URL))\n selected = rh.Collection.select_models(named=True)\n if selected:\n ih.start_ipython(warn=True, rh=rh, ih=ih, **selected)",
"def start(data_file, script_file=None):\n\n # Try to start up the interpreter\n try:\n initialize(data_file)\n except blotish.BlotishError, err:\n blot_common.print_blot_error(err)\n return\n\n # Maybe run a script\n exit_flag = False\n if script_file:\n exit_flag = execute_file(script_file)\n\n # Start the interpreter unless the script called exit\n if not exit_flag:\n global interpreter\n interpreter.cmdloop()\n\n # Cleanup\n finalize()",
"def main() :\n\n global dbMan\n dbMan = DBManager( 'CMS_HCL_APPUSER_R/HCAL_Reader_55@localhost:1521/cms_omds_lb.cern.ch' )",
"def get_data_source(self, *args, **kwargs):\n if kwargs.get('exp',None) is not self.exp:\n self.xtc_dir = None\n\n self.set_exp_defaults(**kwargs)\n# if self.get_kwarg('camrecord'):\n# host = os.uname()[1]\n# xtc_dir = \"/reg/d/camera/{:}/\"\n\n if self._kwargs.get('iocrc'):\n if self._kwargs.get('iocrc') in 'local':\n host = os.uname()[1]\n else:\n host = self._kwargs.get('iocrc')\n \n self.xtc_dir = \"/reg/d/cameras/{:}/daq/xtc\".format(host)\n\n if self.exp.startswith('dia'):\n folder = 'dia'\n else:\n folder = self.instrument\n \n if self.xtc_dir:\n default_dir = True\n else:\n self.xtc_dir = \"/reg/d/psdm/{:}/{:}/xtc\".format(folder, self.exp)\n default_dir = True\n \n if not self.h5_dir:\n self.h5_dir = \"/reg/d/psdm/{:}/{:}/hdf5\".format(folder, self.exp)\n\n if self.live:\n data_source = psutils.live_source(monshmserver=self.monshmserver)\n \n else:\n# if len(self.runs) == 0:\n# self.exp = _default_exp['exp']\n# self.run = _default_exp['run']\n# self.instrument = self.exp[0:3]\n## self.runs = experiment_info.experiment_runs(self.instrument.upper(),self.exp)\n# print 'No runs taken yet for ',self.exp\n# print 'Using default experiment {exp} and run {run}'.format(\n# exp=self.exp,run=self.run)\n\n if len(self.runs) > 0 and self.run > len(self.runs):\n print 'Run number {:} too large'.format(self.run)\n print 'Looking to load last run from experiment {:}'.format(self.exp)\n self.run = -1\n \n if len(self.runs) > 0 and self.run <= 0:\n while -self.run < len(self.runs)-1 and \\\n len(self.runs[-1+self.run].get('xtc_files',[])) == 0:\n self.run -= 1\n \n if self.run:\n self.run = self.runs[self.run]['num']\n else:\n self.run = 0\n\n if len(self.runs[-1+self.run].get('xtc_files',[])) == 0:\n data_source = None\n self._kwargs['noload'] = True\n \n if self.run <= 0:\n data_source = None\n self._kwargs['noload'] = True\n else:\n try:\n self.exper_id = self.runs[self.run-1]['exper_id']\n data_source = \"exp={exp}:run={run}\".format(exp=self.exp,run=self.run)\n if self.ffb:\n data_source += \":one-stream\"\n # data_source += \":live\"\n self.xtc_dir = \"/reg/d/ffb/{instrument}/{exp}/xtc\".format(\n instrument=self.instrument,exp=self.exp)\n elif self.h5:\n data_source += \":h5\"\n elif self.indexed:\n if self.idx:\n data_source += \":idx\"\n self.smd = False\n else:\n data_source += \":smd\"\n\n if self.xtc_dir and not default_dir and not self.h5:\n data_source += \":dir={:}\".format(self.xtc_dir)\n except:\n data_source = None\n self._kwargs['noload'] = True\n print 'No data source'\n\n return data_source",
"def main():\n args = parse_command_line()\n expt_config = load_config(args.experiment_config_path)\n run_cli(RunOptions.from_dict(expt_config))",
"def __init__(self, run, expname):\n logger.debug('Initializing worker {}.'.format(rank))\n self.run = int(run)\n self.expname = expname\n bcast_var = None\n dsname = comm.bcast(bcast_var, root=0)\n print(dsname)\n \n print('********** Start setup.')\n t0 = time.time()\n self.dsIdx = psana.DataSource(str(dsname))\n logger.info('********** Datasource on rank {}: {}s'.format(rank, time.time()-t0))\n self.dsIdxRun = next(self.dsIdx.runs())\n self.parse_detectors()\n logger.info('Rank {} has datasource and detectors.'.format(rank))\n print('********** Setup on rank {}: {}s'.format(rank, time.time()-t0))\n return",
"def main():\n logger = logging.getLogger(__name__)\n logger.info('making final data set from raw data')\n get_user_config()",
"def __init__(self):\n \n #\n # imports\n #\n import time\n from socket import gethostname\n import commands\n \n #\n # Declare variables and set standard default values\n #\n self.database = None\n self.analysisPath = None\n self.command = None\n self.commandLine = None\n self.commandLineList = None\n SEAseqPipeLine.settings = Settings()\n SEAseqPipeLine.logfile = None\n SEAseqPipeLine.startTime = time.time()\n SEAseqPipeLine.startTimeStr = time.strftime(\"%A, %d %b %Y %H:%M:%S\",time.localtime())\n self.availableCommands = {\n 'initiateAnalysis':self.initiateAnalysis,\n 'addData':self.addData,\n 'changeSettings':self.changeSettings,\n 'startAnalysis':self.startAnalysis,\n 'commandLog':self.commandLog,\n 'help':self.printHelp,\n }\n if gethostname().split('.')[1] == 'uppmax': self.onUppmax = True\n else: self.onUppmax = False\n tempFolderName = 'SEAseq2temporaryFiles'\n if self.onUppmax: self.tempFileFolder = os.path.abspath(commands.getoutput('echo $SNIC_TMP'))+'/'+tempFolderName\n else: self.tempFileFolder = self.analysisPath+'/'+tempFolderName\n if not os.path.isdir(self.tempFileFolder): os.makedirs(self.tempFileFolder)\n \n #\n # Get information from commandline\n #\n self.getComandAndPath()\n self.doCurrentTask()",
"def main():\n db = _db.Database(experiment.ORACLE_PATH)\n db.populate_kernel_names_table()\n db.commit()",
"def __init__(self):\n self.label = \"Data Assistant\"\n self.alias = \"dla\"\n\n # List of tool classes associated with this toolbox\n self.tools = [Append, Stage, NewFile, Preview, Replace]",
"def main(*args, **kwargs):\n src_mgr = SourceDriverManager()\n fs_mgr = FileSystemManager()",
"def run_init(self):\n try:\n with open(SAGE_STARTUP_FILE, 'r') as f:\n self.shell.run_cell(f.read(), store_history=False)\n except IOError:\n pass",
"def start(self):\n self.ioloop.add_callback(self.get_data)\n logging.info(\"[DataSource] Started\")",
"def init(default):\n\tconsole = Console()\n\tconsole.clear()\n\n\tif default:\n\t\tdocument_dict = {\n\t\t\t\"DATASET_NAME\": \"dataset\",\n\t\t\t\"API_KEY\": \"\",\n\t\t\t\"SAMPLES_PER_SEARCH\": 50,\n\t\t\t\"IMAGE_SIZE\": 512,\n\t\t\t\"ENGINE\": \"duckgo\",\n\t\t\t\"RESIZE_METHOD\": \"longer_side\",\n\t\t\t\"CLASSES\": [{\"CLASS_NAME\": \"Test\", \"SEARCH_KEYWORDS\": \"images of cats\"}]}\n\n\t\tif not os.path.exists(\"dataset.yaml\"):\n\t\t\tconsole.print(\"[bold]Creating a dataset configuration file...[/bold]\")\n\t\t\t\n\t\t\tf = open(\"dataset.yaml\", \"w\")\n\t\t\tf.write(yaml.dump(document_dict))\n\t\t\tif f:\n\t\t\t\tconsole.clear()\n\t\t\t\tconsole.print(\"Dataset YAML file has been created sucessfully. Now run [bold blue]idt build[/bold blue] to mount your dataset!\")\n\t\t\t\texit(0)\n\t\t\t\n\t\t\n\t\telse:\n\t\t\tconsole.print(\"[red]A dataset.yaml is already created. To use another one, delete the current dataset.yaml file[/red]\")\n\t\t\texit(0)\n\n\tconsole.print(BANNER)\n\tdataset_name = click.prompt(\"Insert a name to your dataset: \")\n\n\tconsole.clear()\n\tsamples = click.prompt(\"How many samples per seach will be necessary? \",type=int)\n\n\tconsole.clear()\n\tconsole.print(\"[bold]Choose image resolution[/bold]\", justify=\"center\")\n\tconsole.print(\"\"\"\n\n[1] 512 pixels / 512 pixels [bold blue](recommended)[/bold blue]\n[2] 1024 pixels / 1024 pixels\n[3] 256 pixels / 256 pixels\n[4] 128 pixels / 128 pixels\n[5] Keep original image size\n\n[italic]ps: note that the aspect ratio of the image will [bold]not[/bold] be changed, so possibly the images received will have slightly different size[/italic]\n\t\t\n\t\t\"\"\")\n\n\n\timage_size_ratio = click.prompt(\"What is the desired image size ratio\", type=int)\n\twhile image_size_ratio < 1 or image_size_ratio > 5:\n\t\tconsole.print(\"[italic red]Invalid option, please choose between 1 and 5. [/italic red]\")\n\t\timage_size_ratio= click.prompt(\"\\nOption: \",type=int)\n\n\tif image_size_ratio == 1:\n\t\timage_size_ratio= 512\n\telif image_size_ratio == 2:\n\t\timage_size_ratio = 1024\n\telif image_size_ratio == 3:\n\t\timage_size_ratio = 256\n\telif image_size_ratio == 4:\n\t\timage_size_ratio= 128\n\telif image_size_ratio == 5:\n\t\timage_size_ratio = 0\n\n\tconsole.clear()\n\tconsole.print(\"[bold]Choose a resize method[/bold]\", justify=\"center\")\n\tconsole.print(\"\"\"\n\n[1] Resize image based on longer side\n[2] Resize image based on shorter side\n[3] Smartcrop\n\n[italic]ps: note that the aspect ratio of the image will [bold]not[/bold] be changed, so possibly the images received will have slightly different size[/italic]\n\t\t\n\t\t\"\"\")\n\tresize_method = click.prompt(\"Desired Image resize method: \", type=int)\n\twhile resize_method < 1 or resize_method > 3:\n\t\tconsole.print(\"[red]Invalid option[/red]\")\n\t\tresize_method = click.prompt(\"Choose method [1-3]: \")\n\n\tresize_method_options = ['','longer_side','shorter_side','smartcrop']\n\n\n\tconsole.clear()\n\tnumber_of_classes = click.prompt(\"How many image classes are required? 
\",type=int)\n\n\tdocument_dict = {\n \n \"DATASET_NAME\": dataset_name,\n \n \"SAMPLES_PER_SEARCH\": samples,\n \n \"IMAGE_SIZE\": image_size_ratio,\n \n \"RESIZE_METHOD\": resize_method_options[resize_method],\n \n \"CLASSES\": []\n \n}\n\n\tconsole.clear()\n\tconsole.print(\"[bold]Choose a search engine[/bold]\", justify=\"center\")\n\tconsole.print(\"\"\"\n\n[1] Duck GO [bold blue](recommended)[/bold blue]\n[2] Bing\n[3] Bing API [italic yellow](Requires API key)[/italic yellow]\n[4] Flickr API [italic yellow](Requires API key)[/italic yellow]\n\n\t\t\"\"\")\n\tsearch_engine= click.prompt(\"Select option:\", type=int)\n\twhile search_engine < 0 or search_engine > 4:\n\t\tconsole.print(\"[italic red]Invalid option, please choose between 1 and 4.[/italic red]\")\n\t\tsearch_engine = click.prompt(\"\\nOption: \", type=int)\n\n\tsearch_options = ['none', 'duckgo', 'bing', 'bing_api', 'flickr_api']\n\tdocument_dict['ENGINE'] = search_options[search_engine]\n\n\tif search_engine > 2:\n\t\tconsole.clear()\n\t\tconsole.print(f'Insert your [bold blue]{search_options[search_engine]}[/bold blue] API key')\n\t\tengine_api_key = click.prompt(\"API key: \", type=str)\n\t\tdocument_dict['API_KEY'] = engine_api_key\n\telse:\n\t\tdocument_dict['API_KEY'] = \"NONE\"\n\n\tsearch_engine = search_options[search_engine]\n\n\tfor x in range(number_of_classes):\n\t\tconsole.clear()\n\t\tclass_name = click.prompt(\"Class {x} name: \".format(x=x+1))\n\t\tconsole.clear()\n\n\t\tconsole.print(\"\"\"In order to achieve better results, choose several keywords that will be provided to the search engine to find your class in different settings.\n\t\n[bold blue]Example: [/bold blue]\n\nClass Name: [bold yellow]Pineapple[/bold yellow]\n[italic]keywords[/italic]: [underline]pineapple, pineapple fruit, ananas, abacaxi, pineapple drawing[/underline]\n\n\t\t\t\"\"\")\n\t\tkeywords = click.prompt(\"Type in all keywords used to find your desired class, separated by commas: \")\n\t\tdocument_dict['CLASSES'].append({'CLASS_NAME': class_name, 'SEARCH_KEYWORDS': keywords})\n \n\tif not os.path.exists(\"dataset.yaml\"):\n\t\tconsole.print(\"[bold]Creating a dataset configuration file...[/bold]\")\n\t\ttry:\n\t\t\tf = open(\"dataset.yaml\", \"w\")\n\t\t\tf.write(yaml.dump(document_dict))\n\t\t\tif f:\n\t\t\t\tconsole.clear()\n\t\t\t\tconsole.print(\"Dataset YAML file has been created sucessfully. Now run [bold blue]idt build[/bold blue] to mount your dataset!\")\n\t\texcept:\n\t\t\tconsole.print(\"[red]Unable to create file. Please check permission[/red]\")\n\t\t\n\telse:\n\t\tconsole.print(\"[red]A dataset.yaml is already created. To use another one, delete the current dataset.yaml file[/red]\")",
"def main(passed_arguments):\n\n # use real data as default\n scripts_path = os.path.abspath(os.path.join(PYTHON_PATH, 'scripts'))\n meta_path = os.path.abspath(os.path.join(scripts_path, 'meta.json'))\n manifest_path = os.path.abspath(os.path.join(scripts_path, 'manifest.csv'))\n\n # Locally, we can optionally have sample data\n if passed_arguments.sample and passed_arguments.database != 'remote':\n meta_path = os.path.abspath(os.path.join(scripts_path,\n 'meta_sample.json'))\n manifest_path = os.path.abspath(\n os.path.join(scripts_path, 'manifest_sample.csv'))\n\n # for case of more than one database choice default to the option with\n # the lowest risk if database is updated\n if passed_arguments.database == 'docker':\n database_choice = 'docker_database'\n drop_tables = True\n\n elif passed_arguments.database == 'docker_local':\n database_choice = 'docker_with_local_python'\n drop_tables = True\n\n elif passed_arguments.database == 'remote':\n database_choice = 'remote_database'\n drop_tables = False #TODO this is a hacky way to avoid dropping tables because it's not working with RDS...\n\n # Only users with additional admin privileges can rebuild the\n # remote database\n if not passed_arguments.update_only:\n database_choice = 'remote_database_master'\n\n # TODO: do we want to default to local or docker?\n elif passed_arguments.database == 'local':\n database_choice = 'local_database'\n drop_tables = True\n\n # universal defaults\n keep_temp_files = True\n\n # Instantiate and run the loader\n loader = LoadData(database_choice=database_choice, meta_path=meta_path,\n manifest_path=manifest_path,\n keep_temp_files=keep_temp_files,\n drop_tables=drop_tables)\n\n if passed_arguments.update_only:\n loader.update_database(passed_arguments.update_only)\n else:\n loader.rebuild()\n\n\n\n #TODO add in failures report here e.g. _failed_table_count",
"def main():\n bootstrapping.CommandStart('dataflow-sql', component_id='dataflow-sql')\n bootstrapping.CheckUpdates('dataflow-sql')\n update_manager.UpdateManager.EnsureInstalledAndRestart(\n ['dataflow-sql'], command=__file__)\n java_bin = java.RequireJavaInstalled('Dataflow SQL')\n bootstrapping.ExecuteJavaClass(\n java_bin,\n jar_dir=_JAR_DIR,\n main_jar=_MAIN_JAR,\n main_class=_CLASSNAME,\n main_args=['-nn', 'DFSQL', '-u', 'jdbc:beam:userAgent=DataflowSQL'])",
"def init(path=None, ipython=None):\n\n import os\n from .magics import register_magics\n from .dotmagic import load_ipython_extension as load_dot\n load_dot(ipython)\n register_magics(ipython)\n\n if path is None:\n path = os.getcwd()\n persistence_config.connect(path)\n\n return u\"ok\"",
"def setUp(self):\n\n pass\n # screenip2 = screenip_model.screenip(0, pd_obj_inputs, pd_obj_exp_out)\n # setup the test as needed\n # e.g. pandas to open screenip qaqc csv\n # Read qaqc csv and create pandas DataFrames for inputs and expected outputs",
"def initialize():\n dislin.disini()",
"def __init__(self):\n\t\tappionScript.AppionScript.__init__(self)\n\t\tself.rundata = {}\n\t\t### extra appionLoop functions:\n\t\tself._addDefaultParams()\n\t\tself.setFunctionResultKeys()\n\t\tself._setRunAndParameters()\n\t\t#self.specialCreateOutputDirs()\n\t\tself._initializeDoneDict()\n\t\tself.result_dirs={}",
"def _problem_run_experiments_initialise(self):\n pass",
"def init():\n\n @click.group(cls=cli.make_commands(__name__))\n def run():\n \"\"\"Cross-cell supervision tools.\"\"\"\n cli.init_logger('daemon.conf')\n\n return run",
"def main():\n\n # Chdir into script directory so to properly resolve relative paths in configuration\n os.chdir(os.path.dirname(os.path.realpath(__file__)) + \"/\")\n\n # Disable proxy as we access localhost, both to avoid overhead and issues with proxy misconfiguration\n os.environ['NO_PROXY'] = '*'\n\n # Stop any GraphDB server that we previously started and is possibly still around due to script interruption/crash\n shell(f\"{cmd_graphdb} stopall\")\n\n # Generate synthetic traces, both for populating the repositories and for the {sf, sp, pf, pp} tests\n prepare_traces()\n \n # Generate central repositories (if needed)\n for size, approach in itertools.product(sizes, approaches):\n prepare_repository(size, approach)\n \n # Run experiments (if needed)\n for size, approach in itertools.product(sizes, approaches):\n run_experiments(size, approach)",
"def __init__(__self__, *,\n password: pulumi.Input[str],\n username: pulumi.Input[str],\n alternate_data_source_parameters: Optional[pulumi.Input[Sequence[pulumi.Input['DataSourceParametersArgs']]]] = None):\n pulumi.set(__self__, \"password\", password)\n pulumi.set(__self__, \"username\", username)\n if alternate_data_source_parameters is not None:\n pulumi.set(__self__, \"alternate_data_source_parameters\", alternate_data_source_parameters)",
"def init(*args):\n global dataset\n dataset = args[0]",
"def experiment_init(self):\n pass",
"def __init__(self):\r\n self.label = \"ExamineOutputs\"\r\n self.description = \"This tool takes the output zip file from the ProcessGeogrid script\" + \\\r\n \"and creates a raster from each output NetCDF file.\" + \\\r\n \"\" + \\\r\n \"The Input should be a .zip file that was created using the WRF Hydro pre-\" + \\\r\n \"processing tools. The Output Folder parameter should be set to a non-existent \" +\\\r\n \"folder location. The tool will create the folder which will contain the results.\"\r\n self.canRunInBackground = True\r\n self.category = \"Utilities\""
]
| [
"0.6025968",
"0.5882253",
"0.5881036",
"0.5810399",
"0.5765473",
"0.5746006",
"0.5735826",
"0.57271963",
"0.5704829",
"0.56844497",
"0.56774366",
"0.5664643",
"0.5602337",
"0.55749357",
"0.5566321",
"0.55554545",
"0.5535359",
"0.5527598",
"0.5526025",
"0.55064017",
"0.5496822",
"0.54600734",
"0.5452922",
"0.5449839",
"0.5439937",
"0.5438941",
"0.5428743",
"0.54135907",
"0.5405638",
"0.5391316"
]
| 0.78781056 | 0 |
Gets all unique column names for all csv files in file path. | def get_unique_col_names(source_path: str, sink_path: str) -> None:
# Get all files in data directory
files = glob.glob(get_project_root() + '/' + source_path + '*.csv')
# Get all timestamps from filenames
[filename_to_timestamp(f, sink_path) for f in files] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_file_names(self):\n return glob.glob(os.path.join(self.path, '*.csv'))",
"def get_processed_csv_file_names(directory_path):\n\n\t__supported_extensions = ['.csv', ]\n\n\tprocessed_csv_file_names = list()\n\tlistdir = os.listdir(directory_path)\n\tfor file in listdir:\n\t\tif os.path.splitext(file)[1] in __supported_extensions:\n\t\t\tprocessed_csv_file_names.append(file)\n\n\t# sort so that we always read in a predefined order\n\t# key: smallest file first\n\tprocessed_csv_file_names.sort(key = lambda f: os.path.getsize(os.path.join(directory_path, f)))\n\treturn processed_csv_file_names",
"def get_norm_cols(files):\n col_src = dict()\n norm_cols = list()\n for fp in files:\n df = pd.read_csv(fp, nrows=5)\n for col in df.columns:\n if col in norm_cols:\n col_src[col].append(fp)\n else:\n norm_cols.append(col)\n col_src[col] = [fp]\n # breakpoint()\n return norm_cols",
"def all_columns(self):\r\n try:\r\n csv_file = open(self.file_path,'rbU')\r\n csv_rows = csv.DictReader(csv_file)\r\n _all_columns = csv_rows.fieldnames\r\n csv_file.close()\r\n return _all_columns\r\n except:\r\n return []",
"def get_all_columns_name(input_glob):\n reader = tf.python_io.TableReader(input_glob,\n selected_cols=\"\",\n excluded_cols=\"\",\n slice_id=0,\n slice_count=1,\n num_threads=0,\n capacity=0)\n schemas = reader.get_schema()\n return set([col_name for col_name, _, _ in schemas])",
"def get_file_names():\n all_file_names = []\n cwd = os.getcwd()\n # Change to dir with result files to analyze\n os.chdir(args.dir)\n \n for file in glob.glob(\"*.csv\"):\n all_file_names.append(file)\n\n # Return to current working directory\n os.chdir(cwd)\n return all_file_names",
"def list_csv_files():\n # See README.txt Ref#2.\n return [filename for filename in glob.glob(\"*.csv\")]",
"def get_column_names(path_to_test, key_to_infer):\n with open(path_to_test, 'r') as f:\n list_of_columns = f.readline()[:-1].split(',')\n list_of_columns.remove(key_to_infer[0])\n return list_of_columns",
"def infer_column_cats(dir: \"Path to working directoty.\") -> tuple:\n files = os.listdir(os.path.join(dir, \"data\"))\n cats = set([re.match(pattern=\".*_(.*).csv$\", string=file).group(1) for file in files])\n cols = set([re.match(pattern=\".*_(.*)_.*.csv$\", string=file).group(1) for file in files])\n return cats, cols",
"def get_csv_in_path(self, path):\n files = os.listdir((path))\n return files",
"def get_col_names(fname):\n with open(fname) as f:\n cols = f.readline().strip(\"#\\n\").lower()\n cols = (re.sub(r'\\(\\d+\\)', '', cols)\n .replace('/', '_to_')\n .split())\n return cols",
"def load_multiple_csv(self, path, column):\n df = pd.concat([pd.read_csv(f\"{path}/{f}\") for f in tqdm(os.listdir(f\"{path}/\"))], ignore_index=True)\n return df[column]",
"def task2(config: SimpleNamespace) -> None:\n get_unique_col_names(config.get('source'), config.get(\"sink\"))",
"def get_feature_names(factors_path):\n with open(factors_path) as f:\n col_names = f.readline().split(',')\n col_names[-1] = col_names[-1].strip('\\n')\n # Skip first field if empty (result of to_csv(save_index=True))\n if not col_names[0]:\n return col_names[1:]\n else:\n return col_names",
"def gene_names(filepath, complete=True):\n if complete:\n df_ucsc = pd.read_csv(filepath, sep='\\t', header=None)\n df_ucsc.columns = (\n ['number', 'gene_name', 'locus_link',\n 'ref_seq_num', 'genbank', 'uniprot', 'taxon']\n )\n gene_ucsc = set(\n [str(name).lower() for name in df_ucsc[\"gene_name\"]\n if len(str(name)) >1]\n )\n return gene_ucsc\n else:\n df_syn = pd.read_csv(filepath, sep='\\t', header=None)\n df_syn.columns = ['number', 'gene_name']\n gene_ucsc = set(\n [str(name).lower() for name in df_syn[\"gene_name\"]\n if len(str(name)) >1]\n )\n return gene_ucsc",
"def read_csv(self, path):\n for file in os.listdir(path):\n if file[-4:] == \".csv\":\n name = file[:-4]\n table_index_header = cfg.get_list(\"table_index_header\", name)\n filename = os.path.join(path, file)\n self.input_data[name] = pd.read_csv(\n filename,\n index_col=list(range(int(table_index_header[0]))),\n header=list(range(int(table_index_header[1]))),\n squeeze=(\"series\" not in name),\n )\n self.check_input_data(warning=False)\n self.add_meta_data()\n return self",
"def list_file(csv_directory):\n list_of_files = [os.path.join(dirpath, file_name)\n for dirpath, dirnames, files in os.walk(csv_directory)\n for file_name in fnmatch.filter(files, '*.csv')]\n return list_of_files",
"def read_csv_hash(path):\n dic = get_dtype(path,1000)\n col_names = [i for i in dic]\n dtypes = [dic[i] for i in col_names]\n str_cols = [i for i in col_names if dic[i]=='str'] \n dtypes = ['int32' if i=='str' else i for i in dtypes]\n\n gdf = gd.read_csv(path,names=col_names,dtype=dtypes,skiprows=1)\n return gdf,str_cols",
"def read_names(path):\n return SortedSet([os.path.basename(n) for n in glob.glob(path + os.sep + '*')])",
"def get_column_names(self):\n # here, creating combined column/volue column names for uniqueness\n colname_temp = list()\n for column in self.col_value:\n colname_temp.append(self.question_column + \"-\" + str(column))\n return colname_temp",
"def get_dictionary_from_csv(file):\n csv_file = file[:-4] # avoid .txt extension\n csv_file += \"_dico.csv\"\n dic = pd.read_csv(csv_file, delimiter=',')\n return list(dic.columns)",
"def get_csv_column(file_name, column):\n list = []\n with open('../test_files/' + file_name, 'r') as infile:\n for x in infile.readlines():\n x = x.replace('\\n', '')\n # splitting based on ',' that are encountered in csv files.\n #column-1 because the range start from 0 , so if user enters 1st column then its 0th column we need to fetch\n list.append(x.split(',')[column - 1])\n return list",
"def read_sample_csv(self):\n f = open('sample.csv')\n lines = f.readline()\n fields = lines.split(',')\n fieldnames_lst = [i.strip() for i in fields]\n f.close()\n return fieldnames_lst",
"def get_columns(names: list) -> list:\n\n csv = read_csv(Path(DATA_DIR, \"high_diamond_ranked_10min.csv\"))\n return [csv[i] for i in names]",
"def get_all_columns_from_csv(csvFile):\n dmlst = []\n with open(csvFile, 'r', encoding='iso-8859-3') as fh:\n for dm in fh.readline().split(';'):\n dmlst.append(dm.strip())\n return dmlst",
"def remove_columns(path, columns=None, anonymize = True, make_backups = True):\n\n if columns is None: columns = []\n if anonymize: columns.extend(PERSONAL_INFO_COLUMN_NAMES_LIST)\n files = []\n if os.path.isfile(path):\n files.extend(path)\n elif os.path.isdir(path):\n files.extend([f for f in os.listdir(path) if os.path.isfile(os.path.join(path, f))])\n else:\n raise TypeError(\"remove_columns() requires a file name or directory name\")\n for file_name in files:\n if make_backups:\n shutil.copyfile(file_name, \"_original_file_\" + file_name)\n table_df = pd.read_csv(file_name)\n if isinstance(columns, basestring):\n columns = [columns]\n table_df.drop(labels=columns, axis=1, inplace=True)\n table_df.to_csv(file_name, index=False)\n return files",
"def get_all_csv_files_in_directory(directory):\n return filter(lambda x: x[-4:] == \".csv\", os.listdir(directory))",
"def get_all_metrics(dir):\r\n file_lst = os.listdir(dir)\r\n file_lst = list(filter(lambda x: re.findall(r'\\.csv$',x), file_lst))\r\n return file_lst",
"def get_allbud_urls(filename, columns):\n import pandas as pd\n import os\n path = os.path.join(os.getcwd(), filename)\n\n\n data = pd.read_csv(path)\n\n return data[columns].tolist()",
"def list_files(path=None):\n if path == None:\n return glob.glob('Data/*.csv')\n else:\n return glob.glob(path+'*.csv')"
]
| [
"0.6773052",
"0.6419558",
"0.6371183",
"0.63577497",
"0.6126617",
"0.6117228",
"0.60081875",
"0.58998555",
"0.5802751",
"0.5797968",
"0.5726824",
"0.57106483",
"0.5674198",
"0.56591386",
"0.56252927",
"0.5609181",
"0.56086415",
"0.5538723",
"0.5481517",
"0.54493904",
"0.5416798",
"0.54149044",
"0.53927976",
"0.5390931",
"0.5374692",
"0.53456277",
"0.5337654",
"0.5337508",
"0.5313209",
"0.52985996"
]
| 0.7558615 | 0 |
Returns the product's value with the discount applied | def desconto(self, porcentagem):
return(self.__valor * (100 - porcentagem)/100) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def descricao_produto(self):\n return self._descricao_produto",
"def precio_descuento(self):\n descount_price = self.html.xpath(self.xpath_descount_price)\n precio_descuento = map(self.limpieza_precio, descount_price)\n return precio_descuento",
"def get_descuentos(self):\n return float(\n self.input.get_text(liquidaciones_historicas_catalog.DESCUENTOS).replace(\".\", \"\").replace(\",\", \".\"))",
"def get_total_descuentos(self):\n return float(self.input.get_text(liquidaciones_historicas_catalog.TOTAL_DESCUENTOS).replace(\".\", \"\"))",
"def descricao(self) -> str:\n return self._descricao",
"def descricao_produto(self, descricao_produto):\n self._descricao_produto = descricao_produto",
"def get_cost_val(self, var_name, pos):\n return self.get_other_value(self.cost, var_name, pos)",
"def descricao_status_cartao(self):\n return self._descricao_status_cartao",
"def atributo_complejidad():\n tipo_defecto = ItemTipos.objects.filter(es_supertipo=True)\n if tipo_defecto.count() > 0:\n attr1 = ItemAtributos.objects.filter(nombre='complejidad').\\\n filter(idtipoitem=tipo_defecto)\n return attr1\n return None",
"def descricao_status_conta(self):\n return self._descricao_status_conta",
"def _get_desc(self):\n return self.__desc",
"def get_valor_total_no_ciclo(self):\n valor = Decimal(self.coagricultor.coagricultor.identificador * 0.01) \\\n .quantize(TWOPLACES) # Centavos do identificador - decimal\n # com 2 casas\n for item in self.itens.all():\n if(item.cesta.coagricultor.ciclo.ativo == True):\n valor = valor + item.produto.valor\n\n return valor",
"def totaliza_nfce(self, valor_desconto_acrescimo, tipo_desconto_acrescimo=\"A$\"):\r\n # Estao sendo feitos apenas acrescimos no subtotal, o desconto\r\n\t# esta sendo no item.\r\n self.dll.aCFTotalizar_NFCe_Daruma.argstypes = [c_char_p * 2]\r\n\r\n status = self.dll.aCFTotalizar_NFCe_Daruma(tipo_desconto_acrescimo,\r\n\t\t\t valor_desconto_acrescimo)\r\n\tif status !=1:\r\n\t if status == -1:\r\n\t\traise Exception(\"-1: Erro encontrado na execucao do metodo.\")\r\n\t elif status == -35:\r\n\t\traise Exception(\"-35: Desconto ou Acrescimo nao pode ser maior que o valor total.\")\r\n\t elif status == -52:\r\n\t\traise Exception(\"-52: Erro ao gravar em arquivo temporario.\")\r\n elif status == -99:\r\n\t\traise Exception(\"-99: Parametros invalidos ou ponteiro nulo.\")\r\n\t elif status == -103:\r\n\t\traise Exception(\"-103: Dll auxiliar nao encontrada.\")\r\n\t elif status == -133:\r\n\t\traise Exception(\"-133: NFCe nao em fase de totalizacao.\")\r\n\t elif status == -120:\r\n\t\traise Exception(\"-120: Encontrada tag invalida.\")\r\n\t elif status == -121:\r\n\t\traise Exception(\"-121: Estrutura invalida.\")\r\n\t elif status == -122:\r\n\t\traise Exception(\"-122: Tag obrigatoria nao foi informada.\")\r\n else:\r\n raise Exception(\"0: Erro ao executar metodo de venda de itens.\")\r\n return {\"status\": status}",
"def valor(self):\n if self._inteiro != None:\n return str(self._inteiro)\n else:\n return \"Sem Valor\"",
"def get_desc(self):\n return self._desc",
"def getDescription(self):\n return self.getDbRecord().getColumnValue(DESCRIPTION_COLUMN)",
"def getDescQualidade(self, local='Itaquera'):\n quality = int(self.soup.find('td', text=local).parent.find('td', width=50).text)\n if quality >= 0 and quality <= 40:\n descript = 'Boa'\n elif quality >= 41 and quality <= 80:\n descript = 'Moderado'\n elif quality >= 81 and quality <= 120:\n descript = 'Ruim'\n elif quality >= 121 and quality <= 200:\n descript = 'Muito Ruim'\n elif quality >= 200:\n descript = 'Pessimo'\n return descript",
"def id_produto(self):\n return self._id_produto",
"def id_produto(self):\n return self._id_produto",
"def descricao_estagio(self):\n return self._descricao_estagio",
"def custo(EstadoRestaUm, resultante):\n return 1",
"def produto(conexao, valores):\n try:\n return '{0}'.format(float(valores[1]) * float(valores[2]))\n except:\n return 'ERRO'",
"def comando_informacao(self):\r\n return self.informacoes_ultima_nfce()",
"def get_cost_change_value(self, var_change_name, pos):\n return self.get_other_value(self.cost_change, var_change_name, pos)",
"def showdeliverycost(self):\n return self.deliverycost",
"def get_cash(self):\r\n return self.cash",
"def get_description(self):\n\t\treturn call_sdk_function('PrlVmCfg_GetDescription', self.handle)",
"def getCostoPromedioCompras(self, codigo):\n return self.conexion.ejecutarSQL(\"select sum(valor_total)/sum(cantidad) from productosXcompras where codigo_producto = '%s'\"%(codigo))[0][0]",
"def get_descent(self) -> pDecimal:\n assert \"FontDescriptor\" in self\n assert \"Descent\" in self[\"FontDescriptor\"]\n return self[\"FontDescriptor\"][\"Descent\"]",
"def get_description(self):\n\t\treturn call_sdk_function('PrlVmDev_GetDescription', self.handle)"
]
| [
"0.7405879",
"0.6725024",
"0.6323255",
"0.60881495",
"0.60817945",
"0.6061298",
"0.5971168",
"0.5968312",
"0.5792806",
"0.57581604",
"0.5727338",
"0.5680671",
"0.56648684",
"0.56408626",
"0.5625328",
"0.55662155",
"0.55641836",
"0.5562378",
"0.5562378",
"0.5552865",
"0.5525161",
"0.5523323",
"0.5523051",
"0.5505454",
"0.5478789",
"0.54766214",
"0.54711324",
"0.5467341",
"0.54585594",
"0.5446185"
]
| 0.70276046 | 1 |
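
The arithmetic in the record above is a plain percentage discount: valor * (100 - porcentagem) / 100. A quick standalone check with made-up values (the function below is a free-standing copy for illustration, not the class method from the record):

def desconto(valor: float, porcentagem: float) -> float:
    # Price after applying a percentage discount.
    return valor * (100 - porcentagem) / 100


assert desconto(200.0, 10) == 180.0  # 10% off 200
assert desconto(50.0, 0) == 50.0     # a 0% discount leaves the price unchanged
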
Sets a string designating the mutation type. | def set_mutation_type(self, mut_type=''):
if mut_type:
# specified mutation type
self.mutation_type = mut_type
else:
# interpret mutation type from attributes
if not self.is_valid:
# does not correctly fall into a category
self.mutation_type = 'not valid'
elif self.unknown_effect:
self.mutation_type = 'unknown effect'
elif self.is_missing_info:
self.mutation_type = 'missing'
elif self.is_substitution:
self.mutation_type = 'substitution'
elif self.is_deletion:
self.mutation_type = 'deletion'
elif self.is_insertion:
self.mutation_type = 'insertion'
# check if mutation at splice site
self.__set_splice_mutation() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def type(self, type: str):\n\n self._type = type",
"def type(self, string):\n\n\t\tself._interface.type(string)",
"def data_type_string(self, data_type_string):\n\n self._data_type_string = data_type_string",
"def set_type(self, type):\n self.type = type",
"def set_type(self, type):\n self.type = type",
"def setType(self,newtype):\n\t\tself.type = newtype;",
"def type(self, type):\n allowed_values = [\"I\", \"O\", \"T\"]\n if type not in allowed_values:\n raise ValueError(\n \"Invalid value for `type`, must be one of {0}\"\n .format(allowed_values)\n )\n self._type = type",
"def setString(self, name: unicode, value: unicode) -> None:\n ...",
"def type(self, type):\n self._type = type",
"def type(self, type):\n self._type = type",
"def set_type(self, type):\n self._type = type",
"def experiment_type(self, new_type: str) -> None:\n self._db_data.experiment_type = new_type",
"def _assign_type(self, type):\n if self.is_input:\n return 'data'\n else:\n return type",
"def set_strmem_type(self, *args):\n return _ida_hexrays.vdui_t_set_strmem_type(self, *args)",
"def set_type(self,typ):\n self._typ = typ\n if typ == 'Sine':\n self._type = 7\n elif typ == 'Sawtooth up':\n self._type = 0\n elif typ == 'Sawtooth down':\n self._type = 1\n elif typ == 'Square':\n self._type = 2\n elif typ == 'Triangle':\n self._type = 3\n elif typ == 'Pulse':\n self._type = 4\n elif typ == 'Bipolar pulse':\n self._type = 5\n elif typ == 'Sample and hold':\n self._type = 6\n else:\n print \"Unrecognized type keyword!\"\n print \"Please use only the following keywords:\"\n print \"Choices are :\"\n print \" 0. Saw up\"\n print \" 1. Saw down\"\n print \" 2. Square\"\n print \" 3. Triangle\"\n print \" 4. Pulse\"\n print \" 5. Bipolar pulse\"\n print \" 6. Sample and hold\"\n print \" 7. Modulated Sine\"\n self._type = 7\n super(self.__class__, self).setType(self, self._type):",
"def type(self, type):\n\n self._type = type",
"def type(self, type):\n\n self._type = type",
"def type(self, type):\n\n self._type = type",
"def type(self, type):\n\n self._type = type",
"def type(self, type):\n\n self._type = type",
"def type(self, type):\n\n self._type = type",
"def type(self, type):\n\n self._type = type",
"def type(self, type):\n\n self._type = type",
"def type(self, type):\n\n self._type = type",
"def type(self, type):\n\n self._type = type",
"def type(self, type):\n\n self._type = type",
"def type(self, type):\n\n self._type = type",
"def type(self, type):\n\n self._type = type",
"def type(self, type):\n\n self._type = type",
"def type(self, type):\n\n self._type = type"
]
| [
"0.6742497",
"0.664363",
"0.6386889",
"0.6342491",
"0.6342491",
"0.6250034",
"0.6208556",
"0.62068844",
"0.61978424",
"0.61978424",
"0.6162762",
"0.6158626",
"0.60422486",
"0.60403293",
"0.6031799",
"0.6026561",
"0.6026561",
"0.6026561",
"0.6026561",
"0.6026561",
"0.6026561",
"0.6026561",
"0.6026561",
"0.6026561",
"0.6026561",
"0.6026561",
"0.6026561",
"0.6026561",
"0.6026561",
"0.6026561"
]
| 0.78382325 | 0 |
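
The record above resolves a mutation-type label from boolean attributes checked in a fixed priority order: validity first, then unknown effect, missing info, and finally substitution / deletion / insertion. A standalone sketch of the same cascade, taking plain arguments instead of instance attributes (an illustration only; the final 'unclassified' fallback is an addition, the original simply leaves the attribute unset in that case):

def resolve_mutation_type(is_valid: bool,
                          unknown_effect: bool,
                          is_missing_info: bool,
                          is_substitution: bool,
                          is_deletion: bool,
                          is_insertion: bool) -> str:
    # Earlier checks win: an invalid entry is labelled 'not valid' even if
    # other flags happen to be set as well.
    if not is_valid:
        return 'not valid'
    if unknown_effect:
        return 'unknown effect'
    if is_missing_info:
        return 'missing'
    if is_substitution:
        return 'substitution'
    if is_deletion:
        return 'deletion'
    if is_insertion:
        return 'insertion'
    return 'unclassified'


assert resolve_mutation_type(True, False, False, True, False, False) == 'substitution'
assert resolve_mutation_type(False, True, False, False, False, False) == 'not valid'
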
Set the is_splicing_mutation flag | def __set_splice_mutation(self):
#len5ss = 6 # positive number since 5SS
#len3ss = -20 # use negative syntax like HGVS
if type(self.intron_pos) == int:
# SNV case, only one position
if self.len3ss <= self.intron_pos <= self.len5ss:
self.is_splicing_mutation = True
else:
self.is_splicing_mutation = False
elif type(self.intron_pos) == list:
# deletion case, now have a window to check overlap
if self.intron_pos[0]:
first_in_splice = self.len3ss <= self.intron_pos[0] <= self.len5ss
tmp_pos1 = self.intron_pos[0]
else:
first_in_splice = False
tmp_pos1 = 0
if self.intron_pos[1]:
second_in_splice = self.len3ss <= self.intron_pos[1] <= self.len5ss
tmp_pos2 = self.intron_pos[1]
else:
second_in_splice = False
tmp_pos2 = 0
# set splice site mutation flag
if first_in_splice or second_in_splice:
self.is_splicing_mutation = True
elif (tmp_pos1 == 0 and tmp_pos2 > self.len5ss) or (tmp_pos1 < self.len3ss and tmp_pos2 == 0):
self.is_splicing_mutation = True
else:
self.is_splicing_mutation = False
else:
self.is_splicing_mutation = False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_seen_op(self, boolean):\n\n self.seen_op = boolean",
"def toggle_scattering(self, setting=1):\n if setting not in [0, 1, \"on\", \"off\"]:\n raise ValueError(\n \"The input for the toggle the us of scattering \"\n 'in the model must \"on\" (1) or \"off\" (0)'\n )\n self.use_scat = 1 if setting == \"on\" else 0 if setting == \"off\" else setting",
"def local_inplace_setsubtensor(node):\r\n if isinstance(node.op, IncSubtensor) and not node.op.inplace:\r\n new_op = node.op.__class__(\r\n node.op.idx_list, inplace=True,\r\n set_instead_of_inc=node.op.set_instead_of_inc,\r\n destroyhandler_tolerate_aliased=node.op.destroyhandler_tolerate_aliased)\r\n new_node = new_op(*node.inputs)\r\n return [new_node]\r\n return False",
"def test_mutation(self):\n genotype = '0|0|2|0|0|2|0|0 1|0|0|1|1|0|0|0 0|1|0|0|0|0|2|1--1 7'\n search_space = {'dil_conv_3x3', 'dil_conv_5x5', 'dil_conv_7x7',\n 'skip_connect', 'clinc_3x3', 'clinc_7x7', 'avg_pool_3x3', 'max_pool_3x3'}\n\n mutator = Mutations(search_space, prob_mutation=0.8,\n prob_resize=0.99, prob_swap=0.99)\n mutated_g = mutator(genotype)\n mutated_g = mutator(mutated_g)\n mutated_g = mutator(mutated_g)\n a, s, d = get_conf(mutated_g)\n print('---->', mutated_g)\n self.assertGreaterEqual(10, d)\n self.assertTrue(s in (0, 1))\n a = torch.tensor(a)\n d = int((a.shape[0]*2)**.5)\n start = 0\n for i in range(d):\n end = int((i+1)*(i+2)/2)\n self.assertTrue(a[start:end, :].sum() > 0)\n start = end",
"def bitFlip_mutation(population, **kwargs):\r\n new_pop = []\r\n for indiv in population:\r\n mutation_mask = np.random.random(size=indiv.shape) < kwargs['mutation_prob']\r\n indiv[mutation_mask] = 1 - indiv[mutation_mask]\r\n new_pop.append(indiv.copy())\r\n return new_pop",
"def toggle_dropable(self,new_bool):\n self.dropablee = new_bool",
"def _mutate(self, noise_generator, sigma):\n\n mutation_indexes = torch.distributions.categorical.Categorical(\n torch.tensor([self.mutation_prob, 1 - self.mutation_prob])).sample([self.population_size]) > 0.5\n\n noise = noise_generator.sample([self.population_size, len(self.population[0])]).squeeze(-1)\n self.population[mutation_indexes] += noise[mutation_indexes] * sigma",
"def test_mutation2(self):\n genotype = '0|0|2|0|0|2|0|0 1|0|0|1|1|0|0|0 0|1|0|0|0|0|2|1--1 7'\n search_space = {'dil_conv_3x3', 'dil_conv_5x5', 'dil_conv_7x7',\n 'skip_connect', 'clinc_3x3', 'clinc_7x7', 'avg_pool_3x3', 'max_pool_3x3'}\n\n mutator = Mutations(search_space, prob_mutation=0.8,\n prob_resize=0.99, prob_swap=0.99)\n mutated_g = mutator(genotype)\n a, s, d = get_conf(mutated_g)\n mutator.update_strat_good(a)",
"def _get_dscp_mutation(self):\n return self.__dscp_mutation",
"def _set_dscp_mutation(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGListType(\"dscp_mutation_map_name\",dscp_mutation.dscp_mutation, yang_name=\"dscp-mutation\", rest_name=\"dscp-mutation\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='dscp-mutation-map-name', extensions={u'tailf-common': {u'info': u'Configure DSCP-Mutation map', u'cli-no-key-completion': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'dscp_mutation', u'cli-mode-name': u'dscp-mutation-$(dscp-mutation-map-name)'}}), is_container='list', yang_name=\"dscp-mutation\", rest_name=\"dscp-mutation\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure DSCP-Mutation map', u'cli-no-key-completion': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'dscp_mutation', u'cli-mode-name': u'dscp-mutation-$(dscp-mutation-map-name)'}}, namespace='urn:brocade.com:mgmt:brocade-qos-mls', defining_module='brocade-qos-mls', yang_type='list', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"dscp_mutation must be of a type compatible with list\"\"\",\n 'defined-type': \"list\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGListType(\"dscp_mutation_map_name\",dscp_mutation.dscp_mutation, yang_name=\"dscp-mutation\", rest_name=\"dscp-mutation\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='dscp-mutation-map-name', extensions={u'tailf-common': {u'info': u'Configure DSCP-Mutation map', u'cli-no-key-completion': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'dscp_mutation', u'cli-mode-name': u'dscp-mutation-$(dscp-mutation-map-name)'}}), is_container='list', yang_name=\"dscp-mutation\", rest_name=\"dscp-mutation\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure DSCP-Mutation map', u'cli-no-key-completion': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'dscp_mutation', u'cli-mode-name': u'dscp-mutation-$(dscp-mutation-map-name)'}}, namespace='urn:brocade.com:mgmt:brocade-qos-mls', defining_module='brocade-qos-mls', yang_type='list', is_config=True)\"\"\",\n })\n\n self.__dscp_mutation = t\n if hasattr(self, '_set'):\n self._set()",
"def _apply_mutation(pop, op, pb):\n for i in range(len(pop)):\n if random.random() < pb:\n pop[i], = op(pop[i])\n del pop[i].fitness.values\n return pop",
"def shuffle_opacities(mutated_genome):\n mutated_genome",
"def bit_flip_mutation(random, candidate, args):\r\n rate = args.setdefault('mutation_rate', 0.1)\r\n mutant = copy.copy(candidate)\r\n if len(mutant) == len([x for x in mutant if x in [0, 1]]):\r\n for i, m in enumerate(mutant):\r\n if random.random() < rate:\r\n mutant[i] = (m + 1) % 2\r\n return mutant",
"def swish_(t: Tensor) -> Tensor:\n ctx = get_current_context()\n g = ctx.graph\n pb_g = g._pb_graph\n\n check_in_graph(g, t=t)\n\n settings = ctx._get_op_settings(\"swish_inplace\")\n op = pb_g.createConnectedOp_SwishInplaceOp(\n {0: t.id}, \n {\n 0: g._create_tensor_id(\"swish_inplace_out\")\n }, \n settings\n )\n\n return Tensor._from_pb_tensor(op.outTensor(0))",
"def set_mutation_type(self, mut_type=''):\n if mut_type:\n # specified mutation type\n self.mutation_type = mut_type\n else:\n # interpret mutation type from attributes\n if not self.is_valid:\n # does not correctly fall into a category\n self.mutation_type = 'not valid'\n elif self.unknown_effect:\n self.mutation_type = 'unknown effect'\n elif self.is_missing_info:\n self.mutation_type = 'missing'\n elif self.is_substitution:\n self.mutation_type = 'substitution'\n elif self.is_deletion:\n self.mutation_type = 'deletion'\n elif self.is_insertion:\n self.mutation_type = 'insertion'\n\n # check if mutation at splice site\n self.__set_splice_mutation()",
"def set_mutation(self, mrate):\n if(mrate=='gene'):\n try: del self.mutation_rate #remove local mrates and use gene classes mrate\n except AttributeError: pass\n elif(mrate=='adapt'):\n self.mutation_rate = prng.uniform(self.mr_bounds[0], self.mr_bounds[1])\n else:\n self.__class__.mutation_rate = mrate",
"def permute_sentence(sentence, permutation_set):\n pass",
"def inversion_mutation(random, candidate, args):\r\n rate = args.setdefault('mutation_rate', 0.1)\r\n if random.random() < rate:\r\n size = len(candidate)\r\n p = random.randint(0, size-1)\r\n q = random.randint(0, size-1)\r\n p, q = min(p, q), max(p, q)\r\n s = candidate[p:q+1]\r\n return candidate[:p] + s[::-1] + candidate[q+1:]\r\n else:\r\n return candidate",
"def sub_inplace(a, b):",
"def change_contig(self, state):\n if state == Qt.Checked:\n self.layer.contiguous = True\n else:\n self.layer.contiguous = False",
"def toggle(self, env, pos):\n return False",
"def permute(self):\n raise NotImplementedError()",
"def is_mutation_finite(self):\n return self._info['mutation_finite']",
"def int_flip_mutation(individual):\n for i in range(len(individual.genome)):\n if random.random() < MUTATION_PROBABILITY:\n individual.genome[i] = random.randint(0, CODON_SIZE)\n return individual",
"def listDuplicate(self,permutations=True):\n ind,ok = self.testDuplicate(permutations)\n return ind[~ok]",
"def _mutate(self,arr,p_mut):\n mut = np.random.random_sample(arr.shape)<p_mut\n no_mut = ~mut\n mut_val = np.random.uniform(low=self.minval,high=self.maxval,size=arr.shape)\n return (no_mut*arr) + (mut*mut_val)",
"def test_random_valid_mutation_without_shrink(self):\n\n applied_mutation = defaultdict(int)\n N = self._min_trials(n_mutations=3)\n\n for i in range(N):\n ind = self.individuals[self.ind_strings[2]]\n ind_clone = self.gama._toolbox.clone(ind)\n new_ind, = mut_replace_primitive(ind_clone, self.gama._pset)\n if self._mutInsert_is_applied(ind, new_ind)[0]:\n applied_mutation['insert'] += 1\n elif self._mut_replace_terminal_is_applied(ind, new_ind)[0]:\n applied_mutation['terminal'] += 1\n elif self._mut_replace_primitive_is_applied(ind, new_ind)[0]:\n applied_mutation['primitive'] += 1\n else:\n self.fail(\"No mutation (or one that is unaccounted for) is applied.\")\n\n self.assertTrue(all([n > 0 for (mut, n) in applied_mutation.items()]))",
"def _op_inplace(self, op: str, other: t.Any) -> te.Self:\n if hasattr(self.__members__, op):\n if isinstance(other, InspectableSet):\n other = other.__members__\n if getattr(self.__members__, op)(other) is NotImplemented:\n return NotImplemented\n return self\n return NotImplemented",
"def scramble_mutate(\n self, crossover_pop_dict, test=False, mutation_prob={}\n ):\n\n print('Performing mutations')\n\n # Initialises dictionary of mutated child networks\n mutated_pop_dict = OrderedDict()\n\n # Scrambles the amino acid identities of randomly selected nodes\n for network_num in list(crossover_pop_dict.keys()):\n G = copy.deepcopy(crossover_pop_dict[network_num])\n\n scrambled_nodes = []\n aa_ids = []\n for node in list(G.nodes):\n if G.nodes()[node]['type'] == 'loop':\n continue\n\n if test is False:\n random_number = random.uniform(0, 1)\n else:\n random_number = mutation_prob[network_num][node]\n if random_number <= self.mutation_prob:\n scrambled_nodes.append(node)\n aa_ids.append(G.nodes()[node]['aa_id'])\n\n if test is False:\n random.shuffle(aa_ids)\n else:\n aa_ids = aa_ids[::-1]\n attributes = OrderedDict({\n node: {'aa_id': aa_id} for node, aa_id in zip(scrambled_nodes, aa_ids)\n })\n nx.set_node_attributes(G, values=attributes)\n\n mutated_pop_dict[network_num] = G\n\n return mutated_pop_dict",
"def __set_nucleotide_mutation(self, hgvs_str):\n self.__set_substitution_status(hgvs_str)\n self.__set_indel_status(hgvs_str)"
]
| [
"0.5286722",
"0.52487475",
"0.50532234",
"0.49155635",
"0.48051128",
"0.4798468",
"0.4796511",
"0.47922078",
"0.47871938",
"0.4733797",
"0.4712082",
"0.4694651",
"0.46598366",
"0.46041948",
"0.45968363",
"0.45936152",
"0.45646825",
"0.454141",
"0.45339727",
"0.45288524",
"0.45173258",
"0.44913304",
"0.44766074",
"0.44678918",
"0.44677728",
"0.4437675",
"0.4427822",
"0.44196716",
"0.4409661",
"0.44070306"
]
| 0.7626875 | 0 |
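
The splice-site check in the record above reduces to an interval test against the acceptor/donor windows, len3ss <= position <= len5ss, with the two-position (deletion) case treated as a window that may overlap the splice region. A reduced sketch of the single-position case, with the bounds passed in explicitly; the 6 and -20 defaults come from the commented-out values in the record and are illustrative only:

def in_splice_site(intron_pos: int, len3ss: int = -20, len5ss: int = 6) -> bool:
    # HGVS-style intron offsets: positive toward the 5' splice site,
    # negative toward the 3' splice site.
    return len3ss <= intron_pos <= len5ss


assert in_splice_site(3)        # inside the 5' splice-site window
assert not in_splice_site(-25)  # deeper into the intron than the -20 bound
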
Sets a flag for unknown effect (c.? or ?). | def __set_unknown_effect(self, hgvs_str):
unknown_effect_list = ['c.?', '?']
if hgvs_str.lower() in unknown_effect_list:
self.unknown_effect = True
elif hgvs_str.startswith("("):
self.unknown_effect = True
else:
self.unknown_effect = False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_fluorescence(self, flag):\n flag_ = c.c_int(flag)\n logger.debug('StSetFluorFlg(%i)', flag)\n self._lib.StSetFluorFlg(flag_)",
"def take_action_on_flags(self, *args, **kwargs):\r\n pass",
"def setFlag(self, flag, value) -> None:\n ...",
"def flag(self, reason):\r\n self._flagged = True\r\n self._flagged_reason = reason",
"def flag():\n pass",
"def set_flag(self, flag_name, value):\n flags = {'C':0, # Carry\n 'Z':1, # Zero\n 'I':2, # Interrupt mask\n 'D':3, # Decimal\n 'B':4, # Break\n 'V':6, # Overflow\n 'N':7} # Negative\n\n flag_reg = self.get_register('P')\n if value == 1:\n new_flag = flag_reg | 1 << flags[flag_name]\n else:\n new_flag = flag_reg & ~(1 << flags[flag_name])\n\n self.set_register('P', new_flag)",
"def set_force(state):\n global _FORCE\n _FORCE = bool(state)",
"def test_setFlagsSilently(self):\n self._flagsSilentlyTest('setFlags', b'FLAGS.SILENT')",
"def set_sensitive(self, flag):\n\t\tself._edit.set_sensitive(flag)",
"def set_flag(self, new):\n self.flag = new",
"def flag(self, i, j):\n # Does not allow starting a game with a flag\n if not self.is_game_over and self.is_initialized:\n if not self.revealed[i, j]:\n self.flags[i, j] = not self.flags[i, j]\n self.flags_pts.set_data(*np.where(self.flags)[::-1])\n self.title_txt.set_text('{}/{}'.format(np.count_nonzero(self.flags), self.n_mines))\n self.refresh_canvas()",
"def setDiscardFlag(self, flag, on=True):\r\n if on:\r\n self.__data.discardFlags |= flag\r\n else:\r\n self.__data.discardFlags &= ~flag",
"def set_mask_good(self, _=None):\n self.set_mask_type(\"good\")",
"def event11510537():\n header(11510537, 0)\n npc, start_flag, end_flag, new_flag = define_args('iiii')\n if_event_flag_on(1, 1493) # Still stuck at the archers.\n if_event_flag_on(1, EVENT.DarkAnorLondo)\n if_condition_true(0, 1)\n flag.disable_chunk(start_flag, end_flag)\n flag.enable(new_flag)\n chr.disable(npc)",
"def test_setFlagsSilentlyWithUnsolicitedData(self):\n self._flagsSilentlyWithUnsolicitedDataTest('setFlags', b'FLAGS.SILENT')",
"def secondary_effect(self, saved, caster, spell_effect):\n if saved:\n # print(\"Shoke off effect\")\n self.cleanup_effect(caster, spell_effect)\n else:\n pass",
"def flag_change(self, flag):\n\n if flag == 0:\n self.setText(\"全部暂停\")\n else:\n self.setText(\"全部继续\")\n self.flag = flag",
"def setFast(self, *args):\n return _libsbml.Reaction_setFast(self, *args)",
"def flag_set(self, flag):\n if self.flags & flag != 0:\n return True\n else:\n return False",
"def set_food_dependent_flags(self):\n # check if self.food <= self.population\n # change self.hungry\n pass",
"def event2600():\n header(2600)\n rune_effect, rune_flag = define_args('ii')\n\n if_player_has_special_effect(0, rune_effect)\n flag.disable_chunk(EVENT.SableRuneActive, EVENT.RhythmRuneActive)\n flag.enable(rune_flag)\n if_player_does_not_have_special_effect(0, rune_effect)\n restart()",
"def setFlag(flagbyte, pos, status):\n if status:\n return flagbyte | 2**pos\n else:\n return flagbyte & ~2**pos",
"def set_mark(self, perception: Perception) -> None:\n if self.mark.set_mark_using_condition(self.condition, perception):\n self.ee = False",
"def event_m20_11_x71(z52=_, z53=96960000):\n \"\"\"State 0,1: Cancel special effects\"\"\"\n ClearEnemySpEffect(z52, z53)\n \"\"\"State 2: End state\"\"\"\n return 0",
"def light(self, value: bool | int, /) -> None:",
"def eflags_set(self, bit: int, value: bool) -> None:\n if self.eflags_get(bit):\n if not value:\n self.eflags &= ~(1 << bit)\n else:\n if value:\n self.eflags |= 1 << bit",
"def on_off_action(speaker, action, args, soco_function, use_local_speaker_list):\n if action == \"group_mute\":\n speaker = speaker.group\n soco_function = \"mute\"\n np = len(args)\n if np == 0:\n state = \"on\" if getattr(speaker, soco_function) else \"off\"\n print(state)\n elif np == 1:\n arg = args[0].lower()\n if arg == \"on\":\n setattr(speaker, soco_function, True)\n elif arg == \"off\":\n setattr(speaker, soco_function, False)\n else:\n parameter_type_error(action, \"on|off\")\n return False\n return True",
"def set_danger(x, y, char):\n danger = False\n if char not in \".lph\":\n danger = True\n danger_buffer[y * 16 + x] = danger",
"def updateCurrentBehavior(self, gameState, action):\n self.behavior = \"attack\"",
"def setNoUglyHyphenation(self, value):\n self.setBooleanOption(4, value)"
]
| [
"0.6180999",
"0.60227305",
"0.59633523",
"0.58997005",
"0.5819151",
"0.5815705",
"0.580962",
"0.57771164",
"0.5773074",
"0.5726969",
"0.56456053",
"0.56246287",
"0.5571652",
"0.5487456",
"0.54704344",
"0.54658586",
"0.54554933",
"0.5448815",
"0.5436461",
"0.5383572",
"0.53551316",
"0.5338195",
"0.5321642",
"0.53216",
"0.5319412",
"0.5312538",
"0.5272532",
"0.52673787",
"0.5261849",
"0.5213982"
]
| 0.6313566 | 0 |
Sets a flag for missing data (? in HGVS syntax). | def __set_missing_info(self, hgvs_str):
if '?' in hgvs_str:
self.is_missing_info = True
else:
self.is_missing_info = False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def missing(self, value):\n self.MISSING = value",
"def no_data_value(self, no_data_value):\n\n self._no_data_value = no_data_value",
"def missing_information(self, info, field):\n raise NoData",
"def correct_miss_fill(ds):\n for d in ds.data_vars:\n try:\n ds[d].attrs.update({'missing_value': ds[d]._FillValue})\n except:\n pass\n return xr.decode_cf(ds)",
"def missing_values():\n print('Missings in the train data:', train_data.isnull().sum())",
"def data_missing(data):\n return type(data)._from_sequence([None, data[0]])",
"def isUndefinedData(program: ghidra.program.model.listing.Program, addr: ghidra.program.model.address.Address) -> bool:\n ...",
"def test_invalid_flag_record(self):\n log.info(\"START INTEG TEST INVALID\")\n\n self.clear_sample_data()\n self.clear_async_data()\n\n # Made-up data with all flags except the first set to True.\n # First flag is not a zero or one.\n filename = \"A1000003.DEC\"\n self.create_sample_data('invalid_A0000003.DEC', filename)\n\n # Start sampling.\n self.driver.start_sampling()\n\n # an event catches the sample exception\n self.assert_event('ResourceAgentErrorEvent')\n\n # Verify that the entire file has been read.\n self.assert_file_ingested(filename)\n log.info(\"END INTEG TEST INVALID\")",
"def _is_missing(self, item):\n pass",
"def setup_no_data_values(input_dataset, options):\n in_nodata = []\n if options.srcnodata:\n nds = list(map(float, options.srcnodata.split(',')))\n if len(nds) < input_dataset.RasterCount:\n in_nodata = (nds * input_dataset.RasterCount)[:input_dataset.RasterCount]\n else:\n in_nodata = nds\n else:\n for i in range(1, input_dataset.RasterCount+1):\n raster_no_data = input_dataset.GetRasterBand(i).GetNoDataValue()\n if raster_no_data is not None:\n in_nodata.append(raster_no_data)\n\n if options.verbose:\n print(\"NODATA: %s\" % in_nodata)\n\n return in_nodata",
"def handel_missing_values(dataset, missing_values_header, missing_label):\n \n return dataset[dataset[missing_values_header] != missing_label]",
"def AddMissingPresentValue(self, point: str) -> None:\n self._valid = False\n self._missing_present_values.append(point)",
"def handle_missing_values(dataset, missing_values_header, missing_label):\n\n return dataset[dataset[missing_values_header] != missing_label]",
"def set_missings(self, var, missing_map='default', hide_on_y=True,\n ignore=None):\n var = self.unroll(var)\n ignore = self.unroll(ignore, both='all')\n if not missing_map:\n for v in var:\n if 'missings' in self._meta['columns'][v]:\n del self._meta['columns'][v]['missings']\n elif missing_map == 'default':\n self._set_default_missings(ignore)\n else:\n if isinstance(missing_map, list):\n m_map = {'exclude': missing_map}\n else:\n m_map = org_copy.deepcopy(missing_map)\n for v in var:\n if v in ignore: continue\n v_m_map = self._clean_missing_map(v, m_map)\n if self._has_missings(v):\n self._meta['columns'][v].update({'missings': v_m_map})\n else:\n self._meta['columns'][v]['missings'] = v_m_map\n if hide_on_y:\n self.hiding(var, missing_map, 'y', True)\n\n return None",
"def test_invalid_flag_record(self):\n log.info(\"START QUAL TEST INVALID FLAG RECORD\")\n\n # Made-up data with all flags except the first set to True.\n # First flag is not a zero or one.\n self.clear_sample_data()\n self.event_subscribers.clear_events()\n self.assert_initialize()\n self.create_sample_data('invalid_A0000003.DEC', \"A1000003.DEC\")\n\n # Verify an event was raised and we are in our retry state.\n self.verify_queue_empty()\n self.assert_event_received(ResourceAgentErrorEvent, 10)\n self.assert_state_change(ResourceAgentState.STREAMING, 10)\n\n log.info(\"END QUAL TEST INVALID FLAG RECORD\")",
"def no_data_enabled(self, no_data_enabled):\n\n self._no_data_enabled = no_data_enabled",
"def a_flag(self):\n if self.datalogflag:\n self.datalog = DEFAULT_DATALOG_D3S",
"def d_flag(self):\n if self.datalog:\n self.datalogflag = True",
"def d_flag(self):\n if self.datalog:\n self.datalogflag = True",
"def _clean(self, dataset):\n # Replace missing values with numpy's NaN. The missing value is\n # usually 1e+20, but values can be like 1.0000002e+20, which is\n # different. Ergo the inequality.\n for var in dataset.data_vars.itervalues():\n if 'missing_value' in var.attrs:\n missing_data_value = var.missing_value\n try:\n var.values[var.values >= missing_data_value] = np.NaN\n except ValueError:\n print \"Encountered ValueError in {0}. Ignoring\".format(var.name)",
"def __missing__(self, key):\n global MISSING\n MISSING = key # For debugging - save name of missing key\n return INVALID",
"def missing(self):\n return self.MISSING",
"def fill_missing(self) -> None:\n\n self.fill_missing_rows()\n self.fill_missing_source_parameters()\n return",
"def data_missing() -> ExtensionArray:\n data_matrix = np.arange(\n 2 * 10 * 10 * 3,\n dtype=np.float_,\n ).reshape(2, 10, 10, 3)\n data_matrix[0, ...] = np.NaN\n grid_points = [\n np.arange(10),\n np.arange(10) / 10,\n ]\n\n return skfda.FDataGrid(data_matrix, grid_points=grid_points)",
"def missing_in_gn(self):\n return self._missing_gn_flags",
"def AddMissingPoint(self, point: str) -> None:\n self._valid = False\n self._missing_points.append(point)",
"def testPluginNoError(self):\n schema = self.dataset.makeMinimalSchema()\n task = lsst.meas.base.SingleFrameMeasurementTask(schema=schema, config=self.config)\n exposure, cat = self.dataset.realize(noise=100.0, schema=schema, randomSeed=0)\n task.run(cat, exposure)\n source = cat[0]\n self.assertFalse(source.get(self.algName + \"_flag\"))\n self.assertFalse(source.get(self.algName + \"_flag_containsNan\"))\n self.assertFalse(source.get(self.algName + \"_flag_edge\"))",
"def update_no_data_values(warped_vrt_dataset, nodata_values, options=None):\n # TODO: gbataille - Seems that I forgot tests there\n if nodata_values != []:\n temp_file = gettempfilename('-gdal2tiles.vrt')\n warped_vrt_dataset.GetDriver().CreateCopy(temp_file, warped_vrt_dataset)\n with open(temp_file, 'r') as f:\n vrt_string = f.read()\n\n vrt_string = add_gdal_warp_options_to_string(\n vrt_string, {\"INIT_DEST\": \"NO_DATA\", \"UNIFIED_SRC_NODATA\": \"YES\"})\n\n# TODO: gbataille - check the need for this replacement. Seems to work without\n# # replace BandMapping tag for NODATA bands....\n# for i in range(len(nodata_values)):\n# s = s.replace(\n# '<BandMapping src=\"%i\" dst=\"%i\"/>' % ((i+1), (i+1)),\n# \"\"\"\n# <BandMapping src=\"%i\" dst=\"%i\">\n# <SrcNoDataReal>%i</SrcNoDataReal>\n# <SrcNoDataImag>0</SrcNoDataImag>\n# <DstNoDataReal>%i</DstNoDataReal>\n# <DstNoDataImag>0</DstNoDataImag>\n# </BandMapping>\n# \"\"\" % ((i+1), (i+1), nodata_values[i], nodata_values[i]))\n\n # save the corrected VRT\n with open(temp_file, 'w') as f:\n f.write(vrt_string)\n\n corrected_dataset = gdal.Open(temp_file)\n os.unlink(temp_file)\n\n # set NODATA_VALUE metadata\n corrected_dataset.SetMetadataItem(\n 'NODATA_VALUES', ' '.join([str(i) for i in nodata_values]))\n\n if options and options.verbose:\n print(\"Modified warping result saved into 'tiles1.vrt'\")\n # TODO: gbataille - test replacing that with a gdal write of the dataset (more\n # accurately what's used, even if should be the same\n with open(\"tiles1.vrt\", \"w\") as f:\n f.write(vrt_string)\n\n return corrected_dataset",
"def _add_missing_keys(self):\n for k, v in self.defaults.items():\n if k not in self.data:\n self.data[k] = v\n\n self.save()",
"def test_no_update_on_data_element(self):\n no_update = self.admitgen.data.attrib['noupdate']\n self.assertEqual(no_update, '1', 'Incorrect noupdate flag')"
]
| [
"0.6888793",
"0.62723064",
"0.5965886",
"0.5911416",
"0.58497274",
"0.5812564",
"0.58124995",
"0.58106726",
"0.58079594",
"0.58027685",
"0.5800109",
"0.57520694",
"0.5685451",
"0.5645255",
"0.5604394",
"0.5580269",
"0.55797935",
"0.55784595",
"0.55784595",
"0.55740666",
"0.5564525",
"0.5540056",
"0.5513842",
"0.54962945",
"0.5493326",
"0.5491388",
"0.5416409",
"0.5402311",
"0.5398226",
"0.537249"
]
| 0.69292474 | 0 |
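
The two records above reduce to short string predicates on the HGVS text: the effect is treated as unknown for 'c.?', '?' or a description starting with '(', and information is treated as missing whenever a '?' appears anywhere in the string. A combined, self-contained sketch (the function names are made up for the example):

def has_unknown_effect(hgvs: str) -> bool:
    # 'c.?' / '?' or a parenthesised description means the effect is unknown.
    return hgvs.lower() in ('c.?', '?') or hgvs.startswith('(')


def has_missing_info(hgvs: str) -> bool:
    # Any '?' marks incomplete positional information.
    return '?' in hgvs


assert has_unknown_effect('c.?') and has_missing_info('c.?')
assert not has_unknown_effect('c.1521_1523delCTT')
assert has_missing_info('c.?_1523del3')
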
Parse the HGVS DNA mutation string to set attributes. Look at tests/test_nucleotide.py for examples on how specific HGVS strings should be parsed. | def __parse_hgvs_syntax(self, hgvs_str):
self.is_valid = True # assume initially the syntax is valid
if self.is_substitution:
sub_pattern = '(?:(\d+)([+-]\d+)?_)?(\d+)([+-]\d+)?([A-Z]+)>([A-Z]+)$'
matches = re.findall(sub_pattern, hgvs_str)
if matches:
init_pos, init_intron, reg_pos, reg_intron, initial, mutated = matches[0]
if not init_pos:
self.pos = int(reg_pos)
self.intron_pos = int(reg_intron) if reg_intron != '' else None
self.initial = initial
self.mutated = mutated
else:
init_pos = init_pos.strip('_') # remove separating underscore
self.pos = [int(init_pos), int(reg_pos)]
intron_tmp1 = int(init_intron) if init_intron != '' else None
intron_tmp2 = int(reg_intron) if reg_intron != '' else None
self.intron_pos = [intron_tmp1, intron_tmp2]
self.initial = initial
self.mutated = mutated
else:
self.is_valid = False
self.intron_pos = None
self.logger.debug('(Parsing-Problem) Invalid DNA Substitution: ' + hgvs_str)
return
elif self.is_deletion:
del_pattern = '(?:([0-9?]+)([-+]\d+)?(?:_))?([0-9?]+)([-+]\d+)?del([A-Z?0-9]+)$'
matches = re.findall(del_pattern, hgvs_str)
if matches:
init_pos, init_intron, reg_pos, reg_intron, del_nuc = matches[0]
if not init_pos:
# only one nucleotide deleted
self.pos = int(reg_pos) if reg_pos != '?' else reg_pos
self.intron_pos = int(reg_intron) if reg_intron != '' else None
self.mutated = ''
self.initial = del_nuc
else:
# more than one nucleotide deleted
init_pos = init_pos.strip('_') # remove '_' because of regex
pos1 = int(init_pos) if init_pos != '?' else init_pos
pos2 = int(reg_pos) if reg_pos != '?' else reg_pos
self.pos = [pos1, pos2]
intron_tmp1 = int(init_intron) if init_intron != '' else None
intron_tmp2 = int(reg_intron) if reg_intron != '' else None
self.intron_pos = [intron_tmp1, intron_tmp2]
self.mutated = ''
self.initial = del_nuc
else:
self.intron_pos = False
elif self.is_insertion:
ins_pattern = '(?:([0-9?]+)([-+]\d+)?(?:_))?([0-9?]+)([-+]\d+)?ins([A-Z?0-9]+)$'
matches = re.findall(ins_pattern, hgvs_str)
if matches:
init_pos, init_intron, reg_pos, reg_intron, ins_nuc = matches[0]
if not init_pos:
# only one nucleotide inserted
self.pos = int(reg_pos) if reg_pos != '?' else reg_pos
self.intron_pos = int(reg_intron) if reg_intron != '' else None
self.initial = ''
self.mutated = ins_nuc
else:
# more than one nucleotide inserted
init_pos = init_pos.strip('_') # remove '_' because of regex
pos1 = int(init_pos) if init_pos != '?' else init_pos
pos2 = int(reg_pos) if reg_pos != '?' else reg_pos
self.pos = [pos1, pos2]
intron_tmp1 = int(init_intron) if init_intron != '' else None
intron_tmp2 = int(reg_intron) if reg_intron != '' else None
self.intron_pos = [intron_tmp1, intron_tmp2]
self.initial = ''
self.mutated = ins_nuc
else:
self.intron_pos = None
elif self.unknown_effect:
# unknown effect for mutation. usually denoted as c.?
self.intron_pos = None
return
else:
# mutation did not fall into any of the categories. thus it likely
# has invalid syntax
self.is_valid = False
self.intron_pos = None
self.logger.debug('(Parsing-Problem) Invalid HGVS DNA syntax: ' + hgvs_str)
return | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __set_nucleotide_mutation(self, hgvs_str):\n self.__set_substitution_status(hgvs_str)\n self.__set_indel_status(hgvs_str)",
"def _parseAttributeString(self, line):\n attribute, value = line.partition(' ')[::2]\n self._setAttribute(attribute, value)",
"def format_hgvs_mutation(mutation_refseq_aa):\n if pd.isnull(mutation_refseq_aa):\n return (np.nan, np.nan, np.nan)\n refseq_base_id = mutation_refseq_aa.split(':')[0].split('.')[0]\n refseq_mutation = mutation_refseq_aa.split(':')[-1].lstrip('p.')\n refseq_mutation_pos = int(refseq_mutation[1:-1])\n return refseq_base_id, refseq_mutation, refseq_mutation_pos",
"def parseString(self, s):\n\n t0 = time.time()\n lines = self.getline(s)\n lineno = 0\n for l in lines:\n lineno += 1\n logging.log(10, \"raw line %05d: %s\" % (lineno, l))\n if len(l) == 0 or l[0] == '#':\n continue\n \n if l.startswith('typedef'):\n lidx = self.parseTypedef(l, lines)\n lineno += lidx\n else:\n # Not a typedef -- see if the 1st token matches a known\n # structure name. If not, create a new variable.\n sidx = l.find(' ')\n if sidx > 0:\n name = l[0:sidx]\n struct = self.structs.get(name.upper(), None)\n if struct:\n struct.parseOne(l)\n else:\n v = YPFVar(l, debug=0)\n if v.name in self.vars:\n newValue = v.value\n oldValue = self.vars[v.name].value\n if newValue != oldValue:\n print(\"Variable %s is being defined with a new value, overwriting it. old=%s, new=%s\" \n % (v.name, oldValue, newValue))\n self.vars[v.name] = v",
"def _parse_tags(self):\n tokens = self.tags_str[1:].split(\";\")\n self._tags = {\n k.strip(): v\n for token in tokens\n for k, v in [token.split(\"=\")]\n }",
"def parse(self):\n\n special_vars = {'amplification', 'copy number loss', \n 'epigenetic silencing', 'overexpression'}\n\n special_terms = ['dna binding domain', 'egfrv', 'truncating mutation',\n 'fusion', 'mutation', 'deletion', 'duplication', 'insertion',\n 'hypermethylation']\n\n var = self.var.lower()\n\n # Check if the stop sign '*' in the variation\n if '*' in var:\n self.stop_sign = True\n \n # Type \"exact match with special pre-difined variations\"\n if var in special_vars:\n self.type = var\n return\n \n # Type \"with special term\"\n for term in special_terms:\n if term in var:\n self.type = term\n return\n\n # Type \"point\": A123B or A123* or A123\n if re.match('^[a-z][0-9]+[a-z|*]?$', var):\n split = re.split('[0-9]+', var)\n self.type = 'point'\n self.start_amino = split[0]\n self.end_amino = split[1]\n s = re.search('[0-9]+', var)\n self.pos = int(s.group())\n return\n\n # Type \"del/ins/trunc/splice/dup/fs\": A123del or A123_B234del\n for suffix in ['del', 'ins', 'trunc', 'splice', 'dup', 'fs']:\n if suffix in var:\n self.type = self.alias_dict.get(suffix, suffix)\n self._parse_suffix(var, suffix)\n return\n\n print('[INFO] variation cannot be parsed: %s' % self.var)",
"def parseString(self, s):\n pass",
"def parse_mutations_uniprot_data(gff_data, start='start', stop='end', mut_types_to_skip=None):\n if mut_types_to_skip is None:\n mut_types_to_skip = [\n 'Chain', # This is the whole protein\n 'Region', # Those are better described in pfam database\n ]\n\n if 'Chain' not in mut_types_to_skip:\n mut_types_to_skip.append('Chain')\n\n # Selects the various mutations types in the dataset, except types contained in the above list\n mut_types = gff_data['mut'].loc[~gff_data['mut'].isin(mut_types_to_skip)].value_counts().index\n\n x = np.array([]).astype('str')\n y = np.array([]).astype('str')\n mutationgroups = np.array([]).astype('str')\n\n for mut_type in mut_types:\n\n # Selects the start and end protein coordinates of the mutation\n data_coord = gff_data[gff_data.mut == mut_type][[start, stop]]\n\n # Sort between the single and multi-site coordinates\n single_sites = data_coord.loc[data_coord[start] == data_coord[stop]]\n multi_sites = data_coord.loc[data_coord[start] != data_coord[stop]]\n\n # Joins the start and end coordinates into one string\n multi_sites['sep'] = \"-\"\n multi_sites[start] = \\\n multi_sites[start].map(str) \\\n + multi_sites['sep'] \\\n + multi_sites[stop].map(str)\n\n # Merge the single and multi-site coordinates in one columns and counts the occurrences\n sorted_data = single_sites[start].append(multi_sites[start]).value_counts()\n n = (len(sorted_data.index))\n\n x = np.append(x, np.array(sorted_data.index).astype('str'))\n y = np.append(y, np.array(sorted_data.values).astype('str'))\n mutationgroups = np.append(mutationgroups, np.repeat(mut_type, n))\n\n formatted_data = dict(\n x=x.tolist(),\n y=y.tolist(),\n mutationGroups=mutationgroups.tolist(),\n domains=[],\n )\n jsonschema.validate(formatted_data, MUT_DATA_SCHEMA)\n return formatted_data",
"def setup_parser(self) -> Dict[str, Any]:\n\n\n # % GALAT - SPP Single Point Positioning\n # % -------------------------------------\n # % Processing Option\n # % ------------------\n # % GNSS system(s) : GALILEO\n # % Orbit type : Broadcast - INAV\n # % Solution type : SPP\n # % Frequency : E1\n # % Elevation mask : 5.0 deg\n # % Time interval : 30.0 s\n # % Ionosphere opt : NeQuick-G\n # % Troposhere opt : GMF with GPT\n # % Obs start : 2020/01/04 00:00:00.0 GPST (week 2086 518400.0s)\n # % Obs end : 2020/01/04 23:59:30.0 GPST (week 2086 604770.0s)\n # % Epoch expected : 2880\n # % Epoch have : 2880\n # %\n # % Input file(s) : KOUG00GUF_R_20200040000_01D_30S_MO.rnx\n # % Input file(s) : CNES0030.20L\n # % Input file(s) : CNES0040.20L\n # % Input file(s) : igs14.atx\n # %\n # % RINEX header info\n # % ------------------\n # % Marker : KOUG 97301M402\n # % Receiver T/V/# : SEPT POLARX5TR 5.3.0 17323022503\n # % Antenna T/ /# : LEIAR25.R3 LEIT 10180007\n # % Position XYZ : 3855263.3407 -5049731.9986 563040.4252\n # % Antenna H/E/N : 0.0000 0.0000 0.0000\n self._parse_header()\n\n # ----+----1----+----2----+----3----+----4----+----5----+----6----+----7----+----8----+----9----+----0----+--\n # 2020/01/04 00:00:00 5.098466365 -52.639742999 106.8901 -0.603 -0.821 -0.349 1.018 0.349 \n # 2020/01/04 00:00:30 5.098466094 -52.639742684 107.4962 -0.633 -0.856 0.257 1.065 0.257 \n # 2020/01/04 00:01:00 5.098466030 -52.639740961 107.6125 -0.640 -1.047 0.373 1.228 0.373 \n return dict(\n names=(\n \"yyyymmdd\", \n \"hhmmss\", \n \"latitude\", \n \"longitude\", \n \"height\", \n \"dlatitude\", \n \"dlongitude\", \n \"dheight\",\n \"hpe\",\n \"vpe\",\n \"site_vel_3d\",\n \"pdop\",\n \"num_satellite_available\",\n \"num_satellite_used\",\n ),\n comments=\"%\",\n delimiter=(10, 9, 15, 15, 10, 9, 9, 9, 9, 9, 9, 6, 4, 4),\n dtype=(\"U10\", \"U9\", \"f8\", \"f8\", \"f8\", \"f8\", \"f8\", \"f8\", \"f8\", \"f8\", \"f8\", \"f8\", \"f8\", \"f8\"),\n autostrip=True,\n )",
"def __init__(self, line):\n (self.seqid, \n self.source, \n self.type, \n self.start, \n self.end, \n self.score, \n self.strand, \n self.phase, \n self.attributes_str) = line.strip().split('\\t')\n # preserve attribute order as a list of keys (attributes_order)\n attributes_list = self.attributes_str.split(';')\n self.attributes_order = [attr.split('=')[0] for attr in \n attributes_list]\n # store attribute keys and their values in a dictionary\n self.attributes = {attr.split('=')[0]:attr.split('=')[1] for attr in \n attributes_list}\n # rename the name attribute key to Name so it conforms to the\n # GFF3 specification, where Name is a reserved attribute key\n if 'name' in self.attributes:\n self.attributes['Name'] = self.attributes.pop('name')\n self.attributes_order[self.attributes_order.index('name')] = 'Name'",
"def parse(self,gff3_line):\r\n split_line = gff3_line.strip().split('\\t')\r\n self.seqid = split_line[0]\r\n self.source = split_line[1]\r\n self.type = split_line[2]\r\n self.start = int(split_line[3])\r\n self.end = int(split_line[4])\r\n self.score = split_line[5]\r\n self.strand = split_line[6]\r\n self.phase = split_line[7]\r\n self.attributes.parse(split_line[8])\r\n return self",
"def parse(cls, s):\n raise NotImplementedError",
"def test_parse_string(self):\n bb = parse(antlr4.InputStream(test_file))\n\n assert bb._var == {\"alpha\": 0.3423}\n\n expected = {\"name\": \"fock\", \"options\": {\"num_subsystems\": 1, \"cutoff_dim\": 7, \"shots\": 10}}\n assert bb.target == expected\n\n expected = [\n {\"op\": \"Coherent\", \"args\": [0.3423, np.sqrt(np.pi)], \"kwargs\": {}, \"modes\": [0]},\n {\"op\": \"MeasureFock\", \"args\": [], \"kwargs\": {}, \"modes\": [0]},\n ]\n\n assert bb.operations == expected",
"def test_mutation2(self):\n genotype = '0|0|2|0|0|2|0|0 1|0|0|1|1|0|0|0 0|1|0|0|0|0|2|1--1 7'\n search_space = {'dil_conv_3x3', 'dil_conv_5x5', 'dil_conv_7x7',\n 'skip_connect', 'clinc_3x3', 'clinc_7x7', 'avg_pool_3x3', 'max_pool_3x3'}\n\n mutator = Mutations(search_space, prob_mutation=0.8,\n prob_resize=0.99, prob_swap=0.99)\n mutated_g = mutator(genotype)\n a, s, d = get_conf(mutated_g)\n mutator.update_strat_good(a)",
"def __parse_sample_name(self):\n pattern = '(.*)(P53)(XR|NT)(\\d+)([A-Z]?|Ctr)?.*'\n vals = re.findall(pattern, self.sample_name.replace('_', ''))[0]\n self.cell_type = vals[0]\n self.treatment_type = vals[2]\n self.treatment_time = vals[3]\n if vals[3]:\n self.treatment_repeat = vals[4]",
"def test_parse_hsp(self):\n data = parse_hsp(self.hsp1)\n d = dict(list(zip(HSP_XML_FIELDNAMES, data)))\n self.assertEqual(float(d[\"BIT_SCORE\"]), 1023.46)\n self.assertEqual(float(d[\"SCORE\"]), 2645)\n self.assertEqual(float(d[\"E_VALUE\"]), 0.333)\n self.assertEqual(int(d[\"QUERY_START\"]), 4)\n self.assertEqual(int(d[\"QUERY_END\"]), 18)\n self.assertEqual(int(d[\"SUBJECT_START\"]), 5)\n self.assertEqual(int(d[\"SUBJECT_END\"]), 19)\n self.assertEqual(int(d[\"GAP_OPENINGS\"]), 0)\n self.assertEqual(int(d[\"ALIGNMENT_LENGTH\"]), 14)\n\n self.assertEqual(d[\"QUERY_ALIGN\"], \"ELEPHANTTHISISAHITTIGER\")\n self.assertEqual(d[\"MIDLINE_ALIGN\"], \"ORCA-WHALE\")\n self.assertEqual(d[\"SUBJECT_ALIGN\"], \"SEALSTHIS---HIT--GER\")",
"def parseString(self, val):\n \n if not isinstance(val, str):\n raise Exception('Input must be a string!')\n if len(val) < 9:\n raise Exception( 'ESDT Names must be 9 characters!' )\n self.setType( val[:2] )\n self.setTime( val[2] )\n self.setFrequency( val[3] )\n self.setHRes( val[4] )\n self.setVRes( val[5] )\n self.setGroup( val[6:9] )\n tmp = val.split('.')\n if len(tmp) == 4:\n self.setVersion( *tmp[1:] )",
"def process_from_string(self, string):\n tempval = struct.unpack_from('!IIIIIIIIIII', string)\n\n # Validate header where possible\n if tempval[0] != 0xd00dfeed:\n raise ValueError('DTB Magic Value not found')\n if tempval[5] not in [16, 17]:\n raise ValueError('DTB version is not supported. Must be 16 or 17.')\n\n # Validation okay, set values\n self.magic = tempval[0]\n self.totalsize = tempval[1]\n self.off_dt_struct = tempval[2]\n self.off_dt_strings = tempval[3]\n self.off_mem_rsvmap = tempval[4]\n self.version = tempval[5]\n self.last_comp_version = tempval[6]\n self.boot_cpuid_phys = tempval[7]\n self.size_dt_strings = tempval[8]\n self.size_dt_struct = tempval[9]",
"def __parseVals(self):# -> None\n #Do not load info for samples with missing info\n if \"./.\" in self.sampleString:\n self.missingInfo = True\n return\n \n #Split sample string into various info GT, AD, DP ...\n splitSample = self.sampleString.split(\":\")\n\n if self.atAltSite:\n #Load sample info from the format GT:AD:DP:GQ:PL\n self.GT = splitSample[0] \n try:\n self.DP = int(splitSample[2])\n except:\n self.DP = 0\n self.altReads = int(splitSample[1].split(\",\")[1])\n self.refReads = int(splitSample[1].split(\",\")[0])\n self.otherReads = self.DP - (self.refReads + self.altReads)\n\n if len(splitSample) >= 5:\n #GT:AD:DP:PGT:PID or GT:AD:DP:GQ:PL GT:AD:DP:PGT:PID:PL\n if \"PGT\" in self.format:\n self.PGT = splitSample[3] \n if \"PID\" in self.format:\n self.PID = splitSample[4] \n if \"GQ\" in self.format:\n self.GQ = float(splitSample[3])\n if \"PL\" in self.format:\n self.PL = splitSample[4]\n \n else:#At ref site sample format is GT:DP\n\n self.GT = splitSample[0]\n self.DP = float(splitSample[1])\n\n #All ref sites seem to have no occurance of alt reads NOT SURE YET\n self.refReads = float(self.DP)\n self.altReads = 0\n self.otherReads = 0",
"def parse_mutations(\n source, strict=True, encoding=\"utf8\", base64_metadata=True, table=None\n):\n sep = None\n if strict:\n sep = \"\\t\"\n if table is None:\n table = tables.MutationTable()\n header = source.readline().rstrip(\"\\n\").split(sep)\n site_index = header.index(\"site\")\n node_index = header.index(\"node\")\n try:\n time_index = header.index(\"time\")\n except ValueError:\n time_index = None\n derived_state_index = header.index(\"derived_state\")\n parent_index = None\n parent = NULL\n try:\n parent_index = header.index(\"parent\")\n except ValueError:\n pass\n metadata_index = None\n try:\n metadata_index = header.index(\"metadata\")\n except ValueError:\n pass\n for line in source:\n tokens = line.rstrip(\"\\n\").split(sep)\n if len(tokens) >= 3:\n site = int(tokens[site_index])\n node = int(tokens[node_index])\n if time_index is None or tokens[time_index] == tskit.TIME_UNITS_UNKNOWN:\n time = UNKNOWN_TIME\n else:\n time = float(tokens[time_index])\n derived_state = tokens[derived_state_index]\n if parent_index is not None:\n parent = int(tokens[parent_index])\n metadata = b\"\"\n if metadata_index is not None and metadata_index < len(tokens):\n metadata = tokens[metadata_index].encode(encoding)\n if base64_metadata:\n metadata = base64.b64decode(metadata)\n table.add_row(\n site=site,\n node=node,\n time=time,\n derived_state=derived_state,\n parent=parent,\n metadata=metadata,\n )\n return table",
"def parse_string(str_arr):\n def to_arr(str_arr):\n \"\"\" Switch to list. \"\"\"\n row = str_arr.replace(']', '').\\\n replace('[', '').\\\n replace('{', '').\\\n replace('}', '').\\\n replace('\\n', '').split()\n\n if '+-' in row:\n row = kludge_gvars(row)\n row = [gv.gvar(str(elt)) for elt in row]\n return np.array(row)\n\n def kludge_gvars(mangled):\n \"\"\"\n Occasionally, gvars get rendered to strings as, e.g.,\n -4e-06 +- 1 instead of -0.000006(1.0). This makes a\n complete mess of trying to parse the a list of gvar\n which has been turned into a string, e.g.,\n '[1(2) 1 +- 2 0.003(2)]', since the usual str.split()\n separates '1 +- 2' --> ['1','+-','2']. This function is\n a kludge which works around this difficulty.\n \"\"\"\n # Loop in reverse looking for '+-', but don't run off the end\n for idx in range(len(mangled) - 1)[::-1]:\n if mangled[idx + 1] == '+-':\n reunited = ' '.join(mangled[idx:idx + 3])\n # Throw away the used elements...\n for _ in range(3):\n mangled.pop(idx)\n # Repair the list with reunited gvar string\n mangled.insert(idx, reunited)\n return mangled\n\n return to_arr(str_arr)",
"def parse_mutants(self, mutant_names=None, mutation_number=None):\n if not mutant_names and not mutation_number:\n raise ValueError('Please specify either mutant_names or mutation_number')\n elif not mutant_names:\n mutant_names = [f\"Mutant {elt4}\" for elt4 in range(1, 2 ** mutation_number + 1)]\n elif not mutation_number:\n mutation_number = len(mutant_names)\n else:\n assert len(mutant_names) == 2 ** mutation_number, f'There are {len(mutant_names)} names, but was expecting {2 ** mutation_number}'\n return mutant_names, 2 ** mutation_number",
"def parse_info(s:str) -> dict:\n d = {}\n d[\"SVTYPE\"] = re.search(r'(?<=SVTYPE=)\\w+',s).group(0)\n d[\"SUPPORT\"] = re.search(r'(?<=SUPPORT=)\\d+',s).group(0)\n if d[\"SVTYPE\"] in [\"BND\"]:\n return d\n d[\"END\"] = re.search(r'(?<=END=)\\d+',s).group(0)\n if d[\"SVTYPE\"] in [\"INV\"]:\n return d\n d[\"SVLEN\"] = re.search(r'(?<=SVLEN=)(.*?)(?=;)',s).group(0)\n d[\"READS\"] = re.search(r'(?<=READS=)(.*?)(?=$)',s).group(0).split(\",\")\n if d[\"SVTYPE\"] == \"INS\":\n d[\"SEQS\"] = re.search(r'(?<=SEQS=)(.*?)(?=;)',s).group(0).split(\",\")\n return d",
"def parse_molecule_gcmc(dlstr):\n\n try:\n tok = dlstr.split()\n molecule = {\"id\": tok[0], \"molpot\": float(tok[1])}\n except (IndexError, TypeError):\n raise ValueError(\"Unrecognised GCMC Molecule: {!r}\".format(dlstr))\n\n return molecule",
"def parse_mutations(self, mutation_names=None, mutation_number=None):\n if not mutation_names and not mutation_number:\n raise ValueError('Please specify either mutation_names or mutation_number')\n elif not mutation_names:\n mutation_names = [f'M{i}' for i in range(1, mutation_number + 1)]\n elif not mutation_number:\n mutation_number = len(mutation_names)\n else:\n assert len(mutation_names) == mutation_number, f'There are {len(mutation_names)} names, but was expecting {mutation_number}'\n return mutation_names, mutation_number",
"def get_protein_hgvs(annotation):\n if '%3D' in annotation['HGVSp']: # \"%3D\" is \"=\"\n try:\n amino_acids = ''.join([protein_letters_1to3[x] for x in annotation['Amino_acids']])\n return \"p.\" + amino_acids + annotation['Protein_position'] + amino_acids\n except Exception, e:\n print 'Could not create HGVS for: %s' % annotation\n return annotation['HGVSp'].split(':')[-1]",
"def test_parse_metadata_state_descriptions(self):\r\n s = ''\r\n self.assertEqual(parse_metadata_state_descriptions(s), {})\r\n s = 'Study:Twin,Hand,Dog;BodySite:Palm,Stool'\r\n self.assertEqual(\r\n parse_metadata_state_descriptions(\r\n s), {'Study': set(['Twin', 'Hand', 'Dog']),\r\n 'BodySite': set(['Palm', 'Stool'])})\r\n\r\n # category names with colons i. e. ontology-derived\r\n s = 'Study:Twin,Hand,Dog;site:UBERON:feces,UBERON:ear canal;' +\\\r\n 'env_feature:ENVO:farm soil,ENVO:national park'\r\n self.assertEqual(parse_metadata_state_descriptions(s), {'Study':\r\n set([\r\n 'Twin', 'Hand', 'Dog']), 'site': set(['UBERON:feces',\r\n 'UBERON:ear canal']), 'env_feature': set(['ENVO:farm soil',\r\n 'ENVO:national park'])})\r\n\r\n s = \"Treatment:A,B,C;env_matter:ENVO:nitsol,ENVO:farm soil;env_biom:\" +\\\r\n \"ENVO:Tropical dry (including Monsoon forests) and woodlands,\" +\\\r\n \"ENVO:Forest: including woodlands;country:GAZ:Persnickety Islands\" +\\\r\n \",St. Kitt's and Nevis\"\r\n self.assertEqual(parse_metadata_state_descriptions(s), {\"country\":\r\n set([\r\n \"GAZ:Persnickety Islands\", \"St. Kitt's and Nevis\"]),\r\n \"env_biom\": set([\"ENVO:Tropical dry (including Monsoon forests) \" +\r\n \"and woodlands\", \"ENVO:Forest: including woodlands\"]), \"env_matter\":\r\n set([\r\n \"ENVO:nitsol\", \"ENVO:farm soil\"]), 'Treatment': set([\"A\", \"B\",\r\n \"C\"])})",
"async def test_coding_dna_silent_mutation(test_handler,\n coding_dna_silent_mutation,\n braf_gene_context):\n resp = await test_handler.normalize(\"NM_004333.4:c.1799= \")\n assertion_checks(resp.variation_descriptor, coding_dna_silent_mutation,\n \"NM_004333.4:c.1799=\")\n\n fixture_id = \"normalize.variation:NM_004333.4%3Ac.1799%3D\"\n\n resp = await test_handler.normalize(\"ENST00000288602.11:c.1799=\")\n assert resp.variation_descriptor.id == \\\n \"normalize.variation:ENST00000288602.11%3Ac.1799%3D\"\n resp.variation_descriptor.id = fixture_id\n assertion_checks(resp.variation_descriptor, coding_dna_silent_mutation,\n \"ENST00000288602.11:c.1799=\")\n\n # TODO: What to do for older Ensembl transcripts that aren\"t found\n # in seqrepo or UTA\n # resp = await test_handler.normalize(\"ENST00000288602.6:c.1799=\")\n # assert_coding_dna_genomic_silent_mutation(resp, braf_gene_context,\n # 1798, 1799)\n # assert resp.variation_descriptor.id == \"normalize.variation:ENST00000288602.6%3Ac.1799%3D\" # noqa: E501\n # assert resp.variation_descriptor.label == \"ENST00000288602.6:c.1799=\"\n # assert resp.variation_descriptor.molecule_context == \"transcript\"\n\n resp = await test_handler.normalize(\"BRAF c.1799=\")\n assert resp.variation_descriptor.id == \"normalize.variation:BRAF%20c.1799%3D\"\n resp.variation_descriptor.id = fixture_id\n assertion_checks(resp.variation_descriptor, coding_dna_silent_mutation,\n \"BRAF c.1799=\")\n\n resp = await test_handler.normalize(\" BRAF V600E c.1799= \")\n assert resp.variation_descriptor.id == \\\n \"normalize.variation:BRAF%20V600E%20c.1799%3D\"\n resp.variation_descriptor.id = fixture_id\n assertion_checks(resp.variation_descriptor, coding_dna_silent_mutation,\n \"BRAF V600E c.1799=\")",
"def parse_string(self, data):\n pass",
"def _parse_data(data: str) -> Tuple[str, str, str, int, int, int, str]:\n\n phg = None\n rng = None\n dfs = None\n course = None\n speed = None\n altitude = None\n comment = None\n\n if re.match(r'^PHG[0-9]{4}', data[:7]):\n # Packet has a PHG (power, antenna height/gain/directivity) value\n phg = data[3:7]\n logger.debug(\"PHG is {}\".format(phg))\n data = data[7:]\n\n elif re.match('^RNG[0-9]{4}', data[:7]):\n # Packet has an RNG (radio range) value\n rng = data[3:7]\n logger.debug(\"RNG is {}\".format(rng))\n data = data[7:]\n\n elif re.match('^DFS[0-9]{4}', data[:7]):\n # Packet has a DFS (DF signal strength, antenna height/gain/directivity) value\n dfs = data[3:7]\n logger.debug(\"DFS is {}\".format(dfs))\n data = data[7:]\n\n elif re.match('^[0-9]{3}/[0-9]{3}', data[:7]):\n # Packet has course and speed values\n course = int(data[:3])\n speed = int(data[4:7])\n logger.debug(\"Course is {}, speed is {}\".format(course, speed))\n data = data[7:]\n\n # TODO - parse BRG/NRQ\n\n # Check for comment\n if len(data) > 0:\n\n # Check for altitude\n # As per APRS 1.01 C6 P26, altitude as /A=nnnnnn may appear anywhere in the comment\n has_altitude = re.match('.*/A=([0-9]{6}).*', data)\n if has_altitude:\n # TODO - fix altitude format\n altitude = int(has_altitude.groups()[0])\n logger.debug(\"Altitude is {} ft\".format(altitude))\n\n # Strip out the altitude from the comment\n data = re.sub(r'/A=[0-9]{6}', \"\", data)\n\n # Set the comment as the remainder of the information field\n comment = data\n logger.debug(\"Comment is {}\".format(comment))\n\n return (phg, rng, dfs, course, speed, altitude, comment)"
]
| [
"0.57316065",
"0.5169754",
"0.5088486",
"0.49869543",
"0.4979097",
"0.4970234",
"0.48902816",
"0.48882684",
"0.4880839",
"0.48786032",
"0.4860372",
"0.48560435",
"0.48316512",
"0.48309886",
"0.4803737",
"0.4778657",
"0.47782975",
"0.476909",
"0.47433475",
"0.47147992",
"0.47088706",
"0.47050023",
"0.47049302",
"0.46965852",
"0.46916154",
"0.4689887",
"0.46743178",
"0.46658966",
"0.46658114",
"0.46639797"
]
| 0.6598335 | 0 |
Variables for clients scalar aggregation. | def clients_scalar_aggregates(**kwargs):
attributes_list = [
"client_id",
"ping_type",
"os",
"app_version",
"app_build_id",
"channel",
]
attributes_type_list = ["STRING", "STRING", "STRING", "INT64", "STRING", "STRING"]
user_data_attributes_list = ["metric", "metric_type", "key"]
return dict(
attributes=",".join(attributes_list),
attributes_list=attributes_list,
attributes_type=",".join(
f"{name} {dtype}"
for name, dtype in zip(attributes_list, attributes_type_list)
),
user_data_attributes=",".join(user_data_attributes_list),
user_data_type="""
ARRAY<
STRUCT<
metric STRING,
metric_type STRING,
key STRING,
agg_type STRING,
value FLOAT64
>
>
""",
**kwargs,
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _aggregate(self, *params): \n serialized_params = np.array([self._serialize(client) for client in params])\n serialized_aggregation = self._aggregate(*serialized_params)\n aggregated_weights = self._deserialize(serialized_aggregation)\n \n return aggregated_weights",
"def _aggregation_target(self):\n ...",
"def calculate_vars(self):\n pass",
"def _aggregate(self, *params):\n clients_params = np.array(params)\n mean = super()._aggregate(*params)\n noise = np.random.normal(loc=0.0, scale=self._noise_mult*self._clip/len(clients_params), size=mean.shape) \n return mean + noise",
"def aggregate_client(df, group_vars, df_names):\n \n # Aggregate the numeric columns\n df_agg = agg_numeric(df, parent_var= group_vars[0], df_name = df_names[0])\n \n # If there are categorical variables\n if any(df.dtypes == 'category'):\n \n # Count the categorical columns\n df_counts = agg_categorical(df, parent_var= group_vars[0], df_name = df_names[0])\n\n # Merge the numeric and categorical\n df_by_loan = df_counts.merge(df_agg, on = group_vars[0], how = 'outer')\n\n gc.enable()\n del df_agg, df_counts\n gc.collect()\n\n # Merge to get the client id in dataframe\n df_by_loan = df_by_loan.merge(df[[group_vars[0], group_vars[1]]], on = group_vars[0], how = 'left')\n\n # Remove the loan id\n df_by_loan = df_by_loan.drop(columns = [group_vars[0]])\n\n # Aggregate numeric stats by column\n df_by_client = agg_numeric(df_by_loan, parent_var = group_vars[1], df_name = df_names[1])\n\n \n # No categorical variables\n else:\n # Merge to get the client id in dataframe\n df_by_loan = df_agg.merge(df[[group_vars[0], group_vars[1]]], on = group_vars[0], how = 'left')\n \n gc.enable()\n del df_agg\n gc.collect()\n \n # Remove the loan id\n df_by_loan = df_by_loan.drop(columns = [group_vars[0]])\n \n # Aggregate numeric stats by column\n df_by_client = agg_numeric(df_by_loan, parent_var = group_vars[1], df_name = df_names[1])\n \n # Memory management\n gc.enable()\n del df, df_by_loan\n gc.collect()\n\n return df_by_client",
"def _get_aggregated_results(self):\n gradients = self.gradients\n client_traj_infos = flatten_lists(self.client_traj_infos)\n client_opt_infos = self._combine_client_opt_infos(self.client_opt_infos)\n \n self.gradients = []\n self.client_traj_infos = []\n self.client_opt_infos = []\n\n return gradients, client_traj_infos, client_opt_infos",
"def get_client_vars(self):\n with self.graph.as_default():\n client_vars = self.sess.run(tf.trainable_variables())\n return client_vars",
"def client_aggregated_by_id_view(request):\n # Check connected\n if not check_connected(request):\n raise exc.HTTPForbidden()\n\n id = request.matchdict['id']\n result = request.dbsession.query(Client).filter(\n Client.id == id).first()\n \n client = {\n 'id': result.id,\n 'nom': _set_client_aggregated_name(result),\n 'type_client': result.type_client\n }\n\n return client",
"def values():",
"def _getValue( self, client ):\n\t\treturn client.getValue( self.schema )",
"def build_aggregator(compression_flags, dp_flags, num_clients,\n num_clients_per_round, num_rounds, client_template):\n\n clip, epsilon = dp_flags['l2_norm_clip'], dp_flags['epsilon']\n # No DP (but still do the clipping if necessary).\n if epsilon is None:\n agg_factory = tff.aggregators.UnweightedMeanFactory()\n if clip is not None:\n assert clip > 0, 'Norm clip must be positive.'\n agg_factory = tff.aggregators.clipping_factory(clip, agg_factory)\n logging.info('Using vanilla sum aggregation with clipping %s', clip)\n return agg_factory\n\n # Parameters for DP\n assert epsilon > 0, f'Epsilon should be positive, found {epsilon}.'\n assert clip is not None and clip > 0, f'Clip must be positive, found {clip}.'\n sampling_rate = float(num_clients_per_round) / num_clients\n delta = dp_flags['delta'] or 1.0 / num_clients # Default to delta = 1 / n.\n dim = get_total_dim(client_template)\n\n logging.info('Shared DP Parameters:')\n logging.info(\n pprint.pformat({\n 'epsilon': epsilon,\n 'delta': delta,\n 'clip': clip,\n 'dim': dim,\n 'sampling_rate': sampling_rate,\n 'num_clients': num_clients,\n 'num_clients_per_round': num_clients_per_round,\n 'num_rounds': num_rounds\n }))\n\n # Baseline: continuous Gaussian\n if dp_flags['dp_mechanism'] == 'gaussian':\n noise_mult = accounting_utils.get_gauss_noise_multiplier(\n target_eps=epsilon,\n target_delta=delta,\n target_sampling_rate=sampling_rate,\n steps=num_rounds)\n # Operations include clipping on client and noising + averaging on server;\n # No MeanFactory and ClippingFactory needed.\n agg_factory = tff.aggregators.DifferentiallyPrivateFactory.gaussian_fixed(\n noise_multiplier=noise_mult,\n clients_per_round=num_clients_per_round,\n clip=clip)\n logging.info('Gaussian Parameters:')\n logging.info({'noise_mult': noise_mult})\n\n # Distributed Discrete Gaussian\n elif dp_flags['dp_mechanism'] == 'ddgauss':\n padded_dim = pad_dim(dim)\n\n k_stddevs = compression_flags['k_stddevs'] or 2\n beta = compression_flags['beta']\n bits = compression_flags['num_bits']\n\n # Modular clipping has exclusive upper bound.\n mod_clip_lo, mod_clip_hi = -(2**(bits - 1)), 2**(bits - 1)\n\n gamma = accounting_utils.get_ddgauss_gamma(\n q=sampling_rate,\n epsilon=epsilon,\n l2_clip_norm=clip,\n bits=bits,\n num_clients=num_clients_per_round,\n dimension=padded_dim,\n delta=delta,\n beta=beta,\n steps=num_rounds,\n k=k_stddevs,\n sqrtn_norm_growth=False)\n\n local_stddev = accounting_utils.get_ddgauss_noise_stddev(\n q=sampling_rate,\n epsilon=epsilon,\n l2_clip_norm=clip,\n gamma=gamma,\n beta=beta,\n steps=num_rounds,\n num_clients=num_clients_per_round,\n dimension=padded_dim,\n delta=delta)\n\n logging.info('DDGauss Parameters:')\n logging.info(\n pprint.pformat({\n 'bits': bits,\n 'beta': beta,\n 'dim': dim,\n 'padded_dim': padded_dim,\n 'gamma': gamma,\n 'k_stddevs': k_stddevs,\n 'local_stddev': local_stddev\n }))\n\n # Build nested aggregators.\n agg_factory = tff.aggregators.SumFactory()\n # 1. Modular clipping.\n agg_factory = modular_clipping_factory.ModularClippingSumFactory(\n clip_range_lower=mod_clip_lo,\n clip_range_upper=mod_clip_hi,\n inner_agg_factory=agg_factory)\n\n # 2. DPFactory that uses the compressed_query.\n compressed_query = build_compressed_dp_query(\n mechanism='ddgauss',\n clip=clip,\n padded_dim=padded_dim,\n gamma=gamma,\n stddev=local_stddev,\n beta=beta,\n client_template=client_template)\n\n agg_factory = tff.aggregators.DifferentiallyPrivateFactory(\n query=compressed_query, record_aggregation_factory=agg_factory)\n\n # 3. 
L2 norm clipping as the first step.\n agg_factory = tff.aggregators.clipping_factory(\n clipping_norm=clip, inner_agg_factory=agg_factory)\n\n # 4. Apply a MeanFactory at last (mean can't be part of the discrete\n # DPQueries (like the case of Gaussian) as the records may become floats\n # and hence break the decompression process).\n agg_factory = tff.aggregators.UnweightedMeanFactory(\n value_sum_factory=agg_factory)\n\n else:\n raise ValueError(f'Unsupported mechanism: {dp_flags[\"dp_mechanism\"]}')\n\n return agg_factory",
"def clients_histogram_aggregates(**kwargs):\n attributes_list = [\n \"sample_id\",\n \"client_id\",\n \"ping_type\",\n \"os\",\n \"app_version\",\n \"app_build_id\",\n \"channel\",\n ]\n fixed_attributes = [\"app_version\", \"channel\"]\n cubed_attributes = [x for x in attributes_list if x not in fixed_attributes]\n return dict(\n attributes_list=attributes_list,\n attributes=\",\".join(attributes_list),\n cubed_attributes=cubed_attributes,\n attribute_combinations=compute_datacube_groupings(cubed_attributes),\n metric_attributes=\"\"\"\n metric,\n metric_type,\n key,\n agg_type\n \"\"\",\n **kwargs,\n )",
"def variables(self):\n return ()",
"def aggregateFunction():\r\n global aggFunc\r\n aggFunc = []\r\n for objFunc in P_prime:\r\n aggFunc.append(objFunc[0]*FileSettings.settingsdict['weights'][0] +\r\n objFunc[1]*FileSettings.settingsdict['weights'][1] +\r\n objFunc[2]*FileSettings.settingsdict['weights'][2] +\r\n objFunc[3]*FileSettings.settingsdict['weights'][3])\r\n return aggFunc",
"def fetch_aggregation(self):\n return None",
"def IpTrafficAnalysis(client):\n\tindex = \"netflow*\"\n\tbucket1 = \"src_addr\"\n\tbucket2 = \"dst_addr\"\n\t\n\t#aggregate ipv4 flows\n\tbucket1DocValue = \"netflow.ipv4_src_addr\"\n\tbucket2DocValue = \"netflow.ipv4_dst_addr\"\t\n\tqDict = QueryBuilder().BuildDoubleAggregateQuery(bucket1, bucket2, bucket1DocValue, bucket2DocValue, level1BucketType=\"terms\", level2BucketType=\"terms\", level1DocValueType=\"field\", level2DocValueType=\"field\", size=0)\n\tjsonBucket = client.aggregate(index, qDict)\n\taggDict_Ipv4 = jsonBucket[\"aggregations\"]\n\t#aggregate ipv6 flows\n\tbucket1DocValue = \"netflow.ipv6_src_addr\"\n\tbucket2DocValue = \"netflow.ipv6_dst_addr\"\n\tqDict = QueryBuilder().BuildDoubleAggregateQuery(bucket1, bucket2, bucket1DocValue, bucket2DocValue, level1BucketType=\"terms\", level2BucketType=\"terms\", level1DocValueType=\"field\", level2DocValueType=\"field\", size=0)\n\tjsonBucket = client.aggregate(index, qDict)\n\taggDict_Ipv6 = jsonBucket[\"aggregations\"]\n\t#aggregate the ipv4/6 dictionaries together\n\taggDict = aggDict_Ipv4\n\taggDict[bucket1][\"buckets\"] += aggDict_Ipv6[bucket1][\"buckets\"]\n\t\n\tlabelVertices=True\n\tlabelEdges=False\n\t#aggDict = {u'src_addr': {u'buckets': [{u'dst_addr': {u'buckets': [{u'key': u'192.168.1.160', u'doc_count': 1061347}, {u'key': u'192.168.1.11', u'doc_count': 14857}, {u'key': u'192.168.0.12', u'doc_count': 14852}, {u'key': u'192.168.1.102', u'doc_count': 13044}, {u'key': u'239.255.255.250', u'doc_count': 7607}, {u'key': u'192.168.0.11', u'doc_count': 7382}, {u'key': u'192.168.0.91', u'doc_count': 5283}, {u'key': u'192.168.3.216', u'doc_count': 1730}, {u'key': u'192.168.0.1', u'doc_count': 625}, {u'key': u'192.168.1.118', u'doc_count': 257}], u'sum_other_doc_count': 544, u'doc_count_error_upper_bound': 1}, u'key': u'192.168.2.10', u'doc_count': 1127528}, {u'dst_addr': {u'buckets': [{u'key': u'192.168.2.10', u'doc_count': 1061347}, {u'key': u'239.255.255.250', u'doc_count': 14710}, {u'key': u'192.168.0.14', u'doc_count': 605}, {u'key': u'255.255.255.255', u'doc_count': 315}, {u'key': u'224.0.0.1', u'doc_count': 312}, {u'key': u'224.0.0.252', u'doc_count': 264}, {u'key': u'224.0.0.251', u'doc_count': 9}, {u'key': u'224.0.1.129', u'doc_count': 2}, {u'key': u'239.192.152.143', u'doc_count': 2}], u'sum_other_doc_count': 0, u'doc_count_error_upper_bound': 0}, u'key': u'192.168.1.160', u'doc_count': 1077566}, {u'dst_addr': {u'buckets': [{u'key': u'192.168.0.1', u'doc_count': 104641}, {u'key': u'239.255.255.250', u'doc_count': 81122}, {u'key': u'224.0.0.252', u'doc_count': 24754}, {u'key': u'172.217.3.163', u'doc_count': 20530}, {u'key': u'172.217.3.174', u'doc_count': 19105}, {u'key': u'134.121.120.167', u'doc_count': 16311}, {u'key': u'192.168.3.255', u'doc_count': 8152}, {u'key': u'64.4.54.254', u'doc_count': 7700}, {u'key': u'64.71.168.217', u'doc_count': 7127}, {u'key': u'192.168.1.114', u'doc_count': 6920}], u'sum_other_doc_count': 187585, u'doc_count_error_upper_bound': 1754}, u'key': u'192.168.0.14', u'doc_count': 483947}, {u'dst_addr': {u'buckets': [{u'key': u'192.168.0.14', u'doc_count': 120591}, {u'key': u'255.255.255.255', u'doc_count': 2397}, {u'key': u'239.255.255.250', u'doc_count': 508}, {u'key': u'192.168.2.10', u'doc_count': 247}, {u'key': u'192.168.3.224', u'doc_count': 79}, {u'key': u'224.0.0.1', u'doc_count': 63}, {u'key': u'224.0.0.252', u'doc_count': 14}, {u'key': u'192.168.0.109', u'doc_count': 10}, {u'key': u'192.168.0.111', u'doc_count': 4}, {u'key': u'192.168.0.16', u'doc_count': 4}], 
u'sum_other_doc_count': 7, u'doc_count_error_upper_bound': 0}, u'key': u'192.168.0.1', u'doc_count': 123924}, {u'dst_addr': {u'buckets': [{u'key': u'239.255.255.250', u'doc_count': 87186}, {u'key': u'192.168.2.10', u'doc_count': 21272}, {u'key': u'192.168.3.255', u'doc_count': 8093}, {u'key': u'255.255.255.255', u'doc_count': 2206}, {u'key': u'192.168.0.14', u'doc_count': 78}, {u'key': u'224.0.0.252', u'doc_count': 2}], u'sum_other_doc_count': 0, u'doc_count_error_upper_bound': 0}, u'key': u'192.168.0.12', u'doc_count': 118837}, {u'dst_addr': {u'buckets': [{u'key': u'239.255.255.250', u'doc_count': 69383}, {u'key': u'192.168.3.255', u'doc_count': 11231}, {u'key': u'192.168.0.14', u'doc_count': 200}, {u'key': u'192.168.2.10', u'doc_count': 64}, {u'key': u'224.0.0.252', u'doc_count': 35}, {u'key': u'255.255.255.255', u'doc_count': 4}], u'sum_other_doc_count': 0, u'doc_count_error_upper_bound': 0}, u'key': u'192.168.0.13', u'doc_count': 80917}, {u'dst_addr': {u'buckets': [{u'key': u'239.255.255.250', u'doc_count': 37482}, {u'key': u'192.168.2.10', u'doc_count': 18645}, {u'key': u'192.168.15.255', u'doc_count': 7153}, {u'key': u'192.168.3.255', u'doc_count': 6852}, {u'key': u'255.255.255.255', u'doc_count': 3385}, {u'key': u'192.168.0.14', u'doc_count': 107}, {u'key': u'224.0.0.251', u'doc_count': 28}, {u'key': u'224.0.0.252', u'doc_count': 10}, {u'key': u'192.168.1.111', u'doc_count': 5}, {u'key': u'224.0.1.129', u'doc_count': 1}], u'sum_other_doc_count': 0, u'doc_count_error_upper_bound': 0}, u'key': u'192.168.1.102', u'doc_count': 73668}, {u'dst_addr': {u'buckets': [{u'key': u'239.255.255.250', u'doc_count': 32847}, {u'key': u'192.168.2.10', u'doc_count': 21241}, {u'key': u'192.168.3.255', u'doc_count': 12561}, {u'key': u'255.255.255.255', u'doc_count': 3511}, {u'key': u'192.168.0.14', u'doc_count': 355}, {u'key': u'192.168.2.101', u'doc_count': 9}, {u'key': u'192.168.2.102', u'doc_count': 9}, {u'key': u'192.168.2.103', u'doc_count': 9}, {u'key': u'192.168.2.107', u'doc_count': 8}, {u'key': u'192.168.2.108', u'doc_count': 8}], u'sum_other_doc_count': 35, u'doc_count_error_upper_bound': 0}, u'key': u'192.168.1.11', u'doc_count': 70593}, {u'dst_addr': {u'buckets': [{u'key': u'239.255.255.250', u'doc_count': 48167}, {u'key': u'192.168.1.255', u'doc_count': 7814}, {u'key': u'255.255.255.255', u'doc_count': 2350}, {u'key': u'224.0.0.252', u'doc_count': 80}, {u'key': u'192.168.3.255', u'doc_count': 3}, {u'key': u'224.0.0.251', u'doc_count': 3}, {u'key': u'192.168.0.14', u'doc_count': 1}, {u'key': u'192.168.1.101', u'doc_count': 1}], u'sum_other_doc_count': 0, u'doc_count_error_upper_bound': 0}, u'key': u'192.168.1.14', u'doc_count': 58419}, {u'dst_addr': {u'buckets': [{u'key': u'239.255.255.250', u'doc_count': 31456}, {u'key': u'255.255.255.255', u'doc_count': 8959}, {u'key': u'192.168.3.255', u'doc_count': 7454}, {u'key': u'192.168.2.10', u'doc_count': 7387}, {u'key': u'192.168.0.14', u'doc_count': 187}, {u'key': u'224.0.0.252', u'doc_count': 4}, {u'key': u'192.168.0.16', u'doc_count': 3}, {u'key': u'192.168.2.101', u'doc_count': 1}, {u'key': u'192.168.2.102', u'doc_count': 1}, {u'key': u'192.168.2.103', u'doc_count': 1}], u'sum_other_doc_count': 6, u'doc_count_error_upper_bound': 0}, u'key': u'192.168.0.11', u'doc_count': 55459}], u'sum_other_doc_count': 410259, u'doc_count_error_upper_bound': 4257}}\n\tg = AggToNetworkGraph(aggDict, bucket1, bucket2, labelVertices, labelEdges)\n\tg.write_graphml(\"./ip_traffic.graphml\")\n\tgraphPlot = PlotNetworkGraph(g, labelVertices, 
labelEdges)\n\tgraphPlot.save(\"ipTraffic.png\")\n\tadjacencyMatrix = g.get_adjacency(attribute=\"weight\", default=0)\n\tprint(str(type(adjacencyMatrix))+\"\\n\"+str(adjacencyMatrix))\n\t\n\tPlotDirectedEdgeHistogram(g, \"weight\")",
"def aggregate_query(self):\n raise NotImplementedError",
"def _var(self):\n return self.sumsquares / self.sum_weights",
"def get_results_from_aggregation_sources(self, context):",
"def vars(self):\n return self.v",
"def variable(self):",
"def qcs(self):\n return self.aggregation",
"def name(self):\n return f\"{self.client_name} {self.variable}\"",
"def gen_values(self):",
"def _generate_es_query_external():\n\n internal_ips = [net[0] for service in Service.objects.all() for net in get_internal_ips(service)]\n\n query_object = {\n \"size\": 0,\n \"query\": {\n \"constant_score\": {\n \"filter\": {\n \"and\": [\n {\"term\": {\"direction\": \"out\"}},\n {\"range\": {\"@timestamp\": {\"gt\": \"now-10m\"}}},\n {\n \"not\": {\n \"terms\": {\"ip\": internal_ips}\n }\n }\n ]\n }\n }\n }\n }\n\n query_object[\"aggregations\"] = {\n \"external_services\": {\n \"terms\": {\n \"field\": \"ip\"\n },\n \"aggregations\": {\n \"clients\": {\n \"terms\": {\n \"field\": \"beat.hostname\"\n },\n \"aggregations\": {\n \"redis\": {\n \"terms\": {\n \"field\": \"type\",\n \"include\": \"redis\"\n },\n \"aggregations\": {\n \"oks\": {\n \"missing\": {\"field\": \"redis.error\"}\n },\n \"errors\": {\n \"filter\": {\"exists\": {\"field\": \"redis.error\"}}\n }\n }\n },\n \"mysql\": {\n \"terms\": {\n \"field\": \"type\",\n \"include\": \"mysql\"\n },\n \"aggregations\": {\n \"oks\": {\n \"filter\": {\"term\": {\"mysql.iserror\": \"false\"}}\n },\n \"errors\": {\n \"filter\": {\"term\": {\"mysql.iserror\": \"true\"}}\n }\n }\n },\n \"pgsql\": {\n \"terms\": {\n \"field\": \"type\",\n \"include\": \"pgsql\"\n },\n \"aggregations\": {\n \"oks\": {\n \"filter\": {\"term\": {\"pgsql.iserror\": \"false\"}}\n },\n \"errors\": {\n \"filter\": {\"term\": {\"pgsql.iserror\": \"true\"}}\n }\n }\n }\n }\n }\n }\n }\n }\n\n return query_object",
"def addConstraints(const1, const2):\n \n #get unique keys\n keys = list(set(list(const1.keys()) + list(const2.keys())))\n const_sum = {}\n \n for key in keys:\n addrhs = np.array(np.add(const1[key].rhs, const2[key].rhs))\n const_sum[key] = cenquery.Constraint(query=const1[key].query,\n rhs=addrhs, sign=const1[key].sign, name =const1[key].name )\n \n return(const_sum)",
"def sum(app, args):\n if not args.key:\n db = get_db(app)\n notrans = db.transient.count()\n print(\"No Transient records: \", notrans)\n if notrans > 0:\n print(\"Total data Transient: \", nicesize(\n list(db.transient.aggregate([\n {\"$group\": {\"_id\": None,\n \"total\": {\"$sum\": \"$size\"}}}]))[0]['total']))\n print(\" No Core records: \", db.transient.count())\n return\n\n kname, kinfo = key_info(app.conf, args.key)\n res = _single_sum(app, group_by=kname, force=args.force)\n total_size = int(0)\n total_count = 0\n mgn = len(\"Total\")\n for reshost in res:\n gid = reshost['_id']\n if gid is None:\n mgn = max(4, mgn)\n else:\n mgn = max(len(str(reshost['_id'])), mgn)\n\n fms = \"{:\" + str(mgn) + \"}\\t{:>10}\\t{:>9}\"\n if args.human:\n print(\"# {}:\".format(kname))\n for reshost in res:\n total = reshost['total']\n count = reshost['count']\n total_size += int(total)\n total_count += count\n if args.human:\n total_human = nicesize(total)\n count_human = nicenumber(count)\n categ = reshost['_id']\n if categ is None:\n categ = \"<undefined>\"\n\n print(fms.format(\n categ, total_human, count_human))\n else:\n print(\"{}\\t{}\\t{}\".format(\n reshost['_id'], total, count))\n\n if args.human:\n total_size_human = nicesize(total_size)\n total_count_human = nicenumber(total_count)\n print(fms.format('', '-'*10, '-'*9))\n print(fms.format(\n \"Total\", total_size_human, total_count_human))\n else:\n print(\"Total\\t{}\\t{}\".format(total_size, total_count))",
"def vars(cls):\n raise NotImplementedError(\"This is an abstract function that needs to be implemented for each value function\")",
"def get_variable_values(self, vars):\n raise NotImplementedError()",
"def addInvariants(invar1, invar2):\n invar_sum= {}\n for key in invar1.keys():\n invar_sum[key] = np.array(np.add(invar1[key], invar2[key]))\n \n return(invar_sum)"
]
| [
"0.5818746",
"0.5604993",
"0.55532503",
"0.543757",
"0.5422631",
"0.5242082",
"0.52301157",
"0.498277",
"0.49819478",
"0.49520943",
"0.48781514",
"0.48485976",
"0.48349586",
"0.48041868",
"0.47949913",
"0.47696248",
"0.47664636",
"0.47661284",
"0.4756242",
"0.47452343",
"0.4744864",
"0.47432745",
"0.473176",
"0.47189194",
"0.47032624",
"0.46998122",
"0.46939453",
"0.46860772",
"0.4684133",
"0.46682367"
]
| 0.65746534 | 0 |
Variables for scalar bucket_counts. | def scalar_bucket_counts(**kwargs):
attributes_list = ["ping_type", "os", "app_version", "app_build_id", "channel"]
fixed_attributes = ["app_version", "channel"]
cubed_attributes = [x for x in attributes_list if x not in fixed_attributes]
return dict(
attributes=",".join(attributes_list),
cubed_attributes=cubed_attributes,
attribute_combinations=compute_datacube_groupings(cubed_attributes),
scalar_metric_types="""
"counter",
"quantity",
"labeled_counter",
"timespan"
""",
boolean_metric_types="""
"boolean"
""",
aggregate_attributes="""
metric,
metric_type,
key
""",
aggregate_attributes_type="""
metric STRING,
metric_type STRING,
key STRING
""",
**{
# re-use variables from previous query
key: clients_scalar_aggregates()[key]
for key in ["user_data_attributes", "user_data_type"]
},
**kwargs,
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def raw_counts(self):\n return np.array([1, 2, 3])",
"def raw_counts(self):\n return np.array([1, 2, 3])",
"def histogram_bucket_counts(**kwargs):\n attributes_list = [\"ping_type\", \"os\", \"app_version\", \"app_build_id\", \"channel\"]\n metric_attributes_list = [\"metric\", \"metric_type\", \"key\", \"agg_type\"]\n fixed_attributes = [\"app_version\", \"channel\"]\n cubed_attributes = [x for x in attributes_list if x not in fixed_attributes]\n return dict(\n attributes_list=attributes_list,\n attributes=\",\".join(attributes_list),\n cubed_attributes=cubed_attributes,\n attribute_combinations=compute_datacube_groupings(cubed_attributes),\n metric_attributes_list=metric_attributes_list,\n metric_attributes=\",\".join(metric_attributes_list),\n custom_distribution_metadata_list=get_custom_distribution_metadata(\"fenix\"),\n **kwargs,\n )",
"def varCount(self, aKind):\n return self.counts[aKind]",
"def __init__(self):\n self.num_counts = {}",
"def var_count(self, kind):\n return self.counter[kind]",
"def test_counts(self):\n c = array([5,0,1,1,5,5])\n obs = counts(c)\n exp = array([1,2,0,0,0,3])\n self.assertEqual(obs, exp)\n d = array([2,2,1,0])\n obs = counts(d, obs)\n exp = array([2,3,2,0,0,3])\n self.assertEqual(obs, exp)",
"def make_histogram(points, bucket_size):\r\n return Counter(bucketize(point, bucket_size) for point in points)",
"def _getValueCounts(mapping):\n return Counter({k: len(v) for k, v in viewitems(mapping)})",
"def make_histogram(points, bucket_size):\n return Counter(bucketize(point, bucket_size) for point in points)",
"def make_histogram(points, bucket_size):\n return Counter(bucketize(point, bucket_size) for point in points)",
"def raw_counts(self):\n return np.array([[1, 2], [3, 4], [5, 6]])",
"def count_vars(scope=''):\n v = get_vars(scope)\n return sum([np.prod(var.shape.as_list()) for var in v])",
"def totalcounts(self):\n return self.datacounts + self.bkgdcounts",
"def count():",
"def _collect_counts(self):\n for t in self.system.keys():\n if t in self.gold:\n self.tp += 1\n else:\n self.fp += 1\n for t in self.gold.keys():\n if t not in self.system:\n self.fn += 1",
"def __init__(self, bucket_ranges):\n # An array of the histogram bucket boundaries, such as 1, 10, 30, 100\n self.__bucket_ranges = list(bucket_ranges)\n last_value = None\n for i in self.__bucket_ranges:\n if last_value is not None and i < last_value:\n raise ValueError(\"The bucket_ranges argument must be sorted.\")\n else:\n last_value = i\n\n # __counts[i] holds the total number of values we have seen >= to __boundaries[i-1] and < __boundaries[i]\n self.__counts = [0] * len(bucket_ranges)\n # __overflows holds the number of values >= __boundaries[-1]\n self.__overflow = 0\n # The minimum and maximum values seen.\n self.__min = None\n self.__max = None\n # The total number of values collected.\n self.__total_count = 0\n # The sum of the values collected\n self.__total_values = 0",
"def split_counts(self) -> Dict[int, int]:\n if self._split_counts is None:\n self.RefreshStats()\n return self._split_counts",
"def __init__(self):\n self.counts = {}",
"def freq():",
"def stats(self):\n nqbits = self.operator.num_qubits",
"def bucket_dist(g_var, x_var, all_bins, tar_bin, label, df):\n return (\n df.groupby(g_var)[x_var]\n .value_counts(normalize=True, bins=all_bins)\n [:, tar_bin]\n .to_frame()\n .assign(Interval = label)\n )",
"def test_bins(self):\n min_val = 0\n max_val = 1\n buckets = 10\n values_per_bucket = 10\n\n import numpy\n\n data = list(numpy.linspace(min_val, max_val, buckets * values_per_bucket))\n bins = numpy.linspace(min_val, max_val + sys.float_info.epsilon, buckets + 1)\n digitized = numpy.digitize(data, bins)\n counts = numpy.bincount(digitized)\n self.assertEqual(buckets + 1, len(counts))\n self.assertEqual(0, counts[0])\n for bucket in counts[1:]:\n self.assertEqual(values_per_bucket, bucket)",
"def _get_counts(self, X: np.ndarray) -> Dict[int, np.ndarray]:\n return {f: np.bincount(X[:, f].astype(int), minlength=n_cat) for f, n_cat in\n self.categories_per_feature.items()}",
"def buckets(self):\n return self.indexed",
"def freq(self) -> int:",
"def counts_vals(self):\n\n return unumpy.nominal_values(self.counts)",
"def gal_count(clusters):\n sum = 0\n for x in clusters:\n sum += x.ngal\n return sum",
"def bucket(self, bucket_count, bucket_salt=None):\n if bucket_salt:\n return hash(MetaTestCase._cmp_str(self) + bucket_salt) % bucket_count\n else:\n return hash(MetaTestCase._cmp_str(self)) % bucket_count",
"def getCounts(self):\n ret = [0]*len(self.numToLabel)\n for block in self.blocks:\n for label in block[1]: ret[label] += 1\n return ret"
]
| [
"0.619195",
"0.619195",
"0.6113734",
"0.61020154",
"0.6027104",
"0.60071844",
"0.58921134",
"0.58503234",
"0.57796675",
"0.576681",
"0.576681",
"0.5760521",
"0.5759519",
"0.57510465",
"0.5747679",
"0.57229006",
"0.57080144",
"0.5696534",
"0.56908894",
"0.56377625",
"0.5584959",
"0.5582118",
"0.55599236",
"0.55550504",
"0.5543751",
"0.5514877",
"0.5491598",
"0.54850936",
"0.5479446",
"0.5455331"
]
| 0.6860446 | 0 |
Variables for clients histogram bucket counts. | def histogram_bucket_counts(**kwargs):
attributes_list = ["ping_type", "os", "app_version", "app_build_id", "channel"]
metric_attributes_list = ["metric", "metric_type", "key", "agg_type"]
fixed_attributes = ["app_version", "channel"]
cubed_attributes = [x for x in attributes_list if x not in fixed_attributes]
return dict(
attributes_list=attributes_list,
attributes=",".join(attributes_list),
cubed_attributes=cubed_attributes,
attribute_combinations=compute_datacube_groupings(cubed_attributes),
metric_attributes_list=metric_attributes_list,
metric_attributes=",".join(metric_attributes_list),
custom_distribution_metadata_list=get_custom_distribution_metadata("fenix"),
**kwargs,
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def split_counts(self) -> Dict[int, int]:\n if self._split_counts is None:\n self.RefreshStats()\n return self._split_counts",
"def __init__(self):\n self.num_counts = {}",
"def make_histogram(points, bucket_size):\r\n return Counter(bucketize(point, bucket_size) for point in points)",
"def cassandra_histograms(mycluster=RING_1_dev__allnodes):\n cassandra_nodetool(mycluster,cmd=\"cfhistograms\")",
"def list_buckets():\n pass",
"def make_histogram(points, bucket_size):\n return Counter(bucketize(point, bucket_size) for point in points)",
"def make_histogram(points, bucket_size):\n return Counter(bucketize(point, bucket_size) for point in points)",
"def __init__(self, bucket_ranges):\n # An array of the histogram bucket boundaries, such as 1, 10, 30, 100\n self.__bucket_ranges = list(bucket_ranges)\n last_value = None\n for i in self.__bucket_ranges:\n if last_value is not None and i < last_value:\n raise ValueError(\"The bucket_ranges argument must be sorted.\")\n else:\n last_value = i\n\n # __counts[i] holds the total number of values we have seen >= to __boundaries[i-1] and < __boundaries[i]\n self.__counts = [0] * len(bucket_ranges)\n # __overflows holds the number of values >= __boundaries[-1]\n self.__overflow = 0\n # The minimum and maximum values seen.\n self.__min = None\n self.__max = None\n # The total number of values collected.\n self.__total_count = 0\n # The sum of the values collected\n self.__total_values = 0",
"def buckets(self):\n return self.indexed",
"def Analise_do_churn(self):\r\n # grafico = px.histogram(self.tabela_clientes, x='Churn', color='Churn') # criando gráfico da tabela clientes onde o eixo x tem as informações de churn com cores diferentes\r\n # grafico.show() # exibe o grafico\r\n # # para edições nos gráficos: https://plotly.com/python/histograms/\r\n \r\n \r\n # for coluna in self.tabela_clientes.index: # index para percorrer as linhas \r\n for coluna in self.tabela_clientes: # para percorrer as colunas\r\n if coluna != \"IDCliente\":\r\n # criar a figura\r\n fig = px.histogram(self.tabela_clientes, x=coluna, color=\"Churn\")\r\n # exibir a figura\r\n fig.show()\r\n print(self.tabela_clientes.pivot_table(index=\"Churn\", columns=coluna, aggfunc='count')[\"IDCliente\"])",
"def task3b(self):\n browser_count = {}\n for entry in self.records:\n if((entry['visitor_device'] == 'browser') and (entry['event_type'] == 'read')):\n if (entry['visitor_useragent'].find('/') > -1):\n browser = entry['visitor_useragent'][0:entry['visitor_useragent'].find('/')]\n else: browser = entry['visitor_useragent']\n if (browser in browser_count):\n browser_count[browser] += 1\n else:\n browser_count[browser] = 1\n GUI.show_histo(browser_count, \"vert\", \"Number of Accesses using Browser\", \"Browser Distribution\")",
"def client_count(request):\n return request.param",
"def histogramintegrals(self):\n return {}",
"def multiple_connections_histogram(synapses):\n count_of_synapses = synapses.groupby(['pre', 'post']).size()\n return count_of_synapses",
"def __init__(self):\n self.buckets = 1009\n self.table = [{} for _ in range(self.buckets)]",
"def __init__(self):\n self.counts = {}",
"def count():",
"def all_client_number():\n\n url = CMX_URL + '/api/location/v2/clients/count'\n header = {'content-type': 'application/json', 'accept': 'application/json'}\n response = requests.get(url, headers=header, auth=CMX_AUTH, verify=False)\n response_json = response.json()\n clients_number = response_json['count']\n return clients_number",
"def reading_count_histogram():\r\n cursor = connections[\"ldc\"].cursor()\r\n cursor.execute(\"select count(smr.read_datetime) \"\r\n \"from essex_annotated.SmartMeterReadings smr \" \r\n \"inner join Meters m \"\r\n \" on m.MeterID = smr.MeterID and m.`Phase` = 1 \"\r\n \"left join meter_data_quality_flag mdqf \"\r\n \" on smr.MeterID = mdqf.MeterID \" \r\n \" and mdqf.applicability_start_date <= '2011-05-01' \"\r\n \" and mdqf.applicability_end_date >= '2012-10-31' \"\r\n \"where smr.MeterID >= 20000 and smr.MeterID < 30000 \"\r\n \"and mdqf.MeterID is null \" \r\n \"and smr.ReadDate >= '2011-05-01' \" \r\n \"and smr.ReadDate <= '2011-10-31' \"\r\n \"group by smr.MeterID\")\r\n # Create a list of observation counts\r\n reading_counts = [row[0] for row in cursor.fetchall()]\r\n \r\n # Create histogram\r\n sample_size = len(reading_counts)\r\n trim_p = 0.001\r\n p_title = \"Number of Readings Histogram \\n\" \\\r\n \"(Summer 2011, sample: \" + str(sample_size) + \" meters, \" \\\r\n \"left trim:\" + str(trim_p * 100.0) + \"%)\"\r\n p_xlabel = \"Number of Readings During Timeframe\"\r\n p_ylabel = \"Number of Meters\"\r\n create_histogram(reading_counts, trim_p, 1, p_title, p_ylabel, p_xlabel, \r\n \"sm_reading_count_histogram_summer2011\", 0, \"left\")",
"def histogram(self):\n return self._hist",
"def htable(nbuckets):",
"def __init__(self):\n self.buckets = 1009\n self.table = [[] for _ in range(self.buckets)]",
"def scalar_bucket_counts(**kwargs):\n attributes_list = [\"ping_type\", \"os\", \"app_version\", \"app_build_id\", \"channel\"]\n fixed_attributes = [\"app_version\", \"channel\"]\n cubed_attributes = [x for x in attributes_list if x not in fixed_attributes]\n return dict(\n attributes=\",\".join(attributes_list),\n cubed_attributes=cubed_attributes,\n attribute_combinations=compute_datacube_groupings(cubed_attributes),\n scalar_metric_types=\"\"\"\n \"counter\",\n \"quantity\",\n \"labeled_counter\",\n \"timespan\"\n \"\"\",\n boolean_metric_types=\"\"\"\n \"boolean\"\n \"\"\",\n aggregate_attributes=\"\"\"\n metric,\n metric_type,\n key\n \"\"\",\n aggregate_attributes_type=\"\"\"\n metric STRING,\n metric_type STRING,\n key STRING\n \"\"\",\n **{\n # re-use variables from previous query\n key: clients_scalar_aggregates()[key]\n for key in [\"user_data_attributes\", \"user_data_type\"]\n },\n **kwargs,\n )",
"def guests_counter(window, n_guests):\r\n window.write_event_value('-COUNT-', n_guests)",
"def count(cls, client) :\n\t\ttry :\n\t\t\tobj = lbprofile()\n\t\t\toption_ = options()\n\t\t\toption_.count = True\n\t\t\tresponse = obj.get_resources(client, option_)\n\t\t\tif response :\n\t\t\t\treturn response[0].__dict__['___count']\n\t\t\treturn 0\n\t\texcept Exception as e :\n\t\t\traise e",
"def __init__(self):\n self.cnt = {}",
"def setup_hist(self):\n self.x_min = {}\n self.x_max = {}\n self.x_max_minus_min = {}\n self.dx = {}\n self.n_bins = {}\n\n self.histogram_edges = {}\n self.histogram_values = {}\n self.histogram_cdf = {}",
"def count(cls, client) :\n try :\n obj = nshttpprofile()\n option_ = options()\n option_.count = True\n response = obj.get_resources(client, option_)\n if response :\n return response[0].__dict__['___count']\n return 0\n except Exception as e :\n raise e",
"def __init__(self):\n self.buckets = collections.defaultdict(list)",
"def bincount(conn):\n c = conn.cursor()\n r = c.execute(\"\"\"\n select sum(length) as bin_length\n from event\n where bin_id not null\n group by bin_id\"\"\").fetchall()\n bindata = np.array(r, dtype=float)\n\n if len(bindata):\n binfo = (len(bindata), np.min(bindata), np.max(bindata), np.mean(bindata), np.std(bindata) )\n else:\n binfo = (0, 0, 0, 0, 0)\n\n return {\n \"bins\":{\n \"count\":float(binfo[0]),\n \"length\":{\n \"min\":float(binfo[1]),\n \"max\":float(binfo[2]),\n \"mean\":float(binfo[3]),\n \"std\":float(binfo[4]),\n }\n }\n }"
]
| [
"0.64437705",
"0.6230765",
"0.5934437",
"0.5867544",
"0.5862011",
"0.5840779",
"0.5840779",
"0.5733316",
"0.57241476",
"0.5720756",
"0.57149744",
"0.5714322",
"0.57127947",
"0.57107496",
"0.56810373",
"0.5674694",
"0.5669135",
"0.56563807",
"0.5625264",
"0.56244755",
"0.5613489",
"0.5579098",
"0.55766344",
"0.5564804",
"0.55630785",
"0.55537444",
"0.55517983",
"0.5545971",
"0.55438554",
"0.5541798"
]
| 0.6240644 | 1 |
Variables for scalar percentiles. | def scalar_percentiles(**kwargs):
attributes = ["ping_type", "os", "app_version", "app_build_id", "channel"]
fixed_attributes = ["app_version", "channel"]
cubed_attributes = [x for x in attributes if x not in fixed_attributes]
return dict(
# TODO: be consistent with naming of attributes (e.g. attributes_list)
attributes=attributes,
cubed_attributes=cubed_attributes,
attribute_combinations=compute_datacube_groupings(cubed_attributes),
aggregate_attributes="""
metric,
metric_type,
key
""",
**kwargs,
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def p(n):\n def p_(x):\n return np.percentile(x, n)\n\n p_.__name__ = \"p_%s\" % n\n return p_",
"def matlab_percentile(in_data, percentiles):\n data = np.sort(in_data)\n p_rank = 100.0 * (np.arange(data.size) + 0.5) / data.size\n perc = np.interp(percentiles, p_rank, data, left=data[0], right=data[-1])\n return perc",
"def test_lots_of_percentiles(self):\n data = np.array(\n [\n [[14.4, -46, 10.2], [-73.0, -46, -89.2], [-89.2, -93.25, -94.6]],\n [[19.2, 8.25, 10.6], [-19, 8.25, -67.6], [-67.6, -79.75, -83.8]],\n [[24.0, 8.75, 11.0], [8.33333, 8.75, -46.0], [-46.0, -66.25, -73.0]],\n [[28.8, 9.25, 11.4], [9.0, 9.25, -24.4], [-24.4, -52.75, -62.2]],\n [[33.6, 9.75, 11.8], [9.666667, 9.75, -2.8], [-2.8, -39.25, -51.4]],\n [\n [38.4, 10.333333, 16.8],\n [10.333333, 10.2, 8.5],\n [8.333333, -25.75, -40.6],\n ],\n [[43.2, 11.0, 26.4], [11.0, 10.6, 9.5], [9.0, -12.25, -29.8]],\n [\n [48.0, 11.666667, 36.0],\n [11.666667, 11.0, 10.5],\n [9.666667, 1.25, -19.0],\n ],\n [[52.8, 24, 45.6], [24, 11.4, 11.5], [10.5, 8.5, -8.2]],\n [[57.6, 48, 55.2], [48, 11.8, 36.0], [11.5, 9.5, 2.6]],\n ],\n dtype=np.float32,\n )\n\n percentiles = np.arange(5, 100, 10)\n result = Plugin()._probabilities_to_percentiles(self.cube, percentiles)\n self.assertArrayAlmostEqual(result.data, data, decimal=5)",
"def __init__(self, values):\n self.n=float(values.size)\n self.values=np.sort(values.flatten())\n self.quantiles=mquantiles(values,prob=np.arange(self.n)/self.n,alphap=1/3., betap=1./3,)",
"def default_quantile():\n return np.logspace(-5, 0, 100)",
"def five_num_summary(items):\n \n percentile = np.percentile(items, [0,25,50,75,100]) #A list of percentiles\n minimum = round(percentile[0], 2)\n quartile_1 = round(percentile[1], 2)\n median = round(percentile[2], 2)\n quartile_3 = round(percentile[3], 2)\n maximmu = round(percentile[4], 2)\n \n return {'max':maximmu,'median':median,'min':minimum,'q1':quartile_1,'q3':quartile_3}\n pass",
"def test_profiled_quantiles(self):\n\n # this data has 4 bins, range of 3\n # with equal bin size, each bin has the width of 0.75\n\n data = [\"1.0\", \"2.0\", \"3.0\", \"4.0\"]\n df = pd.Series(data)\n profiler = FloatColumn(df.name)\n profiler.update(df)\n profile = profiler.profile\n\n est_quartiles = profile['quantiles']\n est_Q1 = est_quartiles[249]\n est_Q2 = est_quartiles[499]\n est_Q3 = est_quartiles[749]\n\n data_to_num = [float(item) for item in data]\n exact_Q1 = np.percentile(data_to_num, 25)\n exact_Q2 = np.percentile(data_to_num, 50)\n exact_Q3 = np.percentile(data_to_num, 75)\n\n self.assertEqual(est_Q1, exact_Q1)\n self.assertEqual(est_Q2, exact_Q2)\n self.assertEqual(est_Q3, exact_Q3)",
"def get_statistics_percentile(self,table,field):\n dict = {}\n for x in xrange(1,11):\n dict[x] = db.session.execute(\"select statistics_viewCount as percentile from meta order by percentile asc limit 1 OFFSET 19346*\"+str(x)+\"/10-1\").first().percentile",
"def test_check_data_specifying_percentiles(self):\n expected_data = np.array(\n [self.percentile_25, self.percentile_50, self.percentile_75]\n )\n result = Plugin().process(self.cube, percentiles=[25, 50, 75])\n self.assertArrayAlmostEqual(result.data, expected_data, decimal=5)",
"def _percentile_factory(perc):\n def percentile_(values):\n return numpy.percentile(values, perc)\n percentile_.__name__ = f\"percentile_{perc}\"\n return percentile_",
"def get_percentile(self, q):\n return None",
"def get_percentile(self, q):\n return None",
"def set_up_percentiles_cube():\n\n test_data = np.full((5, 4, 4), -1, dtype=float)\n for i in range(5):\n test_data[i].fill(100*i + 200)\n\n percentiles = DimCoord(np.linspace(0, 100, 5), long_name=\"percentiles\",\n units=\"%\")\n grid_x = DimCoord(np.arange(4), standard_name=\"projection_x_coordinate\",\n units=\"km\")\n grid_y = DimCoord(np.arange(4), standard_name=\"projection_y_coordinate\",\n units=\"km\")\n test_cube = iris.cube.Cube(test_data, long_name=\"snow_level\", units=\"m\",\n dim_coords_and_dims=[(percentiles, 0),\n (grid_y, 1), (grid_x, 2)])\n return test_cube",
"def Quartiles(data):\n q = np.percentile(data, [25, 50, 75])\n\n return q[0], q[1], q[2]",
"def get_percentile(self, q):\n raise NotImplementedError(\"This is an abstract method and needs to be implemented in derived classes.\")",
"def test_percentile_coord(self):\n result = Plugin()._probabilities_to_percentiles(self.cube, self.percentiles)\n self.assertIsInstance(result.coord(\"percentile\"), DimCoord)\n self.assertArrayEqual(result.coord(\"percentile\").points, self.percentiles)\n self.assertEqual(result.coord(\"percentile\").units, unit.Unit(\"%\"))",
"def test_check_data_specifying_no_of_percentiles(self):\n expected_data = np.array(\n [self.percentile_25, self.percentile_50, self.percentile_75]\n )\n result = Plugin().process(self.cube, no_of_percentiles=3)\n self.assertArrayAlmostEqual(result.data, expected_data, decimal=5)",
"def _pctiles_from_sorted(self, values, pcs):\n if not all(0 <= pc <= 100 for pc in pcs):\n raise ValueError(\"pctiles must be between 0 and 100\")\n nvals = len(values)\n pctiles = []\n for pc in pcs:\n if pc == 0:\n new_pct = values[0]\n elif pc == 100:\n new_pct = values[nvals-1]\n else:\n n = pc * nvals / 100\n if n == int(n):\n new_pct = (values[int(n)-1] + values[int(n)]) / 2\n else:\n new_pct = values[floor(n)]\n pctiles.append(new_pct)\n return pctiles",
"def _calculate_percentile_cutoff(run_numbers):\n mcp_values = []\n andor_values = []\n for run_number in run_numbers:\n current_data_path = ''.join([DATA_PATH, 'run', str(run_number), 'allevts.h5'])\n f = h5py.File(current_data_path, 'r')\n current_phot = _get_photon_energy(f, run_number)\n current_mcp = np.array(f['Acqiris2']['acq'])\n current_mcp = current_mcp[(current_phot > 781) & (current_phot < 782)]\n mcp_values.extend(current_mcp)\n current_andor = np.array(f['Andor']['signal'])\n current_andor = current_andor[(current_phot > 781) & (current_phot < 782)]\n andor_values.extend(current_andor)\n #plt.figure()\n #plt.scatter(mcp_values, andor_values)\n mcp_percentile_cutoff = min([percentileofscore(andor_values, 4000), 99.9])\n return mcp_percentile_cutoff",
"def wilks(percentiles, alpha=0.10):\n percentiles = np.asarray(percentiles)\n pvals = list(percentiles.flat) # want in decimal\n pvals = sorted(2 * min(pval, 1 - pval) for pval in pvals)\n ptest = [alpha * i / len(pvals) for i in range(len(pvals))]\n ppick = max(pv for pv, pt in zip(pvals, ptest) if pv <= pt) / 2\n mask = (percentiles <= ppick) | (percentiles >= (1 - ppick))\n return percentiles[mask]",
"def setup_percentiles_pediatrics_new():\n df_cdc = pd.read_csv(Path(\"growthviz-data/ext/growthfile_cdc_ext.csv.gz\"))\n df_who = pd.read_csv(Path(\"growthviz-data/ext/growthfile_who.csv.gz\"))\n df = df_cdc.merge(df_who, on=[\"agedays\", \"sex\"], how=\"left\")\n\n # Add weighting columns to support smoothing between 2-4yo\n df = df.assign(ageyears=lambda r: (r[\"agedays\"] / 365.25))\n df[\"cdcweight\"] = 0\n df.loc[df[\"ageyears\"].between(2, 4, inclusive=\"left\"), \"cdcweight\"] = (\n df[\"ageyears\"] - 2\n )\n df[\"whoweight\"] = 0\n df.loc[df[\"ageyears\"].between(2, 4, inclusive=\"left\"), \"whoweight\"] = (\n 4 - df[\"ageyears\"]\n )\n\n PERCENTILES = [0.03, 0.05, 0.10, 0.25, 0.50, 0.75, 0.85, 0.90, 0.95, 0.97]\n\n # Compute percentiles for the full set of vars\n for s in [\"who\", \"cdc\"]:\n pvars = [\"ht\", \"wt\"]\n if s == \"cdc\":\n pvars.append(\"bmi\")\n for p in pvars:\n for pct in PERCENTILES:\n lvar = f\"{s}_{p}_l\"\n mvar = f\"{s}_{p}_m\"\n svar = f\"{s}_{p}_s\"\n tvar = f\"{s}_{p}_p{int(pct * 100)}\"\n df.loc[df[lvar] == 0, tvar] = df[mvar] * (df[svar] ** norm.ppf(pct))\n df.loc[df[lvar] != 0, tvar] = df[mvar] * (\n 1 + (df[lvar] * df[svar] * norm.ppf(pct))\n ) ** (1 / df[lvar])\n\n # Add smoothed percentiles\n for p in [\"ht\", \"wt\"]:\n for pct in PERCENTILES:\n cdc_var = f\"cdc_{p}_p{int(pct * 100)}\"\n who_var = f\"who_{p}_p{int(pct * 100)}\"\n s_var = f\"s_{p}_p{int(pct * 100)}\"\n df.loc[df[\"ageyears\"] <= 2, s_var] = df[who_var]\n df.loc[df[\"ageyears\"].between(2, 4, inclusive=\"neither\"), s_var] = (\n (df[who_var] * df[\"whoweight\"]) + (df[cdc_var] * df[\"cdcweight\"])\n ) / 2\n df.loc[df[\"ageyears\"] >= 4, s_var] = df[cdc_var]\n\n return df",
"def __init__(self, quantiles, mask=None):\n self.quantiles = torch.tensor(quantiles).float()\n self.n_quantiles = len(quantiles)\n self.mask = mask\n if self.mask:\n self.mask = np.float32(mask)",
"def transfo_quantile(xx):\n\n # nn = np.zeros(len(xx))\n # oo = np.argsort(xx)\n # nn[oo] = np.arange(len(xx)) / len(xx) + 1 / (2 * len(xx))\n # return nn\n\n return rankdata(xx) / len(xx) - 1 / (2 * len(xx))",
"def calc_percentile(self, p):\n bounds = self.range_bins\n r = []\n v = []\n for b in bounds:\n d = self._get_data_distance(0., b)\n if len(d) < 1:\n continue\n r.append(b)\n v.append(np.percentile(d, p * 100.)) # percentile value\n\n r = np.asarray(r)\n np.asarray(v)\n\n o = {'r': np.asarray(r), 'value': np.asarray(v)}\n if 'percentiles' not in self.statistic.keys():\n self.statistic.update({'percentiles': {}})\n\n self.statistic['percentiles'].update({p: o})",
"def percentile_plots(plot_var, i_case, plot_settings):\n column_titles = [\"5th percentile\", \"32nd percentile\", \"50th percentile\"]\n plot_var_suffix = [\"_perc5\", \"_perc32\", \"_perc50\"]\n\n # Read data from NetCDF source file.\n plot_items = []\n plot_data_max = 0\n for s in plot_var_suffix:\n d = nc.variables[plot_var+s][i_case, :, :]\n if plot_var[0] == \"p\":\n d *= 1e-3\n plot_items.append({'data': d})\n if np.amax(d) > plot_data_max:\n plot_data_max = np.amax(d)\n\n # Mapping plot properties and splitting up into individual and shared properties.\n plot_handling = plot_settings[\"plot_handling\"]\n contour_fill_levels = plot_handling[\"contour_fill_levels\"]\n contour_line_levels = plot_handling.get(\"contour_line_levels\", 3 * [contour_fill_levels])\n colorbar_ticks = plot_handling.get(\"colorbar_ticks\", contour_fill_levels)\n colorbar_label = plot_settings[\"color_label\"]\n\n # Write the contour handling to plot_items.\n for i, plot_item in enumerate(plot_items):\n plot_item['contour_line_levels'] = contour_line_levels[i]\n\n # Write the row dependent settings to row_items.\n row_item = {\n 'colorbar_ticks': colorbar_ticks,\n 'colorbar_label': colorbar_label,\n 'contour_fill_levels': contour_fill_levels,\n }\n if 'colorbar_tick_fmt' in plot_handling:\n row_item['colorbar_tick_fmt'] = plot_handling[\"colorbar_tick_fmt\"]\n if 'contour_line_label_fmt' in plot_handling:\n row_item['contour_line_label_fmt'] = plot_handling[\"contour_line_label_fmt\"]\n\n plot_panel_1x3(plot_items, column_titles, row_item)",
"def write_preflop_percentiles():\n suited_result = [\n [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], # not used\n [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],\n [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], # 2\n [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],\n [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],\n [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], # 5\n [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], # 6\n [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], # 7\n [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], # 8\n [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], # 9\n [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], # 10\n [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], # J\n [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], # Q\n [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], # K\n [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0] # A\n ]\n unsuited_result = [\n [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], # not used\n [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],\n [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], # 2\n [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],\n [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],\n [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], # 5\n [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], # 6\n [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], # 7\n [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], # 8\n [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], # 9\n [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], # 10\n [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], # J\n [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], # Q\n [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], # K\n [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0] # A\n ]\n hands = combinations(LookupTables.deck, 2)\n # This could be faster, but it doesn't run very much so whatever\n for hand in hands:\n sorted_rank = sorted([hand[0].rank, hand[1].rank])\n if hand[0].suit == hand[1].suit:\n preflop_order = LookupTables.Two.preflop_order_matrix[sorted_rank[1] - 2][sorted_rank[0] - 2]\n else:\n preflop_order = LookupTables.Two.preflop_order_matrix[sorted_rank[0] - 2][sorted_rank[1] - 2]\n\n # this is fraction of hands you beat\n preflop_percentile = 1 - sum(LookupTables.Two.preflop_count_matrix[0:preflop_order - 1]) / \\\n LookupTables.Two.preflop_count_sum\n\n if hand[0].suit == hand[1].suit:\n suited_result[hand[0].rank][hand[1].rank] = preflop_percentile\n else:\n unsuited_result[hand[0].rank][hand[1].rank] = preflop_percentile\n \n print \"suited_ranks_to_percentile = [\"\n for sublist in suited_result:\n print sublist\n print \"]\"\n \n print \"unsuited_ranks_to_percentile = [\"\n for sublist in unsuited_result:\n print sublist\n print \"]\"",
"def percentile(scores, student_score):\n scores = np.array(sorted(scores))\n num_scores = len(scores)\n return round(sum(scores <= student_score) / float(num_scores) * 100, 2)",
"def iqr(self, arr):\n a = np.asarray(arr)\n self.q1 = stats.scoreatpercentile(a, 25)\n self.q2 = stats.scoreatpercentile(a, 50)\n self.q3 = stats.scoreatpercentile(a, 75)",
"def test_check_data_specifying_single_percentile(self):\n expected_data = np.array(self.percentile_25)\n result = Plugin().process(self.cube, percentiles=[25])\n self.assertArrayAlmostEqual(result.data, expected_data, decimal=5)",
"def get_IQR(lst):\n return (float(np.percentile(lst, 75)) - float(np.percentile(lst, 25)))"
]
| [
"0.61870414",
"0.61465955",
"0.61424303",
"0.60795677",
"0.6067243",
"0.60474974",
"0.6034548",
"0.5990514",
"0.5940601",
"0.59211874",
"0.5808842",
"0.5808842",
"0.57736224",
"0.5736083",
"0.56959504",
"0.5695266",
"0.56910336",
"0.56861234",
"0.5676175",
"0.5672638",
"0.56653535",
"0.5653766",
"0.56420386",
"0.5638011",
"0.56251746",
"0.56105936",
"0.5567032",
"0.55643356",
"0.55638176",
"0.5555382"
]
| 0.6244538 | 0 |
Sort list by Name attribute | def sort_by_name(list_to_sort):
return sorted(
list_to_sort,
key=lambda k: k['Name'].lower()
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def sort_names(li, by_which):\n \n if by_which == 'first':\n li.sort(key = Name.first)\n elif by_which == 'last':\n li.sort(key = Name.last)",
"def sortByName(requestContext, seriesList):\n def compare(x,y):\n return cmp(x.name, y.name)\n\n seriesList.sort(compare)\n return seriesList",
"def sort_by_name(self, reverse=False):\n self.sort_by(\"name\", reverse=reverse)",
"def sortednameslist(nameslist):\n sortednames = sorted(nameslist, key=lambda x: x[1])\n return sortednames",
"def sort_by_attribute(list_carevaluation,attribute_name,desc=False):\n return sorted(list_carevaluation,key = lambda x: x.get_int_value(attribute_name),reverse=desc)",
"def sort_by_name(children_events_options_list: List[ChildrenEventOption], sort_type: SortType = SortType.ASCENDING):\n return _sort_by(children_events_options_list, sort_type, key=attrgetter('name'))",
"def sort(self):\r\n self.list.sort(key=lambda x: ''.join(x))",
"def sort_unit_lst(self, attrname, lst2sort):\n comp = []\n for unit in lst2sort:\n importance = self._importance_rank(unit, attrname)\n comp.append((unit, importance))\n comp = sorted(comp, key= lambda x: x[1], reverse=True)\n\n return [x[0] for x in comp]",
"def keyListSort(keyList):\n keyList.sort(key=lambda y: y.GetName().lower())",
"def _sort_by_name(bam_fn):",
"def sort_records_by_name(records):\n return sorted(records, key=lambda x: (x.last_name, x.first_name), reverse=True)",
"def sort(self):\n self.list.sort(key=lambda x: ''.join)",
"def sort_list(self,list_):\r\n list_.sort()",
"def sort(self):\n # sort the contents of the container alphabetically\n # this is done automatically whenever an item is added/removed from the Container\n self.items.sort(key=lambda item: item.name)",
"def select_sort_by_name_ascendant(self):\n msg = \"The new order of the items is by ascendant name\"\n with self.allure.step(msg):\n self.__product_sort.select_by_text('Name (A to Z)')\n self.allure.attach_image(self.driver, msg)",
"def sortby(self):\n ...",
"def human_sort(l):\n l.sort(key=alphanum_key)\n return l",
"def sort_books(self):\n self.foods = sorted(self.foods, key=attrgetter(\"name\"))",
"def sort_members_in_alphabetical_order(interface):\n sorted_interface = OrderedDict()\n for member_type in EXTATTRIBUTES_AND_MEMBER_TYPES:\n member_names = []\n sorted_member_names = OrderedDict()\n sorted_members = []\n for member in interface[member_type]:\n if sorted_members:\n pointer = 0\n for sorted_member in sorted_members:\n if member['Name'] < sorted_member['Name']:\n sorted_members.insert(pointer, member)\n break\n elif pointer >= (len(sorted_members)-1):\n sorted_members.append(member)\n else:\n pointer += 1\n else:\n sorted_members.append(member)\n sorted_interface[member_type] = sorted_members\n return sorted_interface",
"def get_sorted_attributes_list(attributes):\n listattr = []\n for a in attributes:\n listattr.append([a, attributes[a]])\n listattr = sorted(listattr, key=lambda x: x[1], reverse=True)\n return listattr",
"def sort(self, *args, **kargs):\n list.sort(self, *args, **kargs)\n self.emit('modified')",
"def _mySort(self, alist):\n return sorted(alist, key=lambda x: (x[0].isdigit(), x.lower()))",
"def _sort(self, groups):\n return sorted(groups, key=lambda group: (group.name.lower(), group.pubid))",
"def sorted(cls, tags: list, reverse: bool = False) -> list:\n return sorted(tags, key=lambda x: x.name.lower(), reverse=reverse)",
"def sortedFields(cls):\n return [\n i[0] for i in sorted(cls._nameToValue.items(), key=lambda item: item[1])\n ]",
"def __cmp__(self, other):\n\t\treturn cmp (self.name, other.name)",
"def sort(self, *args: Any, **kwargs: Any) -> BaseList:\n super().sort(*args, **kwargs)\n return self",
"def sorted_tags(self):\n return sorted(self.tags, key=lambda x: x.name)",
"def sort_list(self, key_):\n options = {\n 'index': 0,\n 'name' : 1,\n 'surname': 2,\n 'email': 3,\n 'phone': 4,\n }\n if key_ in options.keys():\n key_ = options.get(key_)\n\n return(sorted(self.contacts, key = lambda x: x[key_]))",
"def test05_students_list_sort_by_name(self):\n # get sorted list with function without None\n students_table = self.students_page.students_table()\n sorted_list_by_function = \\\n remove_none_from_list(sorted(students_table))\n print(sorted_list_by_function)\n\n # get sorted list with button without None\n sorted_list_by_button = \\\n remove_none_from_list(self.students_page.\n click_students_list_sort_by_name_button().\n students_table())\n print(sorted_list_by_button)\n self.assertEqual(sorted_list_by_button, sorted_list_by_function)"
]
| [
"0.75667953",
"0.73988223",
"0.7169916",
"0.7092047",
"0.70860684",
"0.68805975",
"0.6856633",
"0.68530154",
"0.6819512",
"0.6807089",
"0.6799249",
"0.6778685",
"0.6757527",
"0.66386515",
"0.65819705",
"0.6440485",
"0.642067",
"0.64073896",
"0.64035416",
"0.63455665",
"0.63340604",
"0.6319005",
"0.62929857",
"0.6268491",
"0.62570786",
"0.62509435",
"0.6224678",
"0.6223467",
"0.621757",
"0.62162596"
]
| 0.7980984 | 0 |
Get all instance reservations for a profile | def get_ec2_reservations(profile, running_filter):
try:
ec2_client = boto3.Session(profile_name=profile).client('ec2')
except ProfileNotFound:
print("Profile: %s not found" % profile, file=sys.stderr)
sys.exit(1)
filtered_instances = ec2_client.describe_instances(Filters=running_filter)
return filtered_instances['Reservations'] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_all_reservations(config):\n reservations = []\n region_list = regions(aws_access_key_id=config.keys.api,\n aws_secret_access_key=config.keys.secret)\n for region in region_list:\n _logger.info(\"Searching %s\", region)\n cnx = region.connect(aws_access_key_id=config.keys.api,\n aws_secret_access_key=config.keys.secret)\n for reservation in cnx.get_all_instances():\n _logger.info(\"Found %s %s\", reservation,\n [str(i.id) for i in reservation.instances])\n reservations.append(reservation)\n return reservations",
"def reservations(self):\n session_id = plone_session.get_session_id(self.context)\n return db.reservations_by_session(session_id).all()",
"def get_instances(self, region):\n try:\n conn = ec2.connect_to_region(region, **self.credentials)\n region_instances = []\n reservations = conn.get_all_reservations()\n for reservation in reservations:\n for instance in reservation.instances:\n region_instances.append(instance)\n except boto.exception.EC2ResponseError:\n return []\n return region_instances",
"def __get_reservations(self, instance_ids=None):\n if instance_ids:\n self.__validate_instance_id(instance_ids)\n euca_conn = self.__make_connection()\n try:\n return euca_conn.get_all_instances(instance_ids)\n except:\n euca.display_error_and_exit('%s' % ex)\n return False",
"def get_wharton_gsr_reservations():\n\n sessionid = get_wharton_sessionid()\n\n if not sessionid:\n return jsonify({'error': 'No Session ID provided.'})\n\n try:\n reservations = wharton.get_reservations(sessionid)\n save_wharton_sessionid()\n return jsonify({'reservations': reservations})\n except APIError as e:\n return jsonify({\"error\": str(e)}), 400",
"def instances(self):\r\n # It would be more efficient to do this with filters now\r\n # but not all services that implement EC2 API support filters.\r\n instances = []\r\n rs = self.connection.get_all_instances()\r\n for reservation in rs:\r\n uses_group = [g.name for g in reservation.groups if g.name == self.name]\r\n if uses_group:\r\n instances.extend(reservation.instances)\r\n return instances",
"def _quota_reservations(session, context, reservations):\n\n # Get the listed reservations\n return model_query(context, models.Reservation,\n read_deleted=\"no\",\n session=session).\\\n filter(models.Reservation.uuid.in_(reservations)).\\\n with_lockmode('update').\\\n all()",
"def reserved_compare(options):\n running_instances = defaultdict(dict)\n reserved_purchases = defaultdict(dict)\n regions = boto.ec2.regions()\n good_regions = [r for r in regions if r.name not in ['us-gov-west-1',\n 'cn-north-1']]\n for region in good_regions:\n if options.trace:\n print \" Scanning region {0}\".format(region.name)\n conn = region.connect()\n filters = {'instance-state-name': 'running'}\n zones = defaultdict(dict)\n\n if options.trace:\n print \" Fetching running instances\"\n reservations = conn.get_all_instances(filters=filters)\n for reservation in reservations:\n for inst in reservation.instances:\n if options.debug:\n print instance_string(inst, options, verbose=True)\n if inst.state != 'running':\n if options.debug:\n print \"Skip {0.id} state {0.state}\".format(inst)\n continue\n if inst.spot_instance_request_id:\n if options.debug:\n print \"Skip {0.id} has spot id {0.spot_instance_request_id}\".format(inst)\n continue\n if 'aws:autoscaling:groupName' in inst.tags:\n if options.debug:\n print \"Skip {0.id} is an autoscale instance\".format(inst)\n continue\n if inst.platform == 'Windows' or inst.platform == 'windows':\n if options.debug:\n print \"Skip {0.id} has platform {0.platform}\".format(inst)\n continue\n if inst.instance_type not in zones[inst.placement]:\n zones[inst.placement][inst.instance_type] = []\n zones[inst.placement][inst.instance_type].append(inst)\n\n if zones:\n running_instances[region.name] = zones\n\n purchased = defaultdict(dict)\n if options.trace:\n print \" Fetching reservations\"\n\n reserved = conn.get_all_reserved_instances()\n for r in reserved:\n if options.debug:\n print reservation_string(r, verbose=True)\n if r.state != 'active':\n continue\n if r.instance_tenancy != 'default':\n print 'WARNING: Non-default tenancy %s: %s' % (r.instance_tenancy, reservation_string(r))\n continue\n if r.instance_type not in purchased[r.availability_zone]:\n purchased[r.availability_zone][r.instance_type] = [r]\n else:\n purchased[r.availability_zone][r.instance_type].append(r)\n\n if purchased:\n reserved_purchases[region.name] = purchased\n\n return check_reservation_use(options, running_instances,\n reserved_purchases)",
"def reservation_data(self):\n reservations = []\n\n for reservation in self.reservations():\n resource = utils.get_resource_by_uuid(reservation.resource)\n\n if resource is None:\n log.warn('Invalid UUID %s' % str(reservation.resource))\n continue\n\n resource = resource.getObject()\n\n data = {}\n\n data['title'] = utils.get_resource_title(resource)\n\n timespans = []\n for start, end in reservation.timespans():\n timespans.append(u'◆ ' + utils.display_date(start, end))\n\n data['time'] = '<br />'.join(timespans)\n data['quota'] = utils.get_reservation_quota_statement(\n reservation.quota\n ) if reservation.quota > 1 else u''\n\n data['url'] = resource.absolute_url()\n data['remove-url'] = ''.join((\n resource.absolute_url(),\n '/your-reservations?remove=',\n reservation.token.hex\n ))\n reservations.append(data)\n\n return reservations",
"def get(profile):\n client = boto3client.get(\"iam\", profile)\n return client.list_instance_profiles()",
"def get_reservations():\n start_date = request.args.get('start')\n end_date = request.args.get('end')\n\n if start_date is not None and end_date is not None:\n start = parse_datetime(request.json['start'])\n end = parse_datetime(request.json['end'])\n if start is None or end is None:\n abort(400, 'cannot parse start or end date')\n\n reservations = Reservation.query.filter(\n Reservation.end >= start, Reservation.start <= end)\n else:\n reservations = Reservation.query.filter(\n or_(Reservation.start >= datetime.datetime.now(),\n Reservation.end >= datetime.datetime.now()))\n\n reservations = map(lambda x: x.as_dict(), reservations)\n\n return json.dumps(reservations)",
"def reservas(self):\n reservas = []\n for cliente in self.clientes:\n for reserva in cliente.reservas:\n reservas.append(reserva)\n return reservas",
"def _quota_reservations_query(context, reservations):\n return model_query(\n context, models.Reservation,\n read_deleted=\"no\",\n ).filter(\n models.Reservation.uuid.in_(reservations),\n ).with_for_update()",
"def fetch_all(profile):\n params = {}\n params[\"profile\"] = profile\n response = utils.do_request(instanceprofile, \"get\", params)\n data = utils.get_data(\"InstanceProfiles\", response)\n return data",
"def reservations(self):\r\n reservations = dict()\r\n for key in self._mounts.keys():\r\n reservations.update(self._mounts[key]['reservations'])\r\n return reservations",
"def get_all_vpc_instances ( ec2_conn, vpc ) :\n return ec2_conn.get_only_instances( filters = { \"vpc-id\" : vpc.id } )",
"def _instancelist(self):\n\n rv = []\n self.iname = {}\n for resv in self.conn.get_all_reservations():\n for inst in resv.instances:\n if inst.state != 'terminated':\n name = inst.tags.get('Name',None)\n rv.append([inst.id,inst.state])\n if name is not None:\n rv.append([name,inst.state])\n else:\n rv.append([inst.id+'-needsName',inst.state])\n self.iname[name] = inst.id\n self.iname[inst.id] = inst.id\n return rv",
"def get_ec2_instances(client):\n reservations = client.describe_instances().get(\"Reservations\")\n instances = list(map(lambda x: x.get(\"Instances\"), reservations))\n instances = list(itertools.chain.from_iterable(instances))\n return list(map(lambda x: {\n 'name': next((t['Value'] for t in x.get('Tags', []) if t.get('Key') == 'Name'), 'Unknown'),\n 'id': x.get('InstanceId'),\n 'state': x.get('State'),\n }, instances))",
"def reservations_by_recurring_allocation(self):\n\n allocation_id = self.recurring_allocation_id\n allocation = Session.query(Allocation).get(allocation_id)\n if not allocation:\n return None\n\n reservation_tokens = [each.reservation_token for each\n in allocation.reserved_slots]\n return Session.query(Reservation).filter(\n Reservation.token.in_(reservation_tokens)\n )",
"def _get_running_ec2_instances(theargs):\n mapstr = ''\n if theargs.profile is not None:\n boto3.setup_default_session(profile_name=theargs.profile)\n ec2 = boto3.client('ec2', region_name='us-west-2')\n\n response = ec2.describe_regions()\n for region in response['Regions']:\n rname = region['RegionName']\n sys.stdout.write('Running ec2 query in region: ' + rname + '\\n')\n ec2 = boto3.client('ec2', region_name=rname)\n mapstr += 'Region: ' + rname + '\\n'\n respy = ec2.describe_instances()\n for reso in respy['Reservations']:\n for entry in reso['Instances']:\n namey = ''\n try:\n for keyval in entry['Tags']:\n if keyval['Key'] == 'Name':\n namey = keyval['Value']\n break\n except KeyError:\n pass\n\n mapstr += ('\\t\\t' + entry['PublicDnsName'] + '\\n' +\n '\\t\\tLaunch Date: ' + str(entry['LaunchTime']) +\n '\\n' + \n '\\t\\tId: ' + entry['InstanceId'] + '\\n' +\n '\\t\\tType: ' + entry['InstanceType'] + '\\n' +\n '\\t\\tName: ' + namey + '\\n' +\n '\\t\\tState: ' + entry['State']['Name'] + '\\n\\n')\n sys.stdout.write('\\nResults:\\n\\n')\n return mapstr",
"def __get_multi_instances(self, reservations, instance_ids=None, policies=None):\n check_instance_ids = False\n if ( instance_ids and len(instance_ids) > 0 ):\n check_instance_ids = True\n instances = [] \n for reservation in reservations:\n if check_instance_ids:\n for instance in reservation.instances:\n if instance.id in instance_ids:\n instances.append(instance)\n elif policies:\n for instance in reservation.instances:\n if 'typevm' in policies and instance.instance_type == policies['typevm']:\n instances.append(instance) \n elif policies.get('level')==1:\n if self.__compare_types_instances(policies, instance.instance_type.encode(\"latin-1\")):\n instances.append(instance)\n elif policies.get('level') == 0:\n if self.__is_adaptive_instance(self.__get_metrics_adapted(policies), instance.instance_type.encode(\"latin-1\")):\n instances.append(instance)\n else:\n instances=[]\n else:\n instances += reservation.instances\n return instances, len(instances)",
"def run(self):\n ilist = []\n key_filter = filters[self.args['filter_group']]\n for item in self.client.describe_instances()['Reservations']:\n for instance in item['Instances']:\n idict = {}\n for tag in instance['Tags']:\n if not any(t['Key'] == 'Name' for t in instance['Tags']):\n tag['Value'] = 'Unnamed'\n idict['Name'] = tag['Value']\n if tag['Key'] == 'Name':\n if tag['Value'] == \"\":\n tag['Value'] = 'Unnamed'\n idict['Name'] = tag['Value']\n for key in key_filter:\n try:\n if key in ['AvailabilityZone','Tenancy']:\n idict[key] = instance['Placement'][key]\n elif key == 'SecurityGroups':\n sg_list = []\n for sg in instance[key]:\n sg_list.append(sg['GroupId'])\n if self.args['output'] == 'csv':\n sg_string = \" \\n\"\n idict[key] = sg_string.join(sg_list)\n else:\n idict[key] = ','.join(sg_list)\n elif key == 'BlockDeviceMappings':\n devices = []\n for dev in instance[key]:\n devices.append(dev['DeviceName'])\n if self.args['output'] == 'csv':\n dev_string = \" \\n\"\n idict[key] = dev_string.join(devices)\n else:\n idict[key] = ','.join(devices)\n elif key == 'State':\n idict[key] = instance[key]['Name']\n else:\n if instance[key]:\n idict[key] = instance[key]\n except Exception as e:\n idict[key] = 'N/A'\n ilist.append(idict)\n self.template(self.sortList(ilist))",
"def schedule():\n for profile in schedules['profiles']:\n instances = _get_instances(profile['instance_tags'], profile['region'])\n start_stop_instances(instances, profile['schedule'])\n reregister_elb_instances(profile)",
"def get_all_instances(self, instance_ids=None, filters=None):\r\n params = {}\r\n if instance_ids:\r\n self.build_list_params(params, instance_ids, 'InstanceId')\r\n if filters:\r\n if 'group-id' in filters:\r\n warnings.warn(\"The group-id filter now requires a security \"\r\n \"group identifier (sg-*) instead of a group \"\r\n \"name. To filter by group name use the \"\r\n \"'group-name' filter instead.\", UserWarning)\r\n self.build_filter_params(params, filters)\r\n return self.get_list('DescribeInstances', params,\r\n [('item', Reservation)], verb='POST')",
"def get_instance_list():\n return parse_list_output(Popen('nova list --all-tenants'.split(),\n stdout=STDOUT, stderr=STDERR).communicate()[0])",
"def show_reservations(self, user_id = None):\n\n # create an instance of the model\n reserv_model = Reservation(self.settings)\n\n # query the model\n results = reserv_model.find_reservations(user_id)\n\n # return the result in a json-ifiable form\n json_results = []\n for reservation in results:\n json_results.append(reservation.to_json())\n\n # return\n print json_results\n return json_results",
"def aws_get_instances_by_id(region, instance_id, raw=True):\n client = boto3.session.Session().client('ec2', region)\n try:\n matching_reservations = client.describe_instances(InstanceIds=[instance_id]).get('Reservations', [])\n except ClientError as exc:\n if exc.response.get('Error', {}).get('Code') != 'InvalidInstanceID.NotFound':\n raise\n return []\n instances = []\n [[instances.append(_aws_instance_from_dict(region, instance, raw)) # pylint: disable=expression-not-assigned\n for instance in reservation.get('Instances')] for reservation in matching_reservations if reservation]\n return instances",
"def listInstancesRegionZone(region,zone):\n\tprint \"-\"*80\n\tprint \"# Region :\",region,\" Zone\", zone\t\n\tprint \"-\"*80\n\tinstances = getInstancesRegionZone(region,zone)\n\tif instances:\n\t\tfor instance in instances:\n\t\t\tprint \"[\",instance.ami_launch_index,\"]\",instance.ip_address,\" (\",instance.private_ip_address,\") \",instance.instance_type,\" key=\",instance.key_name",
"def list(self, arguments):\n instance_name = arguments['<instance>']\n instance_name = self.activate(instance_name)\n\n vmrun = VMrun(self.vmx, user=self.user, password=self.password)\n print(vmrun.listSnapshots())",
"def _generate_rds_instances_and_sg(resource, session):\n for db_instance in resource.describe_db_instances()[\"DBInstances\"]:\n for security_group in db_instance[\"VpcSecurityGroups\"]:\n yield db_instance, security_group, _get_sg_name(security_group[\"VpcSecurityGroupId\"], session)"
]
| [
"0.70828646",
"0.69551444",
"0.63018495",
"0.62302214",
"0.6171069",
"0.61087954",
"0.6061365",
"0.6009173",
"0.58852756",
"0.5856838",
"0.5851381",
"0.5816237",
"0.5808627",
"0.57474256",
"0.57241356",
"0.57098675",
"0.56716835",
"0.5623796",
"0.5615587",
"0.56060773",
"0.55310875",
"0.5507804",
"0.5502579",
"0.54516816",
"0.5362737",
"0.53578407",
"0.5294057",
"0.5281246",
"0.5231615",
"0.5216862"
]
| 0.7721795 | 0 |
Test that getting tags is successful | def test_get_tags_successful(self):
Tag.objects.create(user=self.user, name='Vegan')
Tag.objects.create(user=self.user, name='Dessert')
res = self.client.get(TAGS_URL)
tags = Tag.objects.all().order_by('-name')
serializer = TagSerializer(tags, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, serializer.data) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_list(self):\n response = self.app.get(self.url('tags.list'))\n # Test response...",
"def test_show_tags(self):\r\n\r\n with app.test_client() as client:\r\n resp = client.get(\"/tags\")\r\n html = resp.get_data(as_text=True)\r\n tags = Tag.query.all()\r\n\r\n self.assertEqual(resp.status_code, 200)\r\n self.assertIn(\"Tag List\", html)\r\n self.assertIn(tags[0].name, html)",
"def test_retrieve_tags(self):\n\n Tag.objects.create(user=self.user, name='Vegan')\n Tag.objects.create(user=self.user, name='Dessert')\n\n res = self.client.get(TAGS_URL)\n\n tags = Tag.objects.all().order_by('-name')\n serializer = TagSerializer(tags, many=True)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(res.data, serializer.data)",
"def test_get_tag(self):\n self.seed_static_data()\n params = {'id': 1, 'event_id': 1}\n response = self.app.get('/api/v1/tag', headers=self.user1_headers, data=params)\n data = json.loads(response.data)\n self.assertEqual(data['id'], 1)\n self.assertEqual(data['event_id'], 1)\n self.assertEqual(data['tag_type'], 'RESPONSE')\n self.assertDictEqual(data['name'], {\n 'en': 'English Tag 1 Event 1',\n 'fr': 'French Tag 1 Event 1'\n })\n self.assertDictEqual(data['description'], {\n 'en': 'English Tag 1 Event 1 Description',\n 'fr': 'French Tag 1 Event 1 Description'\n })",
"def test_tag_search(self):\n url = reverse_lazy('tag-list') + '?search={}'.format('testtag')\n response = self.client.get(url)\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n results = response.data['results']\n self.assertEqual(len(results), 3)\n\n for tag in ('testtag1', 'testtag3'):\n result = list(filter(lambda it: it['title'] == tag, results))\n self.assertEqual(len(result), 1)\n result = result[0]\n\n self.assertEqual(len(result['posts']), 3)",
"def test_badge_should_have_tags(self):\n\n badge = self.get_sample_badge()\n # It's a string, even though it is used as a URL\n self.assertIsInstance(badge.tags, list)",
"def test_get_all_tags(self):\n print(self.session.tags)\n self.assertEqual(\n len(self.session.tags),\n (3 * len(self.session.wp_post_objects)) #3 tags added by default\n )",
"def testTagr(self):\n\t\ttags = (\n\t\t\t \t('bob', 'bo<strong>b</strong>'),\n\t\t\t \t('Jack', 'Jac<strong>k</strong>'),\n\t\t\t \t)\n\t\t\n\t\tf = Flickr()\n\t\tfor tag, tagr in tags:\n\t\t\tresult = f.make_tagr(tag)\n\t\t\tself.assertEqual(result, tagr)",
"def test_todos_by_tag(self):",
"def test_tags(question):\n assert \"tags\" in question[\"instance\"]\n tags = set(question[\"instance\"][\"tags\"])\n # there should be at least one tag\n assert len(tags) >= 1\n # each tags should be in VALID_TAGS\n assert len(tags - VALID_TAGS) == 0\n # there should be exactly one category-defining tag\n assert len(tags.intersection(CATEGORY_TAGS)) == 1",
"def test_show_add_tag(self):\r\n\r\n with app.test_client() as client:\r\n resp = client.get(\"/tags/new\") \r\n html = resp.get_data(as_text=True) \r\n\r\n self.assertEqual(resp.status_code, 200)\r\n self.assertIn(\"Create Tag\", html)",
"def test_tags_limited_to_user_tags(self):\n\n user2 = create_user(\n fname='Test2',\n lname='User2',\n email='[email protected]',\n password='testpass2'\n )\n\n Tag.objects.create(user=user2, name='Vegan')\n tag = Tag.objects.create(user=self.user, name='Dessert')\n\n res = self.client.get(TAGS_URL)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(len(res.data), 1)\n self.assertEqual(res.data[0]['name'], tag.name)",
"def test_show_tag_details(self):\r\n \r\n with app.test_client() as client:\r\n resp = client.get(f\"/tags/{self.tag.id}\")\r\n html = resp.get_data(as_text=True)\r\n\r\n self.assertEqual(resp.status_code, 200)\r\n self.assertIn(\"Marvel\", html)\r\n self.assertIn(\"Avengers\", html)",
"def test_multiple_tags(self):\n self.request.log(\"Hello World\", tags=[\"tag1\", \"tag2\"])\n self.request.end()\n entry = self.get_entry()\n assert len(entry['tags']) == 2\n assert entry['tags'][0] == \"tag1\"\n assert entry['tags'][1] == \"tag2\"",
"def test_create_tags_successfull(self):\n payload = {'name': 'Test Tag'}\n self.client.post(TAG_URL, payload)\n exists = Tag.objects.filter(user=self.user, name = payload['name']).exists()\n self.assertTrue(exists)",
"def test_tag_list(self):\n self.seed_static_data()\n params = {\n 'event_id': 1,\n 'language': 'en'\n }\n\n response = self.app.get('/api/v1/tags', headers=self.user1_headers, data=params)\n data = json.loads(response.data)\n\n self.assertEqual(len(data), 2)\n self.assertEqual(data[0]['id'], 1)\n self.assertEqual(data[0]['event_id'], 1)\n self.assertEqual(data[0]['tag_type'], 'RESPONSE')\n self.assertEqual(data[0]['name'], 'English Tag 1 Event 1')\n self.assertEqual(data[0]['description'], 'English Tag 1 Event 1 Description')\n self.assertEqual(data[1]['id'], 2)\n self.assertEqual(data[1]['event_id'], 1)\n self.assertEqual(data[1]['tag_type'], 'RESPONSE')\n self.assertEqual(data[1]['name'], 'English Tag 2 Event 1')\n self.assertEqual(data[1]['description'], 'English Tag 2 Event 1 Description')\n\n params = {\n 'event_id': 1,\n 'language': 'fr'\n }\n\n response = self.app.get('/api/v1/tags', headers=self.user1_headers, data=params)\n data = json.loads(response.data)\n\n self.assertEqual(len(data), 2)\n self.assertEqual(data[0]['id'], 1)\n self.assertEqual(data[0]['event_id'], 1)\n self.assertEqual(data[0]['tag_type'], 'RESPONSE')\n self.assertEqual(data[0]['name'], 'French Tag 1 Event 1')\n self.assertEqual(data[0]['description'], 'French Tag 1 Event 1 Description')\n self.assertEqual(data[1]['id'], 2)\n self.assertEqual(data[1]['event_id'], 1)\n self.assertEqual(data[1]['tag_type'], 'RESPONSE')\n self.assertEqual(data[1]['name'], 'French Tag 2 Event 1')\n self.assertEqual(data[1]['description'], 'French Tag 2 Event 1 Description')",
"def test_tags_limited_to_user(self):\n user2 = get_user_model().objects.create_user(\n '[email protected]',\n '123456'\n )\n Tag.objects.create(user=user2, name='Fruity')\n tag = Tag.objects.create(user=self.user, name='Comfort Food')\n\n res = self.client.get(TAGS_URL)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(len(res.data), 1)\n self.assertEqual(res.data[0]['name'], tag.name)",
"def test_tag_count_matches_tagged_items(self):\n\n po = self.catalog.load_pageobject('TagsBrowsePage')\n po.goto_page()\n po.form.footer.display_limit('All')\n tags_browse_url = po.current_url()\n\n po2 = self.catalog.load_pageobject('TagsViewPage')\n\n for row in po.search_result_rows():\n tag_info = row.value()\n\n self.browser.proxy_client.new_har(\"page\")\n row.goto_tag()\n har_entry = self.browser.page_load_details()\n\n tags_view_url = po2.current_url()\n\n # check for errors loading the page\n assert har_entry is not None, \\\n \"failed to load the uri. http archive unavailable.\"\n assert self.browser.error_loading_page(har_entry) is False, \\\n \"clicking on the tag '%s' on '%s' \" \\\n % (tag_info['name'],tags_browse_url) + \\\n \"returned an error response code on the page '%s'. \" \\\n % (tags_view_url) + \\\n \"http archive follows:\\n%s\" \\\n % (pprint.pformat(har_entry))\n\n # get the total number of resources\n (junk,junk,total) = po2.get_pagination_counts()\n\n # compare the total number of resources\n # with the count provided by the tag\n assert tag_info['count'] == total, \\\n \"The number of resources listed for the\" \\\n + \" tag '%s' (%s) on %s does not match the total\" \\\n % (tag_info['name'],tag_info['count'],tags_browse_url) \\\n + \" number of resources listed on %s (%s)\" \\\n % (tags_view_url,total)\n\n # go back to the Tags page\n self.browser._browser.back()",
"def test_single_tag(self):\n self.request.log(\"Hello World\", tags=[\"tag1\"])\n self.request.end()\n entry = self.get_entry()\n assert len(entry['tags']) == 1\n assert entry['tags'][0] == \"tag1\"",
"def test_retrieve_tags_assigned_unique(self):\n tag1 = Tag.objects.create(user = self.user,name='Breakfast')\n tag2 = Tag.objects.create(user=self.user,name='Lunch')\n\n recipe1 = Recipe.objects.create(user=self.user,title='Goose Liver on toast',price=5.00,time_minutes=15)\n recipe2 = Recipe.objects.create(user = self.user,title='Egg Benedict',price=5.00,time_minutes=15)\n recipe1.tags.add(tag1)\n recipe2.tags.add(tag1)\n\n res = self.client.get(TAGS_URL,{'assigned_only':1})\n self.assertEqual(len(res.data),1)",
"def test_get_tags(chikin):\n assert len(list(chikin.sections)) == 2\n assert str(chikin.section) == repr(chikin.section) == chikin.section.string == 'Chikin Tales'",
"def test_tags_limited_to_user(self):\n user2 = create_user('[email protected]', 'OtherPassword')\n Tag.objects.create(user=user2, name='Fruity')\n tag = Tag.objects.create(user=self.user, name='Home Food')\n\n res = self.client.get(TAGS_URL)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(len(res.data), 1)\n self.assertEqual(res.data[0]['name'], tag.name)",
"def test_tags_limited_to_user(self):\n user2 = get_user_model().objects.create_user(\n 'Other User', '[email protected]', 'otherpassword')\n Tag.objects.create(user=user2, name='Nonveg')\n tag = Tag.objects.create(user=self.user, name='Fruity')\n\n res = self.client.get(TAGS_URL)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(len(res.data), 1)\n self.assertEqual(res.data[0]['name'], tag.name)",
"def test_tags_tag_search_valid_tag(self,tag_with_items):\n\n tag = tag_with_items\n\n assert tag is not None, 'Could not find a tag with items'\n\n po = self.catalog.load_pageobject('TagsPage')\n po.goto_page()\n\n # perform the search\n self.browser.proxy_client.new_har(\"page\")\n po.search_for_tags(tag)\n har_entry = self.browser.page_load_details()\n\n # check for errors\n assert har_entry is not None, \\\n \"failed to load the uri. http archive unavailable.\"\n assert self.browser.error_loading_page(har_entry) is False, \\\n \"performing a tag search using an the tag\" \\\n + \"'%s' returned an error response code on\" % (tag) \\\n + \"the page %s http archive follows:\\n%s\" \\\n % (po.current_url(),pprint.pformat(har_entry))\n\n # check for valid pagination total on tags view page\n po = self.catalog.load_pageobject('TagsViewPage')\n (start,end,total) = po.get_pagination_counts()\n\n assert total >= 0, \\\n \"performing a tag search using the tag\" \\\n + \"'%s' took user to page (%s) with invalid pagination\"\\\n % (tag,po.current_url())",
"def test_networking_project_network_tag_get(self):\n pass",
"def test_tags_limited_to_user(self):\n user2 = get_user_model().objects.create_user(\n '[email protected]',\n 'testpass'\n )\n Tag.objects.create(user=user2, name='Fruity')\n tag = Tag.objects.create(user=self.user, name='Comfort Food')\n\n res = self.client.get(TAGS_URL)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(len(res.data), 1)\n self.assertEqual(res.data[0]['name'], tag.name)",
"def test_create_recipe_with_tags(self):\n tag1 = sample_tag(user=self.user, name = 'Vegan')\n tag2 = sample_tag(user=self.user, name = 'Dessert')\n payload = {\n 'title': 'Avocado lime Cheesecake',\n 'tags': [tag1.id, tag2.id], # this is how tags are assigned\n 'time_minutes': 20,\n 'price': 20.00,\n }\n res = self.client.post(RECIPE_URL, payload)\n\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n recipe = Recipe.objects.get(id=res.data['id'])\n tags = recipe.tags.all()\n\n self.assertEqual(tags.count(), 2)\n self.assertIn(tag1, tags)\n self.assertIn(tag2, tags)",
"def test_search_tags(self):\n page = self.page1\n page.search_tags = \"Chutes, Ladders\"\n page.save_revision().publish()\n taglist = page.clean_search_tags\n for name in [\"Chutes\", \"Ladders\"]:\n self.assertIn(name, taglist)",
"def test_create_recipe_with_tags(self):\n tag1 = sample_tag(user=self.user,name='vegan')\n tag2 = sample_tag(user=self.user, name='dessert')\n payload = {\n 'title':'cheesecake',\n 'tag':[tag1.id,tag2.id],\n 'time_minutes':60,\n 'price':10.00,\n }\n res = self.client.post(RECIPE_URL,payload)\n self.assertEqual(res.status_code,status.HTTP_201_CREATED)\n recipe = Recipe.objects.get(id=res.data['id'])\n tags = recipe.tag.all()\n self.assertEqual(len(tags),2)\n self.assertIn(tag1,tags)\n self.assertIn(tag2,tags)",
"def test_retrive_tags_assigned_unique(self):\n tag = Tag.objects.create(user=self.user, name=\"Breakfast\")\n Tag.objects.create(user=self.user, name=\"Lunch\")\n recipe1 = Recipe.objects.create(\n title=\"Pancake\",\n making_time_minutes=10,\n price=5.00,\n user=self.user\n )\n recipe2 = Recipe.objects.create(\n title=\"Italian Fried Egg\",\n making_time_minutes=5,\n price=10.00,\n user=self.user\n )\n recipe1.tags.add(tag)\n recipe2.tags.add(tag)\n\n res = self.client.get(TAGS_URL, {'assigned_only': 1})\n self.assertEqual(len(res.data), 1)"
]
| [
"0.783578",
"0.7753292",
"0.771596",
"0.7457215",
"0.74495363",
"0.73183525",
"0.7141053",
"0.7133542",
"0.71330744",
"0.7128664",
"0.7118904",
"0.71148205",
"0.70716166",
"0.70590526",
"0.70323956",
"0.70219934",
"0.701507",
"0.7002467",
"0.69986093",
"0.69817287",
"0.6981429",
"0.6956649",
"0.6951933",
"0.6928798",
"0.69287056",
"0.6904701",
"0.6904491",
"0.6893461",
"0.6885095",
"0.6870231"
]
| 0.7995485 | 0 |
Test that adding a tag is successful | def test_add_tag_successful(self):
payload = {'name': 'test tag'}
self.client.post(TAGS_URL, payload)
# self.assertEqual(res.status_code, status.HTTP_201_CREATED)
exists = Tag.objects.filter(
user=self.user,
name=payload['name']
).exists()
self.assertTrue(exists) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_add_tag(self):\r\n\r\n with app.test_client() as client:\r\n d = {\"name\": \"hero\"}\r\n resp = client.post(\"/tags/new\", data=d, follow_redirects=True)\r\n html = resp.get_data(as_text=True) \r\n\r\n self.assertEqual(resp.status_code, 200)\r\n self.assertIn(\"hero\", html)",
"def test_add_tagitem(self):\n record = self.good_record()\n record['tag'] = self.tag\n # Create a fake record in mongo\n id = self.images.insert(record)\n\n status = self.m.add_tag(id, self.system, 'testtag')\n self.assertTrue(status)\n rec = self.images.find_one({'_id': id})\n self.assertIsNotNone(rec)\n self.assertIn(self.tag, rec['tag'])\n self.assertIn('testtag', rec['tag'])",
"def test_show_add_tag(self):\r\n\r\n with app.test_client() as client:\r\n resp = client.get(\"/tags/new\") \r\n html = resp.get_data(as_text=True) \r\n\r\n self.assertEqual(resp.status_code, 200)\r\n self.assertIn(\"Create Tag\", html)",
"def test_add_tag_invalid(self):\r\n\r\n with app.test_client() as client: \r\n d = {\"name\": \"\"}\r\n resp = client.post(\"/tags/new\", data=d, follow_redirects=True)\r\n html = resp.get_data(as_text=True)\r\n\r\n self.assertEqual(resp.status_code, 200)\r\n self.assertIn(\"Please enter tag name\", html)",
"def test_add_tag(self):\n fc = self.read_feature(region='Adriatic_Sea')\n\n fc.tag(tags=['tag1', 'tag2', 'Mediterranean_Basin'])\n assert (fc.features[0]['properties']['tags'] ==\n 'Adriatic_Sea;Mediterranean_Basin;tag1;tag2')\n\n self.check_feature(fc.features[0])",
"def test_add_tag_invalid(self):\n payload = {'name': ''}\n res = self.client.post(TAGS_URL, payload)\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)",
"def test_create_tags_successfull(self):\n payload = {'name': 'Test Tag'}\n self.client.post(TAG_URL, payload)\n exists = Tag.objects.filter(user=self.user, name = payload['name']).exists()\n self.assertTrue(exists)",
"def test_create_tag(self):\n\n tag_payload = {'name': 'Test Tag'}\n self.client.post(URL_TAGS, tag_payload)\n\n is_tag_created = Tag.objects.filter(\n user=self.user,\n name=tag_payload['name']\n ).exists()\n\n self.assertTrue(is_tag_created)",
"def test_create_tag_successful(self):\n payload = {'name': 'Test tag'}\n self.client.post(TAGS_URL, payload)\n\n exists = Tag.objects.filter(user=self.user, name=payload['name']).exists()\n\n self.assertTrue(exists)",
"def test_create_tag_successful(self):\n payload = {'name': 'Test tag'}\n self.client.post(TAGS_URL, payload)\n exists = Tag.objects.filter(\n user=self.user,\n name=payload['name'],\n ).exists()\n\n self.assertTrue(exists)",
"def test_add_text(self):\n text = 'test'\n info = self.api.add_text(text, tags=['asd'])\n self.assertEqual(info['value'], text)\n tags = [t['name'] for t in info['tags']]\n self.assertEqual(tags, ['asd'])",
"def test_create_tag_succesful(self):\n payload = {'name': 'Test tag'}\n res = self.client.post(TAGS_URL, payload)\n\n exists = Tag.objects.filter(\n user=self.user,\n name=payload['name']\n ).exists()\n self.assertTrue(exists)\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)",
"def test_create_tag_is_successful(self):\n\n payload = {\n 'name': 'Test Tag'\n }\n\n self.client.post(TAGS_URL, payload)\n\n exists = Tag.objects.filter(\n user=self.user,\n name=payload['name']\n ).exists()\n\n self.assertTrue(exists)",
"def test_tags_on_article(self):\n self.article.tags.add(self.tag1, self.tag2)\n self.assertEqual('Django', str(self.article.tags.all()[0]))",
"def test_create_tag_successful(self):\n tag_data = {'name': 'Snack'}\n res = self.client.post(TAGS_URL, data=tag_data)\n\n exists = Tag.objects.filter(\n user=self.user,\n name=tag_data['name']\n ).exists()\n\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n self.assertTrue(exists)",
"def testAddTag(self):\n project = self.session.create_project()\n\n project.add_tag(\"test\")\n self.assertEqual(project.tags, [\"test\"], \"Can add a tag to a project.\")\n\n json_str = project.to_json()\n doc = json.loads(json_str)\n\n self.assertEqual(doc['meta']['tags'], [\"test\"],\n \"JSON representation had correct tags after add_tag().\")\n\n # Try adding the same tag yet again, shouldn't get a duplicate\n with self.assertRaises(ValueError):\n project.add_tag(\"test\")\n\n json_str = project.to_json()\n doc2 = json.loads(json_str)\n\n self.assertEqual(doc2['meta']['tags'], [\"test\"],\n \"JSON document did not end up with duplicate tags.\")",
"def test_add_remove_tag(self):\n record = self.good_record()\n # Create a fake record in mongo\n id = self.images.insert(record)\n\n self.assertIsNotNone(id)\n before = self.images.find_one({'_id': id})\n self.assertIsNotNone(before)\n # Add a tag a make sure it worked\n status = self.m.add_tag(id, self.system, 'testtag')\n self.assertTrue(status)\n after = self.images.find_one({'_id': id})\n self.assertIsNotNone(after)\n self.assertIn('testtag', after['tag'])\n self.assertIn(self.tag, after['tag'])\n # Remove a tag and make sure it worked\n status = self.m.remove_tag(self.system, 'testtag')\n self.assertTrue(status)\n after = self.images.find_one({'_id': id})\n self.assertIsNotNone(after)\n self.assertNotIn('testtag', after['tag'])",
"def test_add_remove_withtag(self):\n record = self.good_record()\n # Create a fake record in mongo\n id = self.images.insert(record)\n\n session = self.m.new_session(self.auth, self.system)\n i = self.query.copy()\n status = self.m.add_tag(id, self.system, 'testtag')\n self.assertTrue(status)\n rec = self.m.lookup(session, i)\n self.assertIsNotNone(rec)\n self.assertIn(self.tag, rec['tag'])\n self.assertIn('testtag', rec['tag'])",
"def test_create_tag_invalid(self):\n payload = {'name':''}\n res = self.client.post(TAG_URL,payload)\n self.assertEqual(res.status_code,status.HTTP_400_BAD_REQUEST)",
"def test_create_recipe_with_tag(self):\n tag1 = sample_tag(user=self.user, name = 'Vegen')\n tag2 = sample_tag(user=self.user, name='Dessert')\n\n payload = {\n 'title': 'Avocado lime cheescake',\n 'tags' : [tag1.id, tag2.id],\n 'time_minuts': 50,\n 'price': 400\n }\n\n res = self.client.post(RECIPE_URL, payload)\n\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n recipe = Recipe.objects.get(id=res.data['id'])\n tags = recipe.tags.all()\n self.assertEqual(tags.count(),2)\n self.assertIn(tag1, tags)\n self.assertIn(tag2,tags)",
"def test_create_tag_successful(self):\n payload = {'name':'Desserts'}\n res = self.client.post(TAGS_URL,payload)\n\n exist = Tag.objects.filter(\n user = self.user,\n name = payload['name']\n ).exists()\n\n self.assertTrue(exist)\n #self.assertEqual(res.status_code,status.HTTP_200_OK)",
"def test_add_url(self):\n url = 'http://test.com/'\n info = self.api.add_url(url, tags=['asd'])\n self.assertEqual(info['value'], url)\n tags = [t['name'] for t in info['tags']]\n self.assertEqual(tags, ['asd'])",
"def test_create_tag_invalid(self):\n payload = {'name': ''}\n res = self.client.post(TAGS_URL, payload)\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)",
"def test_create_tag_invalid(self):\n tag_data = {'name': ''}\n res = self.client.post(TAGS_URL, data=tag_data)\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)",
"def test_single_tag(self):\n self.request.log(\"Hello World\", tags=[\"tag1\"])\n self.request.end()\n entry = self.get_entry()\n assert len(entry['tags']) == 1\n assert entry['tags'][0] == \"tag1\"",
"def test_create_tag_invalid(self):\n payload = {'name': ''}\n res = self.client.post(TAGS_URL, payload)\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)",
"def test_add_tag(driver):\n print(\"-\"*80)\n print(\"Test: Adding a tag\")\n print(\"-\"*80)\n\n time.sleep(1)\n # Get all receipts\n receipts = driver.find_elements_by_class_name('receipt')\n\n # Choose a receipt randomly to add tag \n i = random.randint(0, len(receipts)-1)\n e = receipts[i]\n\n # Click on the add-tag element\n old_tags = get_tags(e)\n tag = add_tag(e, driver)\n if DEBUG>=2:\n driver.refresh() # Probably don't require\n\n time.sleep(1)\n # Fetch the new receipts again\n receipts = driver.find_elements_by_class_name('receipt')\n e = receipts[i]\n\n new_tags = get_tags(e)\n added_tags_ = list(set(new_tags) - set(old_tags))\n if len(added_tags_) != 1 or tag not in added_tags_[0]:\n print(\"\"\"\nERROR: The number of newly added tags did not match.\nExpected: {!r}, Found: {!r}\"\"\".format([tag], added_tags_))\n return -1\n print(\"Success!!!\")\n print('<>'*40 + '\\n')\n return 0",
"def test_create_services_with_tag(self):\n tag1 = sample_tag(user=self.user, name='Electrical')\n tag2 = sample_tag(user=self.user, name='Distribution')\n\n payload = {\n 'title' : 'Fitting Job',\n 'tags' : [tag1.id, tag2.id],\n 'price' : 100.00\n }\n\n res = self.client.post(SERVICES_URL, payload)\n\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n services = Service.objects.get(id=res.data['id'])\n tags = services.tags.all()\n self.assertEqual(tags.count(), 2)\n self.assertIn(tag1, tags)\n self.assertIn(tag2, tags)",
"def test_add_or_update_single_tag(self):\n p = self.load_policy({\n 'name': 'test-azure-tag',\n 'resource': 'azure.vm',\n 'filters': [\n {'type': 'value',\n 'key': 'name',\n 'op': 'eq',\n 'value_type': 'normalize',\n 'value': 'cctestvm'}\n ],\n 'actions': [\n {'type': 'tag',\n 'tag': 'tag1',\n 'value': 'value1'}\n ],\n })\n p.run()\n\n # verify that the a new tag is added without modifying existing tags\n s = Session()\n client = s.client('azure.mgmt.compute.ComputeManagementClient')\n vm = client.virtual_machines.get('test_vm', 'cctestvm')\n self.assertEqual(vm.tags, {'tag1': 'value1', 'testtag': 'testvalue'})",
"def test_add_asn(self):\n asn = '123'\n info = self.api.add_asn(asn, tags=['asd'])\n self.assertEqual(info['value'], asn)\n tags = [t['name'] for t in info['tags']]\n self.assertEqual(tags, ['asd'])"
]
| [
"0.84031516",
"0.7963376",
"0.78905445",
"0.78477657",
"0.77803254",
"0.7744458",
"0.7717115",
"0.7699082",
"0.7671048",
"0.7661872",
"0.76451296",
"0.7629898",
"0.7573351",
"0.75318795",
"0.75171554",
"0.7457491",
"0.74124134",
"0.7369319",
"0.73623157",
"0.73550355",
"0.7352944",
"0.73452777",
"0.7334246",
"0.72506386",
"0.72505486",
"0.7248755",
"0.72139066",
"0.7142612",
"0.7116249",
"0.7109858"
]
| 0.7980848 | 1 |
1. Query all of the puppies and return the results in ascending alphabetical order | def query_one():
puppies = session.query(Puppy.name).order_by(Puppy.name.asc()).all()
for puppy in puppies:
print puppy.name | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def query_three():\n \n puppies = session.query(Puppy.name, Puppy.weight).order_by(Puppy.weight.asc()).all()\n \n for puppy in puppies:\n print \"{puppy_name}: {weight}\".format(puppy_name=puppy[0], weight=puppy[1])",
"def __qualitaetsListeProteins(self):\n rv = []\n pam30_sortierbar = {}\n for key in pam30.keys():\n pam30_sortierbar[str(pam30[key]) + \";\" + ''.join(key)] = pam30[key]\n if key[0] != key[1]:\n pam30_sortierbar[\n str(pam30[key]) + \";\" + ''.join((key[1], key[0]))\n ] = pam30[key]\n sorted_keys = list(pam30_sortierbar.keys())\n sorted_keys.sort(key=lambda k: int(k.split(\";\")[0]), reverse=True)\n # debugging kept for historical reasons\n # for key in iter(sorted_keys):\n # print(key.split(\";\")[1] + \" has score \" + str(pam30_sortierbar[key]))\n for key in iter(sorted_keys):\n rv.append(key.split(\";\")[1])\n return(rv)",
"def query_four():\n puppies = session.query(Shelter, func.count(Puppy.id)).join(Puppy).group_by(Shelter.id).all()\n \n for shelter_puppy in puppies:\n \n print \"{shelter_name}: {puppy}\".format(shelter_name=shelter_puppy[0].name, puppy=shelter_puppy[1])",
"def sort_results(self):\n pass",
"def orderby():\n pass",
"def popPapers(self):\r\n curSymposium = self.symposiumBox.getVal()\r\n print(curSymposium)\r\n res = self.dbConn.execute(\"SELECT * FROM Papers WHERE PaperID>0 ORDER BY Title\").fetchall()\r\n self.paperList = [ln[\"Title\"] for ln in res]",
"def baby_search_engine(name):\r\n \r\n name_ranking = []\r\n \r\n for publication_name, name_list in baby_names.items():\r\n publication = {}\r\n if name.capitalize() in name_list:\r\n publication['list'] = publication_name\r\n publication['rank'] = name_list.index(name.capitalize()) + 1\r\n name_ranking.append(publication)\r\n\r\n \r\n return sorted(name_ranking, key=lambda k: k['rank'])",
"def drug_names():\n results = set()\n if 'qry' in request.args and len(request.args['qry']) >= 3:\n look_for = f\"{request.args['qry'].lower()}%\"\n drug_list = FTA.find_by_name(look_for, False )\n results = set([f\"{d.PROPRIETARY_NAME} - {d.NONPROPRIETARY_NAME}\" for d in drug_list if d.ACTIVE])\n\n results = sorted(list(results))\n return jsonify(results)",
"def get_sorted():\n return sorted(country_list, key=get_pop_and_name)",
"def fetchTitles(self):\r\n cur = self.dbConn.execute(\"SELECT DISTINCT Title FROM People ORDER BY Title\")\r\n self.titleList = [ln[\"Title\"] for ln in cur.fetchall()]",
"def sortby(self):\n ...",
"def popAuthors(self):\r\n# cur = self.dbConn.execute(\"SELECT * FROM People WHERE PersonID>0 ORDER BY Lastname\")\r\n# res = cur.fetchall()\r\n res = self.dbConn.execute(\"SELECT * FROM People WHERE PersonID>0 ORDER BY Lastname\").fetchall()\r\n\r\n self.authorList = [formatNameSQL(ln) for ln in res]\r\n self.quickAuthors = [ln[\"Lastname\"].lower() for ln in res]\r\n vals = [ln[\"PersonID\"] for ln in res]\r\n \r\n self.authorLookup = dict(zip(self.authorList,vals))",
"def get_results(cls):\n cls.all_hoechstzahls.sort(key=attrgetter('value', 'topic.category.weight'), reverse=True)\n for hoechstzahl in cls.all_hoechstzahls:\n yield hoechstzahl",
"def sort_by(self, param):\n sorted(self.books_all, key=lambda k: k[param])\n return self.books_all",
"def load_suppliers(self):\n return_list = []\n with Transaction().start(DBNAME, 1):\n partylist = self.Party.search([('categories', '=', self.category)])\n for i in partylist:\n dictionary = {}\n dictionary['code'] = str(i.pan)\n dictionary['name'] = i.name\n dictionary['address'] = i.addresses[0].full_address\n return_list.append(dictionary)\n return return_list",
"def sort_books(self):\n self.foods = sorted(self.foods, key=attrgetter(\"name\"))",
"def get_cheapest_query_results(self, url):\n data = self.get_query_url_results(url)\n data = self._sort_by_price(data)\n headers = {\n \"name\": \"Item Name\",\n \"seller\": \"Seller IGN\",\n \"sockets\": \"Sockets\",\n \"price\": \"Price\",\n \"pdps\": \"pDPS\",\n }\n print tabulate(data[:5], headers, tablefmt=\"rst\")",
"def get_sorted_parties():\n return list(dfSeatsPerPartyBy2ndVotes.sort_values(by=[\"party\"])[\"party\"])",
"def pre_sort(self, qs):\n return qs",
"def sort_list_by_president_order(pronoun_proportion_list):\n return sorted(pronoun_proportion_list, key=lambda (k,d,v): (d,k,v))",
"def pick_goods(self, by=None):",
"def pick_goods(self, by=None):",
"def test_query_order_named_property(self):\n class Q(ndb.Model):\n name = ndb.StringProperty('n')\n e = [Q(name=s) for s in string.letters]\n keys = ndb.put_multi(e)\n l = map(lambda x: x.name, Q.query().order(Q.name).fetch())\n l_ref = sorted(string.letters)\n try:\n self.assertEqual(l, l_ref)\n finally:\n ndb.delete_multi(keys)",
"def get_people(self):\n return sorted(list(self.plist.keys()))",
"def dataSort(self, collectionName, catagory, method='ASCENDING'):\n if method == 'ASCENDING':\n results = collectionName.find().sort(catagory, pymongo.ASCENDING)\n elif method == 'DESCENDING':\n results = collectionName.find().sort(catagory, pymongo.DESCENDING)\n return results",
"def toppings(request, pizza_id):\r\n pizza = Pizza.objects.get(id=pizza_id)\r\n toppings = pizza.topping_set.order_by('name')\r\n context = {'pizza': pizza, 'toppings': toppings}\r\n return render(request, 'pizzas/toppings.html', context)",
"def test_order_by(self):\n self.Person(name=\"User B\", age=40).save()\n self.Person(name=\"User A\", age=20).save()\n self.Person(name=\"User C\", age=30).save()\n\n names = [p.name for p in self.Person.objects.order_by(\"-age\")]\n assert names == [\"User B\", \"User C\", \"User A\"]\n\n names = [p.name for p in self.Person.objects.order_by(\"+age\")]\n assert names == [\"User A\", \"User C\", \"User B\"]\n\n names = [p.name for p in self.Person.objects.order_by(\"age\")]\n assert names == [\"User A\", \"User C\", \"User B\"]\n\n ages = [p.age for p in self.Person.objects.order_by(\"-name\")]\n assert ages == [30, 40, 20]\n\n ages = [p.age for p in self.Person.objects.order_by()]\n assert ages == [40, 20, 30]\n\n ages = [p.age for p in self.Person.objects.order_by(\"\")]\n assert ages == [40, 20, 30]",
"def getPriorityList(self):",
"def get_queryset(self):\n\n qs = Aid.objects \\\n .published() \\\n .open() \\\n .select_related('perimeter', 'author') \\\n .prefetch_related('financers', 'instructors')\n\n filter_form = self.form\n results = filter_form.filter_queryset(qs)\n ordered_results = filter_form.order_queryset(results).distinct()\n return ordered_results",
"def sort_plans(request):\n stars = Product.objects.annotate(\n avg_review=Avg('productreview__rating'),\n )\n select = request.GET['sortp']\n items = Product.objects.filter(category__icontains='P')\n if select == 'LtoH':\n results = items.order_by('price')\n return render(request, \"products.html\",\n {\"products\": results, 'stars': stars})\n elif select == 'HtoL':\n results = items.order_by('-price')\n return render(request, \"products.html\",\n {\"products\": results, 'stars': stars})\n elif select == 'AtoZ':\n results = items.order_by('name')\n return render(request, \"products.html\",\n {\"products\": results, 'stars': stars})\n elif select == 'ZtoA':\n results = items.order_by('-name')\n return render(request, \"products.html\",\n {\"products\": results, 'stars': stars})"
]
| [
"0.62195045",
"0.6189954",
"0.60429305",
"0.59504145",
"0.593689",
"0.59041137",
"0.58872336",
"0.5828653",
"0.5522957",
"0.551187",
"0.549347",
"0.54705834",
"0.5439277",
"0.539479",
"0.53898966",
"0.5383236",
"0.53803074",
"0.5371636",
"0.53633094",
"0.5298849",
"0.5276708",
"0.5276708",
"0.5267469",
"0.5250388",
"0.5248373",
"0.52395165",
"0.5219191",
"0.5217372",
"0.5213001",
"0.521232"
]
| 0.69107115 | 0 |
2. Query all of the puppies that are less than 6 months old organized by the youngest first | def query_two():
today = datetime.date.today()
max_days_old = 180
max_birthday = today - datetime.timedelta(days = max_days_old)
puppies = session.query(Puppy.name, Puppy.dateOfBirth).filter(Puppy.dateOfBirth >= max_birthday).order_by(Puppy.dateOfBirth.desc()).all()
for puppy in puppies:
print "{puppy_name}: {dob}".format(puppy_name= puppy[0], dob=puppy[1]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def upcoming_meetups_query(cls):\r\n # Warning, this timestamp inequality is actually done as a string comparison\r\n # in the db for some reason. BUT, since epoch seconds won't get another digit\r\n # for another 275 years, we're good for now...\r\n return Meetup._query(Meetup.c.timestamp > time.time() - g.meetup_grace_period, data=True, sort='_date')",
"def execQ13():\n frame = pan.DataFrame(data, columns=['Product', 'Price', 'Period'])\n kiwi = frame[(dFrame.Series_title_1 == \"Kiwifruit, 1kg\") & (dFrame.Period >= 2013.01) & (dFrame.Period < 2014.01)]\\\n .sort_values(by='Price')\n return kiwi",
"def execQ17():\n frame = pan.DataFrame(data, columns=['Product', 'Price', 'Period'])\n lettuce = frame[(dFrame.Series_title_1 == \"Lettuce, 1kg\") & (dFrame.Period >= 2013.01) & (dFrame.Period < 2014.01)].sort_values(\n by='Price')\n return lettuce",
"def execQ6():\n frame = pan.DataFrame(data, columns=['Product', 'Price', 'Period'])\n expensive = frame.sort_values(by='Price', ascending=False).head(1)\n return expensive",
"def topOccupations(years):\n\toccs = []\n\tfor year in years:\n\t\tfilename = \"SmartEnergy\" +str(year) +\".xlsx\"\n\t\tDB = pd.read_excel(filename, sheet_name= \"Report1_Data\", usecols =[\"BGTOCC\",\"Job Postings\"])\n\t\t# occs.append(DB.head(5))\n\t\toccs.append(DB)\n\t\tTopOccs =pd.concat(occs, keys = list(years), names =['year'])\n\t\n\t# Which occupations appear throughout the years, which are new and which no longer appear?\n\t\n\t\n\tv = TopOccs.BGTOCC.value_counts().sort_index()\t\n\t\n\t# create list of occupations that are posted each year\n\trecurringOccs = v[v==8].index.tolist()\n\t\n\treturn TopOccs\n\t# emergingOccs = \n\t# print(TopOccs.loc[2012,:])",
"def execQ15():\n frame = pan.DataFrame(data, columns=['Product', 'Price', 'Period'])\n apple = frame[(dFrame.Series_title_1 == \"Apples, 1kg\") & (dFrame.Period >= 2013.01) & (dFrame.Period < 2014.01)].sort_values(\n by='Price')\n return apple",
"def get_unresolved_future_prices():\n #TODO this is inefficient, hits the db A LOT\n latest_bitcoin_time = get_latest_bitcoin_time()\n\n potentially_unresolved = Future_Price.objects.filter(\n time_to_match_price__lte=latest_bitcoin_time\n #TODO would like a __gt condition somehow\n )\n\n unresolved_future_prices = []\n for p in potentially_unresolved:\n has_no_returned_amounts_from_before_window = Returned_Amount.objects.filter(to_prediction__future_price=p, from_received_amount__time__lt=F('from_received_amount__prediction__future_price__time_window_closes')).count() == 0\n if has_no_returned_amounts_from_before_window:\n has_received_amounts_from_before_window = Received_Amount.objects.filter(prediction__future_price=p, time__lt=F('prediction__future_price__time_window_closes')).count() > 0\n if has_received_amounts_from_before_window:\n bitcoin_price_exists = Bitcoin_Price.objects.filter(time=p.time_to_match_price).count() == 1\n if bitcoin_price_exists:\n unresolved_future_prices.append(p)\n\n return unresolved_future_prices\n\n \"\"\"\n The following commented-out method:\n - assumes that there is always a bitcoin_price for every minute before the\n last bitcoin_price\n - assumes that every future_prediction before the last returned_amount has\n been evaluated\n ...I am not willing to make these assumptions\n \n latest_bitcoin_time = get_latest_bitcoin_time()\n\n try:\n latest_returned_amount = Returned_Amount.objects.order_by('-from_received_amount__prediction__future_price__time_to_match_price')[0]\n latest_returned_time = latest_returned_amount.from_received_amount.prediction.future_price.time_to_match_price\n except IndexError:\n latest_returned_time = datetime.datetime(1970, 1, 1, 0, 0, 0, 0, utc)\n\n unresolved_future_prices = Future_Price.objects.filter(\n time_to_match_price__lte=latest_bitcoin_time,\n time_to_match_price__gt=latest_returned_time\n )\n\n return unresolved_future_prices\n \"\"\"",
"def top_sources_female(args: Dict[str, Any]) -> List[object]:\n query = [\n {\n \"$match\": {\n \"body\": {\"$ne\": \"\"},\n \"quotesUpdated\": {\"$exists\": True},\n \"outlet\": {\"$in\": args[\"outlets\"]},\n \"publishedAt\": {\n \"$gte\": args[\"begin_date\"],\n \"$lt\": args[\"end_date\"] + timedelta(days=1),\n },\n }\n },\n {\"$project\": {\"outlet\": 1.0, \"sourcesFemale\": 1.0}},\n {\"$unwind\": {\"path\": \"$sourcesFemale\", \"preserveNullAndEmptyArrays\": False}},\n {\"$group\": {\"_id\": \"$sourcesFemale\", \"count\": {\"$sum\": 1.0}}},\n {\"$sort\": {\"count\": args[\"sort\"]}},\n {\"$limit\": args[\"limit\"]},\n ]\n return query",
"def youngest():\n # fill it out\n newlist = sorted(PEOPLE_LIST, key=itemgetter('age'))\n return newlist",
"def user_story_13(self):\n for family in self.families.values():\n if family.children != 'NA':\n bday_dict = dict() # { iid1: bday1, iid2: bday1, iid3: bday2 }\n for child in family.children:\n bday_dict[child] = self.individuals[child].birthday\n for i1, i2 in itertools.combinations(bday_dict, 2):\n older = bday_dict[i1] if bday_dict[i1] < bday_dict[i2] else bday_dict[i2]\n younger = bday_dict[i1] if bday_dict[i1] >= bday_dict[i2] else bday_dict[i2]\n if older + relativedelta(days=1) < younger and younger < older + relativedelta(months=8):\n print(f'US13 - {min(self.individuals[i1].name, self.individuals[i2].name)} and {max(self.individuals[i1].name, self.individuals[i2].name)} have birthdays that are too close together on lines {min(self.individuals[i1]._birthday_line, self.individuals[i2]._birthday_line)} and {max(self.individuals[i1]._birthday_line, self.individuals[i2]._birthday_line)}')",
"def find_months_needing_update(\n self,\n product_name: str,\n only_those_newer_than: datetime,\n ) -> Iterable[Tuple[date, int]]:\n dataset_type = self.get_dataset_type(product_name)\n\n # Find the most-recently updated datasets and group them by month.\n return sorted(\n (month.date(), count)\n for month, count in self._engine.execute(\n select(\n [\n func.date_trunc(\n \"month\", datetime_expression(dataset_type.metadata_type)\n ).label(\"month\"),\n func.count(),\n ]\n )\n .where(ODC_DATASET.c.dataset_type_ref == dataset_type.id)\n .where(dataset_changed_expression() > only_those_newer_than)\n .group_by(\"month\")\n .order_by(\"month\")\n )\n )",
"def oldest():\n # fill it out\n newlist = sorted(PEOPLE_LIST, key=itemgetter('age'), reverse=True)\n return newlist",
"def execQ7():\n frame = pan.DataFrame(data, columns=['Product', 'Price', 'Period'])\n filtered_frame = frame.sort_values(by='Price', ascending=True).drop_duplicates(subset='Product').head(10)\n return filtered_frame",
"def precipitation():\n last_date = session.query(Measurement.date).order_by(Measurement.date.desc()).first()\n last_year = dt.date(2017, 8, 23) - dt.timedelta(days=365)\n rain = session.query(Measurement.date, Measurement.prcp).\\\n filter(Measurement.date > last_year).\\\n order_by(Measurement.date).all()",
"def execQ5():\n frame = pan.DataFrame(data, columns=['Product', 'Price', 'Period'])\n cheapest = frame.sort_values(by='Price', ascending=True).head(1)\n return cheapest",
"def execQ16():\n frame = pan.DataFrame(data, columns=['Product', 'Price', 'Period'])\n banana = frame[(dFrame.Series_title_1 == \"Bananas, 1kg\") & (dFrame.Period >= 2013.01) & (dFrame.Period < 2014.01)].sort_values(\n by='Price')\n return banana",
"def get_most_popular_app(engine, publisher_id):\n connection = engine.connect()\n result = connection.execute(\n '''\n SELECT a.period,\n a.company_name,\n a.active_users,\n a.publisher_id,\n a.app_id\n FROM aa_months a\n WHERE a.publisher_id='{0}'\n AND a.period >= (SELECT MAX(period) FROM aa_months)\n AND a.period < DATE_ADD((SELECT MAX(period) FROM aa_months), INTERVAL 1 DAY)\n ORDER BY a.active_users DESC\n LIMIT 1\n '''.format(publisher_id))\n connection.close()\n return result.fetchone()",
"def oldest():\n def get_age(person_list):\n return person_list['age']\n return sorted(PEOPLE_LIST, key = get_age, reverse=True)",
"def decideWorkers(availableWorkers,today,num=4,mopping=False):\n secondary = 0 # count for how many secondaries are working today\n\n availableWorkers.sort(key=lambda worker: worker.getCleanCount())\n for minion in availableWorkers[:num]: # take the first four lowest\n if minion.getWorkEnd() != 9.0: # find the number of secondaries\n secondary += 1\n\n for minion in availableWorkers[:num+secondary]: # take the first four lowest and additionals to replace secondaries\n if mopping and (minion.getWorkEnd() == 9.0): #only people who will be at the end of lab to mop gets mopping counted\n minion.mopped()\n else: #if they aren't mopping or cannot mop, they only clean\n minion.cleaned()\n # Add this assignment to the schedule\n working = (minion.getName(),minion.getCleanCount()) #make the name and clean count a tuple\n \n \n \n today.append(working)\n return today",
"def youngest():\n def get_age(person_list):\n return person_list['age']\n return sorted(PEOPLE_LIST, key = get_age)",
"def latest_question(questions):\n return questions.order_by('-pub_date')[:5]",
"def newbies(self):\n newness = datetime.now() - timedelta(days=self.DAYS_FOR_NEWBIE_CHECK)\n newbies = (\n self.valid_choices.filter(\n Q(roster__accounthistory__start_date__gte=newness)\n & Q(roster__accounthistory__end_date__isnull=True)\n )\n .distinct()\n .order_by(\"db_key\")\n )\n return list(newbies)",
"def execQ8():\n frame = pan.DataFrame(data, columns=['Product', 'Price', 'Period'])\n filtered_frame = frame.sort_values(by='Price', ascending=False).drop_duplicates('Product').head(10)\n return filtered_frame",
"def Tobs_past_year(): \n results = pd.DataFrame(session.query(Measurement.date,Measurement.tobs).\\\nfilter(Measurement.date.between(One_yrs_ago,current_time)).all());\n\n dates_of_last_year=list(results.sort_values(by='date')['date'].unique()) \n aa1=results.sort_values(by='date').groupby('date')\n last_year_tobs={dates_of_last_year[i]:list(aa1.get_group(dates_of_last_year[i])['tobs'])\\\n for i in range(len(aa1))}\n print(f\"Route /api/v1.0/tobs/past_year is being visited\")\n return jsonify(last_year_tobs)",
"def six_best_months():\n if not monthly_averages:\n return [('', 0.0), ('', 0.0), ('', 0.0), ('', 0.0), ('', 0.0), ('', 0.0)]\n monthly_averages.sort(key=operator.itemgetter(1),reverse=True)\n return monthly_averages[:6]",
"def test_date_by_gt_yr_mo(self):\n spi_search = \"find date > 1978-10-21\"\n inv_search = 'year:1978-10-21->9999'\n self._compare_searches(inv_search, spi_search)",
"def get_queryset(self):\n #.1 below code was showing future poll/questions\n #.1 return Question.objects.order_by('-pub_date')[:5]\n\n #re-defining\n \"\"\"\n Return the last five published questions (not including those set to be\n published in the future).\n \"\"\" \n #imported timezone\n \n return Question.objects.filter(pub_date__lte=timezone.now()).order_by('-pub_date')[:5]",
"def find_years_needing_update(self, product_name: str) -> List[int]:\n updated_months = TIME_OVERVIEW.alias(\"updated_months\")\n years = TIME_OVERVIEW.alias(\"years_needing_update\")\n product = self.get_product_summary(product_name)\n\n # Years that have already been summarised\n summarised_years = {\n r[0].year\n for r in self._engine.execute(\n select([years.c.start_day])\n .where(years.c.period_type == \"year\")\n .where(\n years.c.product_ref == product.id_,\n )\n )\n }\n\n # Empty product? No years\n if product.dataset_count == 0:\n # check if the timeoverview needs cleanse\n if not summarised_years:\n return []\n else:\n return summarised_years\n\n # All years we are expected to have\n expected_years = set(\n range(\n product.time_earliest.astimezone(timezone).year,\n product.time_latest.astimezone(timezone).year + 1,\n )\n )\n\n missing_years = expected_years.difference(summarised_years)\n\n # Years who have month-records updated more recently than their own record.\n outdated_years = {\n start_day.year\n for [start_day] in self._engine.execute(\n # Select years\n select([years.c.start_day])\n .where(years.c.period_type == \"year\")\n .where(\n years.c.product_ref == product.id_,\n )\n # Where there exist months that are more newly created.\n .where(\n exists(\n select([updated_months.c.start_day])\n .where(updated_months.c.period_type == \"month\")\n .where(\n func.extract(\"year\", updated_months.c.start_day)\n == func.extract(\"year\", years.c.start_day)\n )\n .where(\n updated_months.c.product_ref == product.id_,\n )\n .where(\n updated_months.c.generation_time > years.c.generation_time\n )\n )\n )\n )\n }\n return sorted(missing_years.union(outdated_years))",
"def test_list_past_meeting_polls(self):\n pass",
"def precipitation():\n # Calculate the date 1 year ago from the last data point in the database\n #Create our session (link) from Python to the DB\n session = Session(engine)\n\n # Use query from notebook. Get the last date in database, then calc a year before \n last_date = session.query(func.max(Measurement.date)).first() \n year_ago = dt.date(2017, 8, 23) - dt.timedelta(days=365)\n # filter to one year ago \n twelve_months_precip = session.query(Measurement.date, Measurement.prcp).filter(Measurement.date >= year_ago).all()\n\n session.close()\n\n # create a list for results to jsonify \n\n list_data = []\n for months in twelve_months_precip:\n data = {}\n data[\"date\"] = months[0]\n data[\"prcp\"] = months[1]\n list_data.append(data)\n\n # jsonify the results \n\n return jsonify(list_data)"
]
| [
"0.5563784",
"0.55196935",
"0.5485716",
"0.5437919",
"0.5433165",
"0.5424259",
"0.5388219",
"0.5372176",
"0.5352127",
"0.53500694",
"0.53387654",
"0.53329307",
"0.5328321",
"0.5301275",
"0.52543426",
"0.52214634",
"0.5210267",
"0.5205575",
"0.52035064",
"0.5195869",
"0.51663715",
"0.5163763",
"0.51608926",
"0.5149592",
"0.5124659",
"0.5100615",
"0.5079923",
"0.50692385",
"0.5063254",
"0.505425"
]
| 0.5989886 | 0 |
3. Query all puppies by ascending weight. | def query_three():
puppies = session.query(Puppy.name, Puppy.weight).order_by(Puppy.weight.asc()).all()
for puppy in puppies:
print "{puppy_name}: {weight}".format(puppy_name=puppy[0], weight=puppy[1]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getByWeight(list, w):\n itemId = 0\n partialWeight = list[0][1]\n while partialWeight < w:\n itemId += 1\n partialWeight += list[itemId][1]\n return list[itemId]",
"def partial_pgs(dset: admix.Dataset, weight: np.ndarray):\n pass",
"def selection_wheel(self, weighted_population):\n weight_total = sum((item[1] for item in weighted_population))\n n = random.uniform(0, weight_total)\n for item, weight in weighted_population:\n if n < weight:\n return item\n n = n - weight\n return item",
"def calculate_weighted_results():\n pass",
"def weight(self):",
"def collect_queries_by_weight(deck_format):\n result = []\n for query_name, details in consts.QUERIES[deck_format].iteritems():\n this_query = [query_name] * details.get('weight', 0)\n result += this_query\n\n return sorted(result)",
"def weights_above_gen_from(self, minimum_weight: float) -> List:\r\n\r\n def func_gen(fit: af.Fit, minimum_weight: float) -> List[object]:\r\n samples = fit.value(name=\"samples\")\r\n\r\n weight_list = []\r\n\r\n for sample in samples.sample_list:\r\n if sample.weight > minimum_weight:\r\n weight_list.append(sample.weight)\r\n\r\n return weight_list\r\n\r\n func = partial(func_gen, minimum_weight=minimum_weight)\r\n\r\n return self.aggregator.map(func=func)",
"def _greedy_packing(items: List[Item], cap: int,\n func: Callable) -> Tuple[Set[int], int]:\n items.sort(key=func)\n included = set()\n total_val, total_weight = 0, 0\n for item in items:\n if total_weight + item.weight > cap:\n continue\n included.add(item.idx)\n total_val += item.val\n total_weight += item.weight\n return included, total_val\n # Running time complexity: O(nlog n)",
"def pick_goods(self, by=None):",
"def pick_goods(self, by=None):",
"def sort_weight(self):\n self._elements = list(\n _[-1] for _ in sorted((e.weight, e.value, e) for e in self)\n )",
"def all_above_weight_gen_from(self, minimum_weight: float) -> Generator:\r\n\r\n def func_gen(fit: af.Fit, minimum_weight: float) -> List[object]:\r\n samples = fit.value(name=\"samples\")\r\n\r\n all_above_weight_list = []\r\n\r\n for sample in samples.sample_list:\r\n if sample.weight > minimum_weight:\r\n instance = sample.instance_for_model(model=samples.model)\r\n\r\n all_above_weight_list.append(\r\n self.object_via_gen_from(fit=fit, galaxies=instance.galaxies)\r\n )\r\n\r\n return all_above_weight_list\r\n\r\n func = partial(func_gen, minimum_weight=minimum_weight)\r\n\r\n return self.aggregator.map(func=func)",
"def weighted_choice(items):\n weight_total = sum((item[1] for item in items))\n n = random.uniform(0, weight_total)\n for item, weight in items:\n if n < weight:\n return item\n n = n - weight\n return item",
"def getWeightDistribution(self):\n return self.akselInfo\n #for num in range(len(akselInfo)):\n #print(\"Avstand mellom aksel:\", num+1, \"og\", num+2, \"=\", akselInfo[num][0])\n\n #for num in range(len(akselInfo)):\n #print(\"Tillatt last på aksel:\", num+1,\"=\", akselInfo[num][1])\n\n return akselInfo",
"def weights(self) -> List[float]:",
"def weighted_choice(items):\n weight_total = sum((item[1] for item in items))\n n = random.uniform(0, weight_total)\n for item, weight in items:\n if n < weight:\n return item\n n = n - weight\n return item",
"def brute_force_cow_transport(cows,limit=10):\r\n # TODO: Your code here\r\n #print(cows)\r\n #trip=[]\r\n #import copy\r\n cowsNames=cows.keys()\r\n #print(cowsNames)\r\n cowNamesList=[]\r\n \r\n #for cowName in cowsNames:\r\n # if cows[cowName] <=limit:\r\n # cowNamesList.append(cowName)\r\n # print(cowNamesList)\r\n\r\n herd = sorted(cows.items(), key=lambda cows:cows[1], reverse=True) \r\n #print(herd)\r\n #limit = 10\r\n #weight = [v for x, v in cows.items()] \r\n #name = [x for x, v in cows.items()]\r\n #print('weight', weight)\r\n #print('name', name)\r\n #for i in weight:\r\n #print (i)\r\n # if sum(trip) <= limit: \r\n # trip.append(i)\r\n # print(trip)\r\n #trips=[]\r\n number_of_trips=len(cows)\r\n results=None\r\n limit=10\r\n #best_trips=len(cows) + 1\r\n for trips in get_partitions(herd): \r\n #print(trips) \r\n #flag = False\r\n #numberOfTrips = 0\r\n weights=[]\r\n for trip in trips:\r\n print(trip)\r\n weight=(sum([v for x, v in cows.items() if x in trip]))\r\n #print('weight',weight) \r\n weights.append(weight)\r\n #print('weights',weights)\r\n #print('max weight',max(weights))\r\n for w in weights:\r\n #print (w)\r\n if w <= limit: #and len(trips) <= number_of_trips:\r\n #print(limit) \r\n #print(len(trips))\r\n #number_of_trips=len(trips)\r\n #print(number_of_trips)\r\n results = trips\r\n #print(trips)\r\n return results \r\n #for cow in one_trip:\r\n #print('cow',cow)\r\n #trip_weight+=cow[1]\r\n #print('trip weight', trip_weight)\r\n #temp_results=[] \r\n #if trip_weight > limit: \r\n #print('name',cow[0])\r\n #flag = False \r\n #break\r\n #if flag and (len(trips) < best_trips):\r\n #best_trips = len(trips)\r\n # print(best_trips)\r\n #for trip in trips:\r\n #temp_results=[]\r\n #print(l)\r\n #for cow in trip:\r\n #temp_results = trips.append(cow[0]) \r\n #print(trips)\r\n #print(temp_results)\r\n #results.append(temp_results)\r\n #return results \r\n #print('trips',trips)\r\n #if len(i) < fewest_trips:\r\n\r\n #trips.append(i[0])\r\n\r\n\r\n # trips = len(i)\r\n # for j in i:\r\n # temp = []\r\n # for cow in i:\r\n # temp.append(i[0])\r\n # print(temp)\r\n #for k in j:\r\n # print(k)\r\n #result=[sum(z) for z in trip[1]]\r\n #print(result)\r\n #print('limit',limit)\r\n #for i in result:\r\n # if i <= limit:\r\n # trip.append(name)\r\n # print(trip)\r\n \r\n #print(alist)\r\n #for p in partition:\r\n # print(p) \r\n #if weight <= limit:\r\n #result = (brute_force_cow_transport(weight, limit))\r\n #print(True)\r\n \r\n \r\n #if j==[] or limit==0:\r\n # result = (0,())\r\n \r\n #elif j[1] > limit:\r\n #explore right branch only\r\n # result = brute_force_cow_transport(cows[1], limit) \r\n # else:\r\n #nextItem = cows\r\n #print(nextItem)\r\n #explore left branch\r",
"def chosen_items(sack, items, weight):\n total = total_profit(sack, items, weight)\n chosen = []\n \n while total != 0:\n for i in range(items + 1):\n if total in sack[i]:\n chosen.append(i) \n total = total - profit[i - 1] \n break \n \n return sorted(chosen)",
"def dp_make_weight(egg_weights, target_weight, memo={}):\n\n \"\"\"\n 根据提示: 每个pound类型的蛋是无限的。\n 问题是提供一种蛋的组合,最好pound数等于或是接近总的weight 并且要满足数量要越少越好。\n 这是两个限制条件。但是提示也给了总是有egg为value1的,那么难度小了很多。\n 现在是怎样让蛋的数量越少越好。\n \n 1.最优子结构\n egg_weights 现在假设是(1, 5, 10, 25)\n dp_make_weight((1, 5, 10, 25),x,memo) , 当x - n >= 0 时(n代表 1,5,10,25),\n 然后在 dp_make_weight((1,5,10,25,x-n,memo) +1 中 挑选最小值。+1的原因是包含本次\n 2.重叠子问题\n 详见ps1b的图片。\n 那么memo记录的key 为 avail(即剩余的容量) ,value 为avail下最小的蛋的数量n。\n \n 那么base_case是什么?\n target == 0时,返回0\n 现在按照深度优先的思路思考\n \"\"\"\n\n if target_weight == 0:\n return 0\n\n if target_weight in memo:\n return memo[target_weight]\n\n result = None # 占位符,没有多大用\n\n for elt in egg_weights:\n if target_weight - elt >= 0: # 这样才有继续探索的必要\n tmp_result = dp_make_weight(egg_weights, target_weight - elt, memo) + 1\n if result is None or tmp_result < result:\n result = tmp_result\n memo[target_weight] = result\n return result",
"def test_get_similar_objects_weights(self): # pylint: disable=invalid-name\n similar_objects = Assessment.get_similar_objects_query(\n id_=self.assessment.id,\n types=[\"Assessment\"],\n threshold=0, # to include low weights too\n ).all()\n\n # casting to int from Decimal to prettify the assertion method output\n id_weight_map = {obj.id: int(obj.weight) for obj in similar_objects}\n\n self.assertDictEqual(id_weight_map, self.id_weight_map)",
"def pulse(*args, **kwargs):\n weight_key = kwargs.get('weight')\n\n pulses_generator = _pulse(*args, **kwargs)\n\n if not configuration.pulse_return_best:\n return pulses_generator\n\n best = None\n best_cost = None\n for pulse, weights in pulses_generator:\n if best is None or best_cost > weights.get(weight_key):\n best_cost = weights.get(weight_key)\n best = pulse, weights\n\n return [best]",
"def _weighted(self):\n return self.dataset.weighted(self.probability)",
"def greedy_cow_transport(cows,limit=10):\n cow_names = sorted(cows.keys(), key = lambda x: cows[x], reverse = True)\n all_trips = []\n \n while len(cow_names) != 0:\n current_trip = []\n current_weight = 0\n i = 0\n \n \n while current_weight < limit and i < len(cow_names):\n if (cows[cow_names[i]] + current_weight <= limit):\n current_trip.append(cow_names[i])\n current_weight += cows[cow_names[i]]\n i += 1\n \n for cow in current_trip:\n cow_names.remove(cow)\n \n all_trips.append(current_trip)\n \n \n return all_trips",
"def _buildWeights(self):\r\n # Compute the spatial tree\r\n kd = spatial.cKDTree(self.XYin)\r\n \r\n # Perform query on all of the points in the grid\r\n dist,self.ind=kd.query(self.XYout,distance_upper_bound=self.maxdist,k=self.NNear)\r\n \r\n self.Nc = np.size(self.ind,axis=0)\r\n print '%d interpolation points.'%self.Nc\r\n # Now loop through and get the weights for each point\r\n self.W = np.zeros((self.NNear,self.Nc))\r\n\r\n # Print percentages\r\n p0=0\r\n pstep=5\r\n for ii in range(0,self.Nc):\r\n \r\n if self.verbose:\r\n pfinish = float(ii)/float(self.Nc)*100.0\r\n if pfinish> p0:\r\n print '%3.1f %% complete...'%pfinish\r\n p0+=pstep\r\n \r\n W = self.getWeights(dist[ii,:],self.XYin[self.ind[ii,:],0],self.XYin[self.ind[ii,:],1])\r\n self.W[:,ii] = W.T",
"def sampleWeight(self):\r\n x=random.random()\r\n i = 0\r\n n = len(self.weights)-1\r\n cummulativeWeight = 0\r\n #Distribute the exploration weight evenly among all the actions that have been\r\n #taken up to this point in time by any of the users\r\n if len(self.sampledActions) == 0:\r\n explorationWeight = 0\r\n else:\r\n explorationWeight = self.explorationFund / len(self.sampledActions)\r\n #Compute the normalization factor. If no action has been sampled by this user yet,\r\n #then each action k has weight eta*pi_k, where pi_k is the weight of k in the\r\n #prior distribution. Then, the normalization factor is the sum(eta*pi_k) for all k,\r\n #which is equal to eta*sum(pi_k), which is just eta, since the sum of the previous\r\n #weights has to add up to 1.\r\n #If one or more actions have been already sampled, the normalization factor is the\r\n #sum of 1) the weights already in self.weights, 2) the exploration fund, and 3) the\r\n #weights of the actions that are not yet in self.weights. Each one of these actions\r\n #has weight eta*pi_k (because it hasn't been sampled yet), so the total weight of the\r\n #mass of actions not yet in self.weights is eta*(1-sum(pi_l)), where the sum is over all\r\n #the weights already in self.weights\r\n if n < 0:\r\n normalizationFactor = self.priorBelief\r\n else:\r\n normalizationFactor = sum(self.weights) + self.explorationFund + \\\r\n self.priorBelief*(1-self.priorTopicDistr.cummulative[n])\r\n #Keep getting the next weight until the combined mass of the weights is less than the\r\n #random number x\r\n while True:\r\n w = self.__getitem__(i)\r\n if i in self.sampledActions:\r\n w += explorationWeight\r\n cummulativeWeight += w\r\n if x <= cummulativeWeight/normalizationFactor:\r\n if i not in self.sampledActions:\r\n self.sampledActions.append(i)\r\n return w\r\n i += 1",
"def test_weighting(self):\n dset = self.dset.spec.sel(\n lons=self.lons_inexact, lats=self.lats_inexact, method=\"idw\"\n )\n for stat in [\"hs\", \"tp\"]:\n idw = dset.spec.stats([stat])[stat].values\n site0 = self.dset.isel(site=[0]).spec.stats([stat])[stat].values\n site1 = self.dset.isel(site=[1]).spec.stats([stat])[stat].values\n lower = np.array([min(s1, s2) for s1, s2 in zip(site0, site1)])\n upper = np.array([max(s1, s2) for s1, s2 in zip(site0, site1)])\n assert (upper - idw > 0).all() and (idw - lower > 0).all()",
"def OneByOneStrategy(I_list,box_list):\n SortedItems = quick_sort(I_list)\n lemon = []\n for i in box_list:\n for item in range(len(SortedItems)):\n if i.max_cap - i.curr_cap == 0:\n break\n if SortedItems[item].weight <= i.max_cap - i.curr_cap:\n if SortedItems[item] not in lemon:\n lemon.append(SortedItems[item])\n i.items_list.append(SortedItems[item])\n i.curr_cap += SortedItems[item].weight\n else:\n pass\n print('Results from Greedy Strategy 3')\n if len(lemon) != len(SortedItems):\n print('Unable to pack all items')\n else:\n print('All items successfully packed!')\n for s in box_list:\n print('Box',s.id,'of weight capacity',s.max_cap,'contains:')\n for item in s.items_list:\n print(item.name,'of weight',item.weight)\n for item in SortedItems:\n if item not in lemon:\n print(item.name,'of weight',item.weight,'got left behind')\n print('\\n')",
"def query_one():\n puppies = session.query(Puppy.name).order_by(Puppy.name.asc()).all()\n\n for puppy in puppies:\n print puppy.name",
"def weighted_choice(weighted_items, num_items=1):\n total = 0\n cume_list = []\n\n for item, weight in weighted_items.items():\n total += weight\n cume_list.append([item, total])\n\n for pair in cume_list:\n pair[1] /= total\n\n items = []\n\n for _ in range(num_items):\n rand = random()\n\n for item, val in cume_list:\n if rand <= val:\n items.append(item)\n break\n\n assert num_items == len(items), (weighted_items, items)\n\n if num_items == 1:\n return items[0]\n\n return items",
"def weighted_choice(items: List[Tuple[str, float]]) -> str:\r\n total_weight = sum(item[1] for item in items)\r\n n = random.uniform(0, total_weight)\r\n for item, weight in items:\r\n if weight > n:\r\n return item\r\n n -= weight\r\n return item"
]
| [
"0.62865865",
"0.5884093",
"0.58838344",
"0.57850003",
"0.57574505",
"0.5615625",
"0.55487806",
"0.5510328",
"0.5503547",
"0.5503547",
"0.54957587",
"0.5479928",
"0.5474046",
"0.5428138",
"0.54146487",
"0.54041064",
"0.539627",
"0.5385185",
"0.5379307",
"0.53660184",
"0.5361855",
"0.53357625",
"0.5325847",
"0.53133017",
"0.5313141",
"0.5310535",
"0.5308348",
"0.52911437",
"0.52895904",
"0.5283268"
]
| 0.7313781 | 0 |
4. Query all puppies grouped by the shelter in which they are staying. Show count of puppies at each shelter | def query_four():
puppies = session.query(Shelter, func.count(Puppy.id)).join(Puppy).group_by(Shelter.id).all()
for shelter_puppy in puppies:
print "{shelter_name}: {puppy}".format(shelter_name=shelter_puppy[0].name, puppy=shelter_puppy[1]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def count(cls, queryset):\n result_group = queryset.values('date')\n annotate_report = result_group.annotate(\n breakfast=CountTrue('breakfast'),\n lunch=CountTrue('lunch')\n )\n\n def serialize(queryset):\n return [\n {\"breakfast\": res[\"breakfast\"], \"lunch\": res[\"lunch\"],\n \"date\": res[\"date\"]} for res in queryset\n ]\n return serialize(annotate_report)\n # lunch = queryset.filter(lunch=1).values('date').count()",
"def num_producers(self):\n producers = self.info_wells.groupby('well_type').get_group('prod')\n return producers['well'].nunique()",
"def get_shuttles_count():\n return Shuttle.objects.count()",
"def select_most_sold_products_from_personas_kinderen():\n return sql_select(\"\"\"SELECT prodid, name,\n COUNT(*)\n FROM v\n\n GROUP BY prodid ,name \n ORDER BY COUNT(*) DESC ; \"\"\")",
"def count_sheeps(sheep):\n return sheep.count(True)",
"def create_count(less_data):\n # create count and distinct list\n less_data['count'] = less_data.groupby('food_id')['food_id'].transform('count')\n less_data = less_data[['food_id', 'count']].drop_duplicates().sort_values(by=['count'], ascending=False)\n\n return less_data",
"def select_most_sold_products_from_personas_vrouwen():\n return sql_select(\"\"\"SELECT prodid, name,\n COUNT(*)\n FROM personas_vrouwen\n\n GROUP BY prodid ,name \n ORDER BY COUNT(*) DESC ; \"\"\")",
"def count():",
"def make_pool_counts(*pools):\n return sf.Frame.from_records(\n pools, columns=['star', 'color', 'count']\n ).set_index_hierarchy(\n ('star', 'color'), drop=True\n )['count']",
"def counter_picks(session, r_query, team) -> DataFrame:\n counters = DataFrame(columns=short_alphabetical_names,\n index=short_alphabetical_names)\n counters = counters.fillna(0)\n\n def _process(side):\n side_filt = Replay.get_side_filter(team, side)\n replays = r_query.filter(side_filt)\n\n r: Replay\n for r in replays:\n draft = []\n selection: PickBans\n for selection in (r.teams[0].draft + r.teams[1].draft):\n draft.append(\n {\"hero\": selection.hero,\n \"is_pick\": selection.is_pick,\n \"side\": selection.team,\n \"order\": selection.order}\n )\n draft = sorted(draft, key=lambda k: k['order'])\n opp_picks = []\n for pick_ban in draft:\n if not pick_ban['is_pick']:\n continue\n name = heroShortName[pick_ban['hero']]\n if pick_ban['side'] == side:\n for o in opp_picks:\n counters[name][o] += 1\n else:\n opp_picks.append(name)\n return\n\n _process(Team.DIRE)\n _process(Team.RADIANT)\n\n return counters",
"def GetCountPerOfficeProp( placements, officeData, persoData, officeTags=['roomID'], persoTags=['inService'] ) :\n persoFilter = pd.pivot_table(persoData.loc[:,persoTags], columns=persoTags, index=persoData.index, aggfunc=len).fillna(0)\n persoFilter = persoFilter.values.T\n officeFilter = pd.pivot_table(officeData.loc[:,officeTags], columns=officeTags, index=officeData.index, aggfunc=len).fillna(0)\n return np.dot( np.dot(persoFilter, placements), officeFilter )",
"def culggroup_thickestdonecount(As, Rps, group, dones):\n pairs = sorted(((get_culg_dimension(As, Rps, l), dones[l], l)\n for l in group),\n reverse=True)\n count = len(tuple(itt.takewhile(lambda p: p[1], pairs)))\n return count",
"def camp_sweeps_by_neighborhood(request):\n raw_sql_query = \"\"\"\n SELECT count(camp_sweeps.id) AS sweep_count, rlis_neighborhoods.name\n FROM camp_sweeps\n\tINNER JOIN rlis_neighborhoods ON st_intersects(camp_sweeps.geom, rlis_neighborhoods.geom)\n\tGROUP BY rlis_neighborhoods.name, rlis_neighborhoods.geom\n\tHAVING count(camp_sweeps) > 3\n ORDER BY 1 DESC;\n \"\"\"\n with connection.cursor() as cursor:\n cursor.execute(raw_sql_query, )\n result = utils.dictfetchall(cursor)\n return Response(data=result)",
"def propietarios_count(self):\n return self.expedientepersona_set.filter(propietario=True).count()",
"def semestral_count_submission(self):\n serie_count = self.count(self.__data[\"normal_semestral_groupby\"])\n self.analysis[\"semestral_count_application\"] = serie_count.to_dict()",
"def test_properties_count_group_by_group_by_and_sub_group_by_get(self):\n pass",
"def test_properties_count_group_by_group_by_get(self):\n pass",
"def _count_parties(data_set): #DEMOCRATS, THEN REPUBLICANS\r\n reps = 0\r\n dems = 0\r\n for data_point in data_set:\r\n if data_point.dat_party == \"R\": reps+=1\r\n if data_point.dat_party == \"D\": dems+=1\r\n\r\n return (dems, reps)",
"def tally_results(self, verbose=False):\n\t\tself.tally={\n\t\t\tself.player1.name: {\n\t\t\t\t'wins': 0,\n\t\t\t\t'draws': 0,\n\t\t\t\t'losses': 0\n\t\t\t},\n\t\t\tself.player2.name: {\n\t\t\t\t'wins': 0,\n\t\t\t\t'draws': 0,\n\t\t\t\t'losses': 0\n\t\t\t}\n\t\t}\n\t\tfor key, value in self.winner_dict.items():\n\t\t\tself.tally[self.player1.name]['wins']+=1 if value == self.player1.name else 0\n\t\t\tself.tally[self.player1.name]['draws']+=1 if value is None else 0\n\t\t\tself.tally[self.player1.name]['losses']+=1 if value == self.player2.name else 0\n\t\t\tself.tally[self.player2.name]['wins']+=1 if value == self.player2.name else 0\n\t\t\tself.tally[self.player2.name]['draws']+=1 if value is None else 0\n\t\t\tself.tally[self.player2.name]['losses']+=1 if value == self.player1.name else 0\n\t\tif verbose:\n\t\t\tprint('\\n--- FINAL RESULT ---\\n')\n\t\t\ttally_pretty=pd.DataFrame(self.tally).to_markdown()\n\t\t\tprint(tally_pretty)\n\t\t\tif self.tally[self.player1.name]['wins'] == self.tally[self.player2.name]['wins']:\n\t\t\t\tprint('\\nIt\\'s a draw!\\n')\n\t\t\telse:\n\t\t\t\twinner=self.player1.name if self.tally[self.player1.name]['wins'] > self.tally[self.player2.name]['wins'] else self.player2.name\n\t\t\t\tprint('\\n{player} wins the game!\\n'.format(player=winner))",
"def select_most_sold_products_from_personas_mannen():\n return sql_select(\"\"\"SELECT prodid, name,\n COUNT(*)\n FROM personas_mannen\n \n GROUP BY prodid ,name \n ORDER BY COUNT(*) DESC ; \"\"\")",
"def clothing_type_count(clothes_list):\n types_count = {}\n for garment in clothes_list:\n if garment.db.clothing_type:\n type = garment.db.clothing_type\n if type not in types_count.keys():\n types_count[type] = 1\n else:\n types_count[type] += 1\n return types_count",
"def getFishPopulation(genus):\n r = req.get(\"https://fishbase.ropensci.org/popchar?fields=\"+genus)\n my_dict = r.json()\n return my_dict['count']",
"def num_species_on_map(self):\n # tot_herbivores = 0\n # tot_carnivores = 0\n # for cells in itertools.chain.from_iterable(self.map):\n # curr_herbivore, curr_carnivore = cells.num_species_per_cell()\n # tot_herbivores += curr_herbivore\n # tot_carnivores += curr_carnivore\n\n return (sum(x) for x in zip(*[cells.num_species_per_cell() for cells in itertools.chain.from_iterable(self.map)]))\n\n # (sum(x) for x in zip(*[cells.num_species_per_cell() for cells in itertools.chain.from_iterable(self.map)]))",
"def countGreatPeople(self, tCoords):\n\t\tiCount = 0\n\t\tplot = gc.getMap().plot(tCoords[0], tCoords[1])\n\t\tif plot.isCity():\n\t\t\tcity = plot.getPlotCity()\n\t\t\tiGreatPriest = gc.getInfoTypeForString(\"SPECIALIST_GREAT_PRIEST\")\n\t\t\tfor i in range(iGreatPriest, iGreatPriest+7, 1):\n\t\t\t\tiCount += city.getFreeSpecialistCount(i)\n\t\treturn iCount",
"def count_houses_delivered_with_robot(s):\n s_santa, s_robot = s[::2], s[1::2]\n deliveries_santa = make_deliveries(s_santa)\n deliveries_robot = make_deliveries(s_robot)\n all_deliveries = combine_dicts(deliveries_santa, deliveries_robot, lambda x,y: x+y, 0)\n return len(all_deliveries)",
"def count_gates(qobj, basis, qubits):\n\n #TO DO\n pass",
"def get_counts(df,col_name):\n return df.groupBy(col_name).count().show()",
"def query_three():\n \n puppies = session.query(Puppy.name, Puppy.weight).order_by(Puppy.weight.asc()).all()\n \n for puppy in puppies:\n print \"{puppy_name}: {weight}\".format(puppy_name=puppy[0], weight=puppy[1])",
"def pie_chart_data():\n\n user_id = session[\"user_id\"]\n\n pie_chart_query = db.session.query(Water.postal, func.count(Water.postal)).filter(Water.user_id==user_id).group_by(Water.postal).all()\n\n print('pie_chart_query, ', pie_chart_query)\n postal = [] \n qty = []\n for item1, item2 in pie_chart_query:\n\n if item1 is not None:\n postal.append(item1)\n if item2 is not 0:\n qty.append(item2)\n\n print('Postal: ', postal)\n print('Qty: ', qty) \n\n return jsonify(postal=postal, qty=qty)",
"def get_counts_by_manufacturers(table):\n manufacturers = []\n for item in table:\n if item[2] not in manufacturers:\n manufacturers.append(item[2])\n\n manufacturers_games = {}\n\n for record in manufacturers:\n games_counter = 0\n for item in table:\n if item[2] == record:\n games_counter += 1\n manufacturers_games[record] = games_counter\n\n return manufacturers_games"
]
| [
"0.58302456",
"0.5761377",
"0.57559884",
"0.57153726",
"0.55361503",
"0.53359115",
"0.53024775",
"0.53024465",
"0.52998257",
"0.52760255",
"0.5253698",
"0.52469337",
"0.5225774",
"0.5219363",
"0.52180797",
"0.52175015",
"0.5206509",
"0.5189646",
"0.51462865",
"0.5143075",
"0.51404047",
"0.51380676",
"0.51244086",
"0.51120424",
"0.5106752",
"0.51051146",
"0.51004225",
"0.5087333",
"0.5078549",
"0.5067802"
]
| 0.75888115 | 0 |
Same functionality as RelPosEmb1D | def rel_pos_emb_1d(q, rel_emb, shared_heads):
if shared_heads:
emb = torch.einsum('b h t d, r d -> b h t r', q, rel_emb)
else:
emb = torch.einsum('b h t d, h r d -> b h t r', q, rel_emb)
return relative_to_absolute(emb) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def GetPosition(self):\n ...",
"def get_position(self, position):",
"def embed():",
"def parent(self, pos): \n return pos//2",
"def position(self):\r\n pass",
"def prepare_emb(self):\n with tf.variable_scope(\"PrepEmb\", reuse=tf.AUTO_REUSE):\n self.src_ten = tf.cast(tf.convert_to_tensor(self.src_ten), tf.float32)\n self.tgt_ten = tf.cast(tf.convert_to_tensor(self.tgt_ten), tf.float32)\n # Mapping\n self.src_ten = tf.matmul(self.src_ten, self.W)\n # Normalization\n self.src_ten = tf.nn.l2_normalize(self.src_ten, axis=1)\n self.tgt_ten = tf.nn.l2_normalize(self.tgt_ten, axis=1)",
"def test_synth_positions_small_width():\n background = Image.new('RGB', (20, 20))\n patch_1 = Image.new('RGB', (10, 20))\n patch_2 = Image.new('RGB', (11, 20))\n\n parameters = {'data': [background, patch_1, patch_2]}\n\n positions = images.synth_positions(parameters)",
"def embed_xseq(self,xseq):\n xseq_embed = self.emat[xseq]\n return xseq_embed",
"def move_to_position2(self):",
"def getPosition(*args):",
"def getPosition(*args):",
"def getPosition(*args):",
"def getPosition(*args):",
"def getPosition(*args):",
"def getPosition(*args):",
"def getPosition(*args):",
"def getPosition(*args):",
"def getPosition(*args):",
"def getPosition(*args):",
"def getPosition(*args):",
"def move_to_position1(self):",
"def left_child(self, pos): \n return 2 * pos",
"def pos(self, *args, **kwargs) -> Any:\n pass",
"def cleavagePos(self):\n raise NotImplementedError",
"def cleavagePos(self):\n raise NotImplementedError",
"def parent(self, pos):\n return pos // 2",
"def parent(self, pos):\n return pos // 2",
"def normal_at(self, p):\n pass",
"def test_synth_positions_small_height():\n background = Image.new('RGB', (20, 20))\n patch_1 = Image.new('RGB', (10, 21))\n patch_2 = Image.new('RGB', (10, 21))\n\n parameters = {'data': [background, patch_1, patch_2]}\n\n positions = images.synth_positions(parameters)",
"def forward(self, x):\n return x + self.pos_embedding"
]
| [
"0.5444562",
"0.54409677",
"0.5416211",
"0.5387021",
"0.5341349",
"0.5335706",
"0.53265387",
"0.53168595",
"0.52536285",
"0.5244918",
"0.5244918",
"0.5244918",
"0.5244918",
"0.5244918",
"0.5244918",
"0.5244918",
"0.5244918",
"0.5244918",
"0.5244918",
"0.5244918",
"0.52423215",
"0.5226182",
"0.5193314",
"0.5188865",
"0.5188865",
"0.5165676",
"0.5165676",
"0.51562506",
"0.5150356",
"0.51305306"
]
| 0.6280389 | 0 |
Updates current querystring with a given dict of params, removing existing occurrences of such params. Returns a urlencoded querystring. | def updated_querystring(request, params):
original_params = request.GET.copy()
for key in params:
if key in original_params:
original_params.pop(key)
original_params.update(params)
return original_params.urlencode() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def replace_query_params(cls, url: str, **params: Mapping[str, str]) -> str:\n url, _ = cls.separate_query_params(url, params.keys())\n return cls.add_query_params(url, **params)",
"def get_query_string(p, new_params=None, remove=None):\n if new_params is None:\n new_params = {}\n if remove is None:\n remove = []\n\n for r in remove:\n for k in p.keys():\n if k.startswith(r):\n del p[k]\n for k, v in new_params.items():\n if k in p and v is None:\n del p[k]\n elif v is not None:\n p[k] = v\n return mark_safe(\n '?' + '&'.join(\n [u'%s=%s' % (k, v) for k, v in p.items()]\n ).replace(' ', '%20')\n )",
"def modify_query_params(context, **kwargs):\n request = context['request']\n params = request.GET.copy()\n for key, value in kwargs.items():\n if value == '':\n if key in params:\n del params[key]\n else:\n params[key] = value\n return ('?' + params.urlencode()) if params else ''",
"def build_query_string(params: Optional[Dict[str, Any]] = None) -> str:\n if params is None:\n return ''\n components = []\n for key, value in params.items():\n if isinstance(value, (list, tuple, set)):\n for v in value:\n c = '='.join([key, quote_plus(str(v))])\n components.append(c)\n else:\n c = '='.join([key, quote_plus(str(value))])\n components.append(c)\n if len(components) > 0:\n return '?{}'.format('&'.join(components))\n return ''",
"def create_query_sting(param_dict):\n params = \"&\".join([f\"{key}={value}\" for key, value in param_dict.items()])\n return params.replace(\"#\", \"%23\")",
"def modify_query(**values):\n args = request.args.copy()\n\n for attr, new_value in values.items():\n if new_value is not None:\n args[attr] = new_value\n elif attr in args:\n del args[attr]\n\n if args:\n return request.base_url + \"?\" + url_encode(args)\n else:\n return request.base_url",
"def make_query_string(query_string, params):\n\n for param in params:\n if param:\n index = params.index(param)+1\n query_string = query_string.replace(f\"%param{index}%\", param)\n\n return query_string",
"def make_query_string(query, params):\n query_string = query\n\n index = 1\n for param in params:\n if param:\n to_replace = \"%%param%d%%\" % index\n query_string = query_string.replace(to_replace, param)\n index += 1\n\n return query_string",
"def normalize_params(url, params):\n # parse the url\n parse = urlparse(url)\n\n # Get the query list\n qs_list = parse_qsl(parse.query, keep_blank_values=True)\n must_encode = False if parse.query == urllib.parse.unquote(parse.query) else True\n if params is None:\n combined_list = qs_list\n else:\n # Needs to be encoded before sorting\n combined_list = [encode_pair(must_encode, key, value) for (key, value) in list(qs_list)]\n combined_list += params.items()\n\n encoded_list = [\"%s=%s\" % (key, value) for (key, value) in combined_list]\n sorted_list = sorted(encoded_list, key=lambda x: x)\n\n return \"&\".join(sorted_list)",
"def param_replace(context, **kwargs):\n d = context['request'].GET.copy()\n for k,v in kwargs.items():\n d[k] = v\n for k in [k for k,v in d.items() if not v]:\n del d[k]\n return d.urlencode()",
"def querystring_replace(context, **kwargs):\n # borrowed as-is from derrida codebase\n # inspired by https://stackoverflow.com/questions/2047622/how-to-paginate-django-with-other-get-variables\n\n # get a mutable copy of the current request\n querystring = context[\"request\"].GET.copy()\n # update with any parameters passed in\n # NOTE: needs to *set* fields rather than using update,\n # because QueryDict update appends to field rather than replacing\n for key, val in kwargs.items():\n querystring[key] = val\n # return urlencoded query string\n return querystring.urlencode()",
"def _get_query_part(params: dict) -> str:\n params_cleaned = {k: v for k, v in params.items() if v is not None}\n return ('?' + urlencode(params_cleaned, quote_via=quote, safe=\"/,\")) if params_cleaned else \"\"",
"def append_query_params(original_url, **kwargs):\n scheme, netloc, path, query_string, fragment = urlsplit(original_url)\n query_params = parse_qs(query_string)\n if kwargs is not None:\n for key, value in kwargs.items():\n query_params[key] = [value]\n\n new_query_string = urlencode(query_params, doseq=True)\n new_url = urlunsplit((scheme, netloc, path, new_query_string, fragment))\n return new_url",
"def append_query_params(original_url, **kwargs):\n scheme, netloc, path, query_string, fragment = urlsplit(original_url)\n query_params = parse_qs(query_string)\n if kwargs is not None:\n for key, value in kwargs.items():\n query_params[key] = [value]\n\n new_query_string = urlencode(query_params, doseq=True)\n new_url = urlunsplit((scheme, netloc, path, new_query_string, fragment))\n return new_url",
"def add_query_params(url: str, additional_params: dict) -> str:\n url_components = urlparse(url)\n original_params = parse_qs(url_components.query)\n # Before Python 3.5 you could update original_params with\n # additional_params, but here all the variables are immutable.\n merged_params = {**original_params, **additional_params}\n updated_query = urlencode(merged_params, doseq=True)\n # _replace() is how you can create a new NamedTuple with a changed field\n return url_components._replace(query=updated_query).geturl()",
"def updated_query_str(request, *args):\n\n return urllib.urlencode(updated_query(request, *args))",
"def add_querystring(context, **kwargs):\n\n updated = context['request'].GET.copy()\n\n # have to iterate over and not use .update as it's a QueryDict not a dict\n for k, v in kwargs.items():\n updated[k] = v\n\n return '?{}'.format(updated.urlencode()) if updated else ''",
"def composeQueryUrl(self, params):\n\t\ttextparams = urllib.urlencode(params)\n\t\treturn self.api_url + \"?\" + textparams",
"def url_query_cleaner(url, parameterlist=(), sep='&', kvsep='=', remove=False, unique=True, keep_fragments=False):\n\n if isinstance(parameterlist, (six.text_type, bytes)):\n parameterlist = [parameterlist]\n url, fragment = urldefrag(url)\n base, _, query = url.partition('?')\n seen = set()\n querylist = []\n for ksv in query.split(sep):\n if not ksv:\n continue\n k, _, _ = ksv.partition(kvsep)\n if unique and k in seen:\n continue\n elif remove and k in parameterlist:\n continue\n elif not remove and k not in parameterlist:\n continue\n else:\n querylist.append(ksv)\n seen.add(k)\n url = '?'.join([base, sep.join(querylist)]) if querylist else base\n if keep_fragments:\n url += '#' + fragment\n return url",
"def param_remove(params, arg):\n d = params.copy()\n if arg in d:\n del d[arg]\n return d.urlencode()",
"def _clean_and_encode_params(params: Mapping):\n # Keep only the parameters that were given a value\n params = {k: v for k, v in params.items() if v is not None}\n\n # All query parameters are later urlencoded - for projection, comma-separated\n # list is supported only on literal comma; convert comma-separated list\n # to a list of values which will be encoded to multiple query parameters\n try:\n params[\"projection\"] = [x.strip() for x in params[\"projection\"].split(\",\")]\n except KeyError:\n pass\n return params",
"def url_replace(context, **kwargs):\n query = context['request'].GET.dict()\n query.update(kwargs)\n return urlencode(query)",
"def url_replace(context, **kwargs):\n query = context['request'].GET.dict()\n query.update(kwargs)\n return urlencode(query)",
"def add_query_params(\n url: str, **params: Mapping[str, Union[str, List[str]]]\n ) -> str:\n o = urlparse(url)\n qp = parse_qs(o.query, keep_blank_values=True)\n\n for k, v in params.items():\n if isinstance(v, str):\n v = [v]\n try:\n qp[k].extend(v)\n except KeyError:\n qp[k] = v\n\n qs = urlencode(qp, doseq=True, quote_via=quote)\n return urlunparse(o._replace(query=qs))",
"def to_query_str(params):\n if not params:\n return ''\n\n # PERF: This is faster than a list comprehension and join, mainly\n # because it allows us to inline the value transform.\n query_str = '?'\n for k, v in params.items():\n if v is True:\n v = 'true'\n elif v is False:\n v = 'false'\n else:\n v = str(v)\n\n query_str += k + '=' + v + '&'\n\n return query_str[:-1]",
"def query_append(*query_params):\n li = []\n for qp in query_params:\n qs = urlencode_s(query_unflatten(qp))\n if qs:\n li.append(qs)\n return \"&\".join(li)",
"def _update_request_uri_query(self, request):\n if \"?\" in request.path:\n request.path, _, query_string = request.path.partition(\"?\")\n if query_string:\n query_params = query_string.split(\"&\")\n for query in query_params:\n if \"=\" in query:\n name, _, value = query.partition(\"=\")\n request.query.append((name, value))\n\n request.path = url_quote(request.path, \"/()$=',\")\n\n # add encoded queries to request.path.\n if request.query:\n request.path += \"?\"\n for name, value in request.query:\n if value is not None:\n request.path += \"{}={}{}\".format(name, url_quote(value, \"/()$=',\"), \"&\")\n request.path = request.path[:-1]\n\n return request.path, request.query",
"def _extend_url(self, url, params):\n # filter out None parameters\n params = {k:v for k,v in params.items() if v is not None}\n for key in params:\n url = url + \"&{}={}\".format(key, params[key])\n return url",
"def build_qs(self, params, key=None):\n parts = []\n\n if params and hasattr(params, 'items'):\n for name, value in params.items():\n\n if hasattr(value, 'values'):\n # Encode a dict\n parts.extend(self.build_qs(params=value.values(),\n key=self.build_qs_key(key, cgi.escape(name))))\n\n elif hasattr(value, '__iter__'):\n # Encode an iterable (list, tuple, etc)\n parts.extend(self.build_qs(params=dict(zip(xrange(0, len(value)), value)),\n key=self.build_qs_key(key, cgi.escape(name))))\n\n else:\n parts.extend('%s=%s' % (self.build_qs_key(key, cgi.escape(name)), cgi.escape(str(value))))\n\n return '&'.join(parts)",
"def set_query_parameters(url, params):\n url_parts = list(urlparse(url))\n\n query = dict(parse_qsl(url_parts[4]))\n query.update(params)\n url_parts[4] = urlencode(query)\n\n return urlunparse(url_parts)"
]
| [
"0.7936514",
"0.74957633",
"0.7153125",
"0.6790267",
"0.67709136",
"0.6738045",
"0.67204905",
"0.6706251",
"0.6668768",
"0.66175866",
"0.65436184",
"0.64668345",
"0.64521825",
"0.64521825",
"0.64507204",
"0.6441597",
"0.63582",
"0.635697",
"0.6322598",
"0.6310018",
"0.62548095",
"0.62402856",
"0.62402856",
"0.62086076",
"0.6202035",
"0.61988574",
"0.6198592",
"0.6190123",
"0.61629194",
"0.61536175"
]
| 0.8049712 | 1 |
If template context variable with `name` not set get default value from django.settings | def context_or_settings(context, name):
if name in context:
return context[name]
return getattr(settings, 'DEFAULT_' + name.upper()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def context_or_settings(context, name):\n if name in context:\n return context[name]\n return getattr(settings, \"DEFAULT_\" + name.upper())",
"def get(self, name, default=''):\n return getattr(settings, name, DEFAULT_SETTINGS.get(name, default))",
"def overridable(name, default):\n return getattr(django_settings, name, default)",
"def default_context(request):\n return {\n 'GOOGLE_ANALYTICS_ID': settings.GOOGLE_ANALYTICS_ID,\n 'SITE_TITLE': settings.SITE_TITLE,\n }",
"def test_setting_default(self):\n request = mock.Mock()\n request.resolver_match.kwargs.get.return_value = None\n request.path = '/'\n context = context_processors.decide_base_template(request)\n self.assertEqual(context['base_template'], \"base_site.html\")",
"def default(request):\n return request.param",
"def setting(name, default=None):\n return getattr(settings, name, default)",
"def get_default(name, value):\n return os.environ.get('EXAMPLE_{}'.format(name.upper()), value)",
"def template_context(request):\n context = {\n 'application_version': settings.APPLICATION_VERSION,\n }\n context.update(settings.STATIC_CONTEXT_VARS)\n return context",
"def _set_default(name, value, context):\n if name not in context:\n context[name] = value",
"def setting(self, config, name, default=None):\n\n return config.get(name, default) if config else default",
"def _coalesceOption(self, name, default = ''):\n return self.view.settings().get(name, self.options.get(name, default))",
"def get_template_name(self):\n template = None\n if self.template:\n template = self.template\n if not template:\n for p in self.get_ancestors(ascending=True):\n if p.template:\n template = p.template\n break\n if not template:\n template = settings.CMS_TEMPLATES[0][0]\n for t in settings.CMS_TEMPLATES:\n if t[0] == template:\n return t[1] \n return _(\"default\")",
"def extra_context(self):\n from django.conf import settings\n\n return {\n \"site_name\": (lambda r: settings.LEONARDO_SITE_NAME\n if getattr(settings, 'LEONARDO_SITE_NAME', '') != ''\n else settings.SITE_NAME),\n \"debug\": lambda r: settings.TEMPLATE_DEBUG\n }",
"def test_setting_override(self):\n request = mock.Mock()\n request.resolver_match.kwargs.get.return_value = None\n request.path = '/'\n context = context_processors.decide_base_template(request)\n self.assertEqual(context['base_template'], \"test.html\")",
"def csettings(name):\n return getattr(settings, name, \"\")",
"def define(name, default=None):\n if not hasattr(settings, name):\n return setattr(settings, name, default)",
"def company_context(request):\n return {'COMPANY': settings.COMPANY}",
"def settings(request):\n from django.conf import settings\n return {\n 'exhibitBaseTemplate': settings.EXHIBIT_TEMPLATE,\n 'thumbnailUrl': settings.THUMBNAIL_URL,\n 'calisphere': settings.CALISPHERE\n }",
"def theme_name(context):\n return {'THEME_NAME': settings.THEME_NAME}",
"def initial(self):\n from setman import settings\n return getattr(settings, self.name, self.default)",
"def _get_value(self, value, context):\n try:\n var_value = template.Variable(value).resolve(context)\n except template.VariableDoesNotExist:\n try:\n var_value = self.var_value.var\n except AttributeError:\n var_value = self.var_value\n return var_value",
"def site_name(request):\n return {'site_name':'CatFood'}",
"def get_setting(setting_name, default=None):\n settings_dict = getattr(settings, 'SIMPLE_FORUMS', None)\n\n if settings_dict:\n return settings_dict.get(setting_name, default)\n\n return default",
"def settings_context(_request):\n # Note: we intentionally do NOT expose the entire settings\n # to prevent accidental leaking of sensitive information\n return {\"DEBUG\": settings.DEBUG}",
"def _set_from_env(name, context, default):\n if default is _DEFAULT_ARG and name not in os.environ:\n return\n\n context[name] = os.environ.get(name, default)",
"def get_system_value(name: str):\n return Config.objects.first().__dict__[name]",
"def defaultTemplateParameter(self):\n self.tplparam['BODY'] = self.content\n self.tplparam['FLASH'] = (self.flash or '').replace('\"', r'\\\"')\n self.tplparam['PYMFRAMEVERSION'] = self.pymframeversion\n self.tplparam['USER'] = self.session.getAttribute(self.settings.authenvar)\n self.tplparam['RIGHTS'] = repr(self.session.getAttribute('rights'))\n self.tplparam['MENU'] = self.routing.getMenu(self.path,self.checkRights)\n self.tplparam['PATH'] = self.path",
"def template_context(**kwrds):\n usr = User.get_user()\n\n default = {\n 'usr': usr\n }\n default.update(kwrds)\n return default",
"def settings(request):\n return {\"SETTINGS\": django_settings, \"GLOBAL_DEFINITIONS\": global_definitions}"
]
| [
"0.7920417",
"0.69148594",
"0.668963",
"0.64151293",
"0.6401593",
"0.6313355",
"0.62323475",
"0.6224653",
"0.61965585",
"0.6167027",
"0.6102653",
"0.6031418",
"0.60309875",
"0.6024571",
"0.6023175",
"0.60073507",
"0.5987906",
"0.59244883",
"0.591425",
"0.59125286",
"0.5911162",
"0.589354",
"0.58927256",
"0.5888107",
"0.5861091",
"0.5827744",
"0.5824981",
"0.58175504",
"0.5805009",
"0.5764616"
]
| 0.79297477 | 0 |
Parses raw data in family field. | def parse_raw_family_string(family_raw):
return map(parse_family_member, filter(None, family_raw.split(";"))) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def parse(self, data):\n raise NotImplementedError",
"def parse_data(self):\n\t\traise NotImplementedError('%s: No parse function implemented!' % self.name)",
"def parse(cls, data):\n raise NotImplementedError",
"def parse (cls, raw_data):\n # Parse text\n model = NFFGModel.parse(raw_data)\n # Create new NFFG\n nffg = NFFG(id=model.id, name=model.name, service_id=model.service_id,\n version=model.version, mode=model.mode, metadata=model.metadata)\n # Load Infras\n for infra in model.node_infras:\n nffg.add_node(infra)\n # Load SAPs\n for sap in model.node_saps:\n nffg.add_node(sap)\n # Load NFs\n for nf in model.node_nfs:\n nffg.add_node(nf)\n # Load Links\n for link in model.edge_links:\n if link.src.node.type == NFFG.TYPE_NF or \\\n link.dst.node.type == NFFG.TYPE_NF:\n link.type = str(NFFG.TYPE_LINK_DYNAMIC)\n nffg.add_edge(link.src.node, link.dst.node, link)\n # Load SG next hops\n for hop in model.edge_sg_nexthops:\n nffg.add_edge(hop.src.node, hop.dst.node, hop)\n # Load Requirements\n for req in model.edge_reqs:\n nffg.add_edge(req.src.node, req.dst.node, req)\n return nffg",
"def parse_dataset(self, data):\n pass",
"def parse_data(fp):\n pass",
"def parse(self):",
"def _read_data(self):",
"def extract(self, data):",
"def parse_records(self):\n for record in sp.parse(gzip.open(\n \"./human_uniprot_04_07_20.gz\", 'rt')):\n # print(record.taxonomy_id)\n # if record.organism != \"Homo sapiens\":\n # continue\n # print(record.features[0])\n # for comment in record.comments:\n # if comment.startswith(\"SUBCELLULAR LOCATION\"):\n # print(comment)\n self.extract_features_to_dict(record)\n self.extract_localization(record)",
"def parse(self,gff3_line):\r\n split_line = gff3_line.strip().split('\\t')\r\n self.seqid = split_line[0]\r\n self.source = split_line[1]\r\n self.type = split_line[2]\r\n self.start = int(split_line[3])\r\n self.end = int(split_line[4])\r\n self.score = split_line[5]\r\n self.strand = split_line[6]\r\n self.phase = split_line[7]\r\n self.attributes.parse(split_line[8])\r\n return self",
"def load_simple_families_file(infile, ped_sep=\"\\t\") -> FamiliesData:\n fam_df = pd.read_csv(\n infile,\n sep=ped_sep,\n index_col=False,\n skipinitialspace=True,\n converters={\n \"role\": Role.from_name,\n \"gender\": Sex.from_name,\n \"sex\": Sex.from_name,\n },\n dtype={\"familyId\": str, \"personId\": str},\n comment=\"#\",\n )\n\n fam_df = fam_df.rename(\n columns={\n \"gender\": \"sex\",\n \"personId\": \"person_id\",\n \"familyId\": \"family_id\",\n \"momId\": \"mom_id\",\n \"dadId\": \"dad_id\",\n \"sampleId\": \"sample_id\",\n },\n )\n\n fam_df[\"status\"] = pd.Series(index=fam_df.index, data=1)\n fam_df.loc[fam_df.role == Role.prb, \"status\"] = 2\n fam_df[\"status\"] = fam_df.status.apply(Status.from_value)\n\n fam_df[\"mom_id\"] = pd.Series(index=fam_df.index, data=\"0\")\n fam_df[\"dad_id\"] = pd.Series(index=fam_df.index, data=\"0\")\n\n if \"sample_id\" not in fam_df.columns:\n sample_ids = pd.Series(data=fam_df[\"person_id\"].values)\n fam_df[\"sample_id\"] = sample_ids\n\n families = defaultdict(list)\n for rec in fam_df.to_dict(orient=\"records\"):\n families[rec[\"family_id\"]].append(rec)\n\n result = defaultdict(list)\n for fam_id, members in families.items():\n mom_id = None\n dad_id = None\n children = []\n for member in members:\n role = member[\"role\"]\n if role == Role.mom:\n mom_id = member[\"person_id\"]\n elif role == Role.dad:\n dad_id = member[\"person_id\"]\n else:\n assert role in set([Role.prb, Role.sib])\n children.append(member)\n for child in children:\n child[\"mom_id\"] = mom_id\n child[\"dad_id\"] = dad_id\n\n result[fam_id] = [\n Person(**member) # type: ignore\n for member in members\n ]\n\n return FamiliesData.from_family_persons(result)",
"def ParseFEM(self, use_cython=True, raw=None):\n if not vtk_loaded:\n raise Exception('Unable to load VTK module. Cannot parse raw cdb data')\n return\n \n if self.CheckRaw():\n raise Exception('Missing key data. Cannot parse into unstructured grid.') \n \n # Convert to vtk style arrays\n if use_cython and cython_loaded:\n self.data = CDBparser.ParseForFEM(self.raw)\n else:\n self.data = PythonParser.ParseForFEM(self.raw)\n \n # Create unstructured grid\n self.uGrid = Utilities.MakeuGrid(self.data['offset'], self.data['cells'], \n self.data['cell_type'],\n self.data['nodes'][:, :3])\n\n # Store original ANSYS cell and node numbering\n Utilities.AddPointScalars(self.uGrid, self.data['orignode'], 'ANSYSnodenum')\n\n # Extract ANSYS element numbering and store\n ansyselem = self.raw['enum'].compress(self.data['elemused'])\n Utilities.AddCellScalars(self.uGrid, ansyselem, 'ANSYSelemnum')\n\n # Add node components to unstructured grid\n ibool = np.empty(self.uGrid.GetNumberOfPoints(), dtype=np.int8)\n for comp in self.data['node_comps']:\n ibool[:] = 0 # reset component array\n ibool[self.data['node_comps'][comp]] = 1 \n Utilities.AddPointScalars(self.uGrid, ibool, comp)\n \n # Add tracker for original node numbering\n Utilities.AddPointScalars(self.uGrid,\n np.arange(self.uGrid.GetNumberOfPoints()),\n 'VTKorigID')\n \n return self.data, self.uGrid, self.data['cellarr'], self.data['ncellpts']",
"def parseDataField(self):\r\n devId = str(self.deviceId)\r\n datamap = self._datamaps[devId]\r\n work = ''\r\n dataIndex = 0\r\n fieldIndex = 0\r\n mapIndex = 0\r\n self.fields=[]\r\n while mapIndex < len(datamap):\r\n mapChar = datamap[mapIndex]\r\n mapValue = int(mapChar)\r\n if fieldIndex == mapValue:\r\n #we've found another character in our current field\r\n work = work + self.dataField[dataIndex]\r\n mapIndex = mapIndex + 1\r\n dataIndex = dataIndex + 1\r\n elif fieldIndex+1 == mapValue:\r\n #we've found the end of the field we're working on\r\n self.fields.append(int(work, 16))\r\n work = ''\r\n fieldIndex = fieldIndex + 1\r\n else:\r\n if len(work) > 0:\r\n self.fields.append(int(work, 16))\r\n work = ''\r\n fieldIndex = fieldIndex + 1\r\n mapIndex = mapIndex + 1\r\n dataIndex = dataIndex + 1\r\n\r\n if len(work) > 0:\r\n self.fields.append(int(work, 16))\r\n\r\n self.service = self._servicemaps[devId]\r\n self.types = self._typemaps[devId]\r\n self.units = self._unitmaps[devId]\r\n\r\n self.customConvert()\r\n self.extendedConvert()\r\n self.convertLittleEndian()\r\n\r\n return",
"def _parse(self):\n pass",
"def parse_from_bytes(self, raw_buffer):\n\n try:\n (cpu_svn,\n self.misc_select,\n _,\n attributes,\n mr_enclave,\n _,\n mr_signer,\n _,\n self.isv_prod_id,\n self.isv_svn,\n _,\n report_data) = \\\n struct.unpack(self._format, raw_buffer)\n\n # Further parse embedded structures\n self.cpu_svn.parse_from_bytes(cpu_svn)\n self.attributes.parse_from_bytes(attributes)\n self.mr_enclave.parse_from_bytes(mr_enclave)\n self.mr_signer.parse_from_bytes(mr_signer)\n self.report_data.parse_from_bytes(report_data)\n except struct.error as se:\n raise ValueError('Unable to parse: {}'.format(se))",
"async def parse(self, raw: str) -> dict:",
"def parse(self):\n raise NotImplementedError",
"def deserialize(self, data):",
"def load_family_members():\n\n Member.query.delete()\n\n for row in open('data/seed_data_sample_plain'):\n strip_row = row.strip()\n split_row = strip_row.split('|')\n\n member_id = split_row[0].strip()\n\n first_name = split_row[1].strip()\n\n last_name = split_row[2].strip()\n\n if split_row[3].strip() is not None:\n eng_title = split_row[3].strip()\n else:\n eng_title = None\n\n if split_row[4].strip() is not None:\n alt_name = split_row[4].strip()\n else:\n alt_name = None\n\n if split_row[5].strip() is not None:\n lineage = split_row[5].strip()\n else:\n lineage = None\n\n if split_row[6].strip() == 1:\n deceased = split_row[6].strip()\n else:\n deceased = 0\n\n if split_row[7].strip() is not None:\n image_url = split_row[7].strip()\n else:\n image_url = None\n\n if split_row[8].strip() is not None:\n parents = split_row[8].strip()\n else:\n parents = None\n\n if split_row[9].strip() is not None:\n string_list_of_child_member_ids = split_row[9].strip() # produces a string\n list_of_child_member_ids = string_list_of_child_member_ids.split() # produces a list from the string\n\n children = [Member(member_id=int(num)) for num in list_of_child_member_ids]\n else:\n children = None\n\n if split_row[10].strip() is not None:\n string_list_of_spouse_member_ids = split_row[10].strip()\n list_of_spouse_member_ids = string_list_of_spouse_member_ids.split()\n\n spouse = [Member(member_id=int(num)) for num in list_of_spouse_member_ids]\n else:\n spouse = None\n\n try:\n member = Member(member_id=member_id,\n first_name=first_name,\n last_name=last_name,\n eng_title=eng_title,\n alt_name=alt_name,\n lineage=lineage,\n deceased=deceased,\n image_url=image_url,\n parents=parents,\n children=children,\n spouse=spouse)\n\n db.session.add(member)\n except:\n import pdb; pdb.set_trace()\n\n db.session.commit()",
"def parse(self):\n pass",
"def parse(self):\n pass",
"def parse(self):\n pass",
"def parse(self):\n pass",
"def parse_string(self, data):\n pass",
"def family_detail_extractor(data):\n family_list = {}\n try:\n cnt = 0\n family_id = data['hof_Details']['FAMILYIDNO']\n for key in data:\n if key == 'family_Details':\n for items in data[key]:\n cnt = user_entry(cnt, family_list, items, False, family_id)\n elif key == 'hof_Details':\n cnt = user_entry(cnt, family_list, data[key], True, family_id)\n return family_list\n except AttributeError:\n print 'Json object cannot be decoded // Extractor'",
"def parser(raw_seq, date):\n taxon_id = int(raw_seq.features[0].qualifiers['db_xref'][0][6:])\n organism = raw_seq.annotations['organism']\n accession = raw_seq.annotations['accessions'][0]\n gene = []\n records = []\n frag_type = 'whole'\n begin = 1\n end = len(raw_seq)\n sequence = str(raw_seq.seq)\n name = organism\n strand = 1\n rec = [taxon_id, organism, accession, name, frag_type, begin, end, strand, sequence, date]\n records.append(rec)\n for i in raw_seq.features:\n if i.type == 'gene' and 'gene' in i.qualifiers:\n if i.location_operator != 'join':\n frag_type = 'gene'\n begin = int(i.location.start)\n end = int(i.location.end)\n sequence = str(raw_seq.seq[begin:end])\n name = str(i.qualifiers['gene'][0])\n strand = str(i.location.strand)\n rec = [taxon_id, organism, accession, name, frag_type, begin, end, strand, sequence, date]\n elif i.location_operator == 'join':\n frag_type = 'gene'\n begin = int(i.sub_features[0].location.start)\n end = int(i.sub_features[0].location.end)\n name = str(i.qualifiers['gene'][0])\n strand = str(i.location.strand)\n sequence = ''\n rec = [taxon_id, organism, accession, name, frag_type, begin, end, strand, sequence, date]\n gene.append(rec)\n begin = int(i.sub_features[1].location.start)\n end = int(i.sub_features[1].location.end)\n sequence = ''.join([str(raw_seq.seq[begin:end]), str(raw_seq.seq[begin:end])])\n rec = [taxon_id, organism, accession, name, frag_type, begin, end, strand, sequence, date]\n elif i.type == 'CDS' and 'gene' in i.qualifiers:\n frag_type = 'cds'\n begin = int(i.location.start)\n end = int(i.location.end)\n sequence = str(raw_seq.seq[begin:end])\n name = str(i.qualifiers['gene'][0]).replace(' ', '_')\n strand = str(i.location.strand)\n rec = [taxon_id, organism, accession, name, frag_type, begin, end, strand, sequence, date]\n elif i.type == 'tRNA' and 'gene' in i.qualifiers:\n frag_type = 'tRNA'\n begin = int(i.location.start)\n end = int(i.location.end)\n sequence = str(raw_seq.seq[begin:end])\n if len(sequence) >= 100:\n sequence = ''\n name = str(i.qualifiers['gene'][0]).replace(' ', '_')\n strand = str(i.location.strand)\n rec = [taxon_id, organism, accession, name, frag_type, begin, end, strand, sequence, date]\n elif i.type == 'rRNA':\n frag_type = 'rRNA'\n begin = int(i.location.start)\n end = int(i.location.end)\n sequence = str(raw_seq.seq[begin:end])\n name = str(i.qualifiers['product'][0]).replace(' ', '_')\n strand = str(i.location.strand)\n rec = [taxon_id, organism, accession, name, frag_type, begin, end, strand, sequence, date]\n elif i.type == 'exon' and 'gene' in i.qualifiers:\n frag_type = 'exon'\n begin = int(i.location.start)\n end = int(i.location.end)\n sequence = str(raw_seq.seq[begin:end])\n if 'number' in i.qualifiers:\n name = '{0}_exon_{1}'.format(i.qualifiers['gene'][0],\n i.qualifiers['number'][0])\n else:\n name = '{0}_exon'.format(i.qualifiers['gene'][0])\n strand = int(i.location.strand)\n rec = [taxon_id, organism, accession, name, frag_type, begin, end, strand, sequence, date]\n elif i.type == 'intron' and 'gene' in i.qualifiers:\n frag_type = 'intron'\n begin = int(i.location.start)\n end = int(i.location.end)\n sequence = str(raw_seq.seq[begin:end])\n strand = str(i.location.strand)\n if 'number' in i.qualifiers:\n name = '{0}_{1}_intron'.format(i.qualifiers['gene'][0],\n i.qualifiers['number'][0])\n else:\n name = '{0}_intron'.format(i.qualifiers['gene'][0])\n rec = [taxon_id, organism, accession, name, frag_type, begin, end, strand, sequence, date]\n\n records.append(rec)\n 
gene.sort(key=lambda x: x[5])\n\n for i in range(len(gene) - 1):\n frag_type = 'spacer'\n now = gene[i]\n then = gene[i + 1]\n tail = now[6] + 1\n head = then[5] - 1\n sequence = str(raw_seq.seq[tail:head])\n name = '{0}-{1}_spacer'.format(now[3], then[3])\n strand = 0\n rec = [taxon_id, organism, accession, name, frag_type, begin, end, strand, sequence, date]\n records.append(rec)\n records.extend(gene)\n\n database.extend(records)",
"def test_03_Family(self):\n l_xml = self.m_xml.light\n l_device = self.m_device_obj\n l_light = FamUtil.read_family_data(self.m_pyhouse_obj, l_device, l_xml)\n # print(PrettyFormatAny.form(l_light, 'C4-03-A - Light'))\n self.assertEqual(str(l_light.UPBAddress), TESTING_UPB_ADDRESS)",
"def parse(self):\n\t\tsub = self.body.split(' ')\n\t\tif len(sub) == 3:\n\t\t\tself.latitude = float(sub[1])\n\t\t\tself.longitude = float(sub[2])\n\t\telse:\n\t\t\tself.latitude = None\n\t\t\tself.longitude = None\n\t\t\traise Exception(\"Invalid message\")",
"def parse_data(self, reading):\n\t\tif len(reading) == 5:\n\t\t\ttry:\n\t\t\t\tpacket = TrollPacket.from_binary_packet(reading)\n\t\t\t\tself.update_listeners(packet)\n\t\t\texcept KeyError as e:\n\t\t\t\terr_msg = 'Arduino metadata %s. Binary packet: %s' % (e, reading.hex().upper())\n\t\t\t\tlogging.exception(err_msg)"
]
| [
"0.639509",
"0.6316901",
"0.6251834",
"0.60667557",
"0.5943526",
"0.5904677",
"0.590174",
"0.58886784",
"0.58568",
"0.5732877",
"0.5695509",
"0.5673828",
"0.5630869",
"0.5599959",
"0.5596141",
"0.55798674",
"0.5560866",
"0.5554267",
"0.551541",
"0.5472412",
"0.5449194",
"0.5449194",
"0.5449194",
"0.5449194",
"0.5443753",
"0.5415604",
"0.5408812",
"0.5390921",
"0.5382928",
"0.53758764"
]
| 0.7011084 | 1 |
Decodes jwt token and returns u_id | def decode_token(token):
payload = None
try:
payload = jwt.decode(token.encode('utf-8'), '1$Arh"1bWa/7+OS', algorithm='HS256')['u_id']
except jwt.InvalidTokenError:
pass
return payload | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def decode_token(token):\n decoded_token = jwt.decode(token, secret_key, algorithms=['HS256'])\n return decoded_token",
"def decode_token(token):\n\n return jwt.decode(\n token, settings.JWT_SECRET, algorithms=[settings.JWT_ALGO])",
"def decode_auth_token(auth_token):\n try:\n payload = jwt.decode(auth_token, key)\n return payload['sub']\n except jwt.ExpiredSignatureError:\n return 'Signature expired. Please log in again.'\n except jwt.InvalidTokenError:\n return 'Invalid token. Please log in again.'",
"def decode(encoded_token):\n return jwt.decode(encoded_token, key=settings.JWT_AUTH['JWT_SECRET_KEY'])",
"def decode_token(token):\n try:\n payload = jwt.decode(\n token, app.config.get('SECRET_KEY'), algorithms='HS256')\n return payload['sub']\n except jwt.ExpiredSignatureError:\n return \"Expired token. Please login to get a new token\"\n except jwt.InvalidTokenError:\n return \"Invalid token. Please register or login\"",
"def decode_token(token):\n try:\n # Decode token with our secret key\n payload = jwt.decode(token, SECRET_KEY)\n return payload['sub']\n except jwt.ExpiredSignatureError:\n # token has expired\n return \"Timed out. Please login to get a new token\"\n except jwt.InvalidTokenError:\n return \"Invalid token. Please register or login\"",
"def decode(token):\n return jwt.decode(token, app.config[\"JWT_SECRET\"], algorithms=[\"HS256\"])",
"def decode_auth_token(auth_token: str) -> Union[str, int]:\n try:\n payload = jwt.decode(auth_token, key, algorithms='HS256')\n \n user=Usuario.query.filter_by(id=payload['uid']).first()\n is_blacklisted_token = BlacklistToken.check_blacklist(auth_token)\n if is_blacklisted_token:\n return 'Conta deslogada. Por favor realizar o login novamente.'\n elif user.ativo!=True:\n return 'Conta invativa. Por favor entrar em contato com o administrador.'\n else:\n return payload['uid']\n except jwt.ExpiredSignatureError:\n return 'Token expirado. Por favor realizar o login novamente.'\n except jwt.InvalidTokenError:\n return 'Token inválido. Por favor realizar o login novamente.'",
"def decode_auth_token(auth_token):\n try:\n payload = jwt.decode(auth_token, Config.SECRET_KEY,algorithms='HS256')\n return payload\n except jwt.ExpiredSignatureError:\n return 'Signature expired. Please log in again.'\n except jwt.InvalidTokenError:\n return 'Invalid token. Please log in again.'",
"def decode_auth_token(auth_token):\n if len(auth_token) != 139:\n return \"Invalid token. Please log in again.\"\n try:\n payload = jwt.decode(auth_token, key)\n is_blacklisted_token = BlacklistToken.check_blacklist(auth_token)\n if is_blacklisted_token:\n return 'Token blacklisted. Please log in again.'\n else:\n return payload['sub']\n except jwt.ExpiredSignatureError:\n return 'Signature expired. Please log in again.'\n except jwt.InvalidTokenError:\n return 'Invalid token. Please log in again.'",
"def decode_token(token):\n try:\n # try to decode the token using our SECRET variable\n payload = jwt.decode(token, app.config.get('SECRET_KEY'), algorithms=['HS256'])\n return payload['sub']\n except jwt.ExpiredSignatureError:\n # the token is expired, return an error string\n return \"Expired token. Please login to get a new token\"\n except jwt.InvalidTokenError:\n # the token is invalid, return an error string\n return \"Invalid token. Please register or login\"",
"def decode_auth_token(auth_token, config):\n secret_key = config['JWT_SECRET_KEY']\n try:\n payload = jwt.decode(auth_token, secret_key)\n return payload['sub']\n except jwt.ExpiredSignatureError as error:\n raise ExpiredToken from error\n except (jwt.InvalidTokenError, jwt.DecodeError) as error:\n raise InvalidToken from error",
"def decode_auth_token(auth_token):\n try:\n payload = jwt.decode(auth_token, app.config.get('SECRET_KEY'))\n\n # is_blacklisted_token = BlacklistToken.check_blacklist(auth_token)\n # if is_blacklisted_token:\n # return 'Token blacklisted. Please log in again.'\n # else:\n return payload['sub']\n except jwt.ExpiredSignatureError:\n return 'Signature expired. Please log in again.'\n except jwt.InvalidTokenError:\n return 'Invalid token. Please log in again.'",
"def parse_token(token):\n return jwt.decode(token, app.config['JWT_SECRET'])",
"def decode_auth_token(auth_token):\n try:\n payload = jwt.decode(auth_token, Config.SECRET_KEY,algorithms='HS256')\n return payload['role']\n except jwt.ExpiredSignatureError:\n return 'Signature expired. Please log in again.'\n except jwt.InvalidTokenError:\n return 'Invalid token. Please log in again.'",
"def decode_auth_token(secret_key, auth_token):\n try:\n payload = jwt.decode(auth_token, secret_key) \n is_blacklisted_token = BlacklistToken.check_blacklist(auth_token)\n if is_blacklisted_token:\n return 'Token blacklisted. Please log in again.' \n else: \n return payload['sub']\n except jwt.ExpiredSignatureError:\n return 'Signature expired. Please log in again.'\n except jwt.InvalidTokenError:\n return 'Invalid token. Please log in again.'",
"def decode_token(token):\n try:\n # try to decode the token using our SECRET variable\n payload = jwt.decode(token, os.environ.get('SECRET', 'test'))\n return \"\", payload['sub']\n except jwt.ExpiredSignatureError:\n # the token is expired, return an error string\n return \"Expired token. Please login to get a new token\", None\n except jwt.InvalidTokenError:\n # the token is invalid, return an error string\n return \"Invalid token. Please register or login\", None",
"def decodeJWT(self, token):\n try:\n return jwt.decode(token, self.secret, algorithms=[self.algorithm])\n except jwt.exceptions.InvalidSignatureError:\n raise ValueError(f'The following JWT is invalid: {token}')",
"def decode_auth_token(auth_token): \n try: \n payload = jwt.decode(auth_token, getattr(settings, \"SECRET_KEY\", \"\"),algorithms=['HS256']) \n is_blacklisted_token = User.check_blacklist(auth_token)\n if is_blacklisted_token:\n return False,'Token blacklisted. Please log in again.'\n else:\n return True, payload['sub']\n except jwt.ExpiredSignatureError:\n return False,'Signature expired. Please log in again.'\n except jwt.InvalidTokenError:\n return False,'Invalid token. Please log in again.'",
"def try_get_user_id_from_token(token):\n dot_index = token.find('.')\n if (dot_index > 0):\n token_base64 = token[:dot_index]\n \n try:\n token_string = b64decode(token_base64)\n except Base64DecodeError:\n user_id = 0\n else:\n try:\n user_id = int(token_string)\n except ValueError:\n user_id = 0\n else:\n user_id = 0\n \n return user_id",
"def decodeAuthToken(authToken):\n try:\n return jwt.decode(authToken, current_app.config['SECRET_KEY'], algorithm='HS256')['sub']\n except jwt.ExpiredSignatureError:\n return 'signature expired, Please login again'\n except jwt.InvalidTokenError:\n return 'Invalid token'",
"def jwt_get_user_id_from_payload_handler(payload):\n user_id = payload.get('user_id')\n return user_id",
"def get_user_id(jwt_token):\n return (\n jwt_token.payload[\"user\"].get(\"id\")\n if jwt_token.payload.get(\"user\")\n else jwt_token.payload[\"session_id\"]\n )",
"def decode_payload(encoded_payload):\n jwt_secret = app.config['SECRET_KEY']\n payload = jwt.decode(encoded_payload, jwt_secret, algorithms='HS256')\n\n return payload",
"def decode_request(self, data):\n return decode_jwt(data[\"jwt\"], data[\"cert_name\"], self.node.node_name, self.node.id)",
"def get_token(self):\n\n try:\n return jwt.decode(self.fetch_token(), KEY, algorithms=['HS256'])\n except jwt.exceptions.DecodeError:\n raise InvalidToken",
"def test_decode_token(token):\n payload = User.decode_auth_token(token)\n user = User.find_by_id(payload.get('id'))\n assert isinstance(user, User) is True\n assert user.email == '[email protected]'",
"def decode_token(token, options=JWT_OPTIONS):\n return jwt.decode(\n token,\n SECRET_KEY,\n issuer=JWT_ISSUER,\n audience=JWT_AUDIENCE,\n options=options,\n algorithms=(JWT_OPTIONS_ALGORITHM,)\n )",
"def get_username_from_jwt(token: str):\n return decode_token(token).get(\"username\")",
"def decode_token(token: str):\n try:\n decoded = b64decode(token.encode())\n key_data = orjson.loads(decoded)\n timestamp = int(key_data['t'])\n pub_key = key_data['p']\n signature = key_data['s']\n except (ValueError, TypeError, KeyError, orjson.JSONDecodeError, binascii.Error) as e:\n logging.debug(\"Invalid token format: %s\", token)\n raise HTTPException(status_code=403, detail=\"Invalid token\") from e\n\n if timestamp > time.time() or timestamp < time.time() - TOKEN_EXPIRE_INTERVAL:\n raise HTTPException(status_code=403, detail=\"Token expired\")\n\n try:\n check_signature(\n ''.join([pub_key, str(timestamp)]),\n signature,\n pub_key\n )\n except InvalidSignature as e:\n logging.error(\"Invalid token signature. Might be access violation.\")\n raise HTTPException(status_code=403, detail=\"Invalid token\") from e\n\n return pub_key"
]
| [
"0.73388416",
"0.7323046",
"0.7312567",
"0.73051447",
"0.72864336",
"0.7283443",
"0.7226972",
"0.72155005",
"0.7181544",
"0.71759796",
"0.7158139",
"0.7103684",
"0.7087449",
"0.70397997",
"0.70281523",
"0.70019835",
"0.6946715",
"0.6909414",
"0.684371",
"0.6787965",
"0.6777479",
"0.6774249",
"0.6763163",
"0.6600118",
"0.6567968",
"0.65248775",
"0.64975816",
"0.6485667",
"0.6447479",
"0.64452565"
]
| 0.8062185 | 0 |
Turn a BeautifulSoup form into a dict of fields and default values | def __extract_form_fields(self, soup):
fields = OrderedDict()
for input in soup.find_all('input', attrs={'name': True}):
if 'type' not in input.attrs:
input.attrs['type'] = 'text'
# Single element name/value fields
if input.attrs['type'].lower() in ('text', 'email', 'hidden', 'password', 'submit', 'image'):
value = ''
if 'value' in input.attrs:
value = input.attrs['value']
fields[input.attrs['name']] = value
continue
# Checkboxes and radios
if input.attrs['type'].lower() in ('checkbox', 'radio'):
value = ''
if input.has_attr("checked"):
if input.has_attr('value'):
value = input.attrs['value']
else:
value = 'on'
if value:
fields[input.attrs['name']] = value
continue
# Textareas
for textarea in soup.find_all('textarea', attrs={'name': True}):
fields[textarea.attrs['name']] = textarea.string or ''
# Select fields
for select in soup.find_all('select', attrs={'name': True}):
value = ''
options = select.find_all('option')
is_multiple = select.has_attr('multiple')
selected_options = [
option for option in options
if option.has_attr('selected') and option.has_attr('value')
]
# If no select options, go with the first one
if not selected_options and options:
selected_options = [options[0]]
if not is_multiple:
if len(selected_options) == 1:
if selected_options[0].has_attr('value'):
value = selected_options[0]['value']
else:
value = ''
else:
value = [
option['value'] for option in selected_options
if option.has_attr('value')
]
fields[select['name']] = value
return fields | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_form_fields(url):\n page = urlopen(url)\n soup = BeautifulSoup(page)\n form = soup.form\n\n # Setting up data structure\n form_data = dict(fields=[])\n form_attr = dict(form.attrs)\n\n form_data['title'] = soup.h1 and soup.h1.text or soup.title.text\n form_data['action'] = urljoin(url, form_attr['action'])\n form_data['method'] = form_attr['method']\n \n # Get a list of the entry labels\n labels = form.findAll(['label'], {\"class\": \"ss-q-title\"})\n\n label_contents = []\n for label in labels:\n label_contents.append({label.attrs[1][0]: label.attrs[1][1], 'contents': label.contents[0]})\n \n #print(label_contents)\n \n #\n # Handle text input boxes\n #\n textboxes = form.findAll(['input'], {\"type\": \"text\"})\n \n #textbox_description = {}\n\n for textbox in textboxes: \n textbox_description = {} \n for index, label in enumerate(label_contents):\n if label_contents[index]['for'] == textbox['id']:\n #print(label_contents[index]['contents'].strip())\n textbox_description['label'] = label_contents[index]['contents'].strip()\n break\n \n abbreviated_attributes = dict((k,v) for (k,v) in textbox.attrs if k == \"type\" or k == \"name\")\n # abbreviated_attributes = {k : v for k in textbox.attrs} # 2.7 and above\n \n # Merge abbreviated attributes with textbox description\n textbox_description = dict(textbox_description.items() + abbreviated_attributes.items())\n \n form_data['fields'].append(textbox_description)\n \n #\n # Handle the textareas\n #\n textareas = form.findAll(['textarea'])\n \n for textarea in textareas:\n textarea_description = {}\n for index, label in enumerate(label_contents):\n if label_contents[index]['for'] == textarea['id']:\n textarea_description['label'] = label_contents[index]['contents'].strip()\n break\n \n abbreviated_attributes = dict((k,v) for (k,v) in textarea.attrs if k == \"name\")\n abbreviated_attributes['type'] = textarea.name\n \n textarea_description = dict(textarea_description.items() + abbreviated_attributes.items())\n \n form_data['fields'].append(textarea_description)\n \n \"\"\"\n Ignore groups of checkboxes for now\n \n ####\n # Handle groups of checkboxes\n ####\n \n checkboxes = form.findAll(['input'], {'type': 'checkbox'})\n\n # Get your checkbox groups\n checkbox_groups = []\n for checkbox in checkboxes:\n if checkbox['name'] not in checkbox_groups:\n checkbox_groups.append(checkbox['name'])\n\n checkbox_questions = {}\n\n for group in checkbox_groups:\n checkbox_questions[group] = {'label': {}, 'options': []}\n \n for checkbox in checkboxes:\n for group in checkbox_groups:\n if checkbox['name'] == group:\n checkbox_questions[group]['options'].append({'attributes': dict(checkbox.attrs)})\n \n # Handle the label\n checkbox_name_pieces = checkbox['name'].split('.')\n checkbox_name_map = checkbox_name_pieces[0] + '_' + checkbox_name_pieces[1]\n \n for label in label_contents:\n if label['for'] == checkbox_name_map:\n checkbox_questions[group]['label'] = label\n page_data['form_contents'].append({'checkbox_groups': checkbox_questions})\n \"\"\"\n \n return form_data",
"def parse_login_form_fields (self, form_soup):\n login_input_fields = {}\n login_inputs = form_soup.find_all('input')\n # gather all form fields, set an empty string as the default value\n for item in login_inputs:\n keys = dict(item.attrs).keys()\n if 'name' in keys and 'value' not in keys:\n login_input_fields[item['name']] = ''\n elif 'name' in keys and 'value' in keys:\n login_input_fields[item['name']] = item['value']\n return login_input_fields",
"def fields(self, required=False):\n form = h.simplsale_form(self._index_xml)\n if required:\n required = '.required'\n else:\n required = ''\n elements = CSSSelector('input[type!=\"submit\"]%s, select%s'\n % (required, required))(form)\n names = []\n for e in elements:\n name = e.attrib.get('name', None)\n if name is not None:\n names.append(name)\n if 'billing_amount' in names and not required:\n names.extend(['billing_amount_price', 'billing_amount_name'])\n d = dict((key, '') for key in names)\n return d",
"def minimal_form_data():\n\n form_data = { \n 'status': '0',\n 'title': 'Recurso de teste',\n 'description': 'Recurso para testes',\n 'abstract': 'Resumo',\n \n 'main-descriptor-content_type-object_id-TOTAL_FORMS': '0', \n 'main-descriptor-content_type-object_id-INITIAL_FORMS': '0',\n\n 'main-keyword-content_type-object_id-TOTAL_FORMS': '0', \n 'main-keyword-content_type-object_id-INITIAL_FORMS': '0',\n\n 'main-resourcethematic-content_type-object_id-TOTAL_FORMS': '0',\n 'main-resourcethematic-content_type-object_id-INITIAL_FORMS': '0',\n }\n\n return form_data",
"def get_hidden(self, soup):\n hidden = soup.find_all(\"input\", {\"type\": \"hidden\"})\n return {field[\"name\"]: field[\"value\"] for field in hidden}",
"def getFormData(page):\n soup = BeautifulSoup(page, 'html.parser')\n viewstate = soup.find('input', {'id': '__VIEWSTATE' })['value']\n generator = soup.find('input', {'id': '__VIEWSTATEGENERATOR'})['value']\n validation = soup.find('input', {'id': '__EVENTVALIDATION' })['value']\n return (viewstate, generator, validation)",
"def _get_plugin_form_data(self, fields):\n form_data = {}\n for field, default_value in fields:\n try:\n form_data.update(\n {field: self.plugin_data.get(field, default_value)}\n )\n except Exception as err:\n logger.debug(\n \"Error in class %s. Details: %s\",\n self.__class__.__name__,\n str(err)\n )\n return form_data",
"def extract_form_data(response):\n\n def add_param(element):\n \"\"\" Add the info of the given element to params if it has a name \"\"\"\n nonlocal params\n name = element.attrib.get(\"name\", None)\n value = element.attrib.get(\"value\", \"\")\n if name:\n params[name] = value\n\n # find and iterate over all forms contained in the response\n form_data = []\n forms = response.xpath(\"//form\")\n for form in forms:\n action = form.attrib.get(\"action\", None)\n form_id = form.attrib.get(\"id\", None)\n method = form.attrib.get(\"method\", None)\n # only process forms with action and method attribute\n if (action is None) or (not method):\n continue\n # adjust action and method strings\n if action == \"#\" or action == \"\":\n action = response.url\n action = action.replace(\"&\", \"&\")\n action = action.replace(\"&\", \"&\")\n method = method.upper()\n\n # extract all the different parameters\n params = {}\n for _input in form.xpath(\"//input\"):\n add_param(_input)\n\n for select in form.xpath(\"//select\"):\n add_param(select)\n\n for textarea in form.xpath(\"//textarea\"):\n add_param(textarea)\n\n # handle the use of form IDs\n if form_id:\n for _input in response.xpath(\"//input[@form='%s']\" % form_id):\n add_param(_input)\n\n for select in response.xpath(\"//select[@form='%s']\" % form_id):\n add_param(select)\n\n for textarea in response.xpath(\"//textarea[@form='%s']\" % form_id):\n add_param(textarea)\n\n # if there is only one form, consider all inputs of the page to be part of this form\n if len(forms) == 1:\n for _input in response.xpath(\"//input\"):\n add_param(_input)\n\n for select in response.xpath(\"//select\"):\n add_param(select)\n\n for textarea in response.xpath(\"//textarea\"):\n add_param(textarea)\n\n form_data.append({\"action\": action, \"method\": method, \"params\": params, \"id\": form_id})\n return form_data",
"def convert_for_form(data):\n if \"name\" in data:\n data[\"full_name\"] = data[\"name\"].get(\"value\")\n try:\n data[\"given_names\"] = data[\"name\"].get(\n \"value\").split(\",\")[1].strip()\n except IndexError:\n data[\"given_names\"] = \"\"\n data[\"family_name\"] = data[\"name\"].get(\"value\").split(\",\")[0].strip()\n data[\"display_name\"] = data[\"name\"].get(\"preferred_name\")\n data[\"status\"] = data[\"name\"].get(\"status\", \"\").lower()\n if \"urls\" in data:\n data[\"websites\"] = []\n for url in data[\"urls\"]:\n if \"description\" not in url:\n data[\"websites\"].append({\"webpage\": url[\"value\"]})\n else:\n if url[\"description\"].lower() == \"twitter\":\n data[\"twitter_url\"] = url[\"value\"]\n elif url[\"description\"].lower() == \"blog\":\n data[\"blog_url\"] = url[\"value\"]\n elif url[\"description\"].lower() == \"linkedin\":\n data[\"linkedin_url\"] = url[\"value\"]\n del data[\"urls\"]\n if \"field_categories\" in data:\n data[\"research_field\"] = data['field_categories']\n if \"positions\" in data:\n data[\"institution_history\"] = []\n for position in data[\"positions\"]:\n if not any(\n [\n key in position for key in ('name', 'rank',\n 'start_year', 'end_year')\n ]\n ):\n if 'email' in position:\n # Only email available, take as public_email\n data[\"public_email\"] = position.get(\"email\")\n continue\n pos = {}\n pos[\"name\"] = position.get(\"institution\", {}).get(\"name\")\n pos[\"rank\"] = position.get(\"rank\", \"\")\n pos[\"start_year\"] = position.get(\"start_date\", \"\")\n pos[\"end_year\"] = position.get(\"end_date\", \"\")\n pos[\"current\"] = True if position.get(\"status\") else False\n pos[\"old_email\"] = position.get(\"old_email\", \"\")\n if position.get(\"email\"):\n pos[\"email\"] = position.get(\"email\", \"\")\n if not data.get(\"public_email\"):\n data[\"public_email\"] = position.get(\"email\")\n data[\"institution_history\"].append(pos)\n data[\"institution_history\"].reverse()\n if 'advisors' in data:\n advisors = data['advisors']\n data['advisors'] = []\n for advisor in advisors:\n adv = {}\n adv[\"name\"] = advisor.get(\"name\", \"\")\n adv[\"degree_type\"] = advisor.get(\"degree_type\", \"\")\n data[\"advisors\"].append(adv)\n if \"ids\" in data:\n for id in data[\"ids\"]:\n try:\n if id[\"type\"] == \"ORCID\":\n data[\"orcid\"] = id[\"value\"]\n elif id[\"type\"] == \"BAI\":\n data[\"bai\"] = id[\"value\"]\n elif id[\"type\"] == \"INSPIRE\":\n data[\"inspireid\"] = id[\"value\"]\n except KeyError:\n # Protect against cases when there is no value in metadata\n pass",
"def _hidden_inputs_as_dict(self, elements):\n\n data = {}\n\n # Make sure elements is a list\n if not isinstance(elements, list):\n elements = [elements]\n\n for element in elements:\n for input in element.select('input[type=hidden]'):\n data[input.attrs['name']] = input.attrs.get('value', '')\n\n return data",
"def get_form_details_sqli(form):\n details = {}\n # get the form action (target url)\n action = form.attrs.get(\"action\").lower()\n # get the form method (POST, GET, etc.)\n method = form.attrs.get(\"method\", \"get\").lower()\n # get all the input details such as type and name\n inputs = []\n cookies = {}\n for input_tag in form.find_all(\"input\"):\n input_type = input_tag.attrs.get(\"type\", \"text\")\n input_name = input_tag.attrs.get(\"name\")\n if input_name == 'csrf' or input_name == 'PHPSESSID':\n cookies[input_name] = input_tag.attrs.get(\"value\")\n inputs.append({\"type\": input_type, \"name\": input_name})\n # put everything to the resulting dictionary\n #print(cookies)\n details[\"action\"] = action\n details[\"method\"] = method\n details[\"inputs\"] = inputs\n return details,cookies",
"def _form_data(self, response):\n SQFI_audit_type = response.xpath(self.filters[6]).extract_first()\n SQFI_audit_type_val = response.xpath(self.filters[7]).extract_first()\n food_sector_categories = response.xpath(self.filters[8]).extract_first()\n food_sector_categories_val = response.xpath(self.filters[9]).extract()\n audit_rating = response.xpath(self.filters[10]).extract_first()\n audit_rating_val = response.xpath(self.filters[11]).extract()\n country = response.xpath(self.filters[12]).extract_first()\n country_val = response.xpath(self.filters[13]).extract()\n form_data = {\n SQFI_audit_type: SQFI_audit_type_val,\n food_sector_categories: food_sector_categories_val,\n audit_rating: audit_rating_val,\n country: country_val,\n }\n return form_data",
"def get_soup_general_data(soup):\n data_dict = {}\n\n name = soup.find(class_='product_title')\n if name:\n data_dict['name_of_game'] = name.h1.text\n\n pub = soup.find('li', class_='summary_detail publisher')\n if pub:\n data_dict['publisher'] = pub.a.text.strip()\n\n rel_date = soup.find('li', class_='summary_detail release_data')\n if rel_date:\n rel_date = rel_date.find('span', class_='data')\n if rel_date:\n data_dict['release_date'] = rel_date.text.strip()\n\n num_p = soup.find(\"li\", class_=\"summary_detail product_players\")\n if num_p:\n data_dict['num_players'] = num_p.find(class_=\"data\").text\n\n genres = soup.find(\"li\", class_='summary_detail product_genre')\n if genres:\n genres = genres.find_all('span', class_='data')\n data_dict['genres'] = [genre.text for genre in genres]\n\n age = soup.find(\"li\", class_=\"summary_detail product_rating\")\n if age:\n data_dict['age_rating'] = age.find('span', class_=\"data\").text\n\n return data_dict",
"def _fields_to_dict(fields_in):\n dict_out = {}\n\n for key, val in fields_in.items():\n param = {}\n param['default'] = val.missing\n param['type'] = type(val.missing)\n if key == 'files' or key == 'urls':\n param['type'] = str\n\n val_help = val.metadata['description']\n if 'enum' in val.metadata.keys():\n val_help = \"{}. Choices: {}\".format(val_help, \n val.metadata['enum'])\n param['help'] = val_help\n\n try:\n val_req = val.required\n except:\n val_req = False\n param['required'] = val_req\n\n dict_out[key] = param\n return dict_out",
"def extract_form_fields(item):\n # Strip off any trailing \\r\\n\n formitems = item.value.rstrip('\\r\\n')\n # Split the items by newline, this gives us a list of either 1, 3, 4\n # or 5 items long\n itemlist = formitems.split(\"\\n\")\n # Setup some regular expressions to parse the items\n re_list = [\n re.compile(\n '^[0-1][0-9]:[0-5][0-9]:[0-5][0-9] DEBUG - $'),\n re.compile('^(payload)({\".*)$'),\n re.compile('^([a-z]+): (.*)$'),\n ]\n itemdict = {}\n # Go through the 1, 3, 4 or 5 items list\n for anitem in itemlist:\n # Compare each item to the regular expressions\n for a_re in re_list:\n match = re.search(a_re, anitem)\n if match:\n if len(match.groups()) == 0:\n # We have a match but no groups, must be\n # the preamble.\n itemdict['preamble'] = match.group(0)\n elif len(match.groups()) == 2:\n # All other re's should have 2 matches\n itemdict[match.group(1)] = match.group(2)\n # We already have a match, skip other regular expressions.\n continue\n return itemdict",
"def _fields_to_dict(fields_in):\n\n dict_out = {}\n\n for key, val in fields_in.items():\n param = {}\n param['default'] = val.missing\n param['type'] = type(val.missing)\n if key == 'files' or key == 'urls':\n param['type'] = str\n\n val_help = val.metadata['description']\n # argparse hates % sign:\n if '%' in val_help:\n # replace single occurancies of '%' with '%%'\n # since '%%' is accepted by argparse\n val_help = re.sub(r'(?<!%)%(?!%)', r'%%', val_help)\n\n if 'enum' in val.metadata.keys():\n val_help = \"{}. Choices: {}\".format(val_help,\n val.metadata['enum'])\n param['help'] = val_help\n\n try:\n val_req = val.required\n except Exception:\n val_req = False\n param['required'] = val_req\n\n dict_out[key] = param\n return dict_out",
"def ingest_form_vars(request):\n data = {}\n for param, value in request.arguments.items():\n for i, item in enumerate(value):\n item = item.decode('utf-8')\n item = strings.as_numeric(item)\n value[i] = item\n data[param] = value[0] if len(value) == 1 else value\n return data",
"def extract_data_from_form(\n self, data: JobForm, many: bool, **kwargs\n ) -> Dict[str, Any]:\n\n def slugify(text: str) -> str:\n return text.lower().strip().replace(\" \", \"-\")\n\n return {\n \"experiment_name\": slugify(data.experiment_name.data),\n \"queue\": slugify(data.queue.data),\n \"timeout\": data.timeout.data or None,\n \"entry_point\": data.entry_point.data,\n \"entry_point_kwargs\": data.entry_point_kwargs.data or None,\n \"depends_on\": data.depends_on.data or None,\n \"workflow\": data.workflow.data,\n }",
"def field_to_dict(self, field):\n input_field = {}\n x = {}\n if not DEBUG:\n x = {\n \"type\": str(field.__class__.__name__),\n \"widget\": str(field.widget.__class__.__name__),\n }\n\n # help text for input\n if hasattr(field, 'help_text'):\n x.update({\"help_text\": field.help_text})\n\n # label for input\n if hasattr(field, 'label'):\n x.update({\"label\": field.label})\n\n # place holder object for for input\n if hasattr(field, 'initial'):\n input_field.update({\"placeholder\": field.initial})\n\n # min length object for for input\n if hasattr(field, 'min_length'):\n input_field.update({\"min_length\": field.min_length})\n\n # max object for for input\n if hasattr(field, 'max_length'):\n input_field.update({\"max_length\": field.max_length})\n\n # hidden object for for input\n if hasattr(field, 'widget.is_hidden'):\n input_field.update({\"hidden\": field.widget.is_hidden})\n\n # is required object for for input\n if hasattr(field.widget, 'is_required'):\n input_field.update({\"required\": field.widget.is_required})\n\n # all attributes for for input\n if hasattr(field.widget, 'attrs'):\n x.update({\"attrs\": field.widget.attrs})\n\n # type object for for input\n if 'data-field-type' in field.widget.attrs:\n input_field.update({\"type\": field.widget.attrs['data-field-type']})\n\n x.update({\"input_field\": input_field})\n return x",
"def form(title, fields, methods, data, icon=None, id=None, **context):\n f = {}\n\n for field_id, _field in fields.items():\n f[field_id] = {\"value\": data.get(field_id, None)}\n f[field_id].update(_field)\n\n result = {\n \"class\": \"form\",\n \"title\": title,\n \"fields\": f,\n \"icon\": icon,\n \"methods\": {method_id: _method for method_id, _method in methods.items()},\n \"context\": context\n }\n\n if id:\n result[\"id\"] = id\n\n return result",
"def get_fields(node):\r\n return dict(iter_fields(node))",
"def get_general(soup):\n \n general_info = {}\n general_info.update(get_route_name(soup))\n general_info.update(get_box_data(soup))\n general_info.update(get_description(soup))\n general_info.update(get_hierarchy(soup))\n general_info.update(get_first_img_source(soup))\n\n return general_info",
"def parse_api(self, soup):\n return {}",
"def getform():\n form = cgi.FieldStorage()\n host = form.getvalue('host')\n user = form.getvalue('user')\n passwd = form.getvalue('passwd')\n cert = form.getvalue('cert')\n proxy = form.getvalue('proxy')\n name = form.getvalue('name')\n return (host, user, passwd, cert, proxy, name)",
"def your_reservation_defaults(self, defaults):\n\n default_email = self.email()\n if default_email:\n defaults['email'] = self.email()\n\n data = self.additional_data()\n\n if not data:\n return defaults\n\n for form in data:\n if form in self.context.formsets:\n for field in data[form]['values']:\n defaults[\"%s.%s\" % (form, field['key'])] = field['value']\n\n return defaults",
"def input_fields(self, preamble, *args):\n\n self.new_section()\n if preamble is not None:\n self.message(preamble)\n\n if any([True for x in args if len(x) > 3]):\n self.message(\"\"\"\n Some questions have default answers which can be selected by\n pressing 'Enter' at the prompt.\"\"\")\n\n output_dict = { }\n for field in args:\n (field_name, prompt, field_type) = field[:3]\n\n default = None\n if len(field) > 3:\n default = field[3]\n\n if field_type == 'string':\n output_dict[field_name] = self.input(prompt, default = default)\n elif field_type == 'password':\n output_dict[field_name] = self.input(prompt, no_echo=True)\n elif field_type == 'boolean':\n output_dict[field_name] = self.input_boolean(prompt, default = default)\n elif field_type == 'integer':\n output_dict[field_name] = self.input_integer(prompt, default = default)\n\n return output_dict",
"def get_login_post_data(self, soup):\n data = []\n for i in soup.find_all('input'):\n name = i['name']\n if name == 'username':\n data.append((name, enrollware_username))\n elif name == 'password':\n data.append((name, enrollware_password))\n elif name == 'rememberMe':\n data.append((name, 'on'))\n else:\n data.append((name, i['value']))\n return data",
"def complete_form_data():\n\n missing_fields = {\n 'link' : 'http://bvsalud.org',\n 'originator' : 'BIREME',\n 'source_type': 1,\n 'source_language': 1,\n 'originator_location' : 1,\n\n 'main-descriptor-content_type-object_id-TOTAL_FORMS' : '1',\n\n 'main-descriptor-content_type-object_id-0-id' : '',\n 'main-descriptor-content_type-object_id-0-text' : 'malaria',\n 'main-descriptor-content_type-object_id-0-code' : '^d8462',\n 'main-descriptor-content_type-object_id-0-status' : '0',\n\n 'main-resourcethematic-content_type-object_id-TOTAL_FORMS' : '1',\n 'main-resourcethematic-content_type-object_id-0-thematic_area' : '1',\n 'main-resourcethematic-content_type-object_id-0-status' : '0',\n }\n\n complete_form_data = minimal_form_data()\n complete_form_data.update(missing_fields)\n\n return complete_form_data",
"def _dataset_fields(geno):\n return {'title': geno['title'], 'notes': geno.get('notes', '')}",
"def audit_fields(elem, fields):\r\n errs = []\r\n parsed = {}\r\n for field, field_type, dict_field in fields:\r\n if field not in elem.attrib:\r\n errs.append(('missing value', field))\r\n else:\r\n value = ensure_type(elem.get(field), field_type)\r\n if not value:\r\n errs.append(('wrong type', field))\r\n else:\r\n parsed[dict_field] = value\r\n \r\n if errs:\r\n parsed = None\r\n return parsed, errs"
]
| [
"0.71022296",
"0.6459439",
"0.6177059",
"0.6105775",
"0.599876",
"0.59322363",
"0.591192",
"0.58990943",
"0.58737135",
"0.5773327",
"0.5691127",
"0.56752855",
"0.5608315",
"0.5581501",
"0.55641794",
"0.5464512",
"0.5438915",
"0.54178506",
"0.54005194",
"0.53804064",
"0.5377911",
"0.5376467",
"0.5368574",
"0.53443676",
"0.53204733",
"0.5315964",
"0.5311938",
"0.52906084",
"0.5250445",
"0.5250349"
]
| 0.75426733 | 0 |
Try to find the name of username field among a list of input fields. Looks for the most evocative value for the "name" attribute | def __find_username_field_via_name(self, inputs):
for input in inputs:
for n in ('name', 'login', 'user', 'mail'):
if n in input.attrs['name'].lower():
return input.attrs['name']
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_names_users(self):\n user_1 = self.view.entry_player_1.get()\n user_2 = self.view.entry_player_2.get()\n if len(user_1) == 0 or len(user_2) == 0:\n\n tk.messagebox.showwarning(\"Warning\", \"Please enter players name\")\n self.logger.warning(\"Please enter players name\")\n return False\n self.update_players_name(user_1, user_2)\n return True",
"def preenche_username(self):\n driver = self.selenium_test.driver\n for names in ['username', 'senha']:\n element = driver.find_element_by_name(names)\n assert element is not None\n element.send_keys('ajna')",
"def unique_username(form, field) -> None:\n user = User.query.filter(User.name == field.data).first()\n if user is not None:\n raise ValidationError('There is already a user with this name')",
"def test_username_validators(self):\n name = self.form.name_for_user\n field_source = self.form.fields if name in self.form.fields else self.form.base_fields\n field = field_source.get(name, None)\n self.assertIsNotNone(field)\n expected = 2\n count_strict = expected + 1\n original_strict = getattr(self.form, 'strict_username', None)\n self.form.strict_username = False\n func = self.form.name_for_user_validators\n actual = self.validators_applied_count(field, func, field_source)\n required_not_strict = self.validators_effect_required(field, func, field_source)\n self.form.strict_username = True\n actual_strict = self.validators_applied_count(field, func, field_source)\n required_strict = self.validators_effect_required(field, func, field_source)\n\n self.assertIsNone(required_not_strict)\n self.assertEqual(expected, actual)\n self.assertIsNone(required_strict)\n self.assertEqual(count_strict, actual_strict)\n\n self.form.strict_username = original_strict\n if original_strict is None:\n del self.form.strict_username",
"def field_name(self, name):\n\t\tlogging.info(\"Getting the field name \" + str(name))\n\t\ttry:\n\t\t\tfieldName = self.fields.keys()[self.fields.values().index(name)]\n\t\t\tlogging.info(\"The field name for \" + str(name) + \" is \" + str(fieldName))\n\t\t\treturn fieldName\n\t\texcept:\n\t\t\tlogging.error(str(name)+ \" Field Name was not found\")\n\t\t\treturn False",
"def test_name_field(self):\n field = self.record.find('field[@name=\\'name\\']')\n self.assertEqual(field.text, 'GUH-ADT', 'Incorrect Name Field')",
"def ValidateName(args):\n account = properties.VALUES.core.account.Get(required=True)\n if account.find('@') == -1:\n username = account\n else:\n username = account[0:account.find('@')]\n\n args.name = args.name or username",
"def findNames(self, query):\t\t\t\t\t\t\t## Multiple Elements\n\t\ttry:\n\t\t\tassert(type(query)) == str or Pattern\n\t\t\treturn self.driver.find_elements_by_name(query)\n\t\texcept Exception as e:\n\t\t\tprint(\"Unable to find name {}\\n\\n{}\".format(query, e))\n\t\t\treturn -1",
"def username_field(self):\n\n if 'phone' in self.initial_data:\n return 'phone'\n if 'user_name' in self.initial_data:\n return 'user_name'\n return get_username_field()",
"def input_last_name(self, name):\n self.send_keys_to_element(self.lastname_textbox_selector, name)",
"def username_exist_check(form, field):\n username = field.data\n user = UserModel.query(UserModel.username==username).get()\n if user:\n raise validators.ValidationError('username exists, choose a different one!')",
"def parse_login_form_fields (self, form_soup):\n login_input_fields = {}\n login_inputs = form_soup.find_all('input')\n # gather all form fields, set an empty string as the default value\n for item in login_inputs:\n keys = dict(item.attrs).keys()\n if 'name' in keys and 'value' not in keys:\n login_input_fields[item['name']] = ''\n elif 'name' in keys and 'value' in keys:\n login_input_fields[item['name']] = item['value']\n return login_input_fields",
"def get_field(self, name):\n for field_name, field in self._all_fields.iteritems():\n if name == self._sanitize_field_name(field_name):\n return field",
"def find_focus_field(self):\n fields = self.get_current_fields()\n found_names = []\n for field_name, field in fields.items():\n has_focus = field.widget.attrs.get('autofocus', None)\n if has_focus:\n found_names.append(field_name)\n return found_names",
"def user_names(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"user_names\")",
"def user_names(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"user_names\")",
"def clean_username(self):\r\n username = self.cleaned_data.get(\"username\")\r\n\r\n if not username: \r\n return username\r\n\r\n \r\n if User.objects.filter(username__iexact=username).exclude(pk=self.instance.pk):\r\n raise forms.ValidationError(\"That username is already used.\")\r\n else:\r\n return username",
"def getPlayerName(i):\n while True:\n str_to_ask = \"Input name for Player \" + str(i) + \": \"\n name = input(str_to_ask).strip()\n if name != '': \n return name",
"def UserName_availabity():\r\n try:\r\n \r\n UserName=request.args.get(\"UserName\")\r\n user_details=fetch_details(UserName)\r\n user_name=user_details[0]['UserName']\r\n if str(UserName)==str(user_name):\r\n msg=\"UserName is already taken kindly choose another one\"\r\n except IndexError:\r\n msg=\"UserName is available.\"\r\n return msg",
"def test_interface_compute_name_for_user(self):\n self.form.name_for_user = self.form._meta.model.USERNAME_FIELD\n self.form.name_for_email = self.form._meta.model.get_email_field_name()\n expected = \"Unique test response value\"\n\n def confirm_func(username_field_name=None, email_field_name=None): return expected\n original_func = self.form.username_from_email_or_names\n self.form.username_from_email_or_names = confirm_func\n actual = self.form.compute_name_for_user()\n self.form.username_from_email_or_names = original_func\n\n self.assertEqual(expected, actual)",
"def input_first_name(self, name):\n self.send_keys_to_element(self.firstname_textbox_selector, name)",
"def __getitem__(self,name):\n items = [ f for f in self.fields if f.name() == name ]\n if len(items) > 0:\n return items[0]\n else:\n raise ValueError,\"No input field named: %s\" % name \n #return self.groups.get(name,None)",
"def get_user_list(self):\n self.user_list = db.get_user_list()\n for each in self.user_list:\n print each[1] # username\n while(True):\n selection = raw_input(\"Enter username to use\")\n if selection in self.user_list:\n return selection",
"def check_user_name(self, username):\n usernames = []\n for user in self.__users:\n if user['username'] == username:\n usernames.append(user)\n return usernames",
"def validate_username(form, field):\n if User.query.filter_by(username=form.username.data).first():\n form.username.errors.append(\"Username already taken!\")\n raise ValidationError",
"def _check_name_composing_fields(cls, **_kwargs: Any) -> List[checks.CheckMessage]:\n if not cls.name_composing_fields:\n return [\n checks.Warning(\n \"You must specify the fields that make up the name by \"\n \"listing them in name_composing_fields.\",\n obj=cls.__name__\n )\n ]\n errors = []\n for field in cls.name_composing_fields:\n try:\n get_fields_and_lookups(cls, field)\n except (exceptions.FieldDoesNotExist, exceptions.FieldError) as e:\n errors.append(\n checks.Error(\n \"Attribute 'name_composing_fields' contains invalid item: \"\n \"'%s'. %s\" % (field, e),\n obj=cls\n )\n )\n return errors",
"def GetFirstUserName(tableBodyElement):\n\n firstrow = GetElement(tableBodyElement, By.TAG_NAME, 'tr')\n username = GetText(firstrow, By.TAG_NAME, 'span')\n #printFP(username)\n return username",
"def test_get_field_names(self):\n\n survey = self._create_test_survey()\n assert survey is not None\n\n survey.save_user_answers(self.student, self.student_answers, self.course_id)\n survey.save_user_answers(self.student2, self.student2_answers, self.course_id)\n\n names = survey.get_field_names()\n\n assert sorted(names) == ['ddl', 'field1', 'field2']",
"def validate_name(self, username: str) -> bool:\n\t\treturn not self.registry.name_taken(username)",
"def clean_username(self):\n existing = User.objects.filter(username__iexact=self.cleaned_data['username'])\n if existing.exists():\n raise forms.ValidationError(_(\"This username is already taken.\"))\n else:\n return self.cleaned_data['username']"
]
| [
"0.62485945",
"0.60185325",
"0.57569325",
"0.56318814",
"0.5626669",
"0.5613124",
"0.5558389",
"0.55256784",
"0.55114937",
"0.5503218",
"0.54988205",
"0.5492651",
"0.54753804",
"0.54659086",
"0.54486895",
"0.54486895",
"0.54365706",
"0.5434026",
"0.54223335",
"0.5385833",
"0.53808117",
"0.53753847",
"0.53744024",
"0.53644294",
"0.5361418",
"0.5355816",
"0.53530574",
"0.53473264",
"0.5343717",
"0.5325438"
]
| 0.82819116 | 0 |
clear ip arp inspection statistics | def clear_ip_arp_inspection_stats(device):
log.info("clear ip arp inspection statistics on {device}".format(device=device))
dialog = Dialog([Statement(pattern=r'\[confirm\].*', action='sendline(\r)',loop_continue=True,continue_timer=False)])
try:
device.execute("clear ip arp inspection statistics", reply=dialog)
except SubCommandFailure as e:
raise SubCommandFailure(
"Could not clear ip arp inspection statistics on {device}. Error:\n{error}".format(device=device, error=e)
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def flush_arp(self):\n self.cli.cmd('ip neighbour flush all')",
"def clear_statistics(self, sniff_port_list):\n pass",
"def clear(self):\n self.sum_hit_at_one = 0.0\n self.sum_perr = 0.0\n self.sum_loss = 0.0\n self.map_calculator.clear()\n self.global_ap_calculator.clear()\n self.num_examples = 0",
"def clearHotspots( self ):\n self._hotspots = []",
"def cleanup(self):\n all_aps_info = self.zd.get_all_ap_info()\n all_aps_ins = self.testbed.components['AP']\n for ap_ins in all_aps_ins:\n for ap_info in all_aps_info:\n if ap_ins.base_mac_addr.upper() == ap_info.get('mac').upper() and ap_info.get('ip_addr') != '':\n ap_ins.ip_addr = ap_info.get('ip_addr')",
"def reset(self) -> None:\n self.memory = self.intcode.copy()\n self.ip = 0\n self.stdout.clear()",
"def clear_stats(self):\n self._stats = None",
"def fusion_api_clear_interconnect_ports(self, body, uri, api=None, param='', headers=None):\n param = '/statistics/reset%s' % param\n return self.ic.put(body=body, uri=uri, api=api, headers=headers, param=param)",
"def clear_data_structure():\n \n packetFilter = \"\"\n # Reset to its default value\n packetFilterOperator = \"AND\"\n \n del actions[:]\n variables.clear()\n symbol_table.clear()",
"def reset_stats() -> None:\n STATS[\"cleaned\"] = 0\n STATS[\"null\"] = 0\n STATS[\"unknown\"] = 0",
"def clear():",
"def run_forever(self):\n scapy.sniff(prn=self.arp_cb, filter=\"arp\", store=0, count=0)",
"def disable_proxy_arp():\n tap_name = sys.argv[1]\n print \"Disabling proxy arp on %s\" % tap_name\n with open('/proc/sys/net/ipv4/conf/%s/proxy_arp' % tap_name, 'wb') as f:\n f.write('0')\n print \"Disabled proxy arp on %s\" % tap_name",
"def clearListing(self, address: ghidra.program.model.address.Address) -> None:\n ...",
"def clear(self):\n self._fingerprint = 0",
"def stats_reset(self):\n self.stats.reset()",
"def stats_reset(self):\n self.stats.reset()",
"def reset(self) -> None:\n self.statistics = defaultdict(int)",
"def forget(self):\n self.ingress_tbl.clear()\n self.rootsw_tbl.clear()",
"def reset(self):\n self.sum_metric = 0.\n self.num_inst = 0.\n self.metrics.reset_stats()",
"def __clearBonuses(self, hp=1):\n if hp:\n self.hpBonuses = [{}, {}, {}, {}]\n else:\n self.kbBonuses = [{}, {}, {}, {}]",
"def reset(self) -> None:\n self.statistics = defaultdict(self._mp_hack)\n self._is_ddp = get_rank() > -1",
"def reset(self):\n self.stats = {}",
"def reset_hl_stats(self):\n\n self.ships_left = self.settings.ship_limit\n self.score = 0\n self.level = 1",
"def clear_summaries(self):\n\n\t\tself.summaries = [{ key: 0 for key in self.keys() }, 0]\n\t\tif self.encoded_summary == 1:\n\t\t\tfor i in range(len(self.encoded_keys)):\n\t\t\t\tself.encoded_counts[i] = 0",
"def removeAll(self, addr: ghidra.program.model.address.Address) -> None:\n ...",
"def clearstats(self) :\n\t\ttry :\n\t\t\treturn self._clearstats\n\t\texcept Exception as e:\n\t\t\traise e",
"def clearstats(self) :\n\t\ttry :\n\t\t\treturn self._clearstats\n\t\texcept Exception as e:\n\t\t\traise e",
"def clearstats(self) :\n\t\ttry :\n\t\t\treturn self._clearstats\n\t\texcept Exception as e:\n\t\t\traise e",
"def clearstats(self) :\n\t\ttry :\n\t\t\treturn self._clearstats\n\t\texcept Exception as e:\n\t\t\traise e"
]
| [
"0.7376848",
"0.6851561",
"0.61201775",
"0.6055344",
"0.6003891",
"0.58312273",
"0.5794471",
"0.57746357",
"0.57414865",
"0.57137805",
"0.5713237",
"0.563474",
"0.5631463",
"0.5629161",
"0.55918115",
"0.55341196",
"0.55341196",
"0.5510395",
"0.55059004",
"0.548406",
"0.5477334",
"0.5450359",
"0.54429257",
"0.54252225",
"0.54164386",
"0.5414092",
"0.5399604",
"0.5399604",
"0.5399604",
"0.5399604"
]
| 0.8344019 | 0 |
A dictionary to map required slots to an extracted entity | def slot_mappings(self) -> Dict[Text, Union[Dict, List[Dict]]]:
return {
"bug":[self.from_entity(
entity="bug",
intent="inform"),
self.from_text(
intent="inform")],
"beverage": [self.from_entity(
entity="beverage",
intent="inform"),
self.from_text(
intent="inform")],
"second_person_plural": [self.from_entity(
entity="second_person_plural",
intent="inform"),
self.from_text(
intent="inform")],
"cot_caught": [self.from_entity(
entity="cot_caught",
intent="inform"),
self.from_text(
intent="inform")],
"rain_sun": [self.from_entity(
entity="rain_sun",
intent="inform"),
self.from_text(
intent="inform")],
"crawfish": [self.from_entity(
entity="crawfish",
intent="inform"),
self.from_text(
intent="inform")],
"halloween": [self.from_entity(
entity="halloween",
intent="inform"),
self.from_text(
intent="inform")],
"sandwich": [self.from_entity(
entity="sandwich",
intent="inform"),
self.from_text(
intent="inform")],
"side_road": [self.from_entity(
entity="side_road",
intent="inform"),
self.from_text(
intent="inform")],
"shoes": [self.from_entity(
entity="shoes",
intent="inform"),
self.from_text(
intent="inform")],
"highway": [self.from_entity(
entity="highway",
intent="inform"),
self.from_text(
intent="inform")],
"yard_sale": [self.from_entity(
entity="yard_sale",
intent="inform"),
self.from_text(
intent="inform")],
"rubbernecking": [self.from_entity(
entity="rubbernecking",
intent="inform"),
self.from_text(
intent="inform")],
"frosting": [self.from_entity(
entity="frosting",
intent="inform"),
self.from_text(
intent="inform")],
"lawyer": [self.from_entity(
entity="lawyer",
intent="inform"),
self.from_text(
intent="inform")],
"kitty_corner": [self.from_entity(
entity="kitty_corner",
intent="inform"),
self.from_text(
intent="inform")],
"firefly": [self.from_entity(
entity="firefly",
intent="inform"),
self.from_text(
intent="inform")],
"verge": [self.from_entity(
entity="verge",
intent="inform"),
self.from_text(
intent="inform")],
"brew_thru": [self.from_entity(
entity="brew_thru",
intent="inform"),
self.from_text(
intent="inform")],
"water_fountain": [self.from_entity(
entity="water_fountain",
intent="inform"),
self.from_text(
intent="inform")]
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def slot_mappings(self) -> Dict[Text, Union[Dict, List[Dict]]]:\n\n return {\n \"product\": [\n self.from_entity(entity=\"product\", intent=[\"inform\"]),\n ],\n \"applicant_name\": [\n self.from_entity(entity=\"applicant_name\", intent=[\"inform\"]),\n ],\n \"applicant_dob\": [\n self.from_entity(entity=\"applicant_dob\", intent=[\"inform\"]),\n ],\n \"applicant_phoneno\": [\n self.from_entity(entity=\"applicant_phoneno\", intent=[\"inform\"]),\n ],\n \"applicant_address\": [\n self.from_entity(entity=\"applicant_address\", intent=[\"inform\"]),\n ]\n }",
"def slot_mappings(self):\n # type: () -> Dict[Text: Union[Dict, List[Dict]]]\n\n return {\"ingredient\": self.from_entity(entity=\"ingredient\",\n not_intent=\"greet\")}",
"def slot_mappings(self) -> Dict[Text, Union[Dict, List[Dict]]]:\n\n return {\n \"search_type\": [\n self.from_trigger_intent(\n intent=\"search_transactions\", value=\"spend\"\n ),\n self.from_trigger_intent(\n intent=\"check_earnings\", value=\"deposit\"\n ),\n ],\n \"time\": [\n self.from_entity(entity=\"time\"),\n ]\n }",
"def slot_mappings(self) -> Dict[Text, Union[Dict, List[Dict]]]:\n\n return {\n \"amount_of_money\": [\n self.from_entity(entity=\"amount-of-money\"),\n self.from_entity(entity=\"number\"),\n ],\n \"confirm\": [\n self.from_intent(value=True, intent=\"affirm\"),\n self.from_intent(value=False, intent=\"deny\"),\n ],\n }",
"def slot_mappings(self) -> Dict[Text, Union[Dict, List[Dict]]]:\n\n # return { \"faq_choice\": self.from_entity(\"faq_choice\"),\"faq_question\": self.from_entity(\"faq_question\"), \"faq_text\": [self.from_text()]}\n\n return {\"faq_choice\": [self.from_entity(\"faq_choice\"), self.from_text()], \"faq_text\": [self.from_text(), self.from_entity(entity=\"navigation\")]}",
"def slot_mappings(self):\n # type: () -> Dict[Text: Union[Dict, List[Dict]]]\n\n return {\"name\": [self.from_entity(entity=\"name\"),\n self.from_text()],\n \"roomcount\": [self.from_entity(entity=\"roomcount\"),\n self.from_text()],\n \"roomtype\": [self.from_entity(entity=\"roomtype\"),\n self.from_text()]}",
"def slot_mappings(self) -> Dict[Text, Union[Dict, List[Dict]]]:\n # type: () -> Dict[Text: Union[Dict, List[Dict]]]\n return {\n \"numero_prendas\": [\n self.from_entity(entity=\"number\"),\n\n ]\n }",
"def slot_mappings(self) -> Dict[Text, Union[Dict, List[Dict]]]:\n # type: () -> Dict[Text: Union[Dict, List[Dict]]]\n return {\n \"numero_prendas\": [\n self.from_entity(entity=\"number\"),\n\n ]\n }",
"def slot_mappings(self) -> Dict[Text, Union[Dict, List[Dict]]]:\n\n return {\n \"payment_amount\": [\n self.from_entity(entity=\"payment_amount\"),\n self.from_entity(entity=\"amount-of-money\"),\n self.from_entity(entity=\"number\"),\n ],\n \"confirm\": [\n self.from_intent(value=True, intent=\"affirm\"),\n self.from_intent(value=False, intent=\"deny\"),\n ],\n }",
"def slot_mappings(self) -> Dict[Text, Union[Dict, List[Dict[Text, Any]]]]:\n return {\"use_case\": self.from_text(intent=\"inform\")}",
"def slot_mappings(self):\n # type: () -> Dict[Text: Union[Dict, List[Dict]]]\n return {\"use_case\": self.from_text(intent=\"inform\")}",
"def slot_mappings(self) -> Dict[Text, Union[Dict, List[Dict]]]:\n # type: () -> Dict[Text: Union[Dict, List[Dict]]]\n return {\n \"fecha_hora\": [\n self.from_entity(entity=\"time\"),\n\n ]\n }",
"def slot_mappings(self) -> Dict[Text, Union[Dict, List[Dict]]]:\n # type: () -> Dict[Text: Union[Dict, List[Dict]]]\n return {\n \"fecha_hora\": [\n self.from_entity(entity=\"time\"),\n\n ]\n }",
"def slots(self):\n highSlots = self._getAttribute(Attribute.highSlots)\n medSlots = self._getAttribute(Attribute.medSlots)\n lowSlots = self._getAttribute(Attribute.lowSlots)\n\n if None in [highSlots, medSlots, lowSlots]:\n # This is a T3 ship.\n highSlots = medSlots = lowSlots = 0\n\n # Get rigs and subs.\n rigSlots = self._getAttribute(Attribute.rigSlots, 0)\n subSlots = self._getAttribute(Attribute.subSlots, 0)\n\n # Get missile and turret slots.\n missileSlots = self._getAttribute(Attribute.missileSlots, 0)\n turretSlots = self._getAttribute(Attribute.turretSlots, 0)\n\n return {\n \"highSlots\": int(highSlots),\n \"medSlots\": int(medSlots),\n \"lowSlots\": int(lowSlots),\n \"rigSlots\": int(rigSlots),\n \"subSlots\": int(subSlots),\n \"turretSlots\": int(turretSlots),\n \"missileSlots\": int(missileSlots)\n }",
"def convert_slots(slots: typing.List[Slot]) -> {}:\n resolved = {}\n\n for slot in slots:\n slot_name = slot.slot_name\n slot_value = slot.value\n\n slot_entity = slot.entity\n if slot_entity.startswith('snips/'):\n resolved[slot_name] = slot.value\n resolved[slot_name + '_raw'] = slot.raw_value\n else:\n # assuming Rasa NLU slot\n slot_extractor = slot_value['extractor']\n if not slot_extractor:\n slot_extractor = 'Unknown'\n else:\n del slot_value['extractor']\n\n if slot_name not in resolved:\n resolved[slot_name] = {}\n if slot_extractor not in resolved[slot_name]:\n resolved[slot_name][slot_extractor] = []\n\n # take the text entity extractor as the raw value\n if slot_extractor == 'CRFEntityExtractor':\n resolved[slot_name + '_raw'] = slot.raw_value\n\n resolved[slot_name][slot_extractor].append(slot_value)\n\n return resolved",
"def initSlotObjectDict(cls):\n restslotattributedict.update(dict({extension_tunnel: \"name\"}))\n restslotattributedict.update(dict({extension_circuit: \"name\"}))\n restslotattributedict.update(dict({extension_ip_interface: \"name\"}))\n restslotattributedict.update(dict({extension_ip_route: \"name\"}))\n restslotattributedict.update(dict({gigabitethernet: \"name\"}))\n restslotattributedict.update(dict({blade: \"slot_number\"}))",
"def required_slots(tracker: Tracker) -> List[Text]:\n\n return [\"ingredient\"]",
"def slot_info(self) -> Dict[int, Tuple[int, str, int]]:\n return self._slot_info",
"def required_slots(tracker: Tracker) -> List[Text]:\n\n return [\"product\", \"applicant_name\", \"applicant_dob\", \"applicant_phoneno\", \"applicant_address\"]",
"def required_slots(tracker: Tracker) -> List[Text]:\n return [\n \"tipo_prenda\",\n \"numero_prendas\",\n \"tipo_lavado\"\n ]",
"def required_slots(tracker: Tracker) -> List[Text]:\n return [\n \"tipo_prenda\",\n \"numero_prendas\",\n ]",
"def required_slots(tracker: Tracker) -> List[Text]:\n return [\n \"tipo_prenda\",\n \"tipo_compostura\"\n ]",
"def required_slots(tracker: Tracker) -> List[Text]:\n\n return [\"PERSON\", \"amount_of_money\", \"confirm\"]",
"def get_slots_query(scene: GameScene, entity: int):\n\n def query():\n\n paper_doll: PaperDoll = scene.cm.get_one(PaperDoll, entity)\n equipment = paper_doll.get_equipment()\n\n return [\n (k, scene.cm.get_one(Entity, v))\n for k, v in equipment.items()\n ]\n return query",
"def required_slots(self,tracker) -> List[Text]:",
"def get_assessment_part_mdata():\n return {\n 'assessment_part': {\n 'element_label': {\n 'text': 'assessment part',\n 'languageTypeId': str(DEFAULT_LANGUAGE_TYPE),\n 'scriptTypeId': str(DEFAULT_SCRIPT_TYPE),\n 'formatTypeId': str(DEFAULT_FORMAT_TYPE),\n },\n 'instructions': {\n 'text': 'accepts an osid.id.Id object',\n 'languageTypeId': str(DEFAULT_LANGUAGE_TYPE),\n 'scriptTypeId': str(DEFAULT_SCRIPT_TYPE),\n 'formatTypeId': str(DEFAULT_FORMAT_TYPE),\n },\n 'required': False,\n 'read_only': False,\n 'linked': False,\n 'array': False,\n 'default_id_values': [''],\n 'syntax': 'ID',\n 'id_set': [],\n },\n 'assessment': {\n 'element_label': {\n 'text': 'assessment',\n 'languageTypeId': str(DEFAULT_LANGUAGE_TYPE),\n 'scriptTypeId': str(DEFAULT_SCRIPT_TYPE),\n 'formatTypeId': str(DEFAULT_FORMAT_TYPE),\n },\n 'instructions': {\n 'text': 'accepts an osid.id.Id object',\n 'languageTypeId': str(DEFAULT_LANGUAGE_TYPE),\n 'scriptTypeId': str(DEFAULT_SCRIPT_TYPE),\n 'formatTypeId': str(DEFAULT_FORMAT_TYPE),\n },\n 'required': False,\n 'read_only': False,\n 'linked': False,\n 'array': False,\n 'default_id_values': [''],\n 'syntax': 'ID',\n 'id_set': [],\n },\n 'weight': {\n 'element_label': {\n 'text': 'weight',\n 'languageTypeId': str(DEFAULT_LANGUAGE_TYPE),\n 'scriptTypeId': str(DEFAULT_SCRIPT_TYPE),\n 'formatTypeId': str(DEFAULT_FORMAT_TYPE),\n },\n 'instructions': {\n 'text': 'enter a cardinal value',\n 'languageTypeId': str(DEFAULT_LANGUAGE_TYPE),\n 'scriptTypeId': str(DEFAULT_SCRIPT_TYPE),\n 'formatTypeId': str(DEFAULT_FORMAT_TYPE),\n },\n 'required': False,\n 'read_only': False,\n 'linked': False,\n 'array': False,\n 'default_cardinal_values': [None],\n 'syntax': 'CARDINAL',\n 'minimum_cardinal': None,\n 'maximum_cardinal': None,\n 'cardinal_set': []\n },\n 'allocated_time': {\n 'element_label': {\n 'text': 'allocated time',\n 'languageTypeId': str(DEFAULT_LANGUAGE_TYPE),\n 'scriptTypeId': str(DEFAULT_SCRIPT_TYPE),\n 'formatTypeId': str(DEFAULT_FORMAT_TYPE),\n },\n 'instructions': {\n 'text': 'enter a valid duration object.',\n 'languageTypeId': str(DEFAULT_LANGUAGE_TYPE),\n 'scriptTypeId': str(DEFAULT_SCRIPT_TYPE),\n 'formatTypeId': str(DEFAULT_FORMAT_TYPE),\n },\n 'required': False,\n 'read_only': False,\n 'linked': False,\n 'array': False,\n 'default_duration_values': [None],\n 'syntax': 'DURATION',\n 'date_time_set': [],\n },\n }",
"def required_slots(tracker):\n print(tracker.get_slot('order_number'))\n return [\"order_number\"]",
"def _packaged_dict_for_entity(rt):\n entity = rt.entity\n return {u'entity_id': entity.id,\\\n u'name': entity.aggregation_paths['_geo'][-1]}",
"def to_dict(self):\n print(\"\\n\\nSTARTING...\")\n ea = db.session.query(entity_assets).filter(entity_assets.c.entity_id == self.id).all()\n print(\"\\n\\nmade it\", ea)\n em = db.session.query(entity_meters).filter(entity_meters.c.entity_id == self.id).all()\n est = db.session.query(entity_statuses).filter(entity_statuses.c.entity_id == self.id).all()\n \n return {\n \"id\": self.id,\n \"user_id\": self.user_id,\n \"creator\": self.user.username,\n \"type\": self.type,\n \"category\": self.category,\n \"title\": self.title,\n \"description\": self.description,\n \"color\": self.color,\n \"icon\": self.icon,\n \"image\": self.image,\n \"created_at\": self.created_at,\n\n \"location_id\": self.location_id,\n \"generated_id\": self.generated_id,\n \n \"assets\": [(a.asset_id, a.quantity) for a in ea],\n \"statuses\": [(s.status_id, s.expiry) for s in est],\n \"meters\": [(m.meter_id, m.points) for m in em],\n \"slots\": [(slot.slot_id, slot.filler_id) for slot in self.entity_slots],\n }",
"def required_slots(tracker: Tracker) -> List[Text]:\n\n return [\"search_type\", \"time\"]"
]
| [
"0.6995648",
"0.6558813",
"0.64900076",
"0.6489124",
"0.64611995",
"0.64086974",
"0.63684666",
"0.63684666",
"0.6338538",
"0.62612367",
"0.62104833",
"0.6197755",
"0.6197755",
"0.5999761",
"0.59434724",
"0.59309",
"0.588155",
"0.58714616",
"0.5779117",
"0.5742074",
"0.5714348",
"0.57009083",
"0.565323",
"0.5623322",
"0.5564382",
"0.5521141",
"0.5488658",
"0.54579973",
"0.5399381",
"0.53929"
]
| 0.6862953 | 1 |
Database of multiple choice answers | def answers_db() -> Dict[str, List]:
return{"lawyer":["either","other","law","boy"],
"cot_caught":["different","other","same"],
"second_person_plural":["other","y'all","yins",
"you","you'uns","you all","you guys","you lot",
"yous, youse"],
"yard_sale":["car boot","car boot sale",
"carport sale","garage sale","jumble (sale)",
"other","patio sale","rummage sale","sidewalk sale",
"stoop sale","tag sale","thrift sale","yard sale"],
"verge":["beltway","berm","curb strip",
"I have no word for this","other","parking",
"terrace","tree lawn","verge"],
"sandwich":["baguette","bomber","grinder","hero",
"hoagie","I have no word for this","Italian sandwich",
"other","poor boy","sarney","sub"],
"firefly":["firefly","I have no word for this",
"I use lightning bug and firefly interchangeably",
"lightning bug","other","peenie wallie"],
"crawfish":["craw","crawdad","crawfish","crayfish",
"crowfish","I have no word for this critter","mudbug","other"],
"shoes":["gymshoes","I have no general word for this",
"jumpers","other","runners","running shoes","sand shoes",
"shoes","sneakers","tennis shoes","trainers"],
"bug":["basketball bug","centipede","doodle bug",
"I have no idea what this creature is",
"I know what this creature is, but have no word for it",
"millipede","other","pill bug","potato bug","roll-up bug",
"roly poly","sow bug","twiddle bug","wood louse"],
"kitty_corner":["catercorner","catty-corner",
"I can only use \"diagonal\" for this","I have no term for this",
"kitacorner","kitty-corner","kitty cross","kitty wampus","other"],
"highway":["a freeway has limited access (no stop lights, no intersections), whereas a highway can have stop lights and intersections",
"a freeway is bigger than a highway",
"a freeway is free (i.e., doesn't charge tolls); a highway isn't",
"expressway","freeway","highway","other","parkway",
"throughway/thru-way","turnpike"],
"rain_sun":["fox's wedding","I have no term or expression for this",
"liquid sun","monkey's wedding","other","pineapple rain","sunshower",
"the devil is beating his wife","the wolf is giving birth"],
"frosting":["both","frosting","icing",
"icing is thinner than frosting, white, and/or made of powdered sugar and milk or lemon juice",
"neither","other"],
"side_road":["access road","feeder road","frontage road",
"gateway","I've never heard of this concept","other",
"service road","we have them but I have no word for them"],
"water_fountain":["bubbler","drinking fountain","other","water bubbler",
"water fountain"],
"beverage":["cocola","coke","dope","fizzy drink",
"lemonade","other","pop","soda","soft drink","tonic"],
"rubbernecking":["curiosity delay","gapers' block",
"gapers' delay","gawk block","I have no word for this",
"Lookie Lou","other","rubberneck","rubbernecking",
"rubbernecking is the thing you do, not the traffice jam"],
"halloween":["cabbage night","devil's eve","devil's night",
"gate night","goosy night","I have no word for this",
"mischief night","other","trick night"],
"brew_thru":["beer barn","beverage barn","bootlegger","brew thru",
"I have never heard of such a thing","other","party barn",
"we have these in my area, but we have no special term for them"]} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_answers(self):\r\n pass",
"def answers_all(self):\n return self.answer_set.all()",
"def quiz_selection():\n\n verbs = crud.get_verbs()\n tenses = crud.get_tenses()\n\n return render_template(\"verb-conjugation.html\", verbs=verbs, tenses=tenses)",
"def collection(self):\n questions = []\n choice_list = []\n answers = []\n\n if self.form=='The correct German word':\n for i in range(self.num_ques):\n question, options, answer = self.generate_eng2ger()\n questions.append(question)\n choice_list.append(options)\n answers.append(answer)\n else:\n for i in range(self.num_ques):\n question, options, answer = self.generate_ger2eng()\n questions.append(question)\n choice_list.append(options)\n answers.append(answer)\n\n return questions, choice_list, answers",
"def get_answers(self):\r\n anshtml = '<span class=\"openended-answer\"><pre><code>{0}</code></pre></span>'.format(self.answer)\r\n return {self.answer_id: anshtml}",
"def quick_quiz(character_set):",
"def get_answers(self):\r\n return self.answer_values",
"def gen_questions(self, number_of_questions):",
"def answers():\n answer = ([[1, 2, 2, 1], # 3. Not necessarily legal if terms of service disallow\n ['https://soundcloud.com/', # 1. soundcloud, some disallow\n 'https://cfmriweb.ucsd.edu/', # 1. wiki, some disallow\n 'https://www.thesaurus.com/', # 1. thesaurus, some disallow\n 'https://ucsd.sona-systems.com/', # 2. SONA, disallow completely\n 'https://www.linkedin.com/', # 2. LinkedIn, disallow completely\n 'https://facebook.com/']]) # 2. Facebook, disallow completely\n return answer",
"def generate_eng2ger(self):\n question = []\n data_len = len(self.df)+1\n n = random.randint(0, data_len)\n lst = []\n options = []\n for i in range(3):\n no = random.randint(0, data_len)\n lst.append(no)\n lst.append(n)\n lst = random.sample(lst, len(lst))\n ### Creating the question\n question.append(f'Select a german word for \"{self.df.iloc[n, 1]}\":')\n ### Creating options/choices\n for l in lst:\n options.append(f'{self.df.iloc[l, 0]}')\n ### Allocating the answer\n answer = self.df.iloc[n, 0]\n\n return question, options, answer",
"def picksomechoices(question, answer):\n \"\"\" because of the way dict() works all 4 choices will be unique \"\"\"\n choices = dict()\n choices[question] = answer\n for choice in random.sample(nlist, 10):\n choices[choice[0]] = choice[1]\n if len(choices.keys()) > 3:\n break\n\n return choices",
"def picksomechoices(question, answer):\n \"\"\" because of the way dict() works all 4 choices will be unique \"\"\"\n choices = dict()\n choices[question] = answer\n for choice in random.sample(nlist, 10):\n choices[choice[0]] = choice[1]\n if len(choices.keys()) > 3:\n break\n\n return choices",
"def picksomechoices(question, answer):\n \"\"\" because of the way dict() works all 4 choices will be unique \"\"\"\n choices = dict()\n choices[question] = answer\n for choice in random.sample(nlist, 10):\n choices[choice[0]] = choice[1]\n if len(choices.keys()) > 3:\n break\n\n return choices",
"def picksomechoices(question, answer):\n \"\"\" because of the way dict() works all 4 choices will be unique \"\"\"\n choices = dict()\n choices[question] = answer\n for choice in random.sample(nlist, 10):\n choices[choice[0]] = choice[1]\n if len(choices.keys()) > 3:\n break\n\n return choices",
"def test_answers(self):\n row = self.dataset.iloc[87]\n self.assertEqual(\"When was the trial due to start ?\", row.question)\n self.assertEqual([\"Wednesday\"], _get_answers(row))\n\n row = self.dataset.iloc[31221]\n self.assertEqual(\"Whose rights have not improved under the Taliban ?\", row.question)\n self.assertEqual([\"Conditions for women\"], _get_answers(row))\n\n row = self.dataset.iloc[45648]\n self.assertEqual(\"What does Vertu make ?\", row.question)\n self.assertEqual([\"phones starting at $ 6,000\"], _get_answers(row))",
"def multiple_choice(correct_choice, all_choices):\r\n # format for character is {'あ': 'ah'}\r\n # format for character is {'japanese character': 'english sound'}\r\n\r\n # get 3 different characters from all_choices, randomly\r\n # add all 3 'values', of the k:v pair, to the choices\r\n # if the input from the user != the 'key' of the correct character then it is wrong\r\n # if wrong, try again.\r",
"def picksomequestions():\n answers = dict()\n for question in nlist:\n answers[question[0]] = question[1]\n if len(answers.keys()) > 49:\n break\n\n return answers",
"def __init__(self):\n self.answers = []",
"def check():\n db.create_all()\n allEntries = Entry.query.all()\n return render_template('answers.html',result=allEntries)",
"def question_generator(self):\n self.status_conv = 'yes_no_question_asked'\n questions = config.questions\n if not self.voyage.get('voyageurs') and 'voyageur_add' not in self.infos_needed:\n self.infos_needed.append('voyageur_add')\n if self.infos_needed:\n if self.is_hotel_needed() and 'hotel' not in self.infos_needed and 'hotel' not in self.voyage:\n self.infos_needed.insert(1, 'hotel')\n self.hotel_asked = True\n key = self.infos_needed[0]\n self.info_asked = key\n return questions[key]\n else :\n self.status_conv = 'confirmation_asked'\n return self.conv_recap()",
"def display_possible_answers(question):\n answers = question['incorrect'] + [question['correct']]\n random.shuffle(answers)\n answer_dict = {}\n for i, answer in enumerate(answers):\n answer_dict[str(i + 1)] = answer\n print(f\"{i + 1}: {answer}\\n\")\n return answer_dict",
"def answers(self):\n return self.answer_set.filter(active=True)",
"def check_that_suggested_answers_work(problem):\r\n # These are actual answers we get from the responsetypes\r\n real_answers = problem.get_question_answers()\r\n\r\n # all_answers is real_answers + blanks for other answer_ids for which the\r\n # responsetypes can't provide us pre-canned answers (customresponse)\r\n all_answer_ids = problem.get_answer_ids()\r\n all_answers = dict((answer_id, real_answers.get(answer_id, \"\"))\r\n for answer_id in all_answer_ids)\r\n\r\n log.debug(\"Real answers: {0}\".format(real_answers))\r\n if real_answers:\r\n try:\r\n real_results = dict((answer_id, result) for answer_id, result\r\n in problem.grade_answers(all_answers).items()\r\n if answer_id in real_answers)\r\n log.debug(real_results)\r\n assert(all(result == 'correct'\r\n for answer_id, result in real_results.items()))\r\n except UndefinedVariable as uv_exc:\r\n log.error(\"The variable \\\"{0}\\\" specified in the \".format(uv_exc) +\r\n \"solution isn't recognized (is it a units measure?).\")\r\n except AssertionError:\r\n log.error(\"The following generated answers were not accepted for {0}:\"\r\n .format(problem))\r\n for question_id, result in sorted(real_results.items()):\r\n if result != 'correct':\r\n log.error(\" {0} = {1}\".format(question_id, real_answers[question_id]))\r\n except Exception as ex:\r\n log.error(\"Uncaught error in {0}\".format(problem))\r\n log.exception(ex)",
"def load_data(squad_per_lang):\n question_set = QuestionSet()\n candidate_set = CandidateSet()\n\n for lang, squad in squad_per_lang.items():\n for question, answer, context, context_sentences, xling_id, context_id in (\n generate_examples(squad, lang)):\n question = Question(question, xling_id, lang)\n question_set.add(question)\n assert answer in context_sentences, (\n \"answer doesn't appear in context_sentences\")\n for sent_pos, sentence in enumerate(context_sentences):\n candidate = Candidate(sentence, context, lang, context_id, sent_pos)\n candidate = candidate_set.add_or_retrieve_candidate(candidate)\n if sentence == answer:\n candidate_set.update_xling_id(candidate, xling_id)\n print(\"Totals across languages: questions={}, candidates={}\".format(\n len(question_set.as_list()), len(candidate_set.as_list())))\n\n return question_set, candidate_set",
"def test_store_three_responses(self):\n question = \"What language did you first learn to speak?\"\n my_survey = AnonymousSurvey(question)\n responses = ['English','German','French']\n for response in responses:\n my_survey.store_response(response)\n\n for response in responses:\n self.assertIn(response, my_survey.responses)",
"def add_answers(conn, cur, answers):\n \n print 'Adding answers...',\n \n for i, answer in enumerate(answers):\n cur.execute('INSERT INTO answers VALUES (\"{_id}\", \"{task_id}\", \"{text}\")'.format(\n _id = i+1,\n task_id = answer['task_id'],\n text = answer['text']\n )\n )\n \n conn.commit()\n \n print 'done.'",
"def picksomequestions():\n answers = dict()\n for question in nlist:\n answers[question[0]] = question[1]\n if len(answers.keys()) > 50:\n break\n\n return answers",
"def picksomequestions():\n answers = dict()\n for question in nlist:\n answers[question[0]] = question[1]\n if len(answers.keys()) > 50:\n break\n\n return answers",
"def picksomequestions():\n answers = dict()\n for question in nlist:\n answers[question[0]] = question[1]\n if len(answers.keys()) > 50:\n break\n\n return answers",
"def choose(multichoice, question, correct):\n counter = 1\n ncorrect = 0\n allowed = '12345'\n print(\"choose a synonym for \"+question)\n for option in multichoice.values():\n print(str(counter)+\")\"+option)\n if option == correct:\n ncorrect = counter\n counter = counter + 1\n res = raw_input(\">\")\n while (len(res) != 1 or res not in allowed):\n \tres = raw_input(\">\")\n #return res\n if int(res) == ncorrect:\n print(\"CORRECT!\")\n return 1\n else:\n print(\"\\n >>>>>> The answer is actually -- \" + correct)\n\tprint \n return 0"
]
| [
"0.6372834",
"0.6058923",
"0.6049124",
"0.60463506",
"0.6011711",
"0.59754276",
"0.5964876",
"0.58967185",
"0.58833796",
"0.5869681",
"0.58407676",
"0.58407676",
"0.58407676",
"0.58407676",
"0.5830177",
"0.58156514",
"0.5808251",
"0.5800475",
"0.5790557",
"0.5755108",
"0.5726378",
"0.572568",
"0.5718635",
"0.57025373",
"0.5694916",
"0.568736",
"0.56655353",
"0.56655353",
"0.56655353",
"0.56466615"
]
| 0.6919275 | 0 |
Function to generate our validation functions, since they're pretty much the same for each slot | def create_validation_function(name_of_slot):
def validate_slot(
self,
value: Text,
dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any],
) -> Dict[Text, Any]:
"""Validate user input."""
if value.lower() in self.answers_db()[name_of_slot]:
# validation succeeded, set the value of the slot to
# user-provided value
return {name_of_slot: value}
else:
# find the closest answer by some measure (edit distance?)
choices = self.answers_db()[name_of_slot]
answer = process.extractOne(value.lower(), choices)
# check to see if distnace is greater than some threshold
if answer[1] < 45:
# if so, set slot to "other"
return {name_of_slot: "other"}
else:
return {name_of_slot: answer[0]}
return(validate_slot) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def validate():",
"def question_new_validate():",
"def __validate():\n # TODO: implement",
"def validate(self):",
"def validate(self):",
"def validate(self):\n\n\tmissing = []\n\tbadcheck = []\n\tfor name, checkfunc, params in self._required:\n\t try:\n\t\targ = self.make_required(name)\n\t\tif checkfunc is not None:\n\t\t if params is not None:\n\t\t\tparams = (self.param_map[name], arg) + params\n\t\t else:\n\t\t\tparams = (self.param_map[name], arg)\n\t\t try:\n\t\t\tapply(checkfunc, params)\n\t\t except ValidationError, msg:\n\t\t\tbadcheck.append(msg)\n\t except ValidationError, args:\n\t\tmissing.append(args)\n\n\tfor (name, checkfunc, params) in self._optional:\n\t tup = self.make_optional(name)\n\t if tup and checkfunc is not None:\n\t\tif params is not None:\n\t\t params = (self.param_map[name], tup) + params\n\t\telse:\n\t\t params = (self.param_map[name], tup)\n\t\ttry:\n\t\t apply(checkfunc, params)\n\t\texcept ValidationError, msg:\n\t\t badcheck.append(msg)\n\n\tif (missing or badcheck) and self.log_errors:\n\t self.log_error(missing, badcheck)\n\n\tif (missing or badcheck) and self.generate_error_page:\n\t self.generate_HTML(missing, badcheck)\n\n\tself.missing = missing\n\tself.badcheck = badcheck\n\n\treturn not (missing or badcheck)",
"def validate(self, name, values):\r\n \r\n pass",
"def build_validation_fn(action_validators, parameter_validators, data_validators):\n ins_action_validators = []\n ins_parameter_validators = {}\n ins_data_validators = {}\n\n # Validators instantiation if they are not already\n for cls_or_inst in action_validators:\n if isclass(cls_or_inst):\n ins_action_validators.append(cls_or_inst())\n else:\n ins_action_validators.append(cls_or_inst)\n for parameter_field, parameter_validators in parameter_validators.items():\n for validator in parameter_validators:\n if isclass(validator):\n ins_parameter_validators.setdefault(parameter_field, []).append(validator())\n else:\n ins_parameter_validators.setdefault(parameter_field, []).append(validator)\n for data_field, data_validators in data_validators.items():\n for validator in data_validators:\n if isclass(validator):\n ins_data_validators.setdefault(data_field, []).append(validator())\n else:\n ins_data_validators.setdefault(data_field, []).append(validator)\n\n def fn(**kwargs):\n # Action validators\n for valid in ins_action_validators:\n if not valid.validation_statement(**kwargs):\n raise ValueError(valid.error_message(**kwargs))\n # Parameters validators\n for param_field, validators in ins_parameter_validators.items():\n for validator in validators:\n if param_field in kwargs[\"params\"] and \\\n not validator.is_valid(kwargs[\"request\"], value=kwargs[\"params\"][param_field]):\n raise ValueError(validator.error_message(error_field=param_field, **kwargs))\n\n # Data validators\n for data_field, validators in ins_data_validators.items():\n for validator in validators:\n if data_field in kwargs[\"params\"][\"data\"] and \\\n not validator.is_valid(kwargs[\"request\"], value=kwargs[\"params\"][\"data\"][data_field]):\n raise ValueError(validator.error_message(error_field=data_field, **kwargs))\n\n return fn",
"def validate(self):\n for rule in self.get_rules():\n rule.validate(self.get_val())",
"def validate(self):\n ...",
"def validate(self, arg):\n new_values = []\n for i in self.cast(arg):\n# new_values.append(self.checkValues(i))\n new_values.append(self.template.validate(i))\n return new_values",
"def __get_validators__(cls) -> Generator:\n yield cls.validate_type\n yield cls.validate_strip\n yield cls.validate_allow_empty\n yield cls.validate_max_length\n yield cls.validate_min_length\n yield cls.validate_regex",
"def data_validation(self):\n print \"Starting basic data validation ...\"\n allattr = dir(bdefile)\n idx = [ii for ii, attr in enumerate(allattr) if \"validate_oee_error_\" in attr]\n vfunclist = []\n for ii in idx:\n vfunclist += [allattr[ii]]\n\n errorcodes = []\n for vfunc in vfunclist:\n errorcodes += [int(vfunc.split('_')[3])]\n\n errorcodes.sort()\n\n for code in errorcodes:\n sys.stdout.write(\"Checking validation rule %d ... \" % code)\n success, lines = (eval('self.validate_oee_error_'+str(code)))()\n if success:\n print \"PASSED\"\n else:\n self.report_error(code, lines)\n return False\n \n print \"Basic data validation succeeded.\\n\"\n return True",
"def validate(self):\n # should this just be folded into the constructor for ProgramNode?\n for func in self.functions:\n func.validate()\n self.validated = True",
"def _validate(self):\n pass",
"def run_parameters_validations(self):\n if self.risk_rule:\n if 'connectApi' not in self.services:\n return_error(\"You entered a risk rule but the 'connectApi' service is not chosen. \"\n \"Add the 'connectApi' service to the list or remove the risk rule.\")\n else:\n for risk_rule in self.risk_rule:\n if not is_valid_risk_rule(self, risk_rule):\n return_error(f\"The given risk rule: {risk_rule} does not exist,\"\n f\"please make sure you entered it correctly. \\n\"\n f\"To see all available risk rules run the '!rf-get-risk-rules' command.\")\n\n if self.fusion_file_path is not None:\n if 'fusion' not in self.services:\n return_error(\"You entered a fusion file path but the 'fusion' service is not chosen. \"\n \"Add the 'fusion' service to the list or remove the fusion file path.\")",
"def check_validity(self):",
"def validate(cls, data, errors):",
"def test_validators():",
"def test_validation_function(self):\n\n for data in ('tbldata', 'dihedraldata', 'rdcdata', 'danidata'):\n v = self.web.query_nodes(key=data)\n\n if not v.empty():\n self.assertTrue(validate_tbl(v.value, pcs=False))",
"def generate_validator(self, t, **kwargs):\n def validator(val, field_name=''):\n if val is None and 'required' in kwargs and not kwargs['required']:\n return True\n elif val is None:\n raise ValidationError('%s: None is not allowed (field required)' % field_name)\n if not isinstance(val, t):\n raise ValidationError('%s: \"%s\" not an instance of %s but an instance of %s' %\n (field_name, val, t, type(val)))\n if isinstance(val, dict):\n check_keys(val) # check against . & $ in keys\n return True\n return validator",
"def validate(self):\n pass",
"def validate(self):\n pass",
"def validate(self):\n pass",
"def validate(self):\n pass",
"def validate(self):\n pass",
"def validate(self):\n pass",
"def validate(self):\n pass",
"def validate(self):\n pass",
"def validate_chain():"
]
| [
"0.69904894",
"0.6749303",
"0.65610975",
"0.64473385",
"0.64473385",
"0.6413806",
"0.63149947",
"0.6285735",
"0.62739533",
"0.62182045",
"0.61723286",
"0.61714697",
"0.61690015",
"0.6168363",
"0.6159842",
"0.61568487",
"0.6098348",
"0.6089911",
"0.5990922",
"0.5985952",
"0.5981574",
"0.59741575",
"0.59741575",
"0.59741575",
"0.59741575",
"0.59741575",
"0.59741575",
"0.59741575",
"0.59741575",
"0.59686726"
]
| 0.7123748 | 0 |
Database of slot values & corresponding questions | def slot_key_db() -> Dict[str, List]:
return {'q50': 'second_person_plural',
'q28': 'cot_caught',
'q80': 'rain_sun',
'q66': 'crawfish',
'q110': 'halloween',
'q64': 'sandwich',
'q90': 'side_road',
'q105': 'beverage',
'q73': 'shoes',
'q79': 'highway',
'q58': 'yard_sale',
'q107': 'rubbernecking',
'q94': 'frosting',
'q14': 'lawyer',
'q76': 'kitty_corner',
'q65': 'firefly',
'q60': 'verge',
'q118': 'brew_thru',
'q103': 'water_fountain'} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def return_questions_data():\n conn = sq.connect(host='localhost', user='root',\n password='student', database='quiz')\n cursor = conn.cursor()\n \n cursor.execute(\"select * from questions\")\n data = cursor.fetchall()\n\n table = PrettyTable()\n table.field_names = ['Question', 'Answer']\n questions = {}\n for q,a in data:\n table.add_row([q,a])\n questions[q] = a\n conn.close()\n\n return table, questions",
"def required_slots(tracker: Tracker) -> List[Text]:\n\n return [\"product\", \"applicant_name\", \"applicant_dob\", \"applicant_phoneno\", \"applicant_address\"]",
"def required_slots(self,tracker) -> List[Text]:",
"def slot_info(self) -> Dict[int, Tuple[int, str, int]]:\n return self._slot_info",
"def createVarsAndConstraints(self, solver):\n\n # maintain dict from variables for actual slots to variables for meeting_times\n vasl_to_mtx = { }\n \n exactly_one_slot = solver.Constraint(1, 1)\n\n # One decision variable for each element of this.meeting_times, of which exactly one must be true (=1)\n for s in self.meeting_times:\n \n x = solver.IntVar(0, 1, self.name + \" in \" +str(s))\n self.vars_meeting_time[s] = x\n exactly_one_slot.SetCoefficient(x, 1)\n\n actual_slots = list(s)\n\n # One decision variable for each actual slot\n for asl in actual_slots:\n if asl not in self.vars_actualslots:\n vasl = solver.IntVar(0, 1, self.name + \" in actual \" + asl)\n self.vars_actualslots[asl] = vasl\n\n vasl = self.vars_actualslots[asl] \n # Ensure that if x is true (i.e., slot s is chosen) then\n # vasl is true (i.e., we are marked as this course being in this actual slot\n cn = solver.Constraint(0, solver.infinity())\n \n cn.SetCoefficient(x, -1)\n cn.SetCoefficient(vasl, 1)\n\n if vasl not in vasl_to_mtx:\n vasl_to_mtx[vasl] = []\n\n\n vasl_to_mtx[vasl].append(x)\n\n # now go through the vasl_to_mtx dict to make sure that vasl is true only if one of its slots is true\n for vasl in vasl_to_mtx:\n cn = solver.Constraint(0, solver.infinity())\n\n cn.SetCoefficient(vasl, -1)\n for sx in vasl_to_mtx[vasl]:\n cn.SetCoefficient(sx, 1)",
"def __init_q_values(self, game_state):\n encoded_game_state = self.__encode_state(game_state)\n if encoded_game_state in self.q_values:\n return\n self.q_values[encoded_game_state] = {}\n for free_seat in self.__get_free_seats(game_state):\n self.q_values[encoded_game_state][free_seat] = (self.INITIAL_STATE_VALUE, 0)",
"def get_slots(self) -> int:",
"def data_for_question(self, question_type):\n\t\treturn {}",
"def gen_questions(self, number_of_questions):",
"def slots(self):\n highSlots = self._getAttribute(Attribute.highSlots)\n medSlots = self._getAttribute(Attribute.medSlots)\n lowSlots = self._getAttribute(Attribute.lowSlots)\n\n if None in [highSlots, medSlots, lowSlots]:\n # This is a T3 ship.\n highSlots = medSlots = lowSlots = 0\n\n # Get rigs and subs.\n rigSlots = self._getAttribute(Attribute.rigSlots, 0)\n subSlots = self._getAttribute(Attribute.subSlots, 0)\n\n # Get missile and turret slots.\n missileSlots = self._getAttribute(Attribute.missileSlots, 0)\n turretSlots = self._getAttribute(Attribute.turretSlots, 0)\n\n return {\n \"highSlots\": int(highSlots),\n \"medSlots\": int(medSlots),\n \"lowSlots\": int(lowSlots),\n \"rigSlots\": int(rigSlots),\n \"subSlots\": int(subSlots),\n \"turretSlots\": int(turretSlots),\n \"missileSlots\": int(missileSlots)\n }",
"def register_question(obj):\n if Question.registered_questions.has_key(obj.game_round) and \\\n obj.tier in Question.registered_questions[obj.game_round]:\n raise IndexError(\"Slot for Question {0} is alredy taken\".format(\n obj.question,))\n elif Question.registered_questions.has_key(obj.game_round):\n Question.registered_questions[obj.game_round].append(obj.tier)\n else:\n Question.registered_questions[obj.game_round] = [obj.tier]",
"def qubit_values(self):\n return self.label",
"def required_slots(tracker: Tracker) -> List[Text]:\n print(\"required_slots(tracker: Tracker)\")\n return [\"name\",\"roomcount\",\"roomtype\"]",
"def required_slots(tracker: Tracker) -> List[Text]:\n return [\n \"sucursal\",\n \"fecha_hora\"\n ]",
"def __init__(self, slots):\n self.slots = slots\n self.table = []\n for i in range(slots):\n self.table.append([])",
"def __init__(self, slots):\n self.slots = slots\n self.table = []\n for i in range(slots):\n self.table.append([])",
"def required_slots(tracker: Tracker) -> List[Text]:\n\n return [\"PERSON\", \"amount_of_money\", \"confirm\"]",
"def get_slots_query(scene: GameScene, entity: int):\n\n def query():\n\n paper_doll: PaperDoll = scene.cm.get_one(PaperDoll, entity)\n equipment = paper_doll.get_equipment()\n\n return [\n (k, scene.cm.get_one(Entity, v))\n for k, v in equipment.items()\n ]\n return query",
"def required_slots(tracker: Tracker) -> List[Text]:\n return [\n \"domicilio\",\n \"fecha_hora\"\n ]",
"def process_question(qu):\n\n ## global ranking\n rank_info = {}\n rank_info_k = [\"viewcount\",\"score\",\"favoritecount\"]\n for k in rank_info_k:\n rank_info[k] = int(qu[k])\n qu.pop(k,None)\n\n rank_info[\"creationdate\"] = qu[\"creationdate\"]\n\n if qu[\"acceptedanswer\"]:\n qu[\"acceptedanswer\"] = list(qu[\"acceptedanswer\"])\n else:\n qu[\"acceptedanswer\"] = []\n\n qu.pop('comments',None) # discard comments, maybe add back later\n qu[\"rank_info\"] = rank_info\n\n return qu",
"def required_slots(tracker: Tracker) -> List[Text]:\n return [\n \"tipo_prenda\",\n \"numero_prendas\",\n \"tipo_lavado\"\n ]",
"def required_slots(tracker: Tracker) -> List[Text]:\n return [\n \"tipo_prenda\",\n \"numero_prendas\",\n ]",
"def required_slots(tracker):\n print(tracker.get_slot('order_number'))\n return [\"order_number\"]",
"def __init__(self, question, answer):\n\n self.question = question\n self.answer = answer\n\n self.q_and_a = {\n 'Question:': self.question,\n 'Correct Answer:': self.answer,\n }",
"def __init__(self, data_info, slot_temp):\n self.ID = data_info['ID']\n self.turn_domain = data_info['turn_domain']\n self.turn_id = data_info['turn_id']\n self.dialog_history = data_info['dialog_history']\n self.turn_belief = data_info['turn_belief']\n self.gating_label = data_info['gating_label']\n self.turn_uttr = data_info['turn_uttr']\n self.generate_y = data_info[\"generate_y\"]\n self.num_total_seqs = len(self.dialog_history)\n self.slot_temp = slot_temp",
"def slot_mappings(self) -> Dict[Text, Union[Dict, List[Dict]]]:\n\n # return { \"faq_choice\": self.from_entity(\"faq_choice\"),\"faq_question\": self.from_entity(\"faq_question\"), \"faq_text\": [self.from_text()]}\n\n return {\"faq_choice\": [self.from_entity(\"faq_choice\"), self.from_text()], \"faq_text\": [self.from_text(), self.from_entity(entity=\"navigation\")]}",
"def choose_slot(data):\n firebase_uid = data[\"session\"].split(\"/\")[-1]\n db = firebase.database()\n slot = data[\"queryResult\"][\"parameters\"][\"slot\"]\n for i in data[\"queryResult\"][\"outputContexts\"]:\n if \"ticket-id\" in i[\"name\"]:\n ticket_id = i[\"parameters\"][\"ticket_id\"]\n db.child(\"user_data\").child(firebase_uid).child(\"Complaints\").child(ticket_id).child(\"Time Slot Chosen\").set(str(int(slot)))\n break\n response = {\n \"fulfillmentText\": \"I have updated your preference.\"\n }\n return response",
"def _setData(self):\n #offset = datetime.timedelta(prefs.getNoOfDaysBeforeQuestionSchedule())\n date_formatter = date.getLocaleFormatter(self.request, \"date\", \"long\")\n def _q_data_item(q):\n item = {}\n item[\"qid\"]= \"q_%s\" % q.question_id\n if q.question_number:\n item[\"subject\"] = u\"Q %s %s\" % (q.question_number, q.short_name)\n else:\n item[\"subject\"] = q.short_name\n item[\"title\"] = q.short_name\n item[\"result_item_class\"] = \"workflow-state-%s\" % q.status\n item[\"url\"] = url.set_url_context(\"questions/obj-%s\" % q.question_id)\n item[\"status\"] = misc.get_wf_state(q)\n item[\"status_date\"] = date_formatter.format(q.status_date)\n item[\"owner\"] = \"%s %s\" %(q.owner.first_name, q.owner.last_name)\n item[\"type\"] = _(q.type)\n item[\"to\"] = q.ministry.short_name\n return item\n self._data = [ _q_data_item(question) for question in self.query.all() ]",
"def required_slots(tracker: Tracker) -> List[Text]:\n return [\n \"tipo_prenda\",\n \"tipo_compostura\"\n ]",
"def generate_slot(slot_name, slot_description, slot_raw_filename):\n slot = {\n 'enumerationValues': [],\n \"name\": slot_name,\n \"description\": slot_description\n }\n slot_raw_vals = read_raw_vals(slot_raw_filename)\n for slot_val in slot_raw_vals:\n slot['enumerationValues'].append({'value': slot_val})\n\n return slot"
]
| [
"0.5948487",
"0.572332",
"0.5715427",
"0.5700892",
"0.56761324",
"0.56518036",
"0.5600143",
"0.55988514",
"0.55528927",
"0.55419713",
"0.5529572",
"0.55215013",
"0.5486724",
"0.5486546",
"0.5483428",
"0.5483428",
"0.54654187",
"0.5455922",
"0.54557544",
"0.5442031",
"0.54399633",
"0.53803265",
"0.53800166",
"0.53661984",
"0.53476506",
"0.5305713",
"0.52863836",
"0.5281722",
"0.5227964",
"0.5194826"
]
| 0.6630833 | 0 |
Calculates pairs of nodes (i, j) that have at least one common neighbour. | def _get_common_neighbour_node_pairs(self):
node_pairs = []
for node1 in self.graph.nodes():
for node2 in self.graph.nodes():
if node1 != node2:
neighbour_count = self.neighbour_counts[(node1, node2)]
if neighbour_count >= 1:
node_pairs.append((node1, node2))
return node_pairs | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_common_neighbours(p1: Position, p2: Position) -> List[Position]:\n i, j = p1\n l1 = [(i + x, j + y) for x, y in NEIGHBORS_1]\n i, j = p2\n l2 = [(i + x, j + y) for x, y in NEIGHBORS_1]\n return [k for k in l1 if k in l2]",
"def get_neighbors(size_i, size_j, ij):\n i,j = ij\n neighbors = set()\n if i>0:\n neighbors.add((i-1, j))\n if j>0:\n neighbors.add((i, j-1))\n if i<size_i-1:\n neighbors.add((i+1, j))\n if j<size_j-1:\n neighbors.add((i, j+1))\n return neighbors",
"def neighbors(i , j) :\n ns = []\n # vector de direction\n dx = [+1, +1, 0, 1]\n dy = [0, +1, 1, -1]\n for d in range(4) :\n ns.append((i + dx[d], j + dy[d]))\n #remove neagative element\n ns = [i for i in ns if i[0] >= 0 and i[1] >= 0]\n return ns",
"def __find_similar_pairs(self):\n size = len(self.__indexclusters)\n candidates = []\n for i in range(size):\n for j in range(i+1, size):\n simi = self.__cluster_simi(i, j)\n #print simi, self.__indexclusters[i],self.__indexclusters[j]\n if simi >= self.__threshold:\n candidates.append((simi, i, j))\n candidates.sort(reverse = True, key = lambda x: x[0])\n\n\n # filter overlapped pairs\n to_remove = set()\n appeared = set()\n for index, cand in enumerate(candidates):\n if cand[1] not in appeared and cand[2] not in appeared:\n appeared.add(cand[1])\n appeared.add(cand[2])\n else:\n to_remove.add(index)\n\n #print 'ahha'\n #print [(cand[1], cand[2]) for index, cand in enumerate(candidates) if index not in to_remove]\n\n return [(cand[1], cand[2]) for index, cand in enumerate(candidates)\n if index not in to_remove]",
"def neighbors((min_i, min_j), (max_i, max_j), (i, j)):\n if j + 1 <= max_j:\n yield (i, j + 1)\n if j - 1 >= min_j:\n yield (i, j - 1)\n if i + 1 <= max_i:\n yield (i + 1, j)\n if i - 1 >= min_i:\n yield (i - 1, j)",
"def _connect_neighbours(self):\n for prev in self.unvisited:\n for next in self.unvisited:\n if (next[0] == prev[0] and next[1] == prev[1] + 1) or (next[0] == prev[0] + 1 and next[1] == prev[1]):\n self.graph.addEdge((prev, next))\n self.visited.add(prev)\n self.visited.add(next)\n if self._find_intersection():\n self.intersection.append(prev)\n self.intersection.append(next)",
"def neighbours(self):\n seen = set()\n return [l.other(self) for l in self.dovetails \\\n if id(l) not in seen and not seen.add(id(l))]",
"def likely_pairs(self, k=2):\n for a in self.G.nodes():\n if not self.eligible_node(a):\n continue\n for b in neighbourhood(self.G, a, k):\n if not self.eligible_node(b):\n continue\n yield (a, b)",
"def neighbours(x, y):\n n = []\n for c in ((y-1, x-1), (y-1, x), (y-1, x+1), (y, x-1), (y, x+1), (y+1, x-1), (y+1, x), (y+1, x+1)):\n n.append(c)\n return n",
"def clusters_connected( self):\n def check_connected( k, vertices, edges):\n dads = {}\n for p in vertices:\n dads[p] = p\n\n def Find( c):\n while c != dads[c]:\n c = dads[c]\n return c\n\n def Union( p, q):\n dads[Find(p)] = Find(q)\n\n for p,q in edges:\n Union( p, q)\n\n stuff = set([ Find(p) for (k,p) in dads.items()])\n assert len(stuff) == 1, \"More than one partition\"\n\n vertices = collections.defaultdict( list)\n for p in itertools.product( range(self.n), repeat=2):\n vertices[self.raster[p]].append( p)\n\n def X():\n for x in range(self.n-1):\n for y in range(self.n):\n yield (x,y),(x+1,y)\n\n def Y():\n for x in range(self.n):\n for y in range(self.n-1):\n yield (x,y),(x,y+1)\n\n connections = collections.defaultdict( list)\n for (p,q) in itertools.chain( X(), Y()):\n if self.raster[p] == self.raster[q]:\n connections[self.raster[p]].append( ( p, q))\n\n for (k,v) in vertices.items():\n check_connected( k, v, connections[k])",
"def get_ij_neighbors(self, i, j):\n ii, jj = np.mgrid[i-1:i+2, j-1:j+2]\n ii, jj = ii.ravel(), jj.ravel()\n filtr = (ii >= 0) & (ii < self.height) & (jj >= 0) & (jj < self.width)\n ij_neighbors = set(zip(ii[filtr], jj[filtr]))\n ij_neighbors.remove((i, j))\n return ij_neighbors",
"def common_peers(self, i, j):\n ir = self.get(i, self.router.network)\n jr = self.get(j, self.router.network)\n \n if not ir or not jr:\n return []\n\n ir = [tuple(p['node']) for p in ir if p['transactions']]\n jr = [tuple(p['node']) for p in jr if p['transactions']]\n\n result = list(set(ir).intersection(jr))\n log(\"cmn: %s %s %i: %s\" % (i, j, len(result), result))\n return result",
"def connected_pair(self, first, second):\n cover = set()\n queue = {first}\n while queue:\n new = queue.pop()\n cover.add(new)\n for adjacent in new.parents() | new.children():\n if adjacent == second:\n return True\n elif not adjacent in cover:\n queue.add(adjacent)\n return False",
"def neighbours(self, i, j):\n nearest = []\n for x_offset, y_offset in [(0, -1), (0, 1), (1, 0), (-1, 0)]:\n try:\n nearest.append(self.as_list[checkNonNegIndex(i + x_offset)][checkNonNegIndex(j + y_offset)])\n except IndexError:\n continue\n except TypeError:\n continue\n return nearest",
"def find_common_neighbors(self, t2) -> list:\n neighs = []\n for n in self.neighbors:\n if n in t2.neighbors:\n neighs.append(n)\n return neighs",
"def connected_components(self) -> int:\n # visited = set()\n def get_component(vert: Tuple[int, int]) -> Set[Tuple[int, int]]:\n \"\"\" \"\"\"\n nonlocal visited\n visited.add(vert)\n if graph.vertices[vert]:\n for neighbor in graph.vertices[vert]:\n if neighbor not in visited:\n visited.add(neighbor)\n neighbor_components = get_component(neighbor)\n visited = visited.union(neighbor_components)\n else:\n continue\n\n return visited\n else:\n return visited\n\n components: List[Set[Tuple[int, int]]] = list()\n for vertex in graph.vertices.keys():\n visited: Set[Tuple[int, int]] = set()\n component = get_component(vertex)\n if component not in components:\n components.append(component)\n else:\n continue\n \n return len(components)",
"def connected(index,i):\n adj = concatenate([ where(ind==j)[0] for j in ind[i] if j >= 0 ])\n return unique(adj[adj != i])",
"def neighbours(t,p):\r\n neighbour = set()\r\n\r\n if p[t][1] != 0:\r\n neighbour.add(tuple(p[t][1]))\r\n if p[t][2] != 0:\r\n neighbour.add(tuple(p[t][2]))\r\n if p[t][3] != 0:\r\n neighbour.add(tuple(p[t][3]))\r\n if p[t][4] != 0:\r\n neighbour.add(tuple(p[t][4]))\r\n \r\n return neighbour",
"def Neighbourgs(abcd, h):\n\n Nelem = len(abcd)\n\n a = abcd[h][0]\n b = abcd[h][1]\n c = abcd[h][2]\n d = abcd[h][3]\n\n el1, el2, el3, el4 = 0, 0, 0, 0\n\n N = 0\n\n for j in range(0, Nelem - 1):\n\n if N == 4:\n break\n\n if a in abcd[j, :] and b in abcd[j, :] and j != h:\n N += 1\n el1 = j + 1\n\n if b in abcd[j, :] and c in abcd[j, :] and j != h:\n N += 1\n el2 = j + 1\n\n if c in abcd[j, :] and d in abcd[j, :] and j != h:\n N += 1\n el3 = j + 1\n\n if d in abcd[j, :] and a in abcd[j, :] and j != h:\n N += 1\n el4 = j + 1\n\n return [el1, el2, el3, el4]",
"def pairs_from_knn(ind):\n \n NN = ind.shape[1]\n source_nodes = np.repeat(ind[:,0], NN-1).reshape(-1,1)\n target_nodes = ind[:,1:].reshape(-1,1)\n pairs = np.hstack((source_nodes, target_nodes))\n pairs = remove_duplicate_pairs(pairs)\n return pairs",
"def neighbours_R(self):\n seen = set()\n return [l.other(self) for l in self.dovetails_R \\\n if id(l) not in seen and not seen.add(id(l))]",
"def neighbours_L(self):\n seen = set()\n return [l.other(self) for l in self.dovetails_L \\\n if id(l) not in seen and not seen.add(id(l))]",
"def GetNodeCommonality(self):\n\n self.__do_essential_memebers_exist__()\n\n elements = self.elements.ravel()\n idx_sort = np.argsort(elements)\n sorted_elements = elements[idx_sort]\n vals, idx_start = np.unique(sorted_elements, return_index=True)\n\n # Sets of indices\n flat_pos = np.split(idx_sort, idx_start[1:])\n els = np.split(idx_sort // int(self.elements.shape[1]), idx_start[1:])\n pos = np.split(idx_sort % int(self.elements.shape[1]), idx_start[1:])\n\n # In case one wants to return only the duplicates i.e. filter keeping only items occurring more than once\n # vals, idx_start, count = np.unique(sorted_elements, return_counts=True, return_index=True)\n # vals = vals[count > 1]\n # res = filter(lambda x: x.size > 1, res)\n\n return els, pos, flat_pos",
"def common_nbrs(self, u, v):\n u_adj = self.nx_graph.neighbors(u)\n v_adj = self.nx_graph.neighbors(v)\n nbrs = []\n for u in u_adj:\n if u in v_adj:\n nbrs.append(u)\n\n return nbrs, u_adj, v_adj",
"def get_pairs(N, row, col):\n pairs = np.array(list(combinations(range(N), 2)))\n pairs = np.column_stack((pairs, np.zeros(len(pairs), dtype=int)))\n # fill in edges\n for (r, c) in zip(row, col):\n k = r * (2 * N - r - 1) / 2 - r + c - 1\n pairs[int(k), 2] = 1\n\n return pairs",
"def adjacent_pairs(self, nodes: Tuple[int], k: int) -> List[Tuple[int, int]]:\n n = len(nodes)\n return [(u, nodes[j % n])\n for i, u in enumerate(nodes)\n for j in range(i + 1, i + 1 + k // 2)]",
"def neighbors(node, topology):\n return [n for n in topology[node]]",
"def neighbours2((u,v)):\r\n\r\n return ((u-1, v+1), (u,v+1), (u+1,v+1), \r\n (u-1,v), (u+1,v),\r\n (u-1,v-1), (u,v-1), (u+1,v-1))",
"def cell_neighbours(self, x, y):\n if self.maze_map[y][x]:\n return set()\n neighbours = set()\n for (direction, ((i, j), dummy)) in MazeGraph.DIRECTIONS.items():\n xi, yj = (x + i) % self.width, (y + j) % self.height\n if not self.maze_map[yj][xi]:\n neighbours.add((direction, (xi, yj)))\n return neighbours",
"def get_bond_connectivity(self):\n m, connectivity = self.owner, []\n for index, i in enumerate(self.rix):\n for j in self.rix[index + 1:]:\n b1 = m.rings[i].bix\n b2 = m.rings[j].bix\n if set(b1).intersection(b2):\n connectivity.append((i, j))\n return tuple(connectivity)"
]
| [
"0.71448857",
"0.6700568",
"0.66831475",
"0.6655144",
"0.655701",
"0.64960265",
"0.6442587",
"0.64309496",
"0.63989466",
"0.6389736",
"0.63726455",
"0.63648593",
"0.6331582",
"0.6319956",
"0.629356",
"0.6202063",
"0.6175751",
"0.6175056",
"0.6142384",
"0.61294764",
"0.61103463",
"0.61081976",
"0.60927033",
"0.6058725",
"0.6055972",
"0.6021041",
"0.6014079",
"0.6011324",
"0.6005727",
"0.60043466"
]
| 0.77886975 | 0 |
Return list of service_ids that match a given datetime | def getServiceIdsForDate(self, dt, exclude0000=True):
dayDict = {1:"monday", 2:"tuesday", 3:"wednesday", 4:"thursday",
5:"friday", 6:"saturday", 7:"sunday"}
sqlQuery = "select service_id from calendar" \
+ " where {} = 1".format(dayDict[dt.isoweekday()]) \
+ " and start_date <= ? and end_date >= ?;"
dtstr = dt.date().isoformat().replace('-','')
queryTuple = (dtstr, dtstr)
cursor = self.conn.execute(sqlQuery, queryTuple)
serviceIdList = []
for row in cursor:
serviceIdList.append(row[0])
# Get rid of weird serviceId
if exclude0000:
serviceIdList = [ii for ii in serviceIdList if ii != '0000']
return serviceIdList | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_serviceIDs_for_date(date):\n global SDHandler\n return SDHandler.effective_service_ids(date);",
"def filter_list_by_day(vevent_seq, dt=None):\n dt = dt or datetime_now()\n if is_naive(dt):\n dt = make_aware(dt)\n start = datetime.datetime(dt.year, dt.month, dt.day, tzinfo=dt.tzinfo)\n end = start.replace(hour=23, minute=59, second=59)\n return (vevent for vevent in vevent_seq if has_overlap(vevent, start, end))",
"def findTaggedServiceIds(self, name):\n pass;",
"def get_ts_ids(self, datetime_start, datetime_end):\n # First figure out the timestamp range\n query_str = \"SELECT TIMESTAMP_INFO.TS_ID FROM TIMESTAMP_INFO WHERE TIMESTAMP >= %(ps)s AND TIMESTAMP <= %(ps)s\"\n query_variables = (\n datetime_start.strftime(\"%Y-%m-%d %H:%M:%S\"),\n datetime_end.strftime(\"%Y-%m-%d %H:%M:%S\"))\n\n result = self.query(query_str=query_str,\n query_variables=query_variables)\n ts_ids = [x[0] for x in result]\n ### TODO: make sure this works (it's templated down in test_bin_cache_lmtdb.py)\n return min(ts_ids), max(ts_ids)",
"def match_dates(datetimes, datetime_axis):\n\n dates = list(map(split_dt, datetimes))\n date_axis = list(map(split_dt, datetime_axis[:])) \n \n match_datetimes = []\n miss_datetimes = [] \n\n for i in range(0, len(datetime_axis)):\n if date_axis[i] in dates:\n match_datetimes.append(datetime_axis[i])\n else:\n miss_datetimes.append(datetime_axis[i]) \n\n return match_datetimes, miss_datetimes",
"async def get_appservices_by_state(\n self, state: ApplicationServiceState\n ) -> List[ApplicationService]:\n results = await self.db_pool.simple_select_list(\n \"application_services_state\", {\"state\": state.value}, [\"as_id\"]\n )\n # NB: This assumes this class is linked with ApplicationServiceStore\n as_list = self.get_app_services()\n services = []\n\n for res in results:\n for service in as_list:\n if service.id == res[\"as_id\"]:\n services.append(service)\n return services",
"def get_matching_appids(\n service: str, instance: str, client: MarathonClient, embed_tasks: bool = False\n) -> List[str]:\n marathon_apps = get_all_marathon_apps(\n client, service_name=service, instance_name=instance, embed_tasks=embed_tasks\n )\n return [\n app.id for app in marathon_apps if does_app_id_match(service, instance, app.id)\n ]",
"def get_dataitems_for_client_on_period(self, id, start_datetime, end_datetime):\n return self.dbsession.query(DataItem).filter(\n and_(DataItem.client_id == id,\n start_datetime <= DataItem.startDatetime,\n DataItem.endDatetime <= end_datetime)).all()",
"async def find_sessions_by_date(date: datetime, mongo: MongoDB = mongodb) -> List[SessionOutModel]:\n if cursor := mongo.session_coll.find(\n {\"sessionDate\": {\"$gte\": date},\n \"status\": \"pending\"}):\n sessions = []\n for document in await cursor.to_list(length=100):\n sessions.append(document)\n if sessions:\n return [SessionOutModel(**session) for session in sessions]\n else:\n return [SessionOutModel()]",
"def aws_waits ( func, matching_ids ) :\n done = False\n found_ids = []\n while not done :\n found_ids = []\n time.sleep( 1 )\n items = func( )\n for item in items :\n for matching_id in matching_ids :\n if item.id == matching_id :\n found_ids.append( item )\n break\n\n if len( found_ids ) == len( matching_ids ) :\n done = True\n break\n\n return found_ids",
"def get_service_ids(self) -> list[bluetooth.BluetoothUuid]:\n return [bluetooth.BluetoothUuid(i) for i in self.serviceIds()]",
"def get_sku_id(sku_id, status, start_time, end_time):\n all_data = []\n if not (sku_id, status, start_time, end_time):\n return all_data\n\n for i in sku_database.find({\"SKU_id\": sku_id, \"Status\": status}, {\"_id\": 0}):\n if start_time < i[\"Time_stamp\"] < end_time:\n all_data.append(i)\n else:\n continue\n\n return all_data",
"def get_made_appointments(iso_datetime):\n appointments = []\n request_d_time = datetime.fromisoformat(iso_datetime)\n request_date = datetime(request_d_time.year,\n request_d_time.month,\n request_d_time.day)\n try:\n query = db.session.query(Appointment).filter(\n Appointment.d_time >= request_date\n ).all()\n appointments = list(map(lambda appointment: appointment.d_time, query))\n except Exception as e:\n app.logger.error(str(e))\n raise\n else:\n return appointments",
"def _get_service_list(self, service_name):\n service_list = self.service_dict[service_name]\n\n return service_list",
"def get_effective_services(self):\n myname = self['hostgroup_name']\n if not myname: return []\n \n result = []\n for service in Service.objects.all:\n hostgroup_name = service['hostgroup_name'] or \"\"\n hostgroups = service['hostgroups'] or \"\"\n if myname in hostgroups.split(','):\n result.append( service )\n elif myname in hostgroup_name.split(\",\"):\n result.append( service )\n return result",
"def _get_ids_from_ip(self, ip_address):\r\n try:\r\n # Does it look like an ip address?\r\n socket.inet_aton(ip_address)\r\n except socket.error:\r\n return []\r\n\r\n # Find the VS via ip address. First try public ip, then private\r\n results = self.list_instances(public_ip=ip_address, mask=\"id\")\r\n if results:\r\n return [result['id'] for result in results]\r\n\r\n results = self.list_instances(private_ip=ip_address, mask=\"id\")\r\n if results:\r\n return [result['id'] for result in results]",
"def get_services_by_status(self, status):\n\n services_project_ids = \\\n self.storage_controller.get_services_by_status(status)\n\n return services_project_ids",
"def since(datetime_obj):\n id_s, iterator = set(), get_match_ets(endpage=None)\n match_et = next(iterator)\n while _get_match_date(match_et) >= datetime_obj:\n s = {int(match_et.find(ha).find(\"coach\").attrib[\"id\"])\n for ha in (\"home\", \"away\")}\n for id_ in s - id_s:\n yield id_\n id_s |= s\n match_et = next(iterator)",
"def filter_by_date(items, start_time, end_time=None):\n start_time = parser.parse(start_time + \"UTC\").timestamp()\n if end_time:\n end_time = parser.parse(end_time + \"UTC\").timestamp()\n else:\n end_time = time.time()\n\n filtered_items = []\n for item in items:\n if 'time' in item:\n item_time = item['time']\n elif 'timestamp' in item:\n item_time = item['timestamp']\n timestamp = parser.parse(item_time + \"UTC\").timestamp()\n if end_time > timestamp > start_time:\n filtered_items.append(item)\n\n return filtered_items",
"def find_services(self) -> List[str]:\n results = self.collection.distinct(\"process.serviceName\")\n return [result for result in results]",
"def get_services_by_company(company_id: int) -> QuerySet:\n return ServiceDocument.search().filter(\"term\", **{\"company.id\": company_id}).to_queryset()",
"def get_booking_at(self, datetime):\n for booking in self.booking_set.all():\n if booking.schedule_start <= datetime < booking.schedule_end and not booking.is_cancelled():\n return booking\n return None",
"def list_services(self,honeypotids):\n req = {\"type\":\"get_all_services\",\n \"to\":honeypotids,\n \"from\":self.network.mc_id}\n expect_dict = {\"type\":\"send_all_services\"}\n msg_list = self.send_receive(req,honeypotids,expect_dict)\n answer = {}\n for msg in msg_list:\n answer[msg[\"from\"]] = msg[\"services\"]\n return answer",
"def get_service_uuids(self) -> list[bluetooth.BluetoothUuid]:\n return [bluetooth.BluetoothUuid(i) for i in self.serviceUuids()]",
"def get_ids(self, start_time, stop_time=None):\n start_time = _get_string_from_date(start_time)\n if stop_time == None:\n stop_time = start_time\n else:\n stop_time = _get_string_from_date(stop_time)\n xml = str(self._server.getCablingIds(start_time, stop_time))\n parseString(xml, self._cabling_handler)\n return self._cabling_handler.get_ids()",
"def get_all_slots(iso_datetime):\n d_time = datetime.fromisoformat(iso_datetime)\n d_date = date(d_time.year, d_time.month, d_time.day)\n schedule = AppointmentService.APPOINTMENT_SCHEDULE.get(d_date.weekday(), {})\n slots = []\n\n if schedule:\n begin_time = datetime.combine(d_date, schedule['begin'])\n end_time = datetime.combine(d_date, schedule['end'])\n\n while begin_time < end_time:\n slots.append(begin_time)\n begin_time += AppointmentService.APPOINTMENT_DURATION\n\n return slots",
"def _filter_by_date(from_date, until_date):\n qlist = []\n\n if from_date:\n qlist.append(Q(oai_date_stamp__gte=from_date))\n\n if until_date:\n qlist.append(Q(oai_date_stamp__lte=until_date))\n\n return qlist",
"def list_versions(self, service_id):\n return [self.fastly_cache[service_id]['service_details']]",
"def services_by_name(self,servicename):\n\t\tres = []\n\t\tfor k,v in self.services.items():\n\t\t\tif k[1].lower() == servicename.lower():\n\t\t\t\tres += [self.services[k]]\n\t\treturn res",
"def get_services(self, srv_record, host_name, service_name):\n ans = None\n\n # Form service record query: _radiovis._tcp at example.com\n # becomes _radiovis._tcp.example.com\n query = '.'.join([srv_record, host_name])\n\n self.log(\"Querying: \" + query)\n\n try:\n ans = self._resolver.query(query, 'SRV')\n\n except dns.resolver.NoAnswer as e:\n self.log(\"No answer\")\n except dns.resolver.NXDOMAIN as e:\n pass\n except dns.exception.DNSException as e:\n self.log(\"Exception: \" + str(type(e)))\n\n services = []\n\n if ans is not None and len(ans) > 0:\n for record in ans:\n # Remove last (blank) field from hostname then create\n # hostname string by joining with \".\".\n target = record.target.labels[0:-1]\n target = map(lambda s: str(s, 'utf-8'), target)\n target = \".\".join(target)\n\n self.log(\"Found: \" + target + \", port \" + str(record.port))\n\n service_record = ServiceRecord(name = service_name,\n query = query,\n port = record.port,\n priority = record.priority,\n target = target,\n weight = record.weight)\n services.append(service_record)\n else:\n self.log(\"No services\")\n\n return services"
]
| [
"0.7054457",
"0.55842924",
"0.5545218",
"0.5449232",
"0.53947216",
"0.5309217",
"0.52697635",
"0.52399474",
"0.5219247",
"0.51845676",
"0.51796013",
"0.5153664",
"0.51219136",
"0.50670344",
"0.5062298",
"0.50604385",
"0.5058337",
"0.5056613",
"0.5055468",
"0.50297636",
"0.50162566",
"0.5009036",
"0.49805883",
"0.4963752",
"0.49515802",
"0.4949248",
"0.4910452",
"0.49073952",
"0.4898021",
"0.48949453"
]
| 0.71749496 | 0 |
Provides the template tag {% attendance SESSION USER as ATTENDING TENTATIVE %} | def do_attendance(parser, token):
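    # Expect exactly six tokens: the tag name, SESSION, USER, the literal 'as', and the two output variable names.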
try:
_tagname, session, user, _as, context_attending, context_tentative = token.split_contents()
except ValueError:
raise template.TemplateSyntaxError(u'%(tagname)r tag syntax is as follows: '
            '{%% %(tagname)s SESSION USER as VARIABLE1 VARIABLE2 %%}' % {'tagname': _tagname})
return AttendanceNode(session, user, context_attending, context_tentative) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def myevents(request):\n context = RequestContext(request)\n\n user = request.user\n\n return render_to_response('myevents.html', context)",
"def attendingAamas(request):\n # Test Comment\n assert isinstance(request, HttpRequest)\n return render(\n request,\n 'attendingAamas.html',\n context_instance=RequestContext(request, {})\n )",
"def view_attendance(request):\n\n\tcontext_dict = {\n\t\t'title': 'All Attendance',\n\t}\n\treturn render(request, \"viewAttendance.html\", context_dict)",
"def AddSessionUtilization(asg_name, arn_scalein, arn_scaleout):\n logger.info('Creating Session Utilization CloudWatch alarm for ASG: ' + asg_name)\n alarmname= asg_name + '-cw-su'\n return common_alarm_func_add(asg_name, \"panSessionUtilization\", lib.get_cw_name_space(stackname, asg_name), arn_scalein, arn_scaleout,\n\t\t\talarmname, \"Session Utilization\", 'Percent')",
"def user():\r\n return render_base_template(\"user.html\", user=current_user)",
"def UserAttendance(activity, contacts, attended):\n contacts = [c.strip() for c in contacts.strip().split()]\n\n res = {}\n for contact in contacts:\n user = utils.GetAppEngineUser(contact)\n lock = rule_engine.RegistrationLock(user, activity.key())\n res[contact] = lock.RunSynchronous(_UserAttendanceUnsafe, activity, user,\n attended) is not None\n return res",
"def template_nlg(self, user_acts):\n utt_list = []\n for user_act in user_acts:\n user_dialogue_act = user_act['dialogue_act']['value']\n if user_dialogue_act == UserAct.INFORM:\n # Template based NLG based on intent\n intent_slot = user_act.get('intent', None)\n slots = [intent_slot] if intent_slot is not None else []\n slots += user_act.get(\"slots\", list())\n slot_list = []\n for slot in slots:\n if slot[\"slot\"] in [\n \"object_mask_str\", \"gesture_click\",\n \"original_b64_img_str\"\n ]:\n slot_msg = slot[\"slot\"] + \"=\" + slot[\"value\"][:5]\n elif slot[\"slot\"] == \"mask_strs\":\n slot_msg = slot[\"slot\"] + \"=\" + len(slot[\"value\"])\n else:\n slot_msg = slot[\"slot\"] + \"=\" + str(slot[\"value\"])\n slot_list.append(slot_msg)\n utt = \"I want \" + ', '.join(slot_list) + \".\"\n elif user_dialogue_act == UserAct.AFFIRM:\n utt = \"Yes.\"\n elif user_dialogue_act == UserAct.NEGATE:\n utt = \"No.\"\n elif user_dialogue_act == UserAct.WAIT:\n utt = \"(Waiting...)\"\n elif user_dialogue_act == UserAct.BYE:\n utt = \"Bye.\"\n else:\n raise ValueError(\n \"Unknown user_dialogue_act: {}\".format(user_dialogue_act))\n utt_list.append(utt)\n\n utterance = ' '.join(utt_list)\n return utterance",
"def is_attended(value, user: User):\n return value.is_attended(user)",
"def addsession(update, context):\n\tupdate.message.reply_text('Ok, for this I will need two items:\\n\\n'\n\t\t'1. Date and time of the registration opening (your local time)\\n' \n\t\t'2. URL-link to the session page\\n\\n' \n\t\t'You can always cancel the input of a new session by typing /cancel.')\n\n\tupdate.message.reply_text('Let\\'s start with the first one.\\n\\n' \n\t\t'When does the registration open for your swimming session?\\n' \n\t\t'Please, send me the date and time in the following format:\\n\\n'\n\t\t'dd/mm/yyyy hh:mm')\n\n\treturn DATETIME",
"def generate_activity(user):\n data = {}\n random_index = random.randint(0, 5)\n data['uid'] = user[0]\n data['username'] = user[1]\n data['action'] = actions[random_index]\n data['ts'] = datetime.datetime.now().isoformat()\n return json.dumps(data)",
"def active_users(request):\n user_id = BhagirathSession.objects.all().filter(logout_timestamp=None).values('user').distinct(true_or_false=True)\n \n dict = {}\n list = []\n for i in user_id:\n k = User.objects.get(pk=i['user'])\n j = BhagirathSession.objects.all().filter(user=k,logout_timestamp=None)\n dict['username'] = k.username\n dict['login_timestamp'] = j[0].login_timestamp\n list.append(dict)\n data = {\n 'active_users_list':list,\n 'count':len(list)\n }\n return render_to_response('my_admin_tools/menu/active_users.html',data,context_instance=RequestContext(request))",
"def Show_attendance_by_session(self, day):\n sessions = {}\n for x in self.Attendees.values():\n if (x.Sessions[day] in sessions) and (x.Sessions[day] is not \"\"):\n sessions[x.Sessions[day]] = sessions[x.Sessions[day]] + 1\n else:\n sessions[x.Sessions[day]] = 1\n return sessions",
"def push_attendance(self, request, video_id=None):\n serializer = serializers.LiveAttendanceSerializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n\n video_id = self.get_related_video_id()\n video = get_object_or_404(Video, pk=video_id)\n\n try:\n if self.request.resource and is_lti_token(\n self.request.resource.token\n ): # LTI context\n token = self.request.resource.token\n token_user = self.request.resource.user\n livesession, _ = get_livesession_from_lti(token, video_id)\n\n # Update username only if defined in the token user\n if token_user.get(\"username\"):\n livesession.username = token_user[\"username\"]\n # Update email only if defined in the token user\n if token_user.get(\"email\"):\n livesession.email = token_user[\"email\"]\n elif self.request.resource: # Anonymous context\n anonymous_id = self.request.query_params.get(\"anonymous_id\")\n if anonymous_id is None:\n return Response(\n {\"detail\": \"anonymous_id is missing\"},\n status=status.HTTP_400_BAD_REQUEST,\n )\n livesession, _ = get_livesession_from_anonymous_id(\n video_id=video.id, anonymous_id=anonymous_id\n )\n else: # Standalone context\n livesession, _ = get_livesession_from_user_id(\n video_id=video.id, user_id=request.user.id\n )\n\n livesession.live_attendance = (\n (serializer.data[\"live_attendance\"] | livesession.live_attendance)\n if livesession.live_attendance\n else serializer.data[\"live_attendance\"]\n )\n\n if serializer.data.get(\"language\"):\n livesession.language = serializer.data[\"language\"]\n\n livesession.save()\n return Response(self.get_serializer(livesession).data, status.HTTP_200_OK)\n except (Video.DoesNotExist, ConsumerSite.DoesNotExist) as exception:\n raise Http404(\"No resource matches the given query.\") from exception\n except IntegrityError as error:\n if \"livesession_unique_video_display_name\" in error.args[0]:\n return Response(\n {\"display_name\": \"User with that display_name already exists!\"},\n status=status.HTTP_409_CONFLICT,\n )\n\n raise error",
"def __str__(self):\r\n return '{user}\\'s timeline'.format(user=self.user.username)",
"def user(self, user):\n signer = Signer()\n try:\n username = signer.unsign(user)\n user = User.objects.get(username=username)\n except (BadSignature, User.DoesNotExist):\n user = None\n if user:\n # Getting all events that the user has/is participating in\n self.add_events(Event.objects.filter(\n attendance_event__attendees__user=user\n ).order_by('event_start').prefetch_related(\n 'attendance_event', 'attendance_event__attendees'\n ))\n self.filename = username",
"def user(self, user):\n signer = Signer()\n try:\n username = signer.unsign(user)\n user = User.objects.get(username=username)\n except (BadSignature, User.DoesNotExist):\n user = None\n if user:\n # Getting all events that the user has/is participating in\n self.add_events(Event.objects.filter(\n attendance_event__attendees__user=user\n ).order_by('event_start').prefetch_related(\n 'attendance_event', 'attendance_event__attendees'\n ))\n self.filename = username",
"def who_am_i():\n request = req('GET', USER_API + 'session/whoami')\n r = request.json()\n user = {\n 'Email': demisto.get(r, 'data.email'),\n 'Login': demisto.get(r, 'data.login'),\n 'Name': demisto.get(r, 'data.name'),\n 'Organization': demisto.get(r, 'data.organization_id'),\n 'Role': demisto.get(r, 'data.role')\n }\n demisto.results({\n 'Type': entryTypes['note'],\n 'EntryContext': {'ThreatGrid.User': user},\n 'HumanReadable': tableToMarkdown('ThreatGrid - Current Session User', [user], [\n 'Email', 'Login', 'Name', 'Organization', 'Role'\n ]),\n 'ContentsFormat': formats['json'],\n 'Contents': user\n })",
"def mark_attendance(employee):\r\n # loads date from computer\r\n today = datetime.datetime.now()\r\n mark = today.strftime(\"%d/%m/%Y %H:%M\")\r\n # adds to attendance list in object\r\n employee.attendance.append(mark)\r\n return employee.attendance",
"def print_user(self, user):\n status = \"active\"\n token = user.token\n\n if token in [\"finished\", \"revoked\"]:\n status = token\n\n if token is None:\n token = \"\"\n\n subid = \"%s\\t%s[%s]\" % (user.id, token, status)\n print(subid)\n return subid",
"def format_username(self, at_char, user):\r\n return u'<a href=\"http://{domain}/user/{user}\" data-user=\"{user}\">{char}{user}</a>'.format(\r\n **dict(domain=self.domain, user=user, char=at_char, text=user))\r\n\r\n #return u'<a href=\"http://%s/user/%s\" data-user=\"\">%s%s</a>' \\\r\n # % (self.domain, user, at_char, user)\r",
"def get_attendance(employee, date):\n # empty list to append the date come from database, after convert it from tuple to string\n day = []\n # excute sql query to get list of data each date come as tuple [('2020-04-01',)]\n FetchDay = c.execute(\"SELECT day FROM Attendance where employee=:employee\", {\n 'employee': employee})\n # get all date as list of tuples\n day_as_tuple = c.fetchall()\n\n # iterate over list of tuple and append each date to day list\n for days in day_as_tuple:\n for ele in days:\n day.append(ele)\n\n # test the case to check if date in day list or not\n if date in day:\n attended = True\n else:\n attended = False\n\n # make report as dictionary\n report = {}\n report['attended'] = attended\n # Time duration function to compute time duration\n duration = TimeDuration(employee, date)\n report['duration'] = str(duration)[:5]\n return report",
"def check_first_sign_in(self, cr, uid, att, context=None):\n pre_att_ids = self.search(cr, uid, [('employee_id', '=', att.employee_id.id), \n ('name', '<', att.name), \n ('action', 'in', ('sign_in', 'sign_out'))], limit=1, order='name DESC')\n inconsistence_reason = ''\n if not pre_att_ids and att.action == 'sign_out':\n return True, 'First attendance must be sign in.'\n if not pre_att_ids:\n return False, inconsistence_reason\n pre_att = self.read(cr, uid, pre_att_ids[0], ['action', 'name'], context=context)\n if att.action == 'sign_out' and pre_att['action'] == 'sign_in':\n working_hour_obj = self.pool.get('hr.payroll.working.hour')\n \n # check 2 attendances have same working hour?\n time_early_late = self.compute_max_early_max_late(cr, uid, att.name, context=context)\n working_hour_ids = working_hour_obj.search(cr, uid, [('employee_id', '=', att.employee_id.id),\n ('expected_start', '<=', time_early_late[0].strftime(DEFAULT_SERVER_DATETIME_FORMAT)),\n ('expected_end', '>=', time_early_late[1].strftime(DEFAULT_SERVER_DATETIME_FORMAT)),\n ], context=context, limit=1, order='expected_start')\n# working_hour_ids = working_hour_obj.search(cr, uid, [('employee_id', '=', att.employee_id.id),\n# ('expected_start', '<=', att.name),\n# ('expected_end', '>=', att.name),\n# ], context=context)\n \n time_pre_early_late = self.compute_max_early_max_late(cr, uid, pre_att['name'], context=context)\n working_hour_pre_ids = working_hour_obj.search(cr, uid, [('employee_id', '=', att.employee_id.id),\n ('expected_start', '<=', time_pre_early_late[0].strftime(DEFAULT_SERVER_DATETIME_FORMAT)),\n ('expected_end', '>=', time_pre_early_late[1].strftime(DEFAULT_SERVER_DATETIME_FORMAT)),\n ], context=context, limit=1, order='expected_start')\n# working_hour_pre_ids = working_hour_obj.search(cr, uid, [('employee_id', '=', att.employee_id.id),\n# ('expected_start', '<=', pre_att['name']),\n# ('expected_end', '>=', pre_att['name']),\n# ], context=context)\n # check 2 attendances have same overtime ?\n overtime_obj = self.pool.get('hr.overtime')\n orertime_ids = overtime_obj.search(cr, uid, [('employee_id', '=', att.employee_id.id),\n ('mode', '=', 'by_employee'),\n ('datetime_start', '<=', time_early_late[0].strftime(DEFAULT_SERVER_DATETIME_FORMAT)),\n ('datetime_stop', '>=', time_early_late[1].strftime(DEFAULT_SERVER_DATETIME_FORMAT)),\n ('state', 'in', ['confirmed', 'done'])\n ])\n orertime_pre_ids = overtime_obj.search(cr, uid, [('employee_id', '=', att.employee_id.id),\n ('mode', '=', 'by_employee'),\n ('datetime_start', '<=', time_pre_early_late[0].strftime(DEFAULT_SERVER_DATETIME_FORMAT)),\n ('datetime_stop', '>=', time_pre_early_late[1].strftime(DEFAULT_SERVER_DATETIME_FORMAT)),\n ('state', 'in', ['confirmed', 'done'])\n ])\n # (overtime and working hours) are continuously.\n if working_hour_ids and not working_hour_pre_ids and not orertime_ids and orertime_pre_ids:\n working_start = working_hour_obj.read(cr, uid, working_hour_ids[0],['expected_start'], context=context)['expected_start']\n overtime_stop = overtime_obj.read(cr, uid, orertime_pre_ids[0],['datetime_stop'], context=context)['datetime_stop']\n # - 1 seconds\n working_stop_pre = datetime.strptime(overtime_stop, DEFAULT_SERVER_DATETIME_FORMAT) - timedelta(seconds = 1)\n self.create(cr, uid, {'name': working_stop_pre.strftime(DEFAULT_SERVER_DATETIME_FORMAT),\n 'action': 'sign_out',\n 'employee_id': att.employee_id.id,\n }, context=context)\n self.create(cr, uid, {'name': working_start,\n 'action': 'sign_in',\n 'employee_id': att.employee_id.id,\n 
}, context=context)\n# self.check_consistency(cr, uid, [att_new_id],context=context)\n return False, inconsistence_reason\n # (working hours and overtime) are continuously.\n if orertime_ids and not orertime_pre_ids and not working_hour_ids and working_hour_pre_ids:\n working_stop = working_hour_obj.read(cr, uid, working_hour_pre_ids[0],['expected_end'], context=context)['expected_end']\n overtime_start = overtime_obj.read(cr, uid, orertime_ids[0],['datetime_start'], context=context)['datetime_start']\n working_stop_pre = datetime.strptime(working_stop, DEFAULT_SERVER_DATETIME_FORMAT) - timedelta(seconds = 1)\n \n self.create(cr, uid, {'name': working_stop_pre.strftime(DEFAULT_SERVER_DATETIME_FORMAT),\n 'action': 'sign_out',\n 'employee_id': att.employee_id.id,\n }, context=context)\n self.create(cr, uid, {'name': overtime_start,\n 'action': 'sign_in',\n 'employee_id': att.employee_id.id,\n }, context=context)\n# self.check_consistency(cr, uid, [att_new_id],context=context)\n return False, inconsistence_reason\n if not orertime_ids and working_hour_ids and working_hour_ids != working_hour_pre_ids:\n inconsistence_reason = 'First attendance must be sign in every working hour.'\n return True, inconsistence_reason\n \n if not working_hour_ids and orertime_ids and orertime_ids != orertime_pre_ids:\n inconsistence_reason = 'First attendance must be sign in every overtime.'\n return True, inconsistence_reason\n return False, inconsistence_reason",
"def add_session_attr(typename, session):\n old_session = getattr(typename, 'session', None)\n setattr(typename, 'session', session)\n yield\n if old_session:\n setattr(typename, 'session', old_session)",
"def add_user(self, attrs):\n pass",
"def str_ax_user(self) -> str:\n value = \"User: ??\"\n if self.STARTED and self.current_user:\n value = self.current_user.str_connect\n return value",
"def test_api_livesession_read_attendances_admin_live_attendance_key_string(self):\n video = VideoFactory(\n live_state=RUNNING,\n live_info={\n \"started_at\": \"1533686400\",\n },\n live_type=JITSI,\n )\n\n AnonymousLiveSessionFactory(\n live_attendance={\"data\": True},\n video=video,\n )\n\n AnonymousLiveSessionFactory(\n live_attendance={\"1533686400\": {\"wonderful\": True}},\n video=video,\n )\n\n # token with right context_id and lti_user_id\n jwt_token = InstructorOrAdminLtiTokenFactory(\n playlist=video.playlist,\n consumer_site=str(video.playlist.consumer_site.id),\n context_id=str(video.playlist.lti_id),\n )\n\n response = self.client.get(\n self._get_url(video),\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(response.status_code, 400)\n\n self.assertEqual(\n response.json(), {\"live_attendance\": \"keys in fields should be timestamps\"}\n )",
"def user_appears(self, user):\n pass",
"def see_patient(self, request):\n appt = self.get_object() # Appointment object\n appt.status = IN_SESSION\n appt.seen_time = timezone.localtime(timezone.now()).time()\n response = patch_appointment_status(request, appt.id, IN_SESSION)\n if response:\n appt.save()\n return None\n return err_patch",
"def attend_events(id):\n\n events = []\n\n check_admin()\n gl = GuestList.query.all()\n user = User.query.get_or_404(id)\n for item in gl:\n if item.guest_id == id and item.is_attending == True:\n events.append(Event.query.get_or_404(item.event_id))\n \n\n return render_template('admin/userlist/AttendEvents.html', action=\"View\",\n title=\"Previous events\", events=events, user=user)",
"def make_template_values(request):\n user = users.get_current_user()\n values = {'request': request}\n if user:\n values['logged_in'] = True\n values['loginout_url'] = users.create_logout_url(request.uri)\n values['loginout_url_linktext'] = 'Logout ({})'.format(user)\n else:\n values['logged_in'] = False\n values['loginout_url'] = users.create_login_url(request.uri)\n values['loginout_url_linktext'] = 'Login'\n return values"
]
| [
"0.5380975",
"0.5284373",
"0.51338685",
"0.50877583",
"0.47516084",
"0.4744274",
"0.46639973",
"0.46611938",
"0.46537858",
"0.46502122",
"0.4644335",
"0.46430787",
"0.46318945",
"0.46318626",
"0.4629813",
"0.4629813",
"0.46247694",
"0.4561819",
"0.45509246",
"0.45318717",
"0.45144656",
"0.4482435",
"0.44807014",
"0.44733742",
"0.44557467",
"0.4441548",
"0.44401374",
"0.44289",
"0.44272947",
"0.44252968"
]
| 0.7040058 | 0 |
Test the ability to send http requests and receive responses. | def test_http_request(self):
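        # A plain GET against the live server should succeed with HTTP 200.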
response = requests.get(self.live_server_url)
assert response.status_code == 200 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def test_http_communicator():\n communicator = HttpCommunicator(SimpleHttpApp(), \"GET\", \"/test/?foo=bar\")\n response = await communicator.get_response()\n assert response[\"body\"] == b\"test response\"\n assert response[\"status\"] == 200",
"def test_requests():\n resp = requests.get('http://www.google.com')\n return True if resp.status_code == 200 else False",
"def test_orchestrator_http_simple(self):\n pass",
"def test_requests(self):\n\n text_data = \"\"\"\n BERNARDO: Who's there?\n FRANCISCO: Nay, answer me: stand, and unfold yourself.\n BERNARDO: Long live the king!\n FRANCISCO: Bernardo?\n BERNARDO: He.\n FRANCISCO: You come most carefully upon your hour.\n BERNARDO: 'Tis now struck twelve; get thee to bed, Francisco.\n \"\"\"\n binary_data = text_data.encode()\n file_content = {\"/foo.txt\": binary_data}\n\n # Test all possible combinations of:\n # - whether or not the server supports compression\n # - whether or not the server supports random access\n # - chosen buffering policy\n for allow_gzip in (False, True):\n for allow_range in (False, True):\n with DummyHTTPServer(\n file_content=file_content,\n allow_gzip=allow_gzip,\n allow_range=allow_range,\n ) as server:\n url = server.url(\"/foo.txt\")\n for buffering in (-2, -1, 0, 20):\n self._test_text(url, text_data, buffering)\n self._test_binary(url, binary_data, buffering)",
"def test_request():\n response = requests.get('http://jsonplaceholder.typicode.com/todos')\n assert response.ok",
"def test_get_requests(self):\n response = self.client.open('/api/provisioning/port',\n method='GET')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))",
"def test_connection():\n response = echo_client(\"GET webroot/sample.txt HTTP/1.1\")\n print response\n assert \"HTTP/1.1 200 OK\" in response",
"def test_send_http_request_valid(self):\n na_element = zapi_fakes.FAKE_NA_ELEMENT\n self.mock_object(self.root, '_create_request',\n return_value=('abc', zapi_fakes.FAKE_NA_ELEMENT))\n self.mock_object(netapp_api, 'LOG')\n self.root._opener = zapi_fakes.FAKE_HTTP_OPENER\n self.mock_object(self.root, '_build_opener')\n self.mock_object(self.root, '_get_result',\n return_value=zapi_fakes.FAKE_NA_ELEMENT)\n opener_mock = self.mock_object(self.root._opener, 'open')\n opener_mock.read.side_effect = ['resp1', 'resp2']\n\n self.root.send_http_request(na_element)",
"def test_request():\n return make_response(\"ok\")",
"def test_connection(self):\n req = requests.get(\"http://{}:{}\".format(self.config.options.get(\"Server\", \"ListenAddress\"),\n self.config.options.get(\"Server\", \"Port\")))\n\n self.assertEqual(req.status_code, 200)",
"def test_client_can_do_get_request(self):\n response = self.httpbin.get_my_headers(headers={'User-agent': 'Fake user agent'})\n self.assertEqual(response.request.method, 'GET')\n self.assertEqual(response.status_code, 200)",
"def test_request(self):\n client = RestClient(host=self.host, username='')\n rest_url = 'some/url/'\n\n # Mock good get response\n with responses.RequestsMock() as rsps:\n rsps.add(responses.GET, f'{self.host}/{rest_url}', status=200,\n json={'value':\"good!\"})\n r = client.request('get', rest_url)\n assert r.status_code == 200\n assert r.json()['value'] == 'good!'\n \n # Mock bad get response\n with responses.RequestsMock() as rsps:\n rsps.add(responses.GET, f'{self.host}/{rest_url}', status=401,\n json={'value':\"bad!\"})\n with raises(requests.HTTPError):\n r = client.request('get', rest_url)\n r = client.request('get', rest_url, checkstatus=False)\n assert r.status_code == 401\n assert r.json()['value'] == 'bad!'",
"def test_client_can_do_post_request(self):\n response = self.httpbin_4.test_requests_post_method()\n self.assertEqual(response.request.method, 'POST')\n self.assertEqual(response.status_code, 200)",
"def test_make_request(self, m_requests, m_sleep):\r\n request = testing.DummyRequest({mut.URL_KEY: SAMPLE_URL})\r\n m_response, response_dict = self.mock_response()\r\n m_requests.get.return_value = m_response\r\n self.assertEqual(response_dict, mut.make_request(request))\r\n m_requests.get.assert_called_with(url=SAMPLE_URL)\r\n m_sleep.assert_called_with(mut.SECURITY_SLEEP)",
"async def test_method_lower(http2_serv):\n url = http2_serv\n async with aiosonic.HTTPClient() as client:\n res = await client.request(url, method=\"get\", verify=False)\n assert res.status_code == 200\n assert \"Hello World\" == await res.text()",
"def test_unauthenticated_request(self):\n http = FakeHttp([(FakeResponse(200), {})])\n self.mock.Http.return_value = http\n response, _ = http_utils.request('https://url/', body='test body')\n\n # Ensure that all expected requests were made.\n self.assertEqual(http.replies, [])\n\n self.assertEqual(http.last_body, '\"test body\"')\n self.assertEqual(http.last_headers, {})\n self.assertEqual(response.status, 200)",
"def test_allow(self) -> None:\n response = self.request(\"/\", method=\"HEAD\")\n self.assert_allowed(response, (\"GET\", \"POST\"))",
"def test_urls_work(url):\n with requests.get(url) as r:\n assert r.status_code == 200",
"def _test_good(self,\n the_request_method,\n the_request_uri,\n the_request_headers,\n the_request_body,\n the_response_code,\n the_response_headers,\n the_response_body,\n the_response_content_type):\n\n the_response_is_ok = True\n the_request_principal = \"[email protected]\"\n\n def async_app_service_forwarder_forward_patch(http_client, request, callback):\n self.assertIsNotNone(request)\n\n expected_url = \"http://%s%s\" % (\n self.__class__._app_service,\n the_request_uri\n )\n self.assertEqual(request.url, expected_url)\n\n self.assertIsNotNone(request.method)\n self.assertEqual(request.method, the_request_method)\n\n self.assertIsNotNone(request.headers)\n self.assertEqual(len(request.headers), 1 + len(the_request_headers))\n expected_headers = tornado.httputil.HTTPHeaders(the_request_headers)\n expected_headers[\"Authorization\"] = \"%s %s\" % (\n self.__class__._app_service_auth_method,\n the_request_principal)\n self.assertEqual(request.headers, expected_headers)\n\n response = mock.Mock()\n response.error = None\n response.code = the_response_code\n response.body = the_response_body\n response.headers = tornado.httputil.HTTPHeaders(the_response_headers)\n if response.body:\n response.headers[\"Content-type\"] = the_response_content_type\n response.headers[\"Content-length\"] = str(len(response.body))\n response.request_time = 24\n callback(response)\n\n def on_async_app_service_forward_done(is_ok,\n http_status_code,\n headers,\n body):\n\n self.assertIsNotNone(is_ok)\n self.assertEqual(is_ok, the_response_is_ok)\n\n if not is_ok:\n return\n\n self.assertIsNotNone(http_status_code)\n self.assertEqual(http_status_code, the_response_code)\n\n self.assertIsNotNone(headers)\n\n if the_response_body is None:\n self.assertIsNone(body)\n\n self.assertEqual(headers, the_response_headers)\n else:\n self.assertIsNotNone(body)\n self.assertEqual(body, the_response_body)\n\n self.assertEqual(len(headers), 2 + len(the_response_headers))\n the_expected_headers = tornado.httputil.HTTPHeaders(the_response_headers)\n the_expected_headers[\"Content-type\"] = the_response_content_type\n the_expected_headers[\"Content-length\"] = str(len(body))\n self.assertEqual(headers, the_expected_headers)\n\n name_of_method_to_patch = \"tornado.httpclient.AsyncHTTPClient.fetch\"\n with mock.patch(name_of_method_to_patch, async_app_service_forwarder_forward_patch):\n aasf = async_app_service_forwarder.AsyncAppServiceForwarder(\n the_request_method,\n the_request_uri,\n the_request_headers,\n the_request_body,\n the_request_principal)\n aasf.forward(on_async_app_service_forward_done)",
"def _verify_http_connection(self, ssh_client, ssh_server,\n test_ip, test_port, servers, should_pass=True):\n utils.kill_nc_process(ssh_server)\n url = 'http://%s:%d' % (test_ip, test_port)\n utils.spawn_http_server(ssh_server, port=test_port, message='foo_ok')\n utils.process_is_running(ssh_server, 'nc')\n try:\n ret = utils.call_url_remote(ssh_client, url)\n if should_pass:\n self.assertIn('foo_ok', ret)\n return\n self.assertNotIn('foo_ok', ret)\n except Exception as e:\n if not should_pass:\n return\n self._log_console_output(servers)\n self._log_local_network_status()\n raise e",
"def test_handle_request_get(self):\n # setup\n incoming_message = cast(\n HttpMessage,\n self.build_incoming_message(\n message_type=HttpMessage,\n performative=HttpMessage.Performative.REQUEST,\n to=self.skill_id,\n sender=self.sender,\n method=self.get_method,\n url=self.url,\n version=self.version,\n headers=self.headers,\n body=self.body,\n ),\n )\n\n # operation\n with patch.object(self.logger, \"log\") as mock_logger:\n self.http_handler.handle(incoming_message)\n\n # after\n self.assert_quantity_in_outbox(1)\n\n mock_logger.assert_any_call(\n logging.INFO,\n \"received http request with method={}, url={} and body={!r}\".format(\n incoming_message.method, incoming_message.url, incoming_message.body\n ),\n )\n\n # _handle_get\n message = self.get_message_from_outbox()\n has_attributes, error_str = self.message_has_attributes(\n actual_message=message,\n message_type=HttpMessage,\n performative=HttpMessage.Performative.RESPONSE,\n to=incoming_message.sender,\n sender=incoming_message.to,\n version=incoming_message.version,\n status_code=200,\n status_text=\"Success\",\n headers=incoming_message.headers,\n body=json.dumps({\"tom\": {\"type\": \"cat\", \"age\": 10}}).encode(\"utf-8\"),\n )\n assert has_attributes, error_str\n\n mock_logger.assert_any_call(\n logging.INFO,\n f\"responding with: {message}\",\n )",
"def test_input():\n encoded_text = u'Post test/test/test HTTP/1.1'\n response = echo_client(encoded_text)\n assert '405' in response",
"def test_basic(self):\n request = fake_twisted_request(request_headers={\n b'x-foo': [b'bar'],\n })\n self.assertThat(\n _nevow_request_to_request_map(request),\n ContainsDict({\n 'content_type': Equals(b'application/octet-stream'),\n 'content_length': Equals(0),\n 'character_encoding': Is(None),\n 'headers': Equals({b'Content-Length': [0],\n b'X-Foo': [b'bar'],\n b'Host': [b'example.com']}),\n 'remote_addr': Equals(b'192.168.1.1'),\n 'request_method': Equals(b'GET'),\n 'server_name': Equals(b'example.com'),\n 'server_port': Equals(80),\n 'scheme': Equals(b'http'),\n 'uri': Equals(URL.from_text(u'/one'))}))",
"def test_make_request_method(self, m_requests, m_sleep):\r\n request = testing.DummyRequest({mut.URL_KEY: SAMPLE_URL, \r\n mut.METHOD_KEY: SAMPLE_METHOD})\r\n m_response, response_dict = self.mock_response()\r\n m_requests.post.return_value = m_response\r\n self.assertEqual(response_dict, mut.make_request(request))\r\n m_requests.post.assert_called_with(url=SAMPLE_URL)\r\n m_sleep.assert_called_with(mut.SECURITY_SLEEP)",
"def test_health(self):\n self.assert_request('get', '/_health')",
"def test_for_client():",
"async def test_get_http2(http2_serv):\n url = http2_serv\n connector = TCPConnector(timeouts=Timeouts(sock_connect=3, sock_read=4))\n\n async with aiosonic.HTTPClient(connector) as client:\n res = await client.get(url, verify=False)\n assert res.status_code == 200\n assert \"Hello World\" == await res.text()",
"def basicRequest(self):\n endpoint = \"/foo\"\n\n def verify(request):\n o(request.method).equals(\"GET\")(\"Checking basic request method.\")\n o(request.url).equals(endpoint)(\"Checking basic request url.\")\n request.respond(200)\n self.testServer.respondWith(verify)\n\n server.request(endpoint)\n self.testServer.respond()",
"def test_app(self):\n response = self.client.get('/')\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 200)\n self.assertIn('pong!', data['message'])\n self.assertIn('success', data['status'])",
"def _test(\n self,\n headers: list,\n expected_warning: str = WARN_UNKNOWN,\n status_code: str = \"403\",\n disable_hshc: bool = False,\n ):\n head = [\n (\":scheme\", \"https\"),\n (\":method\", \"GET\"),\n ]\n head.extend(headers)\n\n client = self.base_scenario(\n frang_config=\"http_strict_host_checking true;\",\n requests=[head],\n disable_hshc=disable_hshc,\n )\n self.check_response(client, status_code=status_code, warning_msg=expected_warning)"
]
| [
"0.73358715",
"0.7149893",
"0.706139",
"0.7047892",
"0.6992253",
"0.6875096",
"0.68440574",
"0.6793313",
"0.6736568",
"0.6731907",
"0.6665411",
"0.6582444",
"0.65607095",
"0.6537129",
"0.6513156",
"0.6468829",
"0.6438426",
"0.6429819",
"0.6426431",
"0.6422942",
"0.6403098",
"0.63666826",
"0.6334654",
"0.63290226",
"0.6324378",
"0.63077825",
"0.6306672",
"0.63023496",
"0.62925756",
"0.62650067"
]
| 0.74512917 | 0 |
Test the ability to send and receive messages over WebSocket. | def test_websocket_message(self):
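        # Round-trip a single message over the WebSocket and expect it echoed back unchanged.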
ws = websocket.create_connection(self.live_server_ws_url)
ws.send('test')
response = ws.recv()
ws.close()
assert 'test' == response | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def test_websocket_communicator():\n communicator = WebsocketCommunicator(SimpleWebsocketApp(), \"/testws/\")\n # Test connection\n connected, subprotocol = await communicator.connect()\n assert connected\n assert subprotocol is None\n # Test sending text\n await communicator.send_to(text_data=\"hello\")\n response = await communicator.receive_from()\n assert response == \"hello\"\n # Test sending bytes\n await communicator.send_to(bytes_data=b\"w\\0\\0\\0\")\n response = await communicator.receive_from()\n assert response == b\"w\\0\\0\\0\"\n # Test sending JSON\n await communicator.send_json_to({\"hello\": \"world\"})\n response = await communicator.receive_json_from()\n assert response == {\"hello\": \"world\"}\n # Close out\n await communicator.disconnect()",
"async def test_websocket_application():\n application = URLRouter([path(\"testws/<str:message>/\", KwargsWebSocketApp())])\n communicator = WebsocketCommunicator(application, \"/testws/test/\")\n connected, subprotocol = await communicator.connect()\n # Test connection\n assert connected\n assert subprotocol is None\n message = await communicator.receive_from()\n assert message == \"test\"\n await communicator.disconnect()",
"def test_websocket_mechanics():\n transport = StringTransportWithDisconnection()\n service = hey_joe.WebSocketService(\"127.0.0.1\", 9000)\n protocol = service.buildProtocol(service._hey_joe_addr)\n protocol.transport = transport\n transport.protocol = protocol\n protocol.connectionMade()\n data_to_send = b'GET / HTTP/1.1\\r\\nHost: somewhere_in_the_world:9000\\r\\nConnection: keep-alive, Upgrade\\r\\nUpgrade: websocket\\r\\nSec-WebSocket-Version: 13\\r\\nSec-WebSocket-Key: F76ObkF/aCKX8WkmAgx2OQ==\\r\\n\\r\\n'\n protocol.dataReceived(data_to_send)\n assert transport.value().startswith(b'HTTP/1.1 101 Switching Protocols\\r\\nServer: hendrix')",
"def test_broadcast_message(self):\n\n typhoonae.websocket.broadcast_message('My broadcast message.')",
"def test_send_message(self):\n\n typhoonae.websocket.send_message('1', 'My first message.')\n\n self.assertRaises(\n typhoonae.websocket.BadArgumentError,\n typhoonae.websocket.send_message, 1, 'My second message.')\n\n self.assertRaises(\n typhoonae.websocket.BadArgumentError,\n typhoonae.websocket.send_message, [None], 'My second message.')",
"def test_message(self):\n\n message = typhoonae.websocket.Message(\n {'from': 0, 'body': 'Message body'})\n\n self.assertEqual(0, message.socket)\n self.assertEqual('Message body', message.body)",
"def test_broadcast(self, mock_get, mock_broadcast):\n mock_get.return_value = {'XXX': False}\n token = jwt.encode({'room': '123', 'uuid': 'XXX'}, 'XXXX').decode('utf-8')\n ws = yield self.ws_connect('/socket?token={}'.format(token))\n mock_get.assert_called_with('123')\n ws.write_message('hello')\n yield self.close(ws)\n mock_broadcast.assert_called_with('hello', channel='123', sender='XXX')",
"def test_must_be_subbed_to_send(self) -> None:\n user = self.example_user(\"hamlet\")\n self.login_user(user)\n # Create Saxony as an invite-only stream.\n self.assert_json_success(\n self.common_subscribe_to_streams(user, [\"Saxony\"], invite_only=True)\n )\n\n cordelia = self.example_user(\"cordelia\")\n with self.assertRaises(JsonableError):\n self.send_stream_message(cordelia, \"Saxony\")",
"async def should_be_websocket_welcome(token):\n\n communicator = make_communicator(token)\n connected, _ = await communicator.connect()\n assert connected\n message = await communicator.receive_json_from()\n await communicator.disconnect()\n assert message.get('type') == 'notification'\n assert message.get('code') == 'api-motd'",
"def test_im_chat_messages(self):\n pass",
"async def test_send(self):\n await self.webhook_connection.connect()\n assert self.webhook_connection.is_connected is True\n\n http_message = HttpMessage(\n dialogue_reference=(\"\", \"\"),\n target=0,\n message_id=1,\n performative=HttpMessage.Performative.REQUEST,\n method=\"get\",\n url=\"/\",\n headers=\"\",\n body=\"\",\n version=\"\",\n )\n envelope = Envelope(\n to=\"addr\",\n sender=\"my_id\",\n message=http_message,\n )\n with patch.object(self.webhook_connection.logger, \"warning\") as mock_logger:\n await self.webhook_connection.send(envelope)\n await asyncio.sleep(0.01)\n mock_logger.assert_any_call(\n RegexComparator(\n \"Dropping envelope=.* as sending via the webhook is not possible!\"\n )\n )",
"def test_chat_send_message(self):\n body = SendMessageRequest()\n response = self.client.open(\n '/api/chat/send_message',\n method='POST',\n data=json.dumps(body),\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))",
"def test_basic_connect(self, mock_get, mock_subscribe):\n mock_get.return_value = {'XXX': False}\n token = jwt.encode({'room': '123', 'uuid': 'XXX'}, 'XXXX').decode('utf-8')\n ws = yield self.ws_connect('/socket?token={}'.format(token))\n mock_get.assert_called_with('123')\n self.assertTrue(mock_subscribe.called)\n args, kwargs = mock_subscribe.call_args\n self.assertEqual(args[0], '123')\n self.assertEqual(args[1].uuid, 'XXX')\n yield self.close(ws)",
"def test_allowed_origina(self, mock_get, mock_subscribe):\n mock_get.return_value = {'XXX': False}\n token = jwt.encode({'room': '123', 'uuid': 'XXX'}, 'XXXX').decode('utf-8')\n url = 'ws://localhost:{}/socket?token={}'.format(self.get_http_port(), token)\n headers = {'Origin': 'http://example.com'}\n ws = yield websocket_connect(HTTPRequest(url, headers=headers))\n mock_get.assert_called_with('123')\n self.assertTrue(mock_subscribe.called)\n args, kwargs = mock_subscribe.call_args\n self.assertEqual(args[0], '123')\n self.assertEqual(args[1].uuid, 'XXX')\n yield self.close(ws)",
"def test_postMessage(self): #GIVEN the appropriate environment variables are configured\n testBot = bot.Bot(os.environ['bot_id'], os.environ['token'], os.environ['group_ID'])\n status = testBot.postMessage('Zygium') #WHEN the bot posts a message\n self.assertTrue(status == 202) # a status code of 202 should be returned",
"async def test_chatrooms_accounts(rooms):\n\n # Register all the users.\n for name in USERS:\n username = name\n password = name * 2 + '$12345'\n email = name + '@example.org'\n await attempt_register(username, password, email)\n\n # Login all the users.\n tokens = {}\n for name in USERS:\n username = name\n password = name * 2 + '$12345'\n tokens[name] = await attempt_login(username, password)\n\n # Test profile for all of them.\n for name in USERS:\n await attempt_profile(tokens[name])\n\n # \"erin\" and \"frank\" will logout.\n for name in [\"erin\", \"frank\"]:\n await attempt_logout(tokens[name])\n\n # \"erin\" and \"frank\" are not authorized for the profile endpoint.\n for name in [\"erin\", \"frank\"]:\n await attempt_profile(tokens[name], 401)\n\n # The others are still authorized:\n for name in [\"alice\", \"bob\", \"carl\", \"david\"]:\n await attempt_profile(tokens[name])\n\n ###################################################\n # Now testing the websockets side of the session. #\n ###################################################\n\n # The four still-valid tokens should connect with no issue.\n for name in [\"alice\", \"bob\", \"carl\", \"david\"]:\n await should_be_websocket_welcome(tokens[name])\n\n # The other two, should receive a not-authenticated error.\n for name in [\"erin\", \"frank\"]:\n await should_be_websocket_rejected_because_anonymous(tokens[name])\n\n # Now alice connects and, in the meantime, she should fail\n # to connect again, simultaneously.\n alice_communicator = make_communicator(tokens['alice'])\n alice_connected, _ = await alice_communicator.connect()\n _ = await alice_communicator.receive_json_from()\n assert alice_connected\n await should_be_websocket_rejected_because_duplicated(tokens['alice'])\n\n # Now we destroy the session for alice via logout.\n await attempt_logout(tokens['alice'])\n message = await alice_communicator.receive_json_from()\n # A message will be received: logged-out\n assert message.get('type') == 'notification'\n assert message.get('code') == 'logged-out'\n await alice_communicator.disconnect()",
"async def test_chatroom_broadcast():\n\n # Login all the users.\n tokens = {}\n for name in USERS:\n username = name\n password = name * 2 + '$12345'\n tokens[name] = await attempt_login(username, password)\n\n # Alice, Bob, Carl connect to the server.\n communicators = {}\n for name in ['alice', 'bob', 'carl']:\n communicator = make_communicator(tokens[name])\n communicators[name] = communicator\n connected, _ = await communicator.connect()\n assert connected\n motd = await communicator.receive_json_from()\n assert motd['type'] == 'notification'\n assert motd['code'] == 'api-motd'\n await communicator.send_json_to({'type': 'join', 'room_name': 'family'})\n await asyncio.sleep(0.5)\n # Alice expects 3 joins.\n joined = await communicators['alice'].receive_json_from()\n assert joined['type'] == 'room:notification'\n assert joined['code'] == 'joined'\n assert joined['user'] == 'alice'\n assert joined['you']\n assert joined['room_name'] == 'family'\n joined = await communicators['alice'].receive_json_from()\n assert joined['type'] == 'room:notification'\n assert joined['code'] == 'joined'\n assert joined['user'] == 'bob'\n assert not joined['you']\n assert joined['room_name'] == 'family'\n joined = await communicators['alice'].receive_json_from()\n assert joined['type'] == 'room:notification'\n assert joined['code'] == 'joined'\n assert joined['user'] == 'carl'\n assert not joined['you']\n assert joined['room_name'] == 'family'\n # Bob expects 2 joins.\n joined = await communicators['bob'].receive_json_from()\n assert joined['type'] == 'room:notification'\n assert joined['code'] == 'joined'\n assert joined['user'] == 'bob'\n assert joined['you']\n assert joined['room_name'] == 'family'\n joined = await communicators['bob'].receive_json_from()\n assert joined['type'] == 'room:notification'\n assert joined['code'] == 'joined'\n assert joined['user'] == 'carl'\n assert not joined['you']\n assert joined['room_name'] == 'family'\n # Carl expects 1 join.\n joined = await communicators['carl'].receive_json_from()\n assert joined['type'] == 'room:notification'\n assert joined['code'] == 'joined'\n assert joined['user'] == 'carl'\n assert joined['you']\n assert joined['room_name'] == 'family'\n # Now Alice sends a \"Hello guys\" message, and bob and carl\n # will read it.\n await communicators['alice'].send_json_to({'type': 'message', 'room_name': 'family', 'body': 'Hello guys'})\n message = await communicators['alice'].receive_json_from()\n assert message['type'] == 'room:notification'\n assert message['code'] == 'message'\n assert message['you']\n assert message['user'] == 'alice'\n assert message['room_name'] == 'family'\n assert message['body'] == 'Hello guys'\n message = await communicators['bob'].receive_json_from()\n assert message['type'] == 'room:notification'\n assert message['code'] == 'message'\n assert not message['you']\n assert message['user'] == 'alice'\n assert message['room_name'] == 'family'\n assert message['body'] == 'Hello guys'\n message = await communicators['carl'].receive_json_from()\n assert message['type'] == 'room:notification'\n assert message['code'] == 'message'\n assert not message['you']\n assert message['user'] == 'alice'\n assert message['room_name'] == 'family'\n assert message['body'] == 'Hello guys'\n # Now they all leave the channel.\n for name in ['alice', 'bob', 'carl']:\n await communicators[name].send_json_to({'type': 'part', 'room_name': 'family'})\n await asyncio.sleep(0.5)\n # And they will receive all the part messages.\n parted = await 
communicators['alice'].receive_json_from()\n assert parted['type'] == 'room:notification'\n assert parted['code'] == 'parted'\n assert parted['user'] == 'alice'\n assert parted['you']\n assert parted['room_name'] == 'family'\n parted = await communicators['bob'].receive_json_from()\n assert parted['type'] == 'room:notification'\n assert parted['code'] == 'parted'\n assert parted['user'] == 'alice'\n assert not parted['you']\n assert parted['room_name'] == 'family'\n parted = await communicators['bob'].receive_json_from()\n assert parted['type'] == 'room:notification'\n assert parted['code'] == 'parted'\n assert parted['user'] == 'bob'\n assert parted['you']\n assert parted['room_name'] == 'family'\n parted = await communicators['carl'].receive_json_from()\n assert parted['type'] == 'room:notification'\n assert parted['code'] == 'parted'\n assert parted['user'] == 'alice'\n assert not parted['you']\n assert parted['room_name'] == 'family'\n parted = await communicators['carl'].receive_json_from()\n assert parted['type'] == 'room:notification'\n assert parted['code'] == 'parted'\n assert parted['user'] == 'bob'\n assert not parted['you']\n assert parted['room_name'] == 'family'\n parted = await communicators['carl'].receive_json_from()\n assert parted['type'] == 'room:notification'\n assert parted['code'] == 'parted'\n assert parted['user'] == 'carl'\n assert parted['you']\n assert parted['room_name'] == 'family'\n # And the 3 will disconnect.\n for name in ['alice', 'bob', 'carl']:\n await communicator.disconnect()",
"def test_sendimmessages(self):\n pass",
"def test_new_room_socket(self, mock_create):\n mock_create.return_value = '1234'\n response = self.fetch('/rooms', method='POST', body='')\n with self.assertJSON(response) as result:\n protocol = 'ws' if self.get_protocol() == 'http' else 'wss'\n expected = '{}://localhost:{}/socket'.format(protocol, self.get_http_port())\n self.assertEqual(result['socket'], expected)\n self.assertIn('user', result)\n self.assertIn('token', result)\n user, token = result['user'], result['token']\n info = jwt.decode(token, 'XXXX')\n self.assertEqual(info['uuid'], user)\n self.assertEqual(info['room'], '1234')",
"def test_user_channel(self, mock_get, mock_subscribe):\n mock_get.return_value = {'XXX': False}\n token = jwt.encode({'room': '123', 'uuid': 'XXX'}, 'XXXX').decode('utf-8')\n ws = yield self.ws_connect('/socket?token={}&channel=XXX'.format(token))\n mock_get.assert_called_with('123')\n self.assertTrue(mock_subscribe.called)\n args, kwargs = mock_subscribe.call_args\n self.assertEqual(args[0], 'XXX')\n self.assertEqual(args[1].uuid, 'XXX')\n yield self.close(ws)",
"def test_im_chat_sessions(self):\n pass",
"async def test_connection(self):\n await self.webhook_connection.connect()\n assert self.webhook_connection.is_connected is True",
"def test_create_websocket_url(self):\n\n self.assertEqual(\n 'ws://host:8888/',\n typhoonae.websocket.create_websocket_url())\n\n self.assertEqual(\n 'ws://host:8888/foo',\n typhoonae.websocket.create_websocket_url('/foo'))",
"def test_send_before_connected(connection, writer):\n assert not connection.connected\n connection.send(\"test\")\n assert not writer.used",
"def test_message_send(url):\n test_clear(url)\n admin_tk = channel_user_create_0(url)[0]\n \n test_channels = {\n 'token': admin_tk,\n 'name': 'channel_1',\n 'is_public': True,\n }\n ch_id_resp = requests.post(url + \"channels/create\", json=test_channels)\n ch_id = ch_id_resp.json()\n\n resp = requests.get(url + \"channel/messages\", params={\n 'token': admin_tk,\n 'channel_id': 1,\n 'start': 0 \n })\n channel_msgs_resp = resp.json()\n assert channel_msgs_resp == {\n 'messages': [], \n 'start': 0, \n 'end': 50,\n }\n \n test_message = {\n 'token': admin_tk,\n 'channel_id': ch_id['channel_id'],\n 'message': 'Hello'\n }\n resp = requests.post(url + \"message/send\", json=test_message)\n message_send_resp = resp.json()\n assert message_send_resp['message_id'] == 1\n \n resp = requests.get(url + \"channel/messages\", params={\n 'token': admin_tk,\n 'channel_id': 1,\n 'start': 0, \n })\n channel_msgs_resp = resp.json()\n assert channel_msgs_resp['messages'][0]['message_id'] == 1\n assert channel_msgs_resp['messages'][0]['u_id'] == 1\n assert channel_msgs_resp['messages'][0]['message'] == 'Hello'",
"def test_invalid_channel(self, mock_get, mock_subscribe):\n mock_get.return_value = {'XXX': False}\n token = jwt.encode({'room': '123', 'uuid': 'XXX'}, 'XXXX').decode('utf-8')\n ws = yield self.ws_connect('/socket?token={}&channel=ABC'.format(token))\n self.assertSocketError(ws, 4300, 'Invalid channel.')\n self.assertTrue(mock_get.called)\n self.assertFalse(mock_subscribe.called)",
"def test_other_user_channel(self, mock_get, mock_subscribe):\n mock_get.return_value = {'XXX': False, 'YYY': False}\n token = jwt.encode({'room': '123', 'uuid': 'XXX'}, 'XXXX').decode('utf-8')\n ws = yield self.ws_connect('/socket?token={}&channel=YYY'.format(token))\n mock_get.assert_called_with('123')\n self.assertTrue(mock_subscribe.called)\n args, kwargs = mock_subscribe.call_args\n self.assertEqual(args[0], 'YYY')\n self.assertEqual(args[1].uuid, 'XXX')\n yield self.close(ws)",
"def test_chat_poll_messages(self):\n response = self.client.open(\n '/api/chat/messages',\n method='POST')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))",
"def test_run_without_message(connection, events, loop):\n loop.run_until_complete(connection.run())\n assert events.triggered(\"CLIENT_CONNECT\")\n assert events.triggered(\"CLIENT_DISCONNECT\")",
"def test_explicit_room_channel(self, mock_get, mock_subscribe):\n mock_get.return_value = {'XXX': False}\n token = jwt.encode({'room': '123', 'uuid': 'XXX'}, 'XXXX').decode('utf-8')\n ws = yield self.ws_connect('/socket?token={}&channel=123'.format(token))\n mock_get.assert_called_with('123')\n self.assertTrue(mock_subscribe.called)\n args, kwargs = mock_subscribe.call_args\n self.assertEqual(args[0], '123')\n self.assertEqual(args[1].uuid, 'XXX')\n yield self.close(ws)"
]
| [
"0.7966903",
"0.76768017",
"0.7575018",
"0.7060776",
"0.6980282",
"0.69595164",
"0.6943459",
"0.6832196",
"0.6824839",
"0.6571906",
"0.6556549",
"0.65429413",
"0.6536917",
"0.653471",
"0.65176815",
"0.6514772",
"0.6420771",
"0.64101034",
"0.63527155",
"0.6324924",
"0.6294606",
"0.6278188",
"0.62601876",
"0.6259382",
"0.62313044",
"0.62121546",
"0.6186772",
"0.6158334",
"0.6152321",
"0.6117158"
]
| 0.7992703 | 0 |
Verify that the __version__ in the module is being correctly pulled from the pyproject.toml config | def test_module_version_matches_pyproject_version():
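    # The package __version__ must match the version declared under [tool.poetry] in pyproject.toml.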
version_from_package_init = __version__
# this is so that the test finds the pyproject.toml file when run from the command line or from within Pycharm
this_directory = os.path.dirname(os.path.realpath(__file__))
pyproject_toml_path = os.path.join(this_directory, "..", "pyproject.toml")
with open(pyproject_toml_path) as pyproject_file:
pyproject_contents = pyproject_file.read()
pyproject_meta_data = tomlkit.parse(pyproject_contents)["tool"]["poetry"]
version_from_pyproject = pyproject_meta_data["version"]
assert version_from_package_init == version_from_pyproject | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_version(self) -> None:\n with open(\"pyproject.toml\") as f:\n for line in f:\n if \"version\" in line:\n version = line.split()[-1].replace('\"', \"\")\n break\n self.assertEqual(__version__, version)",
"def test_version():\n with open(\"pyproject.toml\") as f:\n tomllines = f.read().splitlines()\n tomlversion = set([l for l in tomllines if \"version =\" in l])\n initversion = set([f'version = \"{mei2volpiano.__version__}\"'])\n # set is there to catch any duplicate/additional entries\n assert initversion == tomlversion",
"def testCheckPythonModuleVersion(self):\n dependencies_file = self._GetTestFilePath(['dependencies.ini'])\n self._SkipIfPathNotExists(dependencies_file)\n\n dependency_helper = dependencies.DependencyHelper(\n dependencies_file=dependencies_file)\n\n module_object = dependency_helper._ImportPythonModule('os')\n\n result, _ = dependency_helper._CheckPythonModuleVersion(\n 'os', module_object, '__version__', '1.0', '2.0')\n self.assertFalse(result)\n\n # TODO: add test with version with suffix 17.0.0b1",
"def test_version():\n assert(hasattr(tekel, '__version__'))",
"def test_version():\n assert __version__ == \"0.1.0\"",
"def test_version():\n assert __version__",
"def test_version(self):\n version_instance = get_version('kolibri', __file__)\n self.assertIn(version_instance.major_version, kolibri.__version__)",
"def validate_module_versioning(module_name):\n module = sys.modules[module_name]\n\n _validate_continuous_versioning(module)\n _validate_missing_versioned_tags(module)",
"def validate_project_version(config: Dict[str, Any]) -> None:\n spacy_version = config.get(\"spacy_version\", None)\n if spacy_version and not is_compatible_version(about.__version__, spacy_version):\n err = (\n f\"The {PROJECT_FILE} specifies a spaCy version range ({spacy_version}) \"\n f\"that's not compatible with the version of spaCy you're running \"\n f\"({about.__version__}). You can edit version requirement in the \"\n f\"{PROJECT_FILE} to load it, but the project may not run as expected.\"\n )\n msg.fail(err, exits=1)",
"def test_local_version(self):\n self.assertIsInstance(__version__, float)",
"def test_version(self):\n pass",
"def test_version_exists():\n assert ztm.__version__",
"def test_package_version():\n coverage_version = package_version('coverage')\n pytest_version = package_version('pytest')\n\n assert coverage_version is not None\n assert coverage_version < (1000, 0, 0)\n assert pytest_version is not None\n assert pytest_version > (5, 0)",
"def test_version():\n assert pywren.__version__ is not None",
"def main() -> int:\n version: str | None = None\n\n if (path_pyproject := Path(\"pyproject.toml\")).is_file():\n with open(path_pyproject, \"rb\") as fp:\n data = tomllib.load(fp)\n\n try:\n version = data[\"project\"][\"version\"]\n except KeyError:\n pass\n\n if version is None and (path_setup_cfg := Path(\"setup.cfg\")).is_file():\n parser = configparser.ConfigParser()\n parser.read(path_setup_cfg)\n\n try:\n version = parser[\"metadata\"][\"version\"]\n except KeyError:\n pass\n\n if version is None:\n return 1\n print(version)\n return 0",
"def test_get_version(self):\n pass",
"def test_ifVersionIsCorrect():\n \n for name in config.toTest:\n testConfig = dynamicallyLoadModule(name)\n if \"version\" in testConfig.config:\n print \"Version: \"+ testConfig.config[\"name\"]\n yield assertionFunctions.checkIfVersionIsExact, testConfig.config\n \n if \"minimum_version\" in testConfig.config:\n print \"Minimum Version: \"+ testConfig.config[\"name\"]\n yield assertionFunctions.checkIfMinimumVersionIsMet, testConfig.config",
"def test_version() -> None:\n assertion.assert_(Version, nanoqm.__version__)",
"def test_2x_only_python_version_deploy():\n pass",
"def test_release_version():\n pkg_vars = {}\n with open(VERSION_FILE) as f:\n exec(f.read(), pkg_vars) # nosec\n project_version = pkg_vars[\"__version__\"]\n assert (\n RELEASE_TAG == f\"v{project_version}\"\n ), \"RELEASE_TAG does not match the project version\"",
"def test_python_version():\n assert sys.version_info.major == 3",
"def test_semantic_version():\n semantic_version.Version(settings.VERSION)",
"def _check_version(self, project, targetdir):\r\n versionfile = os.path.join(targetdir, 'project.version')\r\n if (os.path.exists(versionfile)):\r\n file_ = open(versionfile, \"r\")\r\n projectname = file_.read().strip()\r\n file_.close()\r\n if (projectname == project.objectname):\r\n return True\r\n return False",
"def _get_version():\n return re.search(r'^__version__\\s*=\\s*[\\'\"]([^\\'\"]*)[\\'\"]',\n _read(\"cfunits/__init__.py\"),\n re.MULTILINE).group(1)",
"def py_versiontest(c):\n pass",
"def test_3x_only_python_versions_deploy():\n pass",
"def test_python_version(self):\n assert 2 == sys.version_info.major\n assert 7 == sys.version_info.minor\n assert 6 <= sys.version_info.micro",
"def check_version():\n reset_flag = False\n try:\n data = du.read_yml(du.DEFAULT)\n if (\n data[\"version\"].split(\".\")[0] != __version__.split(\".\")[0]\n ): # If Version if different from \"1.x.y\" remove data:\n reset_flag = True\n except (KeyError, FileNotFoundError, TypeError):\n reset_flag = True\n\n if reset_flag:\n print(\"Your configuration file version is older than 1.0.0\")\n print(\n \"Your .Experiment file will be removed, please run daf.init to generate an up-to-date file\"\n )\n if os.path.isfile(du.DEFAULT):\n os.remove(du.DEFAULT)\n sys.exit(0)",
"def test_versionString(self):\n self.assertIn(\"%d.%d.%d\" % nevow.__version_info__, nevow.__version__)",
"def test_version_string_consistency_pyproject_toml():\n\n repository_root = os.path.join(os.path.dirname(__file__), '..')\n fixture = os.path.join(repository_root, \"pyproject.toml\")\n\n with open(fixture, \"r\", encoding=\"utf-8\") as f:\n contents = f.read()\n\n match = re.search(r\"^version = (?P<semver>.*)$\", contents, re.MULTILINE)\n actual_version = match[\"semver\"].strip('\"')\n\n assert expected_version == actual_version, \"Expected version string used in pyproject.toml to be consistent with\" \\\n \" that in matchms.__version__\""
]
| [
"0.76714927",
"0.7583335",
"0.70532936",
"0.70007664",
"0.69990474",
"0.6964934",
"0.69035417",
"0.6880883",
"0.6865948",
"0.6860865",
"0.6843608",
"0.6820196",
"0.67528504",
"0.6709303",
"0.6704592",
"0.66768426",
"0.66471404",
"0.663688",
"0.6608777",
"0.65195024",
"0.6516601",
"0.64906245",
"0.6488725",
"0.64610904",
"0.64595187",
"0.64359564",
"0.640269",
"0.6393914",
"0.6379557",
"0.6368462"
]
| 0.7678285 | 0 |
Writes the loss onto the Tensorboard log | def log_loss(self, loss: float, duration: float, iteration: int):
self.train_writer.add_scalar("Time", duration, iteration)
self.train_writer.add_scalar("Loss", loss, iteration) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def write_log(callback, name, loss, batch_no):\n tf.summary.scalar(name,loss)\n tf.summary.scalar(name,batch_no)\n callback.flush()",
"def report(LOGDIR, epoch, e_dict, saver, sess, fh_log):\n # print loss\n print (\"Epoch: %i; Loss: %f; KLd: %f; CE %f\" % (epoch, e_dict[\"loss\"][-1], e_dict[\"KLd\"][-1], e_dict[\"CE\"][-1]))\n fh_log.write(\"%i\\t%0.5e\\t%0.5e\\t%0.5e\\n\" % (epoch, e_dict[\"loss\"][-1], e_dict[\"KLd\"][-1], e_dict[\"CE\"][-1]))",
"def save_loss(self, epoch, step, loss):\n file_path = os.path.join(self.exp_path, 'losses.txt')\n with open(file_path, 'a+') as f:\n f.write('Epoch: ' + str(epoch) + ', step: ' + str(step) + ', loss: ' + str(float(loss)) + '\\n')",
"def _log_loss(y_pred, y):\n y = y.get_label()\n y_pred = y_pred.reshape((len(y), 3))\n return 'logloss', log_loss(y, y_pred)",
"def _create_tensor_board(self):\n self.log_writer = tf.summary.FileWriter(\"logs/%s\" % self.model_dir, self.sess.graph)",
"def to_tensorboard_tf(self, **kwargs) -> 'PlotLosses':\n self.outputs.append(outputs.TensorboardTFLogger(**kwargs))\n return self",
"def to_tensorboard(self, **kwargs) -> 'PlotLosses':\n self.outputs.append(outputs.TensorboardLogger(**kwargs))\n return self",
"def loss_fn(self, targets, outputs, model):",
"def _create_loss(self):\n with tf.device('/cpu:0'):\n with tf.name_scope('loss'):\n self.loss = tf.reduce_mean(\n tf.nn.softmax_cross_entropy_with_logits(\n labels=self.labels_placeholder, \n logits=self.logits, name='loss'))",
"def _loss(self):\n\n cross_entropy = tf.reduce_mean(-tf.log(self.probability + epsilon) * self.y)\n self.loss = cross_entropy\n\n self.accuracy = tf.reduce_mean(\n tf.cast(tf.equal(tf.argmax(self.y, 1), self.prediction), tf.float32))",
"def loss(self, **kwargs):\n pass",
"def loss_func(self, logits, targets):\r\n return -np.sum(targets * np.log(logits)) / logits.shape[0]",
"def loss_op(self):\n return self.loss",
"def add_loss_op(self, preds):\n ### YOUR CODE HERE (~2-4 lines)\n trans = tf.get_variable('trans',\n shape=[Config.n_classes, Config.n_classes],\n initializer=tf.contrib.layers.xavier_initializer())\n log_likelihood, _ = crf_log_likelihood(preds,\n self.labels_placeholder,\n self.length_placeholder,\n trans)\n #log_likelihood = tf.boolean_mask(log_likelihood, self.mask_placeholder)\n loss = tf.reduce_mean(-1.0 * log_likelihood)\n \n ### END YOUR CODE\n return trans, loss",
"def build_loss(self):\n if self.mode != \"encode\":\n total_loss = tf.losses.get_total_loss()\n tf.summary.scalar(\"losses/total\", total_loss)\n\n self.total_loss = total_loss",
"def log_history(save_path, step, loss):\n f = open(os.path.join(save_path ,'loss_history.csv'), 'a')\n f.write(\"%d, %f\\n\" % (step, loss))\n f.close()",
"def loss_fn(y_true,y_pred): \n loss = tf.nn.softmax_cross_entropy_with_logits_v2(y_true,\n y_pred,\n axis=-1,\n )\n loss = tf.reduce_mean(loss,name=\"loss\")\n return loss",
"def loss_(self, batch):\n raise NotImplementedError",
"def write_loss_report(mean_loss, mean_inner_val_loss, mean_outer_val_loss, mean_test_loss, fpath):\n\tloss_report = open(fpath, 'a')\n\tprint(\"{:50} {}\".format(\"Training loss (mse):\", mean_loss), file=loss_report)\n\tprint(\"{:50} {}\".format(\"Inner Validation loss (mse):\", mean_inner_val_loss), file=loss_report)\n\tprint(\"{:50} {}\".format(\"Outer Validation loss (mse):\", mean_outer_val_loss), file=loss_report)\n\tprint(\"{:50} {:.4f}\".format(\"Test loss (mse):\", mean_test_loss), file=loss_report)\n\n\t# Close file\n\tloss_report.close()",
"def tensorboard_log(log_dir, tag, data):\n # Create a file writer for TensorBoard logs\n file_writer = tf.summary.create_file_writer(log_dir)\n file_writer.set_as_default()\n\n # Send to TensorBoard both results\n for i in range(len(data)):\n tf.summary.scalar(tag, data=data[i], step=i)\n file_writer.flush()",
"def save_loss_epoch(self, epoch, losses):\n file_path = os.path.join(self.exp_path, 'mean_losses.txt')\n with open(file_path, 'a+') as f:\n f.write('Epoch: ' + str(epoch) + ', loss: ' + str(float(np.mean(np.array(losses)))) + '\\n')",
"def log_update(self, policy_loss, entropy, kl_divergence, clipping_fraction,\n value_loss, explained_variance, steps):\n\n # Diagnostics\n self.writer.add_scalar(\"Diagnostics/Policy/PolicyLoss\",\n policy_loss,\n self.update_counter)\n self.writer.add_scalar(\"Diagnostics/Policy/Entropy\",\n entropy,\n self.update_counter)\n self.writer.add_scalar(\"Diagnostics/Policy/KLDivergence\",\n kl_divergence,\n self.update_counter)\n self.writer.add_scalar(\"Diagnostics/Policy/ClipFraction\",\n clipping_fraction,\n self.update_counter)\n self.writer.add_scalar(\"Diagnostics/Value/ValueLoss\",\n value_loss,\n self.update_counter)\n self.writer.add_scalar(\"Diagnostics/Value/ValueEstimate\",\n np.mean(self.buffer.values),\n self.update_counter)\n self.writer.add_scalar(\"Diagnostics/Value/ExplainedVariance\",\n explained_variance,\n self.update_counter)\n self.writer.add_scalar(\"Diagnostics/Info/LearningRate\",\n self.lr_pi,\n self.update_counter)\n self.writer.add_scalar(\"Diagnostics/Info/TotalTimesteps\",\n self.update_counter * self.batch_size,\n self.update_counter)\n self.writer.add_scalar(\"Diagnostics/Info/KLDivCoef\",\n self.kl_coef,\n self.update_counter)\n # Training Episodes\n self.writer.add_scalar(\"Training/Episodes/PolicyGradientSteps\",\n steps,\n self.update_counter)\n mean_frames = np.mean(self.buffer.episode_lengths)\n std_frames = np.std(self.buffer.episode_lengths)\n self.writer.add_scalar(\"Training/Episodes/Mean_Frames\",\n mean_frames,\n self.update_counter)\n self.writer.add_scalar(\"Training/Episodes/Std_Frames\",\n std_frames,\n self.update_counter)\n self.writer.add_histogram(\"Training/Episodes/Frames\",\n np.array(self.buffer.episode_lengths),\n self.update_counter)\n mean_reward = np.mean(self.buffer.episode_rewards)\n std_reward = np.std(self.buffer.episode_rewards)\n rews_per_frame = np.array(self.buffer.episode_rewards) / \\\n np.array(self.buffer.episode_lengths, dtype=np.float)\n self.writer.add_scalar(\"Training/Episodes/Mean_Reward\",\n mean_reward,\n self.update_counter)\n self.writer.add_scalar(\"Training/Episodes/Std_Reward\",\n std_reward,\n self.update_counter)\n self.writer.add_scalar(\"Training/Episodes/Reward_per_Frame_Mean\",\n np.mean(rews_per_frame),\n self.update_counter)\n self.writer.add_scalar(\"Training/Episodes/Reward_per_Frame_Std\",\n np.std(rews_per_frame),\n self.update_counter)\n self.writer.add_histogram(\"Training/Episodes/Rewards\",\n np.array(self.buffer.episode_rewards),\n self.update_counter)\n self.writer.add_histogram(\"Training/Episodes/Rewards_per_Frame\",\n rews_per_frame,\n self.update_counter)\n actions = np.array(self.buffer.actions)\n self.writer.add_histogram(\"Training/Action/DeltaVel\",\n actions[:, 0],\n self.update_counter)\n self.writer.add_histogram(\"Training/Action/DeltaLat\",\n actions[:, 1],\n self.update_counter)\n self.writer.add_histogram(\"Training/Values\",\n np.array(self.buffer.values),\n self.update_counter)\n self.writer.add_histogram(\"Training/Avantages\",\n np.array(self.buffer.advantages),\n self.update_counter)\n self.writer.add_histogram(\"Training/GradNorms\",\n np.array(self.grad_norms),\n self.update_counter)\n self.writer.add_histogram(\"Training/Ratio/Ratio\",\n np.array(self.ratios).flatten(),\n self.update_counter)\n self.writer.add_histogram(\"Training/Ratio/ClippedRatio\",\n np.array(self.clipped_ratios).flatten(),\n self.update_counter)\n\n self.writer.flush()\n\n print(\"-\" * 30)\n print(\"PPO Optimization\")\n print(\"Policy_Loss: {}\\t\\t\".format(policy_loss))\n print(\"Value_Loss: 
{}\\t\\t\".format(value_loss))\n print(\"Entropy: {}\\t\\t\".format(entropy))\n print(\"Lr_pi: {}\\t\\t\".format(self.lr_pi))\n print(\"Lr_vf: {}\\t\\t\".format(self.lr_vf))\n print(\"KL_Divergence: {}\\t\\t\".format(kl_divergence))\n print(\"Clip_Fraction: {}\\t\\t\".format(clipping_fraction))\n print(\"Exp_Variance: {}\\t\\t\".format(explained_variance))\n print(\"Mean_Reward: {}\\t\\t\".format(mean_reward))\n print(\"Std_Reward: {}\\t\\t\".format(std_reward))\n print(\"Mean_Frames: {}\\t\\t\".format(mean_frames))\n print(\"Std_Frames: {}\\t\\t\".format(std_frames))\n print(\"Mean_Reward_per_frame: {}\\t\\t\".format(np.mean(rews_per_frame)))\n print(\"Std_Reward_per_frame: {}\\t\\t\".format(np.std(rews_per_frame)))\n print(\"Optimization steps: {}\\t\\t\". format(self.update_counter))\n print(\"-\" * 30)",
"def loss(self, logits, labels):\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n loss = tf.nn.softmax_cross_entropy_with_logits(\n labels=labels,\n logits=logits,\n name='softmax_cross_entropy_loss'\n )\n loss = tf.reduce_mean(loss, name='mean_softmax_cross_entropy_loss')\n\n tf.summary.scalar('mean cross entropy loss', loss)\n\n complexity_cost = self._complexity_cost()\n if complexity_cost is not None:\n loss = tf.add(loss, complexity_cost, name='total_loss')\n tf.summary.scalar('total loss', loss)\n\n ########################\n # END OF YOUR CODE #\n #######################\n\n return loss",
"def setup_loss(self):\n with vs.variable_scope(\"loss\"):\n self.loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self.label_placeholder, logits=self.label_predictions))",
"def _init_loss(self) -> None:\n labels_one_hot = tf.one_hot(self.labels, depth=Data.num_classes)\n cross_entropy_loss = tf.losses.softmax_cross_entropy(labels_one_hot, self.logits)\n # cross_entropy_loss is a scalar\n tf.add_to_collection(tf.GraphKeys.LOSSES, cross_entropy_loss)\n self.loss = tf.add_n(tf.get_collection(tf.GraphKeys.LOSSES))\n self.logger_factory.add_scalar('loss', self.loss, log_frequency=10)\n self.logger_factory.add_scalar('cross_entropy_loss', cross_entropy_loss, log_frequency=25)",
"def push_tensorboard_losses(self, epoch):\n if self.training_losses:\n self.tensorboard_writer.add_scalar('losses/train', self.training_losses[-1], epoch)\n if self.validation_losses:\n self.tensorboard_writer.add_scalar('losses/validation', self.validation_losses[-1], epoch)\n if self.testing_losses:\n self.tensorboard_writer.add_scalar('losses/testing', self.testing_losses[-1], epoch)",
"def add_to_tensorboard(self, writer: SummaryWriter, global_step):\n \n # losses\n kl_pres = torch.sum(torch.tensor(self.things['kl_pres_list'])).item()\n kl_where = torch.sum(torch.tensor(self.things['kl_where_list'])).item()\n kl_what = torch.sum(torch.tensor(self.things['kl_what_list'])).item()\n #\n kl_total = self.things['kl']\n # baseline_loss = self.things['baseline_loss']\n neg_reinforce = -self.things['reinforce_term']\n neg_likelihood = -self.things['likelihood']\n neg_elbo = -self.things['elbo']\n #\n writer.add_scalar('kl/kl_pres', kl_pres, global_step)\n writer.add_scalar('kl/kl_where', kl_where, global_step)\n writer.add_scalar('kl/kl_what', kl_what, global_step)\n writer.add_scalar('loss/kl_total', kl_total, global_step)\n # writer.add_scalar('loss/baseline_loss', baseline_loss, global_step)\n writer.add_scalar('loss/neg_reinforce', neg_reinforce, global_step)\n writer.add_scalar('loss/neg_likelihood', neg_likelihood, global_step)\n writer.add_scalar('loss/neg_elbo', neg_elbo, global_step)\n \n imgs = [x.detach().cpu().numpy() for x in self.things['imgs']]\n canvas = [[x.detach().cpu().numpy() for x in y] for y in self.things['canvas']]\n z_pres = [[x.detach().cpu().item() for x in y] for y in self.things['z_pres']]\n z_pres_prob = [[x.detach().cpu().item() for x in y] for y in self.things['z_pres_prob']]\n id = [[x.detach().cpu().item() for x in y] for y in self.things['id']]\n z_where = [[x.detach().cpu().numpy() for x in y] for y in self.things['z_where']]\n proposal = [[x.detach().cpu().numpy() for x in y] for y in self.things['proposal']]\n object_enc = [[x.detach().cpu().numpy() for x in y] for y in self.things['object_enc']]\n object_dec = [[x.detach().cpu().numpy() for x in y] for y in self.things['object_dec']]\n mask = [[x.detach().cpu().numpy() for x in y] for y in self.things['mask']]\n \n # image = self.things['image']\n # writer.add_image('vis/original', image.detach(), global_step)\n fig = create_fig(imgs, canvas, z_pres, z_pres_prob, z_where, id, object_enc, object_dec, mask, proposal)\n fig.show()\n # fig.show()\n # writer.add_scalar('train', global_step, global_step)\n writer.add_figure('vis/reconstruct', fig, global_step)\n plt.close(fig)",
"def add_loss_op(self, pred):\n loss = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=self.labels_placeholder,\n logits=pred,\n name=\"loss\"\n )\n loss = tf.reduce_mean(loss)\n\n return loss",
"def compute_loss(self):",
"def log_tensorboard(self, callback, names, logs, batch_no):\n\n for name, value in zip(names, logs):\n summary = tf.Summary()\n summary_value = summary.value.add()\n summary_value.simple_value = value\n summary_value.tag = name\n callback.writer.add_summary(summary, batch_no)\n callback.writer.flush()"
]
| [
"0.77510965",
"0.68144965",
"0.68104905",
"0.6748108",
"0.66942436",
"0.6621524",
"0.65469015",
"0.64916056",
"0.6481054",
"0.6480031",
"0.6452032",
"0.6398478",
"0.63887477",
"0.63484746",
"0.6345198",
"0.63233024",
"0.63219935",
"0.6314104",
"0.63047945",
"0.6295101",
"0.62947184",
"0.62433773",
"0.6238215",
"0.62236816",
"0.61774004",
"0.6170181",
"0.61683124",
"0.6145469",
"0.61430067",
"0.61348695"
]
| 0.7034058 | 1 |
Remove a record with a given id from the table. | def remove(table, id_):
table, successful = common.remove_record(table, id_)
if not successful:
ui.print_error_message('Error!')
return table | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def remove(table, id_):\n\n record = common.find_id(table, id_[0])\n if record in table:\n table = common.remove_record(table, record)\n\n return table",
"def delete_record(self, id_: str) -> None:\n instance = self._get(id_)\n self._delete_from_db(instance)",
"def delete_record(self, record_id):\r\n self.record.deleteObject(id=record_id)",
"def delete(self, id):\n\n query = \"DELETE FROM {} WHERE id = {}\".format(self.table, id)\n\n self.remove(query)\n return True",
"def sqlite3_simple_delete_record(data_base, table, id_column, record_id):\n con = sqlite3.connect(data_base)\n cur = con.cursor()\n query = 'DELETE FROM ' + table + ' WHERE ' + id_column + \" = '\" + record_id + \"'\"\n cur.execute(query)\n con.commit()\n cur.close()\n con.close()",
"def delete(table_name, record_id):\n with get_connection() as conn:\n return rethink.table(table_name).get(record_id).delete().run(conn)",
"def delete(self, _id):\n self._db[_id].delete()",
"def destroy_user_record_by_id(record_id):\n session = get_session()\n with session.begin():\n session.query(models.UserAccountRecord).\\\n filter_by(id=record_id).\\\n update({'deleted': True,\n 'deleted_at': datetime.datetime.utcnow(),\n 'updated_at': datetime.datetime.utcnow()})",
"def delete(self, id=None):\n if id is not None:\n self.where('id', '=', id)\n\n sql = self._grammar.compile_delete(self)\n\n return self._connection.delete(sql, self.get_bindings())",
"def delete_item_by_id(self, id):\n response = self.table_connector.delete_item(Key={self.primary_key: id})\n print(response)",
"def delete_by_id(cls, id):\n\t\tbook = Book.query.get(id)\n\t\tdb.session.delete(book)\n\t\tdb.session.commit()",
"def remove_row(self, row_id):",
"def delete_row(self, id):\n logger.debug('Function delete_row start')\n\n cur = self.conn.cursor()\n\n logger.info(\"Deleting row with id: \"+str(id))\n print(\"Deleting row with id: \"+str(id))\n cur.execute(\"DELETE FROM sensor_data WHERE id=?\", (id,))\n\n self.conn.commit()\n\n logger.debug('Function delete_row end')",
"def remove(table, id_):\n count=0\n searched_index=-1\n in_it=False\n for i in table:\n if i[0]==id_:\n searched_index=count\n in_it=True\n count+=1\n\n if in_it: \n table.pop(searched_index)\n else:\n ui.print_error_message(\"ID not found\")\n \n return table",
"def remove(table, id_):\n return common.remove_line(table, id_)",
"def delete(self):\n query = \"DELETE FROM \" + self.table + \" WHERE \" + self.idfield + \"=%s\"\n dbh = dbstuff.getRW(self.dbh_key)\n try:\n c = dbh.cursor()\n c.execute(query, self.id)\n c.close()\n dbh.commit()\n finally:\n dbstuff.release(dbh,self.dbh_key)",
"def remove(table, id_):\n\n # your code\n for i, row in enumerate(table):\n if row[ID] == id_:\n table.pop(i)\n return table\n\n ui.print_error_message(\"Wrong ID!\")\n\n return table\n\n return table",
"def remove(table, id_):\n\n # 3\n for index in range(len(table)):\n if table[index][0] == id_:\n table.pop(index)\n data_manager.write_table_to_file('hr/persons.csv', table)\n return table",
"def remove(table, id_):\n\n common.toremoveid(\"inventory/inventory.csv\",data_manager.get_table_from_file(\"inventory/inventory.csv\"),id_)",
"def remove(table, id_):\n\n entry_index = 0\n for entry in table:\n entry_id_ = entry[0]\n if entry_id_ == id_:\n del table[entry_index]\n entry_index += 1\n data_manager.write_table_to_file(\"model/sales/sales.csv\", table)\n return table",
"def deleteFileRecordByID(file_id):\n session = Queries.createSession()\n try:\n file_db = session.query(FileTable).filter_by(id=file_id).first()\n servers = file_db.server_id[:]\n for server in servers:\n file_db.server_id.remove(server)\n session.commit()\n session.delete(file_db)\n session.commit()\n except sqlalchemy.exc.ArgumentError:\n print 'SQLAlchemy ERROR: Invalid or conflicting function argument is supplied'\n except sqlalchemy.exc.CompileError:\n print 'SQLAlchemy ERROR: Error occurs during SQL compilation'\n finally:\n session.close()",
"def delete_by_id(cls, id):\n\t\tauthor = Author.query.get(id)\n\t\tauthor.saved = False\n\t\tdb.session.commit()",
"def remove_employee(self, id):\n cursor = self.dbconnect.get_cursor()\n try:\n cursor.execute('DELETE FROM employee WHERE employeeID=%s', (id,))\n self.dbconnect.commit()\n except(Exception, self.dbconnect.get_error()) as error:\n self.dbconnect.rollback()\n raise Exception('\\nUnable to remove Employee!\\n(%s)' % (error))",
"def delete(self, id):\n self.cursor.execute(\"DELETE FROM Book WHERE Id = ?\", (id,))\n self.connection.commit()",
"def remove(table, id_):\n\n # your code\n\n key = common.check_for_key(id_,table)\n\n if key == None:\n ui.print_error_message('Key does not exist')\n else:\n table.pop(key)\n data_manager.write_table_to_file('hr/persons.csv', table) \n\n #print(table)\n return table",
"def delete(self, id):\n raise NotImplementedError",
"def remove(table, id_):\n\n for element in table:\n if id_ == element[0]:\n table.remove(element)\n\n return table",
"def delRR(self, name, record_id):\n reply = self.rpc.removeZoneRecord(self.username,\n self.password,\n self.domain,\n name,\n int(record_id))\n if reply != \"OK\":\n raise Exception(\"RPC returned error: \" + reply)",
"def rpc_database_delete_row_by_id(self, row_id):\n\t\ttable = DATABASE_TABLE_OBJECTS.get(self.path.split('/')[-2])\n\t\tassert table\n\t\tsession = db_manager.Session()\n\t\ttry:\n\t\t\tsession.delete(db_manager.get_row_by_id(session, table, row_id))\n\t\t\tsession.commit()\n\t\tfinally:\n\t\t\tsession.close()\n\t\treturn",
"def _remove_record(self, model, row_id) -> bool:\n try:\n model = self.session.query(model).filter_by(id=row_id).first()\n if model:\n self.session.delete(model)\n self.session.commit()\n\n return True\n else:\n return False\n\n except Exception:\n self.session.rollback()\n\n raise"
]
| [
"0.80548465",
"0.8014713",
"0.76164806",
"0.7429268",
"0.7287538",
"0.72552764",
"0.70760125",
"0.70389813",
"0.70386046",
"0.6974744",
"0.69381046",
"0.69041747",
"0.6890031",
"0.68539524",
"0.68431026",
"0.68202865",
"0.68175966",
"0.68118525",
"0.68029445",
"0.67899823",
"0.6776024",
"0.677244",
"0.6765055",
"0.6706278",
"0.6701502",
"0.6692428",
"0.667431",
"0.6670608",
"0.6649756",
"0.6634493"
]
| 0.8177736 | 0 |
Check how many different games are made by each manufacturer. Return a dictionary where the key is the manufacturer and the value is the number of games made by it. | def get_counts_by_manufacturers(table):
manufacturers_dict = {}
for item in table:
try:
if item[2]:
try:
if item[2] in manufacturers_dict.keys():
manufacturers_dict[item[2]] += 1
else:
manufacturers_dict[item[2]] = 1
except IndexError:
pass
else:
raise ValueError
except ValueError:
pass
return manufacturers_dict | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_counts_by_manufacturers(table):\n manufacturers = []\n for item in table:\n if item[2] not in manufacturers:\n manufacturers.append(item[2])\n\n manufacturers_games = {}\n\n for record in manufacturers:\n games_counter = 0\n for item in table:\n if item[2] == record:\n games_counter += 1\n manufacturers_games[record] = games_counter\n\n return manufacturers_games",
"def rank_by_manufacturer(table):\n\n manufacturers_counts = {}\n product_id_index = 3\n amount_sold_index = 4\n id_index = 0\n manufacturer_index = 2\n\n store_table = store.get_table()\n store.check_table(store_table)\n\n for record in table:\n product_id = record[product_id_index]\n amount_sold = record[amount_sold_index]\n for game in store_table:\n game_id = game[id_index]\n manufacturer = game[manufacturer_index]\n if product_id == game_id:\n if manufacturer in manufacturers_counts:\n manufacturers_counts[manufacturer] += int(amount_sold)\n else:\n manufacturers_counts[manufacturer] = int(amount_sold)\n\n return manufacturers_counts",
"def shopee_dict(quote, results=100):\n\n brands = {}\n total_items = 0\n for brand in get_brands(quote, results):\n if brand in brands:\n brands[brand] += 1\n else:\n brands[brand] = 1\n total_items += 1\n return brands, total_items",
"def histogram_genres(our_data):\n genre_list = []\n for album in our_data:\n genre_list.extend(genre.strip() for genre in album['genre'].split(','))\n genre_dict = {}\n for genre in genre_list:\n if genre in genre_dict:\n genre_dict[genre] += 1\n else:\n genre_dict[genre] = 1\n return genre_dict",
"def tx_counts(self) -> Dict[str, Dict[str, int]]:\n agent_pbk_to_name = self.game.configuration.agent_pbk_to_name\n result = {agent_name: 0 for agent_name in agent_pbk_to_name.values()}\n results = {\"seller\": result.copy(), \"buyer\": result.copy()}\n\n temp_game = Game(self.game.configuration, self.game.initialization)\n\n # compute the partial scores for every agent after every transaction\n # (remember that indexes of the transaction start from one, because index 0 is reserved for the initial scores)\n for idx, tx in enumerate(self.game.transactions):\n temp_game.settle_transaction(tx)\n results[\"seller\"][agent_pbk_to_name[tx.seller_pbk]] += 1\n results[\"buyer\"][agent_pbk_to_name[tx.buyer_pbk]] += 1\n\n return results",
"def get_penalty_counts(game):\n game_type = get_game_type_from_season_type(game)\n\n pen_counts = dict()\n pen_counts['home'] = defaultdict(int)\n pen_counts['road'] = defaultdict(int)\n\n game_events_src_path = os.path.join(\n CONFIG['base_data_dir'], 'game_events',\n str(game['season']), str(game_type), \"%d.json\" % game['game_id'])\n events_data = json.loads(open(game_events_src_path).read())\n\n for period in events_data:\n for event in events_data[period]:\n if event['type'] == 'penalty':\n duration = int(event['data']['duration'] / 60)\n if event['data']['team'] == 'home':\n pen_counts['home'][duration] += 1\n else:\n pen_counts['road'][duration] += 1\n\n return pen_counts",
"def num_producers(self):\n producers = self.info_wells.groupby('well_type').get_group('prod')\n return producers['well'].nunique()",
"def countUMIs(barcode_dictionary):\n new_dict = {}\n for i in barcode_dictionary:\n \tnew_dict[i] = (sum(Counter(barcode_dictionary[i][1:]).values()), len(Counter(barcode_dictionary[i][1:]).keys()))\n return new_dict",
"def count_choices(self) -> dict:\r\n times_chosen = dict()\r\n\r\n # exclude the optimistic value when counting choices\r\n for arm, values in self.rewards.items():\r\n if self.optim_c not in values:\r\n times_chosen[arm] = len(values)\r\n else:\r\n times_chosen[arm] = 0\r\n\r\n return times_chosen",
"def count_correct_tags(self):\n correct_dict = {}\n for gold_tag, predict_tag in zip(self.golden_tags, self.predict_tags):\n if gold_tag == predict_tag:\n if gold_tag not in correct_dict:\n correct_dict[gold_tag] = 1\n else:\n correct_dict[gold_tag] += 1\n\n return correct_dict",
"def get_statistic():\n\n data = get_data_from_URL(url)[\"data\"]\n results = dict()\n\n for genre in data:\n # get information about one genre\n genre_url = f\"{url}/{genre['id']}/artists\"\n genre_data = get_data_from_URL(genre_url)[\"data\"]\n\n nb_fan = 0\n for artist in genre_data:\n # get information about one artist (number of fans)\n art_data = get_data_from_URL(\n f'https://api.deezer.com/artist/{artist[\"id\"]}')\n nb_fan += art_data[\"nb_fan\"]\n\n # add to dictionary received information\n results[genre[\"name\"]] = (len(genre_data), nb_fan)\n\n return results",
"def user_counts(**kwargs):\n attributes = [\"ping_type\", \"os\", \"app_version\", \"app_build_id\", \"channel\"]\n fixed_attributes = [\"app_version\", \"channel\"]\n cubed_attributes = [x for x in attributes if x not in fixed_attributes]\n\n return dict(\n attributes=\",\".join(attributes),\n cubed_attributes=cubed_attributes,\n attribute_combinations=compute_datacube_groupings(cubed_attributes),\n **kwargs,\n )",
"def count_matches(reading):\n dictionary = {}\n the_list = list()\n with open(reading, \"r\") as text_file:\n for lines in text_file:\n sentence = lines.strip()\n if not sentence or sentence.find(\"v\") < 0: continue\n else:\n tup = tuple(sentence.split(\" v \"))\n teams = frozenset(tup)\n the_list.append(teams)\n\n for keys in the_list:\n dictionary[keys] = dictionary.get(keys, 0) + 1\n\n return dictionary",
"def getCounts(training_data, test_row, k):\n neighbors = getNeighbors(training_data, test_row, k)\n output_vals = [row[-1] for row in neighbors]\n\n counts = dict()\n\n for i in output_vals:\n counts[i] = counts.get(i, 0) + 1\n \n return counts",
"def ranking(availability_info,mapds):\n rank=Counter(dict())\n for key in availability_info.keys():\n rank[mapds[key]]=len(availability_info[key])\n #print rank\n return rank",
"def counts(sequence):\n # initialize the countainer\n count = defaultdict(int)\n # iterates through sequence elements\n for item in sequence:\n # if element not in counts add 0\n # else add 1\n count[item] = count.get(item, 0) + 1\n return dict(count)",
"def create_count_map(self) -> Dict[int, int]:\n res: Dict[int, int] = {}\n for sequence_data in self.model.values():\n sequence_data: NGramsSequence = cast(NGramsSequence, sequence_data)\n for count in sequence_data.next_count.values():\n count: int = cast(int, count)\n if count not in res:\n res[count] = 0\n res[count] += 1\n self.count_map = res\n logger.success('created count map')\n return res",
"def resultCounter(detections):\n counter = 0\n for attribute, value in classIterator(detections):\n if 'crease' in attribute:\n counter += len(value)\n return counter",
"def count_kmers(seq, k=3):\n # Start with an empty dictionary\n counts = {}\n # Calculate how many kmers of length k there are\n num_kmers = len(str(seq)) - k + 1\n # Loop over the kmer start positions\n for i in range(num_kmers):\n # Slice the string to get the kmer\n kmer = str(seq)[i:i+k]\n # Add the kmer to the dictionary if it's not there\n if kmer not in counts:\n counts[kmer] = 0\n # Increment the count for this kmer\n counts[kmer] += 1\n # Return the final counts\n return counts",
"def get_kmer_counts(kmer_list, kmer_counts):\n counts = defaultdict(int)\n for kmer in kmer_list:\n counts[kmer] = counts.get(kmer, 0) + kmer_counts[kmer]\n return counts",
"def play_n_game(n_games, n_toss):\n results_list = []\n for _ in range(n_games):\n results_list.append(play_one_game(n_toss))\n dict_proba = {}\n for j in range (n_toss + 1):\n if results_list.count(j) != 0:\n dict_proba[j] = results_list.count(j)/n_games\n else:\n continue\n return dict_proba",
"def _get_number_of_gpu_devices_connected(self):\n gpu_devices = self._get_gpu_pci_devices()\n gpu_devices_count = len(gpu_devices)\n return {'pci_gpu_devices': gpu_devices_count}",
"def view_counts():\n out = {}\n for i in range(len(classes)):\n out.update({decoded[i]: storage.count(classes[i])})\n return out",
"def get_suits(hand, board):\n suits = {}\n for card in hand + board:\n if card[1] in suits:\n suits[card[1]] += 1\n else:\n suits[card[1]] = 1\n return suits",
"def get_average_by_manufacturer(table, manufacturer):\n games = 0\n manufacturer_apperance = 0\n\n for i in range(len(table)):\n try:\n if manufacturer.lower() == table[i][2].lower():\n games += int(table[i][-1])\n manufacturer_apperance += 1\n except (IndexError, ValueError):\n pass\n\n try:\n avrg_games_by_manufacturer = games / manufacturer_apperance\n except ZeroDivisionError as err:\n ui.print_error_message(\"No entry with that manufacturer\")\n\n avrg_games_by_manufacturer = round(avrg_games_by_manufacturer, 2)\n\n return avrg_games_by_manufacturer",
"def summarize(self) -> Mapping[str, int]:\n return dict(\n compounds=self.count_compounds(),\n side_effects=self.count_side_effects(),\n indications=self.count_indications(),\n umls=self.count_umls(),\n )",
"def final_kmer_counts(seq_dict, num_seqs, alphabet, min_k, max_k):\n counted = Counter()\n len_seqs = 0\n for name, sequence in seq_dict.items():\n seq = seq_cleaner(sequence, alphabet)\n len_seqs += len(seq)\n counted.update(count_kmers_cython(seq, min_k, max_k))\n final_count = {k: (v // num_seqs) for k, v in counted.items()}\n # total_len = (len_seqs // num_seqs)\n return final_count, len_seqs",
"def get_numbers(hand, board):\n numbers = {}\n for card in hand + board:\n if card[0] in numbers:\n numbers[card[0]] += 1\n else:\n numbers[card[0]] = 1\n return numbers",
"def count_mers(sequence, alphabet, kmin, kmax):\n alphabet = set(alphabet)\n counts = defaultdict(int)\n for kmer in get_kmers_from_sequence(sequence, kmin, kmax):\n if set(kmer).issubset(alphabet):\n counts[kmer] = counts.get(kmer, 0) + 1\n return counts",
"def get_popular_locations_dict(film_set):\n popular_locations = dict()\n for film in film_set:\n try:\n location = film[1].split(', ')[-1]\n if location in popular_locations.keys():\n popular_locations[location] += 1\n else:\n popular_locations[location] = 1\n except (TypeError, AttributeError, IndexError):\n continue\n\n return popular_locations"
]
| [
"0.80333066",
"0.6666297",
"0.60503423",
"0.5857039",
"0.5830689",
"0.5794255",
"0.57780874",
"0.57385373",
"0.5657014",
"0.5654159",
"0.56414384",
"0.5587509",
"0.5579291",
"0.55584073",
"0.55410427",
"0.55083615",
"0.5503112",
"0.54968363",
"0.54839206",
"0.5473748",
"0.54694384",
"0.54418594",
"0.5441768",
"0.54221773",
"0.539172",
"0.5388602",
"0.53860354",
"0.53749603",
"0.53744954",
"0.5370114"
]
| 0.73525363 | 1 |
Give back the average amount of games in stock for a given manufacturer. Count how many different games by the given manufacturer are in stock and how many copies of them there are in total, and compute the average amount from that. | def get_average_by_manufacturer(table, manufacturer):
games = 0
manufacturer_apperance = 0
for i in range(len(table)):
try:
if manufacturer.lower() == table[i][2].lower():
games += int(table[i][-1])
manufacturer_apperance += 1
except (IndexError, ValueError):
pass
try:
avrg_games_by_manufacturer = games / manufacturer_apperance
except ZeroDivisionError as err:
ui.print_error_message("No entry with that manufacturer")
avrg_games_by_manufacturer = round(avrg_games_by_manufacturer, 2)
return avrg_games_by_manufacturer | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_average_by_manufacturer(table, manufacturer):\n games_sum = 0\n games_occurance = 0\n for item in table:\n if item[2] == manufacturer:\n games_sum += int(item[4])\n games_occurance += 1\n\n average_amount = games_sum / games_occurance\n\n return average_amount",
"def total_sold(album):\n return album.total_sold",
"def stock_average(stock):\n closing_price=stock['Close']\n average=stats.mean(closing_price)\n return average",
"def get_avg(all_stock_data):\n try:\n sum_close_vol = 0.0\n sum_vol = 0.0\n for item in all_stock_data:\n adj_close = item[1]\n volume = item[2]\n sum_close_vol += adj_close * volume\n sum_vol += item[2]\n return sum_close_vol / sum_vol\n\n except Exception as e:\n print(e)\n exit()",
"def average(self):\n return self.summation() / self.count()",
"def rank_by_manufacturer(table):\n\n manufacturers_counts = {}\n product_id_index = 3\n amount_sold_index = 4\n id_index = 0\n manufacturer_index = 2\n\n store_table = store.get_table()\n store.check_table(store_table)\n\n for record in table:\n product_id = record[product_id_index]\n amount_sold = record[amount_sold_index]\n for game in store_table:\n game_id = game[id_index]\n manufacturer = game[manufacturer_index]\n if product_id == game_id:\n if manufacturer in manufacturers_counts:\n manufacturers_counts[manufacturer] += int(amount_sold)\n else:\n manufacturers_counts[manufacturer] = int(amount_sold)\n\n return manufacturers_counts",
"def average(self):\n total = 0\n for t in self.memory:\n total += t.reward\n return total/self.__len__()",
"def showAverageBetUsed(self) :\n averageBetUsed = 0\n for level in self.level_history :\n averageBetUsed += level.bet\n averageBetUsed = averageBetUsed/len(self.level_history)\n Scenario.messageGetAverageBetUsed(averageBetUsed)",
"def average_result(set_):\n db = TinyDB(CARD_DATA_FILE)\n card_data = db.table('card_data')\n set_results = card_data.search(where('set') == set_)\n print(set_results)\n c = r = e = l = g_c = g_r = g_e = g_l = total = 0\n # TODO: can revamp with some collections.counter usage, probably\n for entry in set_results:\n total += 1\n c += entry['commons']\n r += entry['rares']\n e += entry['epics']\n l += entry['legendaries']\n g_c += entry['g_commons']\n g_r += entry['g_rares']\n g_e += entry['g_epics']\n g_l += entry['g_legendaries']\n\n print('Average of: {} commons, {} rares, {} epics, {} legendaries \\n'\n ' {} golden commons, {} golden rares, {} golden epics, {} '\n 'golden legendaries'.format(c/total, r/total, e/total, l/total, g_c/total, g_r/total, g_e/total, g_l/total))\n\n pass",
"def average(data, event):\n if len(data) == 0:\n return 0\n\n score = 0\n # scores = []\n count = 0\n for i in data:\n count += 1\n if event == 'Swim' or event == 'Run':\n num = time_seconds(i[event])\n #print(\"first if\")\n #Sprint(num)\n else:\n num = int(i[event])\n #print(\"second if\")\n #print(num)\n #scores[count] =\n #print(\"end of loop count\" + str(count))\n score += num\n #print (\"score\" + str(score))\n\n # total = 0\n # for x in range(0,len(scores)):\n # total += scores[x]\n score = float(score)\n\n return score / count",
"def average_rating(self):\n ratings = Rating.objects.filter(game=self)\n\n if len(ratings):\n # Sum all of the ratings for the game\n total_rating = 0\n for rating in ratings:\n total_rating += rating.value\n\n # Calculate the averge and return it.\n average = total_rating / len(ratings)\n return average\n\n # else: \n return 0",
"def get_average(data):\n average = sum(data) / len(data)\n\n return average",
"def get_average(self, s_freq, e_freq):\n s_ind = self.get_bin(s_freq)\n e_ind = self.get_bin(e_freq)\n lst = self.mags[s_ind:e_ind+1]\n try:\n avg = sum(lst)/len(lst)\n except:\n print(s_ind, e_ind)\n print('werid stuff')\n avg = 0\n return avg",
"def average_damage(self) -> float:\r\n number_of_dice = int(self.damage.split(\"d\")[0])\r\n damage_of_dice = int(self.damage.split(\"d\")[1])\r\n average_damage = (number_of_dice + number_of_dice * damage_of_dice) / 2\r\n return average_damage",
"def get_num_of_shares(stock, investment):\n return int(investment // float(stock['Price']))",
"def get_counts_by_manufacturers(table):\n manufacturers = []\n for item in table:\n if item[2] not in manufacturers:\n manufacturers.append(item[2])\n\n manufacturers_games = {}\n\n for record in manufacturers:\n games_counter = 0\n for item in table:\n if item[2] == record:\n games_counter += 1\n manufacturers_games[record] = games_counter\n\n return manufacturers_games",
"def average(data):\r\n sum =0\r\n for i in data:\r\n sum+=i\r\n return sum/len(data)",
"def get_mean_volume(symbol):\n df = pd.read_csv(\"data/{}.csv\".format(symbol)) # read in data\n # Quiz: Compute and return the mean volume for this stock\n return df['Volume'].mean()",
"def get_average_sales(data):\n print(\"Calculating stock data...\\n\")\n avg_sales = []\n for list in data:\n int_list_avg = sum(int(item) for item in list) / len(list)\n avg_plus_extra = round(int_list_avg * 1.1)\n avg_sales.append(avg_plus_extra)\n\n return avg_sales",
"def get_average(self):\n self.avg = math.floor((self.maths + self.phy + self.che) / 3, )\n self.assign_grade()\n return self.avg\n # End of method get_average",
"def calculate_averages(data):\n def mean(item_key):\n all_items = [i[item_key] for i in data]\n return sum(all_items)/float(len(all_items))\n\n return {\n \"mean_error_count\": mean('error_count'),\n \"mean_success_count\": mean('success_count'),\n \"mean_mean_response_time\": mean('mean_respone_time'),\n \"mean_data_sent_mb\": mean('data_sent_mb'),\n }",
"def get_average_durability_by_manufacturers(table):\n\n average_durability = {}\n\n for i in range(len(table)):\n table[i][4] = int(table[i][4])\n\n manufacturers = [name[2] for name in table]\n\n single_manufacturers = list(set(manufacturers))\n\n for i in range(len(single_manufacturers)):\n durability_sum = 0\n count = 0\n for j in range(len(manufacturers)):\n if single_manufacturers[i] == manufacturers[j]:\n count += 1\n durability_sum += int(table[j][4])\n average_durability[single_manufacturers[i]] = durability_sum / count\n\n return average_durability",
"def get_avg_score(game_id):\r\n\r\n scores = []\r\n game = Game.query.get(game_id)\r\n for rating in game.ratings:\r\n scores.append(rating.score)\r\n \r\n avg_score = sum(scores)/len(scores)\r\n \r\n \r\n return avg_score",
"def aveVolumeCalc(ins, date):\n cal = ins.Currency().Calendar()\n enddate = cal.AdjustBankingDays(date, 0)\n startdate = cal.AdjustBankingDays(date, AVERAGING_PERIOD)\n\n prices=[]\n histprices = acm.FPrice.Select(\"instrument = %s and market = '%s' \\\n and day > '%s' and day <='%s'\" % \n (ins.Oid(), DAILY_MARKET, startdate, enddate))\n \n for price in histprices:\n settle = price.Settle()\n if settle >= 0:\n prices.append(settle)\n \n #upgrade 2013 fix for failure during run - acm.Math().AverageOf seems buggy\n try:\n avgprice = (sum(prices)/len(prices))\n except ZeroDivisionError:\n avgprice = 0\n \n #avgprice = acm.Math().AverageOf(prices, None)\n \n #Overwrite today's price if you find it \n newPrice = acm.FPrice.Select01(\"instrument = %s and market = '%s' and day = %s\" % \n (ins.Oid(), THREE_MONTH_MARKET, enddate),\n 'NaN')\n if not newPrice:\n newPrice = acm.FPrice()\n newPrice.Instrument(ins)\n newPrice.Day(enddate)\n newPrice.Market(THREE_MONTH_MARKET)\n newPrice.Currency(ins.Currency())\n\n newPrice.Settle(avgprice)\n try:\n newPrice.Commit()\n print 'INFO: %s price for %s was created on %s' %(THREE_MONTH_MARKET, ins.Name(), date)\n except Exception, err:\n print 'ERROR: %s price for %s did not commit: %s' %(THREE_MONTH_MARKET, ins.Name(), str(err))\n \n return newPrice",
"def get_average_rating(self):\n count = 0\n total = 0\n num_books = len(self.books)\n if num_books > 0:\n for rating in self.books.values():\n if rating:\n count += 1\n total += rating\n average = total / count\n if count > 0:\n return average\n else:\n print(\"Books with ratings not found for user {user}\".format(user=self.name))",
"def calculateAverage(self, data):\n\n nValidTrials = data['nValid'][-1]\n nRewardTrials = data['nRewarded'][-1]\n return float(nRewardTrials)/nValidTrials",
"def avarage_for_group(data: Dict[int, int]) -> float:\n values = data.values()\n summary = sum(values)\n return summary // len(data)",
"def _mean(items):\n return sum(items) / len(items)",
"def average():\n dict_ = {}\n lst = []\n average_cost = {}\n for book in root.findall('Book'):\n publisher = book.find('Publisher').text\n price = book.find('Price').text\n lst.append(publisher)\n if publisher not in dict_:\n dict_[publisher] = float(price)\n else:\n dict_[publisher] += float(price)\n publishers = {i: lst.count(i) for i in lst}\n for key1, value1 in dict_.items():\n for key2, value2 in publishers.items():\n if key1 == key2:\n average_cost[key1] = round(value1 / value2, 2)\n return average_cost",
"def average(self):\n return (self.current + self.last) / 2.0"
]
| [
"0.7342873",
"0.6410505",
"0.6404865",
"0.6355063",
"0.60661423",
"0.5972831",
"0.5928848",
"0.58975023",
"0.5817373",
"0.5790253",
"0.578803",
"0.57879853",
"0.5711856",
"0.569899",
"0.56955236",
"0.5678931",
"0.56765807",
"0.5669755",
"0.56412584",
"0.5638682",
"0.56296676",
"0.55920374",
"0.5578212",
"0.55628693",
"0.55435604",
"0.55325705",
"0.552293",
"0.55206484",
"0.55190986",
"0.5501985"
]
| 0.7130538 | 1 |
Checks whether the lattice is binary or not, i.e. whether every vertex except the bottom covers at most two elements and is covered by at most two elements | def is_binary(self):
for element in self:
if element != self.bottom and not self.isa_binary_element(element):
return False
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_upper_triangular(self):\n self.check_square()\n\n for i in range(self.rows):\n for j in range(i):\n if self[i, j] != 0:\n return False\n return True",
"def check_bp(self):\n return self.min_basepairs <= self.seqdata.basepairs <= self.max_basepairs",
"def check_boundary(self, width, height):\r\n if 0 <= self.head[0] + self.direction[0]*10 <= width - 10 and 0 <= self.head[1] + self.direction[1]*10 <= height - 10:\r\n return True\r\n else:\r\n return False",
"def _unbalanced(self):\n if self.internal():\n if self.full():\n if abs(self._leftchild._height-self._rightchild._height) >= 2:\n return True\n elif self._leftchild and not self._rightchild:\n if self._leftchild._height >= 2:\n return True\n elif self._rightchild._height >= 2:\n return True\n return False",
"def _in_bounds(self, x, y):\r\n return 0 <= x < 8 and 0 <= y < 8",
"def isa_binary_element(self, element):\n\n return len(self.above(element)) <= 2 and len(self.under(element)) <= 2",
"def check_boundary(self,x):\n b_cells = np.zeros(self.n_c)\n b_cells[self.n_C:] = 1\n vBC = b_cells[self.tris]\n considered_triangles = vBC.sum(axis=1) == 2\n add_extra = ((self.Angles*(1-vBC)>np.pi/2).T*considered_triangles.T).T\n if add_extra.any():\n I,J = np.nonzero(add_extra)\n for k,i in enumerate(I):\n j = J[k]\n xs = x[self.tris[i]]\n re = xs[np.mod(j-1,3)] - xs[np.mod(j+1,3)]\n re = re/np.linalg.norm(re)\n re = np.array([re[1],-re[0]])\n rpe = xs[j]\n x_new = 2*np.dot(xs[np.mod(j-1,3)]-rpe,re)*re + rpe\n x = np.vstack((x,x_new))\n self.n_c = x.shape[0]\n self._triangulate(x)\n self.assign_vertices()\n\n C = get_C_boundary(self.n_c,self.CV_matrix)\n #\n # #Remove extra cells\n # keep_mask = C[self.n_C:, :self.n_C].sum(axis=1)>0 #I'm assuming this is the same thing. This removes all boundary centroids that are not connected to at least one real centroid.\n # if keep_mask.any():\n # c_keep = np.nonzero(keep_mask)[0]\n # x = np.concatenate((x[:self.n_C],x[c_keep + self.n_C]))\n # self.n_c = x.shape[0]\n # self._triangulate(x)\n # self.assign_vertices()\n #\n\n #Remove all boundary particles not connected to exactly two other boundary particles\n remove_mask = C[self.n_C:, self.n_C:].sum(axis=1)!=2\n if remove_mask.any():\n c_keep = np.nonzero(~remove_mask)[0]\n x = np.concatenate((x[:self.n_C],x[c_keep + self.n_C]))\n self.n_c = x.shape[0]\n self._triangulate(x)\n self.assign_vertices()\n self.Angles = tri_angles(x, self.tris)\n #\n # remove_mask = C[self.n_C:, self.n_C:].sum(axis=1)==0\n # if remove_mask.any():\n # c_keep = np.nonzero(~remove_mask)[0]\n # x = np.concatenate((x[:self.n_C],x[c_keep + self.n_C]))\n # self.n_c = x.shape[0]\n # self._triangulate(x)\n # self.assign_vertices()\n # self.Angles = tri_angles(x, self.tris)\n\n\n return x",
"def in_box_bounds(self, test_vec):\n above_min = np.greater(test_vec, self.lower_vertex).all()\n below_max = np.greater(self.upper_vertex, test_vec).all()\n return above_min and below_max",
"def is_upper_hull(grid, simplex):\n point = grid[:,simplex]\n if np.isclose(point, MIN_POINT_PRECISION).any():\n return True\n else:\n return False",
"def in_grid(self, tile):\n return 0 <= tile[0] < self.gs[0] and 0 <= tile[1] < self.gs[1]",
"def check_edges(self):\n if self.rect.right >= self.screen_rect.right or self.rect.left <= 0:\n return True",
"def check_edges(self):\r\n screen_rect = self.screen.get_rect()\r\n if self.rect.right >= screen_rect.right:\r\n return True\r\n elif self.rect.left <= 0:\r\n return True",
"def checkBottom(self):\n exposed = True\n for sprite in self.overlapping_sprites:\n if sprite not in self.game.neutrinos:\n a = abs(self.bottom - sprite.top)\n b = abs(self.top - sprite.bottom)\n c = abs(self.left - sprite.right)\n d = abs(self.right - sprite.left)\n if a < b and a < c and a < d:\n exposed = False\n break\n return exposed",
"def is_lattice(self):\n return hasattr(self,\"uc\") and len(self.maximals())==1 and type(self.get_meet())!=str and type(self.get_join())!=str",
"def check_edges(self):\n\t\tscreen_rect = self.screen.get_rect()\n\t\tif self.rect.right >= screen_rect.right:\n\t\t\treturn True\n\t\telif self.rect.left <= 0:\n\t\t\treturn True",
"def regular(self):\n return all(numpy.allclose(w, w[0]) for w in self.binwidths)",
"def out_of_bounds(self):\n return not 0 <= self.nodes[0].x < WIDTH * SCALE or not 0 <= self.nodes[0].y < HEIGHT * SCALE",
"def check_edges(self):\n\t\tbottom_screen_limit = 2 * self.rect.height\n\t\tscreen_rect = self.screen.get_rect()\n\t\tif (self.rect.top <= 100) or (self.rect.bottom >= self.screen_rect.bottom):\n\t\t#self.rect.bottom >= self.screen_rect.bottom:\n\t\t\treturn True",
"def checkBINn(L, n):\n binaire = [0,1]\n if len(L)==n:\n for i in range(len(L)):\n if L[i] not in binaire:\n return False\n return True\n else: \n return False",
"def is_crossed(self):\n left_boundary_clusters = np.extract(self.cluster[0] > 0,\n self.cluster[0])\n right_boundary_clusters = np.extract(self.cluster[-1] > 0,\n self.cluster[-1])\n return np.in1d(left_boundary_clusters, right_boundary_clusters).any()",
"def check(m) :\n #find Connected-component\n lst = find_c(m)\n for e in lst :\n # verify len , 3 is the len of large boat\n if len(e) > 3 :\n return False\n if not is_vert(e) and not is_hori(e):\n return False\n return True",
"def block(array):\r\n grid = []\r\n for z in range(0,7,3): #0,3,6\r\n #vertical down 3\r\n for n in range(0,7,3): #0,3,6\r\n #horiz across 3\r\n line = []\r\n for i in range(3):\r\n for j in range(3):\r\n vert,hor = i+z,j+n\r\n line.append(array[vert][hor])\r\n grid.append(line)\r\n won = True\r\n for i in range(len(grid)):\r\n if won == True:\r\n if len(grid[i]) != len(set(grid[i])):\r\n won = False\r\n else:\r\n pass\r\n else:\r\n break\r\n return won",
"def check_edges(self):\n screen_rect = self.screen.get_rect()\n if self.rect.right >= screen_rect.right or self.rect.left <= 0:\n return True",
"def is_planar(G):\n result=True\n bad_minor=[]\n n=len(G.nodes())\n iterazione=0\n if n>5:\n print 'N >5'\n\n for subnodes in it.combinations(G.nodes(),6):\n iterazione+=1\n print 'iterazione %d'%iterazione\n subG=G.subgraph(subnodes)\n if bipartite.is_bipartite(G):# check if the graph G has a subgraph K(3,3)\n X, Y = bipartite.sets(G)\n if len(X)==3:\n result=False\n bad_minor=subnodes\n return result,bad_minor\n iterazione=0\n if n>4 and result:\n print 'N >4'\n\n for subnodes in it.combinations(G.nodes(),5):\n print 'iterazione %d'%iterazione\n subG=G.subgraph(subnodes)\n if len(subG.edges())==10:# check if the graph G has a subgraph K(5)\n result=False\n bad_minor=subnodes\n return result,bad_minor\n\n return result,bad_minor",
"def check_edges(self):\n screen_rect = self.screen.get_rect()\n if self.rect.right >= screen_rect.right:\n return True\n elif self.rect.left <= screen_rect.left:\n return True",
"def is_upper(self):\n M = self.rep\n for i in range(self.rows):\n for j in range(i):\n if M[i, j]:\n return False\n return True",
"def is_binate(self, vs=None):\n return not (self.is_neg_unate(vs) or self.is_pos_unate(vs))",
"def illegal(self):\r\n ## First checks for integer value\r\n if type(self.blockList) is int: return 1\r\n ## Then checks for 6 rows\r\n if len(self.blockList) > 6: return 1\r\n for row in self.blockList:\r\n ## Then checks that each row has 6 columns\r\n if len(self.blockList[row]) > 6: return 1\r\n for column in self.blockList[row]:\r\n ## 18 blocks is the maximum number of blocks that can be on the board\r\n if block < 0 or block > 18: return 1\r\n return 0",
"def check_edges(self):\n\t\tscreen_rect = self.screen.get_rect()\n\t\tif self.rect.bottom >= screen_rect.bottom or self.rect.top <= -1:\n\t\t\treturn True",
"def test(brickheight,bricklength,row,column,walllength,wallheight,occupied,answer):\n if brickheight>wallheight or bricklength>walllength:\n return False\n elif over(brickheight,bricklength,row,column,walllength,wallheight):\n return False\n else:\n for x in range(column,column+bricklength):\n for y in range(row,row+brickheight):\n if (x,y) in occupied:\n return False \n break\n else:\n return True"
]
| [
"0.6223635",
"0.6164103",
"0.6037212",
"0.6003846",
"0.6003286",
"0.600052",
"0.59370893",
"0.59369665",
"0.5901001",
"0.5893176",
"0.5862025",
"0.5842988",
"0.5840645",
"0.5815979",
"0.5813464",
"0.57918024",
"0.57914144",
"0.57866615",
"0.5781662",
"0.5779035",
"0.57772624",
"0.57768345",
"0.5766432",
"0.57635355",
"0.5757511",
"0.5741789",
"0.57376295",
"0.5736268",
"0.572229",
"0.5669037"
]
| 0.62754655 | 0 |
Checks whether a given element is binary or not, i.e. whether it covers at most two elements and is covered by at most two elements | def isa_binary_element(self, element):
return len(self.above(element)) <= 2 and len(self.under(element)) <= 2 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_binary(self):\n\n for element in self:\n if element != self.bottom and not self.isa_binary_element(element):\n return False\n return True",
"def is_power_of_2(x):\n return x == get_lowest_one_mask(x)",
"def majority_logical(*bit_arrays):\n\n if (len(bit_arrays) == 0):\n raise TypeError(\"len(bit_arrays) must be > 0.\")\n\n MINIMUM_MAJORITY = (len(bit_arrays) // 2) + 1\n\n answer = itertools.combinations(bit_arrays, MINIMUM_MAJORITY)\n answer = map(all, answer)\n answer = any(answer)\n return answer",
"def test_bin_right(self):\n list_val=[0,1,2,3,4,5,6]\n low=0\n high= len(list_val)-1\n target=1\n self.assertEqual(bin_search(1,0,len(list_val)-1, list_val),1)",
"def multiple_elements(self) -> bool:\n return self.max is None or self.max > 1",
"def test_binary_search(self):\n A = sorted([1,4,6,7,8,2,3,4,5,7,10,15,25,30,35])\n self.assertTrue(mst.binary_search(A, 3))\n self.assertFalse(mst.binary_search(A, 17))",
"def checkBINn(L, n):\n binaire = [0,1]\n if len(L)==n:\n for i in range(len(L)):\n if L[i] not in binaire:\n return False\n return True\n else: \n return False",
"def is_even(bin_num):\n if list(bin_num).count('1') % 2 == 0:\n return True\n else:\n return False",
"def is_bitop(*args):\n return _ida_hexrays.is_bitop(*args)",
"def isavalidinput(self , x , u):\n ans = False\n for i in range(self.m):\n ans = ans or ( u[i] < self.u_lb[i] )\n ans = ans or ( u[i] > self.u_ub[i] )\n \n return not(ans)",
"def contains(self, x):\n if isinstance(x, list):\n x = np.array(x) # Promote list to array for contains check\n return ((x == 0) | (x == 1)).all() and self.low_limit <= np.count_nonzero(x) <= self.high_limit",
"def regular(self):\n return all(numpy.allclose(w, w[0]) for w in self.binwidths)",
"def binsok(lista, elem):\n left, right = 0, len(lista)-1\n while left <= right:\n mid = (left + right) // 2\n if lista[mid] == elem:\n return True\n elif lista[mid] < elem:\n left = mid + 1\n else:\n right = mid - 1\n return False",
"def highest_bin_freq(ary):\n num_true = 0\n num_false = 0\n\n for val in ary:\n num_true += 1 if val == '1' else 0\n num_false += 1 if val == '0' else 0\n\n return '1' if num_true > num_false else '0'",
"def bornoff(self, board):\n res = False\n if (self.player):\n if (reduce(lambda x, y: x+y, board.p1vec) < reduce(lambda x, y: x+y, self.board.p1vec)):\n res = True\n else:\n if (reduce(lambda x, y: x+y, board.p2vec) < reduce(lambda x, y: x+y, self.board.p2vec)):\n res = True\n return res",
"def __check_bit_size(self, value, num_bits):\n is_fit = False\n if value <= 2 ** num_bits - 1:\n is_fit = True\n return is_fit",
"def isMergeable(int1,int2):\n if set(int1.span)&set(int2.span) or int1.maxval+1==int2.minval:\n return True\n else:\n return False",
"def binary(f, k=1):\n from numpy import asarray\n f=asarray(f)\n return (f >= k)",
"def can_binarize(a, r, phrase_index):\n if r.arity() <= 2:\n return 1\n if r.arity() > 3:\n raise ValueError(\"4-ary rules and above not supported yet\")\n\n fvars = [x for x in r.fpos if type(x) is tuple]\n for (fi,fj) in phrase_index:\n if fi <= fvars[0][0] and fvars[1][1] <= fj <= fvars[2][0]:\n return 1\n if fvars[0][1] <= fi <= fvars[1][0] and fvars[2][1] <= fj:\n return 1\n\n return 0",
"def square(self):\n return self.regular and numpy.allclose(*(w[0] for w in self.binwidths))",
"def test_binops(self):",
"def BS(self, arr, N, X):\n lo = 0\n hi = N - 1\n while lo <= hi:\n mid = (lo + hi) // 2\n if arr[mid] == X:\n return True\n elif arr[mid] < X:\n lo = mid + 1\n else:\n hi = mid - 1\n return False",
"def _check_binary_data(data):\n if not np.array_equal(data, data.astype(bool)):\n raise ValueError(\n \"This mechanism works with binary data, \"\n \"but input is not binary.\")",
"def check_bp(self):\n return self.min_basepairs <= self.seqdata.basepairs <= self.max_basepairs",
"def is_power_two(x):\n\n return (x != 0) and (drop_lowest_set_bit(x) == 0)",
"def isavalidstate(self , x ):\n ans = False\n for i in range(self.n):\n ans = ans or ( x[i] < self.x_lb[i] )\n ans = ans or ( x[i] > self.x_ub[i] )\n \n return not(ans)",
"def majority_bitwise(*bit_arrays):\n\n if (len(bit_arrays) == 0):\n raise TypeError(\"len(bit_arrays) must be > 0.\")\n\n MINIMUM_MAJORITY = (len(bit_arrays) // 2) + 1\n\n answer = 0\n for bit_array_subset in \\\n itertools.combinations(bit_arrays, MINIMUM_MAJORITY):\n answer |= functools.reduce(operator.and_, bit_array_subset)\n\n return answer",
"def _check_binop_operands(self, a, b):\n if isinstance(a, int) and (a < self._config.min_int or a > self._config.max_int):\n _raise_in_context(NumberTooHigh, \"This number is too large\")\n if isinstance(b, int) and (b < self._config.min_int or b > self._config.max_int):\n _raise_in_context(NumberTooHigh, \"This number is too large\")",
"def in_field(self, vec):\n return (abs(vec[0]) + abs(vec[1]) + abs(vec[2])) <= 2 * self.n",
"def __contains__(self, x):\n # if not isinstance(x, int) or not x % 2:\n if not (isinstance(x, int) and (x % 2)):\n return False\n return True"
]
| [
"0.66362745",
"0.61674225",
"0.6033844",
"0.5968726",
"0.5959528",
"0.593749",
"0.5853412",
"0.5815236",
"0.5798761",
"0.57288164",
"0.570948",
"0.5697407",
"0.56866753",
"0.56554615",
"0.5654972",
"0.5636119",
"0.55755115",
"0.5565799",
"0.5554582",
"0.55395794",
"0.5535122",
"0.5517706",
"0.55073494",
"0.550724",
"0.550561",
"0.550517",
"0.54872584",
"0.54511046",
"0.5449147",
"0.54376304"
]
| 0.7928481 | 0 |
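A runnable sketch of the element-level check from the entry above. The `ToyLattice` class, its cover dictionary, and the `above`/`under` lookups are assumptions made only for illustration; just the `isa_binary_element` body is taken from the entry.

```python
# Minimal sketch: a lattice stored as a cover relation, with the binary-element
# check from the entry above. ToyLattice and its data are assumed for illustration.
class ToyLattice:
    def __init__(self, covers):
        # covers[x] = set of elements that cover x (i.e. sit directly above x)
        self.covers = covers

    def above(self, element):
        return self.covers.get(element, set())

    def under(self, element):
        # elements directly below `element`
        return {x for x, ups in self.covers.items() if element in ups}

    def isa_binary_element(self, element):
        # binary: covered by at most two elements and covering at most two
        return len(self.above(element)) <= 2 and len(self.under(element)) <= 2


# diamond lattice: bottom < a, b < top
diamond = ToyLattice({"bottom": {"a", "b"}, "a": {"top"}, "b": {"top"}, "top": set()})
print(diamond.isa_binary_element("bottom"))  # True: two covers, nothing below
print(diamond.isa_binary_element("top"))     # True: no covers, two elements below
```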
Returns the successor of element different from first_successor in a binary lattice | def other_above(self, element, first_successor):
successors = list(self.above(element))
if len(successors) != 2:
raise ValueError("element is not binary in lattice")
elif successors[0] == first_successor:
return successors[1]
elif successors[1] == first_successor:
return successors[0]
else:
raise ValueError("first_successor is not a successor of element in lattice") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def other_under(self, element, first_predecessor):\n predecessors = list(self.under(element))\n if len(predecessors) != 2:\n raise ValueError(\"element is not binary in lattice\")\n elif predecessors[0] == first_predecessor:\n return predecessors[1]\n elif predecessors[1] == first_predecessor:\n return predecessors[0]\n else:\n raise ValueError(\"first_successor is not a successor of element in lattice\")",
"def _get_successor(self, node):\n current_node = node\n if node._rkid:\n current_node = node._rkid\n while True:\n if not current_node._lkid:\n break\n current_node = current_node._lkid\n return current_node",
"def successor(self, u, predecessor):\n if u[0] == predecessor:\n return u[1]\n elif u[1] == predecessor:\n return u[0]\n else:\n raise ValueError(\n 'edge has to link predecessor with some successor')",
"def _get_successor(self):\n tmp = self.right\n while tmp.left:\n tmp = tmp.left\n return tmp",
"def successor(self, node):\n if node is None:\n return None\n if node.right_child is not None:\n succ = node.right_child\n while succ.left_child is not None:\n succ = succ.left_child\n return succ\n else:\n p = node.parent\n while p is not None:\n if node is not p.right_child:\n break\n node = p\n p = p.parent\n return p",
"def successor_state(self):\n return self._successor_state",
"def get_successor(self, root):\n # Take a right traversal and go left\n # As much as you can. Last node will be successor\n root = root.right\n while root.left:\n root = root.left\n return root",
"def getSuccessor(self, gameState, action):\n successor = gameState.generateSuccessor(self.index, action)\n pos = successor.getAgentState(self.index).getPosition()\n if pos != util.nearestPoint(pos):\n return successor.generateSuccessor(self.index, action)\n else:\n return successor",
"def successor(self, state):\n pass # abstract",
"def _successor(self):\n if self.right is None:\n # get first rightward ancestor\n m = self\n n = m.parent\n while n is not None and m is n.right:\n m = n\n n = n.parent\n else:\n # get leftmost of right child\n n = self.right\n while n.left is not None:\n n = n.left\n return n",
"def getSuccessor(self, gameState, action):\n successor = gameState.generateSuccessor(self.index, action)\n pos = successor.getAgentState(self.index).getPosition()\n if pos != nearestPoint(pos):\n # Only half a grid position was covered\n return successor.generateSuccessor(self.index, action)\n else:\n return successor",
"def successor(self, key):\r\n node = self.root\r\n successor = None\r\n while node:\r\n index = node.locate_successor(key)\r\n if index < node.num_keys():\r\n successor = node.keys[index]\r\n node = node.children[index] if not node.is_leaf() else None\r\n return successor",
"def retSuccOf(self, thisNode, retiredSuccessor):\n if self.nodes.get(thisNode) != None:\n # reconstruct the list without content \n # of retiredSuccessor\n self.nodes[thisNode] = [x for x in self.nodes[thisNode] if x not in retiredSuccessor]\n else:\n pass # nothing to do",
"def getSuccessor(self, gameState, action):\n successor = gameState.generateSuccessor(self.index, action)\n pos = successor.getAgentState(self.index).getPosition()\n if pos != nearestPoint(pos):\n # Only half a grid position was covered\n return successor.generateSuccessor(self.index, action)\n else:\n return successor",
"def getSuccessor(self, gameState, action):\n successor = gameState.generateSuccessor(self.index, action)\n pos = successor.getAgentState(self.index).getPosition()\n if pos != nearestPoint(pos):\n # Only half a grid position was covered\n return successor.generateSuccessor(self.index, action)\n else:\n return successor",
"def getSuccessor(self, gameState, action):\n successor = gameState.generateSuccessor(self.index, action)\n pos = successor.getAgentState(self.index).getPosition()\n if pos != nearestPoint(pos):\n # Only half a grid position was covered\n return successor.generateSuccessor(self.index, action)\n else:\n return successor",
"def getSuccessor(self, gameState, action):\n successor = gameState.generateSuccessor(self.index, action)\n pos = successor.getAgentState(self.index).getPosition()\n if pos != nearestPoint(pos):\n # Only half a grid position was covered\n return successor.generateSuccessor(self.index, action)\n else:\n return successor",
"def getSuccessor(self, gameState, action):\n successor = gameState.generateSuccessor(self.index, action)\n pos = successor.getAgentState(self.index).getPosition()\n if pos != nearestPoint(pos):\n # Only half a grid position was covered\n return successor.generateSuccessor(self.index, action)\n else:\n return successor",
"def getSuccessor(self, gameState, action):\n successor = gameState.generateSuccessor(self.index, action)\n pos = successor.getAgentState(self.index).getPosition()\n if pos != nearestPoint(pos):\n # Only half a grid position was covered\n return successor.generateSuccessor(self.index, action)\n else:\n return successor",
"def getSuccessor(self, gameState, action):\n successor = gameState.generateSuccessor(self.index, action)\n pos = successor.getAgentState(self.index).getPosition()\n if pos != nearestPoint(pos):\n # Only half a grid position was covered\n return successor.generateSuccessor(self.index, action)\n else:\n return successor",
"def getSuccessor(self, gameState, action):\n successor = gameState.generateSuccessor(self.index, action)\n pos = successor.getAgentState(self.index).getPosition()\n if pos != nearestPoint(pos):\n # Only half a grid position was covered\n return successor.generateSuccessor(self.index, action)\n else:\n return successor",
"def successor(self, key):\r\n index = self.locate_successor(key)\r\n self.keys[index] if index < self.num_keys() else None",
"def getSuccessor(self, gameState, action):\r\n successor = gameState.generateSuccessor(self.index, action)\r\n pos = successor.getAgentState(self.index).getPosition()\r\n if pos != nearestPoint(pos):\r\n # Only half a grid position was covered\r\n return successor.generateSuccessor(self.index, action)\r\n else:\r\n return successor",
"def enumerate_lattice(self):\n node_to_succs = dict()\n node_to_preds = dict()\n start_node = self.state_to_node([self.x_lims[0], self.y_lims[0]])\n for i in range(self.num_cells[0]):\n x = start_node[0] + i\n for j in range(self.num_cells[1]):\n y = start_node[1] + j\n node = (x,y)\n node_to_succs[node] = self.get_successors(node)\n node_to_preds[node] = self.get_predecessors(node)\n \n assert(len(node_to_succs.keys()) == self.total_cells), \"Did not enumerate all possible successor cells\"\n assert(len(node_to_preds.keys()) == self.total_cells), \"Did not enumerate all possible predecessor cells\"\n return node_to_succs, node_to_preds",
"def getSuccessor(self, gameState, action):\n successor = gameState.generateSuccessor(self.index, action)\n pos = successor.getAgentState(self.index).getPosition()\n if pos != util.nearestPoint(pos):\n # Only half a grid position was covered\n return successor.generateSuccessor(self.index, action)\n else:\n return successor",
"def find_successor(self, node): \n current_node = node\n\n if current_node.right != None:\n current_node = current_node.right\n while current_node.get_children() != 0:\n if current_node.left != None:\n current_node = current_node.left\n else:\n current_node = current_node.right\n return current_node\n else:\n return None",
"def get_subsequent_mask(seq):\n len_s = seq.size(1)\n subsequent_mask = 1 - torch.triu(\n torch.ones((len_s, len_s), device=seq.device), diagonal=1)\n subsequent_mask = subsequent_mask.unsqueeze(0).bool()\n return subsequent_mask",
"def successor(self, state):\n for direct in self.direction :\n newState = state.move(direct)\n if newState :\n yield (None, newState)",
"def getSuccessor(self, gameState, action):\n successor = gameState.generateSuccessor(self.index, action)\n pos = successor.getAgentState(self.index).getPosition()\n if pos != util.nearestPoint(pos):\n # Only half a grid position was covered\n return successor.generateSuccessor(self.index, action)\n return successor",
"def get_successor(self, value):\n\t\tstack = [self]\n\t\tcurrent = None\n\t\tprevious = None\n\t\twhile not previous == value and stack:\n\t\t\titem = stack.pop()\n\t\t\tif isinstance(item, BSTreeNode):\n\t\t\t\tif item.right:\n\t\t\t\t\tstack.append(item.right)\n\t\t\t\tstack.append(item.value)\n\t\t\t\tif item.left:\n\t\t\t\t\tstack.append(item.left)\n\t\t\t\tcontinue\n\t\t\tprevious = current\n\t\t\tcurrent = item\n\t\treturn current if previous == value else None"
]
| [
"0.6546926",
"0.64019597",
"0.60879385",
"0.6056825",
"0.6039657",
"0.5892914",
"0.5845297",
"0.57128656",
"0.566253",
"0.56558645",
"0.56257665",
"0.56140566",
"0.5604517",
"0.56000495",
"0.56000495",
"0.56000495",
"0.56000495",
"0.56000495",
"0.56000495",
"0.56000495",
"0.56000495",
"0.5592897",
"0.55911535",
"0.558699",
"0.55805117",
"0.55666137",
"0.55608225",
"0.5560444",
"0.5555085",
"0.5550861"
]
| 0.67283386 | 0 |
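A standalone usage sketch of the `other_above` lookup above; the dict-based successor map stands in for the lattice object and is an assumption made only for this example.

```python
# Sketch: given a binary element with exactly two successors, return the one
# that is not `first_successor`. The successor map below is assumed toy data.
def other_above(successors, element, first_successor):
    succs = list(successors[element])
    if len(succs) != 2:
        raise ValueError("element is not binary in lattice")
    if first_successor not in succs:
        raise ValueError("first_successor is not a successor of element in lattice")
    return succs[1] if succs[0] == first_successor else succs[0]


successors = {"bottom": {"a", "b"}}
print(other_above(successors, "bottom", "a"))  # "b"
```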
Returns the predecessor of element different from first_predecessor in a binary lattice | def other_under(self, element, first_predecessor):
predecessors = list(self.under(element))
if len(predecessors) != 2:
raise ValueError("element is not binary in lattice")
elif predecessors[0] == first_predecessor:
return predecessors[1]
elif predecessors[1] == first_predecessor:
return predecessors[0]
else:
raise ValueError("first_successor is not a successor of element in lattice") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def predecessor(self, node):\n if node is None:\n return None\n if node.left_child is not None:\n pred = node.left_child\n while pred.right_child is not None:\n pred = pred.right_child\n return pred\n else:\n p = node.parent\n while p is not None:\n if node is not p.left_child:\n break\n node = p\n p = p.parent\n return p",
"def predecessor(self, key):\r\n node = self.root\r\n predecessor = None\r\n while node:\r\n index = node.locate_predecessor(key)\r\n if index >= 0:\r\n predecessor = node.keys[index]\r\n node = node.children[index+1] if not node.is_leaf() else None\r\n return predecessor",
"def predecessor(self, key):\r\n index = self.locate_predecessor(key)\r\n return self.keys[index] if index >= 0 else None",
"def locate_predecessor(self, key):\r\n index = self.search(key)\r\n return index-1",
"def predecessor(self, n, lower, upper):\n pred = -math.inf\n pos = None\n for i, ele in enumerate(self.nums[lower: upper], start=lower):\n if pred < ele < n:\n pred = ele\n pos = i\n return pos",
"def get_predecessors(self, pos: tuple):\n return self.get_successors(pos)",
"def other_above(self, element, first_successor):\n successors = list(self.above(element))\n if len(successors) != 2:\n raise ValueError(\"element is not binary in lattice\")\n elif successors[0] == first_successor:\n return successors[1]\n elif successors[1] == first_successor:\n return successors[0]\n else:\n raise ValueError(\"first_successor is not a successor of element in lattice\")",
"def get_predecessor(self, key):\r\n parent_node, search_node = self.__compare(key, method='search')\r\n self.__check_node(search_node)\r\n\r\n # if the node has a left tree\r\n if search_node.left_child.key:\r\n pred_node, _ = self.__compare(method='max', source=search_node.left_child)\r\n\r\n # if the node has no left tree\r\n else:\r\n while search_node.key < parent_node.key:\r\n search_node = parent_node\r\n parent_node = parent_node.parent\r\n\r\n # if it reaches the root, means there is no predecessor\r\n if not parent_node:\r\n return NodeRBT(None, None)\r\n\r\n pred_node = parent_node\r\n\r\n return pred_node",
"def predecessor(self) -> Union[\"Node\", None]:\n if self.left is not None: # case 1: the node has a left child\n return self.left.max()\n\n else: # case 2: the node does not have a left child\n current = self\n while current.parent is not None: # traverse up\n if current == current.parent.right:\n return current.parent\n else:\n current = current.parent\n\n return None # the root is reached, so no predecessor exists",
"def before(self,p):\r\n \r\n current = self.head #test from the head node\r\n \r\n if p == current: #if the head node = p\r\n return 'null' #there cannot be a node before it\r\n \r\n while current != p: #else keep checking the elements until it reaches p\r\n current = current.next\r\n return current.prev.data #now current = p, so return the node before p\r",
"def get_predecessors(self, node): \n preds = []\n child_state = self.node_to_state(node)\n for it in self.predecessors:\n parent_node = (node[0] + it[0], node[1] + it[1])\n parent_state = self.node_to_state(parent_node)\n edge = self.interpolate(child_state, parent_state, self.distance_bw_states(child_state, parent_state)/self.path_resolution)\n preds.append([parent_node, edge])\n return preds",
"def RemoteGetAndSetPredecessor(self, destAddress, node):\r\n if(destAddress == self.selfNodeAddress):\r\n return self.predecessor\r\n argDict = {}\r\n argDict[RPC_ARG_REQUEST_TYPE] = GET_PREDECESSOR_REQUEST\r\n argDict[RPC_ARG_NODE_INFO] = node\r\n predecessor = self.RemoteCall(destAddress, argDict)\r\n return predecessor",
"def predecessor_pair(basepair, start, stop):\n\tx , y = basepair\n\tif (x - 1 < start) or (y + 1 > stop):\n\t\treturn (-1,-1)\n\telse:\n\t\treturn ( x - 1 , y + 1 )",
"def nav_prev_sibling(self):\r\n siblings = self.nav_siblings()\r\n prev_sibling = None\r\n for i, sibling in enumerate(siblings):\r\n if sibling == self and i > 0:\r\n prev_sibling = siblings[i-1]\r\n return prev_sibling",
"def predecessors(xi):\n X_K = xi.berkovich_line()\n if xi.is_limit_point():\n xi = xi.approximation()\n y = xi.parameter()\n v = xi.pseudovaluation_on_polynomial_ring()\n v_list = v.augmentation_chain()[1:-1]\n return [X_K.point_from_pseudovaluation_on_polynomial_ring(w, y)\n for w in v_list]",
"def _previous(self, coord):\n candidates = [(coord[0] - 1, coord[1]), (coord[0] + 1, coord[1]), (coord[0], coord[1] - 1), (coord[0], coord[1] + 1)]\n for candidate in (x for x in candidates if 0 <= x[0] < self.dimension and 0 <= x[1] < self.dimension):\n if self.board[candidate[0]][candidate[1]].next == self.board[coord[0]][coord[1]]:\n return candidate",
"def _first_index_with_smaller_neighbour(P):\n i = len(P) - 1\n while i > 0 and P[i-1] <= P[i]:\n i -= 1\n return i",
"def deep_predecessor(self, index):\r\n return self.children[index].deep_max()",
"def successor(self, u, predecessor):\n if u[0] == predecessor:\n return u[1]\n elif u[1] == predecessor:\n return u[0]\n else:\n raise ValueError(\n 'edge has to link predecessor with some successor')",
"def predecessor(self, key: int) -> TreeNode:\n tree_node = self.search(key, possible_parent=True)\n if tree_node:\n if tree_node.left and tree_node.val >= key:\n left_subtree = tree_node.left\n while left_subtree.right:\n left_subtree = left_subtree.right\n return left_subtree\n else:\n while tree_node:\n if tree_node.val < key:\n return tree_node\n tree_node = tree_node.parent\n return",
"def previous_min(L):\n\n return itertoolsextra.max_diff(L)",
"def GetPrevSibling(self, item):\r\n\r\n i = item\r\n parent = i.GetParent()\r\n \r\n if parent == None:\r\n \r\n # root item doesn't have any siblings\r\n return None\r\n \r\n siblings = parent.GetChildren()\r\n index = siblings.index(i)\r\n\r\n return (index == 0 and [None] or [siblings[index-1]])[0]",
"def __find_immediate_predecessors(storm_object_table, target_row):\n\n error_checking.assert_is_integer(target_row)\n error_checking.assert_is_geq(target_row, 0)\n error_checking.assert_is_less_than(\n target_row, len(storm_object_table.index)\n )\n\n predecessor_sec_id_strings = [\n storm_object_table[c].values[target_row]\n for c in PREV_SECONDARY_ID_COLUMNS\n if storm_object_table[c].values[target_row] != ''\n ]\n\n num_predecessors = len(predecessor_sec_id_strings)\n if num_predecessors == 0:\n return numpy.array([], dtype=int)\n\n target_time_unix_sec = storm_object_table[\n tracking_utils.VALID_TIME_COLUMN].values[target_row]\n\n predecessor_rows = numpy.full(num_predecessors, -1, dtype=int)\n\n for i in range(num_predecessors):\n these_rows = numpy.where(numpy.logical_and(\n storm_object_table[tracking_utils.SECONDARY_ID_COLUMN].values ==\n predecessor_sec_id_strings[i],\n storm_object_table[tracking_utils.VALID_TIME_COLUMN].values <\n target_time_unix_sec\n ))[0]\n\n if len(these_rows) == 0:\n continue\n\n this_subrow = numpy.argmax(\n storm_object_table[tracking_utils.VALID_TIME_COLUMN].values[\n these_rows]\n )\n\n predecessor_rows[i] = these_rows[this_subrow]\n\n return predecessor_rows[predecessor_rows >= 0]",
"def predecessors(self, node: Node):\n return iter(self.get_node(node_id) for node_id in node.in_nodes_ids)",
"def get_predecessors(self, word):\n predecessors = []\n for idx in range(len(word)):\n predecessors.append(word[:idx]+word[idx+1:])\n\n return predecessors",
"def previous(self, item) -> LinkedListNode:\n node = self.head\n while node is not self._nil:\n if node.next.item is item:\n return node\n node = node.next\n return node",
"def pred(self, index, prev):\n return self.tour[index - 1] if prev else self.tour[index + 1]",
"def forwarding(predecessor, source):\n # Defining the list of nodes that will be checked\n nodes = list(predecessor.keys())\n nodes.remove(source)\n\n # Getting minimum node (initial node) and removing it from the list\n T = dict.fromkeys(nodes, [])\n\n # Looping through notes and getting the next hop node\n for n in nodes:\n nextnode = n\n while nextnode != source:\n T[n] = (source, nextnode)\n # This is presented in the from that was presented to us in the lectures\n nextnode = predecessor[nextnode][0]\n return T",
"def _node_lowest_neighbour(self, node):\n\n lowest = self.neighbour_array_lo_hi[node][0]\n\n if lowest != node:\n return lowest\n else:\n return -1",
"def get_min_path(self, node):\r\n if self.have_min_distance(node):\r\n path = []\r\n while node != self.start:\r\n path.insert(0, node)\r\n node = self.table[node][\"prev\"]\r\n path.insert(0, node)\r\n return path\r\n return None"
]
| [
"0.6908084",
"0.6575059",
"0.65220636",
"0.64290535",
"0.6356892",
"0.62963617",
"0.62842953",
"0.6266937",
"0.6206771",
"0.61826193",
"0.6116143",
"0.60884154",
"0.60773015",
"0.6038158",
"0.6006918",
"0.598322",
"0.5974723",
"0.5910431",
"0.5852234",
"0.5840869",
"0.5837235",
"0.57443845",
"0.57025784",
"0.5699197",
"0.56690454",
"0.5664289",
"0.5609696",
"0.5599721",
"0.5547562",
"0.5536235"
]
| 0.65804267 | 1 |
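The predecessor case mirrors the successor case above; a short sketch with an assumed predecessor map, using the corrected error message:

```python
# Sketch: symmetric to other_above, but walking down the cover relation.
def other_under(predecessors, element, first_predecessor):
    preds = list(predecessors[element])
    if len(preds) != 2:
        raise ValueError("element is not binary in lattice")
    if first_predecessor not in preds:
        raise ValueError("first_predecessor is not a predecessor of element in lattice")
    return preds[1] if preds[0] == first_predecessor else preds[0]


predecessors = {"top": {"a", "b"}}
print(other_under(predecessors, "top", "b"))  # "a"
```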
Tensorflow peak picking via local maxima. Returns the indices of the local maxima along the first dimension of the tensor | def find_local_maxima(tens):
return tf.squeeze(tf.where(tf.equal(label_local_extrema(tens), 'P'))) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def local_max(img, size=(70,100,100)):\n # Apply a maximum filter.\n max_f = ndi.maximum_filter(img, size=size)\n # Find pixels that are local maxima.\n local_max = np.where(max_f == img, 1, 0)\n return(local_max)",
"def get_peak_inds(map_):\n return np.unravel_index(np.argmax(map_, axis=None), map_.shape)",
"def maxs(self) -> Tensor:\n return self._ranges[:, 1]",
"def peak_local_max_nD(img, size=(70,100,100), min_dist=0):\n def has_neighbor(peak, peak_list, min_dist):\n \"\"\"Find whether a peak already exists within minimum distance of this peak\"\"\"\n for testpeak in peak_list:\n if (distance.euclidean(peak, testpeak) < min_dist):\n return True\n return False\n # Find pixels that represent local maxima. Produces clusters of connected\n # pixels at the centers of objects.\n maxes = local_max(img, size)\n # Connect these pixels in a labelmask.\n conn_comp, info = ndi.label(maxes)\n # Get the centroids of each local max object, update mask and list.\n local_peak_mask = np.zeros_like(img)\n local_peaks = []\n peak_num=1\n\n for id_ in np.unique(conn_comp)[1:]:\n centroid = get_object_centroid(conn_comp, id_)\n # If there is no already-added seed within the minimum distance,\n # add this seed to the mask and list.\n if (not has_neighbor(centroid, local_peaks, min_dist)):\n local_peak_mask[centroid] = peak_num\n local_peaks.append(centroid)\n peak_num = peak_num + 1\n return local_peak_mask, local_peaks",
"def local_max(x, threshold=1e-5):\n maxima = np.r_[True, x[1:] > x[:-1]] & np.r_[x[:-1] > x[1:] , True]\n # select all local maxima above the threshold\n maxima_f = maxima & np.r_[x > threshold , True][:-1]\n peak_indices = np.where(maxima_f==True)[0]\n return np.array(peak_indices)",
"def peak_finder(filt_im, dist, threshold):\n from skimage.feature import peak_local_max\n coordinates = peak_local_max(filt_im, min_distance=dist, threshold_abs=threshold)\n return coordinates",
"def argmax_feature_map_locations(feature_map):\n batch_size, _, width, num_channels = _get_shape(feature_map, 4)\n\n feature_map_flattened = tf.reshape(\n feature_map, [batch_size, -1, num_channels])\n peak_flat_indices = tf.math.argmax(\n feature_map_flattened, axis=1, output_type=tf.dtypes.int32)\n # Get x and y indices corresponding to the top indices in the flat array.\n y_indices, x_indices = (\n row_col_indices_from_flattened_indices(peak_flat_indices, width))\n channel_indices = tf.tile(\n tf.range(num_channels)[tf.newaxis, :], [batch_size, 1])\n return y_indices, x_indices, channel_indices",
"def masked_maximum(data, mask, dim=1):\n axis_minimums = tf.reduce_min(data, dim, keepdims=True)\n masked_maximums = tf.reduce_max(\n tf.math.multiply(data - axis_minimums, mask), dim,\n keepdims=True) + axis_minimums\n return masked_maximums",
"def max_decode(M):\r\n return scipy.array([ f.val.argmax() for f in M])",
"def find_bump_peak_idxs(map1d,**kwargs):\n map_side=int(np.sqrt(len(map1d)))\n map2d=map1d.reshape(map_side,map_side)\n A=gl.detect_peaks(map2d,size=2,**kwargs)\n MA=masked_array(map2d,mask=A.astype(bool))\n if len(MA.nonzero()[0])>0:\n idx2d=np.unravel_index(MA.argmax(), MA.shape)\n return idx2d\n else:\n return None",
"def max(self):\n max_i = np.nanargmax(self.ys)\n return self.xs[max_i], self.ys[max_i]",
"def local_maxima2(array, min_distance = 1, periodic=False, edges_allowed=True):\n array = np.asarray(array)\n cval = 0 \n \n if periodic: \n mode = 'wrap' \n elif edges_allowed: \n mode = 'nearest' \n else: \n mode = 'constant' \n cval = array.max()+1 \n max_points = array == ndimage.maximum_filter(array, 1+2*min_distance, mode=mode, cval=cval) \n \n return [indices[max_points] for indices in np.indices(array.shape)][0]",
"def getPeakInfo(A, lambd, shift=150):\n\n lambd = lambd[shift:]\n if A.ndim == 2:\n A = A[:, shift:]\n if torch.is_tensor(A):\n max_idx = A.argmax(dim=1)\n A_max = A.max(dim=1).values\n else:\n max_idx = A.argmax(axis=1)\n A_max = A.max(axis=1)\n else:\n A = A[shift:]\n max_idx = A.argmax()\n A_max = A[max_idx]\n\n lambd_max = lambd[max_idx]\n return lambd_max, A_max",
"def window_argmaxes(windows, data):\n data = np.array(data)\n argmaxes = []\n\n for window in windows:\n data_segment = data[window]\n argmaxes.append(window[np.argmax(data_segment)])\n\n return np.array(argmaxes)",
"def _masked_maximum(data, mask, dim=1):\n axis_minimums = tf.math.reduce_min(data, dim, keepdims=True)\n masked_maximums = (\n tf.math.reduce_max(\n tf.math.multiply(data - axis_minimums, mask), dim, keepdims=True\n )\n + axis_minimums\n )\n return masked_maximums",
"def globalMaximum(self):\n # The global maximum is at one peak's position\n potential_max = list()\n for func, pos, height, width in zip(self.peaks_function,\n self.peaks_position,\n self.peaks_height,\n self.peaks_width):\n potential_max.append((func(pos, pos, height, width), pos))\n return max(potential_max)",
"def get_max_indices(self, input):\n \n min_element = torch.min(torch.abs(input.contiguous().view(-1)))\n input_temp = input + min_element + self.epsilon\n masked_input_temp = input_temp * self.mask\n values, indices = torch.sort(masked_input_temp, dim=1, descending=True)\n\n return indices[:, :self.num_active_nodes,:]",
"def find_max_score_location(grid, shape):",
"def filtermax(f, maxfiltsize=10):\n # Maximum filter to ignore deeper fluxes of absorption lines\n f_maxfilt = maximum_filter1d(f, size=maxfiltsize)\n # Find points selected by maximum filter\n idxmax = np.array([i for i in range(len(f)) if f[i]-f_maxfilt[i] == 0.])\n\n return f_maxfilt, idxmax",
"def test_find_maximum_with_multiple_init_tensor(ntrain=10, n_inits=5, dtype = tf.float32):\n tf.reset_default_graph()\n\n xdim = 2\n xmin = -10.\n xmax = 10.\n \n func = lambda x: tf.sin( tf.matmul(x,x,transpose_b=True) )\n\n initializers = tf.random.uniform(shape=(n_inits,xdim), dtype=dtype) * (xmax - xmin) + xmin\n\n xs, xs_list, fvals = gen_fval_xs(func, n_inits, xdim, xmin, xmax, dtype=dtype, name='test_max_f_mulinit')\n\n assign_inits = []\n for i in range(n_inits):\n assign_inits.append( tf.assign(xs_list[i], tf.reshape(initializers[i,:], shape=(1,xdim))) )\n\n optimizer = tf.train.AdamOptimizer()\n\n trains, max_idx = find_maximum_with_multiple_init_tensor(xs_list, fvals, n_inits, xdim, dtype=dtype, name='find_maximum_multiple_inputs', optimizer=optimizer)\n\n\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n\n sess.run(assign_inits)\n\n xs_val, xs_list_val, fvals_val = sess.run([xs, xs_list, fvals])\n print('')\n print('input = ', xs_val[0,...])\n print(xs_list_val[0])\n print('output = ', fvals_val[0])\n\n for i in range(ntrain):\n _, max_idx_val, xs_val, xs_list_val, fvals_val = sess.run([trains, max_idx, xs, xs_list, fvals])\n\n print('')\n print('input = ', xs_val[0,...])\n print(xs_list_val[0])\n print('output = ', fvals_val[0])",
"def auxmaxf1(x):\n \n# Sum over data points\n f = 0.0\n for m_ind in range(cfg.ntrain):\n f += auxmax_f1_part_i(x,m_ind) \n \n return f",
"def find_maxima(x):\n\n idx = []\n for i in range(len(x)):\n # `i` is a local maximum if the signal decreases before and after it\n if x[i-1] < x[i] and x[i+1] < x[i]:\n idx.append(i)\n return idx",
"def maximums(self):\n # The maximums are at the peaks position but might be swallowed by \n # other peaks\n maximums = list()\n for func, pos, height, width in zip(self.peaks_function,\n self.peaks_position,\n self.peaks_height,\n self.peaks_width):\n val = func(pos, pos, height, width)\n if val >= self.__call__(pos, count=False)[0]:\n maximums.append((val, pos))\n return sorted(maximums, reverse=True)",
"def findmaxidx(datasets, target='atom_label'):\n\n if target == 'atom_label':\n return _findmaxidx(datasets, 0)\n elif target == 'wle_label':\n return _findmaxidx(datasets, 2)",
"def batch_maximum(imstack):\n maxpos = np.zeros((imstack.shape[0], 2))\n for i in range(imstack.shape[0]):\n if np.isnan(imstack[i, 0, 0]):\n maxpos[i, 0] = np.nan\n maxpos[i, 1] = np.nan\n else:\n ind = np.unravel_index(\n np.argmax(np.squeeze(imstack[i]), axis=None),\n np.squeeze(imstack[i]).shape,\n )\n maxpos[i, 0] = ind[1]\n maxpos[i, 1] = ind[0]\n return maxpos",
"def get_max_index(a):\n return a.argmax()",
"def get_max_index(a):\n return a.argmax()",
"def get_max_index(a):\n return a.argmax()",
"def max_point(self):\n x = self.max(0).idxmax()\n y = self.loc[:, x].idxmax()\n return x, y",
"def masked_maximum(data, mask, dim=1):\n # sourced from:\n # https://github.com/tensorflow/tensorflow/blob/r1.14/tensorflow/contrib/losses/python/metric_learning/metric_loss_ops.py#L122-L138\n axis_minimums = tf.math.reduce_min(data, dim, keepdims=True)\n masked_maximums = tf.math.reduce_max(\n tf.math.multiply(data - axis_minimums, mask), dim,\n keepdims=True) + axis_minimums\n return masked_maximums"
]
| [
"0.6616471",
"0.6606883",
"0.6585712",
"0.65430903",
"0.64255357",
"0.62576705",
"0.6244409",
"0.6212893",
"0.6207875",
"0.61622685",
"0.60652435",
"0.6040522",
"0.6009267",
"0.59946674",
"0.5976845",
"0.59472924",
"0.59412134",
"0.5932775",
"0.5926134",
"0.5915045",
"0.58859336",
"0.58508617",
"0.5849932",
"0.58145744",
"0.58082014",
"0.5803546",
"0.5803546",
"0.5803546",
"0.57890725",
"0.57859576"
]
| 0.75924444 | 0 |
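The entry above depends on a `label_local_extrema` helper that is not shown. As an assumption about the intended behaviour, here is a self-contained TensorFlow sketch that returns the indices of strict interior local maxima of a 1-D tensor:

```python
import tensorflow as tf

def local_maxima_indices(x):
    # indices of strict interior local maxima of a 1-D tensor
    left = x[1:-1] > x[:-2]    # greater than the left neighbour
    right = x[1:-1] > x[2:]    # greater than the right neighbour
    mask = tf.concat([[False], left & right, [False]], axis=0)
    return tf.squeeze(tf.where(mask), axis=-1)


x = tf.constant([0.0, 2.0, 1.0, 3.0, 0.5])
print(local_maxima_indices(x).numpy())  # [1 3]
```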
Tensorflow-based implementation of np.fft.fftfreq | def fft_frequencies(sr=22050, n_fft=2048):
# TODO endpoint=True
return tf.linspace(0, tf.cast(sr/2., dtype=tf.int32), tf.cast(1. + n_fft // 2., dtype=tf.int32)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_fft_freq(dt, nfft, full):\n freqs = da.fft.fftfreq(nfft, d=dt) \n\n if (nfft % 2 == 0):\n freqs = da.hstack([freqs[0: nfft // 2], -freqs[nfft //2], freqs[nfft // 2:nfft]])\n if full:\n freqs = da.fft.fftshift(freqs)\n else:\n freqs = freqs[0:nfft // 2 + 1]\n\n return(freqs)",
"def _fftfreq(self, n):\r\n val = 1.0 / n\r\n N = floor((n - 1) / 2) + 1\r\n results = [i for i in range(0, int(N))]\r\n p1 = [i for i in range(0, int(n))]\r\n for k in range(0, int(N)):\r\n results[k] = k * val\r\n for j in range(0, int(n)):\r\n results[j] = -floor(n / 2) - (N - j) * val\r\n return results",
"def freq_from_fft(self, sig, fs):\n\t # Compute Fourier transform of windowed signal\n\t windowed = sig * blackmanharris(len(sig))\n\t f = rfft(windowed)\n\n\t # Find the peak and interpolate to get a more accurate peak\n\t i = argmax(abs(f)) # Just use this for less-accurate, naive version\n\t true_i = parabolic(log(abs(f)), i)[0]\n\n\t # Convert to equivalent frequency\n\t return fs * true_i / len(windowed)",
"def fft_frequency(fft, index):\n\treturn index * AUDIO_RATE / len(fft) / 2 # Same as in fft_index, see above",
"def frequency_fft(vis, freq, dim, taper=np.ones_like, n_obs =1):\n ft = []\n W = (freq.max() - freq.min()) / n_obs\n L = int(len(freq) / n_obs)\n \n for ii in range(n_obs):\n ft.append(fft(vis[:,:,ii*L:(ii+1)*L] * taper(L), W, axes=(2,), a=0, b=2 * np.pi)[0][:,:,int(L/2):]) # return the positive part)\n \n ft = np.array(ft)\n return ft",
"def FourierFrequency(xdata, nPoints):\r\n freq = np.fft.fftfreq(nPoints, d=(xdata.max()-xdata.min())/xdata.shape[0])\r\n return freq",
"def in_freq(self):\n if self._in_freq is None:\n self._in_freq = np.fft.rfft(self._in_time)\n return self._in_freq",
"def get_fft(signal):\n fs = 100.\n Fk = np.fft.rfft(signal)/len(signal)\n \n f = np.fft.rfftfreq(len(signal), 1/fs)\n #remove the noise\n Fk[0] = 0\n \n return Fk, f",
"def n_fft(self):\n return self._n_fft",
"def get_freq_array(bandwidth, n_chans):\n return numpy.arange(n_chans)*float(bandwidth)/n_chans",
"def get_frequency(time_series):\n if len(time_series.index) == 0:\n return 0\n ft = np.fft.rfft(time_series)\n return np.fft.fftfreq(len(time_series))[np.argmax(abs(ft))]",
"def fft_frequencies(sr=22050, n_fft=2048):\n\n return np.linspace(0, float(sr) / 2, int(1 + n_fft // 2), endpoint=True)",
"def fourier_freqs(times):\n # get the number of samples and the sample rate\n N = len(times)\n dt = np.mean(np.diff(times))\n\n # get the Nyquist frequency\n f_nyq = 1.0 / (2 * dt)\n\n # return the frequency array\n return np.linspace(-f_nyq, f_nyq, N, endpoint=False)",
"def frequency(self):\n return self.reference_clock_speed / 4096 / self.prescale_reg",
"def den2freq(den):\n\n return k * sqrt(den)",
"def rfft2d_freqs(h, w):\n fy = np.fft.fftfreq(h)[:, None]\n fx = np.fft.fftfreq(w)\n return np.sqrt(fx * fx + fy * fy)",
"def fftfreq_RLyonBook(N, d):\n if N % 2 == 0:\n # even\n a1 = np.arange(0, N / 2 + 1, 1)\n a2 = np.arange(-N / 2 + 1, 0, 1)\n return np.concatenate((a1, a2)) / (N * d)\n else:\n # odd\n a1 = np.arange(0, (N + 1) / 2, 1)\n a2 = np.arange(-(N - 1) / 2, 0, 1)\n return np.concatenate((a1, a2)) / (N * d)",
"def rfft2d_freqs(h, w):\n\n fy = np.fft.fftfreq(h)[:, None]\n # when we have an odd input dimension we need to keep one additional\n # frequency and later cut off 1 pixel\n if w % 2 == 1:\n fx = np.fft.fftfreq(w)[: w // 2 + 2]\n else:\n fx = np.fft.fftfreq(w)[: w // 2 + 1]\n return np.sqrt(fx * fx + fy * fy)",
"def fft_index(fft, frequency):\n\treturn 2 * int(len(fft) * frequency / AUDIO_RATE) # Not entirely clear on why I need to multiply by 2 here. I don't need to if I use fft instead of rfft, but then I get a bunch of crazy high frequency FFT data, or is it complex numbers or something...",
"def modified_dft(arr, fs, nfft, window, axis, detrend, scaling):\n\n nsamples = arr.shape[axis]\n\n if nfft < nsamples:\n # crop arr before detrending & windowing; see rfft crop\n arr = slice_along_axis(arr, 0, nfft, axis=-1)\n\n # detrend the array\n arr = sps.detrend(arr, axis=axis, type=detrend)\n\n # fetch and apply window\n coeffs = sps.get_window(window, arr.shape[axis])\n arr = multiply_along_axis(arr, coeffs, axis=axis)\n\n # compute real DFT. Zeropad for nfft > nsamples is automatic\n # rfft uses 'backward' norm default which is no norm on rfft\n arr = np.fft.rfft(arr, nfft, axis=axis)\n freqs = np.fft.rfftfreq(nfft, d=1/fs)\n\n # scale using weighted mean of window values\n if scaling == 'spectrum':\n norm = 1 / np.sum(coeffs)**2\n\n elif scaling == 'density':\n #process loss Shiavi Eqn 7.54\n norm = 1 / (fs * np.sum(coeffs**2))\n \n else:\n msg = 'Unknown scaling: {}'\n raise ValueError(msg.format(scaling))\n \n # before conjugate multiplication unlike scipy\n # see _spectral_helper lines 1808 an 1842.\n arr *= np.sqrt(norm)\n\n return freqs, arr",
"def calcFFT(self):\n try:\n arr = np.fromstring(self.fqueue, dtype=np.float32)\n # Slice for improved performance [200:len(arr)/2:50]#[len(arr)/2:]\n fft = np.fft.fft(arr)[len(arr)/2:]\n f = lambda x: 100 + 20. * log(abs(x))\n mag = map(f, fft)\n\n self.sock.send('f' + json.dumps({\n 'nsamp': len(mag),\n 'samples': mag}))\n self.fqueue = \"\"\n self.fcount = 0\n except:\n pass",
"def _frequency_2_real(self, copy=False):\n Fkernel = self._Fkernel\n if copy:\n Fkernel = self._Fkernel.copy()\n return np.fft.fftshift(self.basis._ifftn(Fkernel),\n axes=self.basis._axes)",
"def fourier_transform(signal, fs):\n freqs = np.fft.rfftfreq(4*len(signal), 1/fs)\n fft = np.abs(np.fft.rfft(signal, 4*len(signal)))\n return freqs, fft",
"def _real_2_frequency(self, kernel):\n return self.basis._fftn(np.fft.ifftshift(kernel,\n axes=self.basis._axes))",
"def freq():",
"def frequencies(self):\r\n\r\n self.method['Fs'] = self.method.get('Fs', self.input.sampling_rate)\r\n NFFT = self.method.get('NFFT', 64)\r\n Fs = self.method.get('Fs')\r\n freqs = tsu.get_freqs(Fs, NFFT)\r\n lb_idx, ub_idx = tsu.get_bounds(freqs, self.lb, self.ub)\r\n\r\n return freqs[lb_idx:ub_idx]",
"def frequencies(self):\r\n\r\n #XXX Use NFFT in the method in order to calculate these, without having\r\n #to calculate the spectrum:\r\n f, spectrum = tsa.get_spectra(self.input.data, method=self.method)\r\n return f",
"def call(self, input):\n for r in range(self.tile_num):\n for c in range(self.tile_num):\n # do frequency conv on each tile\n offset = [[r*self.tile_size+self.tile_size/2, c*self.tile_size+self.tile_size/2] for i in range(BATCHSIZE)]\n input_tile = tf.image.extract_glimpse(input, \n [self.tile_size, self.tile_size],\n offset, centered=False, normalized=False) \n pad_pixels = (self.fft_size - self.tile_size) / 2\n input_tile = tf.image.pad_to_bounding_box(\n input_tile, pad_pixels, pad_pixels, self.fft_size, self.fft_size)\n\n input_tile = tf.transpose(input_tile, perm=[0,3,1,2])\n input_fft = tf.spectral.fft2d(tf.cast(input_tile, tf.complex64))\n output_fft = tf.multiply(self.kernel_freq, input_fft[0,:])\n output_fft_accum = tf.reduce_sum(output_fft, 1)\n output_batch_i = tf.spectral.ifft2d(output_fft_accum)\n bias_expand = tf.expand_dims(tf.expand_dims(self.bias, 1),1)\n output_tile_accum = tf.expand_dims(tf.real(output_batch_i) + bias_expand, 0)\n for b in range(1,BATCHSIZE):\n output_fft = tf.multiply(self.kernel_freq, input_fft[b,:])\n output_fft_accum = tf.reduce_sum(output_fft, 1)\n output_fft_batch_i = tf.spectral.ifft2d(output_fft_accum)\n bias_expand = tf.expand_dims(tf.expand_dims(self.bias, 1),1)\n output_tile_accum = tf.concat([output_tile_accum, \n tf.expand_dims(tf.real(output_fft_batch_i) + bias_expand, 0)],0)\n\n # Concat col tiles\n output_accum_col = output_tile_accum\n if c != 0:\n overlap = output_accum_col[:,:,:,-pad_pixels:] + output_tile_accum[:,:,:,0:pad_pixels]\n output_accum_col = tf.concat([output_accum_col[:,:,:,0:-pad_pixels], \n overlap, \n output_tile_accum[:,:,:,pad_pixels:]], \n 3)\n # Concat tow output tiles\n output_accum = output_accum_col\n if r != 0:\n overlap = output_accum[:,:,-pad_pixels:,:] + output_accum_col[:,:,0:pad_pixels,:]\n output_accum = tf.concat([output_accum[:,:,0:-pad_pixels,:], \n overlap, \n output_accum_col[:,:,pad_pixels:,:]], \n 2)\n\n output_accum = tf.transpose(output_accum, perm=[0,2,3,1])\n return tf.image.crop_to_bounding_box(output_accum, 0, 0, self.img_size, self.img_size)",
"def frequency(self):\n return self.reference_clock_speed / 4096 / self.prescale_reg",
"def freq2den(freq):\n\n return freq * freq * k_2"
]
| [
"0.6975749",
"0.69176376",
"0.689881",
"0.6862554",
"0.6815903",
"0.67304593",
"0.6688847",
"0.6614525",
"0.65949523",
"0.6556522",
"0.65385807",
"0.6481248",
"0.64473915",
"0.642961",
"0.6424027",
"0.64006203",
"0.6398523",
"0.6341776",
"0.633343",
"0.633183",
"0.63218015",
"0.63188714",
"0.62951744",
"0.6286877",
"0.6286661",
"0.626792",
"0.626005",
"0.62597287",
"0.6246244",
"0.6217171"
]
| 0.733585 | 0 |
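The entry above still carries a TODO about the inclusive endpoint and casts its arguments to int32. A float-valued sketch of the presumably intended behaviour, matching librosa-style `fft_frequencies` with `1 + n_fft // 2` bins from 0 Hz up to and including sr / 2, is:

```python
import tensorflow as tf

def fft_frequencies_tf(sr=22050, n_fft=2048):
    # 1 + n_fft // 2 evenly spaced bin centres from 0 Hz to the Nyquist frequency
    return tf.linspace(0.0, sr / 2.0, 1 + n_fft // 2)


freqs = fft_frequencies_tf()
print(freqs[0].numpy(), freqs[-1].numpy())  # 0.0 11025.0
```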
Tensorflow-based implementation of librosa.core.fourier_tempo_frequencies | def fourier_tempo_frequencies(sr=22050, win_length=384, hop_length=512):
return fft_frequencies(sr=sr * 60 / float(hop_length), n_fft=win_length) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fft_frequencies(sr=22050, n_fft=2048):\n # TODO endpoint=True\n return tf.linspace(0, tf.cast(sr/2., dtype=tf.int32), tf.cast(1. + n_fft // 2., dtype=tf.int32))",
"def get_frequencies(self):\n num_seconds = float(self.timestamps[-2] - self.timestamps[0]) / float(1000)\n samples_per_second = len(self.data) / num_seconds\n num_samples = len(self.data)\n oscilations_per_sample = [float(oscilations) / num_samples for oscilations in range(0, num_samples)]\n return [ops * samples_per_second for ops in oscilations_per_sample]",
"def freq():",
"def frequencies(self):\r\n\r\n #XXX Use NFFT in the method in order to calculate these, without having\r\n #to calculate the spectrum:\r\n f, spectrum = tsa.get_spectra(self.input.data, method=self.method)\r\n return f",
"def frequencies(self):\r\n\r\n self.method['Fs'] = self.method.get('Fs', self.input.sampling_rate)\r\n NFFT = self.method.get('NFFT', 64)\r\n Fs = self.method.get('Fs')\r\n freqs = tsu.get_freqs(Fs, NFFT)\r\n lb_idx, ub_idx = tsu.get_bounds(freqs, self.lb, self.ub)\r\n\r\n return freqs[lb_idx:ub_idx]",
"def freq(self) -> int:",
"def get_frequency(self):\r\n # print '*********in get freq'\r\n self.cntr.run('FREQ 1')\r\n f_0_ = self.cntr.get_measurements(1)\r\n self.f_0 = f_0_[0]\r\n self.cntr.run('FREQ 2')\r\n f_rep_ = self.cntr.get_measurements(1)\r\n self.f_rep = f_rep_[0]",
"def fourier_freqs(times):\n # get the number of samples and the sample rate\n N = len(times)\n dt = np.mean(np.diff(times))\n\n # get the Nyquist frequency\n f_nyq = 1.0 / (2 * dt)\n\n # return the frequency array\n return np.linspace(-f_nyq, f_nyq, N, endpoint=False)",
"def frequencies(self):\r\n\r\n # Get the sampling rate from the seed time-series:\r\n self.method['Fs'] = self.method.get('Fs', self.seed.sampling_rate)\r\n NFFT = self.method.get('NFFT', 64)\r\n Fs = self.method.get('Fs')\r\n freqs = tsu.get_freqs(Fs, NFFT)\r\n lb_idx, ub_idx = tsu.get_bounds(freqs, self.lb, self.ub)\r\n\r\n return freqs[lb_idx:ub_idx]",
"def fourier_cos(t, params):\n amps = tf.reshape(\n tf.cast(params['amps'].get_value(), dtype=tf.float64),\n [params['amps'].shape[0], 1]\n )\n freqs = tf.reshape(\n tf.cast(params['freqs'].get_value(), dtype=tf.float64),\n [params['freqs'].shape[0], 1]\n )\n t = tf.reshape(\n tf.cast(t, dtype=tf.float64),\n [1, t.shape[0]]\n )\n return tf.reduce_sum(amps * tf.cos(freqs * t), 0)",
"def test_freq(self):\n model = BDF(debug=False)\n sid = 101\n freqs = 0.1\n freq = model.add_freq(sid, freqs, comment='freq')\n #print(freq)\n\n freqs = [2.0, 3.0]\n freq = model.add_freq(sid, freqs, comment='freq')\n #print(freq)\n\n f1 = 0.\n df = 2.0\n freq1 = model.add_freq1(sid, f1, df, ndf=5, comment='freq1')\n assert len(freq1.freqs) == 6, 'freqs=%s' % freq1.freqs\n #print(freq1)\n\n f1 = 1.\n f2 = 8.0\n freq2 = model.add_freq2(sid, f1, f2, nf=6, comment='freq2')\n assert len(freq2.freqs) == 7, 'freqs=%s' % freq2.freqs\n assert np.allclose(freq2.freqs.max(), f2), freq2.freqs\n #print(freq2)\n\n freq4 = model.add_freq4(sid, f1, f2, fspread=0.1, nfm=3, comment='freq4')\n #print(model.frequencies[sid])\n #print(freq4)\n\n fractions = [0., 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]\n freq5 = model.add_freq5(sid, fractions, f1=0., f2=100., comment='freq5')\n\n fractions = np.linspace(0., 1.)\n unused_freq5b = model.add_freq5(sid, fractions, f1=0., f2=100., comment='freq5')\n model.validate()\n\n freq.raw_fields()\n freq.write_card()\n freq.write_card(size=16)\n\n freq1.raw_fields()\n freq1.write_card()\n freq1.write_card(size=16)\n\n freq2.raw_fields()\n freq2.write_card()\n freq2.write_card(size=16)\n\n freq4.raw_fields()\n freq4.write_card()\n freq4.write_card(size=16)\n\n freq5.raw_fields()\n freq5.write_card()\n freq5.write_card(size=16)\n\n bdf_file = StringIO()\n model.write_bdf(bdf_file, close=False)\n unused_out = bdf_file.getvalue()\n bdf_file.seek(0)\n\n model2 = read_bdf(bdf_file, punch=True, debug=False)\n model2.uncross_reference()\n model2.safe_cross_reference()\n model2.uncross_reference()\n save_load_deck(model)",
"def freq(self, frequency: Optional[int]):",
"def get_frequency(time_series):\n if len(time_series.index) == 0:\n return 0\n ft = np.fft.rfft(time_series)\n return np.fft.fftfreq(len(time_series))[np.argmax(abs(ft))]",
"def source_freq(self) -> int:",
"def gen_freqs(ndata, dt):\n dn = 2 # if you like the central frequency to be negative, change dn to 1\n return 1/(ndata*dt) * np.hstack((np.arange(0, (ndata+dn)//2),\n np.arange(-(ndata+dn)//2+dn, 0)))",
"def __init__(self, freqs, num_parallel_calls=10):\n self.num_parallel_calls = tf.convert_to_tensor(num_parallel_calls, tf.int32)\n self.freqs = tf.convert_to_tensor(freqs, dtype=float_type)\n self.freq_feed = tf.data.Dataset.from_tensors(self.freqs).repeat()#tf.data.Dataset.from_generator(self._get_freqs, float_type)\n self.feed = self.freq_feed",
"def tf(word, document):\n return freq(word,document) / wordCount(document)",
"def get_frequency(self):\r\n return self.f",
"def frequencies(self):\n radii = self.radii\n freqs = (1 / (self.shape[0] * self.pixel[0])) * radii\n return freqs",
"def frequency(self):\n return self.reference_clock_speed / 4096 / self.prescale_reg",
"def get_frequency(self,):\n\n # TODO: Find way to appropriately reconvert the frequency to its initial\n # TODO: Value or alert that the value is APPROXIMATE\n FTW = int (0)\n freq = int(0)\n\n FTW_bytes = self._read('CFTW0')\n FTW = FTW.from_bytes(FTW_bytes,'big')\n freq = FTW*self.clock_freq/2**32\n\n print('Latest frequency set: ', \"{:.2e}\".format(freq), 'Hz')\n print(['%.2e' % elem for elem in self.frequencies])\n\n return self.frequencies",
"def GetFrequency(self):\n ...",
"def freq(self, value: int, /) -> None:",
"def tone_to_freq(tone):\n return math.pow(2, (tone - 69.0) / 12.0) * 440.0",
"def get_frequency(self, detune=0) -> float:\n return np.power(2, (self._cents + detune)/1200) * 440",
"def test_Frequency():\r\n tuc = ts.time_unit_conversion\r\n for unit in ['ns', 'ms', 's', None]:\r\n f = ts.Frequency(1, time_unit=unit)\r\n npt.assert_equal(f.to_period(), tuc[unit])\r\n\r\n f = ts.Frequency(1000, time_unit=unit)\r\n npt.assert_equal(f.to_period(), tuc[unit] / 1000)\r\n\r\n f = ts.Frequency(0.001, time_unit=unit)\r\n npt.assert_equal(f.to_period(), tuc[unit] * 1000)",
"def get_frequency(self):\r\n x = self.query('FREQ?')\r\n if x == None: return None\r\n return float(x)",
"def plp_tf(\n y,\n sr=22050,\n tempo_min=30,\n tempo_max=300,\n hop_length=1,\n win_length=512,\n hop_length_novelty=256,\n win_length_novelty=1024,\n loudness_min=0.1,\n loudness_max=1.,\n prior=None):\n y = tf.squeeze(y)\n\n # get spectral flux novelty\n oenv, sr_ = audio_to_spectralflux_tf(y, win_length_novelty, hop_length_novelty, sr)\n\n # get fourier tempogram\n tempogram = tf.transpose(PDDSP_spectral_ops.stft(oenv, win_length,\n frame_step=hop_length,\n fft_length=win_length, pad_end=False,\n center=True, window_fn=tf.signal.hann_window))\n\n # restrict to tempo range prior\n tempo_frequencies = tf.cast(fourier_tempo_frequencies(sr=sr_,\n hop_length=hop_length,\n win_length=win_length), dtype=tf.float32)\n mask = tempo_frequencies < tempo_max\n mask = tf.tile(mask[:, tf.newaxis], [1, tempogram.shape[1]])\n tempogram = tempogram * tf.cast(mask, dtype=tempogram.dtype)\n mask = tempo_frequencies > tempo_min\n mask = tf.tile(mask[:, tf.newaxis], [1, tempogram.shape[1]])\n tempogram = tempogram * tf.cast(mask, dtype=tempogram.dtype)\n\n # discard everything below the peak\n ftmag = tf.math.log1p(1e6 * np.abs(tempogram))\n if prior is not None:\n log_prob = tf.squeeze(prior.log_prob(tempo_frequencies))\n log_prob = tf.tile(log_prob[:, tf.newaxis], [1, ftmag.shape[1]])\n ftmag += log_prob\n peak_values = tf.math.reduce_max(ftmag, axis=0, keepdims=True)\n peak_values = tf.tile(peak_values, [ftmag.shape[0], 1])\n tempogram = tf.cast(ftmag >= peak_values, dtype=tempogram.dtype) * tempogram\n\n # todo keep only phase\n #ftgram = tempogram.numpy()\n #import librosa\n #ftgram /= librosa.util.tiny(ftgram) ** 0.5 + np.abs(ftgram.max(axis=0, keepdims=True))\n #tempogram = tf.cast(ftgram, dtype=tf.complex64)\n\n # Compute pulse by inverting the tempogram\n pulse = PDDSP_spectral_ops.inverse_stft(\n tf.transpose(tempogram), win_length, hop_length, fft_length=win_length, center=True,\n window_fn=tf.signal.inverse_stft_window_fn(hop_length, forward_window_fn=tf.signal.hann_window))\n\n # retain only the positive part and normalize\n pulse /= tf.math.reduce_max(pulse)\n pulse -= tf.math.reduce_mean(pulse)\n pulse = tf.clip_by_value(pulse, clip_value_min=0, clip_value_max=100000)\n\n # compute mean period and expected next onset position\n F_mean = dominant_freq_from_tempogram(tempogram, tempo_frequencies)\n period_mean, mean_offset, next_onset_shift, peaks = period_from_pulse(pulse, F_mean,\n sr=sr_, loudness_min=loudness_min,\n loudness_max=loudness_max)\n period_mean, next_onset_shift, mean_offset = (period_mean/sr_)*sr, (next_onset_shift/sr_)*sr, (mean_offset/sr_)*sr\n\n return pulse, tempogram, oenv, sr_, F_mean, period_mean, mean_offset, next_onset_shift",
"def fft_frequency(fft, index):\n\treturn index * AUDIO_RATE / len(fft) / 2 # Same as in fft_index, see above",
"def fft_frequencies(sr=22050, n_fft=2048):\n\n return np.linspace(0, float(sr) / 2, int(1 + n_fft // 2), endpoint=True)"
]
| [
"0.69349486",
"0.6646392",
"0.6491517",
"0.6421155",
"0.64123386",
"0.6288628",
"0.6275574",
"0.6232782",
"0.6211876",
"0.61571944",
"0.6095463",
"0.60664654",
"0.6022247",
"0.6018121",
"0.6014456",
"0.5987318",
"0.59843725",
"0.5984003",
"0.5983363",
"0.5969597",
"0.5968721",
"0.59652865",
"0.5962759",
"0.59560186",
"0.59400535",
"0.5936698",
"0.592046",
"0.5907817",
"0.58911264",
"0.5881806"
]
| 0.6719496 | 1 |
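The composition above works because the onset envelope is sampled at sr / hop_length frames per second, and multiplying that rate by 60 converts cycles per second into beats per minute. A small numpy check of the same relationship (numpy and the default values are used only for illustration):

```python
import numpy as np

sr, hop_length, win_length = 22050, 512, 384
frame_rate = sr / hop_length                       # onset-envelope sample rate, Hz
tempo_bpm = np.linspace(0, frame_rate / 2, 1 + win_length // 2) * 60
print(tempo_bpm[0], round(tempo_bpm[-1], 1))       # 0.0 and roughly 1292.0 BPM
```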
Bandpass-filters audio to a given frequency range | def bandpass_filter_audio(audio, f_low=400, f_high=450):
filtered_audio = core.sinc_filter(audio, f_low, window_size=256, high_pass=True)
filtered_audio = core.sinc_filter(filtered_audio, f_high, window_size=256, high_pass=False)
return tf.squeeze(filtered_audio) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def bandpassFilter (self, lowerFreq, upperFreq):\n self.bandpassLimits = (lowerFreq, upperFreq)\n # stuff to do",
"def butterworth_bandpass_filter( values, lowFreq, highFreq, sampleFreq, order=5):\n nyq = 0.5 * sampleFreq\n low = lowFreq / nyq\n high = highFreq /nyq\n b,a = butter( order, [low,high], btype='band' )\n return y = lfilter(b, a, data)",
"def butter_bandpass_filter(data, lowcut, highcut, fs, order=5, axis=0): \n omega = 0.5 * fs\n low = lowcut / omega\n high = highcut / omega\n b, a = signal.butter(order, [low, high], btype='band')\n y = signal.lfilter(b, a, data, axis=0)\n return y",
"def bandpass_filtfilt(rawsong, samp_freq, freq_cutoffs=(500, 10000)):\n if freq_cutoffs[0] <= 0:\n raise ValueError('Low frequency cutoff {} is invalid, '\n 'must be greater than zero.'\n .format(freq_cutoffs[0]))\n\n Nyquist_rate = samp_freq / 2\n if freq_cutoffs[1] >= Nyquist_rate:\n raise ValueError('High frequency cutoff {} is invalid, '\n 'must be less than Nyquist rate, {}.'\n .format(freq_cutoffs[1], Nyquist_rate))\n\n if rawsong.shape[-1] < 387:\n numtaps = 64\n elif rawsong.shape[-1] < 771:\n numtaps = 128\n elif rawsong.shape[-1] < 1539:\n numtaps = 256\n else:\n numtaps = 512\n\n cutoffs = np.asarray([freq_cutoffs[0] / Nyquist_rate,\n freq_cutoffs[1] / Nyquist_rate])\n # code on which this is based, bandpass_filtfilt.m, says it uses Hann(ing)\n # window to design filter, but default for matlab's fir1\n # is actually Hamming\n # note that first parameter for scipy.signal.firwin is filter *length*\n # whereas argument to matlab's fir1 is filter *order*\n # for linear FIR, filter length is filter order + 1\n b = scipy.signal.firwin(numtaps + 1, cutoffs, pass_zero=False)\n a = np.zeros((numtaps+1,))\n a[0] = 1 # make an \"all-zero filter\"\n padlen = np.max((b.shape[-1] - 1, a.shape[-1] - 1))\n filtsong = scipy.signal.filtfilt(b, a, rawsong, padlen=padlen)\n return filtsong",
"def bandpass(filename,f1,f2,Q,wout=True,plot=True):\n start=time.time()\n n, data, data_dB,sr,ch=inputwav(filename)\n b, a = butter(Q,Wn=(f1/sr,f2/sr),btype='bandpass')\n data_filtered=lfilter(b,a,data,axis=0)\n print('Applying FFT...')\n if wout==True:\n print('Exporting...')\n sf.write(filename[0:len(filename)-4]+'_BP.wav',data_filtered,sr,'PCM_16')\n if plot==True:\n print('Plotting...')\n py.close()\n w, h = freqz(b,a,worN=16384)\n fig, (ax1, ax2) = py.subplots(nrows=2)\n ax1.semilogx(0.5*sr*w/np.pi,abs(h),'k-')\n ax1.set_xlabel('Frequency (Hz)')\n ax1.set_ylabel('Rel. Amplitude')\n ax1.grid()\n ax1.set_ylim(0,1.1)\n ax1.set_xlim(1,20000)\n ax2.plot(data,'k-',label='Raw data')\n ax2.plot(data_filtered,'m-',lw=1,label='Filtered data')\n ax2.set_xlim(0,10000)\n ax2.set_ylim(-1,1)\n ax2.set_ylabel('Amplitude (Norm Bits)')\n ax2.set_xlabel('Samples')\n ax2.legend(loc=2,frameon=False,ncol=2)\n py.subplots_adjust(hspace=0.35) \n print('Done!')\n end=time.time()\n elapsed=int(1000*(end-start))\n print('...............................')\n print('Completed in '+str(elapsed)+' milliseconds.')\n return data_filtered",
"def bandpass_filtfilt(rawsong, samp_freq, freq_cutoffs=None):\n\n Nyquist_rate = samp_freq / 2\n if freq_cutoffs is None:\n freq_cutoffs = [500, 10000]\n if rawsong.shape[-1] < 387:\n numtaps = 64\n elif rawsong.shape[-1] < 771:\n numtaps = 128\n elif rawsong.shape[-1] < 1539:\n numtaps = 256\n else:\n numtaps = 512\n\n cutoffs = np.asarray([freq_cutoffs[0] / Nyquist_rate,\n freq_cutoffs[1] / Nyquist_rate])\n # code on which this is based, bandpass_filtfilt.m, says it uses Hann(ing)\n # window to design filter, but default for matlab's fir1\n # is actually Hamming\n # note that first parameter for scipy.signal.firwin is filter *length*\n # whereas argument to matlab's fir1 is filter *order*\n # for linear FIR, filter length is filter order + 1\n b = scipy.signal.firwin(numtaps + 1, cutoffs, pass_zero=False)\n a = np.zeros((numtaps+1,))\n a[0] = 1 # make an \"all-zero filter\"\n padlen = np.max((b.shape[-1] - 1, a.shape[-1] - 1))\n filtsong = scipy.signal.filtfilt(b, a, rawsong, padlen=padlen)\n return filtsong",
"def butter_bandpass_filter(data, lowcut, highcut, fs, order=1):\n b, a = butter_bandpass(lowcut, highcut, fs, order=order)\n y = filtfilt(b, a, data)\n return y",
"def __band_filter(data: dict, lowFreq: Union[int, float], highFreq: Union[int, float], timestep: int=0,\n samplingFreq: int=240, order: int=5, eegSensor: int=0, filterType: str='bandpass',\n lengthOfTestSeconds: Union[int, float]=32, example: int=0) -> dict:\n #Test\n # Filter.__band_filter_test(data=data, low=lowFreq, high=highFreq, samplingFreq=samplingFreq, order=order,\n # eegSensor=eegSensor, filterType=filterType, lengthOfTestSeconds=lengthOfTestSeconds)\n #Code\n nyq = 0.5 * samplingFreq\n low = lowFreq / nyq\n high = highFreq / nyq\n b, a = signal.butter(order, [low, high], btype=filterType)\n y = signal.lfilter(b, a, data['Signal'])\n ##Graph - This belongs somewhere else probably.\n # t = np.linspace(0, len(data), len(data), endpoint=False)\n # plt.plot(t, y, label='Sensor #' + str(eegSensor) + ' (' + str(lowFreq) + '-' + str(highFreq) + ') Hz')\n # plt.grid(True)\n # plt.axis('tight')\n # plt.xticks(range(10), range(lengthOfTestSeconds)) ##32 seconds per test?\n # plt.xlabel(\"Time in Seconds\")\n # plt.legend(loc='upper left')\n # plt.show()\n output = {}\n timestep = []\n for index, eegChannel in enumerate(y[0]):#the extra [0] is becuase signal.lfilter() puts it in a 1D array. Grrr\n timestep.append(eegChannel)\n output['Signal'] = timestep\n Visualization.channelGraph(y[0][0])\n return output #output is 2D 64xTimeSamples",
"def filters(array, sample_frequency):\n strain = TimeSeries(array, sample_rate=int(sample_frequency))\n white_data = strain.whiten(fftlength=4, fduration=4)\n bp_data = white_data.bandpass(50, 250)\n return bp_data.value",
"def bandpass_filter(data, lowcut, highcut, fs=2000, numtaps=255):\n nyq = fs / 2\n\n # design filter\n fe1 = lowcut / nyq\n fe2 = highcut / nyq\n b = firwin(numtaps, (fe1, fe2), pass_zero=False)\n\n filtered = lfilter(b, 1, data)\n\n return filtered",
"def butter_bandpass_filter(data, lowcut, highcut, fs, order=2):\n nyq = 0.5 * fs\n low = lowcut / nyq\n high = highcut / nyq\n \n # butter() and lfilter() are from scipy.signal\n \n b, a = butter(order, [low, high], btype='band')\n y = lfilter(b, a, data)\n return y",
"def bandpass_filter(s, sample_rate, low_freq, high_freq, filter_order=5, rescale=False):\n #create a butterworth filter\n nyq = sample_rate / 2.0\n f = np.array([low_freq, high_freq]) / nyq\n b,a = filter_design.butter(filter_order, f, btype='bandpass')\n\n #filter the signal\n filtered_s = filtfilt(b, a, s)\n\n if rescale:\n #rescale filtered signal\n filtered_s /= filtered_s.max()\n filtered_s *= s.max()\n\n return filtered_s",
"def _butter_bandpass_filter(self, data: np.ndarray, lowcut: float, highcut: float, fs: float, order: int = 5):\n b, a = self._butter_bandpass(lowcut, highcut, fs, order=order)\n y = lfilter(b, a, data)\n return y",
"def temporal_bandpass_filter(video_to_filter, low, high, fps):\n fft = fftpack.fft(video_to_filter, axis=0)\n frequencies = fftpack.fftfreq(video_to_filter.shape[0], d=1.0 / fps)\n bound_low = (np.abs(frequencies - low)).argmin()\n bound_high = (np.abs(frequencies - high)).argmin()\n fft[:bound_low] = 0\n fft[bound_high:-bound_high] = 0\n fft[-bound_low:] = 0\n iff = fftpack.ifft(fft, axis=0)\n return iff",
"def bandpass_filter(files, lowpass_freq=0.1, highpass_freq=0.01, tr=2):\n import os\n\n import nibabel as nb\n import numpy as np\n from nipype.utils.filemanip import (\n filename_to_list,\n list_to_filename,\n split_filename\n )\n\n fs = 1./tr\n\n out_files = []\n for filename in filename_to_list(files):\n path, name, ext = split_filename(filename)\n out_file = os.path.join(os.getcwd(), name + '_bandpassed' + ext)\n\n img = nb.load(filename)\n timepoints = img.shape[-1]\n F = np.zeros((timepoints))\n\n lowidx = int(timepoints / 2) + 1\n if lowpass_freq > 0:\n lowidx = np.round(float(lowpass_freq) / fs * timepoints)\n\n highidx = 0\n if highpass_freq > 0:\n highidx = np.round(float(highpass_freq) / fs * timepoints)\n F[int(highidx):int(lowidx)] = 1\n F = ((F + F[::-1]) > 0).astype(int)\n data = img.get_data()\n if np.all(F == 1):\n filtered_data = data\n else:\n filtered_data = np.real(np.fft.ifftn(np.fft.fftn(data) * F))\n img_out = nb.Nifti1Image(filtered_data, img.affine, img.header)\n img_out.to_filename(out_file)\n out_files.append(out_file)\n\n return list_to_filename(out_files)",
"def butter_filter(datalist):\n fs = 200.00\n fHigh = 50.00\n fLow = 5.00\n N=4\n [b,a]=sg.butter(N,[fLow/fs, fHigh/fs], btype='band')\n global filtered\n #IIR filter\n return sg.filtfilt(b,a,datalist)",
"def butter_filter(self, data, low_pass, high_pass, fs, order=10):\n\n nyq = fs/2\n low = low_pass/nyq\n high = high_pass/nyq\n\n b, a = signal.butter(order, [low, high], btype='band')\n filt_data = np.abs(signal.hilbert(signal.filtfilt(b, a, data, axis=1), axis=1))\n return filt_data",
"def butter_bandstop_filter(data, lowcut, highcut, fs, order):\n\n nyq = 0.5 * fs\n low = lowcut / nyq\n high = highcut / nyq\n\n i, u = sg.butter(order, (low, high), btype='bandstop')\n y = sg.filtfilt(i, u, data)\n return y",
"def butter_bandpass_filter(\n data: numpy.ndarray,\n lowcut: float,\n highcut: float,\n samplerate: float,\n order: int = 2,\n):\n nyq = 0.5 * samplerate\n lowf = lowcut / nyq\n highf = highcut / nyq\n # generic names for coefficients in filters\n # pylint: disable=invalid-name\n a, b = butter(order, [lowf, highf], btype=\"band\")\n if len(data) < BUTTER_MIN_LENGTH:\n return None\n return filtfilt(a, b, data)",
"def bessel_wave(freq, alpha, beta):\n return FMFilter(SineWave(freq), SineWave(alpha), beta)",
"def _filter_frequencies(self):\n import scipy.signal as spsg\n freq_bands = ['alpha', 'beta', 'gamma']\n if len(freq_bands) != self.n_bands:\n raise ValueError('Rename frequency bands')\n freqs_ts = np.empty([0, self.total_trials, self.ms, self.n_raw_features])\n for i_band in range(self.n_bands):\n freq_band = freq_bands[i_band]\n\n if freq_band == 'alpha':\n low_f = 8./self.sampling_freq\n high_f = 15./self.sampling_freq\n elif freq_band == 'beta':\n # beta\n low_f = 15./self.sampling_freq\n high_f = 32./self.sampling_freq\n elif freq_band == 'gamma':\n # gamma\n low_f = 32./self.sampling_freq\n high_f = 80./self.sampling_freq\n else:\n raise NameError('unknown filter')\n\n b, a = spsg.iirfilter(self.band_filter_order, [low_f, high_f],\n btype='bandpass', ftype='butter', output='ba')\n # ts_data: (trials, t, n)\n filtered_ts = spsg.filtfilt(b, a, self.ts_data, axis=-2)\n freqs_ts = np.concatenate((freqs_ts, np.array([filtered_ts])))\n\n return freqs_ts",
"def bandpass(self, min_f, max_f, out_of_bounds_ok=True):\n\n if min_f >= max_f:\n raise ValueError(\n f\"min_f must be less than max_f (got min_f {min_f}, max_f {max_f}\"\n )\n\n if not out_of_bounds_ok:\n # self.frequencies fully coveres the spec's frequency range\n if min_f < min(self.frequencies) or max_f > max(self.frequencies):\n raise ValueError(\n \"with out_of_bounds_ok=False, min_f and max_f must fall\"\n \"inside the range of self.frequencies\"\n )\n\n # find indices of the frequencies in spec_freq closest to min_f and max_f\n lowest_index = np.abs(self.frequencies - min_f).argmin()\n highest_index = np.abs(self.frequencies - max_f).argmin()\n\n # take slices of the spectrogram and spec_freq that fall within desired range\n return self.__class__(\n self.spectrogram[lowest_index : highest_index + 1, :],\n frequencies=self.frequencies[lowest_index : highest_index + 1],\n times=self.times,\n decibel_limits=self.decibel_limits,\n window_samples=self.window_samples,\n overlap_samples=self.overlap_samples,\n window_type=self.window_type,\n audio_sample_rate=self.audio_sample_rate,\n scaling=self.scaling,\n )",
"def bandpass_cnt(data, low_cut_hz, high_cut_hz, fs, filt_order=3, axis=0):\n if (low_cut_hz == 0 or low_cut_hz is None) and (\n high_cut_hz == None or high_cut_hz == fs / 2.0):\n log.info(\"Not doing any bandpass, since low 0 or None and \"\n \"high None or nyquist frequency\")\n return data.copy()\n if low_cut_hz == 0 or low_cut_hz == None:\n log.info(\"Using lowpass filter since low cut hz is 0 or None\")\n return lowpass_cnt(data, high_cut_hz, fs, filt_order=filt_order, axis=axis)\n if high_cut_hz == None or high_cut_hz == (fs / 2.0):\n log.info(\n \"Using highpass filter since high cut hz is None or nyquist freq\")\n return highpass_cnt(data, low_cut_hz, fs, filt_order=filt_order, axis=axis)\n\n nyq_freq = 0.5 * fs\n low = low_cut_hz / nyq_freq\n high = high_cut_hz / nyq_freq\n b, a = scipy.signal.butter(filt_order, [low, high], btype='bandpass')\n assert filter_is_stable(a), \"Filter should be stable...\"\n data_bandpassed = scipy.signal.lfilter(b, a, data, axis=axis)\n return data_bandpassed",
"def butterworth_filter(signal, Fs, highpassfreq=None, lowpassfreq=None, order=4, filtfunc='filtfilt'):\n Fn = Fs / 2.\n \n # set the function for filtering\n if filtfunc is 'lfilter':\n ffunc = spsig.lfilter\n elif filtfunc is 'filtfilt':\n ffunc = spsig.filtfilt\n else:\n raise ValueError(\"filtfunc must to be either 'filtfilt' or 'lfilter'\")\n \n # set parameters\n if lowpassfreq and highpassfreq:\n if highpassfreq < lowpassfreq:\n Wn = (highpassfreq / Fn, lowpassfreq / Fn)\n btype = 'bandpass'\n else:\n Wn = (lowpassfreq / Fn, highpassfreq / Fn)\n btype = 'bandstop'\n elif lowpassfreq:\n Wn = lowpassfreq / Fn\n btype = 'lowpass'\n elif highpassfreq:\n Wn = highpassfreq / Fn\n btype = 'highpass'\n else:\n raise ValueError(\"Specify highpassfreq and/or lowpathfreq\")\n \n # filter design\n b, a = spsig.butter(order, Wn, btype=btype)\n \n return ffunc(b, a, signal)",
"def filters(self, low_freq=1/7, high_freq=128, notch_freq=50):\n self.raw.filter(l_freq=low_freq, h_freq=high_freq)\n self.raw.notch_filter(range(notch_freq, high_freq, notch_freq), filter_length='auto',\n phase='zero', fir_design='firwin')",
"def make_filter_banks(power_frames, sampling_rate, NFFT, num_filt = 40):\n low_freq_mel = 0\n high_freq_mel = Hz_to_Mel(sampling_rate/2) # Convert Hz to Mel\n #mel_points = np.arange(low_freq_mel, high_freq_mel, (high_freq_mel - low_freq_mel)/(num_filt + 2)) # Equally spaced in Mel scale\n mel_points = np.linspace(low_freq_mel, high_freq_mel, num_filt + 2) # Equally spaced in Mel scale\n #hz_points = Mel_to_Hz(mel_points) # Convert Mel to Hz\n bins = np.floor((NFFT + 1) * Mel_to_Hz(mel_points) / sampling_rate)\n \n #bank = np.empty((num_filt, int(np.floor(NFFT / 2 + 1))))\n bank = np.zeros((num_filt, int(np.floor(NFFT / 2 + 1))))\n for m in range(1, num_filt + 1):\n f_s = bins[m - 1 : m + 2]\n f_prev = int(f_s[0]) # left\n f = int(f_s[1]) # center\n f_next = int(f_s[2]) # right\n\n np.put(bank[m - 1], list(range(f_prev)), 0) # k < f_prev\n\n for k in range(f_prev, f):\n np.put(bank, ((m - 1)*int(np.floor(NFFT / 2 + 1))) + k, (k - f_prev) / (f - f_prev)) \n \n for k in range(f, f_next):\n np.put(bank, ((m - 1)*int(np.floor(NFFT / 2 + 1))) + k, (f_next - k) / (f_next - f))\n\n np.put(bank[m - 1], list(range(f_next, len(bank))), 0) # k > f_next\n\n filter_banks = np.where(np.dot(power_frames, bank.T) == 0, np.finfo(float).eps, np.dot(power_frames, bank.T))\n #filter_banks = np.where(filter_banks == 0, np.finfo(float).eps, filter_banks) # Numerical Stability\n filter_banks = 20 * np.log10(filter_banks) # dB\n\n return filter_banks",
"def bandpass_filterbank(bands, fs=1.0, order=8, output=\"sos\"):\n\n filters = []\n nyquist = fs / 2.0\n\n for band in bands:\n # remove bands above nyquist frequency\n if band[0] >= nyquist:\n raise ValueError(\"Bands should be below Nyquist frequency\")\n\n # Truncate the highest band to Nyquist frequency\n norm_band = np.minimum(0.99, np.array(band) / nyquist)\n\n # Compute coefficients\n coeffs = butter(order / 2, norm_band, \"bandpass\", output=output)\n filters.append(coeffs)\n\n return filters",
"def bandpass_filter_raw_plot(data, fs, f1, f2):\n b, a = sp.butter(N=2, Wn=np.array([f1, f2]) / fs * 2, btype='bandpass') # build a bandpass butterworth filter of order 4, with cut-off frequencies 1 and 45\n w, h = sp.freqz(b, a) # compute the frequency response of the filter\n f = w / np.pi * fs / 2\n plt.figure()\n plt.plot(f, 10 * np.log10(abs(h)))\n plt.xlabel('frequency (Hz)')\n plt.ylabel('Magnitude (dB)')\n plt.title('frequency response of butterworth bandpass [1, 45]Hz')\n plt.grid()\n\n data1 = sp.filtfilt(b, a, data)\n return data1",
"def bandstop_filter(data, lowcut, highcut, fs=2000, numtaps=255):\n nyq = fs / 2\n\n # design filter\n fe1 = lowcut / nyq\n fe2 = highcut / nyq\n b = firwin(numtaps, (fe1, fe2), pass_zero=True)\n\n filtered = lfilter(b, 1, data)\n\n return filtered",
"def butterworth_bandpass(lowcut, highcut, fs, order=5):\n nyq = 0.5 * fs # nyquist sampling rate\n low = lowcut / nyq # normalize frequency\n high = highcut / nyq # normalize frequency\n b, a = butter(order, [low, high], btype='band')\n return b, a"
]
| [
"0.73646635",
"0.7264086",
"0.72377187",
"0.7138188",
"0.7078694",
"0.70686543",
"0.70422363",
"0.6981005",
"0.691496",
"0.6907205",
"0.69069135",
"0.6898185",
"0.6839194",
"0.6818611",
"0.6705521",
"0.66445243",
"0.6612388",
"0.66105556",
"0.65803343",
"0.6549235",
"0.6520607",
"0.6510182",
"0.64639735",
"0.64353395",
"0.63971263",
"0.63174385",
"0.63046056",
"0.62944627",
"0.6291009",
"0.6288037"
]
| 0.7543435 | 0 |
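The record above band-limits audio by chaining two sinc filters from a DDSP-style `core` module. As an illustrative aside only (not part of the dataset), here is a minimal SciPy sketch of the same idea as a single zero-phase Butterworth band-pass; the function name `band_limit_audio`, the filter order, and the test tones are my own assumptions.

```python
import numpy as np
from scipy.signal import butter, sosfiltfilt


def band_limit_audio(audio, sr, f_low=400.0, f_high=450.0, order=4):
    """Keep only the [f_low, f_high] Hz band of a mono signal sampled at sr."""
    nyq = 0.5 * sr
    sos = butter(order, [f_low / nyq, f_high / nyq], btype="band", output="sos")
    # Forward-backward filtering (sosfiltfilt) avoids adding any phase shift.
    return sosfiltfilt(sos, audio)


if __name__ == "__main__":
    sr = 16000
    t = np.arange(sr) / sr
    # 100 Hz tone (outside the band) plus a 425 Hz tone (inside the band).
    x = np.sin(2 * np.pi * 100 * t) + np.sin(2 * np.pi * 425 * t)
    y = band_limit_audio(x, sr)
    print(x.shape, y.shape)  # both (16000,)
```

Cascading a high-pass at `f_low` with a low-pass at `f_high`, as the record's `sinc_filter` calls do, has the same net effect as the single band-pass design sketched here.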
Calculate dominant frequency from tempogram. | def dominant_freq_from_tempogram(tempogram, tempo_frequencies, return_Hz = True):
tempo_BPM_max = tempo_frequencies \
* tf.cast(tf.math.abs(tempogram[:, 0])
== tf.math.reduce_max(tf.math.abs(tempogram[:, 0])),
tempo_frequencies.dtype)
if return_Hz:
dominant_tempo = tf.cast(tf.math.reduce_max(tempo_BPM_max)/60, dtype=tf.float32)
else:
dominant_tempo = tf.cast(tf.math.reduce_max(tempo_BPM_max), dtype=tf.float32)
weights = tf.cast(tf.math.abs(tempogram[:, 0]), dtype=tf.float32)
weighted_mean = tf.nn.weighted_moments(tempo_frequencies, axes=[0], frequency_weights=weights)[0]
if return_Hz:
weighted_mean_tempo = tf.expand_dims(tf.cast(weighted_mean/60, dtype=tf.float32), axis = 0)
else:
weighted_mean_tempo = tf.expand_dims(tf.cast(weighted_mean, dtype=tf.float32), axis = 0)
dominant_tempo = tf.expand_dims(dominant_tempo, axis=0)
out = tf.concat([dominant_tempo, weighted_mean_tempo], axis=0)
return tf.cast(out, dtype=tf.float32) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def DominantFrequency(data):\n w = fft.fft(data)\n freqs = fft.fftfreq(len(data))\n i = argmax(abs(w))\n dom_freq = freqs[i]\n dom_freq_hz = abs(dom_freq * 32.0)\n return dom_freq_hz",
"def freq():",
"def freq(self) -> int:",
"def compute_frequency(record):\n try:\n info = record.info\n except:\n info = record.INFO\n\n alt_freq = [float(count) / info[\"DP\"] for count in info[\"AO\"]]\n return alt_freq",
"def freq(self, frequency: Optional[int]):",
"def raw_freq(*marginals):\n return float(marginals[NGRAM]) / marginals[TOTAL]",
"def dominance(counts):\n freqs = counts/float(counts.sum())\n return (freqs*freqs).sum()",
"def get_frequency(self):\r\n x = self.query('FREQ?')\r\n if x == None: return None\r\n return float(x)",
"def frequency(self):\n return float(self.get_frequency())",
"def GetFrequency(self):\n ...",
"def frequency(self):\n return infer_frequency(self._obj, 'ignore')",
"def get_frequency(self,):\n\n # TODO: Find way to appropriately reconvert the frequency to its initial\n # TODO: Value or alert that the value is APPROXIMATE\n FTW = int (0)\n freq = int(0)\n\n FTW_bytes = self._read('CFTW0')\n FTW = FTW.from_bytes(FTW_bytes,'big')\n freq = FTW*self.clock_freq/2**32\n\n print('Latest frequency set: ', \"{:.2e}\".format(freq), 'Hz')\n print(['%.2e' % elem for elem in self.frequencies])\n\n return self.frequencies",
"def annual_cycle_dominant(tas):\n detrend = signal.detrend(tas)\n L = len(tas)\n freqs = np.fft.fftfreq(L)\n tas_fft = np.fft.fft(detrend)\n R = tas_fft.real\n Im = tas_fft.imag\n mag = np.sqrt(R**2+Im**2)\n the_period = 1./np.abs(freqs[np.argmax(mag)])\n return the_period",
"def frequency(self, pt):\n return math.floor(abs(self.id - pt.id) / 2)",
"def freq(self, value: int, /) -> None:",
"def frequencies(self):\r\n\r\n #XXX Use NFFT in the method in order to calculate these, without having\r\n #to calculate the spectrum:\r\n f, spectrum = tsa.get_spectra(self.input.data, method=self.method)\r\n return f",
"def frequency(self):\n return self._pca.frequency",
"def get_frequency(self):\r\n return self.f",
"def get_frequency(processed_text_list):\n \n word_frequency = FreqDist(word for word in processed_text_list)\n\n sorted_counts = sorted(word_frequency.items() , key = lambda x: x[1] ,\n reverse = True)\n\n return sorted_counts",
"def frequency():\n\n return make_simple_tsv_get_response(FREQ_FILE, 'frequency')",
"def freq(self, x):\n return self.d.get(x, 0)",
"def statistic_tfidf(self):\n\t\t# calculate df-idf for all words\n\t\tcount_dict = {x: self.main_doc.term_dict[x] * self.__count_term_in_env(x) for x in self.main_doc.term_dict}\n\t\t# sort them by df and idf\n\t\treturn sorted(count_dict.items(), key=operator.itemgetter(1), reverse=True)",
"def _frequency_of(self, token):\n frequency_value_of_word = self._word_2_frequency.get(token)\n if not frequency_value_of_word:\n return 0\n return frequency_value_of_word",
"def test_constant():\n generator = SignalGenerator()\n data = generator.constant()\n freq_features = FrequencyFeature(data, sr=50)\n freq_features.fft().peaks()\n assert np.allclose(freq_features.dominant_frequency(n=1), np.zeros((1, 3)))\n assert np.allclose(\n freq_features.dominant_frequency_power(n=1), np.zeros((1, 3)))",
"def get_frequency(self):\r\n x = self.query('SOUR:FREQ:CW?')\r\n if x == None: return None\r\n return float(x)",
"def get_frequency(self):\r\n x = self.query('SOUR:FREQ:CW?')\r\n if x == None: return None\r\n return float(x)",
"def tf(word, document):\n return freq(word,document) / wordCount(document)",
"def computeFreq(self):\n for x in self.data:\n i = 0\n for interval in self.classesInterval:\n if interval[0] <= x <= interval[1]:\n self.frequencies[i] += 1\n break\n i += 1\n\n self.minFreq = self.frequencies[0]\n self.maxFreq = self.frequencies[0]\n for f in self.frequencies:\n if f < self.minFreq:\n self.minFreq = f\n elif f > self.maxFreq:\n self.maxFreq = f",
"def frequency(self) -> NumType:\n return self._freq",
"def prep_natural_frequency(self) -> np.ndarray:\n\n params = self.freq_params\n\n x = gauss_width(**params)\n weights = gaussian(x, **params)\n prob = weights/np.sum(weights) # pdf for weights\n\n rng = np.random.default_rng()\n frequency = rng.choice(x, size=np.prod(self.ic.shape), p=prob, replace=True)\n\n print(f' natural frequency stats in hz:\\n'\n f' mean: {np.round(np.mean(frequency),3)}\\n'\n f' st dev: {np.round(np.std(frequency),3)}\\n'\n f' converted to phase angle on output')\n return frequency*np.pi*2"
]
| [
"0.7576257",
"0.7283553",
"0.69760036",
"0.6434121",
"0.6414743",
"0.6393278",
"0.6370433",
"0.6363414",
"0.63366425",
"0.6316601",
"0.62846553",
"0.61912936",
"0.61501634",
"0.61432326",
"0.6078933",
"0.6078761",
"0.607784",
"0.60529363",
"0.60146475",
"0.5996548",
"0.5993181",
"0.59876525",
"0.59597456",
"0.59344095",
"0.59264475",
"0.59264475",
"0.5924314",
"0.5916937",
"0.5915339",
"0.589656"
]
| 0.7321282 | 1 |
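The record above reduces a tempogram to two numbers in TensorFlow: the tempo of the strongest bin and a magnitude-weighted mean tempo, optionally converted from BPM to Hz. As a hedged NumPy sketch (the names `dominant_tempo` and `tempo_bpm` are mine, not from the dataset), the same reduction looks like this:

```python
import numpy as np


def dominant_tempo(tempogram, tempo_bpm, return_hz=True):
    """tempogram: (n_tempi, n_frames) array; tempo_bpm: (n_tempi,) tempo axis in BPM."""
    mag = np.abs(tempogram[:, 0])                      # first frame, as in the record above
    peak_bpm = tempo_bpm[np.argmax(mag)]               # strongest single tempo bin
    mean_bpm = np.sum(tempo_bpm * mag) / np.sum(mag)   # magnitude-weighted mean tempo
    scale = 1.0 / 60.0 if return_hz else 1.0           # BPM -> Hz if requested
    return np.array([peak_bpm * scale, mean_bpm * scale], dtype=np.float32)


if __name__ == "__main__":
    tempo_bpm = np.linspace(30, 300, 128)
    tempogram = np.random.rand(128, 10)
    tempogram[64] += 5.0                               # plant a clear peak near 166 BPM
    print(dominant_tempo(tempogram, tempo_bpm, return_hz=False))
```

Reporting both values is useful because the arg-max can jump between octave-related tempo bins, while the weighted mean varies smoothly.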
Run PLP encoder over all chunks in a song | def encode_song(y, sr, chunks=8,
tempo_min=60,
tempo_max=300,
f_low=400, f_high=450,
loudness_min=0.1, loudness_max=1,
filter=False, plot=True,
padding_seconds=4,
frame_step=0.1):
if chunks != 0:
y_list = tf.signal.frame(y, sr*chunks, int(sr*frame_step), pad_end=True, pad_value=0, axis=-1) # TODO padding
else:
y_list = [tf.cast(y, dtype=tf.float32)]
tempo_mean_list, period_mean_list, beats_list = None, None, None
for y, index in zip(y_list, range(len(y_list))):
# Bandpass filter audio
if filter:
y = bandpass_filter_audio(y[tf.newaxis,:], f_low=f_low, f_high=f_high)
# Compute phase and period
pulse, tempogram, oenv, sr_, F_mean, period_mean, mean_offset, next_onset_shift = plp_tf(
y=y, sr=sr,
tempo_min=tempo_min,
tempo_max=tempo_max,
hop_length=1,
win_length=512,
hop_length_novelty=256,
win_length_novelty=1024,
loudness_min=0.2,
loudness_max=1.)
if tempo_mean_list is None:
tempo_mean_list = [F_mean] # in Hz
period_mean_list = [mean_offset/sr] # in seconds
else:
tempo_mean_list.append(F_mean) # in Hz
period_mean_list.append(mean_offset/sr) # in seconds
# Compute beat positions via local maxima
beats = find_local_maxima(tf.clip_by_value(pulse,
clip_value_min=loudness_min,
clip_value_max=loudness_max))[1:]
# correct timing in each chunk
beats = tf.cast(beats, dtype=tf.float32) + (tf.cast(index, dtype=tf.float32) * pulse.shape[0])
beats = beats - padding_seconds*sr_ #remove padding #TODO fix
if beats_list is None:
beats_list = beats
else:
beats_list = np.concatenate([beats_list, beats], axis=0)
# Optionally plot tempogram and pulse for each input
if plot:
plot_tempogram_and_pulse(tempogram, pulse, oenv, sr_, 1)
plot_librosa_tempogram(y.numpy(), sr)
# samples to time
beats_list = np.asarray(beats_list) / sr_
return tempo_mean_list, period_mean_list, beats_list, oenv.numpy() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def encoding_loop(self, commands):\n try:\n enc_path = self.temp / 'split'\n done_path = self.temp / 'done.json'\n\n if self.resume and done_path.exists():\n log('Resuming...\\n')\n\n with open(done_path) as f:\n data = json.load(f)\n\n total = data['total']\n done = len(data['done'])\n initial = sum(data['done'].values())\n\n log(f'Resumed with {done} encoded clips done\\n\\n')\n else:\n initial = 0\n total = frame_probe_fast(self.input)\n\n if total == 0:\n total = frame_probe(self.input)\n\n d = {'total': total, 'done': {}}\n with open(done_path, 'w') as f:\n json.dump(d, f)\n\n clips = len([x for x in enc_path.iterdir() if x.suffix == \".mkv\"])\n self.workers = min(self.workers, clips)\n\n print(f'\\rQueue: {clips} Workers: {self.workers} Passes: {self.passes}\\n'\n f'Params: {self.video_params.strip()}')\n\n with concurrent.futures.ThreadPoolExecutor(max_workers=self.workers) as executor:\n counter = Manager().Counter(total, initial)\n future_cmd = {executor.submit(self.encode, (cmd, counter)): cmd for cmd in commands}\n for future in concurrent.futures.as_completed(future_cmd):\n future_cmd[future]\n try:\n future.result()\n except Exception as exc:\n _, _, exc_tb = sys.exc_info()\n print(f'Encoding error {exc}\\nAt line {exc_tb.tb_lineno}')\n terminate()\n except KeyboardInterrupt:\n terminate()",
"def chunks(sequence, chunk_size):\r\n\r\n # YOUR CODE HERE\r",
"def remix(self):\n self.log(\"Looking up track...\", 5)\n self.getTag()\n self.processArt()\n\n self.log(\"Listening to %s...\" % ('\"%s\"' % self.tag['title'] if 'title' in self.tag else 'song'), 5)\n self.original = audio.LocalAudioFile(self.infile, False)\n if not 'title' in self.tag:\n self.detectSong(self.original)\n self.st = FastModify()\n \n self.log(\"Choosing key and tempo...\", 10)\n self.tonic = self.original.analysis.key['value']\n self.tempo = self.original.analysis.tempo['value']\n self.bars = self.original.analysis.bars\n self.beats = self.original.analysis.beats\n self.sections = self.original.analysis.sections\n self.tag['key'] = self.keys[self.tonic] if self.tonic >= 0 and self.tonic < 12 else '?'\n self.tag['tempo'] = self.template['tempo']\n\n self.log(\"Arranging intro...\", 40.0/(len(self.sections) + 1))\n self.partialEncode(self.compileIntro())\n\n past_progress = 0\n hats = audio.AudioData(self.sample_path + self.template['hats'], sampleRate=44100, numChannels=2, verbose=False)\n\n i = 0 # Required if there are no sections\n for i, section in enumerate(self.sections):\n self.log(\"Arranging section %s of %s...\" % (i+1, len(self.sections)), 40.0/(len(self.sections) + 1))\n a, b = self.compileSection(i, section, hats)\n self.partialEncode(a)\n self.partialEncode(b)\n del a, b\n del hats\n self.original.unload()\n\n self.log(\"Adding ending...\", 5)\n self.partialEncode(\n audio.AudioData(\n self.sample_path + self.template['splash_ends'][(i + 1) % len(self.template['splash_ends'])],\n sampleRate=44100,\n numChannels=2,\n verbose=False\n )\n )\n \n self.log(\"Mixing...\", 5)\n self.mixwav(self.tempfile)\n\n if self.deleteOriginal:\n try:\n unlink(self.infile)\n except:\n pass # File could have been deleted by an eager cleanup script\n\n self.log(\"Mastering...\", 5)\n self.lame(self.tempfile, self.outfile)\n unlink(self.tempfile)\n \n self.log(\"Adding artwork...\", 20)\n self.updateTags(titleSuffix = \" (Wub Machine Remix)\")\n \n return self.outfile",
"def chunkify(song):\n assert len(song) >= CHUNK_SIZE * 2\n for i in xrange(0, len(song) - CHUNK_SIZE, CHUNK_SIZE // 2):\n yield np.fft.rfft(song[i: i + CHUNK_SIZE])",
"def all_wav_to_mp3(self):\n for each_file, artist in self.past_songs_db_data:\n self.convert_wav_to_mp3(each_file)",
"def step(self, chunk: th.Tensor) -> th.Tensor:\n for conv1d in self.enc_layers:\n chunk = conv1d(chunk)\n return chunk",
"def data_music_parcelled():\n pass",
"def divide_chunks(audio_file_, chunksize):\n\n for j in range(0, len(audio_file_), self.chunksize):\n yield audio_file[j:j + chunksize]",
"def process_songs(songs):\r\n print(\"[SETUP] STATUS: Creating the pool.\")\r\n workers = multiprocessing.Pool(pool_size())\r\n print(\"[SETUP] STATUS: Pool created with {} workers, assigning work.\".format(pool_size()))\r\n results = workers.map(cut_and_eq, songs)\r\n workers.close()\r\n workers.join()\r\n\r\n results = [result for result in results if result is not None]\r\n return results",
"def _encode(self):\n with tf.variable_scope('encoding'):\n self.sep_p_encodes, _ = bilstm_layer(self.p_emb, self.p_length, self.hidden_size)\n tf.get_variable_scope().reuse_variables()\n self.sep_q_encodes, _ = bilstm_layer(self.q_emb, self.q_length, self.hidden_size)\n if self.use_dropout:\n self.sep_p_encodes = tf.nn.dropout(self.sep_p_encodes, 1-self.dropout)\n self.sep_q_encodes = tf.nn.dropout(self.sep_q_encodes, 1-self.dropout)",
"def apply_fourier_transform(chunked_audio):\n pass",
"def unchunkify(chunks):\n recreated_chunks = list(map(lambda x: np.fft.irfft(combine_phase_and_power(*x)), chunks))\n total_length = len(recreated_chunks) * CHUNK_SIZE // 2\n output = np.zeros(total_length)\n window = np.power(np.sin(np.linspace(0, np.pi, CHUNK_SIZE)), 2)\n \n for i, j in enumerate(xrange(0, total_length - CHUNK_SIZE, CHUNK_SIZE // 2)):\n o = window * recreated_chunks[i]\n \n output[j: j+CHUNK_SIZE] += o\n return output",
"def forward(\n self,\n encoder_out,\n encoded_captions: torch.Tensor,\n caption_lengths: torch.Tensor\n ):\n pass",
"def output( class_label_path ):\n #print \"{0:04d}\".format(1)\n # \n seq_list = []\n x =2.0\n p = 0\n i = 0\n for i in range( 0, 10 ):\n seq_list.append( 2*(i-1)+2 )\n for ii in range( 0, i ):\n p = p + x/(i) \n #seq_list.append( 3*(i-1)+1 )\n print seq_list\n #seq_list\n\n \n f = open( class_label_path , 'r' ) #read\n same_label_list = pickle.load( f ) #np\n f.close()\n #same_label_list = outmod2.loadFile2List( \"./data/L\" + str(input_label) + \"-List.txt\" ) # 改行区切りのリストファイルをList型へ変換\n \n # Listとして設定\n player_pack = []\n for i in range(MAX_PLAY_NUM): # \n player_pack.append( outmod.AudioPlayer() ) # 新たなAudioPlayerをListに追加\n out_wav_num = random.choice( same_label_list )\n #out_wav_num = same_label_list[i-1]\n player_pack[i].setAudioFile( \"../clustering/hayakuti_data/\" + \"{0:03d}\".format(int(out_wav_num)) + \"/sound.wav\" )\n player_pack[i].setAudioWaitTime( random.uniform( seq_list[i] , seq_list[i] ) )\n player_pack[i].setAudioLoopTimes( random.randint( 0, 0 ) )\n\n # 基本再生\n # outmod2.playLoop( player1 )\n\n # List再生\n for player_i in player_pack:\n outmod.playLoop( player_i )",
"def compute_chunk_features(mp3_file):\n # On convertit le fichier mp3 en un fichier wav mono, 1avec un sample rate de 10000Hertz: on utilise\n # On utilise l'application sox \"c:/Program Files (x86)/sox-14.4.0/sox\"\n\n sox_command = \"./sox-14.4.0/sox\"\n out_file = 'temp.wav'\n #cmd = sox_command % (out_file, mp3_file)\n temp2 = subprocess.call([sox_command, mp3_file,'-r 10000','-c 1',out_file])\n # Read in chunks of data from WAV file\n wav_data1, wav_data2 = read_wav(out_file)\n # We'll cover how the features are computed in the next section!\n return np.array(features(wav_data1)), np.array(features(wav_data2))",
"def write_input_files(pst, pst_path=\".\"):\n par = pst.parameter_data.copy()\n par.index = par.index.str.lower()\n par.loc[:, \"parval1_trans\"] = (par.parval1 * par.scale) + par.offset\n pairs = np.array(list(zip(pst.template_files, pst.input_files)))\n num_tpl = len(pairs)\n chunk_len = 50\n num_chunk_floor = num_tpl // chunk_len\n main_chunks = (\n pairs[: num_chunk_floor * chunk_len].reshape([-1, chunk_len, 2]).tolist()\n ) # the list of files broken down into chunks\n remainder = pairs[num_chunk_floor * chunk_len :].tolist() # remaining files\n chunks = main_chunks + [remainder]\n # procs = []\n # for chunk in chunks:\n # # write_to_template(pst.parameter_data.parval1_trans,os.path.join(pst_path,tpl_file),\n # # os.path.join(pst_path,in_file))\n # p = mp.Process(\n # target=_write_chunk_to_template,\n # args=[chunk, pst.parameter_data.parval1_trans, pst_path],\n # )\n # p.start()\n # procs.append(p)\n # for p in procs:\n # p.join()\n pool = mp.Pool(processes=min(mp.cpu_count(), len(chunks), 60))\n x = [\n pool.apply_async(\n _write_chunk_to_template,\n args=(chunk, par.parval1_trans, pst_path),\n )\n for i, chunk in enumerate(chunks)\n ]\n [xx.get() for xx in x]\n pool.close()\n pool.join()",
"def get_large_audio_transcription(path):\n # open the audio file using pydub\n r = sr.Recognizer()\n sound = AudioSegment.from_mp3(path)\n sound.export(\"tmp.wav\", format=\"wav\")\n sound = AudioSegment.from_wav('tmp.wav')\n # split audio sound where silence is 700 miliseconds or more and get chunks\n chunks = split_on_silence(sound,\n # experiment with this value for your target audio file\n min_silence_len = 500,\n # adjust this per requirement\n silence_thresh = sound.dBFS-14,\n # keep the silence for 1 second, adjustable as well\n keep_silence=500,\n )\n folder_name = \"audio-chunks\"\n # create a directory to store the audio chunks\n if not os.path.isdir(folder_name):\n os.mkdir(folder_name)\n whole_text = \"\"\n\n chapter=(str(path.split('/')[-1])).split('_')[3]\n # if chapter == '01':\n # target=2\n # else:\n # target=1\n target=2\n # process each chunk\n for i, audio_chunk in enumerate(chunks, start=1):\n # export audio chunk and save it in\n # the `folder_name` directory.\n if i==1:\n chunk_filename = os.path.join(folder_name, f\"chunk{i}.wav\")\n audio_chunk.export(chunk_filename, format=\"wav\")\n # recognize the chunk\n with sr.AudioFile(chunk_filename) as source:\n audio_listened = r.record(source)\n # try converting it to text\n try:\n text = r.recognize_google(audio_listened,language=\"en-US\")\n\n except sr.UnknownValueError as e:\n print(\"Error:\", str(e))\n else:\n #text = f\"{text.capitalize()}. \"\n #print(chunk_filename, \":\", text)\n whole_text += text\n # return the text for all chunks detected\n else:\n chunk_filename = os.path.join(folder_name, f\"chunk{i}.wav\")\n audio_chunk.export(chunk_filename, format=\"wav\")\n # recognize the chunk\n with sr.AudioFile(chunk_filename) as source:\n audio_listened = r.record(source)\n # try converting it to text\n try:\n text = r.recognize_google(audio_listened, language=\"en-US\")\n\n except sr.UnknownValueError as e:\n print(\"Error:\", str(e))\n else:\n #text = f\"{text.capitalize()}. \"\n # print(chunk_filename, \":\", text)\n if chapter == '01':\n whole_text += ' ' +text\n if str(text).isalnum():\n if str(text).split(' ')[0]==' ':\n whole_text += text\n else: whole_text += ' '+text\n # return the text for all chunks detected\n\n if i==target:\n break\n if os.path.isfile('tmp.wav') :os.remove('tmp.wav')\n subprocess.run([\"rm\", \"-rf\", folder_name])\n return whole_text",
"def compute_chunk_features(mp3_file):\n # Extract MP3 file to a mono, 10kHz WAV file\n sox_command = \"/usr/local/bin/sox\"\n out_file = 'temp.wav'\n #cmd = sox_command % (out_file, mp3_file)\n temp2 = subprocess.call([sox_command, mp3_file,'-r 10000','-c 1',out_file])\n # Read in chunks of data from WAV file\n wav_data1, wav_data2 = read_wav(out_file)\n # We'll cover how the features are computed in the next section!\n return np.array(features(wav_data1)), np.array(features(wav_data2))",
"def binaryEncode(peptide):\n\n #do 1 hot encoding\n binaryPeptide=''\n for aa in peptide:\n binaryAmino=''\n if aa =='A':\n binaryAmino='10000000000000000000'\n if aa =='R':\n binaryAmino='01000000000000000000'\n if aa =='N':\n binaryAmino='00100000000000000000'\n if aa =='D':\n binaryAmino='00010000000000000000'\n if aa =='C':\n binaryAmino='00001000000000000000'\n if aa =='Q':\n binaryAmino='00000100000000000000'\n if aa =='E':\n binaryAmino='00000010000000000000'\n if aa =='G':\n binaryAmino='00000001000000000000'\n if aa =='H':\n binaryAmino='00000000100000000000'\n if aa =='I':\n binaryAmino='00000000010000000000'\n if aa =='L':\n binaryAmino='00000000001000000000'\n if aa =='K':\n binaryAmino='00000000000100000000'\n if aa =='M':\n binaryAmino='00000000000010000000'\n if aa =='F':\n binaryAmino='00000000000001000000'\n if aa =='P':\n binaryAmino='00000000000000100000'\n if aa =='S':\n binaryAmino='00000000000000010000'\n if aa =='T':\n binaryAmino='00000000000000001000'\n if aa =='W':\n binaryAmino='00000000000000000100'\n if aa =='Y':\n binaryAmino='00000000000000000010'\n if aa =='V':\n binaryAmino='00000000000000000001'\n binaryPeptide=binaryPeptide +binaryAmino\n if len(binaryPeptide) == 500*20:\n break \n \n while len(binaryPeptide) < 500*20:\n binaryPeptide = binaryPeptide +str(0)\n \n binaryPeptide = np.array(list(binaryPeptide),dtype=float)\n binaryPeptide = np.reshape(binaryPeptide,(binaryPeptide.shape[0],1))\n binaryPeptide = np.transpose(binaryPeptide)\n return binaryPeptide",
"def features_combine():\n\n\n\t# PROCESSING AUDIO",
"def PCMs(partitioning, only_binary=False):\n if only_binary:\n dimensions = 1 + max(\n n for partition in partitioning for n in partition)\n for assignment in itertools.product(*partitioning):\n PCM = np.zeros((dimensions, len(partitioning)))\n for partition, partition_assignment in enumerate(assignment):\n PCM[partition_assignment, partition] = 1.\n yield PCM\n else:\n # There are uncountably many non-binary PCMs.\n raise NotImplementedError",
"def index(self):\n with self.saver.thread():\n batches = self.collection.enumerate_batches(rank=self.rank)\n for chunk_idx, offset, passages in tqdm.tqdm(batches, disable=self.rank > 0):\n if self.config.resume and self.saver.check_chunk_exists(chunk_idx):\n Run().print_main(\n f\"#> Found chunk {chunk_idx} in the index already, skipping encoding...\"\n )\n continue\n # Encode passages into embeddings with the checkpoint model\n embs, doclens = self.encoder.encode_passages(passages)\n if self.use_gpu:\n assert embs.dtype == torch.float16\n else:\n assert embs.dtype == torch.float32\n embs = embs.half()\n\n Run().print_main(\n f\"#> Saving chunk {chunk_idx}: \\t {len(passages):,} passages \"\n f\"and {embs.size(0):,} embeddings. From #{offset:,} onward.\"\n )\n\n self.saver.save_chunk(\n chunk_idx, offset, embs, doclens\n ) # offset = first passage index in chunk\n del embs, doclens",
"def extract(self, audio_chunk: np.ndarray): \n pass",
"def _insertAllSteps(self): \n self.uMics = self.inputCoordinatesTiltedPairs.get().getUntilted().getMicrographs()\n self.tMics = self.inputCoordinatesTiltedPairs.get().getTilted().getMicrographs()\n\n self.inputMics = self._createSetOfParticles('auxMics')\n self.inputMics.copyInfo(self.uMics)\n self.inputMics.setStore(False)\n \n for micU, micT in izip(self.uMics, self.tMics):\n micU.cleanObjId()\n micT.cleanObjId()\n self.inputMics.append(micU)\n self.inputMics.append(micT)\n\n self.samplingInput = self.uMics.getSamplingRate()\n \n\n if self.downsampleType.get() != OTHER:\n # If 'same as picking' or 'original' get sampling rate from input micrographs\n #TODO: Review this when downsampling before picking is possible\n self.samplingFinal = self.samplingInput\n else:\n # If 'other' multiply the input sampling rate by the factor provided\n self.samplingFinal = self.samplingInput*self.downFactor.get()\n \n # Write pos files for each micrograph\n firstStepId = self._insertFunctionStep('writePosFilesStep')\n \n # For each micrograph insert the steps\n #run in parallel\n \n deps = []\n for mic in self.inputMics:\n localDeps = [firstStepId]\n micrographToExtract = mic.getFileName()\n micName = removeBaseExt(mic.getFileName())\n micId = mic.getObjId()\n\n # If downsample type is 'other' perform a downsample\n if self.downsampleType == OTHER:\n fnDownsampled = self._getTmpPath(micName+\"_downsampled.xmp\")\n downFactor = self.downFactor.get()\n args = \"-i %(micrographToExtract)s -o %(fnDownsampled)s --step %(downFactor)f --method fourier\"\n localDeps=[self._insertRunJobStep(\"xmipp_transform_downsample\", args % locals(),prerequisites=localDeps)]\n micrographToExtract = fnDownsampled\n \n # If remove dust \n if self.doRemoveDust:\n fnNoDust = self._getTmpPath(micName+\"_noDust.xmp\")\n \n thresholdDust = self.thresholdDust.get() #TODO: remove this extra variable\n args=\" -i %(micrographToExtract)s -o %(fnNoDust)s --bad_pixels outliers %(thresholdDust)f\"\n localDeps=[self._insertRunJobStep(\"xmipp_transform_filter\", args % locals(),prerequisites=localDeps)]\n micrographToExtract = fnNoDust\n \n #self._insertFunctionStep('getCTF', micId, micName, micrographToExtract)\n micName = removeBaseExt(mic.getFileName())\n \n # Actually extract\n deps.append(self._insertFunctionStep('extractParticlesStep', micId, micName, \n None, micrographToExtract, prerequisites=localDeps))\n # TODO: Delete temporary files\n \n # Insert step to create output objects \n self._insertFunctionStep('createOutputStep', prerequisites=deps)",
"def ptb_producer(raw_data, batch_size, num_steps, word_to_id):\n x = []\n y = []\n n_batches = len(raw_data) // batch_size\n for sentence in raw_data:\n mask_index = get_mask_index(sentence)\n current_label = sentence[mask_index]\n sentence[mask_index] = word_to_id['<mask>']\n y.append(current_label)\n x.append(sentence)\n x = np.array(x)\n x = x[:n_batches*batch_size]\n x = np.reshape(x, [n_batches, batch_size, num_steps])\n y = np.array(y)\n y = y[:n_batches * batch_size]\n y = np.reshape(y, [n_batches, batch_size])\n return x, y",
"def main():\r\n\r\n # contents = ['ATGGCCATGGCCCCCAGAACTGAGATCAATAGTACCCGTATTAACGGGTGA', 'MA'] # sample input\r\n contents = []\r\n for line in sys.stdin:\r\n contents.append(line.strip())\r\n myPeptide = GenomeEncoding(contents[0], contents[1])\r\n myPeptide.getCodonSeqs()\r\n myPeptide.getRevCodonSeqs()\r\n myPeptide.printEncodePep()",
"def encode(self, xs, task='all', streaming=False, cnn_lookback=False, cnn_lookahead=False, xlen_block=-1):\n if self.input_type == 'speech':\n if self.n_stacks > 1:\n xs = [stack_frame(x, self.n_stacks, self.n_skips) for x in xs]\n if self.n_splices > 1:\n xs = [splice(x, self.n_splices, self.n_stacks) for x in xs]\n if streaming:\n xlens = torch.IntTensor([xlen_block])\n else:\n xlens = torch.IntTensor([len(x) for x in xs])\n xs = pad_list([np2tensor(x, self.device).float() for x in xs], 0.0)\n if self.specaug is not None and self.training:\n xs = self.specaug(xs)\n if self.weight_noise_std > 0 and self.training:\n self.add_weight_noise(std=self.weight_noise_std)\n if self.input_noise_std > 0 and self.training:\n xs = add_input_noise(xs, std=self.input_noise_std)\n if self.ssn is not None:\n xs = self.ssn(xs, xlens)\n elif self.input_type == 'text':\n xlens = torch.IntTensor([len(x) for x in xs])\n xs = [np2tensor(np.fromiter(x, dtype=np.int64), self.device) for x in xs]\n xs = pad_list(xs, self.pad)\n xs = self.dropout_emb(self.embed(xs))\n eout_dict = self.enc(xs, xlens, task.split('.')[0], streaming, cnn_lookback, cnn_lookahead)\n if self.main_weight < 1 and self.enc_type in ['conv', 'tds', 'gated_conv']:\n for sub in ['sub1', 'sub2']:\n eout_dict['ys_' + sub]['xs'] = eout_dict['ys']['xs'].clone()\n eout_dict['ys_' + sub]['xlens'] = eout_dict['ys']['xlens'][:]\n return eout_dict",
"def _parse(self, verbose=False):\n instructions = json.load(open(self.filename, 'rb'))\n self.bpm = instructions['header']['bpm']\n self.ticks_per_beat = instructions['header']['PPQ']\n self.song_length = instructions['duration']\n self.phraseLength = instructions['phraseLength']\n\n print ('Parsing file:', self.filename)\n print ('Title', instructions['header']['name']) \n print ('BPM', self.bpm) \n\n EIGHTH_NOTE_INTERVAL_S = 60 / (2*self.bpm)\n\n # Parse the messages into buckets for each half-beat. Put them in 32-beat chunks\n chunks = []\n current_chunk = []\n index = 0\n for time in np.arange(0, self.song_length, EIGHTH_NOTE_INTERVAL_S):\n for message in instructions['tracks'][1]['notes']:\n if (message['time'] >= time and message['time'] < time + EIGHTH_NOTE_INTERVAL_S):\n current_chunk.append(str(message['midi']))\n chunks.append(current_chunk)\n index += 1\n current_chunk = []\n\n # For each bucktet, create parsed messages\n phrases = []\n current_phrase = []\n current_phrase_parsed = []\n for phrase_index in range(self.phraseLength):\n current_phrase = chunks[phrase_index*self.phraseLength:(phrase_index+1)*self.phraseLength]\n index_word = 0\n for word in current_phrase:\n word_parsed = str(index_word) + ',' + ','.join(word)\n if index_word == 0:\n self.initial_notes.append(word_parsed)\n current_phrase_parsed.append(word_parsed)\n index_word += 1\n phrases.append(current_phrase_parsed)\n current_phrase_parsed = []\n current_phrase=[]\n\n # Put them in the markov-chain\n for phrase in phrases:\n self._sequence(phrase)\n \n # Print out the resulting chunks\n if verbose:\n print ('Initial notes', self.initial_notes)\n print ('Matrix')\n self.markov_chain.print_as_matrix(20)",
"def convert_to_mp3(filename: str, title: str, start: int=None, end: int=None) -> list:\n\t# setup args for ffmpeg\n\tfile_a = f\"{path_to_wrk_dir}{filename}.mp4\" # input file\n\tfile_b = f\"{path_to_wrk_dir}{title}.mp3\" # output file\n\tfiles_b = [] # this list need if file more than 30 mb\n\targs = [\n\t\t\"/usr/bin/ffmpeg\", # path to ffmpeg\n\t\t\"-i\", # flag for input file\n\t\tfile_a, # input file\n\t\t\"-acodec\", # setup codec\n\t\t\"libmp3lame\", # codec name\n\t\t]\n\n\t# now need setup timings for target encode\n\tif start is not None and start != 0:\n\t\targs = args + [\"-ss\", str(start)]\n\tif end is not None and end != 0:\n\t\targs = args + [\"-t\", str(end - start)]\n\n\t# and last part for args to ffmpeg\n\targs = args + [\n\t\t\"-metadata\", # setup metadata for file\n\t\tf\"title={title}\", # title\n\t\t\"-metadata\",\n\t\tf\"artist={title}\", # and artist\n\t\t\"-b:a\", # setup bitrate\n\t\t\"320k\", # setup max bitrate\n\t\tfile_b,\n\t\t]\n\tprint(f\"{args}\")\n\t# start subprocess for encoding\n\tpopen = subprocess.Popen(args)\n\tpopen.wait()\n\n\t# check size file. if he more than 30 mb, bot need split him to chunks.\n\tsize = getsize(file_b) / 1024 / 1024\n\tif size > 30 and ( start or end is None ):\n\t\t# setup args for split to chunks\n\t\targs = [\n\t\t\t\"ffprobe\",\n\t\t\t\"-show_entries\",\n\t\t\t\"format=duration\",\n\t\t\t\"-i\",\n\t\t\tfile_b,\n\t\t\t]\n\n\t\t# get duration video.\n\t\tpopen = subprocess.Popen(args, stdout=subprocess.PIPE)\n\t\tpopen.wait()\n\t\toutput = popen.stdout.read()\n\t\t# now we know how long this audio file\n\t\t# split to 10 min chunks\n\t\tdur = re.findall(r\"\\d{1,10}\", str(output))\n\t\t# get chunks count for loop\n\t\tcount_chunks = (int(dur[0]) // 600) + 1\n\t\tfor chunk_start_time in range(0, count_chunks):\n\t\t\t# setup args for split\n\t\t\t# big parts of args the same for encode\n\t\t\targs = [\n\t\t\t\t\"/usr/bin/ffmpeg\",\n\t\t\t\t\"-i\",\n\t\t\t\tfile_b,\n\t\t\t\t\"-ss\",\n\t\t\t\tf\"{chunk_start_time * 600}\", # when start chunk\n\t\t\t\t\"-t\",\n\t\t\t\t\"600\", # 10 mints duration\n\t\t\t\t\"-acodec\",\n\t\t\t\t\"copy\", # copy\n\t\t\t\t\"-b:a\",\n\t\t\t\t\"320k\",\n\t\t\t\tf\"{path_to_wrk_dir}{title}_{chunk_start_time}.mp3\", # now we have path to video with chunk number.\n\t\t\t]\n\t\t\ttry:\n\t\t\t\t# start process for cut chunk\n\t\t\t\tpopen = subprocess.Popen(args, stdout=subprocess.PIPE)\n\t\t\t\tpopen.wait()\n\t\t\t# handle except.\n\t\t\texcept Exception as e:\n\t\t\t\tprint(f\"Exception - {e}\")\n\t\t\tfiles_b.append(f\"{path_to_wrk_dir}{title}_{chunk_start_time}.mp3\") # append name of file in list\n\t\tremove(file_b)\n\ttry:\n\t\t# remove tmp file\n\t\tremove(file_a)\n\t# handle except\n\texcept FileNotFoundError:\n\t\tfiles = get_file_list(path_to_wrk_dir)\n\t\tfor i in files:\n\t\t\tif -1 != f\"{path_to_wrk_dir}{i}\".find(f\"{filename}\") and f\"{i}\".find(f\".mp3\") == -1:\n\t\t\t\ttry:\n\t\t\t\t\tremove(f\"{path_to_wrk_dir}{i}\")\n\t\t\t\texcept FileNotFoundError:\n\t\t\t\t\tprint(f\"can't remove file {path_to_wrk_dir}{i}\")\n\tif len(files_b) == 0:\n\t\treturn [file_b]\n\telse:\n\t\treturn files_b",
"def test_pipeline1(self):\n\n nlp = Pipeline[MultiPack]()\n reader = MultiPackSentenceReader()\n nlp.set_reader(reader)\n dummy = DummyPackProcessor()\n nlp.add(dummy, selector=FirstPackSelector())\n nlp.initialize()\n data_path = data_samples_root + \"/random_texts/0.txt\"\n num_packs = 0\n for pack in nlp.process_dataset(data_path):\n types = list(pack.get_pack(\"pack\").get_entries_by_type(NewType))\n num_packs += 1\n self.assertEqual(len(types), 1)\n self.assertEqual(types[0].value, \"[PACK]\")\n\n # check that all packs are yielded\n self.assertEqual(num_packs, reader.count)"
]
| [
"0.5605265",
"0.5559755",
"0.5549146",
"0.55153775",
"0.5348678",
"0.53030664",
"0.5284979",
"0.5227434",
"0.5210017",
"0.5203231",
"0.516057",
"0.5159312",
"0.51228446",
"0.5113281",
"0.5102108",
"0.5093781",
"0.50582504",
"0.50577176",
"0.50477487",
"0.5035931",
"0.50243163",
"0.5019511",
"0.5017323",
"0.4972044",
"0.49665508",
"0.49607968",
"0.49483404",
"0.49481046",
"0.49478948",
"0.494188"
]
| 0.59695196 | 0 |
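The record above chunks a song, runs a custom `plp_tf` pulse estimator, and reads beats off the local maxima of the pulse curve. For comparison only, and under the assumption that librosa's stock predominant-local-pulse routine is an acceptable stand-in for that custom pipeline, a compact sketch (helper name `plp_beat_times` and the click-track smoke test are mine):

```python
import numpy as np
import librosa


def plp_beat_times(y, sr, hop_length=512, tempo_min=60, tempo_max=300):
    """Beat times in seconds from local maxima of librosa's PLP curve."""
    onset_env = librosa.onset.onset_strength(y=y, sr=sr, hop_length=hop_length)
    pulse = librosa.beat.plp(onset_envelope=onset_env, sr=sr,
                             hop_length=hop_length,
                             tempo_min=tempo_min, tempo_max=tempo_max)
    beat_frames = np.flatnonzero(librosa.util.localmax(pulse))
    return librosa.frames_to_time(beat_frames, sr=sr, hop_length=hop_length)


if __name__ == "__main__":
    sr = 22050
    # Synthetic click track at 120 BPM (a click every 0.5 s) as a smoke test.
    y = librosa.clicks(times=np.arange(0.0, 10.0, 0.5), sr=sr, click_duration=0.05)
    print(plp_beat_times(y, sr)[:8])
```

`librosa.beat.plp` returns one pulse value per onset-envelope frame, so beat positions come out as frame indices and are converted to seconds with `frames_to_time`, much like the samples-to-seconds division at the end of `encode_song` above.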
Plots tempogram and local pulse. | def plot_tempogram_and_pulse(tempogram, pulse, oenv, sr_, hop_length, plot_pulse=True):
tempogram = tempogram.numpy()
librosa.display.specshow(np.abs(tempogram), sr=sr_, hop_length=hop_length,
x_axis='time', y_axis='fourier_tempo', cmap='magma')
plt.show()
peaks = find_local_maxima(tf.clip_by_value(pulse, clip_value_min=0.1,
clip_value_max=1.))[1:]
if plot_pulse:
oenv = oenv.numpy()
pulse = pulse.numpy()
plt.plot(oenv, color="black")
plt.plot(pulse, color="blue")
plt.plot(peaks, pulse[peaks.numpy()], "ro")
plt.show() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def generate_plot(self):\r\n\t\tx, y = zip(*[p.p for p in self.universe])\r\n\t\tself.ax.cla()\r\n\t\tself.ax.plot(x, y, '.')\r\n\t\tself.ax.set_title('Universe at time: %d' % self.universe.time)\r\n\t\tself.ax.set_xlim([P_MU-4*P_STD, P_MU+4*P_STD])\r\n\t\tself.ax.set_ylim([P_MU-4*P_STD, P_MU+4*P_STD])",
"def plot(self):\n\t\tself.plotOfHeatingCurrent().plot()",
"def show():\n setup()\n plt.show()",
"def plot_time_frames(self):\n\n fig = plt.figure()\n plt.grid(True)\n\n plt.ylim([-1.5,1.5])\n plt.xlim([0,1])\n\n for key in self.timeframes.keys():\n if key == 0:\n plt.plot(self.x, self.timeframes[key], label=\"time: \" + str(round(key*self.dt, 3)), linewidth=5)\n else:\n plt.plot(self.x, self.timeframes[key], label=\"time: \" + str(round(key*self.dt, 3)))\n\n plt.title(\"Wave at different times\")\n plt.legend(loc=\"upper right\")\n plt.show()\n\n # fig.savefig('results/pics_wave/vibrating_string_'+ self.type + '.png', dpi=150)",
"def temphum_plot(self, kwargs=None):\n\n def valuechange():\n \"\"\"This is the function which is called, when a value is changed in the spin boxes\"\"\"\n\n tempmin.setMaximum(tempmax.value())\n tempmax.setMinimum(tempmin.value())\n hummin.setMaximum(hummax.value())\n hummax.setMinimum(hummin.value())\n\n self.variables.default_values_dict[\"settings\"][\n \"current_tempmin\"\n ] = tempmin.value()\n self.variables.default_values_dict[\"settings\"][\n \"current_tempmax\"\n ] = tempmax.value()\n self.variables.default_values_dict[\"settings\"][\n \"current_hummin\"\n ] = hummin.value()\n self.variables.default_values_dict[\"settings\"][\n \"current_hummax\"\n ] = hummax.value()\n\n max = build_command(\n self.variables.devices_dict[\"temphum_controller\"],\n (\"set_hummax\", hummax.value()),\n )\n min = build_command(\n self.variables.devices_dict[\"temphum_controller\"],\n (\"set_hummin\", hummin.value()),\n )\n\n self.variables.vcw.write(\n self.variables.devices_dict[\"temphum_controller\"], max\n )\n self.variables.vcw.write(\n self.variables.devices_dict[\"temphum_controller\"], min\n )\n\n def dry_air_action():\n if dry_air_btn.isChecked():\n device_dict = self.variables.devices_dict[\"temphum_controller\"]\n try:\n command = build_command(\n device_dict, (\"set_environement_control\", \"ON\")\n )\n answer = self.variables.vcw.write(device_dict, command)\n if not answer:\n self.log.error(\n \"The environement controller did not responsed accordingly. Answer: \"\n + str(answer).strip()\n )\n return 0\n except:\n self.log.error(\n \"An error occured while changing the environement control\"\n )\n return 0\n dry_air_btn.setText(\"Humidity ctl. on\")\n self.variables.default_values_dict[\"settings\"][\n \"humidity_control\"\n ] = True\n\n else:\n device_dict = self.variables.devices_dict[\"temphum_controller\"]\n try:\n command = build_command(\n device_dict, (\"set_environement_control\", \"OFF\")\n )\n answer = self.variables.vcw.write(device_dict, command)\n if not answer:\n self.log.error(\n \"The environement controller did not responsed accordingly. Answer: \"\n + str(answer).strip()\n )\n\n return 0\n except:\n self.log.error(\n \"An error occured while changing the environement control\"\n )\n return 0\n dry_air_btn.setText(\"Humidity ctl. 
off\")\n self.variables.default_values_dict[\"settings\"][\n \"humidity_control\"\n ] = False\n\n def light_action():\n \"\"\"This function is debricated\"\"\"\n if light_btn.isChecked():\n self.variables.default_values_dict[\"settings\"][\"external_lights\"] = True\n else:\n self.variables.default_values_dict[\"settings\"][\n \"external_lights\"\n ] = False\n\n def check_light_state():\n if (\n self.variables.default_values_dict[\"settings\"][\"lights\"]\n and not light_btn.text() == \"Lights on\"\n ): # Checks if the lights are on and the button is off\n light_btn.setText(\"Lights on\")\n light_btn.setStyleSheet(\"background : rgb(0,255,0); border-radius: 5px\")\n elif (\n not self.variables.default_values_dict[\"settings\"][\"lights\"]\n and not light_btn.text() == \"Lights off\"\n ):\n light_btn.setText(\"Lights off\")\n light_btn.setStyleSheet(\"background : rgb(255,0,0); border-radius: 5px\")\n\n def config_plot(plot, plot2, pg):\n plot = plot.plotItem\n plot.setLabel(\"right\", \"humidity\", units=\"%\")\n plot.setLabel(\"bottom\", \"time\")\n plot.setLabel(\"left\", \"temperature\", units=\"Celsius\")\n plot.getAxis(\"left\").setPen(pg.mkPen(color=\"#c4380d\", width=3))\n plot.getAxis(\"right\").setPen(pg.mkPen(color=\"#025b94\", width=3))\n plot.showAxis(\"top\", show=True)\n plot.getAxis(\"top\").setTicks([])\n plot.getAxis(\"bottom\").setScale(1e-9)\n # plot.setRange(yRange=[15, 35])\n\n # For second plot\n plot.scene().addItem(\n plot2\n ) # inserts the second plot into the scene of the first\n plot2.setGeometry(plot.vb.sceneBoundingRect())\n plot.getAxis(\"right\").linkToView(\n plot2\n ) # links the second y axis to the second plot\n plot2.setXLink(plot) # sync the x axis of both plots\n # plot2.setRange(yRange=[0, 50])\n\n def __cut_arrays(data_array, maximum_time, arrays_to_cut):\n \"\"\"This function cuts an array to a maximum time difference\n This function is supposed to be used only for temp and humidity shaped arrays\n \"\"\"\n\n try:\n begin_time = data_array[arrays_to_cut[0]][0][0]\n end_time = data_array[arrays_to_cut[0]][0][-1]\n delta_time = (\n data_array[arrays_to_cut[0]][0][1]\n - data_array[arrays_to_cut[0]][0][0]\n )\n total_time = end_time - begin_time\n if total_time > maximum_time:\n over_time = total_time - maximum_time\n array_elm_to_drop = int(over_time / delta_time)\n for arrays in arrays_to_cut:\n data_array[arrays][0] = data_array[arrays][0][\n array_elm_to_drop:\n ]\n data_array[arrays][1] = data_array[arrays][1][\n array_elm_to_drop:\n ]\n except:\n pass\n\n def update_temphum_plots(kwargs=None):\n # for rooms in self.rooms:\n if self.variables.default_values_dict[\"settings\"][\"new_data\"]:\n temphum_plot.clear() # clears the plot and prevents a memory leak\n hum_plot_obj.clear()\n p1 = temphum_plot.plotItem\n\n ax = p1.getAxis(\"bottom\") # This is the trick\n __cut_arrays(\n self.variables.meas_data,\n float(\n self.variables.default_values_dict[\"settings\"].get(\n \"temp_history\", 3600\n )\n ),\n [\"temperature\", \"humidity\"],\n )\n ax.setTicks(\n [\n get_thicks_for_timestamp_plot(\n self.variables.meas_data[\"temperature\"][0],\n 5,\n self.variables.default_values_dict[\"settings\"][\n \"time_format\"\n ],\n )\n ]\n )\n\n try:\n if len(self.variables.meas_data[\"temperature\"][0]) == len(\n self.variables.meas_data[\"humidity\"][1]\n ): # sometimes it happens that the values are not yet ready\n p1.plot(\n self.variables.meas_data[\"temperature\"][0],\n self.variables.meas_data[\"temperature\"][1],\n pen={\"color\": \"r\", \"width\": 2},\n 
clear=True,\n )\n plot_item = setpg.PlotCurveItem(\n self.variables.meas_data[\"humidity\"][0],\n self.variables.meas_data[\"humidity\"][1],\n pen={\"color\": \"b\", \"width\": 2},\n clear=True,\n )\n hum_plot_obj.addItem(plot_item)\n del plot_item # the plot class needs a plot item which can be rendered, to avoid a mem leak delete the created plot item or 20k ram will be used\n # hum_plot_obj.addItem(setpg.plot(self.variables.meas_data[\"humidity\"][0],self.variables.meas_data[\"humidity\"][1],pen={'color': \"b\", 'width': 2}, clear=True))\n hum_plot_obj.setGeometry(\n p1.vb.sceneBoundingRect()\n ) # resize the second plot!\n except:\n pass\n\n # Create sublayout\n temphum_layout = QGridLayout()\n\n # Frame over the objects\n frame = QLabel()\n frame.setFrameStyle(QFrame.Box | QFrame.Raised)\n frame.setLineWidth(0)\n frame.setMidLineWidth(2)\n\n self.layout.addWidget(\n frame, self.temp_ypos, self.temp_xpos, self.temp_ysize, self.temp_xsize\n )\n\n x = np.zeros(1)\n y = np.zeros(1)\n\n setpg = pq\n # date_axis = CAxisTime(orientation='bottom') # Correctly generates the time axis\n hum_plot_obj = setpg.ViewBox() # generate new plot item\n temphum_plot = pq.PlotWidget()\n config_plot(temphum_plot, hum_plot_obj, setpg) # config the plot items\n\n self.variables.add_update_function(update_temphum_plots)\n\n # Additional Variables will be generated for temp and hum\n # self.variables.default_values_dict[\"settings\"].update({\"lights\": False, \"humidity_control\": True, \"current_tempmin\": 20, \"current_tempmax\": 25, \"current_hummin\": 20,\"current_hummax\": 25})\n\n # Spin Boxes for temp and humidity\n\n tempmin = QSpinBox()\n tempmax = QSpinBox()\n hummin = QSpinBox()\n hummax = QSpinBox()\n\n # Spinbox label\n textbox_temp = QLabel()\n textbox_temp.setText(\"Min temp. Max temp.\")\n textbox_temp.setFont(self.font)\n textbox_hum = QLabel()\n textbox_hum.setText(\"Min hum. Max hum.\")\n textbox_hum.setFont(self.font)\n\n # Config\n\n tempmin.setRange(15, 35)\n tempmin.setValue(\n float(\n self.variables.default_values_dict[\"settings\"].get(\"current_tempmin\", 0)\n )\n )\n tempmax.setRange(15, 35)\n tempmax.setValue(\n float(\n self.variables.default_values_dict[\"settings\"].get(\"current_tempmax\", 0)\n )\n )\n tempmin.valueChanged.connect(valuechange)\n tempmax.valueChanged.connect(valuechange)\n\n hummin.setRange(0, 70)\n hummin.setValue(\n float(\n self.variables.default_values_dict[\"settings\"].get(\"current_hummin\", 0)\n )\n )\n hummax.setRange(0, 70)\n hummax.setValue(\n float(\n self.variables.default_values_dict[\"settings\"].get(\"current_hummax\", 0)\n )\n )\n hummin.valueChanged.connect(valuechange)\n hummax.valueChanged.connect(valuechange)\n\n # Push buttons on the right for humidity control and light control\n\n dry_air_btn = QPushButton(\"Humidity ctl. 
off\")\n self.variables.default_values_dict[\"settings\"][\"humidity_control\"] = False\n dry_air_btn.setCheckable(True)\n dry_air_btn.toggle()\n dry_air_btn.clicked.connect(dry_air_action)\n dry_air_btn.setChecked(False)\n\n light_btn = QLabel()\n light_btn.setText(\"State not defined\")\n light_btn.setAlignment(QtCore.Qt.AlignVCenter | QtCore.Qt.AlignHCenter)\n light_btn.setStyleSheet(\"background : rgb(255,0,0); border-radius: 5px\")\n\n # light_btn.setCheckable(True)\n # light_btn.clicked.connect(light_action)\n\n # Humidity\n # temphum_plot.plot(x,y, pen=\"b\")\n\n # Widgets add\n temphum_layout.addWidget(textbox_temp, 0, 0, 1, 2)\n temphum_layout.addWidget(tempmin, 1, 0)\n temphum_layout.addWidget(tempmax, 1, 1)\n\n temphum_layout.addWidget(textbox_hum, 2, 0, 1, 2)\n temphum_layout.addWidget(hummin, 3, 0)\n temphum_layout.addWidget(hummax, 3, 1)\n\n temphum_layout.addWidget(dry_air_btn, 4, 0, 1, 2)\n temphum_layout.addWidget(light_btn, 5, 0, 3, 2)\n\n temphum_layout.addWidget(temphum_plot, 0, 3, 10, 2)\n\n temphum_layout.setContentsMargins(8, 8, 0, 8) # Makes a margin to the layout\n\n # Add the layout to the main layout\n self.layout.addLayout(\n temphum_layout,\n self.temp_ypos,\n self.temp_xpos,\n self.temp_ysize,\n self.temp_xsize,\n )\n\n def update():\n pass\n\n self.variables.add_update_function(update)\n self.variables.add_update_function(check_light_state)",
"def plot_time(self, X, x0, t):\n\n Pressure = [Solution(self, (x-x0)/t).pressure for x in X]\n Velocity = [Solution(self, (x-x0)/t).velocity for x in X]\n Density = [Solution(self, (x-x0)/t).rho for x in X]\n\n fig, axs = plt.subplots(3, sharex=True)\n fig.suptitle(\"Solution of the Riemann problem\\nat t = {}s\".format(t))\n axs[0].plot(X, Density)\n axs[1].plot(X, Velocity)\n axs[2].plot(X, Pressure)\n\n axs[0].grid()\n axs[0].set(ylabel = \"Density\")\n axs[1].grid()\n axs[1].set(ylabel = \"Velocity\")\n axs[2].grid()\n axs[2].set(ylabel = \"Pressure\")\n\n plt.xlabel(\"Location x\")",
"def plot(self):\n\t\tself.plotOfSpect()",
"def Plot(self):\n\n ### Create the path names ###\n folder_string = self.params.folder+\"/plots/\"\n u_string = self.params.folder+\"/plots/u.pdf\"\n p_string = self.params.folder+\"/plots/p.pdf\"\n\n ### Check if folder exists ###\n if not os.path.exists(folder_string): os.makedirs(folder_string)\n\n ### Plot the x component of velocity ###\n plot(self.u_next[0],title=\"Velocity in the x Direction\")\n plt.savefig(u_string)\n plt.figure()\n\n ### Plot the pressure ###\n plot(self.p_next,title=\"Pressure\")\n plt.savefig(p_string)\n plt.show()",
"def plot(self):\n\t\t\t\n\t\tfig,p1=_plt.subplots(4,sharex=True)\n\t\tp1[0].plot(self.time*1e3,self.eRogA,label='Rogowski A')\n\t\tp1[1].plot(self.time*1e3,self.eRogB,label='Rogowski B')\n\t\tp1[2].plot(self.time*1e3,self.eRogC,label='Rogowski C')\n\t\tp1[3].plot(self.time*1e3,self.eRogD,label='Rogowski D')\n\t\t_plot.finalizeSubplot(p1,xlabel='Time (ms)',ylabel='Current (A)')\n\t\t_plot.finalizeFigure(fig,title=self.title)\n\t\t\n\t\treturn p1",
"def visualize_signal(self):\n plt.figure()\n plt.title('Accelerometer Signal')\n plt.plot(range(len(self.data)), self.data[1])",
"def plot_examples(cms):\r\n data = amp_axis\r\n\r\n fig, axs = plt.subplots(1, 2, figsize=(30, 8)) #create two plots\r\n for [ax, cmap] in zip(axs, cms):\r\n psm = ax.pcolormesh(time_axis, tof_axis, data, cmap=cmap, rasterized=True, vmin = 250) #specify axis and minimum amplitude value to show on the graph\r\n fig.colorbar(psm, ax=ax, label = 'Amplitude') #define the legend of the amplitude data\r\n \r\n ax.set_ylabel('Time of Flight [\\u03bcs]') #set label for y axis\r\n ax.set_xlabel('Time [min]') #set label for x axis\r\n \r\n ax.hlines(8.744, 0, stop_time, colors = 'white') #create two white lines for the safe operating range for ToF\r\n ax.hlines(9.555, 0, stop_time, colors = 'white') \r\n \r\n plt.show()",
"def plot():\n pass",
"def plot(self):\n\t\tself.plotOfLoopVoltage()",
"def plot(self):\n\t\tself.plotOfSpect().plot()",
"def plot_fitter(self):\n\n total_time=self.interval*self.maxspectra\n times = np.linspace(self.interval,total_time + 1,self.interval)\n spectra_fitter.main(self.rt_plot.sum_data, times)",
"def do_plot_abs(the_input):\n pressurefield = None\n\n if the_input is None:\n raise Exception(\"You must supply a pressurefield or world:cueBeamCore2.CueBeamWorld\")\n\n if (type(the_input) is CueBeamWorld):\n world = the_input\n pressurefield = world.rxPlane.pressurefield\n\n if (type(the_input) is numpy.ndarray):\n world = CueBeamWorld() # create new, default world\n pressurefield = the_input\n\n if pressurefield is None:\n raise Exception(\"Something wrong: pressurefield is still None\")\n\n hfig = plt.figure(num=1, figsize=(8, 6), dpi=90, facecolor='white', edgecolor='black')\n\n imgplot = plt.imshow(\n X=numpy.real(pressurefield),\n extent=(\n world.rxPlane.z0, world.rxPlane.z0 + world.rxPlane.nz * world.rxPlane.dz,\n world.rxPlane.y0, world.rxPlane.y0 + world.rxPlane.ny * world.rxPlane.dy\n ),\n # interpolation=\"spline36\",\n interpolation=\"nearest\",\n clim=(0, 8.0),\n origin=\"lower\")\n # end imshow\n plt.set_cmap(\"plasma\") # black-to-yellow color map\n plt.xlabel(\"z-axis[m]\")\n plt.ylabel(\"y-axis[m]\")\n plt.show()",
"def plot_series(self, t1=0, t2=100, t1p=None, t2p=None):\n \n plot_discretized(self.ts, self.ts_dis, t1=t1, t2=t2, t1p=t1p, t2p=t2p)",
"def plots(self, events=None, title=None):\n data = self.data\n P = PH.regular_grid(3 , 1, order='columnsfirst', figsize=(8., 6), showgrid=False,\n verticalspacing=0.08, horizontalspacing=0.08,\n margins={'leftmargin': 0.07, 'rightmargin': 0.20, 'topmargin': 0.03, 'bottommargin': 0.1},\n labelposition=(-0.12, 0.95))\n scf = 1e12\n ax = P.axarr\n ax = ax.ravel()\n PH.nice_plot(ax)\n for i in range(1,2):\n ax[i].get_shared_x_axes().join(ax[i], ax[0])\n # raw traces, marked with onsets and peaks\n tb = self.timebase[:len(data)]\n ax[0].plot(tb, scf*data, 'k-', linewidth=0.75, label='Data') # original data\n ax[0].plot(tb[self.onsets], scf*data[self.onsets], 'k^', \n markersize=6, markerfacecolor=(1, 1, 0, 0.8), label='Onsets')\n if len(self.onsets) is not None:\n# ax[0].plot(tb[events], data[events], 'go', markersize=5, label='Events')\n# ax[0].plot(tb[self.peaks], self.data[self.peaks], 'r^', label=)\n ax[0].plot(tb[self.smpkindex], scf*np.array(self.smoothed_peaks), 'r^', label='Smoothed Peaks')\n ax[0].set_ylabel('I (pA)')\n ax[0].set_xlabel('T (s)')\n ax[0].legend(fontsize=8, loc=2, bbox_to_anchor=(1.0, 1.0))\n \n # deconvolution trace, peaks marked (using onsets), plus threshold)\n ax[1].plot(tb[:self.Crit.shape[0]], self.Crit, label='Deconvolution') \n ax[1].plot([tb[0],tb[-1]], [self.sdthr, self.sdthr], 'r--', linewidth=0.75, \n label='Threshold ({0:4.2f}) SD'.format(self.sdthr))\n ax[1].plot(tb[self.onsets]-self.idelay, self.Crit[self.onsets], 'y^', label='Deconv. Peaks')\n if events is not None: # original events\n ax[1].plot(tb[:self.Crit.shape[0]][events], self.Crit[events],\n 'ro', markersize=5.)\n ax[1].set_ylabel('Deconvolution')\n ax[1].set_xlabel('T (s)')\n ax[1].legend(fontsize=8, loc=2, bbox_to_anchor=(1.0, 1.0))\n# print (self.dt, self.template_tmax, len(self.template))\n # averaged events, convolution template, and fit\n if self.averaged:\n ax[2].plot(self.avgeventtb[:len(self.avgevent)], scf*self.avgevent, 'k', label='Average Event')\n maxa = np.max(self.sign*self.avgevent)\n #tpkmax = np.argmax(self.sign*self.template)\n if self.template is not None:\n maxl = int(np.min([len(self.template), len(self.avgeventtb)]))\n temp_tb = np.arange(0, maxl*self.dt, self.dt)\n #print(len(self.avgeventtb[:len(self.template)]), len(self.template))\n ax[2].plot(self.avgeventtb[:maxl], scf*self.sign*self.template[:maxl]*maxa/self.template_amax, \n 'r-', label='Template')\n # compute double exp based on rise and decay alone\n # print('res rise: ', self.res_rise)\n # p = [self.res_rise.x[0], self.res_rise.x[1], self.res_decay.x[1], self.res_rise.x[2]]\n # x = self.avgeventtb[:len(self.avg_best_fit)]\n # y = self.doubleexp(p, x, np.zeros_like(x), risepower=4, fixed_delay=0, mode=0)\n # ax[2].plot(x, y, 'b--', linewidth=1.5)\n tau1 = np.power(10, (1./self.risepower)*np.log10(self.tau1*1e3)) # correct for rise power\n tau2 = self.tau2*1e3\n ax[2].plot(self.avgeventtb[:len(self.avg_best_fit)], scf*self.avg_best_fit, 'c--', linewidth=2.0,\n label='Best Fit:\\nRise Power={0:.2f}\\nTau1={1:.3f} ms\\nTau2={2:.3f} ms\\ndelay: {3:.3f} ms'.\n format(self.risepower, self.res_rise.x[1]*1e3, self.res_decay.x[1]*1e3, self.bfdelay*1e3))\n # ax[2].plot(self.avgeventtb[:len(self.decay_fit)], self.sign*scf*self.rise_fit, 'g--', linewidth=1.0,\n # label='Rise tau {0:.2f} ms'.format(self.res_rise.x[1]*1e3))\n # ax[2].plot(self.avgeventtb[:len(self.decay_fit)], self.sign*scf*self.decay_fit, 'm--', linewidth=1.0,\n # label='Decay tau {0:.2f} ms'.format(self.res_decay.x[1]*1e3))\n if title is not None:\n 
P.figure_handle.suptitle(title)\n ax[2].set_ylabel('Averaged I (pA)')\n ax[2].set_xlabel('T (s)')\n ax[2].legend(fontsize=8, loc=2, bbox_to_anchor=(1.0, 1.0))\n if self.fitted:\n print('measures: ', self.risetenninety, self.decaythirtyseven)\n mpl.show()",
"def plot_temp():\r\n work_book = xlrd.open_workbook(\"Temp.xls\")\r\n sheet1 = work_book.sheet_by_name(\"Temperature\")\r\n time_x = sheet1.col_values(1)\r\n temp_y = sheet1.col_values(0)\r\n plt.title(\"Time\")\r\n plt.xlabel(\"Time\")\r\n plt.ylabel(\"Temperature\")\r\n plt.plot(time_x, temp_y)\r\n plt.show()",
"def example3():\n arrive_time=example2() # Get packets arrive time using example1\n time_series.plot_time_series(arrive_time) # Plot time series using packets arrive time",
"def plot(self):\n self.plotsite()\n self.plotbond()\n plt.show()",
"def plot_plateau(x,y,p,n,Vdc):\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_xlabel('Vrf [V]')\n ax.set_ylabel('Current [nA]')\n fig.suptitle('Vdc = '+str(Vdc)+' n = '+str(n), fontsize=24)\n \n plt.plot(x,y,'x',label='Experimental data') \n t = np.linspace(min(x),max(x),1000)\n plt.plot(t,f(t,p[0],p[1],p[2]),label='Fit')\n plt.axhline(y=n*e*frequency*1e9, color='black', linestyle='-')\n\n ax.legend()\n plt.show(block=True)\n plt.pause(0.3)\n plt.close()\n \n return None",
"def plot(self):\n pass",
"def plot(self):\n\t\tplot_chain(self.database_path, self.temp_folder)\n\t\tplot_density(self.database_path, self.temp_folder, self.cal_params)",
"def plot_trajectories_XYZ(t_start,t_stop):\n \n time, ankle_l_trajectory, ankle_r_trajectory,foot_l_contact,foot_r_contact,muscle_lh_activations, muscle_rh_activations,muscle_lh_forces,muscle_rh_forces,joint_lh_positions,joint_rh_positions = load_data()\n \n index_start = np.where(time == t_start)[0][0]\n index_end = np.where(time == t_stop)[0][0]\n \n time = time[index_start:index_end+1]\n ankle_l_trajectory = ankle_l_trajectory[index_start:index_end+1,:]\n ankle_r_trajectory = ankle_r_trajectory[index_start:index_end+1,:]\n \n #time=np.linspace(1,len(ankle_l_trajectory[:,0]),len(ankle_l_trajectory[:,0]));\n \n plt.figure('Trajectories')\n plt.subplot(311)\n plt.plot(time,ankle_l_trajectory[:,0])\n plt.plot(time,ankle_r_trajectory[:,0])\n #plt.title('Trajectory of the X component')\n plt.xlabel('Time [s]')\n plt.ylabel('X Position [cm]')\n plt.legend(['Left ankle','Right ankle'],loc='upper right')\n \n plt.subplot(312)\n plt.plot(time,ankle_l_trajectory[:,1])\n plt.plot(time,ankle_r_trajectory[:,1])\n #plt.title('Trajectory of the Y component')\n plt.xlabel('Time [s]')\n plt.ylabel('Y Position [cm]')\n plt.legend(['Left ankle','Right ankle'],loc='upper right')\n \n plt.subplot(313)\n plt.plot(time,ankle_l_trajectory[:,2])\n plt.plot(time,ankle_r_trajectory[:,2])\n #plt.title('Trajectory of the Z component')\n plt.xlabel('Time [s]')\n plt.ylabel('Z Position [cm]')\n plt.legend(['Left ankle','Right ankle'],loc='upper right')\n \n# plt.suptitle('Decomposition of the trajectories of the hind feet')\n return",
"def plot_v(t, v):\n p1 = plt.plot(t,v)\n plt.xlabel('Time [s]')\n plt.ylabel('Velocity [m/s]')\n plt.title('Velocity for the skydiver as a function of time')\n plt.show()\n plt.savefig('Parachute_velocity.png')",
"def makePlot(timeStamp):\n\n #-------------------------------------------------------------------------\n # Create figure and axes\n #-------------------------------------------------------------------------\n\n width = 12 # inches\n height = 8 # inches\n fig = plt.figure(figsize=(width, height))\n\n # We'll use gridspec to create axes in rectangular 6-by-5 lattice\n import matplotlib.gridspec as gridspec\n nrows = 6\n ncols = 5\n Grid = gridspec.GridSpec(nrows, ncols)\n\n # axis for elevation time series\n axElev = fig.add_subplot(Grid[:2, :2]) # first 2 rows, first 2 columns\n # axis for slab\n axSlab = fig.add_subplot(Grid[:2, 2:]) # first 2 rows, columns > 2\n # and the transects\n axTran1 = fig.add_subplot(Grid[2:4, :]) # rows 2,3,4, all columns\n # rows 5,6,7, all columns, share x/y axis with previous (sets same ticks\n # etc)\n axTran2 = fig.add_subplot(Grid[4:6, :], sharex=axTran1, sharey=axTran1)\n\n # gridspec allows to tune the spacing between plots (unit is fraction of\n # font size)\n boundary_pad = 3.5\n horizontal_pad = 0.2\n vertical_pad = 1.0\n # figure area left,bottom,right,top in normalized coordinates [0,1]\n bounds = [0, 0, 1, 1]\n Grid.tight_layout(\n fig,\n pad=boundary_pad,\n w_pad=horizontal_pad,\n h_pad=vertical_pad,\n rect=bounds)\n\n #-------------------------------------------------------------------------\n # Create plots\n #-------------------------------------------------------------------------\n\n # for all avaiable colormaps see ( '_r' reverses the colormap )\n # http://matplotlib.org/examples/color/colormaps_reference.html\n colormap = plt.get_cmap('Spectral_r')\n colormap_kine = plt.get_cmap('gist_heat')\n\n # slab\n salt_clim = [0, 32]\n ncontours = 16\n # bouding box for slab [xmin,xmax,ymin,ymax] in model x,y coordinates\n estuarybbox = [330000, 360000, 284500, 297500]\n dia = slabSnapshotDC(\n clabel='Salinity',\n unit='psu',\n clim=salt_clim,\n cmap=colormap)\n dia.setAxes(axSlab)\n dia.addSample(slabDC, timeStamp=timeStamp, plotType='contourf',\n bbox=estuarybbox, N=ncontours)\n # overrides default format for colorbar floats\n dia.showColorBar(format='%.2g')\n #dia.addTitle('in case you want a custom title')\n # get transect (x,y) coordinates from the transectDC\n transectXYCoords = generateTransectFromDataContainer(transectDC_salt, 0)[4]\n # plot transect on the map (thin black on thick white)\n dia.addTransectMarker(transectXYCoords[:, 0], transectXYCoords[:, 1],\n color='w', linewidth=2.0)\n dia.addTransectMarker(transectXYCoords[:, 0], transectXYCoords[:, 1],\n color='k', linewidth=1.0)\n # plot station markers\n for station in stationsToPlot:\n staX = staFileObj.getX(station)\n staY = staFileObj.getY(station)\n dia.addStationMarker(\n staX,\n staY,\n label=station,\n printLabel=True,\n marker='*')\n # add text to plot. 
x,y are in normalized axis coordinates [0,1]\n dia.ax.text(0.05, 0.98, 'custom text', fontsize=fontsize,\n verticalalignment='top', horizontalalignment='left',\n transform=dia.ax.transAxes)\n\n # elevation time series\n # define the time range to plot\n elevStartTime = datetime.datetime(2012, 5, 4, 0, 0)\n elevEndTime = datetime.datetime(2012, 5, 5, 0, 15)\n elevMeanTime = elevStartTime + (elevEndTime - elevStartTime) / 2\n elevLim = [-1.5, 2.5]\n dia = timeSeriesPlotDC2(\n xlabel=elevMeanTime.strftime('%Y %b %d'),\n ylim=elevLim)\n dia.setAxes(axElev)\n #dia.addShadedRange( timeStamp, timeStamp+datetime.timedelta(seconds=30), facecolor='IndianRed')\n dia.addShadedRange(\n timeStamp,\n timeStamp,\n edgecolor='IndianRed',\n facecolor='none',\n linewidth=2)\n tag = elevDC.getMetaData('tag')\n dia.addSample(\n elevDC.timeWindow(\n elevStartTime,\n elevEndTime),\n label=tag,\n color='k')\n dia.addTitle('Elevation ({0:s}) [m]'.format(\n elevDC.getMetaData('location').upper()))\n # adjust the number of ticks in x/y axis\n dia.updateXAxis(maxticks=5)\n dia.updateYAxis(maxticks=3, prune='lower')\n\n # transects\n dia = transectSnapshotDC(\n clabel='Salinity',\n unit='psu',\n cmap=colormap,\n clim=salt_clim)\n dia.setAxes(axTran1)\n #transectDC_salt.data *= 1e-3\n dia.addSample(transectDC_salt, timeStamp, N=ncontours)\n dia.addTitle('')\n dia.showColorBar()\n # plot station markers\n for station in stationsToPlot:\n staX = staFileObj.getX(station)\n staY = staFileObj.getY(station)\n dia.addStationMarker(staX, staY, label=station, color='k',\n linewidth=1.5, linestyle='dashed')\n # do not show x axis ticks and label for this plot\n dia.hideXTicks()\n\n dia = transectSnapshotDC(clabel='TKE', unit='m2s-1', logScale=True,\n clim=[-7, -2], climIsLog=True, cmap=colormap_kine)\n dia.setAxes(axTran2)\n dia.addSample(transectDC_kine, timeStamp, N=ncontours)\n # plot station markers\n for station in stationsToPlot:\n staX = staFileObj.getX(station)\n staY = staFileObj.getY(station)\n dia.addStationMarker(staX, staY, label=station, color='k',\n linewidth=1.5, linestyle='dashed')\n dia.addTitle('')\n dia.showColorBar()\n dia.updateXAxis(maxticks=15)\n dia.updateYAxis(maxticks=6)\n\n #-------------------------------------------------------------------------\n # Save to disk\n #-------------------------------------------------------------------------\n dateStr = timeStamp.strftime('%Y-%m-%d_%H-%M')\n filename = '_'.join([imgPrefix, dateStr])\n saveFigure(\n imgDir,\n filename,\n imgFiletype,\n verbose=True,\n dpi=200,\n bbox_tight=True)\n plt.close()",
"def plotTime(self):\n plt.figure()\n t = [i for i in range(len(self.nodes_infected))]\n print(t)\n plt.title('Nodos infectados vs Tiempo')\n plt.xlabel('Instantes de tiempo')\n plt.ylabel('# de nodos infectados')\n plt.plot(t, self.nodes_infected)\n plt.grid(True)\n plt.show()",
"def plot(self, *args, **kwargs):\n pass",
"def plot_temperature(timestamps,timelabels,temperatures):\n\n #into x,y data and 2nd column as the x-axis tick\n TOOLS = \"pan,wheel_zoom,box_zoom,reset,save,box_select,lasso_select\"\n p = plt.figure(title=\"Christchurch Temperature\", tools=TOOLS,\n x_axis_label='Record Time', y_axis_label='Temperature(\"C)')\n\n # add a line renderer with legend and line thickness\n\n p.xaxis.ticker = timestamps\n p.xaxis.major_label_overrides=(dict(zip(timestamps,timelabels)))\n p.xaxis.major_label_orientation = pi/2\n p.xaxis.ticker.desired_num_ticks = 1\n\n p.line(timestamps,temperatures, legend_label=\"Temperature\", line_width=2)\n\n from bokeh.resources import CDN\n from bokeh.embed import components\n script, div = components(p)\n \n return get_bokeh_plot_head(), script, div"
]
| [
"0.6167347",
"0.6045964",
"0.6005844",
"0.60007274",
"0.5986079",
"0.5946726",
"0.5940409",
"0.59366363",
"0.5927733",
"0.59202886",
"0.5919486",
"0.59166664",
"0.589321",
"0.5878967",
"0.5878385",
"0.58383024",
"0.5815395",
"0.58096075",
"0.5796197",
"0.5794684",
"0.5793674",
"0.5772768",
"0.57591444",
"0.57572913",
"0.5755575",
"0.57536596",
"0.5752423",
"0.5751891",
"0.57407427",
"0.57320255"
]
| 0.7408538 | 0 |
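The `config_plot` snippet in the row above overlays a second `ViewBox` on a pyqtgraph `PlotItem` so humidity can be drawn against the right-hand axis while temperature uses the left. A minimal standalone sketch of that dual-axis pattern follows; the sample data and widget names are illustrative and not taken from the record.

```python
# Minimal dual-axis pyqtgraph sketch (illustrative, not part of the dataset record).
import pyqtgraph as pg

app = pg.mkQApp()

plot_widget = pg.PlotWidget()
p1 = plot_widget.plotItem
p1.setLabel("left", "temperature", units="Celsius")
p1.setLabel("right", "humidity", units="%")
p1.showAxis("right")

# Second ViewBox shares the scene and is driven by the right axis.
p2 = pg.ViewBox()
p1.scene().addItem(p2)
p1.getAxis("right").linkToView(p2)
p2.setXLink(p1)

def sync_views():
    # Keep the overlay geometry in sync when the main plot is resized.
    p2.setGeometry(p1.vb.sceneBoundingRect())

p1.vb.sigResized.connect(sync_views)
sync_views()

p1.plot([0, 1, 2, 3], [21.0, 21.5, 22.0, 21.8], pen={"color": "r", "width": 2})
p2.addItem(pg.PlotCurveItem([0, 1, 2, 3], [45, 47, 44, 46], pen={"color": "b", "width": 2}))

plot_widget.show()
app.exec()  # use app.exec_() on older Qt bindings
```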
find the octant of the point p | def octant(p):
x = p[0]
y = p[1]
z = p[2]
if z > 0:
if y > 0:
if x > 0:
return 1
else:
return 2
else:
if x > 0:
return 4
else:
return 3
else:
if y > 0:
if x > 0:
return 5
else:
return 6
else:
if x > 0:
return 8
else:
return 7 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def pentagonal_index(P):\n return (1 + sqrt(1 + 24 * P)) / 6",
"def pentagonal(n):\n return (n * ((3 * n) - 1)) / 2",
"def octagonal(n: int) -> int:\n return int(n * (3 * n - 2))",
"def pentagon(n) -> int:\n\n return (n * (3 * n - 1)) // 2",
"def pentagonal(n: int) -> int:\n return int(n * (3 * n - 1) / 2)",
"def draw_p_to_eps(p):\n return ppf((p + 1.0) / 2)",
"def get_abscissa(self, p):\n return np.dot(p - self.zero, self.direction)",
"def classify_point(self, point):\n diagonal = point.coordinates - self.center\n octant = tuple(1 if x >= 0 else -1 for x in diagonal)\n return octant",
"def _renorm_p(self, p):\n return np.sign(p)*np.sqrt(np.sqrt(np.abs(p)))",
"def pent( a, b ):\n return P(a) - b",
"def is_pentagonal(P):\n return sqrt(1 + 24 * P) % 6 == 5",
"def get_line_distance(self, p):\n\n y = 1000 * p.y\n R = 1000 * self.geometry.R\n x = copysign(sqrt(y ** 2 + (R - sqrt(R ** 2 - y ** 2))), y)\n x = 2 * R * asin(x / (2 * R))\n #x=y\n b = -x / sqrt(R ** 2 - x ** 2)\n theta = atan(b) # grating tangent angle\n print b, theta\n d = 0\n for n, a in enumerate(self.an):\n d += a * x ** n\n d *= cos(theta)\n return 1e-3 / d",
"def legendre_symbol(a, p):\n ls = pow((int)(a), (int)((p - 1) / 2), (int)(p))\n return -1 if ls == p - 1 else ls",
"def legendre_symbol(a, p):\r\n ls = pow(a, (p - 1) // 2, p)\r\n return -1 if ls == p - 1 else ls",
"def legendre_symbol(a, p):\n ls = pow(a, (p - 1) // 2, p)\n return -1 if ls == p - 1 else ls",
"def projectPoint(self,p):\n a,b,c = self.a, self.b, self.c\n x,y = p\n return numpy.array( [ b*(x*b-y*a) - c*a, a*(y*a-x*b) - c*b ] )",
"def legendre_symbol(a, p):\n ls = pow(a, (p - 1) // 2, p)\n return -1 if ls == p - 1 else ls",
"def legendre_symbol(a, p):\n ls = pow(a, (p - 1) / 2, p)\n return -1 if ls == p - 1 else ls",
"def legendre_symbol(a, p):\n ls = pow(a, (p - 1) / 2, p)\n return -1 if ls == p - 1 else ls",
"def sign_line(pt, P1, P2):\n x1, y1 = P1\n x2, y2 = P2\n x, y = pt\n\n return np.sign((x - x1)*(y2 - y1) - (y-y1)*(x2-x1))",
"def flipy(self, p):\n return int(p.x), int(-p.y+self.h)",
"def fun_lorentzian(p,r):\n return p[1] / ((r/p[0])**2 + 1)",
"def sign(p):\n to_count = filter(lambda x: x[0] > x[1], combinations(p, 2))\n sign_exp = sum(1 for _ in to_count) % 2\n return (-1)**sign_exp",
"def polygonal_number(s, n):\n return (n*n*(s-2)-n*(s-4))/2",
"def legendre_symbol(a, p):\n\tls = pow(a, (p - 1) // 2, p)\n\treturn -1 if ls == p - 1 else ls",
"def oppervlakte(self):\n x = self.r*self.r*pi\n return x",
"def is_pentagonal(n):\r\n if ((1+(24*n+1)**0.5) / 6)%1 == 0:\r\n return True\r\n return False",
"def _re(self, p):\n return self.edges[:, 0, :] - p # 0 is arbitrary - the other end also works",
"def angle(z):",
"def _rawprng(self):\n self.p += 1 \n if self.p >= self.o:\n\t\t\tself.p = 0\n t = 1768863 * self.s[self.p] + self.c * 2.3283064365386963e-10\n self.c = int(t) | 0\n self.s[self.p] = t - self.c\n return self.s[self.p]"
]
| [
"0.6850405",
"0.6668282",
"0.66609925",
"0.66571915",
"0.6533113",
"0.6394671",
"0.6280675",
"0.606131",
"0.6050526",
"0.6026284",
"0.5963669",
"0.59469676",
"0.5932404",
"0.5902425",
"0.58995277",
"0.5889232",
"0.58607674",
"0.5808065",
"0.5808065",
"0.578582",
"0.57765496",
"0.5776483",
"0.5764316",
"0.5753727",
"0.57405114",
"0.57176644",
"0.57053804",
"0.5672032",
"0.5656223",
"0.565531"
]
| 0.81374913 | 0 |
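A quick sanity check of the `octant()` record above, assuming the function is in scope. The numbering starts at 1 for the (+,+,+) octant, runs counter-clockwise through the upper half-space (z > 0), and repeats the same pattern as 5-8 for z <= 0; because the comparisons are strict (`> 0`), points lying on an axis fall into the non-positive branches.

```python
# Expected octant for one representative point per sign combination.
points = {
    (1, 1, 1): 1, (-1, 1, 1): 2, (-1, -1, 1): 3, (1, -1, 1): 4,
    (1, 1, -1): 5, (-1, 1, -1): 6, (-1, -1, -1): 7, (1, -1, -1): 8,
}
for p, expected in points.items():
    assert octant(p) == expected, (p, octant(p), expected)
print("all sign combinations mapped as expected")
```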
Check if an axis from beta1 to beta2 is nearly perpendicular to an axis from alpha1 to alpha2 and if their minimal distance is under maxDist | def isSecondOblateAxis(alpha1, alpha2, beta1, beta2, maxDist, maxTorsAngle):
a1 = np.asarray(alpha1)
a2 = np.asarray(alpha2)
b1 = np.asarray(beta1)
b2 = np.asarray(beta2)
#lent = alpha1 - beta1
adir = a2 - a1
bdir = b2 - b1
aLength = np.sqrt ( np.dot(adir, adir) )
bLength = np.sqrt ( np.dot(bdir, bdir) )
DotProdNormed = np.dot(adir, bdir) / ( aLength * bLength )
maxTors = np.cos( np.radians( maxTorsAngle ))
if (abs(DotProdNormed) > maxTors):
# print beta1, beta2, "not rectangular, angle = ", np.arccos(DotProdNormed)
return False
# print beta1, beta2, "is rectangular."
# find nearest point to alpha mid on the potential beta axis by bisection
# midAlpha = [a2 + 0.5 * dAlph for a2, dAlph in zip(alpha2, dirAlpha)]
axisDist = minimalDistance(a1, a2, b1, b2)
# print "Distance of", a1, "<->", a2, " to ", b1, "<->", b2, "is", axisDist
#midBeta = [b2 + 0.5 * dBeta for b2, dBeta in zip(beta2, dirBeta)]
if axisDist < maxDist:
# print b1, "<->", b2, "is possible axis"
return True
else:
# print b1, "<->", b2, "is too far (", axisDist ,") from", a1, "<->", a2, ", maximal allowed distance =", maxDist
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _check_optimality(self):\n\n dual_obj = -0.5* np.dot(self.beta, self.beta) + np.sum(self.alpha)\n\n prim_obj = 0.5* np.dot(self.beta, self.beta) + self.C * np.sum( np.maximum(1 - np.multiply(np.dot(self.X, self.beta), self.y), 0))\n\n # print (prim_obj - dual_obj)\n self.gap = prim_obj - dual_obj\n if self.gap <= 1e-6:\n return True\n else:\n return False",
"def _is_max(self, y0, y1, y2):\n return True if (y1 - y0 > 0) and (y2 - y1 < 0) else False",
"def IsPointInsideMesh2(obj, p, max_dist = 1.84467e+19):\n bResult, point, normal, face = obj.closest_point_on_mesh(p, max_dist)\n p2 = point-p\n v = p2.dot(normal)\n return not(v < 0.0)",
"def is_separating_axis(o, p1, p2):\n min1, max1 = float('+inf'), float('-inf')\n min2, max2 = float('+inf'), float('-inf')\n\n for v in p1:\n projection = np.dot(v, o)\n\n min1 = min(min1, projection)\n max1 = max(max1, projection)\n\n for v in p2:\n projection = np.dot(v, o)\n\n min2 = min(min2, projection)\n max2 = max(max2, projection)\n\n if max1 >= min2 and max2 >= min1:\n d = min(max2 - min1, max1 - min2)\n # push a bit more than needed so the shapes do not overlap in future\n # tests due to float precision\n d_over_o_squared = d/np.dot(o, o) + 1e-10\n pv = d_over_o_squared*o\n return False, pv\n else:\n return True, None",
"def is_within_distance(target_location, current_location, orientation, max_distance, d_angle_th_up, d_angle_th_low=0):\n target_vector = np.array([target_location.x - current_location.x, target_location.y - current_location.y])\n norm_target = np.linalg.norm(target_vector)\n\n # If the vector is too short, we can simply stop here\n if norm_target < 0.001:\n return True\n\n if norm_target > max_distance:\n return False\n\n forward_vector = np.array(\n [math.cos(math.radians(orientation)), math.sin(math.radians(orientation))])\n d_angle = math.degrees(math.acos(np.clip(np.dot(forward_vector, target_vector) / norm_target, -1., 1.)))\n\n return d_angle_th_low < d_angle < d_angle_th_up",
"def is_within_distance(target_location, current_location, orientation, max_distance, d_angle_th_up, d_angle_th_low=0):\n target_vector = np.array([target_location.x - current_location.x, target_location.y - current_location.y])\n norm_target = np.linalg.norm(target_vector)\n\n # If the vector is too short, we can simply stop here\n if norm_target < 0.001:\n return True\n\n if norm_target > max_distance:\n return False\n\n forward_vector = np.array(\n [math.cos(math.radians(orientation)), math.sin(math.radians(orientation))])\n d_angle = math.degrees(math.acos(np.clip(np.dot(forward_vector, target_vector) / norm_target, -1., 1.)))\n\n return d_angle_th_low < d_angle < d_angle_th_up",
"def isCollinear(a,b,c):\r\n #return slope(a, b) == slope(b, c) == slope(c, a) #DOES NOT WORK\r\n #return (b[0] - a[0]) * (c[1] - a[1]) == (c[0] - a[0]) * (b[1] - a[1]) \r\n #return distance(a,b) + distance(b,c) == distance(a,c)\r\n x1 = a[0]\r\n y1 = a[1]\r\n x2 = b[0]\r\n y2 = b[1]\r\n x3 = c[0]\r\n y3 = c[1] \r\n if (x1*(y2 - y3)) + (x2*(y3 - y1)) + (x3*(y1-y2)) == 0: \r\n return True\r\n else:\r\n return False",
"def __gt__(self, other):\n return self.x ** 2 + self.y ** 2 > other.x ** 2 + other.y ** 2",
"def is_same_waypoint(self, wp1, wp2, max_d=0.5, max_v=0.5):\n dl = lambda a, b: math.sqrt((a.x-b.x)**2 + (a.y-b.y)**2 + (a.z-b.z)**2)\n ddif = dl(wp1.pose.pose.position, wp2.pose.pose.position)\n if ddif < max_d:\n return True\n return False",
"def is_approaching(self, other_particle):\n if self.pos_x < other_particle.pos_x:\n d_v_x = self.velocity_x - other_particle.velocity_x\n else:\n d_v_x = other_particle.velocity_x - self.velocity_x\n\n if self.pos_y < other_particle.pos_y:\n d_v_y = self.velocity_y - other_particle.velocity_y\n else:\n d_v_y = other_particle.velocity_y - self.velocity_y\n\n return d_v_x > 0 or d_v_y > 0",
"def derivative_of_dist_to_obstacle(min_dist, x_jnt0, y_jnt0, x_jnt1, y_jnt1,\n dx_jnt0, dy_jnt0, dx_jnt1, dy_jnt1,\n link_slope, x_obs, y_obs):\n dist, point_type = min_dist\n if point_type == 0:\n dist_der = ((x_jnt0 - x_obs) * dx_jnt0 + (y_jnt0 - y_obs) * dy_jnt0)\n dist_der /= dist\n elif point_type == 1:\n dist_der = ((x_jnt1 - x_obs) * dx_jnt1 + (y_jnt1 - y_obs) * dy_jnt1)\n dist_der /= dist\n elif point_type == 2:\n if link_slope is None:\n dist_der = dx_jnt0 if x_jnt0 > x_obs else -dx_jnt0\n elif link_slope == 0:\n dist_der = dy_jnt0 if y_jnt0 > y_obs else -dy_jnt0\n else:\n x_intersect = (\n x_obs / link_slope + y_obs + link_slope * x_jnt0 - y_jnt0\n ) / (link_slope + 1 / link_slope)\n y_intersect = link_slope * (x_intersect - x_jnt0) + y_jnt0\n dlink_slope = (\n (1 / (x_jnt1 - x_jnt0))\n * (dy_jnt1 - dy_jnt0 + link_slope * (dx_jnt1 - dx_jnt0))\n )\n dx_intersect = (\n link_slope**4 * dx_jnt0\n + link_slope**2 * dlink_slope * (y_jnt0 - y_obs)\n - link_slope**3 * dy_jnt0\n + dlink_slope * (y_obs - y_jnt0)\n + 2 * link_slope * dlink_slope * (x_jnt0 - x_obs)\n + link_slope**2 * dx_jnt0\n - link_slope * dy_jnt0\n ) / (1 + link_slope**2) ** 2\n dy_intersect = (link_slope * (dx_intersect - dx_jnt0)\n + dlink_slope * (x_intersect - x_jnt0)\n + dy_jnt0)\n dist_der = (\n (x_intersect - x_obs) * dx_intersect\n + (y_intersect - y_obs) * dy_intersect\n ) / dist\n return dist_der",
"def __ge__(self, other):\n return self.x ** 2 + self.y ** 2 >= other.x ** 2 + other.y ** 2",
"def _inside_bounds(A, B):\n for axis in 'xyz':\n minA, maxA = axis_bounds(A, axis)\n minB, maxB = axis_bounds(B, axis)\n if (minA <= minB) or (maxA >= maxB):\n return False\n\n return True",
"def test_epipolar(dxy_0, ep_vec, dxy, tol):\n delta=np.abs(np.dot((dxy-dxy_0), [ep_vec[1], -ep_vec[0]]))\n disp_mag=np.sqrt((dxy[:,0]-dxy_0[0])**2 +(dxy[:,1]-dxy_0[1])**2)\n good=(delta < tol) | (delta < 0.02 * disp_mag )\n return good, delta",
"def find_out_difference_perpendiculars(lap, ref_lap):\n\n distances = []\n\n for i in lap.index:\n point = lap.loc[i]\n\n closest_index = find_closest_point(point, ref_lap)\n closest_point = ref_lap.loc[closest_index]\n\n neighbor_i = len(ref_lap) - 1 if closest_index == 0 else closest_index - 1\n neighbor1 = ref_lap.loc[neighbor_i]\n neighbor_i = 0 if len(ref_lap) == closest_index + 1 else closest_index + 1\n neighbor2 = ref_lap.loc[neighbor_i]\n\n v1 = create_vector(closest_point, point)\n v2 = create_vector(closest_point, neighbor1)\n v3 = create_vector(closest_point, neighbor2)\n\n angle1 = find_angle_between_vectors(v1, v2)\n angle2 = find_angle_between_vectors(v1, v3)\n\n degrees90 = math.pi / 2\n min_dist = -1\n if angle1 > degrees90 and angle2 > degrees90:\n min_dist = line_length(point.LAT, point.LON, closest_point.LAT, closest_point.LON)\n elif angle1 < degrees90 and angle2 < degrees90:\n dist1 = find_shortest_distance(point, closest_point, neighbor1)\n dist2 = find_shortest_distance(point, closest_point, neighbor2)\n min_dist = dist1 if dist1 <= dist2 else dist2\n elif angle1 <= degrees90:\n min_dist = find_shortest_distance(point, closest_point, neighbor1)\n elif angle2 <= degrees90:\n min_dist = find_shortest_distance(point, closest_point, neighbor2)\n\n if min_dist == -1:\n print('ERROR: Could not find distance')\n print(\"Indices: {} {}\\nAngles: {} {}\".format(i, closest_index, angle1, angle2))\n elif math.isnan(min_dist):\n print(\"NAN value!!!\\nIndices: {} {}\\nAngles: {} {}\".format(i, closest_index, angle1, angle2))\n elif min_dist < 0:\n print(\"Negative value!!!\\nIndices: {} {}\\nAngles: {} {}\".format(i, closest_index, angle1, angle2))\n else:\n min_dist = degrees2kilometers(min_dist) * 100000 # in centimeters\n distances.append(min_dist)\n\n return distances",
"def validate_points(a, b):\r\n\tdiff_y = b[0] - a[0]\r\n\tdiff_x = b[1] - a[1]\r\n\r\n\treturn (diff_y == 0 and diff_x != 0) or (diff_x == 0 and diff_y != 0) or abs(diff_x) == abs(diff_y)",
"def distance_checker(xyz1, xyz2):\n return math.sqrt((xyz1[0] - xyz2[0])**2 + (xyz1[1] - xyz2[1])**2 +\n (xyz1[2] - xyz2[2])**2)",
"def surface_distances(x, y, hausdorff_percentile=None, return_coordinate_max_distance=False):\n\n assert x.shape == y.shape, 'both inputs should have same size, had {} and {}'.format(x.shape, y.shape)\n n_dims = len(x.shape)\n\n hausdorff_percentile = 100 if hausdorff_percentile is None else hausdorff_percentile\n hausdorff_percentile = utils.reformat_to_list(hausdorff_percentile)\n\n # crop x and y around ROI\n _, crop_x = edit_volumes.crop_volume_around_region(x)\n _, crop_y = edit_volumes.crop_volume_around_region(y)\n\n # set distances to maximum volume shape if they are not defined\n if (crop_x is None) | (crop_y is None):\n return max(x.shape), max(x.shape)\n\n crop = np.concatenate([np.minimum(crop_x, crop_y)[:n_dims], np.maximum(crop_x, crop_y)[n_dims:]])\n x = edit_volumes.crop_volume_with_idx(x, crop)\n y = edit_volumes.crop_volume_with_idx(y, crop)\n\n # detect edge\n x_dist_int = distance_transform_edt(x * 1)\n x_edge = (x_dist_int == 1) * 1\n y_dist_int = distance_transform_edt(y * 1)\n y_edge = (y_dist_int == 1) * 1\n\n # calculate distance from edge\n x_dist = distance_transform_edt(np.logical_not(x_edge))\n y_dist = distance_transform_edt(np.logical_not(y_edge))\n\n # find distances from the 2 surfaces\n x_dists_to_y = y_dist[x_edge == 1]\n y_dists_to_x = x_dist[y_edge == 1]\n\n max_dist = list()\n coordinate_max_distance = None\n for hd_percentile in hausdorff_percentile:\n\n # find max distance from the 2 surfaces\n if hd_percentile == 100:\n max_dist.append(np.max(np.concatenate([x_dists_to_y, y_dists_to_x])))\n\n if return_coordinate_max_distance:\n indices_x_surface = np.where(x_edge == 1)\n idx_max_distance_x = np.where(x_dists_to_y == max_dist)[0]\n if idx_max_distance_x.size != 0:\n coordinate_max_distance = np.stack(indices_x_surface).transpose()[idx_max_distance_x]\n else:\n indices_y_surface = np.where(y_edge == 1)\n idx_max_distance_y = np.where(y_dists_to_x == max_dist)[0]\n coordinate_max_distance = np.stack(indices_y_surface).transpose()[idx_max_distance_y]\n\n # find percentile of max distance\n else:\n max_dist.append(np.percentile(np.concatenate([x_dists_to_y, y_dists_to_x]), hd_percentile))\n\n # find average distance between 2 surfaces\n if x_dists_to_y.shape[0] > 0:\n x_mean_dist_to_y = np.mean(x_dists_to_y)\n else:\n x_mean_dist_to_y = max(x.shape)\n if y_dists_to_x.shape[0] > 0:\n y_mean_dist_to_x = np.mean(y_dists_to_x)\n else:\n y_mean_dist_to_x = max(x.shape)\n mean_dist = (x_mean_dist_to_y + y_mean_dist_to_x) / 2\n\n # convert max dist back to scalar if HD only computed for 1 percentile\n if len(max_dist) == 1:\n max_dist = max_dist[0]\n\n # return coordinate of max distance if necessary\n if coordinate_max_distance is not None:\n return max_dist, mean_dist, coordinate_max_distance\n else:\n return max_dist, mean_dist",
"def distance_hyperbox(b1,b2):\n return max(0,np.max(np.hstack((b1.l-b2.u,b2.l-b1.u))))",
"def __comparing_points(self, point1, point2) -> bool:\n return (abs(point1.x - point2.x) <= self.dirt_pos_tolerance and abs(\n point1.y - point2.y) <= self.dirt_pos_tolerance)",
"def is_bound(pos1, el1, pos2, el2):\n threshold = 0.1\n if el1 == 'H' or el2 == 'H':\n threshold = 0.2\n if np.linalg.norm(np.array(pos1) - np.array(pos2)) < covalence_radius[el1] + covalence_radius[el2] + threshold:\n return True\n return False",
"def is_within_distance_ahead(target_transform, current_transform, max_distance):\n target_vector = np.array([target_transform.location.x - current_transform.location.x, target_transform.location.y - current_transform.location.y])\n norm_target = np.linalg.norm(target_vector)\n\n # If the vector is too short, we can simply stop here\n if norm_target < 0.001:\n return True\n\n if norm_target > max_distance:\n return False\n\n fwd = current_transform.get_forward_vector()\n forward_vector = np.array([fwd.x, fwd.y])\n d_angle = math.degrees(math.acos(np.clip(np.dot(forward_vector, target_vector) / norm_target, -1., 1.)))\n\n return d_angle < 90.0",
"def calc_dist(self, points): \n dist_x = [self._current_pose.position.x - p.pose.position.x for p in points]\n dist_y = [self._current_pose.position.y - p.pose.position.y for p in points]\n dist = np.hypot(dist_x,dist_y) \n if len(dist) > 0:\n return min(dist) \n else: \n return 0",
"def eucl_dist(x_0, y_0, x_1, y_1):\n return sqrt((x_1 - x_0)**2 + (y_1 - y_0)**2)",
"def __abs__(self):\n abspos = abs(self.pos)\n absvel = abs(self.vel)\n return np.amax((abspos, absvel))",
"def kolmogorov_distance(pdfx, pdfy):\n cdfx = np.cumsum(pdfx)/np.sum(pdfx)\n cdfy = np.cumsum(pdfy)/np.sum(pdfy)\n return np.max(np.abs(cdfx - cdfy))",
"def max_min_distance(self, T0: SE3, T1: SE3, T2: SE3) -> (float, float, str):\n tol = 10e-10\n # T_rel_01 = T0.inv().dot(T1)\n T_rel_12 = T1.inv().dot(T2)\n\n p0 = T0.as_matrix()[0:3, 3]\n z1 = T1.as_matrix()[0:3, 2]\n x1 = T1.as_matrix()[0:3, 0]\n p1 = T1.as_matrix()[0:3, 3]\n p2 = T2.as_matrix()[0:3, 3]\n\n p0_proj = p0 - (z1.dot(p0 - p1)) * z1 # p0 projected onto T1 plane\n p2_proj = p2 - (z1.dot(p2 - p1)) * z1 # p2 projected onto T1 plane\n\n if norm(p1 - p0_proj) < tol or norm(p2_proj - p1) < tol:\n d = norm(T2.trans - T0.trans)\n return d, d, False\n\n r = norm(p2_proj - p1) # radius of circle p2_proj is on\n delta_th = arctan2(cross(x1, p2_proj - p1).dot(z1), np.dot(x1, p2_proj - p1))\n\n # closest and farthest point from p0_proj\n sol_1 = r * (p0_proj - p1) / norm(p0_proj - p1) + p1\n sol_2 = -r * (p0_proj - p1) / norm(p0_proj - p1) + p1\n sol_min = min(sol_1 - p0_proj, sol_2 - p0_proj, key=norm) + p0_proj\n sol_max = max(sol_1 - p0_proj, sol_2 - p0_proj, key=norm) + p0_proj\n\n th_max = arctan2(cross(x1, sol_max - p1).dot(z1), np.dot(x1, sol_max - p1))\n th_min = arctan2(cross(x1, sol_min - p1).dot(z1), np.dot(x1, sol_min - p1))\n\n rot_min = rot_axis(th_min - delta_th, \"z\")\n d_min = norm(T1.dot(rot_min).dot(T_rel_12).trans - T0.trans)\n\n rot_max = rot_axis(th_max - delta_th, \"z\")\n d_max = norm(T1.dot(rot_max).dot(T_rel_12).trans - T0.trans)\n\n if abs(th_max - delta_th) < tol and d_max > d_min:\n return d_max, d_min, \"below\"\n elif abs(th_min - delta_th) < tol and d_max > d_min:\n return d_max, d_min, \"above\"\n else:\n return d_max, d_min, False",
"def is_perpendicular_to(self, vector):\n\n if abs(self.dot(vector)) < 0.01:\n return True\n return False",
"def is_within_distance_ahead(target_transform, current_transform, max_distance):\n target_vector = np.array([target_transform.location.x - current_transform.location.x,\n target_transform.location.y - current_transform.location.y])\n norm_target = np.linalg.norm(target_vector)\n\n # If the vector is too short, we can simply stop here\n if norm_target < 0.001:\n return True\n\n if norm_target > max_distance:\n return False\n\n fwd = current_transform.get_forward_vector()\n forward_vector = np.array([fwd.x, fwd.y])\n d_angle = math.degrees(math.acos(np.clip(np.dot(forward_vector, target_vector) / norm_target, -1., 1.)))\n\n return d_angle < 90.0",
"def max_distance(posA, posB, width, height, skip_up=True):\n x, y = posA\n x_dest, y_dest = posB\n\n cross = list(bresenham(x, y, x_dest, y_dest))[1:]\n x_dest = x_prev = x\n y_dest = y_prev = y\n #print('{} ==> {}'.format(posA, posB))\n #print('cross {}'.format(cross))\n #print('cells {}'.format(Globals.instance.cells))\n\n for p in cross:\n # see if any the next points will hit an obstacle (coming down)\n next_area = to_area(p[0], p[1], width, height, bottom_only=True)\n #print(' next area {}'.format(next_area))\n clear = True\n if p[1] > y_prev or skip_up is False:\n for pn in next_area:\n if pn in Globals.instance.cells and is_wall(pn, cells=Globals.instance.cells[pn]):\n #print(' there is something at {}'.format(pn))\n clear = False\n break\n if clear:\n x_dest = int(p[0])\n y_dest = int(p[1])\n else:\n break\n x_prev = p[0]\n y_prev = p[1]\n #print('jumping to {}'.format((x_dest,y_dest)))\n return x_dest, y_dest"
]
| [
"0.6011611",
"0.59310466",
"0.58656365",
"0.56454873",
"0.5604216",
"0.5604216",
"0.5479761",
"0.54793507",
"0.5468954",
"0.5461638",
"0.54490155",
"0.54316163",
"0.5398481",
"0.5396198",
"0.53858936",
"0.5370805",
"0.5352234",
"0.5349109",
"0.53405297",
"0.5321658",
"0.5305877",
"0.5293749",
"0.5287598",
"0.52870667",
"0.52807903",
"0.527949",
"0.5278495",
"0.5260152",
"0.52568424",
"0.5208354"
]
| 0.70764565 | 0 |
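`isSecondOblateAxis` above calls a `minimalDistance(a1, a2, b1, b2)` helper that is not included in the record. A plausible stand-in is the closed-form distance between the two infinite lines spanned by the axes; this is an assumption — the original helper may instead clip to the finite segments or use the bisection hinted at in the comments.

```python
import numpy as np

def minimalDistance(a1, a2, b1, b2):
    """Hypothetical helper: distance between the infinite lines a1->a2 and b1->b2."""
    a1, a2, b1, b2 = map(np.asarray, (a1, a2, b1, b2))
    u = a2 - a1                      # direction of the alpha axis
    v = b2 - b1                      # direction of the beta axis
    n = np.cross(u, v)               # common normal of the two lines
    n_norm = np.linalg.norm(n)
    if n_norm < 1e-12:               # (nearly) parallel lines
        return np.linalg.norm(np.cross(b1 - a1, u)) / np.linalg.norm(u)
    return abs(np.dot(b1 - a1, n)) / n_norm
```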
checks if a sample point (sx,sy,sz) is inside the prolate shape with semi-axes alpha > beta. The translation vector and rotation matrix have to describe the transformation that aligns the alpha-axis with the x-axis of the coordinate system and sets the center to the origin. The fundamental ellipsoidal equation is applied to the transformed sample point | def isInProlate(sample, alpha, beta):
E = sample[0] * sample[0] / (alpha * alpha)
E += (sample[1] * sample[1] + sample[2] * sample[2] ) / (beta * beta)
if E > 1.0:
return False
else:
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def in_ellipse(x,y,a,b):\n return ellipse(x,y,a,b) <= 1",
"def isInside(self, point):\n # we rotate back the point to the frame parallel to the axis of the ellipse\n rotatedPoint = self.rotatePoint(point)\n # we check if each point is inside the associated liquid drop\n return ((rotatedPoint[:, :, 0]/self.axisA[:, None])**2 + (rotatedPoint[:, :, 1]/self.axisB[:, None])**2 < 1)",
"def inside( self, point ):\n for i in range( 0, len(point) ):\n if math.fabs( self.center[i] - point[i] ) > self.dimLens[i]/2.0:\n return False;\n return True;",
"def point_in_ellipse(point: Vector, center: Vector, angle: float, length: float, width: float) -> bool:\n c, s = np.cos(angle), np.sin(angle)\n r = np.matrix([[c, -s], [s, c]])\n ru = r.dot(point - center)\n return np.sum(np.square(ru / np.array([length, width]))) < 1",
"def _in_box(self, point, extent):\n return ((point[0] >= extent[0]) and\n (point[0] <= extent[1]) and\n (point[1] >= extent[2]) and\n (point[1] <= extent[3]))",
"def check_normality(self,alpha = 0.05):\n\n stat1, p = shapiro(self.x)\n \n if self.y is not None:\n stat2, p2 = shapiro(self.y)\n \n if p < alpha:\n if self.y is not None:\n if p2 < alpha:\n self._verbose('x and y do not look Gaussian (reject H0)')\n return False\n else:\n self._verbose('x does not look Gaussian, but y looks Gaussian (fail to reject H0)')\n return True\n else:\n self._verbose('Sample does not look Gaussian (reject H0)')\n return False\n\n else:\n if self.y is not None:\n if p2 < alpha:\n self._verbose('x looks Gaussian, but y does not look Gaussian (fail to reject H0)')\n return False\n else:\n self._verbose('x and y look Gaussian (fail to reject H0)')\n return True\n else:\n self._verbose('Sample looks Gaussian (fail to reject H0)')\n return True",
"def __contains__(self, point): \n corners = self.corners\n\n if isinstance(point, tuple):\n from pyresample.spherical_geometry import Coordinate\n retval = planar_point_inside(Coordinate(*point), corners)\n else:\n retval = planar_point_inside(point, corners)\n\n #print ' retval from FALSE CORNERS contains '+str(retval)\n\n return retval",
"def in_square(self, point):\n size = self.size\n centre = self.centre\n # Find the upper and lower bounds for the square in-terms of x and y\n lower_x, upper_x = centre.x - size / 2, centre.x + size / 2\n lower_y, upper_y = centre.y - size / 2, centre.y + size / 2\n # Equals with lower bounds only\n return (lower_x <= point.x < upper_x) and (lower_y < point.y <= upper_y)",
"def at_loc((x, y), (cx, cy), eps=0.000035):\n\treturn (x - cx)**2 + (y - cy)**2 <= eps**2",
"def HasPoint(self, vtkAMRBox, , , p_float_6, p_float_7, p_float_8):\n ...",
"def is_inside(self, mX, mY, point):\n return (math.sqrt((point[0] - mX) * (point[0] - mX)\n + (point[1] - mY) * (point[1] - mY)) <= 2)",
"def point_inside_polygon(self, location, points):\n # Simplification: if the point is above the mean altitude of all the \n # points, then do not consider it to be inside the polygon. We could \n # also perform interesting calculations here, but we won't have that \n # many objects of differing altitude anyway.\n avg_alt = float(sum([point.alt for point in points]))/len(points)\n if avg_alt < location.alt - self.altitude_margin:\n return False\n\n edges = get_point_edges(points)\n num = sum(ray_intersects_segment(location, e[0], e[1]) for e in edges)\n return num % 2 == 1",
"def contains(self, point):\n return 0 <= point.x <= 1 \\\n and 0 <= point.y <= 1 \\\n and 0 <= point.z <= 1",
"def contains(self, point):\n scaledPoint = Point( point.x / self.scaleX\n , point.y / self.scaleY\n , point.z / self.scaleZ\n )\n\n return self.scaledObject.contains(scaledPoint)",
"def relative_interior_contains(self, point):\n try:\n p = vector(point)\n except TypeError: # point not iterable or no common ring for elements\n if len(point)>0:\n return False\n else:\n p = vector(self.field(), [])\n\n if len(p)!=self.ambient_dim():\n return False\n \n for eq in self.equation_generator():\n if not eq.contains(p):\n return False\n\n for ine in self.inequality_generator():\n if not ine.interior_contains(p):\n return False\n\n return True",
"def is_point_inside_hypersphere(point: np.array, c: List[float], r: float) -> bool:\n return np.linalg.norm(point - c) < r",
"def __contains__(self, point, e=10e-10):\n v1 = self.vector\n v2 = Vector.createFromTwoPoints(self.point, point)\n return abs(v1.angle - v2.angle) < e",
"def bee_at(self, point):\n \"\"\" Responsible for transformations \"\"\"\n pos, com, success = self.perception\n \n point = point.copy()\n \n if self.phase == 2:\n point -= self.position\n elif self.phase > 2:\n point = self.transform(point - self.position)\n\n return any(map(lambda x: array_equal(point,x),pos))",
"def __contains__(self, point):\n if not isinstance(point, np.ndarray):\n point = np.array(point)\n test = self.A.dot(point.flatten()) - self.b < ABS_TOL\n return np.all(test)",
"def __contains__(self, point, e=1e-10):\n if point == self.p1:\n return True\n v1 = Vector.createFromTwoPoints(self.p1, point)\n v2 = self.getVector()\n return (abs(v1.angle - v2.angle) % (2 * math.pi) < e) and (v1.norm <= v2.norm)",
"def is_point_inside_hypercube(point: List[float], c: List[float], r: float) -> bool:\n diff = np.subtract(point, c)\n return np.all(np.absolute(diff) <= r)",
"def __contains__(self, point):\n #### Original \n from pyresample.spherical_geometry import point_inside, Coordinate\n corners = self.corners\n\n if isinstance(point, tuple):\n return point_inside(Coordinate(*point), corners)\n else:\n return point_inside(point, corners)\n #### End Original\n #from .spherical import SphPolygon\n #log.info('RUNNING SPHERICAL in __contains__')\n #sphpoly = SphPolygon(corners)\n #return sphpoly.intersection(SphPolygon(point), sphpoly)",
"def isinsidepointXY(x,p):\n \n return dist(x,p) < epsilon",
"def IsPointInsideMesh(MeshObj, PointInObjectSpace):\n #direction is irellevant unless mesh is REALLY wierd shaped\n direction = mathutils.Vector((1,0,0)) \n epsilon = direction * 1e-6 \n count = 0 \n result, PointInObjectSpace, normal, index = MeshObj.ray_cast(PointInObjectSpace, direction) \n while result: \n count += 1 \n result, PointInObjectSpace, normal, index = MeshObj.ray_cast(PointInObjectSpace + epsilon, direction) \n return (count % 2) == 1",
"def IsPointInsideMesh2(obj, p, max_dist = 1.84467e+19):\n bResult, point, normal, face = obj.closest_point_on_mesh(p, max_dist)\n p2 = point-p\n v = p2.dot(normal)\n return not(v < 0.0)",
"def is_inside(self, p):\n s, t = self.get_barycentric_coord(p)\n if 0 <= s <= 1 and 0 <= t <= 1 and s + t <= 1:\n return True\n else:\n return False",
"def test_centeredEquation(self):\n\n A33, K = self.cs.centeredEquation\n self.assertTrue((self.A33 == A33).all())\n self.assertEqual(K, 1.)",
"def contains_point(self, x, y):\r\n if self.m == None:\r\n if abs(x - self.start[0]) > 0.6:\r\n return False\r\n else:\r\n if (y >= self.start[1] and y <= self.end[1]) or \\\r\n (y <= self.start[1] and y >= self.end[1]):\r\n return True\r\n else:\r\n return False\r\n else: \r\n y0 = int(self.m * x + self.n)\r\n if abs(y - y0) > 0.6: \r\n return False \r\n else: \r\n if ((x >= self.start[0] and x <= self.end[0]) or \\\r\n (x <= self.start[0] and x >= self.end[0])) and \\\r\n ((y >= self.start[1] and y <= self.end[1]) or \\\r\n (y <= self.start[1] and y >= self.end[1])): \r\n return True\r\n else:\r\n return False",
"def is_valid_point(self, P):\n x, y = P\n assert 0 <= x < self.fp and 0 <= y < self.fp, 'Point outside the group'\n LS = (y ** 2) % self.fp\n PS = (x ** 3 + self.a * x + self.b) % self.fp\n assert LS == PS, 'Point not valid - equation'",
"def phantom_ellipses(n_points,E):\n \n #Rescaling according to image size \n E[:,0] = E[:,0]*n_points/2 #semiaxis a\n E[:,1] = E[:,1]*n_points/2 #semiaxis b\n E[:,2] = E[:,2]*n_points/2 #x\n E[:,3] = E[:,3]*n_points/2 #y\n E[:,4] = E[:,4]*math.pi/180\n \n x,y = np.meshgrid(np.arange(0,n_points)-n_points//2 ,np.arange(0,n_points)-n_points//2 )\n nrow,ncol = E.shape\n phantom1 = np.zeros((y.shape[0], y.shape[1], nrow))\n\n for k in range(nrow): #itero sulle ellissi\n x_new = x - E[k,2]\n y_new = y - E[k,3]\n\n #find(( (x.*cosp + y.*sinp).^2)./asq + ((y.*cosp - x.*sinp).^2)./bsq <= 1); \n cosp = math.cos(E[k,4])\n sinp = math.sin(E[k,4])\n cond = np.square( x_new * cosp + y_new * sinp )*1/(E[k,0]*E[k,0]) + \\\n np.square(y_new * cosp - x_new * sinp)*1/(E[k,1]*E[k,1]) - 1\n\n for i in range(x.shape[0]):\n for j in range(x.shape[1]):\n if (cond[i,j] <= 0.0):\n phantom1[i,j,k] = E[k,5]; # gray scale\n else:\n phantom1[i,j,k] = 0.0;\n #endif\n #endfor\n #endfor\n #endfor\n phantom1 = phantom1.sum(axis=2)\n phantom = np.flipud(phantom1)\n return phantom"
]
| [
"0.6214587",
"0.6080027",
"0.59774446",
"0.5851352",
"0.57803434",
"0.5715725",
"0.56959075",
"0.5690712",
"0.56784356",
"0.56699437",
"0.5643856",
"0.5614901",
"0.5609464",
"0.5566931",
"0.5535911",
"0.5525101",
"0.55000794",
"0.5498535",
"0.5493109",
"0.547834",
"0.54584306",
"0.5446712",
"0.54466015",
"0.5427881",
"0.54177713",
"0.53983516",
"0.5380257",
"0.5375548",
"0.53687954",
"0.53573924"
]
| 0.6873963 | 0 |
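As the docstring of `isInProlate` states, the caller is expected to transform the raw sample into the canonical frame (center at the origin, alpha-axis along x) before the ellipsoid equation is evaluated. A sketch of that usage is below; the names `R`, `center` and the exact transform convention are assumptions, not taken from the record.

```python
import numpy as np

alpha, beta = 5.0, 2.0                  # semi-axes of the prolate spheroid, alpha > beta
R = np.eye(3)                           # rotation aligning the alpha-axis with x (assumed)
center = np.array([1.0, -2.0, 0.5])     # ellipsoid center in world coordinates (assumed)

raw_point = np.array([3.0, -1.0, 0.0])
sample = R @ (raw_point - center)       # move to the canonical frame, then test
print(isInProlate(sample, alpha, beta))
```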
The harvesting event occurs when the voltage U has an extremum. This is equivalent to I_Cp = Iq = 0. To distinguish the positive and negative zero crossings, the event function becomes Iq * sign(U) <= 0. | def ev_harvest(t, x, y):
Iq, UC, U = x
return Iq * sign(U) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def elu(x):\n return np.where(x < 0, np.exp(x) - 1, x)",
"def signal_hammer(icu, icu_slope, hammer_icu, hammer_slope):\n\n return (icu > hammer_icu and icu_slope > 0) or (icu_slope > hammer_slope)",
"def test_interferometer(self, tol):\n # fmt:off\n U = np.array([[0.83645892-0.40533293j, -0.20215326+0.30850569j],\n [-0.23889780-0.28101519j, -0.88031770-0.29832709j]])\n # fmt:on\n\n S = symplectic.interferometer(U)\n expected = np.block([[U.real, -U.imag], [U.imag, U.real]])\n\n assert np.allclose(S, expected, atol=tol, rtol=0)",
"def i0(x):\n return tt.switch(tt.lt(x, 5), 1 + x**2 / 4 + x**4 / 64 + x**6 / 2304 + x**8 / 147456\n + x**10 / 14745600 + x**12 / 2123366400,\n np.e**x / (2 * np.pi * x)**0.5 * (1 + 1 / (8 * x) + 9 / (128 * x**2) + 225 / (3072 * x**3)\n + 11025 / (98304 * x**4)))",
"def energy_func(self):\n return (\n self.inl[0].m.val_SI * (\n self.outl[0].h.val_SI - self.inl[0].h.val_SI) +\n self.inl[1].m.val_SI * (\n self.outl[1].h.val_SI - self.inl[1].h.val_SI))",
"def get_convection_vent(self,T_i,el):\n\n rad = radiation.Radiation()\n T_atm = rad.getTemp(el)\n\n Q_vent = self.mdot*self.Cp_air0*(T_i-T_atm) # Convection due to released air\n return Q_vent",
"def integrand(u):\n return erfcx(-u)\n #if u < -4.0:\n #return -1. / np.sqrt(np.pi) * (1.0 / u - 1.0 / (2.0 * u**3) + \n ##3.0 / (4.0 * u**5) - \n ##15.0 / (8.0 * u**7))\n #else:\n #return np.exp(u**2) * (1. + erf(u))",
"def energy_func(self):\n i = self.inl[0].to_flow()\n o = self.outl[0].to_flow()\n\n T_m = (T_mix_ph(i, T0=self.inl[0].T.val_SI) +\n T_mix_ph(o, T0=self.outl[0].T.val_SI)) / 2\n\n iam = (\n 1 - self.iam_1.val * abs(self.aoi.val) -\n self.iam_2.val * self.aoi.val ** 2)\n\n return (i[0] * (o[2] - i[2]) -\n self.A.val * (\n self.E.val * self.eta_opt.val * self.doc.val ** 1.5 * iam -\n (T_m - self.Tamb.val_SI) * self.c_1.val -\n self.c_2.val * (T_m - self.Tamb.val_SI) ** 2))",
"def _UHVI_indicator(self, kernel):\n return self._UHVI_indicator_archive(kernel).hypervolume_improvement",
"def vi2ev(v,mu):\n return 0.5*mu*mp*v**2/eV2J",
"def energy_func(self):\n i = self.inl[0].to_flow()\n o = self.outl[0].to_flow()\n\n T_m = (T_mix_ph(i, T0=self.inl[0].T.val_SI) +\n T_mix_ph(o, T0=self.outl[0].T.val_SI)) / 2\n\n return (i[0] * (o[2] - i[2]) -\n self.A.val * (\n self.E.val * self.eta_opt.val -\n (T_m - self.Tamb.val_SI) * self.lkf_lin.val -\n self.lkf_quad.val * (T_m - self.Tamb.val_SI) ** 2))",
"def keV(E):\n if np.min(E) >= 100:\n return E / 1000\n else:\n return E",
"def inv_acquisition(x):\n return -acquisition(x)",
"def test_SemiF47_level_0_5(self):\n self.assertEqual(viol_check(self.vol,5), [[131, 143]])",
"def qvib(v):\n T = s.Symbol(\"T\")\n return 1.0 / (1.0 - s.exp(-1.0 * (h * v) / (k * T)))",
"def eV(E):\n if np.max(E) < 100:\n return E * 1000\n else:\n return E",
"def _eventRs(self, phi, u):\n with np.errstate(all='ignore'):\n return 1/u[0] - self.Rs",
"def ev2vi(eV,mu): \n return cv*np.sqrt( eV*(eV+2.e0*mu*mpc2))/(eV+mu*mpc2)",
"def Evac_minus_EF_from_charge(Evac_minus_Ei, ni, charge_from_dopants, net_charge):\r\n # eh_charge is the charge from electrons and holes only\r\n eh_charge = net_charge - charge_from_dopants\r\n \r\n if eh_charge > 30 * ni:\r\n # Plenty of holes, negligible electrons\r\n p = eh_charge\r\n return Evac_minus_Ei + kT_in_eV * math.log(p / ni)\r\n if eh_charge < -30 * ni:\r\n # Plenty of electrons, negligible holes\r\n n = -eh_charge\r\n return Evac_minus_Ei - kT_in_eV * math.log(n / ni)\r\n \r\n # Starting here, we are in the situation where BOTH holes and electrons\r\n # need to be taken into account. Solve the simultaneous equations\r\n # p * n = ni**2 and p - n = eh_charge to get p and n.\r\n \r\n def solve_quadratic_equation(a,b,c):\r\n \"\"\" return larger solution to ax^2 + bx + c = 0 \"\"\"\r\n delta = b**2 - 4 * a * c\r\n if delta < 0:\r\n raise ValueError(\"No real solution...that shouldn't happen!\")\r\n return (-b + math.sqrt(delta)) / (2*a)\r\n\r\n if eh_charge > 0:\r\n # Slightly more holes than electrons\r\n p = solve_quadratic_equation(1, -eh_charge, -ni**2)\r\n return Evac_minus_Ei + kT_in_eV * math.log(p / ni)\r\n else:\r\n # Slightly more electrons than holes\r\n n = solve_quadratic_equation(1, eh_charge, -ni**2)\r\n return Evac_minus_Ei - kT_in_eV * math.log(n / ni)",
"def test_function_continuity(self):\n self.check_function_continuity()",
"def test_function_continuity(self):\n self.check_function_continuity()",
"def test_function_continuity(self):\n self.check_function_continuity()",
"def test_function_continuity(self):\n self.check_function_continuity()",
"def test_function_continuity(self):\n self.check_function_continuity()",
"def test_function_continuity(self):\n self.check_function_continuity()",
"def test_function_continuity(self):\n self.check_function_continuity()",
"def test_function_continuity(self):\n self.check_function_continuity()",
"def test_function_continuity(self):\n self.check_function_continuity()",
"def test_function_continuity(self):\n self.check_function_continuity()",
"def test_function_continuity(self):\n self.check_function_continuity()"
]
| [
"0.5832589",
"0.5714973",
"0.5664049",
"0.56627977",
"0.56614596",
"0.5622842",
"0.5586296",
"0.55655104",
"0.5534538",
"0.55224675",
"0.5492418",
"0.5483554",
"0.547994",
"0.5463935",
"0.54606885",
"0.5450898",
"0.54117996",
"0.53925806",
"0.5379839",
"0.5368728",
"0.5368728",
"0.5368728",
"0.5368728",
"0.5368728",
"0.5368728",
"0.5368728",
"0.5368728",
"0.5368728",
"0.5368728",
"0.5368728"
]
| 0.6363804 | 0 |
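The `sign(U)` factor in `ev_harvest` makes both kinds of extrema look the same to an event detector: at a maximum of U (U > 0) the current Iq falls from positive to negative, and at a minimum (U < 0) it rises from negative to positive, so `Iq * sign(U)` falls through zero in both cases. Below is a sketch of wiring such an event into `scipy.integrate.solve_ivp`; the right-hand side is a placeholder oscillator, not the record's harvester model, and the extra discrete-state argument of `ev_harvest` is dropped for the illustration.

```python
import numpy as np
from scipy.integrate import solve_ivp

def rhs(t, x):
    # Placeholder LC-like dynamics so that U actually reaches an extremum.
    Iq, UC, U = x
    return [-U, Iq, Iq]

def extremum_event(t, x):
    Iq, UC, U = x
    return Iq * np.sign(U)

extremum_event.terminal = True   # stop so the discrete CHARGE transition can be applied
extremum_event.direction = -1    # falling zero crossing at every extremum of U

sol = solve_ivp(rhs, (0.0, 2.0), [1.0, 0.0, 1.0], events=extremum_event)
print(sol.t_events[0])           # ~pi/4 for this placeholder system
```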
The SECE principle could be modeled with only one FSM state, CHARGE. After every extremum the output voltage U is reset to zero. The current Iq is also set to zero to avoid numerical problems. Additionally, the time-discrete state vector saves the energy E transferred through the output and every extremum of the voltage U. | def CHARGE(t, x, y):
Iq, UC, U = x
E, Umax = y # time discrete state vector
E_new = E + Cp/2 * U**2
return array([0.0, UC, 0.0]), array([E_new, U]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def sweep():\n \n set_enable_load(True) # turn input ON\n time.sleep(.250)\n \n print('Begin IV curve measurement')\n \n voc = open_circ() # measure open circuit voltage\n iv_curve(voc) # measure iv curve\n short_circ() # measure short circuit current\n \n time.sleep(.250)\n set_enable_load(False) # turn input OFF",
"def ekf_algorithm(self, u, y):\n x_est_before = self.x_estimated_state\n cov_matrix_before = self.cov_matrix\n # 1. Prediction\n # predict the state by integrate the time continuous system numerically\n sim_state_predict = self.heliSim.calc_step(u[0][0], u[1][0], self.no_disturbance_eval)\n x_est_predict = np.resize(sim_state_predict, (8, 1))\n # predict the new covariance by linearizing and discretizing the model\n Ak, Bk, Ck, Dk = self.get_linear_discrete_matrices(x_est_before, u[0][0], u[1][0])\n cov_matrix_predict = Ak @ cov_matrix_before @ np.transpose(Ak) + Bk @ self.N @ np.transpose(Bk)\n # 2. Update\n # compute kalman gain\n Kl = cov_matrix_predict @ np.transpose(Ck) @ np.linalg.inv(Ck @ cov_matrix_predict @ np.transpose(Ck) + self.W)\n # update state\n y_est = x_est_predict[0:3]\n x_est_update = x_est_predict + Kl @ (y - y_est)\n # update covariance matrix (identity matrix must have as many lines as the Kalman gain\n cov_matrix_update = (np.eye(np.size(Kl, 0)) - Kl @ Ck) @ cov_matrix_predict\n\n # check if the update step state needs to be changed because of limit crossing\n # if that is the case, correct the state and set the state of the simulation accordingly\n corrected_state = self.heliSim.get_limited_state_and_change_state(x_est_update.reshape((1, 8))[0],\n ModelType.GYROMOMENT)\n x_est_update = np.resize(corrected_state, (8, 1))\n\n self.x_estimated_state = x_est_update\n self.cov_matrix = cov_matrix_update\n # print(\"------\")\n # print(cov_matrix_predict)\n return x_est_update",
"def ekf_algorithm(self, u, y):\n x_est_before = self.x_estimated_state\n cov_matrix_before = self.cov_matrix\n # 1. Prediction\n # predict the state by integrate the time continuous system numerically\n sim_state_predict = self.heliSim.calc_step(u[0][0], u[1][0], self.no_disturbance_eval)\n x_est_predict = np.resize(sim_state_predict, (6, 1))\n # predict the new covariance by linearizing and discretizing the model\n Ak, Bk, Ck, Dk = self.get_linear_discrete_matrices(x_est_before, u[0][0], u[1][0])\n cov_matrix_predict = Ak @ cov_matrix_before @ np.transpose(Ak) + Bk @ self.N @ np.transpose(Bk)\n # 2. Update\n # compute kalman gain\n Kl = cov_matrix_predict @ np.transpose(Ck) @ np.linalg.inv(Ck @ cov_matrix_predict @ np.transpose(Ck) + self.W)\n # update state\n y_est = x_est_predict[0:3, ]\n x_est_update = x_est_predict + Kl @ (y - y_est)\n # update covariance matrix (identity matrix must have as many lines as the Kalman gain\n cov_matrix_update = (np.eye(np.size(Kl, 0)) - Kl @ Ck) @ cov_matrix_predict\n\n # check if the update step state needs to be changed because of limit crossing\n # if that is the case, correct the state and set the state of the simulation accordingly\n # heliSim_state = np.resize(x_est_update, (1, 6))[0]\n # heliSim_state = np.pad(heliSim_state, (0, 2), \"constant\")\n corrected_state = self.heliSim.get_limited_state_and_change_state(x_est_update.reshape((1, 6))[0],\n ModelType.EASY)\n x_est_update = np.resize(corrected_state, (6, 1))\n\n self.x_estimated_state = x_est_update\n self.cov_matrix = cov_matrix_update\n return x_est_update",
"def SCB(t,init,rhoS,deltaSC,rhoC,deltaCB,rhoB):\n\n #Declaring useful parameters\n [S0,C0,B0]=init\n lambdaS=deltaSC-rhoS #Introducing global decay rates lambda.\n lambdaC=deltaCB-rhoC\n lambdaB=-rhoB\n\n #Checking for eigenvalues equalitites\n thresh=1e-5 #threshold difference for considering two eignevalues as equal\n bSC=(lambdaC-lambdaS)*(abs(lambdaC-lambdaS)>=thresh)\n bSB=(lambdaB-lambdaS)*(abs(lambdaB-lambdaS)>=thresh)\n bCB=(lambdaB-lambdaC)*(abs(lambdaB-lambdaC)>=thresh)\n\n #S has always the same expression\n S=S0*np.exp(-lambdaS*t)\n\n #there are two cases for C\n if bSC!=0:\n c2=deltaSC*S0/bSC; c1=C0-c2\n C=c1*np.exp(-lambdaC*t)+c2*np.exp(-lambdaS*t)\n\n #there are three subcases for B in that case\n if bCB==0:\n b2=deltaCB*c1; b3=deltaCB*c2/bSB; b1=B0-b3\n B=(b1+b2*t)*np.exp(-lambdaB*t)+b3*np.exp(-lambdaS*t)\n\n elif bSB==0:\n b2=deltaCB*c1/bCB; b3=deltaCB*c2; b1=B0-b2\n B=(b1+b3*t)*np.exp(-lambdaB*t)+b2*np.exp(-lambdaC*t)\n\n else:\n b2=deltaCB*c1/bCB; b3=deltaCB*c2/bSB; b1=B0-b2-b3\n B=b1*np.exp(-lambdaB*t)+b2*np.exp(-lambdaC*t)+b3*np.exp(-lambdaS*t)\n\n else:\n c2=deltaSC*S0\n c1=C0\n C=(c1+c2*t)*np.exp(-lambdaS*t)\n\n #there are two subcases for B in that case\n if bCB!=0:\n b3=deltaCB*c2/bSB; b2=(deltaCB*c1-b3)/bSB; b1=B0-b2\n B=b1*np.exp(-lambdaB*t)+(b2+b3*t)*np.exp(-lambdaC*t)\n\n else:\n b1=B0; b2=deltaCB*c1; b3=deltaCB*c2/2\n B=(b1+b2*t+b3*t**2)*np.exp(-lambdaB*t)\n\n return(np.vstack((S,C,B)))",
"def calc_equil(sst, ft_qv, use_NT=False):\n \n run_main(sst, ft_qv, use_NT)\n \n # grab csv file\n with open('dumpmodel.csv','r') as f:\n df_result=pd.read_csv(f)\n\n # last time step into named tupple\n out=df_result.iloc[-1]\n steady_state=make_tuple(out.to_dict())\n steady_state\n \n # obtain steady-state values\n dth=steady_state.deltheta\n dqt=steady_state.delqv\n thetal_m=steady_state.theta\n qt_m=steady_state.qv\n h=steady_state.h\n press=tf.find_press(steady_state.h) #kPa\n thetal_ft = steady_state.theta + dth\n qt_ft = steady_state.qv + dqt\n zb = steady_state.LCL\n zi = steady_state.h\n we = steady_state.went\n \n # calculate thetal at z = 3000 m (take qt(z = 3000m) = qt(z = h), so delta_qt = dqt)\n gamma = 6e-3 \n thetal_3000 = thetal_ft + gamma*(3000-h)\n LTS = thetal_3000 - steady_state.theta\n \n # calculate delta_Fr\n delta_Frstar = 82.0 # Wm^-2\n Frlambda = 7.9 # Wm^-2, using with CTL from Gesso\n delta_Fr = delta_Frstar - Frlambda*qt_ft*1000 # convert qt_ft to g kg^-1\n\n # calculate LWP\n rho = 1.\n LWP = 0.5*rho*(zi-zb)**2\n \n # put all required variables into output array\n out_array = np.array([thetal_m, qt_m, zi, zb, we, LWP, delta_Fr, LTS, dqt])\n \n return out_array",
"def ekf_algorithm(self, u, y):\n x_est_before = self.x_estimated_state\n cov_matrix_before = self.cov_matrix\n # 1. Prediction\n # predict the state by integrate the time continuous system numerically\n sim_state_predict = self.heliSim.calc_step(u[0][0], u[1][0], self.no_disturbance_eval)\n x_est_predict = np.resize(sim_state_predict, (8, 1))\n # predict the new covariance by linearizing and discretizing the model\n Ak, Bk, Ck, Dk = self.get_linear_discrete_matrices(x_est_before, u[0][0], u[1][0])\n cov_matrix_predict = Ak @ cov_matrix_before @ np.transpose(Ak) + Bk @ self.N @ np.transpose(Bk)\n # 2. Update\n # compute kalman gain\n Kl = cov_matrix_predict @ np.transpose(Ck) @ np.linalg.inv(Ck @ cov_matrix_predict @ np.transpose(Ck) + self.W)\n # update state\n y_est = np.concatenate((x_est_predict[0:3], x_est_predict[6:8]))\n x_est_update = x_est_predict + Kl @ (y - y_est)\n # update covariance matrix (identity matrix must have as many lines as the Kalman gain\n cov_matrix_update = (np.eye(np.size(Kl, 0)) - Kl @ Ck) @ cov_matrix_predict\n\n corrected_state = self.heliSim.get_limited_state_and_change_state(x_est_update.reshape((1, 8))[0],\n ModelType.GYROMOMENT)\n # corrected_state = x_est_update\n x_est_update = np.resize(corrected_state, (8, 1))\n\n self.x_estimated_state = x_est_update\n self.cov_matrix = cov_matrix_update\n # print(\"------\")\n # print(cov_matrix_predict)\n return x_est_update",
"def run_vqe(\n self,\n backend=Aer.get_backend(\"statevector_simulator\"),\n var_form=None,\n optimizer=None,\n reps=None,\n mode=\"min_val\",\n ):\n # N=int(np.ceil(np.log2(len(self.mat))))\n # hk = np.zeros((2**N,2**N),dtype='complex')\n # hk[:self.mat.shape[0], :self.mat.shape[1]] = self.mat\n N = self.n_qubits()\n if mode == \"max_val\":\n Hamil_mat = aqua.operators.MatrixOperator(-1 * self.mat)\n # Hamil_mat = MatrixOperator(-1 * self.mat)\n else:\n Hamil_mat = aqua.operators.MatrixOperator(self.mat)\n # Hamil_mat = MatrixOperator(self.mat)\n Hamil_qop = aqua.operators.op_converter.to_weighted_pauli_operator(\n Hamil_mat\n )\n if var_form is None:\n if reps is None:\n reps = 2\n # reps=5\n from qiskit.circuit.library import EfficientSU2\n\n var_form = EfficientSU2(N, reps=reps)\n if optimizer is None:\n vqe = aqua.algorithms.VQE(Hamil_qop, var_form)\n # vqe = VQE(Hamil_qop, var_form)\n else:\n vqe = aqua.algorithms.VQE(Hamil_qop, var_form, optimizer)\n # vqe = VQE(Hamil_qop, var_form, optimizer)\n vqe_result = vqe.run(backend)\n en = np.real(vqe_result[\"eigenvalue\"])\n # params=vqe.optimal_params\n # circuit=vqe.construct_circuit(params)\n if mode == \"max_val\":\n en = -1 * en\n # states = np.sort(\n # np.real(\n # vqe.expectation.convert(\n # StateFn(vqe.operator, is_measurement=True)\n # ).to_matrix()\n # )\n # )\n return en, vqe_result, vqe",
"def escE(self) :\n #self.logdebug(\"RESET\")\n self.resets += 1",
"def test_simulate_discrete_seiiir_deterministic():\n N = 1000000\n y0={'S': N, 'E': 0, 'I1': 1, 'I2': 0, 'I3':0, 'R': 0}\n sigma = 1/5\n gamma = 1/4\n beta = 1\n rho = 1\n report_delay = 7\n tf=10\n\n results = simulate_discrete_seiiir_deterministic(y0, tf, beta=beta,\n sigma=sigma, gamma=gamma,\n rho=rho, N=N,\n report_delay=report_delay,\n tx=None)\n\n C = results.cumulative_reported_cases.values\n Cdates = results.cumulative_reported_cases.dates\n SEIIIR = results.SEIIIR\n \n assert len(Cdates) == len(SEIIIR.dates)+1\n delta = Cdates[1] - SEIIIR.dates[0]\n assert delta.days == report_delay\n assert SEIIIR.S[0] == 1000000\n assert SEIIIR.E[0] == 0\n assert SEIIIR.I1[0] == 1\n assert SEIIIR.I2[0] == 0\n assert SEIIIR.I3[0] == 0\n assert SEIIIR.R[0] == 0\n assert SEIIIR.transmissions[0] == 1\n assert C[0] == 0\n\n assert SEIIIR.S[1] == 1000000-1\n assert SEIIIR.E[1] == 1\n assert SEIIIR.I1[1] == pytest.approx(0.25)\n assert SEIIIR.I2[1] == pytest.approx(0.75)\n assert SEIIIR.I3[1] == 0\n assert SEIIIR.R[1] == 0\n assert SEIIIR.transmissions[1] == pytest.approx(1.0*999999/1000000)\n\n expS = [1000000, 999999.0, 999998.000001, 999996.8000034, 999995.661882242, 999994.3121701872, 999992.5550166771, 999990.288982791, 999987.4325757417, 999983.8563387465]\n expE = [0, 1.0, 1.799999, 2.6399968000011995, 3.2501185980054688, 3.949806933169381, 4.916999056689872, 6.199633131502013, 7.8161135545123095, 9.829127838741329]\n expI1 = [1, 0.25, 0.2625, 0.4256247999999999, 0.63440556000024, 0.808625109601154, 0.9921176640341648, 1.2314292273465157, 1.5477839331370316, 1.9501686941867198]\n expI2 = [0, 0.75, 0.375, 0.290625, 0.3918748499999999, 0.5737728825001799, 0.7499120528259104, 0.9315662612321013, 1.1564634858179121, 1.4499538213072518]\n expI3 = [0, 0.0, 0.5625, 0.421875, 0.32343750000000004, 0.37476551249999995, 0.5240210400001349, 0.6934392996194666, 0.8720345208289425, 1.0853562445706697]\n expR = [0, 0.0, 0.0, 0.421875, 0.73828125, 0.9808593750000001, 1.261933509375, 1.654949289375101, 2.175028764089701, 2.829054654711408]\n expT = [1.0, 0.999999, 1.1999976000011998, 1.1381211580045094, 1.349712054765006, 1.7571535101543665, 2.2660338861501166, 2.856407049310699, 3.5762369951314814, 4.485406348014979]\n expC = [0, 1.0, 1.9999989999999999, 3.1999966000011995, 4.338117758005708, 5.6878298127707145, 7.444983322925081, 9.711017209075198, 12.567424258385897, 16.14366125351738, 20.62906760153236]\n\n np.testing.assert_allclose(np.asarray(expS), np.asarray(SEIIIR.S))\n np.testing.assert_allclose(np.asarray(expE), np.asarray(SEIIIR.E))\n np.testing.assert_allclose(np.asarray(expI1), np.asarray(SEIIIR.I1))\n np.testing.assert_allclose(np.asarray(expI2), np.asarray(SEIIIR.I2))\n np.testing.assert_allclose(np.asarray(expI3), np.asarray(SEIIIR.I3))\n np.testing.assert_allclose(np.asarray(expR), np.asarray(SEIIIR.R))\n np.testing.assert_allclose(np.asarray(expT), np.asarray(SEIIIR.transmissions))\n np.testing.assert_allclose(np.asarray(expC), np.asarray(results.cumulative_reported_cases.values))\n\n # test the reporting factor\n rho = 8\n results = simulate_discrete_seiiir_deterministic(y0, tf, beta, sigma, gamma, rho, N, report_delay, tx=None)\n SEIIIR = results.SEIIIR\n np.testing.assert_allclose(np.asarray(expS), np.asarray(SEIIIR.S))\n np.testing.assert_allclose(np.asarray(expE), np.asarray(SEIIIR.E))\n np.testing.assert_allclose(np.asarray(expI1), np.asarray(SEIIIR.I1))\n np.testing.assert_allclose(np.asarray(expI2), np.asarray(SEIIIR.I2))\n np.testing.assert_allclose(np.asarray(expI3), 
np.asarray(SEIIIR.I3))\n np.testing.assert_allclose(np.asarray(expR), np.asarray(SEIIIR.R))\n np.testing.assert_allclose(np.asarray(expT), np.asarray(SEIIIR.transmissions))\n np.testing.assert_allclose(1/8*np.asarray(expC), np.asarray(results.cumulative_reported_cases.values))",
"def test_simulate_discrete_seiiir_deterministic_tx():\n N = 1000000\n y0={'S': N, 'E': 0, 'I1': 0, 'I2': 0, 'I3':0, 'R': 0}\n sigma = 1/5\n gamma = 1/4\n beta = 1\n rho = 1\n report_delay = 7\n tf=30\n tx = [0]*tf\n tx[10] = 1\n \n results = simulate_discrete_seiiir_deterministic(y0, tf, beta=beta,\n sigma=sigma, gamma=gamma,\n rho=rho, N=N,\n report_delay=report_delay,\n tx=tx)\n Cdates = results.cumulative_reported_cases.dates\n C = results.cumulative_reported_cases.values\n SEIIIR = results.SEIIIR\n\n assert len(Cdates) == len(SEIIIR.dates)+1\n delta = Cdates[1] - SEIIIR.dates[0]\n assert delta.days == report_delay\n assert SEIIIR.S[0] == 1000000\n assert SEIIIR.E[0] == 0\n assert SEIIIR.I1[0] == 0\n assert SEIIIR.I2[0] == 0\n assert SEIIIR.I3[0] == 0\n assert SEIIIR.R[0] == 0\n assert SEIIIR.transmissions[0] == 0\n assert C[0] == 0\n\n assert SEIIIR.transmissions[10] == 1\n assert SEIIIR.S[11] == 1000000-1\n assert SEIIIR.E[11] == 1\n assert SEIIIR.I1[11] == 0\n assert SEIIIR.I2[11] == 0\n assert SEIIIR.I3[11] == 0\n assert SEIIIR.R[11] == 0\n\n\n assert SEIIIR.transmissions[11] == 0\n assert SEIIIR.S[12] == 1000000-1\n assert SEIIIR.E[12] == pytest.approx(0.8)\n assert SEIIIR.I1[12] == pytest.approx(0.2)\n assert SEIIIR.I2[12] == 0\n assert SEIIIR.I3[12] == 0\n assert SEIIIR.R[12] == 0\n\n t = 0.2*999999/1000000\n assert SEIIIR.transmissions[12] == pytest.approx(t)\n assert SEIIIR.S[13] == pytest.approx(1000000 - 1 - t)\n assert SEIIIR.E[13] == pytest.approx(0.8 - 0.8*1/5 + 0.2)\n assert SEIIIR.I1[13] == pytest.approx(0.2 + 0.8*1/5 - 0.2*3*1/4)\n assert SEIIIR.I2[13] == pytest.approx(0.2*3*1/4)\n assert SEIIIR.I3[13] == 0\n assert SEIIIR.R[13] == 0\n\n expT = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.1999998, 0.35999956800007205, 0.5279991363203962, 0.6500234843491176, 0.7899611612071538, 0.9833997294942862, 1.2399269425355373, 1.5632238454281848, 1.9658282341630997, 2.4697471508748774, 3.103237816906961, 3.9001484736462553, 4.902027026250278, 6.161128231301663, 7.743451993146161, 9.732090520466189, 12.231436853410257, 15.372620496106643]\n expS = [1000000, 1000000.0, 1000000.0, 1000000.0, 1000000.0, 1000000.0, 1000000.0, 1000000.0, 1000000.0, 1000000.0, 1000000.0, 999999.0, 999999.0, 999998.8000002, 999998.4400006321, 999997.9120014957, 999997.2619780113, 999996.4720168501, 999995.4886171207, 999994.2486901782, 999992.6854663327, 999990.7196380985, 999988.2498909476, 999985.1466531307, 999981.246504657, 999976.3444776307, 999970.1833493994, 999962.4398974063, 999952.7078068858, 999940.4763700324]\n expE = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.0, 0.8, 0.8399998000000001, 1.0319994080000723, 1.353598662720454, 1.7329024145254808, 2.1762830928275383, 2.724426203756317, 3.419467905540591, 4.298798169860658, 5.404866770051626, 6.793640566916178, 8.538150270439903, 10.730668689998177, 13.48656197824882, 16.95037781390072, 21.303754244266738, 26.77509391587958, 33.651511986113924]\n expI1 = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.2, 0.21000000000000002, 0.22049996, 0.2615248716000145, 0.3361009504440945, 0.4306057205161198, 0.5429080486945377, 0.6806122529248979, 0.8540466443393429, 1.0732712950569674, 1.349291177774567, 1.6960509078268777, 2.1316427810447003, 2.6790444332608105, 3.367073503964967, 4.2318439387713855, 5.3187118335461925, 6.6846967415624645]\n expI2 = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.15000000000000002, 0.19500000000000003, 0.21412497000000005, 0.2496748962000109, 0.3144944368830736, 0.4015778996078582, 0.5075755114228678, 0.6373530675493905, 
0.7998732501418546, 1.0049217838281892, 1.2631988292879723, 1.5878378881921515, 1.9956915578315628, 2.508206214403499, 3.1523566815746, 3.9619721244721893, 4.979526906277692]\n expI3 = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.11250000000000002, 0.17437500000000003, 0.20418747750000005, 0.2383030415250082, 0.2954465880435573, 0.375045071716783, 0.4744429014963466, 0.5966255260361295, 0.7490613191154234, 0.9409566676499976, 1.1826382888784785, 1.4865379883637333, 1.8684031654646056, 2.3482554521687757, 2.9513313742231433, 3.709311936909928]\n expR = [0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.084375, 0.21515625000000002, 0.368296858125, 0.5470241392687561, 0.768609080301424, 1.0498928840890112, 1.405725060211271, 1.853194204738368, 2.4149901940749356, 3.1207076948124337, 4.007686411471292, 5.122589902744092, 6.523892276842547, 8.28508386596913, 10.498582396636486]\n expC = [0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.1999998, 1.559999368000072, 2.0879985043204683, 2.738021988669586, 3.5279831498767398, 4.511382879371026, 5.751309821906563, 7.314533667334748, 9.280361901497848, 11.750109052372725, 14.853346869279687, 18.75349534292594, 23.655522369176218, 29.81665060047788, 37.560102593624045, 47.29219311409023, 59.52362996750049, 74.89625046360713]\n\n np.testing.assert_allclose(np.asarray(expT), np.asarray(SEIIIR.transmissions))\n np.testing.assert_allclose(np.asarray(expS), np.asarray(SEIIIR.S))\n np.testing.assert_allclose(np.asarray(expE), np.asarray(SEIIIR.E))\n np.testing.assert_allclose(np.asarray(expI1), np.asarray(SEIIIR.I1))\n np.testing.assert_allclose(np.asarray(expI2), np.asarray(SEIIIR.I2))\n np.testing.assert_allclose(np.asarray(expI3), np.asarray(SEIIIR.I3))\n np.testing.assert_allclose(np.asarray(expR), np.asarray(SEIIIR.R))\n np.testing.assert_allclose(np.asarray(expC), np.asarray(C))\n\n # test the reporting factor\n rho = 8\n results = simulate_discrete_seiiir_deterministic(y0, tf, beta, sigma, gamma, rho, N, report_delay, tx=tx)\n SEIIIR = results.SEIIIR\n np.testing.assert_allclose(np.asarray(expT), np.asarray(SEIIIR.transmissions))\n np.testing.assert_allclose(np.asarray(expS), np.asarray(SEIIIR.S))\n np.testing.assert_allclose(np.asarray(expE), np.asarray(SEIIIR.E))\n np.testing.assert_allclose(np.asarray(expI1), np.asarray(SEIIIR.I1))\n np.testing.assert_allclose(np.asarray(expI2), np.asarray(SEIIIR.I2))\n np.testing.assert_allclose(np.asarray(expI3), np.asarray(SEIIIR.I3))\n np.testing.assert_allclose(np.asarray(expR), np.asarray(SEIIIR.R))\n np.testing.assert_allclose(1/8*np.asarray(expC), np.asarray(results.cumulative_reported_cases.values))",
"def update(self, state_value, current_time):\r\n\r\n\t\t# Calculate Error - if SetPoint > 0.0, then normalize error with respect to setpoint\r\n\t\tif self.SetPoint==0.0:\r\n\t\t\terror = state_value - self.SetPoint\r\n\t\telse:\r\n\t\t\terror = (state_value - self.SetPoint)/self.SetPoint \r\n\t\t\r\n\t\tself.current_time = current_time/1000.0 \t\t# Converting from msec to sec\r\n\t\tdelta_time = self.Ts\r\n\t\tdelta_error = error - self.last_error\r\n\r\n\t\tself.ITerm += error * delta_time\r\n\t\t\r\n\t\tself.DTerm = 0.0\r\n\t\tif delta_time > 0:\r\n\t\t\tself.DTerm = delta_error / delta_time\r\n\r\n\t\t# Remember last time and last error for next calculation\r\n\t\tself.last_time = self.current_time\r\n\t\tself.last_error = error\r\n\t\t\r\n\t\t# Calculate u(t) - catch potential division by zero error\r\n\t\ttry:\r\n\t\t\tu = self.Kp * (error + ((1.0/self.Ti) * self.ITerm) + (self.Td * self.DTerm))\r\n\t\texcept ZeroDivisionError:\r\n\t\t\tu = self.Kp * (error + (0.0 * self.ITerm) + (self.Td * self.DTerm))\r\n\t\t\t\t\r\n\t\t# Bound the controller output if necessary (between MinValue - MaxValue) \r\n\t\tif u > self.MaxValue:\r\n\t\t\tself.OutputValue = self.MaxValue\r\n\t\t\tself.ITerm -= error * delta_time \t# Back-calculate the integral error\r\n\t\telif u < self.MinValue:\r\n\t\t\tself.OutputValue = self.MinValue\r\n\t\t\tself.ITerm -= error * delta_time \t# Back-calculate the integral error\r\n\t\telse:\r\n\t\t\tself.OutputValue = u\r\n\t\t\r\n\t\t# Update the last output value\r\n\t\tself.last_OutputValue = self.OutputValue\r\n\t\t\r\n\t\t# Record state, error, y(t), and sample time values\r\n\t\tself.state_history.append(state_value)\r\n\t\tself.error_history.append(error)\r\n\t\tself.output_history.append(self.OutputValue)\r\n\t\tself.sample_times.append(current_time/1000)\t\t# Convert from msec to sec\r\n\t\t\r\n\t\t# Return controller output\r\n\t\treturn self.OutputValue",
"def tcs2(self):\n S = self.M.allocState({})\n self.M.propagate(S, 0, 1)\n\n # set initial beam data\n S.ref_IonZ = self.refIonZ\n S.IonZ = self.IonZ\n\n S.moment0 = self.BC0\n S.moment1 = self.ENV0\n\n S.ref_IonEk = self.refIonEk\n \n S.phis = S.moment0[PS_S,:]\n S.IonEk = S.moment0[PS_PS,:]*MeVtoeV + S.ref_IonEk\n\n ### Reconstract data\n U = collections.OrderedDict()\n T = collections.OrderedDict()\n \n U,T = self.save(U, T, S)\n\n\n #S.clng = self.clng\n\n fin = len(self.M)\n\n H = self.M.propagate(S, 1, fin, observe=range(fin))\n \n ### Reconstract data\n U = collections.OrderedDict()\n T = collections.OrderedDict()\n\n for i in range(fin-1):\n U,T = self.save(U, T, H[i][1])\n\n return U,T",
"def calc_saturation_curves(self):\n HEOS = CP.AbstractState(self.additional_backend, self.fluid)\n PCSAFT = CP.AbstractState(self.backend, self.fluid)\n self.dictL, self.dictV = {}, {}\n for Q, dic in zip([0, 1], [self.dictL, self.dictV]):\n # rhomolar, smolar, hmolar, T, p, umolar = [], [], [], [], [], []\n rhomolar, T, p = [], [], []\n for _T in np.logspace(np.log10(HEOS.keyed_output(CP.iT_triple)), np.log10(HEOS.keyed_output(CP.iT_critical)), 500):\n try:\n PCSAFT.update(CP.QT_INPUTS, Q, _T)\n # print('T', PCSAFT.T())\n # print('p', PCSAFT.p())\n # print('rhomolar', PCSAFT.rhomolar())\n if (PCSAFT.p() < 0): raise ValueError('P is negative:' + str(PCSAFT.p()))\n PCSAFT.T(), PCSAFT.p(), PCSAFT.rhomolar()\n # PCSAFT.hmolar(), PCSAFT.smolar(), PCSAFT.umolar()\n\n T.append(PCSAFT.T())\n p.append(PCSAFT.p())\n rhomolar.append(PCSAFT.rhomolar())\n # hmolar.append(PCSAFT.hmolar())\n # smolar.append(PCSAFT.smolar())\n # umolar.append(PCSAFT.umolar())\n except ValueError as VE:\n myprint(1, 'satT error:', VE, '; T:', '{T:0.16g}'.format(T=_T), 'T/Tc:', _T / HEOS.keyed_output(CP.iT_critical))\n\n dic.update(dict(T=np.array(T),\n P=np.array(p),\n Dmolar=np.array(rhomolar)))\n # Hmolar=np.array(hmolar),\n # Smolar=np.array(smolar)))\n # Umolar=np.array(umolar)))",
"def iterate_value(self):\n self.V = np.zeros(self.stateCount)\n for i in range(self.maxIter):\n last_V = np.copy(self.V)\n for state_index in range(self.stateCount):\n current_state = self.env.statespace[state_index]\n for action in self.env.actionspace:\n next_state = self.env.next_state(current_state,action)\n reward = self.env.compute_reward(next_state)\n next_state_index = self.env.stateDict[next_state]\n self.Q[state_index][action] = reward + self.gamma*last_V[next_state_index]\n if self.mode == 'debug':\n print(\"Q(s={}):{}\".format(current_state,self.Q[state_index]))\n self.V[state_index] = max(self.Q[state_index])\n if np.sum(np.fabs(last_V - self.V)) <= self.th:\n print (\"Convergene Achieved in {}th iteration. \"\n \"Breaking V_Iteration loop!\".format(i))\n break",
"def piston_control(self):\n # At the beginning it is necessary to set some variables\n t_last = 0 # time of the last cycle\n inhale_end = time.time() - 1 # End of the last inhale\n self.cd[\"exhale_duration\"] = 1\n self.cd[\"inhale_duration\"] = 1\n #now = time.time()\n VCV_stage = 0\n PCV_stage = 0\n PSV_stage = 0\n emergency_contained = False\n\n # Gets the current volume and pressure before starting the cycles. If this doesn't work and \n # takes too long, there is probably some problem with the sensors\n t_P, P = (None, None)\n t_V, V = (None, None)\n P_V_t_limit = 5\n first_P_V = time.time()\n while P == None and V == None:\n if not self.prs.empty():\n t_P, P = self.prs.get()\n if not self.vol.empty():\n t_V, V = self.vol.get()\n if time.time() - first_P_V > P_V_t_limit:\n print(\"Took too long to receive new values of P or V from the queues\")\n # TODO Raise exception, error or return in this condition\n\n while True:\n # Gets the newest data and empties que queues. If there was no data, uses the values of \n # pressure or volume that it already has\n if not self.prs.empty():\n t_P, P = self.prs.get()\n while not self.prs.empty(): # Emptying the queue, only the most recent info is used\n dump = self.prs.get()\n\n if not self.vol.empty():\n t_V, V = self.vol.get()\n while not self.vol.empty(): # Emptying the queue, only the most recent info is used\n dump = self.vol.get()\n\n # TODO Needs to be obtained from the interface or defined in a configuration by the user\n T_inh_max = 60. / self.gui[\"VCV_frequency_spb\"].value() / 2\n\n if self.mode == 1: # 'VCV'\n \"\"\"\n This mode has 3 stages:\n 0 - Wait\n 1 - Inhale\n 2 - Exhale\n \"\"\"\n period = 60. / self.gui[\"VCV_frequency_spb\"].value()\n T_inh_max = period / 2\n if VCV_stage == 0: \n self.piston.stop()\n # If it's time for a new cycle, volume and pressure are within limits\n if (time.time() - t_last > period\n and V < self.gui[\"VCV_volume_spb\"].value()\n and P < self.gui[\"VCV_pressure_max_spb\"].value()):\n VCV_stage = 1\n inhale_start = time.time()\n # It is possible to calculate how long the last exhale took\n self.cd[\"exhale_duration\"] = inhale_start - inhale_end\n\n if VCV_stage == 1:\n # Checks if the current pressure is above P_max\n if P >= self.gui[\"VCV_pressure_max_spb\"].value():\n print(\"Pressure is too high during VCV cycle!\")\n self.piston.stop()\n # Checks if it reached the maximum inhale t\n elif time.time() - inhale_start >= T_inh_max:\n print(f\"VCV cycle is too long: {time.time() - inhale_start:.2f} s\")\n self.piston.stop()\n VCV_stage = 2\n inhale_end = time.time()\n # Checks whether the piston reached the bottom\n # TODO Define what happens in this case\n elif self.piston.piston_at_bottom:\n print(\"Reached max piston travel\")\n self.piston.stop()\n VCV_stage = 2\n inhale_end = time.time()\n # Checks if the current volume is above target\n # TODO Implement margin in options\n elif V >= self.gui[\"VCV_volume_spb\"].value() * 0.9:\n print(\"Reached target volume\")\n self.piston.stop()\n VCV_stage = 2 \n inhale_end = time.time()\n # if none of the previous limitations occured, may move the piston\n else:\n self.piston.pst_down()\n\n if VCV_stage == 2:\n # While the piston still hasn't reached the top\n # TODO Put timeout in piston raise time\n if not self.piston.piston_at_top and time.time() - t_last > period:\n self.piston.pst_up()\n else:\n self.piston.stop()\n VCV_stage = 0\n # Saves the last inhale start time to calculate when a new one should start\n t_last = inhale_start\n # It is possible to 
calculate how long the last inhale took\n self.cd[\"inhale_duration\"] = inhale_end - inhale_start\n\n elif self.mode == 2: # 'PCV'\n \"\"\"\n This mode has 3 stages:\n 0 - Wait\n 1 - Inhale\n 2 - Exhale\n \"\"\" \n period = 60. / self.gui[\"PCV_frequency_spb\"].value()\n T_inh_max = period / 2\n if PCV_stage == 0: \n self.piston.stop()\n # If it's time for a new cycle, volume and pressure are within limits\n if (time.time() - t_last > period\n and V < self.gui[\"PCV_volume_max_spb\"].value()\n and P < self.gui[\"PCV_pressure_spb\"].value()):\n PCV_stage = 1\n inhale_start = time.time()\n # It is possible to calculate how long the last exhale took\n self.cd[\"exhale_duration\"] = inhale_start - inhale_end\n\n if PCV_stage == 1:\n # Checks if the current volume is above max\n if V >= self.gui[\"PCV_volume_max_spb\"].value():\n print(\"Volume is too high during PCV cycle!\")\n self.piston.stop()\n # Checks if it reached the maximum inhale t\n elif time.time() - inhale_start >= T_inh_max:\n print(f\"PCV cycle is too long: {time.time() - inhale_start:.2f} s\")\n self.piston.stop()\n PCV_stage = 2\n inhale_end = time.time()\n # Checks whether the piston reached the bottom\n elif self.piston.piston_at_bottom:\n print(\"Reached max piston travel\")\n self.piston.stop()\n PCV_stage = 2\n inhale_end = time.time()\n # Checks if the current pressure is above target\n elif P >= self.gui[\"PCV_pressure_spb\"].value():\n print(\"Reached target pressure\")\n self.piston.stop()\n PCV_stage = 2 \n inhale_end = time.time()\n # if none of the previous limitations occured, may move the piston\n else:\n self.piston.pst_down()\n\n if PCV_stage == 2:\n # While the piston still hasn't reached the top\n if not self.piston.piston_at_top and time.time() - t_last > period:\n self.piston.pst_up()\n else:\n self.piston.stop()\n PCV_stage = 0\n # Saves the last inhale start time to calculate when a new one should start\n t_last = inhale_start\n # It is possible to calculate how long the last inhale took\n self.cd[\"inhale_duration\"] = inhale_end - inhale_start\n\n elif self.mode == 3: # 'PSV'\n \"\"\"\n This mode has 3 stages:\n 0 - Wait for inhale, P < threshold\n 1 - Inhale\n 2 - Exhale\n \"\"\"\n if PSV_stage == 0:\n self.piston.stop()\n # If the pressure is below the threshold, time to inhale\n if P < self.gui[\"PSV_sensitivity_spb\"].value():\n PSV_stage = 1\n inhale_start = time.time()\n # It is possible to calculate how long the last exhale took\n self.cd[\"exhale_duration\"] = inhale_start - inhale_end\n\n if PSV_stage == 1:\n # Checks if the current pressure is close to P_target\n if P >= self.gui[\"PSV_pressure_spb\"].value():\n print(\"Pressure reached target.\")\n self.piston.stop()\n PSV_stage = 2\n inhale_end = time.time()\n elif self.piston.piston_at_bottom:\n print(\"Reached max piston travel.\")\n self.piston.stop()\n PSV_stage = 2\n inhale_end = time.time()\n # if none of the previous limitations occured, may move the piston\n else:\n self.piston.pst_down()\n \n if PSV_stage == 2:\n # While the piston still hasn't reached the top\n if not self.piston.piston_at_top:\n self.piston.pst_up()\n else:\n self.piston.stop()\n PSV_stage = 0\n # Saves the last inhale start time to calculate when a new one should start\n t_last = inhale_start\n # It is possible to calculate how long the last inhale took\n self.cd[\"inhale_duration\"] = inhale_end - inhale_start\n\n # Emergency mode\n elif self.mode == 4: # 'Emergency'\n if not emergency_contained:\n self.piston.emergency()\n emergency_contained = True\n else:\n 
self.piston.stop()\n\n\n else: # Stop\n self.piston.stop()\n\n # Finds the indexes of data from the last cycle for flow and pressure\n # i_flw = np.where(time.time() - self.flw_data[0, :] < last_cycle_dur)[0]\n # i_prs = np.where(time.time() - self.prs_data[0, :] < last_cycle_dur)[0]\n \n # Sends the maximum pressure and volume in the last cycle to the interface\n self.cd[\"IE_ratio\"] = self.cd[\"exhale_duration\"] / self.cd[\"inhale_duration\"]\n # Saving the data for the GUI update\n # self.cd[\"peak_pressure\"] = peak_prs\n # self.cd[\"tidal_volume\"] = peak_vol\n self.signal_cycle_data.emit(self.cd)\n\n time.sleep(0.05)",
"def integrate_idemix_kernel(state):\n vs = state.variables\n settings = state.settings\n\n a_tri, b_tri, c_tri, d_tri, delta = (allocate(state.dimensions, (\"xt\", \"yt\", \"zt\"))[2:-2, 2:-2] for _ in range(5))\n forc = allocate(state.dimensions, (\"xt\", \"yt\", \"zt\"))\n maxE_iw = allocate(state.dimensions, (\"xt\", \"yt\", \"zt\"))\n\n \"\"\"\n forcing by EKE dissipation\n \"\"\"\n if settings.enable_eke:\n forc = vs.eke_diss_iw\n\n else: # shortcut without EKE model\n forc = vs.K_diss_gm + vs.K_diss_h - vs.P_diss_skew\n\n if settings.enable_store_cabbeling_heat:\n forc += -vs.P_diss_hmix - vs.P_diss_iso\n\n if settings.enable_eke and (settings.enable_eke_diss_bottom or settings.enable_eke_diss_surfbot):\n \"\"\"\n vertically integrate EKE dissipation and inject at bottom and/or surface\n \"\"\"\n a_loc = npx.sum(vs.dzw[npx.newaxis, npx.newaxis, :-1] * forc[:, :, :-1] * vs.maskW[:, :, :-1], axis=2)\n a_loc += 0.5 * forc[:, :, -1] * vs.maskW[:, :, -1] * vs.dzw[-1]\n\n forc = update(forc, at[...], 0.0)\n\n ks = npx.maximum(0, vs.kbot[2:-2, 2:-2] - 1)\n mask = ks[:, :, npx.newaxis] == npx.arange(settings.nz)[npx.newaxis, npx.newaxis, :]\n if settings.enable_eke_diss_bottom:\n forc = update(\n forc,\n at[2:-2, 2:-2, :],\n npx.where(\n mask, a_loc[2:-2, 2:-2, npx.newaxis] / vs.dzw[npx.newaxis, npx.newaxis, :], forc[2:-2, 2:-2, :]\n ),\n )\n else:\n forc = update(\n forc,\n at[2:-2, 2:-2, :],\n npx.where(\n mask,\n settings.eke_diss_surfbot_frac\n * a_loc[2:-2, 2:-2, npx.newaxis]\n / vs.dzw[npx.newaxis, npx.newaxis, :],\n forc[2:-2, 2:-2, :],\n ),\n )\n forc = update(\n forc,\n at[2:-2, 2:-2, -1],\n (1.0 - settings.eke_diss_surfbot_frac) * a_loc[2:-2, 2:-2] / (0.5 * vs.dzw[-1]),\n )\n\n \"\"\"\n forcing by bottom friction\n \"\"\"\n if not settings.enable_store_bottom_friction_tke:\n forc = forc + vs.K_diss_bot\n\n \"\"\"\n prevent negative dissipation of IW energy\n \"\"\"\n maxE_iw = npx.maximum(0.0, vs.E_iw[:, :, :, vs.tau])\n\n \"\"\"\n vertical diffusion and dissipation is solved implicitly\n \"\"\"\n _, water_mask, edge_mask = utilities.create_water_masks(vs.kbot[2:-2, 2:-2], settings.nz)\n\n delta = update(\n delta,\n at[:, :, :-1],\n settings.dt_tracer\n * settings.tau_v\n / vs.dzt[npx.newaxis, npx.newaxis, 1:]\n * 0.5\n * (vs.c0[2:-2, 2:-2, :-1] + vs.c0[2:-2, 2:-2, 1:]),\n )\n delta = update(delta, at[:, :, -1], 0.0)\n a_tri = update(\n a_tri, at[:, :, 1:-1], -delta[:, :, :-2] * vs.c0[2:-2, 2:-2, :-2] / vs.dzw[npx.newaxis, npx.newaxis, 1:-1]\n )\n a_tri = update(a_tri, at[:, :, -1], -delta[:, :, -2] / (0.5 * vs.dzw[-1:]) * vs.c0[2:-2, 2:-2, -2])\n b_tri = update(\n b_tri,\n at[:, :, 1:-1],\n 1\n + delta[:, :, 1:-1] * vs.c0[2:-2, 2:-2, 1:-1] / vs.dzw[npx.newaxis, npx.newaxis, 1:-1]\n + delta[:, :, :-2] * vs.c0[2:-2, 2:-2, 1:-1] / vs.dzw[npx.newaxis, npx.newaxis, 1:-1]\n + settings.dt_tracer * vs.alpha_c[2:-2, 2:-2, 1:-1] * maxE_iw[2:-2, 2:-2, 1:-1],\n )\n b_tri = update(\n b_tri,\n at[:, :, -1],\n 1\n + delta[:, :, -2] / (0.5 * vs.dzw[-1:]) * vs.c0[2:-2, 2:-2, -1]\n + settings.dt_tracer * vs.alpha_c[2:-2, 2:-2, -1] * maxE_iw[2:-2, 2:-2, -1],\n )\n b_tri_edge = (\n 1\n + delta / vs.dzw * vs.c0[2:-2, 2:-2, :]\n + settings.dt_tracer * vs.alpha_c[2:-2, 2:-2, :] * maxE_iw[2:-2, 2:-2, :]\n )\n c_tri = update(\n c_tri, at[:, :, :-1], -delta[:, :, :-1] / vs.dzw[npx.newaxis, npx.newaxis, :-1] * vs.c0[2:-2, 2:-2, 1:]\n )\n d_tri = update(d_tri, at[...], vs.E_iw[2:-2, 2:-2, :, vs.tau] + settings.dt_tracer * forc[2:-2, 2:-2, :])\n d_tri_edge = (\n d_tri + settings.dt_tracer * 
vs.forc_iw_bottom[2:-2, 2:-2, npx.newaxis] / vs.dzw[npx.newaxis, npx.newaxis, :]\n )\n d_tri = update_add(d_tri, at[:, :, -1], settings.dt_tracer * vs.forc_iw_surface[2:-2, 2:-2] / (0.5 * vs.dzw[-1:]))\n\n sol = utilities.solve_implicit(\n a_tri, b_tri, c_tri, d_tri, water_mask, b_edge=b_tri_edge, d_edge=d_tri_edge, edge_mask=edge_mask\n )\n vs.E_iw = update(vs.E_iw, at[2:-2, 2:-2, :, vs.taup1], npx.where(water_mask, sol, vs.E_iw[2:-2, 2:-2, :, vs.taup1]))\n\n \"\"\"\n store IW dissipation\n \"\"\"\n vs.iw_diss = vs.alpha_c * maxE_iw * vs.E_iw[..., vs.taup1]\n\n \"\"\"\n add tendency due to lateral diffusion\n \"\"\"\n flux_east = allocate(state.dimensions, (\"xt\", \"yt\", \"zt\"))\n flux_north = allocate(state.dimensions, (\"xt\", \"yt\", \"zt\"))\n flux_top = allocate(state.dimensions, (\"xt\", \"yt\", \"zt\"))\n\n if settings.enable_idemix_hor_diffusion:\n flux_east = update(\n flux_east,\n at[:-1, :, :],\n settings.tau_h\n * 0.5\n * (vs.v0[1:, :, :] + vs.v0[:-1, :, :])\n * (vs.v0[1:, :, :] * vs.E_iw[1:, :, :, vs.tau] - vs.v0[:-1, :, :] * vs.E_iw[:-1, :, :, vs.tau])\n / (vs.cost[npx.newaxis, :, npx.newaxis] * vs.dxu[:-1, npx.newaxis, npx.newaxis])\n * vs.maskU[:-1, :, :],\n )\n\n flux_north = update(\n flux_north,\n at[:, :-1, :],\n settings.tau_h\n * 0.5\n * (vs.v0[:, 1:, :] + vs.v0[:, :-1, :])\n * (vs.v0[:, 1:, :] * vs.E_iw[:, 1:, :, vs.tau] - vs.v0[:, :-1, :] * vs.E_iw[:, :-1, :, vs.tau])\n / vs.dyu[npx.newaxis, :-1, npx.newaxis]\n * vs.maskV[:, :-1, :]\n * vs.cosu[npx.newaxis, :-1, npx.newaxis],\n )\n flux_north = update(flux_north, at[:, -1, :], 0.0)\n vs.E_iw = update_add(\n vs.E_iw,\n at[2:-2, 2:-2, :, vs.taup1],\n settings.dt_tracer\n * vs.maskW[2:-2, 2:-2, :]\n * (\n (flux_east[2:-2, 2:-2, :] - flux_east[1:-3, 2:-2, :])\n / (vs.cost[npx.newaxis, 2:-2, npx.newaxis] * vs.dxt[2:-2, npx.newaxis, npx.newaxis])\n + (flux_north[2:-2, 2:-2, :] - flux_north[2:-2, 1:-3, :])\n / (vs.cost[npx.newaxis, 2:-2, npx.newaxis] * vs.dyt[npx.newaxis, 2:-2, npx.newaxis])\n ),\n )\n\n \"\"\"\n add tendency due to advection\n \"\"\"\n if settings.enable_idemix_superbee_advection:\n flux_east, flux_north, flux_top = advection.adv_flux_superbee_wgrid(state, vs.E_iw[:, :, :, vs.tau])\n\n if settings.enable_idemix_upwind_advection:\n flux_east, flux_north, flux_top = advection.adv_flux_upwind_wgrid(state, vs.E_iw[:, :, :, vs.tau])\n\n if settings.enable_idemix_superbee_advection or settings.enable_idemix_upwind_advection:\n vs.dE_iw = update(\n vs.dE_iw,\n at[2:-2, 2:-2, :, vs.tau],\n vs.maskW[2:-2, 2:-2, :]\n * (\n -(flux_east[2:-2, 2:-2, :] - flux_east[1:-3, 2:-2, :])\n / (vs.cost[npx.newaxis, 2:-2, npx.newaxis] * vs.dxt[2:-2, npx.newaxis, npx.newaxis])\n - (flux_north[2:-2, 2:-2, :] - flux_north[2:-2, 1:-3, :])\n / (vs.cost[npx.newaxis, 2:-2, npx.newaxis] * vs.dyt[npx.newaxis, 2:-2, npx.newaxis])\n ),\n )\n vs.dE_iw = update_add(vs.dE_iw, at[:, :, 0, vs.tau], -flux_top[:, :, 0] / vs.dzw[0:1])\n vs.dE_iw = update_add(\n vs.dE_iw,\n at[:, :, 1:-1, vs.tau],\n -(flux_top[:, :, 1:-1] - flux_top[:, :, :-2]) / vs.dzw[npx.newaxis, npx.newaxis, 1:-1],\n )\n vs.dE_iw = update_add(\n vs.dE_iw, at[:, :, -1, vs.tau], -(flux_top[:, :, -1] - flux_top[:, :, -2]) / (0.5 * vs.dzw[-1:])\n )\n\n \"\"\"\n Adam Bashforth time stepping\n \"\"\"\n vs.E_iw = update_add(\n vs.E_iw,\n at[:, :, :, vs.taup1],\n settings.dt_tracer\n * (\n (1.5 + settings.AB_eps) * vs.dE_iw[:, :, :, vs.tau]\n - (0.5 + settings.AB_eps) * vs.dE_iw[:, :, :, vs.taum1]\n ),\n )\n\n return KernelOutput(E_iw=vs.E_iw, dE_iw=vs.dE_iw, iw_diss=vs.iw_diss)",
"def __init__(self, trap=2.5*10**16, Keq=1.0*10**17,\n EHdecay=1.0*10**-10, Etrap=2.0*10**-10, FHloss=8.0*10**-12,\n G3decay = 0, step=200*ps, pretime=2, reprate=80000000,\n verbose=False, trackQ=False, scalar=1, Gdecay=0, GHdecay=0,\n tolerance=0.005, G2decay=0. ,Gescape=1., Gform=1., G3loss=0.):\n # Some other variables used\n self.tolerance = tolerance\n self.scalar = scalar\n self.verbose = verbose\n self.reprate = reprate\n self.duration = 1.00 / reprate\n self.step = step\n self.steps = int(self.duration / self.step)\n self.powers = []\n self.pretime = pretime\n # Variables which hold state densities\n self.exciton = []\n self.hole = []\n self.electron = []\n self.trap = (trap) # Total number of traps\n self.filled = [] # Filled traps\n self.signal = []\n self.xsignal = []\n self.ehsignal = []\n self.xloss = []\n self.tloss = []\n self.pulses = []\n self.qk = []\n self.trackQ = trackQ\n # Rate and equilibrium constants, corrected for time step size\n self.Keq = Gescape/Gform # Equilibrium constant for X<-->e+h\n self.EHdecay = (EHdecay * step) # e+h->ground\n self.Etrap = (Etrap * step) # e+trap->filled\n self.FHloss = (FHloss * step) # filled+h->ground\n self.Gdecay = Gdecay * step\n self.G2decay = G2decay * step\n self.G3decay = G3decay * step\n self.GHdecay = GHdecay * step\n self.Gescape = Gescape * step\n self.G3loss = G3loss * step\n self.Gform = Gform * step",
"def solve_steady_state(self):\n # optimization has to be done on the reduced system\n # TODO: implement different comp. sizes\n s0 = self.model.get_initial_conc()\n [L_inv, L, _] = self.model.N_partitioned\n si = numpy.dot(L_inv, s0)\n t = s0 - numpy.dot(L, si)\n f = lambda x: numpy.linalg.norm(\n self.dS_dt(numpy.dot(L, x) + t, 1))\n ss_i = scipy.optimize.fmin_bfgs(f, si)\n ss = numpy.dot(L, ss_i) + t\n return ss",
"def spikingModel(wEE, wEI, wIE, wII, stim_e, stim_i,\n time=1000, dt=0.1, Vth=1.0, Vre=0.0,\n tau_e=15.0, tau_i=10.0, ref_e=5.0, ref_i=5.0, \n syntau2_e=3.0, syntau2_i=2.0, syntau1=1.0):\n\n T = np.arange(0,time,dt)\n nE = wEE.shape[0]\n nI = wII.shape[0]\n\n Ve = np.zeros((nE,len(T)))\n Vi = np.zeros((nI,len(T)))\n # Set initial conditions\n Ve = np.random.uniform(0,1,size=(nE,))\n Vi = np.random.uniform(0,1,size=(nI,))\n # Instantiate synaptic currents empty matrix\n Ie = np.zeros((nE,len(T)))\n Ii = np.zeros((nI,len(T)))\n # Instantiate spiking matrix\n spkE = np.zeros((nE,time))\n spkI = np.zeros((nI,time))\n # Instantiate synaptic input matrix (temporally downsampled)\n synE = np.zeros((nE,time))\n synI = np.zeros((nI,time))\n\n bin_spkE = np.zeros((nE,))\n bin_spkI = np.zeros((nI,))\n # Synaptic rise gating variable\n xrse_ee = np.zeros((nE,))\n xdec_ee = np.zeros((nE,))\n xrse_ei= np.zeros((nI,))\n xdec_ei = np.zeros((nI,))\n xrse_ie = np.zeros((nE,))\n xdec_ie = np.zeros((nE,))\n xrse_ii= np.zeros((nI,))\n xdec_ii = np.zeros((nI,))\n\n\n # Set random biases from a uniform distribution\n # Excitatory neurons\n mu_e = np.random.uniform(1.1,1.2,size=(nE,))\n #mu_e = np.random.uniform(1.05,1.15,size=(nE,)) # Imbalanced state\n # Inhibitory neurons\n mu_i = np.random.uniform(1.0,1.05,size=(nI,))\n\n maxrate = 500 # max rate is 100hz\n maxtimes = int(np.round(maxrate*time/1000))\n timesE = np.zeros((nE,maxrate))\n timesI = np.zeros((nI,maxrate))\n ne_s = np.zeros((nE,),dtype=int)\n ni_s = np.zeros((nI,),dtype=int)\n\n refractory_e = np.zeros((nE,))\n refractory_i = np.zeros((nI,))\n for t in range(len(T)-1):\n ## Using RK2 method\n\n ## K1s\n Ve = Ve + dt*((mu_e + stim_e - Ve)/tau_e + Ie[:,t])\n Vi = Vi + dt*((mu_i + stim_i - Vi)/tau_i + Ii[:,t])\n\n # Synaptic gating\n # Excitatory synapses\n xrse_ee = xrse_ee - dt*xrse_ee/syntau1 + np.matmul(bin_spkE,wEE)\n xdec_ee = xdec_ee - dt*xdec_ee/syntau2_e + np.matmul(bin_spkE,wEE)\n xrse_ei = xrse_ei - dt*xrse_ei/syntau1 + np.matmul(bin_spkE,wEI)\n xdec_ei = xdec_ei - dt*xdec_ei/syntau2_e + np.matmul(bin_spkE,wEI)\n # Inhibitory dt*synapses\n xrse_ie = xrse_ie - dt*xrse_ie/syntau1 + np.matmul(bin_spkI,wIE)\n xdec_ie = xdec_ie - dt*xdec_ie/syntau2_i + np.matmul(bin_spkI,wIE)\n xrse_ii = xrse_ii - dt*xrse_ii/syntau1 + np.matmul(bin_spkI,wII)\n xdec_ii = xdec_ii - dt*xdec_ii/syntau2_i + np.matmul(bin_spkI,wII)\n\n # Calculate synaptic outputs given rise and decay times\n Ie[:,t+1] = (xdec_ee - xrse_ee)/(syntau2_e - syntau1) + (xdec_ie - xrse_ie)/(syntau2_i - syntau1)\n Ii[:,t+1] = (xdec_ii - xrse_ii)/(syntau2_i - syntau1) + (xdec_ei - xrse_ei)/(syntau2_e - syntau1)\n\n ## Spiking\n # Find which neurons exceed threshold (and are not in a refractory period)\n bin_spkE = np.multiply(Ve>Vth, refractory_e==0.0)\n bin_spkI = np.multiply(Vi>Vth, refractory_i==0.0)\n\n # Save spike time (and downsample to 1ms)\n tms = int(np.floor(T[t]))\n spkE[bin_spkE,tms] = 1 # spikes are okay - refractory period is 5ms, anyway\n spkI[bin_spkI,tms] = 1\n synE[:,tms] = synE[:,tms] + Ie[:,t]\n synI[:,tms] = synI[:,tms] + Ii[:,t]\n\n # Reset voltages\n Ve[bin_spkE] = Vre\n Vi[bin_spkI] = Vre\n\n # spike times\n timesE[bin_spkE,ne_s[bin_spkE]] = T[t+1]\n timesI[bin_spkI,ni_s[bin_spkI]] = T[t+1]\n ne_s[bin_spkE] = ne_s[bin_spkE] + 1\n ni_s[bin_spkI] = ni_s[bin_spkI] + 1\n\n\n # Set refractory period\n # Add a refractory time step to neurons who just spiked, and to those are still in a refractory period\n refractory_e = refractory_e + (bin_spkE * dt) + (refractory_e!=0) * 
dt \n refractory_i = refractory_i + (bin_spkI * dt) + (refractory_i!=0) * dt\n # Once refractory period is complete, allow to spike\n can_spike_again_e = np.round(refractory_e,1) == ref_e\n can_spike_again_i = np.round(refractory_i,1) == ref_i\n\n refractory_e[can_spike_again_e] = 0.0\n refractory_i[can_spike_again_i] = 0.0\n\n # Set neurons who are in their refractory to the baseline membrane potential\n in_refractory_e = refractory_e != 0.0\n in_refractory_i = refractory_i != 0.0\n\n Ve[in_refractory_e] = Vre\n Vi[in_refractory_i] = Vre\n \n return spkE, spkI, synE, synI, timesE, timesI, ne_s, ni_s",
"def save_expval_final_statevecs():\n # Get pre-measurement statevectors\n statevecs = []\n # State |+1>\n statevec = Statevector.from_label(\"+1\")\n statevecs.append(statevec)\n # State |00> + |11>\n statevec = (Statevector.from_label(\"00\") + Statevector.from_label(\"11\")) / np.sqrt(2)\n statevecs.append(statevec)\n # State |10> -i|01>\n statevec = (Statevector.from_label(\"10\") - 1j * Statevector.from_label(\"01\")) / np.sqrt(2)\n statevecs.append(statevec)\n return statevecs",
"def calculate_Sio(tp, c, T, ib, ik, once_called, kgrid, cbm_vbm, epsilon_s, epsilon_inf):\n S_i = [np.array([1e-32, 1e-32, 1e-32]), np.array([1e-32, 1e-32, 1e-32])]\n S_i_th = [np.array([1e-32, 1e-32, 1e-32]), np.array([1e-32, 1e-32, 1e-32])]\n S_o = [np.array([1e-32, 1e-32, 1e-32]), np.array([1e-32, 1e-32, 1e-32])]\n S_o_th = [np.array([1e-32, 1e-32, 1e-32]), np.array([1e-32, 1e-32, 1e-32])]\n\n k = kgrid[tp][\"norm(k)\"][ib][ik]\n a = kgrid[tp][\"a\"][ib][ik]\n c_ = kgrid[tp][\"c\"][ib][ik]\n f = kgrid[tp][\"f\"][c][T][ib][ik]\n f_th = kgrid[tp][\"f_th\"][c][T][ib][ik]\n N_POP = kgrid[tp][\"N_POP\"][c][T][ib][ik]\n\n for j, X_Epm in enumerate([\"X_Eplus_ik\", \"X_Eminus_ik\"]):\n if tp == \"n\" and X_Epm == \"X_Eminus_ik\" and kgrid[tp][\"energy\"][ib][ik] - hbar * \\\n kgrid[tp][\"W_POP\"][ib][ik] < cbm_vbm[tp][\"energy\"]:\n continue\n if tp == \"p\" and X_Epm == \"X_Eplus_ik\" and kgrid[tp][\"energy\"][ib][ik] + hbar * \\\n kgrid[tp][\"W_POP\"][ib][ik] > cbm_vbm[tp][\"energy\"]:\n continue\n counted = len(kgrid[tp][X_Epm][ib][ik])\n for X_ib_ik in kgrid[tp][X_Epm][ib][ik]:\n X, ib_pm, ik_pm = X_ib_ik\n k_pm = kgrid[tp][\"norm(k)\"][ib_pm][ik_pm]\n abs_kdiff = abs(k_pm - k)\n if abs_kdiff < 1e-4 or k<1e-4 or k_pm<1e-4:\n # avoid rate blow-up (e.g. due to self-scattering)\n counted -= 1\n continue\n if abs(kgrid[tp]['energy'][ib_pm][ik_pm] - \\\n kgrid[tp]['energy'][ib][ik]) < \\\n hbar * kgrid[tp][\"W_POP\"][ib][ik] / 2.0:\n counted -= 1\n continue\n g_pm = kgrid[tp][\"g\"][c][T][ib_pm][ik_pm]\n g_pm_th = kgrid[tp][\"g_th\"][c][T][ib_pm][ik_pm]\n v_pm = kgrid[tp][\"norm(v)\"][ib_pm][ik_pm] / sq3 # 3**0.5 is to treat each direction as 1D BS\n a_pm = kgrid[tp][\"a\"][ib_pm][ik_pm]\n c_pm = kgrid[tp][\"c\"][ib_pm][ik_pm]\n if tp == \"n\":\n f_pm = kgrid[tp][\"f\"][c][T][ib_pm][ik_pm]\n f_pm_th = kgrid[tp][\"f_th\"][c][T][ib_pm][ik_pm]\n else:\n f_pm = 1 - kgrid[tp][\"f\"][c][T][ib_pm][ik_pm]\n f_pm_th = 1 - kgrid[tp][\"f_th\"][c][T][ib_pm][ik_pm]\n A_pm = a * a_pm + c_ * c_pm * (k_pm ** 2 + k ** 2) / (2 * k_pm * k)\n beta_pm = (e ** 2 * kgrid[tp][\"W_POP\"][ib_pm][ik_pm]) / (4 * pi * hbar * v_pm) * \\\n (1 / (epsilon_inf * epsilon_0) - 1 / (epsilon_s * epsilon_0)) * 6.2415093e20\n if not once_called:\n lamb_opm = beta_pm * (\n A_pm ** 2 * log((k_pm + k) / (abs_kdiff)) - A_pm * c_ * c_pm - a * a_pm * c_ * c_pm)\n # because in the scalar form k+ or k- is supposed to be unique, here we take average\n S_o[j] += (N_POP + j + (-1) ** j * f_pm) * lamb_opm\n S_o_th[j] += (N_POP + j + (-1) ** j * f_pm_th) * lamb_opm\n\n lamb_ipm = beta_pm * (\n (k_pm**2 + k**2) / (2*k*k_pm) * A_pm**2 *\\\n log((k_pm + k) / (abs_kdiff)) - A_pm**2 - c_**2 * c_pm** 2 / 3.0)\n S_i[j] += (N_POP + (1 - j) + (-1)**(1 - j) * f) * lamb_ipm * g_pm\n S_i_th[j] += (N_POP + (1 - j) + (-1)**(1 - j) * f_th) * lamb_ipm * g_pm_th\n if counted > 0:\n S_i[j] /= counted\n S_i_th[j] /= counted\n S_o[j] /= counted\n S_o_th[j] /= counted\n return [sum(S_i), sum(S_i_th), sum(S_o), sum(S_o_th)]",
"def __init__(self, time_grid=None, space_grid=None,\n sensors=None,\n loc_onramp=None, loc_offramp=None,\n vm_cells=None, beta_cells=None, rhoc_cells=None, wc_cells=None,\n num_ensembles=0,\n std_model_noise=None, queue_threshold=17.88,\n init_rho=0, init_qin=0.5, init_qout=0.0):\n\n self.__debug = False\n self.__debug_entrance_sensor = 'IDEALLoc100m'\n self.__debug_exit_sensor = 'IDEALLoc8300m'\n\n # initialize the superclass Estimator\n Estimator.__init__(self, time_grid, space_grid,\n loc_onramp, loc_offramp,\n sensors,\n queue_threshold)\n\n # build the index for the system state\n self.x_index, dim_state = self.__build_state_index()\n\n # initialize the super class\n EnKF.__init__(self, dim_state, num_ensembles)\n\n # y_index, and dim_obs, which will be dynamically updated upon arrival of each new data\n self.y_index = None\n self.dim_obs = None\n\n # keep track of the flow between cells for each ensemble which will be used to construct the observation\n self.__f_flow = {}\n self.__f_flow['time'] = np.array(self.time_grid[1:])\n self.__f_flow['data'] = OrderedDict()\n for i in range(0, self.num_ensembles):\n self.__f_flow['data'][i] = []\n\n # keep track of the speed between cells for each ensemble which will be used to construct the observation\n self.__f_speed = {}\n self.__f_speed['time'] = np.array(self.time_grid[1:])\n self.__f_speed['data'] = OrderedDict()\n for i in range(0, self.num_ensembles):\n self.__f_speed['data'][i] = []\n\n # save all the estimated states here\n self.est_state_all = np.matrix(np.zeros((self.dim_state, self.num_steps), float))\n\n # =================================================\n # Add additive noise to state.\n self.Q = OrderedDict()\n # initialize with all cell var\n self.Q = np.diag(np.ones(dim_state) * (std_model_noise['cell'] ** 2))\n\n # print('onramps:{0}; offramps:{1}'.format(self.cell_onramp, self.cell_offramp))\n # add onramp larger noise\n if self.cell_onramp is not None:\n for on_cell in self.cell_onramp:\n if 0 <= on_cell <= self.num_cells:\n idx = self.x_index['density'][on_cell]\n self.Q[idx, idx] = std_model_noise['oncell'] ** 2\n # add offramp larger noise\n if self.cell_offramp is not None:\n for off_cell in self.cell_offramp:\n if 0 <= off_cell <= self.num_cells:\n idx = self.x_index['density'][off_cell]\n self.Q[idx, idx] = std_model_noise['offcell'] ** 2\n # add qin variance\n idx = self.x_index['qin']\n self.Q[idx, idx] = std_model_noise['qin'] ** 2\n # add qout variance\n idx = self.x_index['qout']\n self.Q[idx, idx] = std_model_noise['qout'] ** 2\n\n # self.Q = std_model_noise\n # if np.size( self.Q['vm'] ) == 1:\n # # if it was a single value, then it was specified as std, not var (which = std^2)\n # self.Q['vm'] = np.diag( np.ones( self.num_cells )*(self.Q['vm']**2) )\n # if np.size( self.Q['beta'] ) == 1:\n # self.Q['beta'] = np.diag( np.ones( self.num_cells )*(self.Q['beta']**2) )\n # if np.size( self.Q['rhoc'] ) == 1:\n # self.Q['rhoc'] = np.diag( np.ones( self.num_cells )*(self.Q['rhoc']**2) )\n # if np.size( self.Q['wc'] ) == 1:\n # self.Q['wc'] = np.diag( np.ones( self.num_cells )*(self.Q['wc']**2) )\n #\n # if self.loc_onramp is not None and np.size(self.Q['onramp']) == 1:\n # self.Q['onramp'] = np.diag( np.ones(len(loc_onramp))*(self.Q['onramp']**2) )\n # if self.loc_offramp is not None and np.size(self.Q['offramp']) == 1:\n # self.Q['offramp'] = np.diag( np.ones(len(loc_offramp))*(self.Q['offramp']**2) )\n\n\n # =================================================\n # save the fundamental diagram for each cell\n # vm 
parameter\n if isinstance(vm_cells, numbers.Number):\n self.vm_cells = np.ones((self.num_cells, 1)) * float(vm_cells)\n else:\n self.vm_cells = np.array(vm_cells).astype(float)\n self.vm_cells = self.vm_cells.reshape((self.num_cells, 1))\n\n # beta parameter\n if isinstance(beta_cells, numbers.Number):\n self.beta_cells = np.ones((self.num_cells, 1)) * float(beta_cells)\n else:\n self.beta_cells = np.array(beta_cells).astype(float)\n self.beta_cells = self.beta_cells.reshape((self.num_cells, 1))\n\n # rhoc parameter\n if isinstance(rhoc_cells, numbers.Number):\n self.rhoc_cells = np.ones((self.num_cells, 1)) * float(rhoc_cells)\n else:\n self.rhoc_cells = np.array(rhoc_cells).astype(float)\n self.rhoc_cells = self.rhoc_cells.reshape((self.num_cells, 1))\n\n # wc parameter\n if isinstance(wc_cells, numbers.Number):\n self.wc_cells = np.ones((self.num_cells, 1)) * float(wc_cells)\n else:\n self.wc_cells = np.array(wc_cells).astype(float)\n self.wc_cells = self.wc_cells.reshape((self.num_cells, 1))\n\n # other use ful parameters\n self.qmax_cells = self.vm_cells * self.rhoc_cells - \\\n self.vm_cells * (self.rhoc_cells ** 2) / self.beta_cells\n\n self.rhomax_cells = - self.qmax_cells / self.wc_cells + self.rhoc_cells\n\n # =======================================================================\n self.init_rho = init_rho\n self.init_qin = init_qin\n self.init_qout = init_qout\n\n # =======================================================================\n # FOR DEBUGGING\n # recored the forecast and analysis value for qin and qout\n if self.__debug:\n self.qin_f = []\n self.qin_a = []\n self.qin_obs = []\n self.qout_f = []\n self.qout_a = []\n self.qout_obs = []",
"def updateBD(self):\r\n # itereigenupdated is always up-to-date in the diagonal case\r\n # just double check here\r\n if self.itereigenupdated == self.countiter:\r\n return\r\n\r\n if self.sp.neg.cmuexp: # cave:\r\n self.update_exponential(self.Zneg, -self.sp.neg.cmuexp)\r\n # self.C += self.Zpos # pos update after Zneg would be the correct update, overall:\r\n # self.C = self.Zpos + Cs * Mh.expms(-self.sp.neg.cmuexp*Csi*self.Zneg*Csi) * Cs\r\n self.Zneg = np.zeros((self.N, self.N))\r\n\r\n if self.sigma_vec is not 1 and not np.all(self.sigma_vec == 1):\r\n self.C = dot(dot(np.diag(self.sigma_vec), self.C), np.diag(self.sigma_vec))\r\n self.sigma_vec[:] = 1\r\n\r\n if self.opts['CMA_const_trace'] in (True, 1, 2): # normalize trace of C\r\n if self.opts['CMA_const_trace'] == 2:\r\n s = np.exp(np.mean(np.log(self.dC)))\r\n else:\r\n s = np.mean(self.dC)\r\n self.C /= s\r\n self.dC /= s\r\n self.C = (self.C + self.C.T) / 2\r\n # self.C = np.triu(self.C) + np.triu(self.C,1).T # should work as well\r\n # self.D, self.B = eigh(self.C) # hermitian, ie symmetric C is assumed\r\n\r\n if type(self.opts['CMA_eigenmethod']) == type(1):\r\n print('WARNING: option CMA_eigenmethod should be a function, not an integer')\r\n if self.opts['CMA_eigenmethod'] == -1:\r\n # pygsl\r\n # easy to install (well, in Windows install gsl binaries first,\r\n # set system path to respective libgsl-0.dll (or cp the dll to\r\n # python\\DLLS ?), in unzipped pygsl edit\r\n # gsl_dist/gsl_site_example.py into gsl_dist/gsl_site.py\r\n # and run \"python setup.py build\" and \"python setup.py install\"\r\n # in MINGW32)\r\n if 1 < 3: # import pygsl on the fly\r\n try:\r\n import pygsl.eigen.eigenvectors # TODO efficient enough?\r\n except ImportError:\r\n print('WARNING: could not find pygsl.eigen module, either install pygsl \\n' +\r\n ' or set option CMA_eigenmethod=1 (is much slower), option set to 1')\r\n self.opts['CMA_eigenmethod'] = 0 # use 0 if 1 is too slow\r\n\r\n self.D, self.B = pygsl.eigen.eigenvectors(self.C)\r\n\r\n elif self.opts['CMA_eigenmethod'] == 0:\r\n # TODO: thoroughly test np.linalg.eigh\r\n # numpy.linalg.eig crashes in 200-D\r\n # and EVecs with same EVals are not orthogonal\r\n self.D, self.B = np.linalg.eigh(self.C) # self.B[i] is a row and not an eigenvector\r\n else: # is overall two;ten times slower in 10;20-D\r\n self.D, self.B = Misc.eig(self.C) # def eig, see below\r\n else:\r\n self.D, self.B = self.opts['CMA_eigenmethod'](self.C)\r\n\r\n\r\n # assert(sum(self.D-DD) < 1e-6)\r\n # assert(sum(sum(np.dot(BB, BB.T)-np.eye(self.N))) < 1e-6)\r\n # assert(sum(sum(np.dot(BB * DD, BB.T) - self.C)) < 1e-6)\r\n idx = np.argsort(self.D)\r\n self.D = self.D[idx]\r\n self.B = self.B[:,idx] # self.B[i] is a row, columns self.B[:,i] are eigenvectors\r\n # assert(all(self.B[self.countiter % self.N] == self.B[self.countiter % self.N,:]))\r\n\r\n # qqqqqqqqqq\r\n if 11 < 3: # limit condition number to 1e13\r\n climit = 1e13 # cave: conditioncov termination is 1e14\r\n if self.D[-1] / self.D[0] > climit:\r\n self.D += self.D[-1] / climit\r\n for i in xrange(self.N):\r\n self.C[i][i] += self.D[-1] / climit\r\n\r\n if 11 < 3 and any(abs(sum(self.B[:,0:self.N-1] * self.B[:,1:], 0)) > 1e-6):\r\n print('B is not orthogonal')\r\n print(self.D)\r\n print(sum(self.B[:,0:self.N-1] * self.B[:,1:], 0))\r\n else:\r\n # is O(N^3)\r\n # assert(sum(abs(self.C - np.dot(self.D * self.B, self.B.T))) < N**2*1e-11)\r\n pass\r\n self.D **= 0.5\r\n self.itereigenupdated = self.countiter",
"def state_transition(CS, CP, state, events, soft_disable_timer, v_cruise_kph, AM):\n enabled = isEnabled(state)\n\n v_cruise_kph_last = v_cruise_kph\n\n # if stock cruise is completely disabled, then we can use our own set speed logic\n if not CP.enableCruise:\n v_cruise_kph = update_v_cruise(v_cruise_kph, CS.buttonEvents, enabled)\n elif CP.enableCruise and CS.cruiseState.enabled:\n v_cruise_kph = CS.cruiseState.speed * CV.MS_TO_KPH\n\n # decrease the soft disable timer at every step, as it's reset on\n # entrance in SOFT_DISABLING state\n soft_disable_timer = max(0, soft_disable_timer - 1)\n\n # DISABLED\n if state == State.disabled:\n if get_events(events, [ET.ENABLE]):\n if get_events(events, [ET.NO_ENTRY]):\n for e in get_events(events, [ET.NO_ENTRY]):\n AM.add(str(e) + \"NoEntry\", enabled)\n\n else:\n if get_events(events, [ET.PRE_ENABLE]):\n state = State.preEnabled\n else:\n state = State.enabled\n AM.add(\"enable\", enabled)\n v_cruise_kph = initialize_v_cruise(CS.vEgo, CS.buttonEvents, v_cruise_kph_last)\n\n # ENABLED\n elif state == State.enabled:\n if get_events(events, [ET.USER_DISABLE]):\n state = State.disabled\n AM.add(\"disable\", enabled)\n\n elif get_events(events, [ET.IMMEDIATE_DISABLE]):\n state = State.disabled\n for e in get_events(events, [ET.IMMEDIATE_DISABLE]):\n AM.add(e, enabled)\n\n elif get_events(events, [ET.SOFT_DISABLE]):\n state = State.softDisabling\n soft_disable_timer = 300 # 3s\n for e in get_events(events, [ET.SOFT_DISABLE]):\n AM.add(e, enabled)\n\n # SOFT DISABLING\n elif state == State.softDisabling:\n if get_events(events, [ET.USER_DISABLE]):\n state = State.disabled\n AM.add(\"disable\", enabled)\n\n elif get_events(events, [ET.IMMEDIATE_DISABLE]):\n state = State.disabled\n for e in get_events(events, [ET.IMMEDIATE_DISABLE]):\n AM.add(e, enabled)\n\n elif not get_events(events, [ET.SOFT_DISABLE]):\n # no more soft disabling condition, so go back to ENABLED\n state = State.enabled\n\n elif get_events(events, [ET.SOFT_DISABLE]) and soft_disable_timer > 0:\n for e in get_events(events, [ET.SOFT_DISABLE]):\n AM.add(e, enabled)\n\n elif soft_disable_timer <= 0:\n state = State.disabled\n\n # PRE ENABLING\n elif state == State.preEnabled:\n if get_events(events, [ET.USER_DISABLE]):\n state = State.disabled\n AM.add(\"disable\", enabled)\n\n elif get_events(events, [ET.IMMEDIATE_DISABLE, ET.SOFT_DISABLE]):\n state = State.disabled\n for e in get_events(events, [ET.IMMEDIATE_DISABLE, ET.SOFT_DISABLE]):\n AM.add(e, enabled)\n\n elif not get_events(events, [ET.PRE_ENABLE]):\n state = State.enabled\n\n return state, soft_disable_timer, v_cruise_kph, v_cruise_kph_last",
"def _excitonic_coft_all(self,SS,AG):\n \n # SystemBathInteraction\n sbi = AG.get_SystemBathInteraction()\n # CorrelationFunctionMatrix\n cfm = sbi.CC\n \n c0 = AG.monomers[0].get_egcf((0,1))\n Nt = len(c0)\n \n Nst = AG.HamOp.dim\n ct = numpy.zeros((Nst,Nt),dtype=numpy.complex128)\n\n # electronic states corresponding to single excited states\n import time\n timecount = 0\n elst = numpy.where(AG.which_band == 1)[0]\n start = time.time()\n for el1 in elst:\n for el2 in elst:\n coft = cfm.get_coft(el1-1,el2-1)\n start2 = time.time()\n for kk in AG.vibindices[el1]:\n for ll in AG.vibindices[el2]:\n ct[:,:] += numpy.dot(\n numpy.expand_dims((SS[kk,:]**2)*(SS[ll,:]**2),axis=1),\n numpy.expand_dims(coft,axis=0))\n stop2 = time.time()\n timecount += stop2 - start2\n stop = time.time()\n print(stop-start,stop-start - timecount)\n return ct",
"def solve(self):\n \n # Definition of the parameters\n Q_pc = self.parameters.getParam(\"Q_pc\")\n V_c = self.parameters.getParam(\"V_c\")\n V_p = self.parameters.getParam(\"V_p\")\n CL = self.parameters.getParam(\"CL\")\n initial_conditions = [\n self.parameters.getParam(\"q_c0\"),\n self.parameters.getParam(\"q_p0\"),\n ]\n t_eval = np.linspace(0, self.timespan, self.nsteps)\n\n # Definition of the model ODEs\n def pk_iv_model(t, y, Q_pc, V_c, V_p, CL):\n \"\"\"Defines the differential equations for the PK IV model.\n\n Parameters:\n :param t: time (h)\n :param y: list of the state variables of the ODEs system, in the\n form [q_c, q_p]\n :param Q_pc: transition rate between central and peripheral\n compartments (mL/h)\n :param V_c: volume of central compartment (mL)\n :param V_p: volume of peripheral compartment (mL)\n :param CL: clearance/elimination rate from the central\n compartment (mL/h)\n\n The parameters (except for t and y) are extracted from the\n Parameter class, using getParam method.\n\n Returns list containing the differential equations, in the form:\n [dqc_dt, dqp_dt]\n \"\"\"\n q_c, q_p = y\n transfer = Q_pc * (q_c / V_c - q_p / V_p)\n dqc_dt = self.dosefunction(t) - q_c / V_c * CL - transfer\n dqp_dt = transfer\n return [dqc_dt, dqp_dt]\n\n # Solving the model\n sol = scipy.integrate.solve_ivp(\n fun=lambda t, y: pk_iv_model(t, y, Q_pc, V_c, V_p, CL),\n t_span=[t_eval[0], t_eval[-1]],\n y0=initial_conditions,\n t_eval=t_eval,\n )\n\n # Feeding the solution line by line to solution class\n t = sol.t\n y = sol.y\n N = t.shape[0]\n columnNames = [\"t\", \"dose\", \"q_c\", \"q_p\"]\n self.solution.begin(columnNames, N)\n for i in range(N):\n arr = np.zeros((len(columnNames), 1))\n arr[0] = t[i]\n arr[1] = self.dosefunction(t[i])\n arr[2:, 0] = y[:, i]\n self.solution.report(arr)",
"def at_SES(self):\n\t t = self.t\n\t dM = self.dM\n\t rLbl = transLabel(t,self.P,self.t0,self.tdur*2)\n\n\t # Hack. K2 doesn't have seasons\n\t q = np.zeros(t.size) - 1\n\t season = np.mod(q,4)\n\t dtype = [('ses',float),('tnum',int),('season',int)]\n\t dM.fill_value = np.nan\n\t rses = np.array(list(zip(dM.filled(),rLbl['tLbl'],season)),dtype=dtype )\n\t rses = rses[ rLbl['tLbl'] >= 0 ]\n\n\t # If no good transits, break out\n\t if rses.size==0:\n\t return\n\n\t self.add_dset('rLbl',rLbl,\n\t description='Transit/continuum labeled (see transLabel doc string')\n\t self.add_dset('SES',rses,\n\t description='Record array with single event statistic')\n\t self.add_attr('num_trans',rses.size,\n\t description='Number of good transits')\n\n\t # Median SES, even/odd\n\t for sfx,i in zip(['even','odd'],[0,1]):\n\t medses = np.median( rses['ses'][rses['tnum'] % 2 == i] ) \n\t self.add_attr('SES_%s' % sfx, medses,\n\t description='Median SES %s' % sfx)\n\n\t # Median SES, different seasons\n\t for i in range(4):\n\t medses = -99 #Hack\n\t self.add_attr('SES_%i' % i, medses,\n\t description='Median SES [Season %i]' % i )",
"def at_SES(self):\n\t t = self.t\n\t dM = self.dM\n\t rLbl = transLabel(t,self.P,self.t0,self.tdur*2)\n\n\t # Hack. K2 doesn't have seasons\n\t q = np.zeros(t.size) - 1\n\t season = np.mod(q,4)\n\t dtype = [('ses',float),('tnum',int),('season',int)]\n\t dM.fill_value = np.nan\n\t rses = np.array(list(zip(dM.filled(),rLbl['tLbl'],season)),dtype=dtype )\n\t rses = rses[ rLbl['tLbl'] >= 0 ]\n\n\t # If no good transits, break out\n\t if rses.size==0:\n\t return\n\n\t self.add_dset('rLbl',rLbl,\n\t description='Transit/continuum labeled (see transLabel doc string')\n\t self.add_dset('SES',rses,\n\t description='Record array with single event statistic')\n\t self.add_attr('num_trans',rses.size,\n\t description='Number of good transits')\n\n\t # Median SES, even/odd\n\t for sfx,i in zip(['even','odd'],[0,1]):\n\t medses = np.median( rses['ses'][rses['tnum'] % 2 == i] ) \n\t self.add_attr('SES_%s' % sfx, medses,\n\t description='Median SES %s' % sfx)\n\n\t # Median SES, different seasons\n\t for i in range(4):\n\t medses = -99 #Hack\n\t self.add_attr('SES_%i' % i, medses,\n\t description='Median SES [Season %i]' % i )",
"def _excitonic_coft(self,SS,AG,n):\n \n # SystemBathInteraction\n sbi = AG.get_SystemBathInteraction()\n # CorrelationFunctionMatrix\n cfm = sbi.CC\n \n c0 = AG.monomers[0].get_egcf((0,1))\n Nt = len(c0)\n \n ct = numpy.zeros((Nt),dtype=numpy.complex128)\n\n # electronic states corresponding to single excited states\n elst = numpy.where(AG.which_band == 1)[0]\n for el1 in elst:\n for el2 in elst:\n if cfm.cpointer[el1-1,el2-1] == 0:\n continue\n coft = cfm.get_coft(el1-1,el2-1) \n for kk in AG.vibindices[el1]:\n for ll in AG.vibindices[el2]:\n ct += ((SS[kk,n]**2)*(SS[ll,n]**2)*coft)\n return ct",
"def test_statevector(self):\n \n qubits = QubitPlaceholder.register(3)\n program = Program()\n program += H(qubits[0])\n program += X(qubits[2])\n program += CNOT(qubits[0], qubits[1])\n\n measurement = program.declare(\"ro\", \"BIT\", 3)\n for i in range(0, 3):\n program += MEASURE(qubits[i], measurement[i])\n\n assigned_program = address_qubits(program) \n simulator = WavefunctionSimulator()\n statevector = simulator.wavefunction(assigned_program)\n print(statevector.amplitudes)"
]
| [
"0.58612555",
"0.5846139",
"0.5843146",
"0.5814054",
"0.5794621",
"0.57264256",
"0.5697631",
"0.563857",
"0.5614976",
"0.56104195",
"0.5602246",
"0.55924416",
"0.55586153",
"0.5540207",
"0.5527369",
"0.55189097",
"0.5507827",
"0.55074686",
"0.5505165",
"0.5498555",
"0.54622",
"0.5445203",
"0.54350716",
"0.54328865",
"0.54265153",
"0.54229134",
"0.54130816",
"0.54130816",
"0.5365399",
"0.5358727"
]
| 0.62694937 | 0 |
Extracts the CSRF token from a URL | def extract_csrf(self, url):
with requests.Session() as client:
client.get(url)
csrf = client.cookies['csrftoken']
return csrf | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_initial_token(url):\n cj = CookieJar()\n opener = build_opener(HTTPCookieProcessor(cj))\n install_opener(opener)\n opener.open(url)\n\n for cookie in cj:\n if cookie.name == 'csrftoken':\n return cookie.value\n\n return ''",
"def get_csrf_token(url,cookie):\r\n\r\n session = requests.Session()\r\n headers = {\"Origin\":url,\r\n \"Accept\":\"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8\",\r\n \"Upgrade-Insecure-Requests\":\"1\",\r\n \"User-Agent\":\"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:76.0) Gecko/20100101 Firefox/76.0\",\r\n \"Connection\":\"close\",\r\n \"Referer\":url + \"/admin/\",\r\n \"Accept-Language\":\"es-ES,es;q=0.8,en-US;q=0.5,en;q=0.3\",\r\n \"Accept-Encoding\":\"gzip, deflate\"}\r\n cookies = {\"BLUDIT-KEY\":cookie}\r\n response = session.get(url + \"/admin/dashboard\",\r\n headers=headers,\r\n cookies=cookies\r\n )\r\n csrf_token = response.text.split('var tokenCSRF = \"')[1].split('\"')[0]\r\n\r\n print(\"csrf_token: \" + csrf_token)\r\n return csrf_token",
"def get_access_token_from_url(url):\n #token = str(urlopen(url).read(), 'utf-8')\n token = \"\"\n return token.split('=')[1].split('&')[0]",
"def get_csrf_token():\n\tresponse = session.get('https://www.udemy.com/join/login-popup')\n\tmatch = re.search(\"name=\\'csrfmiddlewaretoken\\' value=\\'(.*)\\'\", response.text)\n\treturn match.group(1)",
"def get_token(request: http.Request) -> str:\n if hasattr(request, '_csrf_hook'):\n return request._csrf_hook.get_token()",
"def get_csrf_token_from_response(self, response):\n return re.search(CSRF_REGEX, response.body).group(1)",
"def get_token(self, res):\n token = res.xpath('//*[@name=\"_csrf-app\"]')[0].attrs['value']\n return token",
"def parse_csrftoken(text):\n the_match = re.search(r'csrf_token.*?value=\"(.*?)\"', text, re.M | re.S)\n if the_match:\n return the_match.group(1)\n\n return ''",
"def get_csrf(session):\n login = session.get(KONFUZIO_HOST)\n csrf_token = login.cookies['csrftoken']\n return csrf_token",
"def getcsrf(session):\n session.get(\"http://anichart.net\")",
"def get_csrf(self):\n rv = self.app.get('/')\n soup = BeautifulSoup(rv.data, 'html.parser')\n tag = soup.body.find('input', attrs = { 'name' : '_csrf_token'})\n return tag['value']",
"def get_token(self, url):\n # A hack to avoid url-encoding the url, since the authorization service\n # doesn't work with correctly encoded urls\n\n parsed_url = urlparse.urlsplit(url)\n parsed_url = parsed_url._replace(path='/authorization/api')\n self.url = urlparse.urlunsplit(parsed_url)\n\n response = self.request(method='GET', url='/v1/token?url=' + url)\n return response.result.text",
"def get_initial_token():\n cj = CookieJar()\n opener = build_opener(HTTPCookieProcessor(cj))\n install_opener(opener)\n opener.open(EDX_HOMEPAGE)\n\n for cookie in cj:\n if cookie.name == 'csrftoken':\n return cookie.value\n\n return ''",
"def get_csrf_token(self, opener, cookiejar, login_url):\n opener.open(login_url)\n try:\n token = [x.value for x in cookiejar if x.name == 'csrftoken'][0]\n except Exception:\n token = None\n return token",
"def extract_csrf_token(htmlData):\n parsedHTML = HTMLMetaTagCSRFTokenParser()\n parsedHTML.feed(htmlData)\n\n token = parsedHTML.CSRF_Token\n\n parsedHTML.clean()\n\n return token",
"def _fetch_csrf(self) -> str:\n login_page = self._session.get(\"https://www.redpocket.com/login\")\n csrf_element = re.search(\n r'<input type=\"hidden\" name=\"csrf\" value=\"([\\w|-]+)\">', login_page.text\n )\n\n if csrf_element:\n csrf = csrf_element.group(1)\n self._logger.debug(\"Using CSRF: %s\", csrf)\n return csrf\n\n raise RedPocketException(\"Failed to get CSRF token from login page!\")",
"def get_review_token(site):\n return site.get_tokens([\"csrf\"])[\"csrf\"]",
"def get_csrf_token(self) -> str:\n url_csrf = 'https://www.instagram.com/accounts/login/'\n\n res = self.session.get(url_csrf, headers={\n 'user-agent': \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12; rv:55.0) Gecko/20100101 Firefox/55.0\"#'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.131 Safari/537.36'\n })\n csrf = re.findall(r\"csrf_token\\\":\\\"(.*?)\\\"\", res.text)[0]\n return csrf",
"def extract_token_from_cookie(request):\n try:\n token = request.headers.cookie['csrf_token'].value\n except KeyError:\n token = None\n else:\n token = _sanitize_token(token)\n\n # Don't set a CSRF cookie on assets, to avoid busting the cache due to the\n # Vary header we set below. Don't set it on callbacks, because we use IP\n # filtering there.\n\n if request.path.raw.startswith('/assets/') or request.path.raw.startswith('/callbacks/'):\n token = None\n else:\n token = token or _get_new_token()\n\n return {'csrf_token': token}",
"def fetch_csrf_token(input_text, pattern):\r\n\r\n m = re.search(pattern=pattern, string=input_text)\r\n if not m:\r\n return None\r\n\r\n match_input_tag = m.group(0)\r\n\r\n if match_input_tag:\r\n for replace_tag in [\"value\", \"content\"]:\r\n m = re.search(pattern=replace_tag + \"=\\\".*\\\"\", string=match_input_tag)\r\n if m:\r\n csrf_tag = str(m.group(0))\r\n csrf_tag = csrf_tag.replace(replace_tag + \"=\", '')\r\n csrf_tag = csrf_tag.replace('\"', '')\r\n return csrf_tag.strip()\r\n return None",
"def _parse_url_token(url_token):\n match = re.fullmatch(\n '^([0-9A-Za-z_\\-]+)/([0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})$',\n url_token\n )\n if match:\n return match.group(1), match.group(2)\n return None, None",
"def fetchCSRF(url, cookies):\n\n response = None\n try:\n csrf = None\n uri = '/manager/html'\n response = utility.requests_get(url + uri, cookies=cookies[0],\n auth=cookies[1])\n\n if response.status_code is 200:\n\n data = findall('CSRF_NONCE=(.*?)\\\"', response.content)\n if len(data) > 0:\n csrf = data[0]\n\n except Exception, e:\n utility.Msg(\"Failed to fetch CSRF token (HTTP %d)\" % response.status_code,\n LOG.ERROR)\n csrf = None\n\n return (csrf, dict_from_cookiejar(response.cookies))",
"def csrf_token():\n return api_util.jsonify({\n 'token': view_helpers.generate_csrf_token()\n })",
"def getToken(request):\n try:\n token = request.META['HTTP_AUTHORIZATION'].split()[1]\n except:\n token = \"\"\n return token",
"async def token(request: Request):\n return get_token()",
"def csrf(request):\n return django_csrf(request)['csrf_token']",
"def get_csrf_token(self):\n return get_csrf_token(self.REQUEST)",
"def _get_form_token(self, req):\n if req.incookie.has_key('trac_form_token'):\n return req.incookie['trac_form_token'].value\n else:\n req.outcookie['trac_form_token'] = hex_entropy(24)\n req.outcookie['trac_form_token']['path'] = req.base_path or '/'\n if self.env.secure_cookies:\n req.outcookie['trac_form_token']['secure'] = True\n if sys.version_info >= (2, 6):\n req.outcookie['trac_form_token']['httponly'] = True\n return req.outcookie['trac_form_token'].value",
"def _request_csrf_token(self, params):\n if params.get(\"action\") == \"query\":\n if params.get(\"meta\"):\n if \"tokens\" not in params[\"meta\"].split(\"|\"):\n params[\"meta\"] += \"|tokens\"\n else:\n params[\"meta\"] = \"tokens\"\n if params.get(\"type\"):\n if \"csrf\" not in params[\"type\"].split(\"|\"):\n params[\"type\"] += \"|csrf\"",
"def _get_xsrf(self):\n self.session.get('https://www.zhihu.com/', allow_redirects=False)\n for c in self.session.cookies:\n if c.name == '_xsrf':\n return c.value\n raise AssertionError(' 获取 xsrf 失败')"
]
| [
"0.73404205",
"0.69607997",
"0.6890592",
"0.6776865",
"0.6656046",
"0.6628088",
"0.65032995",
"0.6463559",
"0.6401501",
"0.63869375",
"0.6296214",
"0.6274736",
"0.6208114",
"0.62058866",
"0.613803",
"0.61265904",
"0.6116208",
"0.6084349",
"0.6079563",
"0.60091925",
"0.5976771",
"0.59468216",
"0.59006846",
"0.5888458",
"0.584332",
"0.5829436",
"0.58054495",
"0.57639503",
"0.56630653",
"0.5585025"
]
| 0.7907463 | 0 |
Adds a new weight entry; params needed for the POST request's payload | def add_weight(self):
# Get the csrf token
csrf = self.extract_csrf('https://wger.de/en/weight/add/')
# Adding referer to the headers
self.headers['Referer'] = API.url_weight
        # Take the weight entries from the TOML file
entries = self.cfg.get('payload', {}).get('weight')
        # Check for valid entries
if entries:
for payload in entries:
# Add csrf token to payload
payload['csrfmiddlewaretoken'] = csrf
                # Test the entry against its JSON schema
check.check_entry(path='schemas/weight.json', test=payload)
# Post request
self.add_post(payload, API.url_weight, self.weights)
# Eliminates the referer from the headers
self.headers.pop('Referer') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_weight():\n\twts = request.json['featureWeights']\n\n\t# Intialize new model with the latest weights\n\tglobal model\n\tmodel = tscore.ScoreModel(wts)\n\treturn jsonify( { 'updated': \"True\", 'featureWeights': wts } ), 201",
"def my_assign_weights(context, data):\n pass",
"def set_weight(self, weight):\n self.weight = weight # overwrite the existing weight with the input weight value",
"def weight(self, weight):\n\n self._weight = weight",
"def weight(self, weight):\n\n self._weight = weight",
"def weight(self, weight):\n\n self._weight = weight",
"def update_weights(self):\n self._weights = self._weights + self.update_weights_value",
"def send_weights(self, req):\n with self.mutex_2:\n # Send empty response if update is underway\n if self.update_underway:\n return GetWeightsResponse()\n else:\n print(\"Sending weights to node {}\".format(req.env_id))\n print(\"-\" * 10)\n if req.env_id not in self.trainings_env:\n self.trainings_env.append(req.env_id)\n\n pi_weights, vf_weights = self.encode_weights()\n\n return GetWeightsResponse(pi_weights=pi_weights,\n vf_weights=vf_weights)",
"def set_weights(self, weights):\r\n self.weights = weights",
"def send_weights(self, req):\n # Send weights for initialization\n print(\"Initializing weights for node {}\".format(req.env_id))\n print(\"-\" * 10)\n\n weights = self.encode_weights()\n\n return GetWeightsResponse(policy_weights=weights)",
"def post(self):\n args = parser.parse(self.arg_schema_post, request, location='json_or_form')\n data_file = request.files['model_weights']\n if not data_file:\n raise FileError(\"Missing upload file.\")\n file_path = self.save_weight_file_locally(data_file, args['clinic_id'], args['severity'])\n model_id = self.save_model_file_path_to_db(file_path, args['clinic_id'], args['severity'], args['accuracy'], False)\n if 'make_in_use' in args and args['make_in_use']:\n Models().set_active_model(args['clinic_id'], model_id)",
"def change_weight(self, new_weight_arr):\n self.weights = new_weight_arr",
"def weights(self, weights):\n\n self._weights = weights",
"def add_weight(self,\n name,\n shape,\n dtype=None,\n initializer=None,\n regularizer=None,\n trainable=True,\n constraint=None):\n if dtype is None:\n dtype = K.floatx()\n weight = self.add_variable(name, shape,\n dtype=dtype,\n initializer=initializers.get(initializer),\n regularizer=regularizers.get(regularizer),\n constraint=constraints.get(constraint),\n trainable=trainable)\n return weight",
"def update_weights(self):\n\t\tpass",
"def addItem(self, item):\n self.items.append(item)\n self.totalWeight += item",
"def add(self, name, weight, weightUp=None, weightDown=None, shift=False):\n if name.endswith(\"Up\") or name.endswith(\"Down\"):\n raise ValueError(\n \"Avoid using 'Up' and 'Down' in weight names, instead pass appropriate shifts to add() call\"\n )\n weight = coffea.util._ensure_flat(weight, allow_missing=True)\n if isinstance(weight, numpy.ndarray) and isinstance(\n self._weight, numpy.ndarray\n ):\n self.__add_eager(name, weight, weightUp, weightDown, shift)\n elif isinstance(weight, dask_awkward.Array) and isinstance(\n self._weight, (dask_awkward.Array, type(None))\n ):\n self.__add_delayed(name, weight, weightUp, weightDown, shift)\n else:\n raise ValueError(\n f\"Incompatible weights: self._weight={type(self.weight)}, weight={type(weight)}\"\n )",
"def update_weights(self, weight_delta):\n\n self._weights = math_util.vector_sum(self._weights, weight_delta)",
"def change_weight(self, new_weight):\r\n self.old_weight = self.weight\r\n self.weight = new_weight",
"def _determine_new_weight(self, weight, input, currentNeuron, bmu):\n return weight \\\n + (self.neighborhood.fn(currentNeuron, bmu) \\\n * self.learning_rate * (input - weight))",
"def add_weight(self, from_neuron, to_neuron, value):\n index = (to_neuron * self.neuron_count) + from_neuron\n if index >= len(self.weights):\n raise IndexError(\"Out of range: from_neuron: {}, to_neuron: {}\".format(from_neuron, to_neuron))\n self.weights[index] += value",
"def __init__(self, weight: float = 1.0):\n\n super().__init__()\n self.weight = weight",
"def setWeight(self, w):\n self._W = w",
"def __add_eager(self, name, weight, weightUp, weightDown, shift):\n if isinstance(weight, numpy.ma.MaskedArray):\n # TODO what to do with option-type? is it representative of unknown weight\n # and we default to one or is it an invalid weight and we should never use this\n # event in the first place (0) ?\n weight = weight.filled(1.0)\n self._weight = self._weight * weight\n if self._storeIndividual:\n self._weights[name] = weight\n self.__add_variation(name, weight, weightUp, weightDown, shift)\n self._weightStats[name] = WeightStatistics(\n weight.sum(),\n (weight**2).sum(),\n weight.min(),\n weight.max(),\n weight.size,\n )",
"def __add_delayed(self, name, weight, weightUp, weightDown, shift):\n if isinstance(dask_awkward.type(weight), awkward.types.OptionType):\n # TODO what to do with option-type? is it representative of unknown weight\n # and we default to one or is it an invalid weight and we should never use this\n # event in the first place (0) ?\n weight = dask_awkward.fill_none(weight, 1.0)\n if self._weight is None:\n self._weight = weight\n else:\n self._weight = self._weight * weight\n if self._storeIndividual:\n self._weights[name] = weight\n self.__add_variation(name, weight, weightUp, weightDown, shift)\n if isinstance(self._weightStats, coffea.processor.dict_accumulator):\n self._weightStats = {}\n self._weightStats[name] = {\n \"sumw\": dask_awkward.to_dask_array(weight).sum(),\n \"sumw2\": dask_awkward.to_dask_array(weight**2).sum(),\n \"minw\": dask_awkward.to_dask_array(weight).min(),\n \"maxw\": dask_awkward.to_dask_array(weight).max(),\n }",
"def add(self, destination: n, weight: w):\n self.connections[destination] = weight",
"def _weight_changed(self, value):\r\n # update internal data\r\n self._weight = value",
"def weight(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"weight\")",
"def add_item(self, quantity: int, weight: float, item: Item):\n if self.item_list:\n if item in self.item_list:\n for i in range(0, len(self.item_list)):\n if item.product_id == self.item_list[i].product_id:\n item.quantity = int(item.quantity)\n item.quantity += 0 if not quantity else quantity\n item.weight += 0 if not weight else weight\n else:\n self.item_list.append(item)\n else:\n self.item_list.append(item)",
"def _set_weight(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['-2147483648..2147483647']}, int_size=32), is_leaf=True, yang_name=\"weight\", rest_name=\"weight\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='int32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"weight must be of a type compatible with int32\"\"\",\n 'defined-type': \"int32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['-2147483648..2147483647']}, int_size=32), is_leaf=True, yang_name=\"weight\", rest_name=\"weight\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='int32', is_config=False)\"\"\",\n })\n\n self.__weight = t\n if hasattr(self, '_set'):\n self._set()"
]
| [
"0.70913374",
"0.6750403",
"0.6731251",
"0.67190844",
"0.67190844",
"0.67190844",
"0.6427595",
"0.62867343",
"0.6260229",
"0.6235389",
"0.6201869",
"0.61902094",
"0.61885905",
"0.6185385",
"0.61091244",
"0.6068805",
"0.6056508",
"0.6048858",
"0.60299945",
"0.6019781",
"0.6000964",
"0.59941125",
"0.59817994",
"0.5975148",
"0.5969001",
"0.5966048",
"0.5913864",
"0.5910028",
"0.589758",
"0.5895111"
]
| 0.8363505 | 0 |
Adds a new nutrition plan and stores information about it in the class dictionary. Params needed for the POST request's payload | def add_plan(self):
        # Take the plan entries from the TOML file
plans = self.cfg.get('payload', {}).get('plan')
# Check for valid entries
if plans :
# Construct payload
for payload in plans:
# Parse the payload
ready = self.construct_payload(parse = copy.deepcopy(payload), dele='meal')
# Check the entry vs a json schema
check.check_entry(path='schemas/plan.json', test=ready)
# Post request
b1 = self.add_post(ready, API.url_plan, self.plans)
# Check for meals
if 'meal' in payload.keys() and payload['meal'] != [{}]:
b2 = self.add_meal(self.plans[-1].get('id'))
else:
return b1
if b2 != None:
return b2 and b1
else:
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create(cls, **kwargs):\n return cls().requests.post('plan', data=kwargs)",
"def post(self):\n return CreateSavingPlan(request, current_user.id)",
"def new_flight_plan(self):\n r = requests.post(self.base_url + f'/users/{self.username}/flight-plans')\n return r.text",
"def add_Plante(id):\n f = PlanteForm()\n return render_template(\n \"create-plante.html\",\n form = f,\n title = \"Nouvelle Plante\",\n param = \"create\",\n parterre = id)",
"def plan_list_post(request):\n company = auth_api_key(request)\n form = validate_form(PlanCreateForm, request)\n \n plan_type = form.data['plan_type']\n amount = form.data['amount']\n frequency = form.data['frequency']\n interval = form.data['interval']\n if interval is None:\n interval = 1\n company_guid = company.guid\n\n # TODO: make sure user cannot create a post to a deleted company\n\n model = PlanModel(request.session)\n type_map = dict(\n charge=model.TYPE_CHARGE,\n payout=model.TYPE_PAYOUT,\n )\n plan_type = type_map[plan_type]\n freq_map = dict(\n daily=model.FREQ_DAILY,\n weekly=model.FREQ_WEEKLY,\n monthly=model.FREQ_MONTHLY,\n yearly=model.FREQ_YEARLY,\n )\n frequency = freq_map[frequency]\n\n with db_transaction.manager:\n guid = model.create(\n company_guid=company_guid, \n plan_type=plan_type,\n amount=amount, \n frequency=frequency, \n interval=interval, \n )\n plan = model.get(guid)\n return plan",
"def post(self, request):\n data = request.data\n try:\n career_planning = CareerPlanning(**data)\n career_planning.save()\n LOGGER.info(\"CareerPlanning created successfully\")\n except Exception, error:\n LOGGER.error(\"Error:%s\", str(error))\n return Response({\"status\": \"FAILED\", \"message\": str(error)})\n return Response({\"status\": \"SUCCESS\", \"message\": \"Record saved successfully\"})",
"def __init__(self, plan):\n self.plan = plan",
"def post(self):\n request_data = request.get_json()\n plan = request_data[\"plan\"]\n success_url = request_data.get(\"success_url\")\n cancel_url = request_data.get(\"cancel_url\")\n\n if not success_url or not cancel_url:\n raise InvalidRequest()\n\n user = get_authenticated_user()\n if not user.stripe_id:\n try:\n cus = billing.Customer.create(\n email=user.email,\n )\n user.stripe_id = cus.id\n user.save()\n except stripe.error.APIConnectionError as e:\n return connection_response(e)\n\n try:\n price = get_price(plan, False)\n if not price:\n abort(404, message=\"Plan not found\")\n\n checkout_session = stripe.checkout.Session.create(\n line_items=[\n {\n \"price\": price[\"stripeId\"],\n \"quantity\": 1,\n },\n ],\n customer=user.stripe_id,\n subscription_data={\n \"metadata\": {\n \"kind\": \"account_change_plan\",\n \"namespace\": user.username,\n \"performer\": user.username,\n \"ip\": get_request_ip(),\n \"plan\": price[\"stripeId\"],\n }\n },\n mode=\"subscription\",\n success_url=success_url,\n cancel_url=cancel_url,\n )\n return checkout_session\n except stripe.error.APIConnectionError as e:\n abort(503, message=\"Cannot contact Stripe\")\n except Exception as e:\n abort(500, message=str(e))",
"def post_object(self,object_data):\n try:\n self.update_title(title=object_data[\"title\"],owner=object_data[\"owner\"])\n print(\"make request\")\n make_request(f'{GRAPH_URL}/planner/plans/', 'POST', object_data)\n logging.info(f'Created plan with title {object_data.get(\"title\")}')\n self.append_response(\"Ok\")\n return True\n except Exception as e:\n self.append_response(\"Error\")\n print(\"error : \",e)\n return False",
"def add_new_item(self, request, *a, **kw):\n item_def = request.data\n cpdoc = self.get_object()\n item_def['calendar_plan'] = cpdoc.id\n\n item_ser = self.get_serializer(data=item_def)\n item_ser.is_valid(raise_exception=True)\n item_obj = item_ser.save()\n\n headers = self.get_success_headers(item_ser.data)\n return response.Response(item_ser.data, headers=headers)",
"def __init__(__self__, *,\n plan: Optional[pulumi.Input[str]] = None,\n product: Optional[pulumi.Input[str]] = None,\n publisher: Optional[pulumi.Input[str]] = None):\n if plan is not None:\n pulumi.set(__self__, \"plan\", plan)\n if product is not None:\n pulumi.set(__self__, \"product\", product)\n if publisher is not None:\n pulumi.set(__self__, \"publisher\", publisher)",
"def add_meal(self, p_id):\r\n \r\n # Take the plans entires from TOML file\r\n plans = self.cfg.get('payload',{}).get('plan')\r\n # For each meal in each plan\r\n for entries in plans:\r\n # Check for valid entires\r\n if entries:\r\n for payload in entries.get('meal',{}):\r\n # Parse the payload\r\n ready = self.construct_payload(parse = copy.deepcopy(payload), dele='item')\r\n ready['plan'] = p_id\r\n # Check the entry vs a json schema\r\n check.check_entry(path='schemas/meal.json', test=ready)\r\n # Post request\r\n b1 = self.add_post(ready, API.url_meal, self.meals)\r\n # Check for items\r\n if 'item' in payload.keys() and payload['item'] != [{}]:\r\n b2 = self.add_item(self.meals[-1].get('id'))\r\n else:\r\n return b1\r\n if b2 != None:\r\n return b2 and b1\r\n else:\r\n return False",
"def add_investment():\n\n company_name = request.args.get('company-name')\n date_of_entry = datetime.datetime.today().strftime('%Y-%m-%d')\n \n input_quantity = request.args.get('quantity')\n quantity = int(str(input_quantity).replace(',', ''))\n \n input_cost = request.args.get('cost')\n cost = int(str(input_cost).replace(',', ''))\n\n date_of_investment = request.args.get('date')\n\n new_inv = Investment(date_of_entry=date_of_entry, \n date_of_investment=date_of_investment,\n company_name=company_name, \n quantity=quantity, \n cost=cost)\n \n db.session.add(new_inv)\n db.session.commit()\n\n user_id = session['user']\n new_inv_id = new_inv.inv_id\n\n\n new_userinv = UserInv(inv_id=new_inv_id,\n user_id=user_id)\n db.session.add(new_userinv)\n db.session.commit()\n\n return jsonify('investment added!')",
"def post(self, orgname):\n permission = AdministerOrganizationPermission(orgname)\n if permission.can():\n request_data = request.get_json()\n plan = request_data[\"plan\"]\n success_url = request_data.get(\"success_url\")\n cancel_url = request_data.get(\"cancel_url\")\n\n if not success_url or not cancel_url:\n raise InvalidRequest()\n\n organization = model.organization.get_organization(orgname)\n if not organization.stripe_id:\n try:\n cus = billing.Customer.create(\n email=organization.email,\n )\n organization.stripe_id = cus.id\n organization.save()\n except stripe.error.APIConnectionError as e:\n return connection_response(e)\n\n try:\n price = get_price(plan, True)\n if not price:\n abort(404, message=\"Plan not found\")\n\n checkout_session = stripe.checkout.Session.create(\n line_items=[\n {\n \"price\": price[\"stripeId\"],\n \"quantity\": 1,\n },\n ],\n customer=organization.stripe_id,\n subscription_data={\n \"metadata\": {\n \"kind\": \"account_change_plan\",\n \"namespace\": organization.username,\n \"performer\": get_authenticated_user().username,\n \"ip\": get_request_ip(),\n \"plan\": price[\"stripeId\"],\n },\n },\n mode=\"subscription\",\n success_url=success_url,\n cancel_url=cancel_url,\n )\n return checkout_session\n except stripe.error.APIConnectionError as e:\n abort(503, message=\"Cannot contact Stripe\")\n except Exception as e:\n abort(500, message=str(e))\n\n raise Unauthorized()",
"def new_plante_saving():\n f = None\n f = PlanteForm()\n if f.validate_on_submit():\n o = TypePlante(\n nomPlant = f.get_name(),\n comportement = f.get_comportement(),\n taux_humidite = f.get_taux_humidite(),\n quantite = f.get_quantite(),\n parterre_id = f.get_parterre().get_id())\n f.get_parterre().add_plante(o)\n db.session.add(o)\n p = Actions(\n contenu = \"Ajout d'une plante \"+f.get_name() + \" au parterre \"+ f.get_parterre().get_name(),\n liste = 1\n )\n db.session.add(p)\n db.session.commit()\n return redirect(url_for('parterre_info', id = o.get_parterre()))\n return render_template(\n \"create-plante.html\",\n form = f,\n parterre = f.get_parterre().get_id(),\n title = \"Nouvelle plante\",\n param = \"create\")",
"def add_plan(self, name: str, services: Optional[str], tasks: Optional[List[str]], description: Optional[str]):\n if not name:\n _exit_if_errors(['--name is required'])\n\n service_list = []\n if services:\n service_list = [service.strip() for service in services.split(',')]\n\n tasks_objects = []\n if tasks:\n for task_str in tasks:\n try:\n task = json.loads(task_str)\n tasks_objects.append(task)\n except json.decoder.JSONDecodeError as json_error:\n _exit_if_errors([f'invalid task {json_error!s}'])\n\n plan = {}\n if service_list:\n plan['services'] = service_list\n if tasks_objects:\n plan['tasks'] = tasks_objects\n if description:\n plan['description'] = description\n\n _, errors = self.rest.add_backup_plan(name, plan)\n _exit_if_errors(errors)\n _success('Added plan')",
"def update(cls, plan_id, **kwargs):\n return cls().requests.put(f\"plan/{plan_id}\", data=kwargs,)",
"def add_test_plan(self, test_plan):\n\n self._test_plans.append(test_plan)",
"def record_plan(request):\n ac_od_id = request.POST['ac_od_id']\n ac_od_id = int(ac_od_id.strip())\n\n plan = request.POST['plan']\n plan = int(plan.strip())\n\n record = OrderDetail.objects.get(ac_od_id=ac_od_id)\n\n record.plan = plan\n record.save()\n\n return HttpResponse(json.dumps(['ok']), mimetype='application/json')",
"def post(self, saving_plan_id):\n return CreateSavingDeposit(current_user.id, saving_plan_id, request)",
"def post(self):\n FeatureBusiness.add(request.get_json(), user_id=request.user_id)\n\n return {\"status\": 201}, 201",
"def add_plant(plant_type): # noqa: E501\n global seededPlants\n global tasks\n\n if not seededPlants.get(plant_type):\n seededPlants[plant_type] = 0\n \n pid = db.addPlant(plant_type)\n plant = Plant(plant_id=pid, plant_type=plant_type)\n seededPlants[plant_type] += 1\n\n #clean up tasks\n tasks = [x for x in tasks if x.is_alive()]\n\n tasks.append(threading.Thread(target=grow_plant, args=[plant]))\n tasks[-1].start()\n\n return plant",
"def add_plant(self, plant: 'Plant') -> None:\r\n self.plants.append(plant)\r\n self.centers.append(plant.get_center())\r\n self.make_rows()",
"def newTestPlan (self):\n self.newTab( extension = TestPlan.TYPE, repoDest=UCI.REPO_UNDEFINED )\n self.findWidget.setDisabled(True)",
"def test_adding_patient_goals(self):\n\n data = {\"goal-body\": \"New goal body.\"}\n result = self.client.post(\"/patient/1/add-goal.json\", data=data)\n\n self.assertEqual(result.status_code, 200)\n self.assertIn(b\"New goal\", result.data)",
"def test_create_rate_plan(self):\n pass",
"def __init__(self, plan_id: str=None, url: str=None, name: str=None, station_id: str=None, region_id: str=None, fare: Fare=None, is_taxable: bool=None, description: str=None): # noqa: E501\n self.swagger_types = {\n 'plan_id': str,\n 'url': str,\n 'name': str,\n 'station_id': str,\n 'region_id': str,\n 'fare': Fare,\n 'is_taxable': bool,\n 'description': str\n }\n\n self.attribute_map = {\n 'plan_id': 'planId',\n 'url': 'url',\n 'name': 'name',\n 'station_id': 'stationId',\n 'region_id': 'regionId',\n 'fare': 'fare',\n 'is_taxable': 'isTaxable',\n 'description': 'description'\n }\n self._plan_id = plan_id\n self._url = url\n self._name = name\n self._station_id = station_id\n self._region_id = region_id\n self._fare = fare\n self._is_taxable = is_taxable\n self._description = description",
"def __init__(self, plan, score):\n self.id = plan.id\n self.score = score",
"def handle_add(self, controller):\n \n controller.customer.CreatePizza()\n controller.show_frame(PageOne)",
"def add_panel():\n panel_name = request.json['panel_name']\n project_id = request.json['project_id']\n panel_id = create_panel_query(s, project_id, panel_name, current_user.id)\n return jsonify(panel_id)"
]
| [
"0.7086115",
"0.6310721",
"0.61961544",
"0.6100489",
"0.6030358",
"0.6028867",
"0.5888967",
"0.5846056",
"0.5657358",
"0.561522",
"0.56080353",
"0.5604049",
"0.5563316",
"0.5547414",
"0.55382395",
"0.5532841",
"0.55313814",
"0.5527719",
"0.5475129",
"0.5444152",
"0.5414031",
"0.5399604",
"0.53436863",
"0.5336329",
"0.53021795",
"0.52395564",
"0.5231081",
"0.52260375",
"0.52163595",
"0.5210772"
]
| 0.7057411 | 1 |
Adds a new meal to a nutrition plan and stores information about it in the class dictionary. Params needed for the POST request's payload | def add_meal(self, p_id):
        # Take the plan entries from the TOML file
plans = self.cfg.get('payload',{}).get('plan')
# For each meal in each plan
for entries in plans:
            # Check for valid entries
if entries:
for payload in entries.get('meal',{}):
# Parse the payload
ready = self.construct_payload(parse = copy.deepcopy(payload), dele='item')
ready['plan'] = p_id
# Check the entry vs a json schema
check.check_entry(path='schemas/meal.json', test=ready)
# Post request
b1 = self.add_post(ready, API.url_meal, self.meals)
# Check for items
if 'item' in payload.keys() and payload['item'] != [{}]:
b2 = self.add_item(self.meals[-1].get('id'))
else:
return b1
if b2 != None:
return b2 and b1
else:
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_item(self, m_id):\r\n \r\n # Take the meals entires from TOML file\r\n meals = self.cfg.get('payload',{}).get('plan',{})[0].get('meal',{})\r\n for entries in meals:\r\n # Check for valid entires\r\n if entries:\r\n # Construct payload \r\n for payload in entries.get('item',{}):\r\n # Check the entry vs a json schema\r\n check.check_entry(path='schemas/item.json', test=payload)\r\n payload['meal'] = m_id\r\n # Post request\r\n return self.add_post(payload, API.url_items, self.items)",
"def create_meal():",
"def test_add_meal(self):\n with self.client:\n response = self.add_meal(\"pilawo\", 15000)\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 201)\n self.assertEqual(data.get('message'), \"Meal successfully created\")",
"def add_meal(self, meal_time, carbs):\r\n self.meals.append((meal_time, carbs))",
"def post_nutrition_info():\n if request.method == \"POST\":\n return jsonify(meal.post_meal_info(request, client))\n elif request.method == \"DELETE\":\n return jsonify(meal.delete_meal_info(request, client))",
"def add_plan(self):\r\n\r\n # Take the weight entries from TOML file\r\n plans = self.cfg.get('payload', {}).get('plan')\r\n # Check for valid entries\r\n if plans :\r\n # Construct payload \r\n for payload in plans:\r\n # Parse the payload\r\n ready = self.construct_payload(parse = copy.deepcopy(payload), dele='meal')\r\n # Check the entry vs a json schema\r\n check.check_entry(path='schemas/plan.json', test=ready)\r\n # Post request\r\n b1 = self.add_post(ready, API.url_plan, self.plans)\r\n # Check for meals\r\n if 'meal' in payload.keys() and payload['meal'] != [{}]:\r\n b2 = self.add_meal(self.plans[-1].get('id')) \r\n else:\r\n return b1\r\n if b2 != None:\r\n return b2 and b1\r\n else:\r\n return False",
"def test_api_can_add_food_to_a_meal(self):\n response = self.client.post(f'/api/v1/meals/{self.breakfast.id}/foods/{self.oatmeal.id}')\n # import code; code.interact(local=dict(globals(), **locals()))\n\n self.assertEqual(response.data['message'], \"Successfully added oatmeal to breakfast\")",
"def cart_add(request, meal_id):\n cart = Cart(request)\n meal = get_object_or_404(Meal, id=meal_id)\n form = CartAddMealForm(request.POST)\n if form.is_valid():\n cd = form.cleaned_data\n cart.add(meal=meal,\n quantity=cd['quantity'],\n override_quantity=cd['override'])\n return redirect('meals:meal_list')",
"def post_amenity_obj():\n dic = {}\n dic = request.get_json(silent=True)\n if dic is None:\n abort(400, \"Not a JSON\")\n if \"name\" not in dic.keys():\n abort(400, \"Missing name\")\n new_ame = amenity.Amenity()\n for k, v in dic.items():\n setattr(new_ame, k, v)\n storage.new(new_ame)\n storage.save()\n return jsonify(new_ame.to_dict()), 201",
"def post_amenity():\n\n if not request.is_json:\n abort(400, description=\"Not a JSON\")\n data = request.get_json()\n\n if \"name\" not in data:\n abort(400, description=\"Missing name\")\n\n obj = Amenity(**data)\n storage.new(obj)\n storage.save()\n return obj.to_dict(), 201",
"def test_adding_patient_posts(self):\n\n data = {\"meal-time\": \"2020-02-25 08:00:00\", \n \"meal-setting\": \"At home!\", \"TEB\": \"Some thoughts..\",\n \"hunger\": 2, \"fullness\": 8, \"satisfaction\": 5,\n \"meal-notes\": \"Some notes.\"}\n \n result = self.client.post(\"/post/new-post\", data=data,\n follow_redirects=True)\n\n self.assertEqual(result.status_code, 200)\n self.assertIn(b\"Post added successfully\", result.data)",
"def post_amenity():\n the_json = request.get_json()\n if not the_json:\n abort(400, 'Not a JSON')\n if 'name' not in the_json:\n abort(400, 'Missing name')\n new_amenity = Amenity(**request.get_json())\n storage.new(new_amenity)\n storage.save()\n return jsonify(new_amenity.to_dict()), 201",
"def post(self):\n FeatureBusiness.add(request.get_json(), user_id=request.user_id)\n\n return {\"status\": 201}, 201",
"def test_adding_patient_goals(self):\n\n data = {\"goal-body\": \"New goal body.\"}\n result = self.client.post(\"/patient/1/add-goal.json\", data=data)\n\n self.assertEqual(result.status_code, 200)\n self.assertIn(b\"New goal\", result.data)",
"def test_put_meal(self):\n with self.client:\n response = self.put_meal()\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 201)\n self.assertIn(\"Meal updated successfully\", data.get('message'))",
"def update_meal():",
"def add_new_item(self, request, *a, **kw):\n item_def = request.data\n cpdoc = self.get_object()\n item_def['calendar_plan'] = cpdoc.id\n\n item_ser = self.get_serializer(data=item_def)\n item_ser.is_valid(raise_exception=True)\n item_obj = item_ser.save()\n\n headers = self.get_success_headers(item_ser.data)\n return response.Response(item_ser.data, headers=headers)",
"def add_nutrition(request):\n if not request.user.is_superuser:\n messages.error(request, 'Opps, only administrators can do that.')\n return redirect(reverse('home'))\n\n if request.method == 'POST':\n form = NutritionUpdate(request.POST)\n if form.is_valid():\n food = form.save()\n messages.success(request, f'Successfully added nutritional information for {food.Food}!')\n return redirect(reverse('nutrition'))\n else:\n messages.error(request, 'Error adding nutritional information. Please ensure the form is valid.')\n else:\n form = NutritionUpdate()\n\n template = 'nutrition/add_nutrition.html'\n context = {\n 'form': form,\n }\n\n return render(request, template, context)",
"def create(cls, **kwargs):\n return cls().requests.post('plan', data=kwargs)",
"def post_amenity():\n body = request.get_json()\n if not body:\n abort(400, \"Not a JSON\")\n if body.get(\"name\") is None:\n abort(400, \"Missing name\")\n amenity = Amenity(**body)\n amenity.save()\n return jsonify(amenity.to_dict()), 201",
"def add_investment():\n\n company_name = request.args.get('company-name')\n date_of_entry = datetime.datetime.today().strftime('%Y-%m-%d')\n \n input_quantity = request.args.get('quantity')\n quantity = int(str(input_quantity).replace(',', ''))\n \n input_cost = request.args.get('cost')\n cost = int(str(input_cost).replace(',', ''))\n\n date_of_investment = request.args.get('date')\n\n new_inv = Investment(date_of_entry=date_of_entry, \n date_of_investment=date_of_investment,\n company_name=company_name, \n quantity=quantity, \n cost=cost)\n \n db.session.add(new_inv)\n db.session.commit()\n\n user_id = session['user']\n new_inv_id = new_inv.inv_id\n\n\n new_userinv = UserInv(inv_id=new_inv_id,\n user_id=user_id)\n db.session.add(new_userinv)\n db.session.commit()\n\n return jsonify('investment added!')",
"def post(self):\n data = request.json\n create_ue(data)\n return None, 201",
"def post(self, request):\n data = request.data\n try:\n career_planning = CareerPlanning(**data)\n career_planning.save()\n LOGGER.info(\"CareerPlanning created successfully\")\n except Exception, error:\n LOGGER.error(\"Error:%s\", str(error))\n return Response({\"status\": \"FAILED\", \"message\": str(error)})\n return Response({\"status\": \"SUCCESS\", \"message\": \"Record saved successfully\"})",
"def post(self, category_id, name, description, weight, category_ref, sponsor_id):\n\t\tproperties = {\"id\": category_id, \"name\": name, \"description\": description, \"weight\": weight, \"categoryRef\": category_ref, \"sponsorId\": sponsor_id}\n\t\treturn self.service.post(self.base_uri, json.dumps(properties))",
"def test_missing_meal_name_details(self):\n with self.client:\n response = self.add_meal(\"\", 15000)\n data = json.loads(response.data.decode())\n self.assertEqual(\n data.get('message'),\n \"Meal name must be between 3 to 25 characters long\")\n self.assertEqual(response.status_code, 400)",
"def test_post_a_menu_for_the_day(self):\n\n\t\tres = self.login_admin_user()\n\t\taccess_token = json.loads(res.data.decode())['access_token']\n\n\t\tresponse = self.client().post(\n\t\t\t'/api/v2/meals', \n\t\t\t\theaders={\"x-access-token\": access_token},\n\t\t\t\tdata = json.dumps(\n\t\t\t\tself.meal_data) , content_type = 'application/json')\n\t\tself.assertEqual(response.status_code, 201)\n\n\t\tresponse = self.client().post('/api/v2/menu/1',\n\t\t\theaders={\"x-access-token\": access_token},\n\t\t\tdata = json.dumps(\n\t\t\t\tself.meal_data), content_type = 'application/json')\n\t\tresult = json.loads(response.data)\n\t\tself.assertEqual(result[\"message\"], \"Meal added to menu\")\n\t\tself.assertEqual(response.status_code, 200)",
"def test_short_meal_name_details(self):\n with self.client:\n response = self.add_meal(\n \"qwertyuioplkjhgfdsazxcvbnmqwertyu\", 15000)\n data = json.loads(response.data.decode())\n self.assertEqual(\n data.get('message'),\n \"Meal name must be between 3 to 25 characters long\")\n self.assertEqual(response.status_code, 400)",
"def goal_handling(request, user_id, type):\n\tif type == 1:\n\t\tname = request.POST['goalName']\n\t\tamount = request.POST['amount']\n\t\tdate = request.POST['goalDate']\n\t\tg = Goals(user_goal=Users.objects.get(pk=user_id), goal_name=name, goal_amount=amount, goal_deadline=date)\n\t\tif g is not None:\n\t\t\tg.save()\n\telif type == 2:\n\t\tg = Goals.objects.get(pk=request.POST['goalPK'])\n\t\tamount = request.POST['amount']\n\t\tdate = request.POST['goalDate']\n\t\tt_from = request.POST.get('t_to')\n\t\tif g is not None:\n\t\t\tnameExp = \"Savings for \" + g.goal_name\n\t\t\tg.amount_now += int(amount)\n\t\t\tg.save()\n\t\t\te = Expenses(user_exp = Users.objects.get(pk=user_id), exp_name=nameExp, cat_exp= Categories.objects.get(user_cat = Users.objects.get(pk=user_id), cat_name=\"Adding goal\"), wal_exp=Wallets.objects.get(pk=t_from), exp_date=date, exp_amount=amount)\n\t\t\tif e is not None:\n\t\t\t\te.save()\n\t\t\tu = Users.objects.get(pk=user_id)\n\t\t\tif u is not None:\n\t\t\t\tu.balance -= int(amount)\n\t\t\t\tu.save()",
"def test_creating_new_goal(self):\n\n form_data = {\"goal-body\": \"New goal body.\"}\n goal = create_new_goal(1, form_data)\n \n self.assertEqual(\"New goal body.\", goal.goal_body)",
"def post(self, category_id):\n return CreateNewExpense(current_user.id, category_id, request)"
]
| [
"0.69292074",
"0.6641523",
"0.6351043",
"0.63263077",
"0.61973137",
"0.6144904",
"0.613827",
"0.6112059",
"0.5795221",
"0.578838",
"0.5749714",
"0.56976366",
"0.56927705",
"0.565634",
"0.56037104",
"0.55582607",
"0.55232936",
"0.5504037",
"0.5457082",
"0.5444953",
"0.5418034",
"0.54112554",
"0.5383909",
"0.5380857",
"0.5349172",
"0.53491515",
"0.53423375",
"0.5337105",
"0.5325939",
"0.5325025"
]
| 0.72102773 | 0 |
Adds a new item to a meal and stores information about it in the class dictionary. Params needed for the POST request's payload | def add_item(self, m_id):
        # Take the meal entries from the TOML file
meals = self.cfg.get('payload',{}).get('plan',{})[0].get('meal',{})
for entries in meals:
            # Check for valid entries
if entries:
# Construct payload
for payload in entries.get('item',{}):
# Check the entry vs a json schema
check.check_entry(path='schemas/item.json', test=payload)
payload['meal'] = m_id
# Post request
return self.add_post(payload, API.url_items, self.items) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_items_handler():\n rq = request.get_json()\n name = rq['name']\n picture = rq['picture']\n description = rq['description']\n category_id = rq['category_id']\n item = addItem(name, picture, description, category_id, g.user.id)\n return jsonify(item=item.serialize)",
"def add_item():\n if 'username' not in login_session:\n response = make_response(\n json.dumps({'error': 'User is logged out. This should not happen'}), 401\n )\n response.headers['Content-Type'] = 'application/json'\n return response\n try:\n if request.method == 'POST':\n item = Item()\n # First we populate the new item.\n item.category_id = request.form['categoryId']\n item.picture = request.form['picture']\n item.name = request.form['name']\n item.price = request.form['price']\n item.description = request.form['description']\n item.user_id = login_session['user_id']\n # Now let's pull its category.\n category = session.query(Category).filter_by(id=item.category_id).one()\n # And make sure they're properly linked.\n item.category = category\n session.add(item)\n session.flush()\n id = item.id\n session.commit()\n response = make_response(\n json.dumps({'success': '', 'nonce': login_session['state'], 'id': id}), 200\n )\n response.headers['Content-Type'] = 'application/json'\n return response\n except Exception as inst:\n print(type(inst))\n print(inst.args)\n print(inst)",
"def insert_item_page(request):\n validate(instance=request.body, schema=item_schema)\n body = json.loads(request.body)\n item = Item.new_item(body['cart_id'], body['food_id'], body['count'])\n return JsonResponse(model_to_json(item))",
"def post(self, item):\n\n db.session.add(item)\n\n return item",
"def add_meal(self, p_id):\r\n \r\n # Take the plans entires from TOML file\r\n plans = self.cfg.get('payload',{}).get('plan')\r\n # For each meal in each plan\r\n for entries in plans:\r\n # Check for valid entires\r\n if entries:\r\n for payload in entries.get('meal',{}):\r\n # Parse the payload\r\n ready = self.construct_payload(parse = copy.deepcopy(payload), dele='item')\r\n ready['plan'] = p_id\r\n # Check the entry vs a json schema\r\n check.check_entry(path='schemas/meal.json', test=ready)\r\n # Post request\r\n b1 = self.add_post(ready, API.url_meal, self.meals)\r\n # Check for items\r\n if 'item' in payload.keys() and payload['item'] != [{}]:\r\n b2 = self.add_item(self.meals[-1].get('id'))\r\n else:\r\n return b1\r\n if b2 != None:\r\n return b2 and b1\r\n else:\r\n return False",
"def add_new_item(self, request, *a, **kw):\n item_def = request.data\n cpdoc = self.get_object()\n item_def['calendar_plan'] = cpdoc.id\n\n item_ser = self.get_serializer(data=item_def)\n item_ser.is_valid(raise_exception=True)\n item_obj = item_ser.save()\n\n headers = self.get_success_headers(item_ser.data)\n return response.Response(item_ser.data, headers=headers)",
"def cart_add(request, meal_id):\n cart = Cart(request)\n meal = get_object_or_404(Meal, id=meal_id)\n form = CartAddMealForm(request.POST)\n if form.is_valid():\n cd = form.cleaned_data\n cart.add(meal=meal,\n quantity=cd['quantity'],\n override_quantity=cd['override'])\n return redirect('meals:meal_list')",
"def add_meal(self, meal_time, carbs):\r\n self.meals.append((meal_time, carbs))",
"def test_add_item_using_post(self):\n pass",
"def add(self, item):",
"def create_meal():",
"def create_item(self, user: User, **kwargs) -> None:",
"def add_to_bag(request, item_id):\n\n adult_quantity = int(request.POST.get('adult_quantity'))\n child_quantity = int(request.POST.get('child_quantity'))\n family_quantity = int(request.POST.get('family_quantity'))\n redirect_url = request.POST.get('redirect_url')\n bag = request.session.get('bag', {})\n\n def add_quantity(\n quantity, ticket_type, item_id, bag):\n \"\"\"\n adds quantity of specific pass ticket_type to bag\n \"\"\"\n if quantity:\n if item_id in list(bag.keys()):\n if f'{ticket_type}' in bag[item_id]:\n bag[item_id][f'{ticket_type}'] += quantity\n else:\n bag[item_id][f'{ticket_type}'] = quantity\n else:\n bag[item_id] = {f'{ticket_type}': quantity}\n\n if adult_quantity or child_quantity or family_quantity:\n\n if adult_quantity:\n add_quantity(\n adult_quantity,\n 'adult_quantity',\n item_id,\n bag,)\n\n if child_quantity:\n add_quantity(\n child_quantity,\n 'child_quantity',\n item_id,\n bag,)\n\n if family_quantity:\n add_quantity(\n family_quantity,\n 'family_quantity',\n item_id,\n bag,)\n\n messages.success(\n request, 'Added items to your bag')\n else:\n messages.error(\n request, 'Nothing selected!!')\n\n request.session['bag'] = bag\n return redirect(redirect_url)",
"def add(self, item: Any) -> None:\n pass",
"def addItem(*args):",
"def addItem(*args):",
"def addItem(*args):",
"def add_to_bag(request, item_id):\n\n a_class = get_object_or_404(Class, pk=item_id)\n quantity = int(request.POST.get('quantity'))\n redirect_url = request.POST.get('redirect_url')\n day = None\n if 'class_day' in request.POST:\n day = request.POST['class_day']\n bag = request.session.get('bag', {})\n\n if day:\n if item_id in list(bag.keys()):\n if day in bag[item_id]['items_by_day'].keys():\n bag[item_id]['items_by_day'][day] += quantity\n messages.success(request, f'Updated {a_class.name} ({day.upper()}) quantity to {bag[item_id][\"items_by_day\"][day]}')\n else:\n bag[item_id]['items_by_day'][day] = quantity\n messages.success(request, f'Added {a_class.name} ({day.upper()}) to your bag')\n else:\n bag[item_id] = {'items_by_day': {day: quantity}}\n messages.success(request, f'Added {a_class.name} ({day.upper()}) to your bag')\n else:\n if item_id in list(bag.keys()):\n bag[item_id] += quantity\n messages.success(request, f'Updated {a_class.name} quantity to {bag[item_id]}')\n else:\n bag[item_id] = quantity\n messages.success(request, f'Added {a_class.name} to your bag')\n\n request.session['bag'] = bag\n return redirect(redirect_url)",
"def add_item(self):\n item = models.Item(item_name=self.test_item,\n list_id=1,\n description=self.test_item_desc)\n item.add()",
"def post(self, *args, **kw):\n id_tipo_item = UrlParser.parse_id(request.url, \"tipositems\")\n url_action = \"./\"\n\n pp = PoseePermiso('redefinir tipo item',id_tipo_item=id_tipo_item)\n \n if not pp.is_met(request.environ):\n flash(pp.message % pp.nombre_permiso, 'warning')\n redirect(url_action)\n \n if kw.has_key(\"sprox_id\"):\n del kw[\"sprox_id\"]\n\n tipo = TipoItem.por_id(id_tipo_item)\n try:\n tipo.agregar_atributo(**kw)\n except NombreDeAtributoError, err:\n flash(unicode(err), \"warning\")\n\n redirect(url_action)",
"def create_item():\n #if not request.json:\n # abort(400)\n parser = reqparse.RequestParser()\n parser.add_argument('item_code', type=int, required=False, help=\"Item code missing\")\n parser.add_argument('item_name', type=str, required=True, help=\"Item name missing\")\n parser.add_argument('size', type=str, required=True, help=\"Size missing\")\n parser.add_argument('color', type=str, required=True, help=\"Color missing\")\n parser.add_argument('quality', type=str, required=True, help=\"Quality missing\")\n parser.add_argument('username', type=str, required=True, help=\"Username missing\")\n args = parser.parse_args(strict=True)\n user_code = get_user_code(args['username'])\n if user_code is None:\n return make_response(jsonify({'error': 'User does not exists'}), 400)\n new_item = dict(\n item_code = args['item_code'],\n item_name = args['item_name'],\n size_code = get_size_code( args['size']),\n color_code = get_color_code( args['color']),\n quality_code = get_quality_code( args['quality'])\n )\n try:\n u = models.Items(**new_item)\n db.session.add(u)\n db.session.commit()\n except sqlalchemy.exc.IntegrityError, e:\n return make_response(jsonify({'error': 'item code already exists.'}), 400)\n\n return make_response(jsonify({'success': True}))",
"def add_to_basket(request, item_id):\n\n artwork = get_object_or_404(Artwork, pk=item_id)\n quantity = int(request.POST.get('quantity'))\n redirect_url = request.POST.get('redirect_url')\n\n basket = request.session.get('basket', {})\n\n if item_id in list(basket.keys()):\n basket[item_id] += quantity\n messages.success(\n request, f'Added {artwork.title} to {basket[item_id]}.' +\n 'Please make sure your' +\n ' basket total is not more than the quantity available.')\n\n else:\n basket[item_id] = quantity\n messages.success(request, f'Added {artwork.title} to your basket')\n request.session['basket'] = basket\n return redirect(redirect_url)",
"def new_item():\n if request.method == 'POST':\n new_item = Item(\n category_id=int(request.form['category']),\n name=request.form['name'],\n description=request.form['description'],\n created_date=datetime.datetime.now(),\n user_id=login_session['user_id'])\n session.add(new_item)\n session.commit()\n return redirect(\n url_for(\n 'item_details',\n category_id=new_item.category_id,\n item_id=new_item.id))\n else:\n categories = session.query(\n Category).all()\n return render_template(\n 'views/add.html',\n categories=categories)",
"def test_add_meal(self):\n with self.client:\n response = self.add_meal(\"pilawo\", 15000)\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 201)\n self.assertEqual(data.get('message'), \"Meal successfully created\")",
"def add_to_shoppingbag(request, item_id):\n\n quantity = int(request.POST.get('quantity'))\n redirect_url = request.POST.get('redirect_url')\n size = None\n if 'merchandise_size' in request.POST:\n size = request.POST['merchandise_size']\n shoppingbag = request.session.get('shoppingbag', {})\n\n if size:\n if item_id in list(shoppingbag.keys()):\n if size in shoppingbag[item_id]['items_by_size'].keys():\n shoppingbag[item_id]['items_by_size'][size] += quantity\n else:\n shoppingbag[item_id]['items_by_size'][size] = quantity\n else:\n shoppingbag[item_id] = {'items_by_size': {size: quantity}}\n else:\n if item_id in list(shoppingbag.keys()):\n shoppingbag[item_id] += quantity\n else:\n shoppingbag[item_id] = quantity\n\n request.session['shoppingbag'] = shoppingbag\n return redirect(redirect_url)",
"def add(self, item, request):\n\n assert (\n isinstance(item, Election)\n or isinstance(item, ElectionCompound)\n or isinstance(item, Vote)\n )\n\n self.session.add(item)\n self.session.flush()\n\n self.update(item, request)\n self.session.flush()",
"def add(self, request, **kwargs):\n basketId = request.data.get('basket')\n if not basketId:\n return Response(data={'error': 'BasketId is missing.'}, status=status.HTTP_400_BAD_REQUEST)\n\n basket = Basket.objects.filter(id=basketId).first()\n if not basket:\n return Response(data={'error': 'The basket with id {0} not found on database.'\n .format(basketId)}, status=status.HTTP_400_BAD_REQUEST)\n\n productCode = request.data.get('product')\n if not productCode:\n return Response(data={'error': 'Product code is not provided.'}, status=status.HTTP_400_BAD_REQUEST)\n product = Product.objects.filter(code=productCode).first()\n if not product:\n return Response(data={'error': 'The product with code {0} not found on database.'\n .format(productCode)}, status=status.HTTP_400_BAD_REQUEST)\n\n basketItem = BasketItem.objects.filter(\n basket=basket, product=product).first()\n if basketItem:\n basketItem.qty += 1\n else:\n basketItem = BasketItem(basket=basket, product=product)\n\n basketItem.save()\n serializer = serializers.BasketSerializer(basket)\n return Response(serializer.data)",
"def add(self, *items):",
"def add_item():\n\n form = ItemForm()\n # Query for select field\n form.category_id.query = Category.query.filter(\n Category.user_id == current_user.id).all()\n\n if form.validate_on_submit():\n new_item = Item(\n category_id=form.category_id.data.id,\n name=form.name.data.capitalize(),\n description=form.description.data,\n user_id=current_user.id)\n db.session.add(new_item)\n db.session.commit()\n flash(\"New item '{}' was successfully created\".format(\n form.name.data.capitalize()), category='success')\n return redirect(url_for('url.index'))\n\n return render_template(\n 'forms/form.html',\n form_title='Add Item',\n form=form,\n form_name='item',\n action=url_for('url.add_item'))",
"async def additem(self, ctx, *, name: str):\n try:\n item = dict()\n item[\"name\"] = name\n check = lambda x: x.channel is ctx.channel and x.author is ctx.author\n await ctx.send(await _(ctx, \"Describe the item (a description for the item)\"))\n response = await self.bot.wait_for(\"message\", timeout=120, check=check)\n if response.content.lower() == \"cancel\":\n await ctx.send(await _(ctx, \"Cancelling!\"))\n return\n\n item[\"description\"] = response.content\n item[\"meta\"] = dict()\n\n await ctx.send(\n await _(ctx, \"Additional information? (Attributes formatted in a list i.e `color: 400, value: 200` \"\n \"Set an image for this item with the `image` key i.e. `image: http://image.com/image.png` \"\n \"Set this item as usable by adding `used` key i.e. `used: You open the jar and the bird flies away`\"))\n while True:\n response = await self.bot.wait_for(\"message\", timeout=60, check=check)\n if response.content.lower() == \"cancel\":\n await ctx.send(await _(ctx, \"Cancelling!\"))\n return\n elif response.content.lower() == \"skip\":\n await ctx.send(await _(ctx, \"Skipping!\"))\n break\n else:\n try:\n if \"\\n\" in response.content:\n res = response.content.split(\"\\n\")\n else:\n res = response.content.split(\",\")\n for val in res:\n key, value = val.split(\": \")\n key = key.strip().casefold()\n value = value.strip()\n item[\"meta\"][key] = value\n else:\n break\n except:\n await ctx.send(await _(ctx, \"Invalid syntax, try again.\"))\n await self.bot.di.new_item(ctx.guild, ServerItem(**item))\n await ctx.send(await _(ctx, \"Item successfully created\"))\n\n except asyncio.TimeoutError:\n await ctx.send(await _(ctx, \"Timed out! Try again\"))"
]
| [
"0.7085926",
"0.69619817",
"0.68986714",
"0.67922443",
"0.67652833",
"0.66561204",
"0.66533005",
"0.6582756",
"0.65212536",
"0.6510233",
"0.64977646",
"0.6480944",
"0.64776444",
"0.6473709",
"0.64668727",
"0.64668727",
"0.64668727",
"0.6349618",
"0.6285246",
"0.6214289",
"0.6162446",
"0.6161522",
"0.6156639",
"0.6144248",
"0.61423373",
"0.6115094",
"0.6101158",
"0.609843",
"0.60778433",
"0.6075494"
]
| 0.7788327 | 0 |
Adds a new exercise and stores information about it in the class dictionary. Params needed for the post request's payload | def add_exercise(self):
    # Take the exercise entries from the TOML file
    entries = self.cfg.get("payload", {}).get("exercise")
    # Check for valid entries
    if entries:
        # Construct payload
        for payload in entries:
            # Check the entry against a JSON schema
            check.check_entry(path='schemas/exercise.json', test=payload)
            # Post request
            requests.post(API.url_exercise, data=payload, headers=self.headers, timeout=2) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_exercise():\n json_data = request.get_json()\n new_question = json_data.get(\"new_question\")\n new_answer = json_data.get(\"new_answer\")\n user_id = session.get(\"email\")\n try:\n fm.add_exercise(new_question, new_answer, user_id)\n msg = \"Exercise added for user: {}\".format(user_id)\n app.logger.info(msg)\n return jsonify({\"message\": \"add exercise call completed\"})\n except Exception as e:\n msg = \"The question or the answer to be added has exceeded the max char limit\"\n app.logger.error(msg)\n abort(400)",
"def add_exercise( self, exercise ):\n self.exercises.append( exercise )",
"def new_exercise():\n db = get_db()\n users = db.users\n exercises = db.exercises\n data = request.json\n \n expected_fields = ['name', 'pic_urls', 'instructions', 'created_by']\n # If the feilds in data don't match the expected fields\n if not set(expected_fields) == set(data):\n raise APIException(status_code=400, message='data does not match the expected fields')\n if not ( isinstance(data['name'], str) and isinstance(data['instructions'], str)\n and isinstance(data['created_by'], str) and isinstance(data['pic_urls'], list) ):\n raise APIException(status_code=400, message='name, created_by, and instructions must be strings')\n\n for pic in data['pic_urls']:\n if not isinstance(pic, str):\n raise APIException(status_code=400, message='each pic_url must be a string')\n\n # Check if created_by is an existing user\n cursor = users.find({\"user_id\": data['created_by']})\n if cursor.count() is 0:\n raise APIException(status_code=404, message='user_id represented by created_by does not exist')\n elif cursor.count() > 1:\n raise APIException(status_code=500, message='Error, multiple users with same user_id (created_by) exist, which is not allowed')\n \n data['workouts_used_in'] = 0\n\n # Create n grams for exercise to be used in search\n data['ngrams'] = ' '.join(make_ngrams(str(data['name']).lower()))\n\n # Insert the new exercise and return its newly created key\n postid = exercises.insert_one(data)\n\n # Index the exercises in the database to be able to be searched\n exercises.search.create_index(\n [\n ('ngrams', 'text'),\n ],\n name='search_exercises',\n weights={\n 'ngrams': 100\n }\n )\n\n return_data = {\"exercise_id\": str(postid.inserted_id)}\n return flask.jsonify(**return_data), 200",
"def test_create(self):\n self.assertEqual(Exercise.objects.count(), 2)\n payload = {\n 'name': 'Pecho plano',\n 'description': 'Some description',\n 'muscle_group': 'pecho'\n }\n self.client.post('/exercises/', data=payload)\n self.assertEqual(Exercise.objects.count(), 3)",
"def add_exercise(user_id, start_time, end_time, repetitions, weight, exercise, variant, skeleton_data=''):\n workout_id = _get_workout_id(_get_username(user_id))\n\n if workout_id is None:\n raise ValueError('This user does not appear to exist. Failed to add exercise')\n else:\n # Read the skeleton data file if it was added to the query\n if skeleton_data:\n skeleton_data = skeleton_data.read()\n\n UserExerciseData(\n user_id=user_id,\n workout_id=workout_id,\n start_time=start_time,\n end_time=end_time,\n repetitions=repetitions,\n weight=weight,\n exercise=exercise,\n variant=variant,\n skeleton_data=skeleton_data\n )",
"def add_question():\n data = request.get_json()\n question = data['question']\n answer = data['answer']\n difficulty = data['difficulty']\n category = data['category']\n for key, value in data.items():\n if not value:\n return jsonify({'success': False, 'error': 400,\n 'message': f'{key} field is missing a value'\n }), 400\n new_question = Question(question, answer, category, difficulty)\n new_question.insert()\n return jsonify({'success': True, 'message': 'Question was created',\n 'question': new_question.format()}), 201",
"def add_question():\n data = request.get_json()\n\n questionId = len(questions)\n questionId += 1\n\n details = data.get('details')\n\n if not details or details.isspace():\n return jsonify({\n \"message\": \"Sorry, you didn't enter any question!\"\n }), 400\n question = Question(questionId, details)\n questions.append(question)\n\n return jsonify({\n \"id\": questionId,\n \"question\": question.__dict__,\n \"message\": \"Question added successfully!\"\n }), 201",
"def create(self, request):\n serializer = ExperimentSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n\n return send_response(request.method, serializer)",
"def add_score():\n json_data = request.get_json()\n exercise_id = json_data.get(\"exercise_id\")\n score = json_data.get(\"score\")\n user_id = session.get(\"email\")\n fm.add_attempt(exercise_id, score, user_id)\n\n msg = \"Attempt added. Exercise ID: {} Score: {}\"\\\n .format(exercise_id, score)\n app.logger.info(msg)\n return jsonify(dict(result=\"success\"))",
"def test_create_empty_payload(self):\n response = self.client.post('/exercises/', data={})\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)",
"def test_add_question(self): \n data = {\n 'question':'Test Question',\n 'answer':'Test Answer',\n 'category':'2',\n 'difficulty':'1'\n } \n res = self.client().post('/questions/add', \n data=json.dumps(data),\n content_type='application/json')\n self.data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n json_res = json.loads(res.get_data(as_text=True))",
"def create_and_exercise(\n self,\n __template_id,\n __payload,\n __choice_name,\n __argument=None,\n *,\n workflow_id=None,\n command_id=None,\n read_as=None,\n act_as=None,\n ):\n raise NotImplementedError",
"def run(self):\n # NOTE: since this directive has a complementary `solution` directive\n # it may be better to put the two in a separate `exercise` domain\n env = self.state.document.settings.env\n\n # get the user-provided label of the exercise\n label = self.arguments[0]\n assert label.startswith('ex:'), (\n 'The exercise label ({}) must start with the \"ex:\" prefix.'.format(\n label))\n\n if self.content:\n content_string = '\\n'.join(self.content)\n content_list = self.content\n content_offset = self.content_offset\n else:\n content_string = read_exercise(env, label)\n content_list = content_string.split('\\n')\n content_offset = 0\n\n # we do not assign an id to this node (despite it being a prerequisite\n # for assigning it a fignum) as this will happen automatically when\n # a name is assigned to this node\n exercise_content_node = exercise(content_string)\n\n # since the label of the node was not given in the standard docutil\n # manner (via the optional `name` parameter), it needs to be manually\n # assigned to this instance of the exercise directive and processed,\n # i.e., it registers the label with the domain (standard `std` domain\n # in this case); it also checks whether the labels is not duplicated\n self.options['name'] = label\n self.add_name(exercise_content_node)\n # these steps ensure that the node created by this directive can be\n # referenced with `ref` and `numref`\n\n # build an empty exercise title, the fignum is injected when building\n # its HTML representation\n exercise_title_node = exercise_title()\n\n # add title to the exercise and process the content\n exercise_content_node += exercise_title_node\n self.state.nested_parse(\n content_list, content_offset, exercise_content_node)\n\n return [exercise_content_node]",
"def add_post():\n\tt_id = db.survey.insert(\n\t\tquestion = request.vars.question,\n\t\tuser_email = request.vars.email,\n\t\tuser_name = get_user_name_from_email(request.vars.email),\n\t\topt1 = request.vars.opt1,\n\t\topt2 = request.vars.opt2,\n\t\topt3 = request.vars.opt3,\n\t\topt4 = request.vars.opt4,\n\t\t#created_on_human = humanize.naturaltime(datetime.datetime.utcnow()),\n\n\t)\n\tt = db.survey(t_id)\n\treturn response.json(dict(post=t))",
"def create_problem():\n # Admin check\n if not current_user.admin == 1:\n return serve_error('You must be an admin to create problems',\n response_code=401)\n\n try:\n # Convert the JSON to python array of dictionaries\n cases = request.form['cases']\n cases = loads(cases)\n for case in cases:\n if 'input' not in case or 'output' not in case:\n return serve_error(\n 'Sample case(s) were not formed correctly',\n response_code=400)\n\n # Create the problem\n name = request.form['name'][:32]\n shortname = name.lower().replace(' ', '')\n problem = Problem(\n name=name,\n shortname=shortname\n )\n if 'difficulty' in request.form:\n problem.difficulty = request.form['difficulty']\n if 'appeared_in' in request.form:\n problem.appeared = request.form['appeared_in']\n\n # Create the problem data and add it to the database\n problem_data = ProblemData(\n description=request.form['description'],\n input_desc=request.form['input_desc'],\n output_desc=request.form['output_desc']\n )\n if 'time_limit' in request.form:\n problem_data.time_limit = request.form['time_limit']\n\n # Create list of sample cases\n case_num = 1\n sample_cases = list()\n for case in cases:\n sample = SampleCase(\n case_num=case_num,\n input=case['input'],\n output=case['output']\n )\n case_num += 1\n sample_cases.append(sample)\n\n in_file = zipfile.ZipFile(request.files['in_file'])\n out_file = zipfile.ZipFile(request.files['out_file'])\n sol_file = request.files['sol_file']\n\n # If any required values were missing, serve an error\n except KeyError as err:\n return serve_error('Form field not found: ' + err[0],\n response_code=400)\n\n # Commit everything to the database\n pid = problem.commit_to_session()\n problem_data.pid = pid\n problem_data.commit_to_session()\n for case in sample_cases:\n case.pid = pid\n case.commit_to_session()\n\n # Store the judge data\n directory = os.path.join(app.config['DATA_FOLDER'],\n 'problems', str(problem.pid))\n in_file.extractall(directory)\n out_file.extractall(directory)\n os.mkdir(os.path.join(directory, 'test'))\n sol_file.save(os.path.join(directory, 'test', sol_file.filename))\n\n return serve_response({\n 'name': problem.name,\n 'shortname': problem.shortname,\n 'description': problem_data.description,\n 'input_desc': problem_data.input_desc,\n 'output_desc': problem_data.output_desc,\n 'sample_cases': cases,\n 'pid': problem.pid,\n 'difficulty': problem.difficulty\n })",
"def post(self):\n data = request.json\n create_testing_scenario(data)\n return None, 201",
"def add_entry(name, title, duration, notes):\n clear()\n print('Entry added to work log!')\n return Entry.create(\n employee_name=name,\n task_title=title,\n time_spent=duration,\n task_notes=notes\n )",
"def create_new_test_run():\n if debug:\n print('[DEBUG] Func: create_new_test_run...')\n\n new_test_run_url = \"https://eei.testrail.com/index.php?/api/v2/add_run/{0}=\".format(project_id)\n\n new_test_run_json = {\n \"suite_id\": suite_id,\n \"name\": suite_name,\n \"assignedto_id\": assignedto_id,\n \"include_all\": False,\n \"case_ids\": [testcase_id]\n }\n\n new_test_run = requests.post(new_test_run_url, auth=authorization, json=new_test_run_json)\n\n if str(new_test_run.status_code) != '200':\n print('[ERROR] new_test_run: non 200 status code... ' + str(new_test_run.status_code))\n print(str(new_test_run.json()))\n sys.exit(1)\n\n global new_test_run_id\n new_test_run_id = str(new_test_run.json()[\"id\"])",
"def add_task():\n # get values from user\n responses = accept_inputs([\"Task label\", \"Short task description\", \"Parent task label\"])\n # insert into db\n query_no_results(\"insert into task values(?, ?, ?)\",\n [responses[\"Task label\"], responses[\"Short task description\"], responses[\"Parent task label\"]])\n print(\"New task created\")",
"def create_question():\n if request.content_type != \"application/json\":\n abort(415)\n question_text = request.json['question']\n answer = request.json['answer']\n difficulty = request.json['difficulty']\n category = request.json['category']\n\n question_object = Question(question_text, answer, category, difficulty)\n db.session.add(question_object)\n db.session.commit()\n return jsonify({\n \"success\": True\n }), 201",
"def test_user_can_add_answer(self):\n answer_details = {\"description\": \"Type Ctrl+O to exit\"}\n\n # Post a question first\n question_response = self.app.post(\n '/stackoverflowlite/api/v1/questions',\n data=json.dumps(self.question_details),\n content_type='application/json')\n\n self.assertEqual(question_response.status_code, 201)\n\n # Try to post an answer\n response = self.app.post(\n '/stackoverflowlite/api/v1/questions/1/answers',\n data=json.dumps(answer_details),\n content_type='application/json')\n\n self.assertEqual(response.status_code, 201)\n\n # Test message\n message = json.loads(response.get_data(as_text=True))[\n 'message']\n self.assertEqual(message, 'Answer successfully posted')",
"def test_new_good_lesson(client,auth_user,init_database,authenticated_request):\n response = client.post(url_for('root.index'),data=dict(email='[email protected]',password='password'))\n # try to get home\n response = client.post(url_for('lessons.add'),data=dict(name='test lesson 1',grade='1'),follow_redirects=True)\n assert response.status_code == 200\n assert '新しい単元が追加されました。'.encode('utf-8') in response.data #user informed\n assert '編集したいプリントを選べます'.encode('utf-8') in response.data #at the edit screen",
"def post(self):\n\n args = self.reqparse.parse_args()\n\n if not args['date']:\n args['date'] = datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d')\n models.Exercise.create(exercise_user=args['userId'], **args)\n\n user = models.ExerciseUser.get(id=args['userId'])\n\n marshalled_user = marshal(models.ExerciseUser.get(id=args['userId']), user_fields_with_exercises)\n marshalled_user['exercises'] = [marshal(exercise, exercise_fields) for exercise in user.exercises]\n\n return marshalled_user, 201, {'Location': url_for('resources.exercise.user', userId=args['userId'])}",
"def add():\n prev_courses = Course._file.read_db()\n course_name = input(\"Please, type course name >\")\n # check course for uniqueness/ instantiating blank class with one attribute\n c = Course(course_name)\n if c.is_course_exists():\n print(\"{} is already exists\".format(course_name))\n return\n\n prev_courses[\"courses\"].append({\n \"course_name\": course_name,\n \"teacher\": input(\"Please, type teacher's email >\"),\n \"total_place\": int(input(\"Please, type total enrolled number >\")),\n \"students\": []\n })\n Course._file.write_db(prev_courses)\n print(\"New course - {} is added\".format(course_name))\n return",
"def create_exam():\n try:\n # decode token and check role for access control\n data = request.get_json()\n user_id = authenticate_token(request)\n examiner = is_examiner(user_id)\n \n if examiner:\n # Checks if data has required fields - throws exception if not\n pre_init_check(required_fields['exam'], **data)\n\n code_found = False\n while not code_found:\n # Generates unique exam code until one is found that does not already exist\n potential_login_code = generate_exam_code()\n code_exists = Exam.query.filter_by(login_code=potential_login_code).first()\n if not code_exists:\n data['login_code'] = potential_login_code\n break\n exam = Exam(**data)\n if exam.start_date > exam.end_date:\n raise Exception('Exam end_date precedes Exam start_date')\n db.session.add(exam)\n db.session.commit()\n return jsonify(exam.to_dict()), 201\n \n return jsonify({'user_id': user_id, 'message': ['access denied, not examiner']}), 403\n except MissingModelFields as e:\n return jsonify({ 'message': e.args }), 400\n except exc.SQLAlchemyError as e:\n db.session.rollback()\n return jsonify({ 'message': e.args }), 500\n except Exception as e:\n print(traceback.format_exc())\n return jsonify({ 'message': e.args }), 500",
"def post(self):\n body = request.get_json(force=True)\n try:\n st = StudySchema(strict=True).load(body).data\n except ValidationError as err:\n abort(400, 'could not create study: {}'.format(err.messages))\n\n db.session.add(st)\n db.session.commit()\n return StudySchema(\n 201, 'study {} created'.format(st.kf_id)\n ).jsonify(st), 201",
"def test_add_new_question_success(self):\n res = self.client().post('/api/questions', json={\n \"question\": \"This is a question\",\n \"answer\": \"This is the answer\",\n \"category\": 1,\n \"difficulty\": 2\n })\n self.assertEqual(res.status_code, 200)\n data = json.loads(res.data)\n self.assertTrue(data[\"success\"])",
"def post(self):\n parser = reqparse.RequestParser()\n parser.add_argument('har_data',\n help='har_data: String of HAR (JSON) data',\n required=True)\n parser.add_argument('name', help='Custom test name')\n args = parser.parse_args()\n\n har_test = Test(data=args['har_data'])\n har_test.save()\n return (har_test, 201)",
"def test_add(self):\n # Everything added will be deleted later in test_delete.\n first_name = 'Trevor'\n last_name = 'Harvey'\n entry_date = '04/19/2012'\n title = 'Test'\n minutes = 34\n notes = 'testing entries. and regex (555) 555-3425'\n self.data.add(first_name, last_name, entry_date, title, minutes, notes)\n # second test add\n first_name = 'Nik'\n last_name = 'Silver'\n entry_date = '01/14/1827'\n title = '[email protected]'\n minutes = 34\n notes = 'This is an email test.'\n\n self.data.add(first_name, last_name, entry_date, title, minutes, notes)",
"def test_update(self):\n payload = {\n 'name': 'Pecho inclinado',\n 'description': \"New description\",\n 'muscle_group': \"pecho\"\n }\n response = self.client.put(\n '/exercises/{}/'.format(self.exer1.id), data=payload)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(\n Exercise.objects.get(id=self.exer1.id).name, payload['name'])"
]
| [
"0.78411895",
"0.7553409",
"0.7353586",
"0.6304569",
"0.6214312",
"0.61633617",
"0.6020029",
"0.5988647",
"0.5927404",
"0.58324516",
"0.581646",
"0.57472754",
"0.5722411",
"0.57162255",
"0.56089395",
"0.55933064",
"0.55925333",
"0.5591956",
"0.5584668",
"0.55407256",
"0.5531407",
"0.55190325",
"0.551141",
"0.5508195",
"0.5450181",
"0.54477894",
"0.5446901",
"0.54464364",
"0.54126865",
"0.53916836"
]
| 0.84900373 | 0 |
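
The add_exercise document above validates each TOML payload with check.check_entry(path, test) before posting, but that helper's body is not part of the record. A minimal sketch of what such a validator could look like, assuming the standard jsonschema package and plain-JSON schema files on disk (both assumptions, not shown in the record):

import json

import jsonschema


def check_entry(path, test):
    # Load the schema file named by the caller, e.g. 'schemas/exercise.json'
    with open(path) as schema_file:
        schema = json.load(schema_file)
    # jsonschema.validate raises ValidationError when the payload does not match
    jsonschema.validate(instance=test, schema=schema)
    return True

A payload that fails validation then surfaces as a ValidationError before any request is sent.
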
Adds a new workout and stores information about it in the class dictionary. Params needed for the post request's payload | def add_workout(self):
    # Take the workout entries from the TOML file
    workouts = self.cfg.get('payload', {}).get('workout')
    # Check for valid entries
    if workouts:
        # Construct payload
        for payload in workouts:
            # Parse the workout payload
            ready = self.construct_payload(parse=copy.deepcopy(payload), dele='day')
            # Check the entry against a JSON schema
            check.check_entry(path='schemas/workout.json', test=ready)
            # Post request
            b1 = self.add_post(ready, API.url_workout, self.workouts)
            # Check for days
            if 'day' in payload.keys() and payload['day'] != [{}]:
                b2 = self.add_day(self.workouts[-1].get('id'))
            else:
                return b1
            if b2 is not None:
                return b1 and b2
    else:
        return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_add_workout(self):\n body = Workout()\n response = self.client.open(\n '/workout',\n method='POST',\n data=json.dumps(body),\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))",
"def createWorkout():\n session['num_poses'] = int(request.args.get('num_poses'))\n session['difficulty'] = [request.args.get('difficulty')] # for now just putting in 1 difficulty\n # want difficulty to include beginner & intermediate if intermediate is specified, but how do i skew towards intermediate?\n\n # what if emphasis is not filled in or empty?\n emphasis = request.args.get('emphasis')\n if emphasis == \"\":\n session['emphasis'] = []\n else:\n session['emphasis'] = [int(emphasis)] # session['emphasis'] is a list of integers\n\n session['timingOption'] = request.args.get('timingOption')\n\n workout_list = generateWorkout(session['num_poses'], difficulty=session['difficulty'], categories=session['emphasis'])\n # generateWorkout returns None if it can't find any poses that match the criteria\n\n workout_jsonlist = []\n\n # unpack the workout list to display on the page\n if workout_list:\n for i, pose in enumerate(workout_list):\n workout_jsonlist.append({'pose_id' : pose.pose_id, 'imgurl': pose.img_url, 'name': pose.name, 'is_leftright': pose.is_leftright})\n session['error'] = \"\"\n else:\n session['error'] = \"No Poses Matched. Try creating another workout\"\n \n session['workout'] = workout_jsonlist\n\n # do I want to create a workout automatically? and then save workout will just be saving\n # it to be associated with a certain user?\n\n return redirect('/workout') # go to the workout route to display the workout",
"def saveWorkout():\n \n # TODO associate that workout with a user\n results = {'isInSession': False}\n\n if session.get('workout'):\n results['isInSession'] = True\n # unpack the other parameters from the form\n workoutName = request.form.get('workoutName')\n userName = request.form.get('userName')\n description = request.form.get('description')\n\n #generate a workout and save it\n workout = Workout(duration=len(session['workout']),name=workoutName,author=userName,description=description)\n db.session.add(workout)\n db.session.commit()\n\n for pose in session['workout']:\n poseworkout = PoseWorkout(pose_id=pose['pose_id'], workout_id=workout.workout_id)\n db.session.add(poseworkout)\n db.session.commit()\n\n # refine weights based on that saved workout\n refineWeights(workout)\n\n else: \n print(\"no workout in session\")\n\n return jsonify(results)",
"def createWorkoutJson():\n num_poses = int(request.args.get('num_poses'))\n workout_list = generateWorkout(num_poses)\n\n workout_jsonlist = []\n\n # unpack the workout list to display on the page\n for i, pose in enumerate(workout_list):\n workout_jsonlist.append({'pose_id' : pose.pose_id, 'imgurl': pose.img_url, 'name': pose.name})\n \n session['workout'] = workout_jsonlist\n # do I want to create a workout automatically? and then save workout will just be saving\n # it to be associated with a certain user?\n\n return jsonify({'workout_list': workout_jsonlist})",
"def new_workout():\n db = get_db()\n users = db.users\n workouts = db.workouts\n exercises = db.exercises\n requests = db.requests\n data = request.json\n \n # Check if mentor_id and mentee_id are present in data\n if not (\"mentor_id\" in data and \"mentee_id\" in data):\n raise APIException(status_code=400, message='data must contain both mentor_id and mentee_id')\n if not ( isinstance(data['mentor_id'], str) and isinstance(data['mentee_id'], str) ):\n raise APIException(status_code=400, message='mentor_id and mentee_id must be strings')\n\n mentor_id = data['mentor_id']\n mentee_id = data['mentee_id']\n\n # Check if mentor_id is in the users collection\n cursor_mentor = users.find({\"role\": \"Mentor\", \"user_id\": mentor_id})\n if cursor_mentor.count() is 0:\n raise APIException(status_code=400, message='mentor_id not a Mentor')\n elif cursor_mentor.count() > 1:\n raise APIException(status_code=500, message='Error, multiple Mentors with same user_id (mentor_id) exist, which is not allowed')\n \n # Check if mentee_id is in the users collection\n cursor_mentee = users.find({\"role\": \"Mentee\", \"user_id\": mentee_id})\n if cursor_mentee.count() is 0:\n raise APIException(status_code=400, message='mentee_id not a Mentee')\n elif cursor_mentee.count() > 1:\n raise APIException(status_code=500, message='Error, multiple Mentees with same user_id (mentee_id) exist, which is not allowed')\n\n # Check if mentor_id has an open request with mentee_id, otherwise a workout cannot be assigned\n cursor_request = requests.find({\"mentor_id\": mentor_id, \"mentee_id\": mentee_id, \"transaction_over\": False})\n if cursor_request.count() is 0:\n raise APIException(status_code=404, message='mentee_is either does not have a request with mentor_id, or the transaction is already over')\n \n request_id = \"\"\n for document in cursor_request:\n if document['mentor_accepted'] is False:\n raise APIException(status_code=400, message='Mentor is yet to accept workout request')\n if len(document['workouts_created']) >= document['num_workouts_requested']:\n raise APIException(status_code=400, message='the number of workouts that was requested have all been created already')\n request_id = str(document[\"_id\"])\n\n # Need to delete these 2 fields for the validation function, then bring them back afterwards\n del data['mentor_id']\n del data['mentee_id']\n validate_workout_data(data)\n data['mentor_id'] = mentor_id\n data['mentee_id'] = mentee_id\n \n # Insert the new workout and store its newly created workout_id\n postid = workouts.insert_one(data)\n workout_id = str(postid.inserted_id)\n\n # For each exercise that was in the workout, update its \"workouts_used_in\"\n for exercise in data['exercises']:\n result_exercise = exercises.update_one(\n {\"_id\": ObjectId( exercise[\"exercise_id\"] )},\n {\n \"$inc\": {\n \"workouts_used_in\": 1\n }\n }\n )\n\n # Update the request object to include the newly created workout\n result_exercise = requests.update_one(\n {\"_id\": ObjectId( request_id )},\n { '$push': { \"workouts_created\": workout_id} }\n )\n\n # return the newly created workout_id\n return_data = {\"workout_id\": workout_id}\n return flask.jsonify(**return_data), 200",
"def add_work(self, identifier, work):\n self.works.append((identifier, work))",
"def loadWorkout(workout_id):\n\n # get the workout by id and puts that into the session['workout']\n # also set the emphasis, duration, timing option, emphasis\n # see the /createworkout route\n # make sure session['workout'] is in the right format first (make it a function?)\n workout = Workout.query.get(workout_id)\n session['num_poses'] = workout.duration\n session['difficulty'] = [] # for now don't put any difficulty\n session['emphasis'] = [] # for now don't put any emphasis\n session['timingOption'] = 'Timed' # always make it timed by default\n\n workout_jsonlist = []\n\n # unpack the workout list to display on the page\n for i, pose_workout in enumerate(workout.pose_workouts):\n workout_jsonlist.append({'pose_id' : pose_workout.pose.pose_id, \n 'imgurl': pose_workout.pose.img_url, \n 'name': pose_workout.pose.name,\n 'is_leftright': pose_workout.pose.is_leftright})\n \n session['workout'] = workout_jsonlist\n\n return redirect(\"/workout\")",
"def add(self, database):\n id = database.session.query(Workout.id) \\\n .filter(Workout.external_id == self.external_id) \\\n .filter(Workout.source == self.source) \\\n .first()\n if id:\n # don't add if this workout has already been added\n return False\n else:\n try:\n database.session.add(self)\n database.session.flush()\n except exc.SQLAlchemyError as e:\n logger.error(\"Database error: {}\".format(e.args))\n return False\n logger.info(\"Added new workout {}\".format(self))\n self.handle_duplicates(database)\n return True",
"def create_work_item(self):",
"def add_station(self, station_id=None, time=None, location=None):",
"def add_exercise(self):\r\n\r\n # Take the exercise entires from TOML file\r\n entries = self.cfg.get(\"payload\",{}).get(\"exercise\")\r\n # Check for valid entires\r\n if entries:\r\n # Construct payload \r\n for payload in entries:\r\n # Check the entry vs a json schema\r\n check.check_entry(path='schemas/exercise.json', test=payload)\r\n # Post request\r\n requests.post(API.url_exercise, data = payload, headers = self.headers, timeout = 2)",
"def test_aws_service_api_vm_workshift_post(self):\n pass",
"def post(self, request, nnid, wfver):\n try:\n return_data = WorkFlowSimpleManager().create_workflow(nnid, wfver, request.data['type'])\n return Response(json.dumps(return_data))\n except Exception as e:\n return_data = {\"status\": \"404\", \"result\": str(e)}\n return Response(json.dumps(return_data))",
"def add(self, workout, database):\n if not database.session:\n logger.error(\"no database session\")\n return False\n\n self.cleanup_sportstype(workout)\n self.associate_sport(database)\n id = database.session.query(SportsType.id).filter(\n SportsType.name == self.name).first()\n if id:\n self.id = id[0]\n return False\n else:\n try:\n database.session.add(self)\n database.session.flush()\n except exc.SQLAlchemyError as e:\n logger.error(\"Database error: {}\".format(e.args))\n return False\n logger.info(\"Adding new sportstype '{}' id {} of sport {}\".format(\n self.name, self.id, self.sport_id))\n return True",
"def addWorkspace(self, dryrun):\n pass",
"def test_update_workout(self):\n body = Workout()\n response = self.client.open(\n '/workout/{id}'.format(id='id_example'),\n method='PUT',\n data=json.dumps(body),\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))",
"def add_day(self, w_id):\r\n\r\n # Take the weight entries from TOML file\r\n workouts = self.cfg.get('payload',{}).get('workout')\r\n # Check for valid entries\r\n if workouts:\r\n for entries in workouts:\r\n if entries:\r\n # Construct payload \r\n for payload in entries.get('day',{}):\r\n # Parse the day payload\r\n ready = self.construct_payload(parse = copy.deepcopy(payload), dele='sets')\r\n ready['training'] = w_id\r\n # Check the entry vs a json schema\r\n check.check_entry(path='schemas/day.json', test=ready)\r\n # Post request\r\n b1 = self.add_post(ready, API.url_day, self.days)\r\n # Check for sets\r\n if 'sets' in payload.keys() and payload['sets'] != [{}]:\r\n b2 = self.add_sets(self.days[-1].get('id'))\r\n else:\r\n return b1\r\n if b2 != None:\r\n return b1 and b2\r\n else:\r\n return False",
"def add_workflow_step(self, wf_step):\n self._data_dict[self.KEY_WF_STEPS].append(wf_step)",
"def add_task():\n # get values from user\n responses = accept_inputs([\"Task label\", \"Short task description\", \"Parent task label\"])\n # insert into db\n query_no_results(\"insert into task values(?, ?, ?)\",\n [responses[\"Task label\"], responses[\"Short task description\"], responses[\"Parent task label\"]])\n print(\"New task created\")",
"def assign_workout(request, workout_id):\n c = {}\n c.update(csrf(request))\n workout = get_object_or_404(Workout, pk=workout_id)\n team = Member.objects.get(user=request.user).team\n if request.method == 'POST': # If the form has been submitted...\n form = IndividualAssignForm(request.POST)\n if form.is_valid():\n member = Member.objects.get(pk=request.POST['member'])\n individual = Individual(workout=workout, member=member, \n date_suggested=form.cleaned_data['date_suggested'],\n time_suggested=form.cleaned_data['time_suggested'],\n )\n try:\n individual.full_clean()\n individual.save()\n messages.add_message(request, messages.SUCCESS, 'Individual scheduled!')\n if not request.POST.get('add_another'):\n return HttpResponseRedirect(reverse('workout_details', args=(workout.id,)))\n else:\n return HttpResponseRedirect(reverse('assign_workout', args=(workout.id,)))\n except Exception:\n messages.add_message(request, messages.ERROR, 'Something happend with form!')\n return render_to_response(\"workouts/assign.html\", {'form':form, 'workout': workout, 'team':team, 'c':c},\n context_instance=RequestContext(request))\n\n else:\n form = IndividualAssignForm()\n\n return render_to_response(\"workouts/assign.html\", {'form': form, 'workout': workout, 'team':team, 'c':c},\n context_instance=RequestContext(request))",
"def create(self, request):\n serializer = data_serializers.CreateWorkArrangementSerializer(data=request.data)\n if serializer.is_valid(raise_exception=True):\n request_data = serializer.save()\n try:\n work_arrangement = self.controller.add_work_arrangement(request_data=request_data)\n serializer = data_serializers.PresentWorkArrangementsDataSerializer(work_arrangement)\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n except (\n domain_exceptions.ObjectEntityDoesNotExist,\n domain_exceptions.TeamDoesNotExist,\n domain_exceptions.MultipleWorksForFullTimeEmployee,\n domain_exceptions.MultipleWorkArrangementInOneTeam,\n domain_exceptions.Max40HoursExceeded\n )as e:\n return Response(e.message, status=status.HTTP_400_BAD_REQUEST)\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)",
"def add_task_to_pbi(tfs_instance, task, pbi_data):\n task['System.AreaId'] = pbi_data['System.AreaId'] # Area Path\n task['System.IterationId'] = pbi_data['System.IterationId'] # Iteration Path\n try:\n new_task = tfs_instance.add_workitem(task,\n pbi_data.id,\n workitem_type=\"Task\") # Add a new task\n except requests.exceptions.HTTPError as error:\n print(f'Oops.. there was an HTTP error: {error}')\n return\n print(f'Task {str(new_task)} was added successfully')",
"def post(self, story_id):\n req_json = request.get_json()\n if req_json is None:\n return get_error_response(\"Missing data, unable to create addition\")\n owner_id = req_json.get(\"owner_id\", None)\n parent_id = req_json.get(\"parent_id\", None)\n content = req_json.get(\"content\", None)\n\n success, message = addition_api.create_addition(story_id, owner_id, parent_id, content)\n return get_success_response(message) if success else get_error_response(message)",
"def post(self, request):\n\t\tserializer = PeopleSerializer.WorkingHourSerializer(data=request.data)\n\t\tif serializer.is_valid():\n\t\t\tserializer.save()\n\t\t\treturn Response(serializer.data, status=200)\n\t\treturn Response(serializer.errors, status=422)",
"def post(self):\n arguments = self.request.arguments\n print arguments\n x = arguments['latitude']\n y = arguments['longitude']\n problem = Problem(\n title=arguments['title'],\n content=define_values(arguments,'content'),\n proposal=define_values(arguments,'proposal'),\n severity=define_values(arguments,'severity', '1'),\n status=define_values(arguments,'status','UNSOLVED'),\n location=create_location(x, y),\n problem_type_id=arguments['problem_type_id'],\n region_id=define_values(arguments,'region_id'))\n self.sess.add(problem)\n self.sess.commit()\n activity = ProblemsActivity(\n problem_id=problem.id,\n user_id=self.get_current_user(),\n datetime=get_datetime(),\n activity_type=\"ADDED\")\n self.sess.add(activity)\n self.sess.commit()\n if self.get_status() is 200:\n self.write({'id': problem.id})",
"def save_work_item(self):\n\n try:\n self.connection.createWorkItem(self.issue_id, self.work_item)\n except AttributeError as ae:\n if \"createWorkItem\" in ae.args[0]:\n raise YoutrackMissingConnectionException()\n else:\n raise YoutrackWorkItemIncorrectException()\n except TypeError as te:\n raise YoutrackIssueNotFoundException\n except YouTrackException as e:\n raise YoutrackIssueNotFoundException",
"def add_task(self,verbose = False):\n t = testrun()\n t.url = self.url\n t.runnable = self\n t.script = self.script\n t.location = self.location\n t.save()\n print \"Adding %s\" %(t)\n t.submit_to_wpt()",
"def addNewWorkItem(self, id, company, companySH, summary, logData):\n\t\tnow = datetime.datetime.now()\n\t\tdata = {\"status\": [\"open\", now.strftime(CASE_DATE_FORMAT)], \"companyName\": company, \"companyShorthand\": companySH, \"summary\": summary, \"log\": [[now.strftime(LOG_DATE_FORMAT), logData]]}\n\t\tself.LOGS[id] = data\n\t\tself.logByDate(now.strftime(CASE_DATE_FORMAT), id)\n\t\tself.save_logs(\"c\")\n\t\tself.autoCommit()\n\t\treturn 0",
"def add_entry(self, scenario_info):\n scenario_id, status = scenario_info[\"id\"], \"created\"\n sql = self.insert()\n self.cur.execute(\n sql,\n (\n scenario_id,\n status,\n ),\n )",
"def add_entry(self, scenario_info):\n scenario_id, status = scenario_info[\"id\"], \"created\"\n sql = self.insert()\n self.cur.execute(\n sql,\n (\n scenario_id,\n status,\n ),\n )"
]
| [
"0.72422904",
"0.6504186",
"0.64762545",
"0.62977403",
"0.5962294",
"0.5751026",
"0.57090676",
"0.55889237",
"0.5556088",
"0.5445592",
"0.5418208",
"0.5339491",
"0.531289",
"0.5290283",
"0.52862394",
"0.5234424",
"0.523056",
"0.5227997",
"0.52270323",
"0.5209627",
"0.51640904",
"0.51516503",
"0.5147497",
"0.5146402",
"0.5131662",
"0.50985897",
"0.5095759",
"0.50689083",
"0.5054727",
"0.5054727"
]
| 0.74642444 | 0 |
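
The add_workout document relies on self.construct_payload(parse=..., dele=...) to strip the nested 'day' table out of each TOML entry before posting it, since days are created by a separate call. The helper itself is not shown in the record; a plausible standalone sketch (the copy step mirrors the callers' copy.deepcopy, and the behaviour is inferred rather than confirmed):

import copy


def construct_payload(parse, dele):
    # Work on a copy so the caller's original TOML entry keeps its nested tables
    stripped = copy.deepcopy(parse)
    # Drop the key ('day', 'sets', 'link', ...) that a follow-up request creates separately
    stripped.pop(dele, None)
    return stripped

With that in place, construct_payload(parse=payload, dele='day') would return only the fields the workout endpoint itself accepts.
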
Adds a new day to a workout and stores information about it in the class dictionary. Params needed for the post request's payload | def add_day(self, w_id):
    # Take the workout entries from the TOML file
    workouts = self.cfg.get('payload', {}).get('workout')
    # Check for valid entries
    if workouts:
        for entries in workouts:
            if entries:
                # Construct payload
                for payload in entries.get('day', {}):
                    # Parse the day payload
                    ready = self.construct_payload(parse=copy.deepcopy(payload), dele='sets')
                    ready['training'] = w_id
                    # Check the entry against a JSON schema
                    check.check_entry(path='schemas/day.json', test=ready)
                    # Post request
                    b1 = self.add_post(ready, API.url_day, self.days)
                    # Check for sets
                    if 'sets' in payload.keys() and payload['sets'] != [{}]:
                        b2 = self.add_sets(self.days[-1].get('id'))
                    else:
                        return b1
                    if b2 is not None:
                        return b1 and b2
    else:
        return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_workout(self):\r\n # Take the workout entires from TOML file\r\n workouts = self.cfg.get('payload',{}).get('workout')\r\n # Check for valid entires\r\n if workouts :\r\n # Construct payload \r\n for payload in workouts:\r\n # Parse the workout payload\r\n ready = self.construct_payload(parse=copy.deepcopy(payload), dele='day')\r\n # Check the entry vs a json schema\r\n check.check_entry(path='schemas/workout.json', test=ready)\r\n # Post request\r\n b1 = self.add_post(ready, API.url_workout, self.workouts)\r\n # Check for days\r\n if 'day' in payload.keys() and payload['day'] != [{}]:\r\n b2 = self.add_day(self.workouts[-1].get('id'))\r\n else:\r\n return b1\r\n if b2 != None:\r\n return b1 and b2\r\n else:\r\n return False",
"def test_add_workout(self):\n body = Workout()\n response = self.client.open(\n '/workout',\n method='POST',\n data=json.dumps(body),\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))",
"def post(self, request):\n\t\tserializer = PeopleSerializer.WorkingHourSerializer(data=request.data)\n\t\tif serializer.is_valid():\n\t\t\tserializer.save()\n\t\t\treturn Response(serializer.data, status=200)\n\t\treturn Response(serializer.errors, status=422)",
"def AttendanceRewardExcelAddDay(builder, Day):\n return AddDay(builder, Day)",
"def add(self, course: Course, day: str) -> None:\n # if the day is not in the dictionary representing the\n # schedule, then create an empty list and add it\n if day not in self.schedule:\n day_list = []\n day_list.append(course)\n self.schedule[day] = day_list\n # if the day is already in the dictionary that represents the\n # schedule, then append the current course to the list\n # note that you do not need to re-assign the list to the\n # value in the dictionary because of the fact that the list\n # is stored as a reference which is updated through append\n else:\n current_day_list = self.schedule[day]\n current_day_list.append(course)",
"def new_entry():\n clear_screen()\n entry = {}\n entry['id'] = get_next_id()\n entry['name'] = input_name()\n print(\"How many minutes did you spend on {}?\".format(entry['name']))\n print(\"Or you may specify a format after the time, seperated by a comma\")\n entry['time_spent'] = input_time_spent()\n add_notes = input(\"Add notes? Y/n \").lower()\n if add_notes != 'n':\n entry['notes'] = input_notes()\n entry['date'] = datetime.now().strftime(FMT_MONTH_DAY_YEAR)\n with open(WORK_LOG_FILENAME, 'a', newline='') as work_log:\n work_log_writer = csv.DictWriter(work_log, fieldnames=FIELDNAMES)\n work_log_writer.writerow(entry)",
"def add_station(self, station_id=None, time=None, location=None):",
"def insert_day():\n analytics.insert_day(6)",
"def _add_record(days_dict, record, key):\n days_dict[key] = {\n \"Name\": record[\"title\"],\n \"Owner\": record[\"owner\"],\n \"Severity\": record[\"severity\"],\n \"Created\": (time.strftime(SIRPPipeline.TIME_FMT, time.gmtime(record[\"createdAt\"] / 1000.0))),\n }\n if \"endDate\" in record:\n days_dict[key].update(\n {\n \"Closed\": (time.strftime(SIRPPipeline.TIME_FMT, time.gmtime(record[\"endDate\"] / 1000.0),)),\n \"Resolution\": record[\"resolutionStatus\"],\n }\n )",
"def __init__(__self__, *,\n day: Optional[pulumi.Input[Union[str, 'WeekDay']]] = None,\n hour_slots: Optional[pulumi.Input[Sequence[pulumi.Input[int]]]] = None):\n if day is not None:\n pulumi.set(__self__, \"day\", day)\n if hour_slots is not None:\n pulumi.set(__self__, \"hour_slots\", hour_slots)",
"def createWorkout():\n session['num_poses'] = int(request.args.get('num_poses'))\n session['difficulty'] = [request.args.get('difficulty')] # for now just putting in 1 difficulty\n # want difficulty to include beginner & intermediate if intermediate is specified, but how do i skew towards intermediate?\n\n # what if emphasis is not filled in or empty?\n emphasis = request.args.get('emphasis')\n if emphasis == \"\":\n session['emphasis'] = []\n else:\n session['emphasis'] = [int(emphasis)] # session['emphasis'] is a list of integers\n\n session['timingOption'] = request.args.get('timingOption')\n\n workout_list = generateWorkout(session['num_poses'], difficulty=session['difficulty'], categories=session['emphasis'])\n # generateWorkout returns None if it can't find any poses that match the criteria\n\n workout_jsonlist = []\n\n # unpack the workout list to display on the page\n if workout_list:\n for i, pose in enumerate(workout_list):\n workout_jsonlist.append({'pose_id' : pose.pose_id, 'imgurl': pose.img_url, 'name': pose.name, 'is_leftright': pose.is_leftright})\n session['error'] = \"\"\n else:\n session['error'] = \"No Poses Matched. Try creating another workout\"\n \n session['workout'] = workout_jsonlist\n\n # do I want to create a workout automatically? and then save workout will just be saving\n # it to be associated with a certain user?\n\n return redirect('/workout') # go to the workout route to display the workout",
"def add_exercise(self):\r\n\r\n # Take the exercise entires from TOML file\r\n entries = self.cfg.get(\"payload\",{}).get(\"exercise\")\r\n # Check for valid entires\r\n if entries:\r\n # Construct payload \r\n for payload in entries:\r\n # Check the entry vs a json schema\r\n check.check_entry(path='schemas/exercise.json', test=payload)\r\n # Post request\r\n requests.post(API.url_exercise, data = payload, headers = self.headers, timeout = 2)",
"def new_workout():\n db = get_db()\n users = db.users\n workouts = db.workouts\n exercises = db.exercises\n requests = db.requests\n data = request.json\n \n # Check if mentor_id and mentee_id are present in data\n if not (\"mentor_id\" in data and \"mentee_id\" in data):\n raise APIException(status_code=400, message='data must contain both mentor_id and mentee_id')\n if not ( isinstance(data['mentor_id'], str) and isinstance(data['mentee_id'], str) ):\n raise APIException(status_code=400, message='mentor_id and mentee_id must be strings')\n\n mentor_id = data['mentor_id']\n mentee_id = data['mentee_id']\n\n # Check if mentor_id is in the users collection\n cursor_mentor = users.find({\"role\": \"Mentor\", \"user_id\": mentor_id})\n if cursor_mentor.count() is 0:\n raise APIException(status_code=400, message='mentor_id not a Mentor')\n elif cursor_mentor.count() > 1:\n raise APIException(status_code=500, message='Error, multiple Mentors with same user_id (mentor_id) exist, which is not allowed')\n \n # Check if mentee_id is in the users collection\n cursor_mentee = users.find({\"role\": \"Mentee\", \"user_id\": mentee_id})\n if cursor_mentee.count() is 0:\n raise APIException(status_code=400, message='mentee_id not a Mentee')\n elif cursor_mentee.count() > 1:\n raise APIException(status_code=500, message='Error, multiple Mentees with same user_id (mentee_id) exist, which is not allowed')\n\n # Check if mentor_id has an open request with mentee_id, otherwise a workout cannot be assigned\n cursor_request = requests.find({\"mentor_id\": mentor_id, \"mentee_id\": mentee_id, \"transaction_over\": False})\n if cursor_request.count() is 0:\n raise APIException(status_code=404, message='mentee_is either does not have a request with mentor_id, or the transaction is already over')\n \n request_id = \"\"\n for document in cursor_request:\n if document['mentor_accepted'] is False:\n raise APIException(status_code=400, message='Mentor is yet to accept workout request')\n if len(document['workouts_created']) >= document['num_workouts_requested']:\n raise APIException(status_code=400, message='the number of workouts that was requested have all been created already')\n request_id = str(document[\"_id\"])\n\n # Need to delete these 2 fields for the validation function, then bring them back afterwards\n del data['mentor_id']\n del data['mentee_id']\n validate_workout_data(data)\n data['mentor_id'] = mentor_id\n data['mentee_id'] = mentee_id\n \n # Insert the new workout and store its newly created workout_id\n postid = workouts.insert_one(data)\n workout_id = str(postid.inserted_id)\n\n # For each exercise that was in the workout, update its \"workouts_used_in\"\n for exercise in data['exercises']:\n result_exercise = exercises.update_one(\n {\"_id\": ObjectId( exercise[\"exercise_id\"] )},\n {\n \"$inc\": {\n \"workouts_used_in\": 1\n }\n }\n )\n\n # Update the request object to include the newly created workout\n result_exercise = requests.update_one(\n {\"_id\": ObjectId( request_id )},\n { '$push': { \"workouts_created\": workout_id} }\n )\n\n # return the newly created workout_id\n return_data = {\"workout_id\": workout_id}\n return flask.jsonify(**return_data), 200",
"def new_time_day(self, time, day, ride_duration):\n ride_duration = int(ride_duration)\n new_time_of_day = time + ride_duration\n new_day_of_week = day\n if new_time_of_day > 23:\n new_time_of_day = new_time_of_day % 24\n new_day_of_week += 1\n if new_day_of_week > 6:\n new_day_of_week = new_day_of_week % 7\n return new_time_of_day,new_day_of_week",
"def test_add_schedule(self):\n body = Schedule()\n response = self.client.open('/v1/schedule',\n method='POST',\n data=json.dumps(body),\n content_type='application/json')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))",
"def add(self, name=\"Main\", end_date=None, days=None, population=None, model=None, **kwargs):\n if end_date is not None:\n self._ensure_date(end_date, name=\"end_date\")\n tracker = self._tracker(name)\n try:\n tracker.add(\n end_date=end_date, days=days, population=population, model=model, **kwargs)\n except ValueError:\n last_date = tracker.last_end_date()\n raise ValueError(\n f'@end_date must be over {last_date}. However, {end_date} was applied.') from None\n self._tracker_dict[name] = tracker\n return self",
"def create(self, validated_data):\n\n w_h = WorkingHours.objects.create(\n courier_id=Courier.objects.get(courier_id=validated_data['courier_id']),\n work_start=validated_data['working_hours'][:5],\n work_end=validated_data['working_hours'][6:]\n )\n return w_h",
"def add_entry(name, title, duration, notes):\n clear()\n print('Entry added to work log!')\n return Entry.create(\n employee_name=name,\n task_title=title,\n time_spent=duration,\n task_notes=notes\n )",
"def creating_entry(self):\n response = \"\"\n today = str(date.today())\n curent_time = str(datetime.time(datetime.now()))\n entry = Diary(self.entry_id, self.title, self.body)\n lst = {}\n lst[\"entry_id\"] = entry.entry_id\n lst[\"title\"] = entry.title\n lst[\"date\"] = today\n lst[\"time\"] = curent_time\n lst[\"body\"] = entry.body\n lst[\"updated\"] = entry.updated\n if Validate.validate_entry(Diary.entries, entry):\n response = jsonify({\"message\": \"Duplicate data,Try again\"})\n response.status_code = 409\n else:\n Diary.entries.append(lst)\n response = jsonify({\"message\": \"Entry saved\", \"data\": lst})\n response.status_code = 201\n return response",
"def add_sets(self, d_id):\r\n\r\n # Take the weight entires from TOML file\r\n days = self.cfg.get('payload',{}).get('workout',{})[0].get('day',{})\r\n # Check for valid entires\r\n if days:\r\n for entires in days:\r\n # Construct payload \r\n for payload in entires.get('sets',{}):\r\n # Check the entry vs a json schema\r\n check.check_entry(path='schemas/set.json', test=payload)\r\n payload['exerciseday'] = d_id\r\n # Post request\r\n return self.add_post(payload, API.url_sets, self.sets)",
"def test_createItinerary(self):\n rv = self.json_post('/createItinerary/alex', dict(\n name = 'New Day',\n date = '2015-08-21T00:00:00.000Z'\n ))\n itinHash = str('alex' + \"_\" + '2015-08-21T00:00:00.000Z')\n assert itinHash in str(rv.data)\n\n rv = self.json_post('/createItinerary/alex', dict(\n name = 'New Day',\n date= '2015-08-21T00:00:00.000Z'\n ))\n assert 'Itinerary date already in use' in str(rv.data)\n\n rv = self.json_post('/createItinerary/bbbb', dict(\n name = 'New Day',\n date= '2015-08-21T00:00:00.000Z'\n ))\n assert 'Invalid username' in str(rv.data)",
"def add_new_arrival(self):\n pass",
"async def put_date( # pylint: disable=inconsistent-return-statements\n self, complex_body: JSON, *, content_type: str = \"application/json\", **kwargs: Any\n ) -> None:",
"def ENTRY(entry_code):\n\tif check_user(entry_code) ==True:\n\t\t###workday = Workday.objects.filter(date=get_time()).get()\n\t\tenter_workday(entry_code)",
"async def put_date( # pylint: disable=inconsistent-return-statements\n self, complex_body: IO, *, content_type: str = \"application/json\", **kwargs: Any\n ) -> None:",
"def post(self):\n data = request.json\n create_entry(data)\n return None, 201",
"def save_hours(validated_data):\n\n for w_h in validated_data['working_hours']:\n new_w_h = WorkingHoursSerializer(data={\n 'courier_id': validated_data['courier_id'],\n 'working_hours': w_h\n })\n if not new_w_h.is_valid():\n raise ValidationError(new_w_h.errors)\n new_w_h.save()",
"def _add_day(cab_data):\n return cab_data.assign(day=lambda x: x.time.dt.strftime('%m%d'))",
"def post(self, request): # FIRST EXAMPLE\n model = self._create_booking(\n request=request) # when _create_booking is invoked, historio Client will log model\n print('save me')",
"def add_task(self):\n print('Enter task')\n text_task = input()\n print('Enter deadline')\n new_task = self.Table(task=text_task, deadline=datetime.strptime(input(), '%Y-%m-%d'))\n self.session.add(new_task)\n self.session.commit()\n print('The task has been added!')\n print()"
]
| [
"0.68411314",
"0.6214305",
"0.6009533",
"0.5855128",
"0.5778126",
"0.5644487",
"0.5631726",
"0.5598066",
"0.55861247",
"0.55030936",
"0.5460911",
"0.5424971",
"0.541958",
"0.54160273",
"0.5392873",
"0.5369233",
"0.5363748",
"0.5343311",
"0.5311711",
"0.530917",
"0.5297215",
"0.5282998",
"0.52593875",
"0.5235744",
"0.52071255",
"0.5202364",
"0.51860106",
"0.5182772",
"0.5165582",
"0.5164136"
]
| 0.7105944 | 0 |
Adds a new schedule and stores information about it in the class dictionary. Params needed for the post request's payload | def add_schedule(self):
        # Take the schedule entries from TOML file
entries = self.cfg.get('payload',{}).get('schedule')
        # Check for valid entries
if entries:
# Construct payload
for payload in entries:
# Parse schedule payload
ready = self.construct_payload(parse = copy.deepcopy(payload), dele = 'link')
# Check the entry vs a json schema
check.check_entry(path='schemas/schedule.json', test=ready)
# Post request
b1 = self.add_post(ready, API.url_schl, self.schedules)
if 'link' in payload.keys() and payload['link'] != [{}]:
b2 = self.link(self.schedules[-1].get('id'))
else:
return b1
if b2 != None:
return b1 and b2
else:
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_add_schedule(self):\n body = Schedule()\n response = self.client.open('/v1/schedule',\n method='POST',\n data=json.dumps(body),\n content_type='application/json')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))",
"def post(self, request):\n data = dict(request.data)\n ser = _CreateScheduleSerializer(data=data)\n if ser.is_valid(raise_exception=False):\n ser.save()\n return send_200(\n data={\"data\": ser.data}, message=\"schedule created/updated successfully\"\n )\n else:\n return send_400(\n status=\"FAILURE\",\n data={\"errors\": ser.errors},\n message=ser.extract_error_msg(),\n )",
"def _create_schedules(self):\n\n ''''''",
"def add_schedule(self, schedule_dict):\n sub_task = SchedulePolicies.schedule_json(self.policy_type, schedule_dict)\n sub_task[\"subTaskOperation\"] = 2\n self._subtasks.append(sub_task)\n self._modify_schedule_policy_properties()",
"def _add_schedule_items(self):\n\n schedules = [\n {\n 'start_time': '9:30 AM',\n 'end_time': '10:00 AM',\n 'title': 'Daily Scrum',\n 'location': 'Hogwarts',\n 'day': self.day,\n 'user': self.user.user.rolllistuser,\n },\n {\n 'start_time': '10:30 AM',\n 'end_time': '11:00 AM',\n 'title': 'Engineering Interview',\n 'location': 'Narnia',\n 'day': self.day,\n 'user': self.user.user.rolllistuser,\n },\n {\n 'start_time': '12:00 PM',\n 'end_time': '12:30 PM',\n 'title': 'Lunch',\n 'location': 'Kitchen',\n 'day': self.day,\n 'user': self.user.user.rolllistuser,\n },\n {\n 'start_time': '2:00 PM',\n 'end_time': '2:30 PM',\n 'title': 'Workout',\n 'location': 'Gym',\n 'day': self.day,\n 'user': self.user.user.rolllistuser,\n },\n ]\n\n recurring_item_data = {\n 'start_time': '3:00 PM',\n 'end_time': '3:30 PM',\n 'title': 'Recurring thing',\n 'location': 'asdf',\n 'day': self.day,\n 'user': self.user.user.rolllistuser,\n }\n\n schedule_items = []\n\n schedule_dict = {i['start_time']: i for i in schedules}\n\n for schedule in schedules:\n save_data = schedule\n save_data['start_time'] = get_relevant_time_id(schedule['start_time'])\n save_data['end_time'] = get_relevant_time_id(schedule['end_time'])\n new_schedule_item = ScheduleItem(**save_data)\n new_schedule_item.save()\n schedule_items.append(new_schedule_item)\n\n save_data = recurring_item_data\n save_data['start_time'] = get_relevant_time_id(recurring_item_data['start_time'])\n save_data['end_time'] = get_relevant_time_id(recurring_item_data['end_time'])\n new_schedule_item = ScheduleItem(**save_data)\n new_schedule_item.save()\n new_schedule_item.make_recurring([0])\n schedule_items.append(new_schedule_item)\n\n return schedule_items, schedule_dict",
"def put_schedule(self, name, schedule):\n self.__schedules[name] = schedule",
"def create_schedule(connection, body, fields=None, error_msg=None):\n\n # id to eventId conversion - API Problem\n if 'event' in body:\n body['event']['eventId'] = body['event'].pop('id')\n\n response = connection.post(\n url=f'{connection.base_url}/api/schedules', json=body, params={'fields': fields}\n )\n if response.ok:\n # Fix for incorrect 'eventId' (expecting 'id')\n response_json = response.json()\n if 'event' in response_json:\n response_json['event']['id'] = response_json['event'].pop('eventId')\n response.encoding, response._content = 'utf-8', json.dumps(\n response_json\n ).encode('utf-8')\n\n return response",
"def append_schedule(*args, **kwargs):\n return get_schedule().append_schedule(*args, **kwargs)",
"def create_new_schedule():\n\n # collect all relevant information from form\n user_id = int(session['user_id'])\n user = User.query.filter_by(user_id=int(session['user_id'])).one()\n contact_form_value = request.form.get('contact_id')\n start_date_unicode = request.form.get('start_date')\n period = int(request.form.get('period'))\n\n # extracts email from contact_form_value string using re library\n contact_email = contact_form_value.partition('<')[-1].rpartition('>')[0]\n\n # pull contact from database\n contact = Contact.query.filter_by(email=contact_email).one()\n contact_id = contact.contact_id\n\n # turns start_date into datetime object using dateutil library\n start_date = parser.parse(start_date_unicode)\n\n # calculates send_date from start_date and period\n send_date = start_date + datetime.timedelta(days=period)\n\n # write scheduled message to database\n new_scheduled_msg = ScheduledMessage(user_id=user_id, \n contact_id=contact_id,\n send_date=send_date,\n sent=False)\n\n # set new period on contact in database\n contact.contact_period = period\n\n db.session.add(new_scheduled_msg)\n db.session.commit()\n\n print 'user_id:', user_id\n print 'contact_form_value:', contact_form_value\n print 'start_date:', start_date, 'type:', type(start_date)\n print 'contact_email:', contact_email\n print 'contact:', contact\n print 'contact_id:', contact.contact_id\n print 'period:', period\n print 'send_date:', send_date\n return jsonify({})",
"def schedule(self) -> pulumi.Input['ScheduleArgs']:\n return pulumi.get(self, \"schedule\")",
"def create():\n config = request.data\n return add_scheduling_block(config)",
"def AddSchedulee(self, s, schedulee_type):\n\n # error checking?\n\n try:\n \n schedulee = Schedulee(s, schedulee_type, self.verbose)\n \n except Exception, e:\n\n sys.stderr.write(\"Can't schedule simulation object: %s\" % e)\n \n return False\n \n self._schedulees.append(schedulee)\n\n return True",
"def schedule(self, schedule):\n \n self._schedule = schedule",
"def add(self, schedule):\n try:\n if schedule in self.set:\n self.log.error(\"%s has already been added to this Scheduler.\" %\n schedule)\n return\n self.log.debug('Adding %s to timer for %s.' %\n (schedule, schedule.next))\n self.timer.add_task(schedule.next, self._enqueue, [schedule])\n self.set.add(schedule)\n except:\n self.log.error(\n \"Invalid schedule %s found, deleting.\" % schedule)\n schedule.soft_delete()",
"def create_report_schedule(self, **kwargs):\n return CreateReportScheduleResponse(**self._request(kwargs.pop('path'), data=kwargs).json())",
"def add_scheduled_spirit(self, schedule_info):\n\n raise NotImplementedError",
"def update_schedule(schedule_name):\n data = flask.request.data\n\n try:\n new_schedule = json.loads(data)\n except json.decoder.JSONDecodeError as e:\n return 'Could not parse request', 400\n\n if not 'commands' in new_schedule:\n return 'Expecting schedule to provide commands.', 400\n\n for item in new_schedule['commands']:\n if not 'time' in item:\n return 'All commands must provide a time.', 400\n\n if not 'target' in item:\n return 'All commands must provide a target.', 400\n\n calendar.put_schedule(schedule_name, new_schedule)\n\n return 'Updated schedule.', 200",
"def add(self, story, items_to_schedule):\n url = self._build_url(story)\n arguments = self._argument_converter(\n data={\n 'items': items_to_schedule\n }\n )\n\n result = self._client.post(url, **arguments)\n return result",
"def schedule(self, schedule):\n\n self._schedule = schedule",
"def schedule(self, schedule):\n\n self._schedule = schedule",
"def __init__(self, name: str=None, schedule: Week=None): # noqa: E501\n self.swagger_types = {\n 'name': str,\n 'schedule': Week\n }\n\n self.attribute_map = {\n 'name': 'name',\n 'schedule': 'schedule'\n }\n\n self._name = name\n self._schedule = schedule",
"def schedule(self, task_schedule, place_id=-1):\r\n resource = task_schedule.resource\r\n if place_id == -1:\r\n self.tasksOfResource[resource].append(task_schedule)\r\n else:\r\n self.tasksOfResource[resource].insert(place_id, task_schedule)\r\n if task_schedule.task.graph.name in self.job_task_schedule:\r\n pass\r\n else:\r\n self.job_task_schedule[task_schedule.task.graph.name] = {}\r\n self.job_task_schedule[task_schedule.task.graph.name][task_schedule.task.id] = task_schedule",
"def create_schedule_team(self, schedule):\r\n stub_user = self.find(\"users\", \"Stub User\", attribute=\"name\")\r\n schedule_json = {\r\n \"name\": schedule['name'],\r\n \"type\": \"schedule\",\r\n \"time_zone\": \"Pacific/Auckland\",\r\n \"schedule_layers\": [\r\n {\r\n \"start\": \"2099-12-31T00:00:00+13:00\",\r\n \"rotation_virtual_start\": \"2099-12-31T00:00:00+13:00\",\r\n \"rotation_turn_length_seconds\": 86400,\r\n \"users\": [\r\n {\r\n \"user\": {\r\n \"type\": \"user\",\r\n \"id\": stub_user[\"id\"]\r\n }\r\n }\r\n ]\r\n }\r\n ]\r\n }\r\n try:\r\n self.rpost(\"users\", json=schedule_json)\r\n except PDClientError as e:\r\n raise e",
"def create_schedule(self, name, params=None):\n params = {} if params is None else params\n params.update({\"type\": params.get(\"type\", \"hive\")})\n with self.post(\n create_url(\"/v3/schedule/create/{name}\", name=name), params\n ) as res:\n code, body = res.status, res.read()\n if code != 200:\n self.raise_error(\"Create schedule failed\", res, body)\n js = self.checked_json(body, [\"start\"])\n return parse_date(get_or_else(js, \"start\", \"1970-01-01T00:00:00Z\"))",
"def schedule(self, schedule):\n if (self.local_vars_configuration.client_side_validation and\n schedule is not None and not isinstance(schedule, str)):\n raise ValueError(\"Parameter `schedule` must be a string\") # noqa: E501\n\n if (self.local_vars_configuration.client_side_validation and\n schedule is not None and len(schedule) < 1):\n raise ValueError(\"Invalid value for `schedule`, length must be greater than or equal to `1`\") # noqa: E501\n\n self._schedule = schedule",
"def register_schedule(self, term, schedule, allow_waitlisting=True, at=None):\n items = self.schedules(term, include_units=True)[schedule]\n self.register_courses(term, schedule, items, allow_waitlisting, at)",
"def add_schedule(self, schedule: Schedule, span: FiniteSpan) -> None:\n for schedule_item in schedule._schedule_items:\n for event in schedule_item.events(span):\n self.add_event(event)",
"def create( self ):\r\n for rsrc in self.ee.getRsrcs( ):\r\n self.schedule[rsrc.getid( )] = [ ]",
"def schedule(self) -> pulumi.Input['CanaryScheduleArgs']:\n return pulumi.get(self, \"schedule\")",
"def test_update_schedule(self):\n body = Schedule()\n response = self.client.open('/v1/schedule',\n method='PUT',\n data=json.dumps(body),\n content_type='application/json')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))"
]
| [
"0.7588622",
"0.72927296",
"0.6554353",
"0.6533283",
"0.64751756",
"0.6465415",
"0.64390296",
"0.63615066",
"0.6257769",
"0.61862046",
"0.61686426",
"0.6160965",
"0.6158384",
"0.61472726",
"0.613723",
"0.611298",
"0.6108676",
"0.604877",
"0.59989715",
"0.59989715",
"0.5993948",
"0.5988613",
"0.5983204",
"0.5954211",
"0.5921393",
"0.5918435",
"0.59131765",
"0.5912574",
"0.5877101",
"0.58740485"
]
| 0.7421662 | 1 |
Download the image of an exercise by index | def get_image(self, index):
# Get request to get all the links for all exercises
image = requests.get(API.url_image, headers = self.headers).json()
filename = download(image[index]['image']) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_img_from_url(index, url):\n try:\n with urllib.request.urlopen(url) as response:\n if response.headers.get_content_maintype() == 'image':\n image_filename = image_filename_prefix.format(name=image_class_name,\n counter=index,\n ext=response.headers.get_content_subtype())\n image_filepath = os.path.join(target_folder, image_filename)\n with open(image_filepath, 'wb') as image_file:\n image_file.write(response.read())\n\n print('Fetched URL {}'.format(index))\n\n except urllib.request.HTTPError:\n pass\n except Exception:\n pass",
"def image_path_from_index(self, index):\n for ext in self._image_ext:\n image_path = os.path.join(self._data_path, 'Images',\n index + ext)\n if os.path.exists(image_path):\n break\n assert os.path.exists(image_path), \\\n 'Path does not exist: {}'.format(image_path)\n\treturn image_path",
"def _image_path_from_index(self, index):\n # Example image path for index=119993:\n # images/train2014/COCO_train2014_000000119993.jpg\n file_name = (str(index).zfill(12) + '.jpg')\n image_path = os.path.join(self._root_dir, self._data_name, file_name)\n assert os.path.exists(image_path), 'Path does not exist: {}'.format(image_path)\n return image_path",
"def image_path_from_index(self, index):\n raise NotImplementedError",
"def _index_img(img_file, index):\n imgs = check_niimg(img_file, ensure_ndim=4, atleast_4d=True)\n return _index_img(imgs, index)",
"def image_path_from_index(self, index):\n image_path = os.path.join(self._data_path,'query',\n index)\n assert os.path.exists(image_path), \\\n 'Path does not exist: {}'.format(image_path)\n return image_path",
"def get_input(self, idx):\r\n img_filename = self.root / \"images\" / self._image_array[idx]\r\n x = Image.open(img_filename)\r\n return x",
"def __getitem__(self, index):\n dataset= self.dataset\n filename, label = dataset[index]\n \n path=os.path.join(self.image_dir, filename)\n if path not in self.img_cache:\n image = Image.open(path)\n image.load()\n self.img_cache[path]=image\n else:\n image=self.img_cache[path]\n \n \n encoded_lab=torch.zeros(len(self.domains), dtype=torch.float32)\n encoded_lab[label]=1\n #image=self.hsv_color_change(image,0.5)\n #im.save(self.image_dir+\"/testimg.jpg\")\n #image.save(self.image_dir+\"/testimg2.jpg\")\n return self.transform(image), encoded_lab",
"def __getitem__(self, idx):\n image = Image.open(self.filenames[idx]) # PIL image\n image = self.transform(image)\n return image",
"def image_path_from_index(self, index):\n assert self.image_set_index is not None, \"Dataset not initialized\"\n name = self.image_set_index[index]\n image_file = os.path.join(self.image_dir, 'images', name)\n assert os.path.isfile(image_file), 'Path does not exist: {}'.format(image_file)\n return image_file",
"def image_path_from_index(self, index):\n url = index.image.url\n path_lst = url.split('/')\n\n image_path = os.path.join(self._data_path, 'JPEGImages', path_lst[-2], path_lst[-1])\n assert os.path.exists(image_path), \\\n 'Path does not exist: {}'.format(image_path)\n return image_path",
"def get_image_url2(pixid, index, imgInfoPool):\n\n PageParams = dict(mode='manga_big', illust_id=pixid, page=index)\n try:\n r = requests.get(operate_url, params=PageParams, cookies=mycookies)\n except requests.exceptions.ConnectionError:\n print('I cannot open the page. Maybe the id is invalid?')\n return 1\n else:\n soup = bs4.BeautifulSoup(r.text.encode('utf-8'), 'html.parser')\n img = soup.find('img')\n imgName = soup.find('title').string\n imgInfoPool.append(dict(id=pixid + '_p' + str(index), name=imgName, url=img.attrs.get('src'), pageurl=r.url))\n return",
"def __getitem__(self, index):\n filename = self.content_dataset[index]\n image = Image.open(filename)\n content = self.content_transform(image)\n art_index = random.randint(0,self.art_num-1)\n filename,label = self.art_dataset[art_index]\n image = Image.open(filename)\n style = self.style_transform(image)\n return content,style,label",
"def image_path_from_index(self, index):\n image_path = os.path.join(self._data_path, 'JPEGImages',\n index + self._image_ext)\n assert os.path.exists(image_path), \\\n 'Path does not exist: {}'.format(image_path)\n return image_path",
"def image_path_from_index(self, index):\n image_path = os.path.join(self._data_path, index)\n assert os.path.exists(image_path), 'path does not exist: {}'.format(image_path)\n return image_path",
"def image_path_from_index(self, index):\n image_path = os.path.join(self._data_path, 'Images', index + self._image_ext)\n assert os.path.exists(image_path), 'Path does not exist: {}'.format(image_path)\n return image_path",
"def image_path_from_index(self, index):\n assert self.annotation_set is not None, \"Dataset not initialized\"\n name = self.annotation_set[index] # e.g. 'set00_V010_img00577.xml'\n set_name, video_name, xml_name = name.split('_')\n img_name = os.path.splitext(xml_name)[0] + self.extension\n img_path = os.path.join(self.data_path, set_name, video_name, img_name)\n assert os.path.exists(img_path), 'Path does not exist: {}'.format(img_path)\n\n return img_path",
"def test_z_download_images(self):\n #img_urls = logpuzzle.read_urls('place_code.google.com')\n img_urls = logpuzzle.read_urls('animal_code.google.com')\n dest_dir = './puzzle_images'\n logpuzzle.download_images(img_urls, dest_dir)\n\n result = os.listdir(dest_dir)\n expected_result = ['img0.jpg', 'img1.jpg', 'img10.jpg', 'img11.jpg', 'img12.jpg', 'img13.jpg', 'img14.jpg', 'img15.jpg', 'img16.jpg', 'img17.jpg', 'img18.jpg', 'img19.jpg', 'img2.jpg', 'img3.jpg', 'img4.jpg', 'img5.jpg', 'img6.jpg', 'img7.jpg', 'img8.jpg', 'img9.jpg']\n self.assertEqual(expected_result, result,\n 'write_index_file() expected {} but got {}'.format(expected_result, result))",
"def image_path_from_index(self, index):\n # set the prefix\n if self._image_set == 'test':\n prefix = 'testing/image_2'\n else:\n prefix = 'training/image_2'\n\n image_path = os.path.join(self._data_path, prefix,\n index + self._image_ext)\n assert os.path.exists(image_path), \\\n 'Path does not exist: {}'.format(image_path)\n return image_path",
"def load_image(self, index):\n image_path = os.path.join(self.folder_path, self.image_ids[index] + '.jpg')\n img = Image.open(image_path).convert('RGB')\n if debug:\n print(\"Loaded image: \", image_path)\n return img",
"def read(self, index):\n assert type(index) is int\n img = self.db.get_node('/images/img{:04d}'.format(index))\n return np.array(img)",
"def regular_download(self) -> NoReturn:\n\n if not path.isdir(self.name):\n mkdir(self.name)\n\n for chapter in self.chapters.keys():\n\n chapter_folder = f\"{self.name}/{chapter}/\"\n curr_chapter = self.chapters[chapter]\n base_url = f\"{curr_chapter['server']}{curr_chapter['hash']}/\"\n\n if not path.isdir(chapter_folder):\n mkdir(chapter_folder)\n\n for image in curr_chapter[\"images\"]:\n\n image_url = f\"{base_url}{image}\"\n image_file = f\"{chapter_folder}{image}\"\n response = requests.get(image_url, headers={\"Connection\":\"close\"})\n\n if response and response.status_code == 200:\n with open(image_file, \"wb\") as img_file:\n img_file.write(response.content)\n else:\n print(f\"Error downloading chapter: {curr_chapter['num']} Image: {image}\")",
"def url(self, index):\r\n return self.arraydata[index.row()][3]",
"def __getitem__(self, index) -> Tuple[Image.Image, str]:\n path = self.file_paths[\n index % self.__len__()\n ] # make sure index is within then range\n img = Image.open(path).convert(\"RGB\")\n # apply image transformation\n img = self.transform(img)\n\n return img, path",
"def index_subset(subset):\n images = []\n print('Indexing {}...'.format(subset))\n # Quick first pass to find total for tqdm bar\n subset_len = 0\n \n \n for root, folders, files in os.walk(DATA_PATH + '/Omniglot/images_{}/'.format(subset)):\n subset_len += len([f for f in files if f.endswith('.png')])\n\n progress_bar = tqdm(total=subset_len)\n for root, folders, files in os.walk(DATA_PATH + '/Omniglot/images_{}/'.format(subset)):\n if len(files) == 0:\n continue\n\n alphabet = root.split('/')[-2]\n class_name = '{}.{}'.format(alphabet, root.split('/')[-1])\n\n for f in files:\n progress_bar.update(1)\n images.append({\n 'subset': subset, \n 'class_name': class_name,\n 'filepath': os.path.join(root, f)\n })\n\n progress_bar.close()\n return images",
"def __getitem__(self, index):\n \n #random.seed()\n #random.shuffle(self.train_images)\n\n src = self.train_images[index]\n print(index)\n\n src_char = int(src.split('_')[0][len(self.image_dir+self.mode+'/'):])\n src_style = int(src.split('_')[1][:-len(\".jpg\")])\n #pdb.set_trace()\n try:\n trg = random.choice([x for x in self.train_images\n if '_'+str(src_style) in x and str(src_char)+'_' not in x])\n #print(1)\n except:\n trg = src\n #print(2)\n trg_style = int(trg.split('_')[1][:-len(\".jpg\")])\n trg_char = int(trg.split('_')[0][len(self.image_dir+self.mode+'/'):])\n src = self.transform(Image.open(src))\n trg = self.transform(Image.open(trg))\n\n return src, src_style, src_char, \\\n trg, trg_style, trg_char",
"def vis(group, indexlist, save=False):\n ##### Modify these: #####\n # imageloc = '/home/teja/Project_005/toronto/iaprtc12/images/'\n # trainloc = '/home/teja/Project_005/toronto/iaprtc12_2/iaprtc12_train_list.txt'\n # testloc = '/home/teja/Project_005/toronto/iaprtc12_2/iaprtc12_test_list.txt'\n #########################\n\n ##### Modify these: #####\n imageloc = '/home/teja/Programs/convnet/examples/imagenet/'\n trainloc = '/home/teja/Project_005/toronto/iaprtc12_2/iaprtc12_train_list.txt'\n testloc = '/home/teja/Programs/convnet/examples/imagenet/images_list.txt'\n #########################\n\n\n if group == 'train':\n listloc = trainloc\n else:\n listloc = testloc\n f = open(listloc, 'rb')\n ims = []\n for line in f:\n ims.append(line.strip() + '.jpg')\n f.close()\n for i in range(len(indexlist)):\n imloc = imageloc + ims[indexlist[i]]\n im = Image.open(imloc)\n # im.thumbnail((256,256), Image.ANTIALIAS)\n # im.show()\n if save:\n im.save('r' + str(indexlist[i]) + '.jpg')",
"def image_fetcher(year, month, day, name):\n entry = 'data/{year}/{month}/{day}/{name}'.format(year=year, month=month, day=day, type=type, name=name)\n img = open(entry)\n return send_file(img)",
"def image_path_from_index(self, index):\n image_path = os.path.join(self.cfg.file_path, 'JPEGImages',\n index + self.cfg.image_ext)\n assert os.path.exists(image_path), \\\n 'Path does not exist: {}'.format(image_path)\n return image_path",
"def download_engine(fcsd): #fcsd = first comic strip date\n\n url_list = get_comic_strip_url(fcsd)\n\n for url in url_list:\n session = requests.Session()\n response = session.get(url)\n download_url = get_image_comic_url(session, response)\n# download_dilbert(session, download_url)\n return download_url"
]
| [
"0.646242",
"0.62567097",
"0.62492424",
"0.62149733",
"0.618829",
"0.6157407",
"0.6142673",
"0.61364245",
"0.61317563",
"0.6123799",
"0.60892314",
"0.6060126",
"0.5978582",
"0.59611326",
"0.59461564",
"0.5933663",
"0.5931147",
"0.5914811",
"0.5893483",
"0.588527",
"0.5846178",
"0.5828078",
"0.58239686",
"0.5813109",
"0.5811904",
"0.58109915",
"0.5801152",
"0.57949865",
"0.5764982",
"0.57497585"
]
| 0.84663945 | 0 |
Gets the comment from an exercise by index | def get_comment(self, index):
# Get request to get all the comments for all exercises
comments = requests.get(API.url_comment, headers = self.headers).json()
# Parse the response
for my_comment in comments:
if my_comment['id'] == index:
print(my_comment['comment']) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getComment(self, ind):\r\n if ind >= 0 and ind < len(self.comments):\r\n return self.comments[ind]\r\n return None",
"def getComment(self, n = None):\n \n if n is None:\n return self._comments\n else:\n return self._comments[n]",
"def get_text_from_note (self,\r\n index):\r\n\r\n if self.using_database:\r\n aprint('GETTING TEXT DROM NOTE')\r\n value_tuple = (notebookname, str(index),)\r\n db_cursor.execute(\"SELECT note_body\"+\r\n \" FROM notes WHERE notebook=?\"+\r\n \" AND note_index=?;\",value_tuple)\r\n try:\r\n text = db_cursor.fetchone()[0].replace(\"''\",\"'\")\r\n except:\r\n text = ''\r\n\r\n return text\r\n\r\n if str(index) in self.note_dict:\r\n return self.note_dict[str(index)].text\r\n return ''",
"def _get_doc(results, index):\n return results[index]",
"def get(self, index):\n raise NotImplementedError() # pragma: no cover",
"def get_comment_index(self, name):\n idx = self.comments.index(name)\n if self._has_coords_comment:\n return idx - 1\n else:\n return idx",
"def test_issue_get_comment(self):\n pass",
"def __getitem__(self, index: Any) -> Any:\n return self.contents[index]",
"def get_row(self, index: int) -> [str]:\n return self.content[index]",
"def comment_for_run (ins, exp, runnum) :\n return dict_of_recs_for_run(ins, exp, runnum)['comment']",
"def get_note (self,\r\n index):\r\n\r\n\r\n\r\n\r\n if self.using_database:\r\n aprint('GETTING NOTE')\r\n value_tuple = (notebookname, str(index),)\r\n db_cursor.execute(\"SELECT note_body \"\r\n +\"FROM notes WHERE notebook=?\"\r\n +\" AND note_index=?;\",\r\n value_tuple)\r\n\r\n res_temp = db_cursor.fetchone()\r\n\r\n if res_temp:\r\n text = res_temp[0].replace(\"''\",\"'\")\r\n else:\r\n text = ''\r\n\r\n db_cursor.execute(\"SELECT user\"\r\n +\" FROM notes\"\r\n +\" WHERE notebook=?\"\r\n +\" AND note_index=?;\",\r\n value_tuple)\r\n res_temp = db_cursor.fetchone()\r\n if res_temp:\r\n\r\n user = res_temp[0]\r\n else:\r\n user = 'USER'\r\n\r\n db_cursor.execute(\"SELECT size\"\r\n +\" FROM notes\"\r\n +\" WHERE notebook=? \"\r\n +\"AND note_index=?;\",\r\n value_tuple)\r\n\r\n res_temp = db_cursor.fetchone()\r\n if res_temp:\r\n size = res_temp[0]\r\n else:\r\n size = 60\r\n db_cursor.execute(\"SELECT timestamp \"\r\n +\"FROM timestamps\"\r\n +\" WHERE notebook=?\"\r\n +\" AND note_index=?\"\r\n +\" ORDER BY timestamp\",\r\n value_tuple)\r\n dates = db_cursor.fetchall()\r\n date_list = [date[0] for date in dates]\r\n db_cursor.execute(\"SELECT keyword\"\r\n +\" FROM all_note_keys\"\r\n +\" WHERE notebook=?\"\r\n +\" AND note_index=?\",\r\n value_tuple)\r\n keyset = db_cursor.fetchall()\r\n keyset = {key[0] for key in keyset}\r\n\r\n metadata = {'user':user,\r\n 'date':date_list,\r\n 'size':size}\r\n return Note(keyset,text,metadata)\r\n\r\n if str(index) in self.note_dict:\r\n return self.note_dict[str(index)]\r\n\r\n return False",
"def __getitem__(self, index):\n def _getTextByIndex(blockIndex):\n return self._doc.findBlockByNumber(blockIndex).text()\n\n if isinstance(index, int):\n index = self._checkAndConvertIndex(index)\n return _getTextByIndex(index)\n elif isinstance(index, slice):\n start, stop, step = index.indices(self._doc.blockCount())\n return [_getTextByIndex(blockIndex) \\\n for blockIndex in range(start, stop, step)]",
"def _get_comment(self, cell: NotebookNode, resources: ResourcesDict) -> None:\n\n # retrieve or create the comment object from the database\n comment = self.gradebook.find_comment(\n cell.metadata['nbgrader']['grade_id'],\n self.notebook_id,\n self.assignment_id,\n self.student_id)\n\n # save it in the notebook\n cell.metadata.nbgrader['comment'] = comment.comment",
"def get(self, index):\n return self.board[index]",
"def test_issue_get_comments(self):\n pass",
"def __getitem__(self, index):\n if isinstance(index, (tuple, list)) and len(index) == 2:\n return self.cells[index[1]][index[0]]\n return self.cells[index]",
"def get(self, index):\n if self.head is None:\n raise Exception('Node vide')\n else:\n return self.leonardo_recurs(index, self.head)",
"def getRepeatableComment(self, address: ghidra.program.model.address.Address) -> unicode:\n ...",
"def _get_inline_comment(index, nodes):\n # And the comments are added as prefixes to those nodes.\n #\n # So to get an in-line comment, we actually need to search\n # beyond the current node.\n #\n #\n comma_exists = False\n\n for node in nodes[index + 1 :]:\n if isinstance(node, tree.Operator) and node.value == \",\":\n comma_exists = True\n comment = _get_comment_from_node(node)\n\n if comment and _is_comma_on_next_lines(node):\n return comment\n\n if not comma_exists:\n continue\n\n comment = _get_comment_from_node(node)\n\n if comment and comma_exists and _is_comment_on_same_line(node):\n return comment\n elif comment and (not comma_exists) and _is_comma_on_next_lines(node):\n return comment\n\n text = _get_requirement_text(node)\n\n if text:\n return \"\"\n\n return \"\"",
"def get_piece(self, index):\n return self.squares[index]",
"def get_at_index(self, index: int) -> object:\n return self.data[index]",
"def getCellComment(self, row, column):\n\n\t\t\t\tcell = self.getCell(row = row, column = column)\n\n\t\t\t\treturn cell.comment.content",
"def get_comments():\n conn = pymongo.Connection(\"localhost\",27017)\n db = conn[\"paperDB\"]\n infoDB = db.infoDB\n record = infoDB.find_one()\n return record['comment']",
"def get(self, index):\n if 0 <= index <= len(self.nums):\n return self.nums[index]\n return -1",
"def __getitem__(self,index):\n return self._data[index[0]][index[1]]",
"def comment(self) :\n\t\ttry :\n\t\t\treturn self._comment\n\t\texcept Exception as e:\n\t\t\traise e",
"def get_value_at_index(self, index, cc):\n tl = cc.dsget(self.title)\n return (tl[index], None)",
"def __getitem__(self, index):\n return self.data[index[0] - 1][index[1] - 1]",
"def get_comments(self):\n raise NotImplementedError",
"def get_comment_path(self, sha):\n if not hasattr(self, 'id'):\n raise BadReference('No matching issue on disk')\n return os.path.join(self.paths['comments'], sha)"
]
| [
"0.72585046",
"0.6156863",
"0.60285085",
"0.6012852",
"0.5878884",
"0.5870563",
"0.58260715",
"0.5798382",
"0.5696689",
"0.5656557",
"0.56371516",
"0.5632999",
"0.5582955",
"0.5553322",
"0.54806364",
"0.54631793",
"0.54591316",
"0.540845",
"0.54078734",
"0.5400077",
"0.5340767",
"0.5315754",
"0.53086185",
"0.5298459",
"0.52848864",
"0.5275811",
"0.52715373",
"0.5266109",
"0.52483666",
"0.52464813"
]
| 0.8401464 | 0 |
Checks if a delete was successful | def check_delete(self, url, info, index):
exists = requests.get(url, headers=self.headers)
requests.delete(url + str(index), headers = self.headers)
exists2 = requests.get(url, headers=self.headers)
if exists.ok == exists2.ok == True and exists.json()['results'] == exists2.json()['results']:
return False
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def delete_success(self, id_):\n rv = self.post((id_, self.delete_url), dict(post='yes'))\n assert not self.verify_object({self.id_field: id_})\n return rv",
"def delete(self) -> bool:\n return False",
"def do_delete(self, arg):\n \treturn False",
"def check_deletion():\n\n if newrev == zero:\n ERROR(\"[POLICY] Refusing to delete this ref\")\n sys.exit(1)",
"def test_post_deletion_success(self):\n url = reverse(\n 'post-detail',\n args=[\n self.topic1.url_name,\n self.post1.id\n ]\n )\n self.client.credentials(\n HTTP_AUTHORIZATION = 'Token ' + self.user1.auth_token.key\n )\n response = self.client.delete(url)\n\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n deleted_post = Post.objects.filter(\n id=self.post1.id,\n )\n self.assertFalse(deleted_post.exists())",
"def test_delete_success(self):\n\n url = reverse('notification',\n kwargs={'way_id': self.notification.way_id, 'notification_id': self.notification.id})\n response = self.client.delete(url)\n\n self.assertEqual(response.status_code, 200)",
"def delete(self, *args, **kwargs):\n return 0",
"def test_delete_success(self) -> None:\n\n channel = self.make_request(\n \"DELETE\",\n self.url,\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(200, channel.code, msg=channel.json_body)\n self.assertEqual({}, channel.json_body)\n\n channel = self.make_request(\n \"GET\",\n self.url,\n access_token=self.admin_user_tok,\n )\n\n # check that report was deleted\n self.assertEqual(404, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.NOT_FOUND, channel.json_body[\"errcode\"])",
"def http_delete_and_get_check(url):\n delete_resp_obj = RestClientApis.http_delete_and_check_success(url)\n json_body = delete_resp_obj.json_body\n response_object = delete_resp_obj.response_object\n if delete_resp_obj.success:\n # if delete was successful\n get_resp_obj = RestClientApis.http_get_and_check_success(url)\n response_object = get_resp_obj.response_object\n json_body = get_resp_obj.json_body\n if get_resp_obj.http_status == HTTPStatus.NOT_FOUND:\n # if resource was not found we are good\n success = True\n return_code = HTTPStatus.OK\n message = HTTPStatus.OK.phrase\n else:\n success = False\n return_code = HTTPStatus.INTERNAL_SERVER_ERROR\n message = HTTPStatus.INTERNAL_SERVER_ERROR.phrase\n else:\n success = False\n return_code = delete_resp_obj.http_status\n message = delete_resp_obj.message\n rest_return_obj = RestReturn(success=success, message=message, http_status=return_code,\n json_body=json_body, response_object=response_object)\n return rest_return_obj",
"def delete_fail(self, id_, message):\n rv = self.post((id_, self.delete_url), dict(post='yes'))\n assert in_response(rv, message)\n assert self.verify_object({self.id_field: id_})\n return rv",
"def is_new_statement_deleted_successfully(self):\n status = None\n number_of_rows_after_delete = self.get_inbound_statement_grid_number_of_rows()\n if int(number_of_rows_after_delete) < int(self.number_of_rows):\n status = True\n else:\n status = False\n return status",
"def verify_delete(url, header):\n test_result_flag = False\n\n getmsg = http.get(url, header)\n if getmsg.status_code == 404:\n test_result_flag = True\n else:\n print('GET after DELETE failed')\n print('URL')\n print(url)\n print('headers')\n print(header)\n print('Response Body')\n print(getmsg.text)\n print('GET Code {}'.format(getmsg.status_code))\n\n return test_result_flag",
"def delete():",
"def test_delete(self):\n thread = self.create_thread()\n ut = UserThread.objects.get(\n user=thread.recipients.first(), thread=thread)\n ut_id = ut.pk\n ut.delete()\n ut = UserThread.objects.with_deleted().get(pk=ut_id)\n self.assertEqual(ut.status, 'deleted')",
"def can_delete(self):\r\n return True",
"def test_delete__valid(self):\n testing_config.sign_in('[email protected]', 123567890)\n\n with register.app.test_request_context(self.request_path):\n actual_json = self.handler.do_delete(self.feature_id)\n self.assertEqual({'message': 'Done'}, actual_json)\n\n revised_feature = models.Feature.get_by_id(self.feature_id)\n self.assertTrue(revised_feature.deleted)",
"def test_delete(self):\n self.client.force_authenticate(user=self.admin)\n\n response = self.client.delete(\n reverse(\n 'retreat:waitqueuenotification-detail',\n kwargs={'pk': 1},\n ),\n )\n\n self.assertEqual(\n response.status_code,\n status.HTTP_405_METHOD_NOT_ALLOWED\n )",
"def is_document_deleted_successfully(self):\n current_number_of_rows = self.get_documents_grid_number_of_rows()\n if int(current_number_of_rows) < int(self.number_of_rows):\n return True\n else:\n return False",
"def do_deleting(self, request, obj, obj_display, obj_id):\n try:\n with transaction.atomic(savepoint=False):\n self.log_deletion(request, obj, obj_display)\n self.delete_model(request, obj)\n\n return self.response_delete(request, obj_display, obj_id)\n except ValidationError as ex:\n for message in ex.messages:\n self.message_user(request, message, messages.ERROR)\n return False",
"def can_fast_delete(self, *args, **kwargs):\n return False",
"def delete(self, id):\n\t\tstatus = self.M.delete(id)\n\n\t\t# if deleted documents > 0 we've deleted the bird\n\t\tif status['n'] > 0:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False",
"def test_delete(self):\n self.assertFalse(self.user1.ad_deleted)\n self.assertTrue(self.user1.active)\n url = '/api/users/{}/'.format(self.user1.ad_guid)\n data = {'Deleted': True}\n response = self.client.put(url, json.dumps(data), content_type='application/json')\n self.assertEqual(response.status_code, 202)\n user = DepartmentUser.objects.get(pk=self.user1.pk) # Refresh from db\n self.assertTrue(user.ad_deleted)\n self.assertFalse(user.active)\n self.assertTrue(user.in_sync)\n # Also delete a second object, to check for silly 'empty string' collisions.\n url = '/api/users/{}/'.format(self.user2.ad_guid)\n response = self.client.put(url, json.dumps(data), content_type='application/json')\n self.assertEqual(response.status_code, 202)",
"def test_delete_success(self, acme_id):\n\n api_url = self.get_acme_account_url(acme_id)\n\n # Setup the mocked response\n responses.add(responses.DELETE, api_url, status=204)\n\n acme = ACMEAccount(client=self.client)\n response = acme.delete(acme_id)\n\n self.assertEqual(True, response)",
"def _do_request_delete(self, __button):\n _return = False\n\n _model, _row = self.treeview.get_selection().get_selected()\n _node_id = _model.get_value(_row, 9)\n _level = _model.get_value(_row, 11)\n\n if not self._dtc_data_controller.request_do_delete(_node_id):\n self._on_select_revision(module_id=self._revision_id)\n else:\n _prompt = _(u\"A problem occurred while attempting to delete {0:s} \"\n u\"with ID {1:d}.\").format(_level.title(), _node_id)\n ramstk.RAMSTKMessageDialog(_prompt, self._dic_icons['error'], 'error')\n\n _return = True\n\n return _return",
"def test_delete_store_success(self):\n product = sample_product(supplier_id=self.user)\n url = detail_url(product.id)\n res = self.client.delete(url)\n products = Product.objects.all()\n\n self.assertEqual(res.status_code, status.HTTP_204_NO_CONTENT)\n self.assertEqual(len(products), 0)",
"def delete():\n # Must be logged in to perform any delete commands.\n auth_required()\n pass",
"def ok_to_delete_row(self, row):\n if self.is_new_row(row):\n return False, _('Unable to delete new row')\n elif row == 0:\n return False, _('Unable to delete sofa id row')\n elif self.new_is_dirty:\n return (False, _(\n 'Cannot delete a row while in the middle of making a new one'))\n else:\n return True, None",
"def test_validate_delete(client):\n response = client.delete('/user/1')\n assert response.status_code == 400\n assert response.json['message'] == INVALID_ACTION_MESSAGE",
"def delete():\n click.echo('delete was called.')",
"def deleted(self) -> bool:\n return pulumi.get(self, \"deleted\")"
]
| [
"0.7594761",
"0.72328854",
"0.7226481",
"0.71072614",
"0.70056593",
"0.6999749",
"0.69725275",
"0.69210666",
"0.6857827",
"0.68532443",
"0.6804796",
"0.67526543",
"0.6752633",
"0.6748591",
"0.67414945",
"0.6728462",
"0.6620742",
"0.659836",
"0.6586197",
"0.6577026",
"0.6569855",
"0.6567956",
"0.65675306",
"0.6562379",
"0.65406567",
"0.6537765",
"0.6530107",
"0.6524866",
"0.6524545",
"0.65093964"
]
| 0.7322521 | 1 |
Define the processing of each new directory added. [in] dirpath: The directory just added by the user. Improve this ugly interface... | def _processNewDirectory(self, dirpath):
self._parent.processDirectory(dirpath) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_directory(current_path, parent_dir, path_list):\n try:\n for item in scandir(current_path):\n path_list.append(join(current_path, item.name))\n except PermissionError:\n pass",
"def parse_dir(self, directory):\n for dir in os.listdir(directory):\n if dir in ['.git', '.github', '.vscode', 'docs']:\n continue\n next_dir = os.path.join(directory, dir)\n if os.path.isdir(next_dir):\n if dir.startswith('template_'):\n self.parse_template(next_dir)\n else:\n normpath = os.path.relpath(next_dir)\n normpath = os.path.normpath(normpath)\n path = normpath.split(os.sep)\n self.add_folder(path)\n # add_directory(next_dir)\n self.parse_dir(next_dir)",
"def _add_path(dir_name, payload_info_list):\n path_count_dict = {}\n for payload_info_dict in payload_info_list:\n file_name = payload_info_dict[\"filename\"] or payload_info_dict[\"pid\"]\n path = d1_common.utils.filesystem.gen_safe_path(dir_name, \"data\", file_name)\n path_count_dict.setdefault(path, 0)\n path_count_dict[path] += 1\n if path_count_dict[path] > 1:\n path_base, path_ext = os.path.splitext(path)\n path = \"{}({}){}\".format(path_base, path_count_dict[path], path_ext)\n payload_info_dict[\"path\"] = path",
"def add_dir(self, path):\n assert self._root_dir == path or self._root_dir.is_parent_of(path)\n self._dirs.append(path)",
"def process_directory(dir, exiftool_path):\n for path_object in pathlib.Path(dir).glob(\"**/*\"):\n if path_object.is_file():\n verbose(f\"Processing file {path_object}\")\n process_file(path_object, exiftool_path)\n elif path_object.is_dir():\n verbose(f\"Processing directory {path_object}\")\n process_directory(path_object, exiftool_path)",
"def newdir(self, path, watch=True):\n assert False\n if watch:\n wm.add_watch(path, mask, rec=True)\n # the following may result in double-processed files but prevents a different race condition\n for d,sds,fns in os.walk(path):\n for fn in fns:\n self.newfile(os.path.join(d,fn))",
"def list_dir(self, path):",
"def _listFiles(files, path):\n\n for item in os.listdir(path):\n item = os.path.join(path, item)\n if os.path.isdir(item):\n _listFiles(files, item)\n else:\n files.append(item)",
"def organizeDir(self):\n # Classify every file in dir\n for file in os.listdir(self.path):\n curPath = self.path + file\n self.moveFile(curPath)",
"def _walk_dir(self, rootpath):\n assert os.path.isabs(rootpath)\n assert rootpath not in self._dirs\n relpath = self._get_rel_path(rootpath)\n self._dirs[relpath] = Directory(rootpath, relpath, None)\n for dirpath, dirnames, filenames in os.walk(rootpath):\n if 'refdata' in dirnames:\n dirnames.remove('refdata')\n currentdir = self._dirs[self._get_rel_path(dirpath)]\n # Loop through a copy so that we can modify dirnames.\n for dirname in list(dirnames):\n fullpath = os.path.join(dirpath, dirname)\n if fullpath == self._build_root:\n dirnames.remove(dirname)\n continue\n relpath = self._get_rel_path(fullpath)\n self._dirs[relpath] = Directory(fullpath, relpath, currentdir)\n extensions = ('.h', '.cuh', '.hpp', '.c', '.cc', '.cpp', '.cu', '.bm')\n for filename in filenames:\n basename, extension = os.path.splitext(filename)\n if extension in extensions:\n fullpath = os.path.join(dirpath, filename)\n relpath = self._get_rel_path(fullpath)\n self._files[relpath] = File(fullpath, relpath, currentdir)\n elif extension == '.cmakein':\n extension = os.path.splitext(basename)[1]\n if extension in extensions:\n fullpath = os.path.join(dirpath, filename)\n relpath = self._get_rel_path(fullpath)\n sourcefile = GeneratorSourceFile(fullpath, relpath, currentdir)\n self._files[relpath] = sourcefile\n fullpath = os.path.join(dirpath, basename)\n relpath = self._get_rel_path(fullpath)\n fullpath = os.path.join(self._build_root, relpath)\n generatedfile = GeneratedFile(fullpath, relpath, currentdir)\n self._files[relpath] = generatedfile\n generatedfile.set_generator_source(sourcefile)\n elif extension in ('.l', '.y', '.pre'):\n fullpath = os.path.join(dirpath, filename)\n relpath = self._get_rel_path(fullpath)\n self._files[relpath] = GeneratorSourceFile(fullpath, relpath, currentdir)",
"def handle_directory_pre(self, dir):\n if self.ppath_prefix_len:\n prefix = self.ppath_prefix[self.ppath_prefix_idx%self.ppath_prefix_len]\n dir = os.path.join(prefix, dir)\n try:\n dir_lstats = os.lstat(dir)\n except WindowsError as e:\n if e.winerror == 3 and len(dir) > hydra.Utils.MAX_WINDOWS_FILEPATH_LENGTH:\n self.log.error('Unable to stat dir due to path length > %d characters. Try setting HKLM\\System\\CurrentControlSet\\Control\\FileSystem\\LongPathsEnabled to 1'%hydra.Utils.MAX_WINDOWS_FILEPATH_LENGTH)\n else:\n if hydra.is_invalid_windows_filename(dir):\n self.log.error('Directory contains invalid characters or invalid names for Windows: %s'%dir)\n else:\n self.log.exception(e)\n self.stats['error_stat_dirs'] += 1\n return True\n except Exception as e:\n self.log.exception(e)\n self.stats['error_stat_dirs'] += 1\n if stat.S_ISLNK(dir_lstats.st_mode):\n # We do not want to process a symlink so account for it here as a symlink\n self.stats['symlink_dirs'] += 1\n return True\n return False",
"def addchilddirectory(directorio):\n\tparaañadir = []\n\tficheros = os.listdir(directorio)\n\tfor a in ficheros:\n\t\titem = os.path.join(directorio, a)\n\t\tif os.path.isdir(item):\n\t\t\tparaañadir.append(item)\n\treturn paraañadir",
"def _dodir ( self, dirpath, mkdir_p ):\n return",
"def __post_init__(self) -> None:\n if self.is_directory and not self.path.endswith('/'):\n self.path += '/'",
"def _rnlst(self, path, filelist):\n path = self._cleanpath(path)\n dirdict = self.parsedir(path)\n print(dirdict)\n \n trycwds = dirdict.get('trycwds', [])\n names = dirdict.get('names', [])\n \n for trycwd, name in zip(trycwds, names): \n if trycwd: # name is a directory\n self._rnlst(self.remotepathsep.join([path, name]), filelist)\n else: \n filelist.append(self.remotepathsep.join([path, name]))\n \n return filelist",
"def add_valid_to_path( pathList ):\r\n import sys , os\r\n for path in pathList:\r\n if os.path.isdir( path ):\r\n sys.path.append( path )\r\n print \"Loaded\" , path",
"def add_valid_to_path( pathList ):\r\n import sys , os\r\n for path in pathList:\r\n if os.path.isdir( path ):\r\n sys.path.append( path )\r\n print \"Loaded\" , path",
"def add_from_proc(self, proc_dir):\n for dir_name in sorted(os.listdir(proc_dir)):\n if re.match(r'^[0-9]+$', dir_name):\n self.add_from_piddir(os.path.join(proc_dir, dir_name))",
"def enumerate_dirname(path):\n for (dirpath, dirname_list, _) in os.walk(path):\n for dirname in tqdm(dirname_list):\n if dirname[0] != '.':\n old_dir = dirpath + dirname\n new_dir = dirpath + '%.5d' % dirname_list.index(dirname)\n if os.path.exists(new_dir):\n print ('Directory %s already exist' % new_dir)\n continue\n else:\n try:\n os.rename(old_dir, new_dir)\n except:\n print('Couldn\\'t rename %s' % old_dir)\n break",
"def process_IN_ISDIR(self, event):",
"def iter_dir(tree, path):\n for f in os.listdir(path):\n if os.path.isfile(path + '/' + f + '/__init__.py'):\n tree[f] = None\n elif os.path.isdir(path + '/' + f):\n tree[f] = {}\n SnakeWM.iter_dir(tree[f], path + '/' + f)",
"def parse_and_add_directory(self, list_of_root_tags, directory):\n\n # Check if directory exists and list_of_root_tags isn't empty\n\n if len(list_of_root_tags) == 0:\n raise Exception(\n \"{} : List of root tags empty in parse_and_add_directory!\".format(\n self.__schema_name\n )\n )\n\n if not os.path.isdir(directory):\n raise Exception(\n \"{} : Directory {} does not exist in parse_and_add_directory!\".format(\n self.__schema_name, directory\n )\n )\n\n for subdir, dirs, files in os.walk(directory):\n for file in files:\n if file.upper().endswith(\".XML\"):\n try:\n new_path = os.path.join(subdir, file)\n parsed = self.__get_parsed_relaxng(new_path)\n root_tag = parsed.getroot().tag\n if root_tag in list_of_root_tags:\n self.add_test(\n f\"Path Added: {file}\", new_path, None, parsed_xml=parsed\n )\n except:\n pass",
"def parse_dir(args, dirname, names):\n for name in names:\n path = os.path.join(dirname, name)\n\n if os.path.isfile(path):\n parse_file_from_directory(path, args)",
"def process_dir(self, src_dir, dst_dir):\n self.logger.tree(src_dir)\n for srcpath in self.list_all_files(src_dir):\n dstpath = srcpath.replace(src_dir, dst_dir)\n # TODO: Can we clean up the way we handle relative_path?\n # Relative path is here so that when we print files in the log it\n # shows only the file's path. Should we just pass it to the logger\n # when we create it? Or let the logger figure it out?\n # relative_path = srcpath.replace(src_dir + '/', '')\n self.cur_file = File(srcpath, dstpath, self.logger)\n self.process_file(self.cur_file)",
"def parse_dir(self, dir_path=\"NULL\"):\n \n spec_list = []\n dir_path = os.path.abspath(dir_path)\n # if user's path is not having a \"/\" \n if dir_path[-1] != \"/\":\n dir_path = dir_path + \"/\"\n # invoke parse file for every file in the dir_path directory \n files = commands.getoutput(\"ls %s\" % dir_path).split()\n for f in files:\n spec = self.parse_file(dir_path + f)\n spec_list.append(spec)\n return sortbyfilenames(spec_list, files)",
"def subdir(self):",
"def directoryModifiedHandler(ob, event):\n query = dict(object_provides=IEntry.__identifier__)\n for l in ob.restrictedTraverse('@@folderListing')(**query):\n l.getObject().reindexObject(idxs=[\"pdir_keywords\"])",
"def count_files_loop(self, dirpath):\n for i in os.listdir(dirpath):\n if i[0] == '.':\n continue\n elif os.path.isdir(dirpath + i):\n self.count_files_loop(dirpath + i + '/')\n elif os.path.isfile(dirpath + i):\n self.file_count += 1\n else:\n print dirpath + i, 'does not exist'\n return",
"def addAdditionalDir(self, *args):\n return _libsbml.SBMLFileResolver_addAdditionalDir(self, *args)",
"def init(cls, dirpath=\".\", newdir=True):\n dirpath = dirpath if dirpath[-1] == os.sep else dirpath + os.sep \n if not os.path.exists(dirpath):\n os.makedirs(dirpath)\n if newdir:\n now = datetime.datetime.now()\n dirpath = dirpath + now.strftime(\"%y-%m-%d_%H-%M-%S\") + os.sep\n os.mkdir(dirpath)\n cls.dirpath = dirpath\n cls.__data.clear()\n cls.__counters.clear()"
]
| [
"0.70050806",
"0.6855994",
"0.6640226",
"0.6566045",
"0.65539616",
"0.6538955",
"0.6464651",
"0.6424907",
"0.64209896",
"0.6417724",
"0.6414585",
"0.64013577",
"0.634992",
"0.63381624",
"0.63350016",
"0.6300745",
"0.6300745",
"0.62926817",
"0.6290385",
"0.62734854",
"0.6255329",
"0.6227333",
"0.62273043",
"0.6193679",
"0.6177191",
"0.61600643",
"0.6156063",
"0.6135555",
"0.6125396",
"0.6113722"
]
| 0.7614743 | 0 |
Parse the given line in order to return an array of n_classes probabilities | def _parse_preds_line(self, preds_line):
        # The output from AWS XGBoost for multiclass is [proba_c1, proba_c2, proba_c3, ...] for each sample, neither CSV nor Python...
# return list(map(float, preds_line[1:-2].split(',')))
return eval(preds_line) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load_predictions(filepath):\n\twith open(filepath, 'r') as fh:\n\t\tcells = [line.split(',') for line in fh.read().splitlines()]\n\tdata = empty((TESTSIZE, NCLASSES), dtype = float64)\n\tfor k, row in enumerate(cells[1:]):\n\t\tdata[k, :] = row[1:]\n\treturn data",
"def classesAndFrames(self):\n classes = defaultdict(int)\n with open(self.inputfile) as fin:\n for line in fin:\n arr = line.strip().split()\n y = int(arr[1])\n classes[y] += 1\n return classes",
"def read_cv_predictions(output):\n with open(os.path.join(output, \"classifier-fold-predictions.txt\"), 'r') as in_file:\n lines = in_file.readlines()\n\n lines = ''.join(lines)\n # there is always a new line at the end of the file. strip it\n fold_predictions = lines.split('\\n\\n')[:-1]\n\n # parse the lines of the file\n names, preds, all_labels, all_prob0s, all_prob1s = [], [], [], [], []\n for fold_prediction in fold_predictions:\n fold_prediction = fold_prediction.split('\\n')\n\n rec_names, predictions, labels, prob0s, prob1s = [], [], [], [], []\n for line in fold_prediction[2:]:\n [name, prediction, label, prob0, prob1] = re.findall('[\\w_./]+', line)\n\n rec_names.append(name)\n predictions.append(int(prediction))\n labels.append(int(label))\n prob0s.append(float(prob0))\n prob1s.append(float(prob1))\n\n names.append(rec_names)\n preds.append(predictions)\n all_labels.append(labels)\n all_prob0s.append(prob0s)\n all_prob1s.append(prob1s)\n\n return np.asarray(names), np.asarray(preds), np.asarray(all_labels), np.asarray(all_prob0s), np.asarray(all_prob1s)",
"def predict(self):\n probabilities = self.probability_array()\n # THIS ASSUMES the classifiers are in order: 0th column of the\n # probabilities corresponds to label = 0, ..., 9th col is for 9.\n classes = np.argmax(probabilities, axis=1)\n return classes",
"def predict_single(self, line):\n # print(line)\n prob_list = {}\n for claz in self.class_list:\n prob_list[claz] = 1\n\n # for each cat column\n for col in self.cat_cols:\n val = line[col]\n for claz in self.class_list:\n prob_list[claz] *= self.prob_hub[col][claz][val]\n\n # for each num column\n for col in self.num_cols:\n val = line[col]\n # for each class\n for claz in self.class_list:\n mean, std = self.prob_hub[col][claz]\n prob_list[claz] *= calculate_prob(val, mean, std)\n\n return max(prob_list.items(), key=operator.itemgetter(1))[0]",
"def _get_predictions(self, line: List[str]):\n example = self.tokenizer.encode_plus(\n [c for c in line],\n return_token_type_ids=True,\n return_attention_mask=True,\n return_tensors=\"pt\",\n )\n example = {k: v.to(self.device) for k, v in example.items()}\n prediction = self.model(example, training=False)[0]\n prediction = self.softmax_fn(prediction).cpu().data.numpy()\n prediction = prediction[1:-1, 1:].argmax(axis=-1)\n return prediction",
"def load_labels(path):\n with open(path, 'r', encoding='utf-8') as f:\n lines = f.readlines()\n labels = []\n for row_number, content in enumerate(lines):\n pair = re.split(r'[:\\s]+', content.strip(), maxsplit=1)\n #if len(pair) == 2 and pair[0].strip().isdigit():\n labels.append(np.array([int(pair[0].strip()),pair[1].strip()]))\n #else:\n # labels.append(pair[0].strip())\n return np.array(labels)",
"def train_model(filename):\n counts = get_file_counts(filename)\n new_file = open(filename, \"r\")\n num_lines = 0\n for line in new_file:\n num_lines += 1 \n #number of lines in file\n return counts_to_probs(counts, num_lines)",
"def read_classes(file, class_list):\n\n if 'PSB' not in file.readline().strip():\n raise ('Not a valid PSB classification header', ImportError)\n\n _, num_models = file.readline().strip().split()\n modelcount = 0\n class_dict = {}\n\n while modelcount < int(num_models):\n line = file.readline().strip().split()\n if len(line) == 0:\n pass \n elif len(line) > 2 and line[2] == '0': # empty class label\n pass\n elif len(line) > 2:\n class_name = str(line[0])\n # if the class not in the class_list add it\n if class_name not in class_list:\n class_list.append(class_name)\n else: # add the class to the number of the model\n class_id = class_list.index(class_name) # give class id based on class_list index\n class_dict[line[0]] = (class_id, class_name)\n modelcount += 1\n\n return class_dict, class_list",
"def parse_file_into_array(filename, separator):\n arr = []\n with open(filename) as file:\n for row in file.read().splitlines():\n try:\n row_arr = [float(cell) for cell in row.split(separator)]\n if 'winequality' in filename:\n row_arr[-1] = 1 if row_arr[-1] > 5 else 0 # convert to binary classification\n elif 'breast-cancer' in filename:\n row_arr[-1] = 1 if row_arr[-1] == 4 else 0 # convert to binary classification\n except ValueError:\n continue\n arr.append(row_arr)\n return arr",
"def mapper(line): \n feats = line.strip().split(\",\") \n # labels must be at the beginning for LRSGD\n label = feats[len(feats) - 1] \n feats = feats[: len(feats) - 1]\n feats.insert(0,label)\n features = [ float(feature) for feature in feats ] # need floats\n return np.array(features)",
"def parse_labels(file: str) -> ndarray:\n rows = []\n with open(file, 'r', encoding='utf-8') as f:\n for row in f:\n rows.append(row.strip())\n return array(rows)",
"def parse_feature_importances(filepath):\r\n lines = open(filepath, 'U').readlines()\r\n feature_IDs = []\r\n scores = []\r\n for line in lines[1:]:\r\n words = line.strip().split('\\t')\r\n feature_IDs.append(words[0].strip())\r\n scores.append(float(words[1].strip()))\r\n return array(feature_IDs), array(scores)",
"def parse_nbest_line(line):\n fields=[ x.strip() for x in line.strip().split('|||') ]\n fields[0]=int(fields[0])\n fields[3]=float(fields[3])\n return fields",
"def predict_sample_generator(self, fi):\n for line in fi:\n sequence = np.array(line.split(\" \"), dtype=np.intp)\n yield sequence, sequence.shape[0], np.unique(sequence).shape[0]",
"def classify(self, filename):\n tokens = preprocess.parse_book(filename)\n perplexities = [m.perplexity(tokens) for m in self.models]\n print('-> perplexities = {}'.format([round(p) for p in perplexities]))\n over_baseline = [p - b for p, b in zip(perplexities, self.baselines)]\n print('-> over_baseline = {}'.format([round(o) for o in over_baseline]))\n min_index = over_baseline.index(min(over_baseline))\n return self.genres[min_index]",
"def load_classes(path):\n fp = open(path, \"r\")\n names = fp.read().split(\"\\n\")[:-1]\n # -1까지 하는 이유 마지막에 공백이 있다.\n print(\"Load Class Nums : \",len(names))\n return names",
"def populate_list(self, input_filename):\r\n f = open(input_filename, 'r')\r\n\r\n for line in f:\r\n # Process the input line\r\n line_split = line.strip().split('(')\r\n line_split[-1] = line_split[-1][:-1] # Removes the extra bracket at the end\r\n\r\n class_name = line_split[0]\r\n parameters = line_split[1].split(',')\r\n self.metrics.append(self.instantiate_class(class_name, *parameters))\r\n\r\n f.close()",
"def data_parser(data):\n\n with open(data, 'r') as inp:\n\n # take every sample\n # the last line in the text file is empty, so reading until -1\n samples = inp.read().split('\\n')[:-1]\n\n vec = []\n labels = []\n for sample in samples:\n # file is tab delimited\n split_samples = sample.split('\\t')\n # last column contains the label\n labels.append(int(split_samples[-1]))\n\n features = []\n for feature in split_samples[:-1]:\n features.append(float(feature))\n vec.append(features)\n\n # make the features and labels as a numpy array\n vec = np.array(vec)\n labels = np.array(labels)\n return vec, labels",
"def get_labels(train_f_path):\n results = []\n with open(train_f_path, 'r') as f:\n for line in f:\n n_line = line.strip()\n if n_line:\n results.append(n_line.split()[0])\n return results",
"def parseClasses(file_name):\n\tlines = file(file_name).read().strip().split('\\n')\n\tlines = [x.strip() for x in lines if len(x.strip()) > 0]\n\tclasses = []\n\tfor l in lines:\n\t\tclasses = classes + [clean(x) for x in l.split(',')]\n\treturn classes",
"def prediction_generator(self, line: str):\n prediction = self._get_predictions(line[: self.model_max_length])\n if len(line) > self.model_max_length:\n prediction_left = self._get_predictions(line[self.model_max_length :])\n prediction = np.concatenate([prediction, prediction_left])\n bies_pred = [self.bies_dict[p + 1] for p in prediction]\n words_pred = [\n c if bies_pred[i] in (\"B\", \"I\") else c + \" \" for i, c in enumerate(line)\n ]\n return words_pred",
"def _parse_impute2_line(self, line):\n # Splitting\n row = line.rstrip(\"\\r\\n\").split(\" \")\n\n # Constructing the probabilities\n prob = np.array(row[5:], dtype=float)\n prob.shape = (prob.shape[0] // 3, 3)\n\n # Constructing the dosage\n dosage = 2 * prob[:, 2] + prob[:, 1]\n if self.prob_t > 0:\n dosage[~np.any(prob >= self.prob_t, axis=1)] = np.nan\n\n return Genotypes(\n Variant(row[1], CHROM_STR_ENCODE.get(row[0], row[0]), int(row[2]),\n [row[3], row[4]]),\n dosage,\n reference=row[3],\n coded=row[4],\n multiallelic=False,\n )",
"def read_input_pizza(filename):\n lines = open(filename).readlines()\n M, N = [int(val) for val in lines[0].split()]\n available = np.array([int(n) for n in lines[1].split()])\n return M, N, available",
"def extract_data(filename):\n with open(filename, 'rb') as f:\n reader=f.readlines()\n train_data_label = [[int(x) for x in line.split() if x.isdigit()] for line in reader] \n # sorted by label\n train_data_label = sorted(train_data_label, key=lambda x: x[-1])\n train_data_label = np.array(train_data_label) \n return train_data_label",
"def classification(fileName, node):\n wrongPrediction = [0, 0, 0, 0]\n exactPrediction = [0, 0, 0, 0]\n correct_classes = 0\n incorrect_classes = 0\n total_classes = 0\n numberofClasses = 4\n meanofClassAccuracy = 0\n attributeValues, classes, _ = readData(fileName)\n attributeValues = np.array(attributeValues)\n for i in range(attributeValues.shape[0]):\n predicted_value = classify(attributeValues[i], node)\n if int(classes[i]) != int(predicted_value):\n wrongPrediction[classes[i] - 1] += 1\n incorrect_classes += 1\n else:\n exactPrediction[classes[i] - 1] += 1\n correct_classes += 1\n total_classes = correct_classes + incorrect_classes\n accuracy_value= correct_classes / total_classes\n class_sum = numberofClasses * total_classes\n \n for count in range(numberofClasses):\n incorrect_classes = wrongPrediction[count]\n correct_classes = exactPrediction[count]\n class_sum = numberofClasses * (correct_classes + incorrect_classes)\n \n meanofClassAccuracy += correct_classes / class_sum\n \n \n return accuracy_value, meanofClassAccuracy",
"def predict_proba(self, samples):\n predictions, class_ids = self.__predict(samples)\n\n # analyze\n probabilities = []\n\n for class_id, p in predictions.iteritems():\n probabilities.append(len(p[p > 0]) / float(len(samples)))\n class_ids.append(class_id)\n return np.array(probabilities), np.array(class_ids)",
"def __preprocess_line(line):\n return [int(element) for element in line.lstrip().rstrip().split()] # preprocess the input line",
"def read_probabilities(filename, sep=' '):\n df = pd.read_csv(filename, sep=sep, header = None)\n print('Read probabilities')\n return df.set_index([0, 1])",
"def process_line(line):\n [label, text] = line.split('\\t')\n return text.split()"
]
| [
"0.5941844",
"0.59043354",
"0.5703307",
"0.5698324",
"0.5682355",
"0.56445974",
"0.5567654",
"0.5566163",
"0.5558649",
"0.5481291",
"0.5458445",
"0.54475963",
"0.54351074",
"0.541697",
"0.54056454",
"0.5386606",
"0.5369917",
"0.5330952",
"0.5315417",
"0.5310961",
"0.5310177",
"0.5280843",
"0.5276948",
"0.5275094",
"0.5271181",
"0.5243016",
"0.5226707",
"0.521943",
"0.5215207",
"0.5214311"
]
| 0.64363974 | 0 |
Generate the local and S3 libsvm files used for training | def _prepare_libsvm_data(self):
logging.info('Preparing libsvm training data...')
if self.clean or not (self.is_s3_file(self.s3_training_file) and self.is_s3_file(self.s3_validation_file)):
logging.info('S3 libsvm files do not exist.')
if self.clean or not (os.path.isfile(self.local_libsvm_training_file) and os.path.isfile(self.local_libsvm_validation_file)):
logging.info('Local libsvm files do not exist.')
logging.info('Generating local libsvm files...')
dump_svmlight_file(X=self.training_x, y=self.training_y, f=self.local_libsvm_training_file)
dump_svmlight_file(X=self.validation_x, y=self.validation_y, f=self.local_libsvm_validation_file)
            logging.info('Local libsvm files are ready. Uploading to S3...')
upload_file_to_s3(self.local_libsvm_training_file, self.infra_s3['s3_bucket'], self.s3_training_file)
upload_file_to_s3(self.local_libsvm_validation_file, self.infra_s3['s3_bucket'], self.s3_validation_file)
else:
logging.info('S3 libsvm files already exist. Skipping step.') | {
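For context on the document field above: it relies on scikit-learn's dump_svmlight_file to write the libsvm-format files before uploading them. A minimal local sketch of that round trip follows; the toy data, file name, bucket, and the boto3 upload call are illustrative assumptions, not details taken from the class above, which uses its own upload_file_to_s3 helper.

import boto3
from sklearn.datasets import make_classification, dump_svmlight_file, load_svmlight_file

# Build a small toy dataset (assumption: any numeric X with integer labels y).
X, y = make_classification(n_samples=100, n_features=5, random_state=0)

# Write a local libsvm/svmlight file, mirroring the dump_svmlight_file calls above.
dump_svmlight_file(X=X, y=y, f='training.libsvm')

# Reload to confirm the file parses as libsvm; X comes back as a sparse matrix.
X_loaded, y_loaded = load_svmlight_file('training.libsvm')
assert X_loaded.shape[0] == 100

# Hypothetical upload step: boto3's upload_file(Filename, Bucket, Key) plays the role
# of the upload_file_to_s3 helper above (bucket and key are placeholders).
boto3.client('s3').upload_file('training.libsvm', 'my-bucket', 'xgboost/train/training.libsvm')

The class above wraps this same pattern, adding existence checks so the dump and upload are skipped when the files are already present locally or in S3.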
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _init_s3_train_files(self):\n # XGBoost requires libsvm training and validation files when invoking fit()\n self._prepare_libsvm_data()\n\n if self.n_classes <= 2:\n self.hyperparameters['eval_metric'] = 'auc'\n self.hyperparameters['objective'] = 'binary:logistic'\n else:\n self.hyperparameters['objective'] = 'multi:softprob'\n self.hyperparameters['num_class'] = self.n_classes\n\n s3_input_training = sagemaker.s3_input(s3_data=self.s3_training_libsvm_path, content_type='libsvm')\n s3_input_validation = sagemaker.s3_input(s3_data=self.s3_validation_libsvm_path, content_type='libsvm')\n return s3_input_training, s3_input_validation",
"def svm_train(c, g, v, dname):\n if sys.platform != 'win32':\n # grid_py = '../tools/grid.py'\n # gnuplot_exe = '/usr/bin/gnuplot'\n svmtrain_exe = './PromoterSVM/libsvm/windows/my-svm-train'\n else:\n # grid_py = r\"D:\\LiYuan\\Data_Science\\libsvm-3.21\\windows\\grid.py\"\n # gnuplot_exe = r\"D:\\Program Files\\gnuplot\\bin\\gnuplot.exe\"\n svmtrain_exe = r'D:\\LiYuan\\\"Bioinformatic Research\"\\IMU\\zuo\\Tasks\\\"20160206-Promoter SVM\"\\biopromoter_script\\PromoterSVM\\libsvm\\windows\\my-svm-train.exe'\n\n df_model = dname + \".model\"\n cmd = '{0} -c {1} -g {2} -v {3} \"{4}\" \"{5}\"'.format(svmtrain_exe, c, g, v, dname, df_model)\n Popen(cmd, shell=True, stdout=PIPE).communicate()",
"def download_libsvm(dataset, destination, replace=False, verbose=False):\n url = (\"https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/\" +\n NAMES[dataset])\n path = download(url, destination, replace=replace, verbose=verbose)\n return path",
"def main(unused_argv):\n make_dir(FLAGS.raw_dir)\n\n # Get paths of download/extracted training and evaluation files.\n print(\"Downloading data from source\")\n train_files = get_raw_files(FLAGS.raw_dir, constants.TRAIN_DATA_SOURCES)\n eval_files = get_raw_files(FLAGS.raw_dir, constants.EVAL_DATA_SOURCES)",
"def load_embeddings_models():\n\n\t# ---LOADING WORD2VEC MODEL---\n\tmodel_load_path = os.path.join(ROOT_PATH, 'models', 'word2vec', 'NILC', 'nilc_cbow_s300_300k.txt')\n\t# model_load_path = os.path.join('models', 'word2vec', 'NILC', 'nilc_skip_s300.txt')\n\tstart_time = time.time()\n\tprint(\"Started loading the word2vec model\")\n\tword2vec_model = KeyedVectors.load_word2vec_format(model_load_path)\n\t# word2vec_model = None\n\tprint(\"Model loaded\")\n\tprint(\"--- %s seconds ---\" %(time.time() - start_time))\n\tprint('\\a')\n\n\t# ---LOADING FASTTEXT MODEL---\n\tmodel_path = os.path.join(ROOT_PATH, 'models', 'fastText', 'cc.pt.300_300k.vec')\n\tstart_time = time.time()\n\tprint(\"Started loading the fasttext model\")\n\tfasttext_model = KeyedVectors.load_word2vec_format(model_path)\n\t# fasttext_model = None\n\tprint(\"Model loaded\")\n\tprint(\"--- %s seconds ---\" %(time.time() - start_time))\n\tprint('\\a')\t\n\n\t# ---LOADING PT-LKB MODEL---\n\tmodel_load_path = os.path.join(ROOT_PATH, 'models', 'ontoPT', 'PT-LKB_embeddings_64', 'ptlkb_64_30_200_p_str.emb')\n\t# model_load_path = os.path.join('models', 'ontoPT', 'PT-LKB_embeddings_128', 'ptlkb_128_80_10_p_str.emb')\n\tstart_time = time.time()\n\tprint(\"Started loading the PT-LKB-64 model\")\n\tptlkb64_model = KeyedVectors.load_word2vec_format(model_load_path)\n\t# ptlkb64_model = None\n\tprint(\"Model loaded\")\n\tprint(\"--- %s seconds ---\" %(time.time() - start_time))\n\tprint('\\a')\n\n\t# ---LOADING GLOVE-300 MODEL---\n\tmodel_load_path = os.path.join(ROOT_PATH, 'models', 'glove', 'glove_s300_300k.txt')\n\t# model_load_path = os.path.join('models', 'glove', 'glove_s100.txt')\n\tstart_time = time.time()\n\tprint(\"Started loading the GLOVE 300 dimensions model\")\n\tglove300_model = KeyedVectors.load_word2vec_format(model_load_path)\n\t# glove300_model = None\n\tprint(\"Model loaded\")\n\tprint(\"--- %s seconds ---\" %(time.time() - start_time))\n\tprint('\\a')\n\n\t# ---LOADING NUMBERBATCH MODEL---\n\tmodel_load_path = os.path.join(ROOT_PATH, 'models', 'numberbatch', 'numberbatch-17.02_pt_tratado.txt')\n\tstart_time = time.time()\n\tprint(\"Started loading the NUMBERBATCH dimensions model\")\n\tnumberbatch_model = KeyedVectors.load_word2vec_format(model_load_path)\n\t# numberbatch_model = None\n\tprint(\"Model loaded\")\n\tprint(\"--- %s seconds ---\" %(time.time() - start_time))\n\tprint('\\a')\n\n\treturn word2vec_model, fasttext_model, ptlkb64_model, glove300_model, numberbatch_model",
"def run(self) -> None:\n ts = time.time()\n startTime = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')\n\n svm_dataset = \"NLP/SVM/IHE/SVM_dataset_ihe.csv\"\n \n tags = ['IHE {}'.format(i) for i in range(1, 10)] # IHE tags.\n\n # SDG results files.\n model = \"NLP/SVM/IHE/model.pkl\"\n\n self.load_dataset(svm_dataset)\n self.load_tags(tags)\n print(\"Loaded dataset: size =\", len(self.dataset))\n\n print(\"Training...\")\n X_train, X_test, y_train, y_test = self.train()\n\n\n print(\"Saving results...\")\n self.serialize(model)\n\n print(\"Done.\")",
"def get_train_files(self):\n raise NotImplementedError",
"def trainNewModel():\n print \"Creating feature vectors for trainset...\"\n trainDependencies = getDependency(trainDepFilename)\n trainLabel, trainFeatureVectors = \\\n createFeatureVectors(trainFilename, trainDependencies)\n print \"Length of feature vector for trainset: %d\" \\\n % len(trainFeatureVectors[0])\n if not len(addTrainsetList) == 0:\n print \"Combining feature vectors of additional trainset...\"\n trainLabel, trainFeatureVectors = \\\n combineAdditionalTrainset(\n addTrainsetList, trainLabel, trainFeatureVectors)\n print \"Feature vectors of trainset created.\"\n SVMTrain(trainLabel, trainFeatureVectors, modelFilename)",
"def main():\n num_rows = 500000\n review_df = pd.read_csv(\"s3://msia490project/processed_video_reviews.csv\").dropna().head(num_rows)\n # train and test set split\n X_train, X_test, y_train, y_test = train_test_split(review_df['reviewText'], review_df['score'],\n random_state=115)\n # re-run the model pipeline and generate necessary artifacts for making predictions\n best_svm = LinearSVC(random_state=115)\n ngram_range = (1, 3)\n generate_artifacts_for_best_svm_model(best_svm, ngram_range, X_train, y_train)",
"def ml_svm_path(self) -> str:\n return join(self.machine_learning_path, 'svm')",
"def testsvm_step2(self, models_name, sample_name, output_name):\n args = self.get_file_args(models_name)\n args += self.get_commonlib()\n\n args += \" -outputformat text \"\n\n self.mapreduce_core(sample_name=sample_name,\n output_name=output_name,\n exe_file=self.exe_testsvm2,\n is_cat=False,\n args=args)",
"def run_model_three():\n\n pos_map = pickle.load(open('pos_map.p', \"rb\"))\n vocab = build_vocab(training_directory)\n word2vec = get_w2vec(wordtovec)\n\n train_out_list = prep_data_lexsub(training_directory, \"mod3_train_prepped.txt\", 3, pos_map, vocab, word2vec)\n test_out_list = prep_data_lexsub(testing_directory, \"mod3_test_prepped.txt\", 3, pos_map, vocab, word2vec)\n\n posset = get_posset()\n vocab.add(\"<s>\")\n vocab.add(\"<UNK>\")\n convert_to_svm('mod3_train_prepped.txt', \"mod3_train.svm\", posset, vocab)\n convert_to_svm('mod3_test_prepped.txt', \"mod3_test.svm\", posset, vocab)\n\n p_labels, p_acc, p_vals = train_test_model(\"mod3_train.svm\", \"mod3_test.svm\")\n with open(\"modelscores.txt\", \"a\") as text_file:\n text_file.write(str(p_acc[0]))\n text_file.write(\"\\n\")\n pickle.dump(p_labels, open('mod3_p_labels.p', 'wb'))",
"def setup():\n for dir_path in [train_dir, output_dir]:\n Path(dir_path).mkdir(exist_ok=True)\n\n # create the training and test data files that we will use\n create_jsonlines_feature_files(train_dir)",
"def load_vectors(args):\n dict_fold = 'train' # which fold of the data will be used to produce results\n if args.task == 'conneau' or 'xling':\n data_dir = os.path.join(args.data_dir, 'MUSE')\n dict_dir = os.path.join(data_dir, 'crosslingual/')\n if args.task == 'xling':\n dict_dir = os.path.join(dict_dir, 'xling-dictionaries/bli_datasets/')\n else:\n dict_dir = os.path.join(dict_dir, 'dictionaries/')\n\n src_path = os.path.join(data_dir, 'wiki.' + args.src_lang + '.vec')\n trg_path = os.path.join(data_dir, 'wiki.' + args.trg_lang + '.vec')\n src_freq_path = None\n trg_freq_path = None\n if dict_fold == 'test':\n postfix = '.5000-6500.txt'\n elif dict_fold == 'train':\n postfix = '.0-5000.txt'\n else:\n raise ValueError('Unrecognized dictionary fold for evaluation')\n elif args.task == 'dinu':\n data_dir = os.path.join(args.data_dir,'dinu')\n dict_dir = os.path.join(data_dir, 'dictionaries/')\n src_path = os.path.join(data_dir, 'embeddings', args.src_lang + '.emb.txt')\n trg_path = os.path.join(data_dir, 'embeddings', args.trg_lang + '.emb.txt')\n src_freq_path = None\n trg_freq_path = None\n postfix = '.{}.txt'.format(dict_fold)\n elif args.task == 'zhang':\n order = [args.src_lang,args.trg_lang]\n if args.src_lang == 'en':\n order = order[::-1]\n data_dir = os.path.join(args.home_dir,'pkg/UBiLexAT/data/','-'.join(order))\n dict_dir = data_dir\n src_path = os.path.join(data_dir, 'word2vec.' + args.src_lang)\n trg_path = os.path.join(data_dir, 'word2vec.' + args.trg_lang)\n src_freq_path = os.path.join(data_dir, 'vocab-freq.' + args.src_lang)\n trg_freq_path = os.path.join(data_dir, 'vocab-freq.' + args.trg_lang)\n postfix = '.train.txt'\n\n srcfile = open(src_path, encoding=args.encoding, errors='surrogateescape')\n trgfile = open(trg_path, encoding=args.encoding, errors='surrogateescape')\n src_words, xs = embeddings.read(srcfile, args.maxs)\n trg_words, xt = embeddings.read(trgfile, args.maxt)\n srcfile.close()\n trgfile.close()\n \n if src_freq_path:\n with open(src_freq_path, encoding=args.encoding, errors='surrogateescape') as f:\n lines = [a.split(' ') for a in f.read().strip().split('\\n')]\n freq_src = {k: int(v) for (k,v) in lines}\n\n with open(trg_freq_path, encoding=args.encoding, errors='surrogateescape') as f:\n lines = [a.split(' ') for a in f.read().strip().split('\\n')]\n freq_trg = {k: int(v) for (k,v) in lines}\n\n # Build word to index map\n src_word2ind = {word: i for i, word in enumerate(src_words)}\n trg_word2ind = {word: i for i, word in enumerate(trg_words)}\n\n if args.task == 'zhang':\n dict_path = os.path.join(dict_dir, 'all.' 
+ '-'.join(order) + '.lex')\n flip = False\n elif args.task == 'dinu' and args.src_lang != 'en':\n # Only has dicts in one direction, flip\n dict_path = os.path.join(dict_dir, args.trg_lang + '-' + args.src_lang + postfix)\n src_to_en = os.path.join(dict_dir, 'en' + '-' + args.src_lang + postfix)\n en_to_trg = os.path.join(dict_dir, args.trg_lang + '-' + 'en' + postfix)\n flip = True\n elif args.task == 'xling':\n dict_path = os.path.join(dict_dir, args.src_lang+'-'+args.trg_lang+'/yacle.test.freq.2k.'+args.src_lang+'-' + args.trg_lang + '.tsv')\n src_to_en = os.path.join(dict_dir, args.src_lang+'-'+'en'+'/yacle.test.freq.2k.'+args.src_lang+'-' + 'en' + '.tsv')\n en_to_trg = os.path.join(dict_dir, 'en'+'-'+args.trg_lang+'/yacle.test.freq.2k.'+'en'+'-' + args.trg_lang + '.tsv')\n\n flip = False\n if not os.path.exists(dict_path):\n dict_path = os.path.join(dict_dir, args.trg_lang+'-'+args.src_lang+'/yacle.test.freq.2k.'+args.src_lang+'-' + args.trg_lang + '.tsv')\n flip = True\n\n else:\n src_to_en = os.path.join(dict_dir, args.src_lang + '-' + 'en' + postfix)\n en_to_trg = os.path.join(dict_dir, 'en' + '-' + args.trg_lang + postfix)\n dict_path = os.path.join(dict_dir, args.src_lang + '-' + args.trg_lang + postfix)\n flip = False\n\n\n if not os.path.exists(dict_path):\n # create new dict\n print('Warning: no dict found, creating dictionary')\n create_dict_for(src_to_en, en_to_trg, dict_path, args)\n\n dictf = open(dict_path, encoding=args.encoding, errors='surrogateescape')\n src2trg = collections.defaultdict(set)\n oov = set()\n vocab = set()\n max_srcind = 0 # These are mostly for debug\n max_trgind = 0\n for line in dictf:\n splitted = line.split()\n if len(splitted) > 2:\n # Only using first translation if many are provided\n src, trg = splitted[:2]\n elif len(splitted) == 2:\n src, trg = splitted\n else:\n # No translation? Only happens for Zhang data so far\n continue\n if flip: src, trg = trg, src\n try:\n src_ind = src_word2ind[src]\n trg_ind = trg_word2ind[trg]\n src2trg[src_ind].add(trg_ind)\n vocab.add(src)\n max_srcind = max(max_srcind, src_ind)\n max_trgind = max(max_trgind, trg_ind)\n except KeyError:\n oov.add(src)\n\n return xs, xt, src_words, trg_words, src_word2ind, trg_word2ind, src2trg",
"def generate_singletrain_scipts(self):\n py = self.global_setting.get('python', sys.executable)\n ex_options = self.global_setting.get('train_options', str())\n train_py = \"/home/haihuam/Projects/RepPoints/mmdetection/tools/train.py\"\n\n if not os.access(py, os.X_OK):\n py = \"/home/haihuam/anaconda3/envs/RepPoints/bin/python\"\n\n if os.access(py, os.X_OK):\n content = \"set -e \\n\"\n content += \"export CUDA_VISIBLE_DEVICES=\" + \\\n \",\".join(self.selected_gpus)+ \" \\n\"\n content += \"cd %s \\n\"%(self.run_dir)\n content += \"%s %s %s \"%(py, train_py, self.setting['config_file'])\n content += \"--work_dir %s \"%(self.run_dir)\n content += \"--validate %s &> %s.log \\n\"%(ex_options, self.stage)\n content += \"touch train.done \\n\"\n\n self.script_content = content\n else:\n print(\"Error: %s is not executable.\"%py)\n sys.exit(0)",
"def store_models(self) -> None:\n\n # Iterate over the learner types (for which there will be\n # separate instances for each sub-experiment of the\n # cross-validation experiment)\n for learner_name in self.cv_learners_:\n loginfo('Saving {0} model files to disk...'.format(learner_name))\n for i, estimator in enumerate(self.cv_learners_[learner_name]):\n loginfo('Saving {0} model file #{1}'.format(learner_name, i + 1))\n joblib.dump(estimator,\n self.model_path_template_.format(learner_name, i + 1))",
"def load_data(cls, model_name):\n model_folder: pathlib.Path = pathlib.Path(os.path.join(kashgari.macros.DATA_PATH,\n 'datasets',\n f'gpt2-{model_name}'))\n model_folder.mkdir(exist_ok=True, parents=True)\n\n for filename in ['checkpoint', 'encoder.json', 'hparams.json', 'model.ckpt.data-00000-of-00001',\n 'model.ckpt.index', 'model.ckpt.meta', 'vocab.bpe']:\n url = \"https://storage.googleapis.com/gpt-2/models/\" + model_name + \"/\" + filename\n get_file(os.path.join(f'gpt2-{model_name}', filename),\n url,\n cache_dir=kashgari.macros.DATA_PATH)\n return str(model_folder)",
"def train(\n # fmt: off\n lang: (\"Model language\", \"positional\", None, str),\n output_path: (\"Output directory to store model in\", \"positional\", None, Path),\n train_path: (\"Location of JSON-formatted training data\", \"positional\", None, Path),\n dev_path: (\"Location of JSON-formatted development data\", \"positional\", None, Path),\n raw_text: (\"Path to jsonl file with unlabelled text documents.\", \"option\", \"rt\", Path) = None,\n base_model: (\"Name of model to update (optional)\", \"option\", \"b\", str) = None,\n pipeline: (\"Comma-separated names of pipeline components\", \"option\", \"p\", str) = \"tagger,parser,ner\",\n vectors: (\"Model to load vectors from\", \"option\", \"v\", str) = None,\n replace_components: (\"Replace components from base model\", \"flag\", \"R\", bool) = False,\n n_iter: (\"Number of iterations\", \"option\", \"n\", int) = 30,\n n_early_stopping: (\"Maximum number of training epochs without dev accuracy improvement\", \"option\", \"ne\", int) = None,\n n_examples: (\"Number of examples\", \"option\", \"ns\", int) = 0,\n use_gpu: (\"Use GPU\", \"option\", \"g\", int) = -1,\n version: (\"Model version\", \"option\", \"V\", str) = \"0.0.0\",\n meta_path: (\"Optional path to meta.json to use as base.\", \"option\", \"m\", Path) = None,\n init_tok2vec: (\"Path to pretrained weights for the token-to-vector parts of the models. See 'spacy pretrain'. Experimental.\", \"option\", \"t2v\", Path) = None,\n parser_multitasks: (\"Side objectives for parser CNN, e.g. 'dep' or 'dep,tag'\", \"option\", \"pt\", str) = \"\",\n entity_multitasks: (\"Side objectives for NER CNN, e.g. 'dep' or 'dep,tag'\", \"option\", \"et\", str) = \"\",\n noise_level: (\"Amount of corruption for data augmentation\", \"option\", \"nl\", float) = 0.0,\n orth_variant_level: (\"Amount of orthography variation for data augmentation\", \"option\", \"ovl\", float) = 0.0,\n eval_beam_widths: (\"Beam widths to evaluate, e.g. 
4,8\", \"option\", \"bw\", str) = \"\",\n gold_preproc: (\"Use gold preprocessing\", \"flag\", \"G\", bool) = False,\n learn_tokens: (\"Make parser learn gold-standard tokenization\", \"flag\", \"T\", bool) = False,\n textcat_multilabel: (\"Textcat classes aren't mutually exclusive (multilabel)\", \"flag\", \"TML\", bool) = False,\n textcat_arch: (\"Textcat model architecture\", \"option\", \"ta\", str) = \"bow\",\n textcat_positive_label: (\"Textcat positive label for binary classes with two labels\", \"option\", \"tpl\", str) = None,\n tag_map_path: (\"Location of JSON-formatted tag map\", \"option\", \"tm\", Path) = None,\n verbose: (\"Display more information for debug\", \"flag\", \"VV\", bool) = False,\n debug: (\"Run data diagnostics before training\", \"flag\", \"D\", bool) = False,\n # fmt: on\n):\n util.fix_random_seed()\n util.set_env_log(verbose)\n\n # Make sure all files and paths exists if they are needed\n train_path = util.ensure_path(train_path)\n dev_path = util.ensure_path(dev_path)\n meta_path = util.ensure_path(meta_path)\n output_path = util.ensure_path(output_path)\n if raw_text is not None:\n raw_text = list(srsly.read_jsonl(raw_text))\n if not train_path or not train_path.exists():\n msg.fail(\"Training data not found\", train_path, exits=1)\n if not dev_path or not dev_path.exists():\n msg.fail(\"Development data not found\", dev_path, exits=1)\n if meta_path is not None and not meta_path.exists():\n msg.fail(\"Can't find model meta.json\", meta_path, exits=1)\n meta = srsly.read_json(meta_path) if meta_path else {}\n if output_path.exists() and [p for p in output_path.iterdir() if p.is_dir()]:\n msg.warn(\n \"Output directory is not empty\",\n \"This can lead to unintended side effects when saving the model. \"\n \"Please use an empty directory or a different path instead. If \"\n \"the specified output path doesn't exist, the directory will be \"\n \"created for you.\",\n )\n if not output_path.exists():\n output_path.mkdir()\n msg.good(f\"Created output directory: {output_path}\")\n\n tag_map = {}\n if tag_map_path is not None:\n tag_map = srsly.read_json(tag_map_path)\n # Take dropout and batch size as generators of values -- dropout\n # starts high and decays sharply, to force the optimizer to explore.\n # Batch size starts at 1 and grows, so that we make updates quickly\n # at the beginning of training.\n dropout_rates = util.decaying(\n util.env_opt(\"dropout_from\", 0.2),\n util.env_opt(\"dropout_to\", 0.2),\n util.env_opt(\"dropout_decay\", 0.0),\n )\n batch_sizes = util.compounding(\n util.env_opt(\"batch_from\", 100.0),\n util.env_opt(\"batch_to\", 1000.0),\n util.env_opt(\"batch_compound\", 1.001),\n )\n\n if not eval_beam_widths:\n eval_beam_widths = [1]\n else:\n eval_beam_widths = [int(bw) for bw in eval_beam_widths.split(\",\")]\n if 1 not in eval_beam_widths:\n eval_beam_widths.append(1)\n eval_beam_widths.sort()\n has_beam_widths = eval_beam_widths != [1]\n\n default_dir = Path(__file__).parent.parent / \"ml\" / \"models\" / \"defaults\"\n\n # Set up the base model and pipeline. If a base model is specified, load\n # the model and make sure the pipeline matches the pipeline setting. 
If\n # training starts from a blank model, intitalize the language class.\n pipeline = [p.strip() for p in pipeline.split(\",\")]\n msg.text(f\"Training pipeline: {pipeline}\")\n disabled_pipes = None\n pipes_added = False\n if use_gpu >= 0:\n activated_gpu = None\n try:\n activated_gpu = set_gpu(use_gpu)\n except Exception as e:\n msg.warn(f\"Exception: {e}\")\n if activated_gpu is not None:\n msg.text(f\"Using GPU: {use_gpu}\")\n else:\n msg.warn(f\"Unable to activate GPU: {use_gpu}\")\n msg.text(\"Using CPU only\")\n use_gpu = -1\n if base_model:\n msg.text(f\"Starting with base model '{base_model}'\")\n nlp = util.load_model(base_model)\n if nlp.lang != lang:\n msg.fail(\n f\"Model language ('{nlp.lang}') doesn't match language \"\n f\"specified as `lang` argument ('{lang}') \",\n exits=1,\n )\n if vectors:\n msg.text(f\"Loading vectors from model '{vectors}'\")\n _load_vectors(nlp, vectors)\n\n nlp.select_pipes(disable=[p for p in nlp.pipe_names if p not in pipeline])\n for pipe in pipeline:\n # first, create the model.\n # Bit of a hack after the refactor to get the vectors into a default config\n # use train-from-config instead :-)\n if pipe == \"parser\":\n config_loc = default_dir / \"parser_defaults.cfg\"\n elif pipe == \"tagger\":\n config_loc = default_dir / \"tagger_defaults.cfg\"\n elif pipe == \"ner\":\n config_loc = default_dir / \"ner_defaults.cfg\"\n elif pipe == \"textcat\":\n config_loc = default_dir / \"textcat_defaults.cfg\"\n elif pipe == \"senter\":\n config_loc = default_dir / \"senter_defaults.cfg\"\n else:\n raise ValueError(f\"Component {pipe} currently not supported.\")\n pipe_cfg = util.load_config(config_loc, create_objects=False)\n if vectors:\n pretrained_config = {\n \"@architectures\": \"spacy.VocabVectors.v1\",\n \"name\": vectors,\n }\n pipe_cfg[\"model\"][\"tok2vec\"][\"pretrained_vectors\"] = pretrained_config\n\n if pipe == \"parser\":\n pipe_cfg[\"learn_tokens\"] = learn_tokens\n elif pipe == \"textcat\":\n pipe_cfg[\"exclusive_classes\"] = not textcat_multilabel\n pipe_cfg[\"architecture\"] = textcat_arch\n pipe_cfg[\"positive_label\"] = textcat_positive_label\n\n if pipe not in nlp.pipe_names:\n msg.text(f\"Adding component to base model '{pipe}'\")\n nlp.add_pipe(nlp.create_pipe(pipe, config=pipe_cfg))\n pipes_added = True\n elif replace_components:\n msg.text(f\"Replacing component from base model '{pipe}'\")\n nlp.replace_pipe(pipe, nlp.create_pipe(pipe, config=pipe_cfg))\n pipes_added = True\n else:\n if pipe == \"textcat\":\n textcat_cfg = nlp.get_pipe(\"textcat\").cfg\n base_cfg = {\n \"exclusive_classes\": textcat_cfg[\"exclusive_classes\"],\n \"architecture\": textcat_cfg[\"architecture\"],\n \"positive_label\": textcat_cfg[\"positive_label\"],\n }\n if base_cfg != pipe_cfg:\n msg.fail(\n f\"The base textcat model configuration does\"\n f\"not match the provided training options. 
\"\n f\"Existing cfg: {base_cfg}, provided cfg: {pipe_cfg}\",\n exits=1,\n )\n msg.text(f\"Extending component from base model '{pipe}'\")\n disabled_pipes = nlp.select_pipes(\n disable=[p for p in nlp.pipe_names if p not in pipeline]\n )\n else:\n msg.text(f\"Starting with blank model '{lang}'\")\n lang_cls = util.get_lang_class(lang)\n nlp = lang_cls()\n\n if vectors:\n msg.text(f\"Loading vectors from model '{vectors}'\")\n _load_vectors(nlp, vectors)\n\n for pipe in pipeline:\n # first, create the model.\n # Bit of a hack after the refactor to get the vectors into a default config\n # use train-from-config instead :-)\n if pipe == \"parser\":\n config_loc = default_dir / \"parser_defaults.cfg\"\n elif pipe == \"tagger\":\n config_loc = default_dir / \"tagger_defaults.cfg\"\n elif pipe == \"morphologizer\":\n config_loc = default_dir / \"morphologizer_defaults.cfg\"\n elif pipe == \"ner\":\n config_loc = default_dir / \"ner_defaults.cfg\"\n elif pipe == \"textcat\":\n config_loc = default_dir / \"textcat_defaults.cfg\"\n elif pipe == \"senter\":\n config_loc = default_dir / \"senter_defaults.cfg\"\n else:\n raise ValueError(f\"Component {pipe} currently not supported.\")\n pipe_cfg = util.load_config(config_loc, create_objects=False)\n if vectors:\n pretrained_config = {\n \"@architectures\": \"spacy.VocabVectors.v1\",\n \"name\": vectors,\n }\n pipe_cfg[\"model\"][\"tok2vec\"][\"pretrained_vectors\"] = pretrained_config\n\n if pipe == \"parser\":\n pipe_cfg[\"learn_tokens\"] = learn_tokens\n elif pipe == \"textcat\":\n pipe_cfg[\"exclusive_classes\"] = not textcat_multilabel\n pipe_cfg[\"architecture\"] = textcat_arch\n pipe_cfg[\"positive_label\"] = textcat_positive_label\n\n pipe = nlp.create_pipe(pipe, config=pipe_cfg)\n nlp.add_pipe(pipe)\n\n # Update tag map with provided mapping\n nlp.vocab.morphology.tag_map.update(tag_map)\n\n # Multitask objectives\n multitask_options = [(\"parser\", parser_multitasks), (\"ner\", entity_multitasks)]\n for pipe_name, multitasks in multitask_options:\n if multitasks:\n if pipe_name not in pipeline:\n msg.fail(\n f\"Can't use multitask objective without '{pipe_name}' in \"\n f\"the pipeline\"\n )\n pipe = nlp.get_pipe(pipe_name)\n for objective in multitasks.split(\",\"):\n pipe.add_multitask_objective(objective)\n\n # Prepare training corpus\n msg.text(f\"Counting training words (limit={n_examples})\")\n corpus = GoldCorpus(train_path, dev_path, limit=n_examples)\n n_train_words = corpus.count_train()\n\n if base_model and not pipes_added:\n # Start with an existing model, use default optimizer\n optimizer = create_default_optimizer()\n else:\n # Start with a blank model, call begin_training\n cfg = {\"device\": use_gpu}\n optimizer = nlp.begin_training(lambda: corpus.train_examples, **cfg)\n nlp._optimizer = None\n\n # Load in pretrained weights (TODO: this may be broken in the config rewrite)\n if init_tok2vec is not None:\n components = _load_pretrained_tok2vec(nlp, init_tok2vec)\n msg.text(f\"Loaded pretrained tok2vec for: {components}\")\n\n # Verify textcat config\n if \"textcat\" in pipeline:\n textcat_labels = nlp.get_pipe(\"textcat\").cfg.get(\"labels\", [])\n if textcat_positive_label and textcat_positive_label not in textcat_labels:\n msg.fail(\n f\"The textcat_positive_label (tpl) '{textcat_positive_label}' \"\n f\"does not match any label in the training data.\",\n exits=1,\n )\n if textcat_positive_label and len(textcat_labels) != 2:\n msg.fail(\n \"A textcat_positive_label (tpl) '{textcat_positive_label}' was \"\n \"provided for 
training data that does not appear to be a \"\n \"binary classification problem with two labels.\",\n exits=1,\n )\n train_data = corpus.train_data(\n nlp,\n noise_level=noise_level,\n gold_preproc=gold_preproc,\n max_length=0,\n ignore_misaligned=True,\n )\n train_labels = set()\n if textcat_multilabel:\n multilabel_found = False\n for ex in train_data:\n train_labels.update(ex.gold.cats.keys())\n if list(ex.gold.cats.values()).count(1.0) != 1:\n multilabel_found = True\n if not multilabel_found and not base_model:\n msg.warn(\n \"The textcat training instances look like they have \"\n \"mutually-exclusive classes. Remove the flag \"\n \"'--textcat-multilabel' to train a classifier with \"\n \"mutually-exclusive classes.\"\n )\n if not textcat_multilabel:\n for ex in train_data:\n train_labels.update(ex.gold.cats.keys())\n if list(ex.gold.cats.values()).count(1.0) != 1 and not base_model:\n msg.warn(\n \"Some textcat training instances do not have exactly \"\n \"one positive label. Modifying training options to \"\n \"include the flag '--textcat-multilabel' for classes \"\n \"that are not mutually exclusive.\"\n )\n nlp.get_pipe(\"textcat\").cfg[\"exclusive_classes\"] = False\n textcat_multilabel = True\n break\n if base_model and set(textcat_labels) != train_labels:\n msg.fail(\n f\"Cannot extend textcat model using data with different \"\n f\"labels. Base model labels: {textcat_labels}, training data \"\n f\"labels: {list(train_labels)}\",\n exits=1,\n )\n if textcat_multilabel:\n msg.text(\n f\"Textcat evaluation score: ROC AUC score macro-averaged across \"\n f\"the labels '{', '.join(textcat_labels)}'\"\n )\n elif textcat_positive_label and len(textcat_labels) == 2:\n msg.text(\n f\"Textcat evaluation score: F1-score for the \"\n f\"label '{textcat_positive_label}'\"\n )\n elif len(textcat_labels) > 1:\n if len(textcat_labels) == 2:\n msg.warn(\n \"If the textcat component is a binary classifier with \"\n \"exclusive classes, provide '--textcat_positive_label' for \"\n \"an evaluation on the positive class.\"\n )\n msg.text(\n f\"Textcat evaluation score: F1-score macro-averaged across \"\n f\"the labels '{', '.join(textcat_labels)}'\"\n )\n else:\n msg.fail(\n \"Unsupported textcat configuration. 
Use `spacy debug-data` \"\n \"for more information.\"\n )\n\n # fmt: off\n row_head, output_stats = _configure_training_output(pipeline, use_gpu, has_beam_widths)\n row_widths = [len(w) for w in row_head]\n row_settings = {\"widths\": row_widths, \"aligns\": tuple([\"r\" for i in row_head]), \"spacing\": 2}\n # fmt: on\n print(\"\")\n msg.row(row_head, **row_settings)\n msg.row([\"-\" * width for width in row_settings[\"widths\"]], **row_settings)\n try:\n iter_since_best = 0\n best_score = 0.0\n for i in range(n_iter):\n train_data = corpus.train_dataset(\n nlp,\n noise_level=noise_level,\n orth_variant_level=orth_variant_level,\n gold_preproc=gold_preproc,\n max_length=0,\n ignore_misaligned=True,\n )\n if raw_text:\n random.shuffle(raw_text)\n raw_batches = util.minibatch(\n (nlp.make_doc(rt[\"text\"]) for rt in raw_text), size=8\n )\n words_seen = 0\n with tqdm.tqdm(total=n_train_words, leave=False) as pbar:\n losses = {}\n for batch in util.minibatch_by_words(train_data, size=batch_sizes):\n if not batch:\n continue\n try:\n nlp.update(\n batch,\n sgd=optimizer,\n drop=next(dropout_rates),\n losses=losses,\n )\n except ValueError as e:\n err = \"Error during training\"\n if init_tok2vec:\n err += \" Did you provide the same parameters during 'train' as during 'pretrain'?\"\n msg.fail(err, f\"Original error message: {e}\", exits=1)\n if raw_text:\n # If raw text is available, perform 'rehearsal' updates,\n # which use unlabelled data to reduce overfitting.\n raw_batch = list(next(raw_batches))\n nlp.rehearse(raw_batch, sgd=optimizer, losses=losses)\n docs = [ex.doc for ex in batch]\n if not int(os.environ.get(\"LOG_FRIENDLY\", 0)):\n pbar.update(sum(len(doc) for doc in docs))\n words_seen += sum(len(doc) for doc in docs)\n with nlp.use_params(optimizer.averages):\n util.set_env_log(False)\n epoch_model_path = output_path / f\"model{i}\"\n nlp.to_disk(epoch_model_path)\n nlp_loaded = util.load_model_from_path(epoch_model_path)\n for beam_width in eval_beam_widths:\n for name, component in nlp_loaded.pipeline:\n if hasattr(component, \"cfg\"):\n component.cfg[\"beam_width\"] = beam_width\n dev_dataset = list(\n corpus.dev_dataset(\n nlp_loaded,\n gold_preproc=gold_preproc,\n ignore_misaligned=True,\n )\n )\n nwords = sum(len(ex.doc) for ex in dev_dataset)\n start_time = timer()\n scorer = nlp_loaded.evaluate(dev_dataset, verbose=verbose)\n end_time = timer()\n if use_gpu < 0:\n gpu_wps = None\n cpu_wps = nwords / (end_time - start_time)\n else:\n gpu_wps = nwords / (end_time - start_time)\n with use_ops(\"numpy\"):\n nlp_loaded = util.load_model_from_path(epoch_model_path)\n for name, component in nlp_loaded.pipeline:\n if hasattr(component, \"cfg\"):\n component.cfg[\"beam_width\"] = beam_width\n dev_dataset = list(\n corpus.dev_dataset(\n nlp_loaded,\n gold_preproc=gold_preproc,\n ignore_misaligned=True,\n )\n )\n start_time = timer()\n scorer = nlp_loaded.evaluate(dev_dataset, verbose=verbose)\n end_time = timer()\n cpu_wps = nwords / (end_time - start_time)\n acc_loc = output_path / f\"model{i}\" / \"accuracy.json\"\n srsly.write_json(acc_loc, scorer.scores)\n\n # Update model meta.json\n meta[\"lang\"] = nlp.lang\n meta[\"pipeline\"] = nlp.pipe_names\n meta[\"spacy_version\"] = f\">={about.__version__}\"\n if beam_width == 1:\n meta[\"speed\"] = {\n \"nwords\": nwords,\n \"cpu\": cpu_wps,\n \"gpu\": gpu_wps,\n }\n meta.setdefault(\"accuracy\", {})\n for component in nlp.pipe_names:\n for metric in _get_metrics(component):\n meta[\"accuracy\"][metric] = scorer.scores[metric]\n 
else:\n meta.setdefault(\"beam_accuracy\", {})\n meta.setdefault(\"beam_speed\", {})\n for component in nlp.pipe_names:\n for metric in _get_metrics(component):\n meta[\"beam_accuracy\"][metric] = scorer.scores[metric]\n meta[\"beam_speed\"][beam_width] = {\n \"nwords\": nwords,\n \"cpu\": cpu_wps,\n \"gpu\": gpu_wps,\n }\n meta[\"vectors\"] = {\n \"width\": nlp.vocab.vectors_length,\n \"vectors\": len(nlp.vocab.vectors),\n \"keys\": nlp.vocab.vectors.n_keys,\n \"name\": nlp.vocab.vectors.name,\n }\n meta.setdefault(\"name\", f\"model{i}\")\n meta.setdefault(\"version\", version)\n meta[\"labels\"] = nlp.meta[\"labels\"]\n meta_loc = output_path / f\"model{i}\" / \"meta.json\"\n srsly.write_json(meta_loc, meta)\n util.set_env_log(verbose)\n\n progress = _get_progress(\n i,\n losses,\n scorer.scores,\n output_stats,\n beam_width=beam_width if has_beam_widths else None,\n cpu_wps=cpu_wps,\n gpu_wps=gpu_wps,\n )\n if i == 0 and \"textcat\" in pipeline:\n textcats_per_cat = scorer.scores.get(\"textcats_per_cat\", {})\n for cat, cat_score in textcats_per_cat.items():\n if cat_score.get(\"roc_auc_score\", 0) < 0:\n msg.warn(\n f\"Textcat ROC AUC score is undefined due to \"\n f\"only one value in label '{cat}'.\"\n )\n msg.row(progress, **row_settings)\n # Early stopping\n if n_early_stopping is not None:\n current_score = _score_for_model(meta)\n if current_score < best_score:\n iter_since_best += 1\n else:\n iter_since_best = 0\n best_score = current_score\n if iter_since_best >= n_early_stopping:\n msg.text(\n f\"Early stopping, best iteration is: {i - iter_since_best}\"\n )\n msg.text(\n f\"Best score = {best_score}; Final iteration score = {current_score}\"\n )\n break\n except Exception as e:\n msg.warn(f\"Aborting and saving final best model. Encountered exception: {e}\")\n finally:\n best_pipes = nlp.pipe_names\n if disabled_pipes:\n disabled_pipes.restore()\n with nlp.use_params(optimizer.averages):\n final_model_path = output_path / \"model-final\"\n nlp.to_disk(final_model_path)\n meta_loc = output_path / \"model-final\" / \"meta.json\"\n final_meta = srsly.read_json(meta_loc)\n final_meta.setdefault(\"accuracy\", {})\n final_meta[\"accuracy\"].update(meta.get(\"accuracy\", {}))\n final_meta.setdefault(\"speed\", {})\n final_meta[\"speed\"].setdefault(\"cpu\", None)\n final_meta[\"speed\"].setdefault(\"gpu\", None)\n meta.setdefault(\"speed\", {})\n meta[\"speed\"].setdefault(\"cpu\", None)\n meta[\"speed\"].setdefault(\"gpu\", None)\n # combine cpu and gpu speeds with the base model speeds\n if final_meta[\"speed\"][\"cpu\"] and meta[\"speed\"][\"cpu\"]:\n speed = _get_total_speed(\n [final_meta[\"speed\"][\"cpu\"], meta[\"speed\"][\"cpu\"]]\n )\n final_meta[\"speed\"][\"cpu\"] = speed\n if final_meta[\"speed\"][\"gpu\"] and meta[\"speed\"][\"gpu\"]:\n speed = _get_total_speed(\n [final_meta[\"speed\"][\"gpu\"], meta[\"speed\"][\"gpu\"]]\n )\n final_meta[\"speed\"][\"gpu\"] = speed\n # if there were no speeds to update, overwrite with meta\n if (\n final_meta[\"speed\"][\"cpu\"] is None\n and final_meta[\"speed\"][\"gpu\"] is None\n ):\n final_meta[\"speed\"].update(meta[\"speed\"])\n # note: beam speeds are not combined with the base model\n if has_beam_widths:\n final_meta.setdefault(\"beam_accuracy\", {})\n final_meta[\"beam_accuracy\"].update(meta.get(\"beam_accuracy\", {}))\n final_meta.setdefault(\"beam_speed\", {})\n final_meta[\"beam_speed\"].update(meta.get(\"beam_speed\", {}))\n srsly.write_json(meta_loc, final_meta)\n msg.good(\"Saved model to output directory\", 
final_model_path)\n with msg.loading(\"Creating best model...\"):\n best_model_path = _collate_best_model(final_meta, output_path, best_pipes)\n msg.good(\"Created best model\", best_model_path)",
"def test_save_and_load_svmlight_file(self):\n self.logger.info(\"Testing libsvm dataset loading and saving...\")\n\n test_file = fm.join(fm.abspath(__file__), \"myfile.libsvm\")\n\n # Cleaning test file\n try:\n fm.remove_file(test_file)\n except (OSError, IOError) as e:\n if e.errno != 2:\n raise e\n\n self.logger.info(\"Patterns saved:\\n{:}\".format(self.patterns))\n self.logger.info(\"Labels saved:\\n{:}\".format(self.labels))\n\n CDataLoaderSvmLight.dump(\n CDataset(self.patterns, self.labels), test_file)\n\n new_dataset = CDataLoaderSvmLight().load(test_file)\n\n self.assertFalse((new_dataset.X != self.patterns).any())\n self.assertFalse((new_dataset.Y != self.labels).any())\n\n # load data but now remove all zero features (colums)\n new_dataset = CDataLoaderSvmLight().load(\n test_file, remove_all_zero=True)\n\n self.logger.info(\"Patterns loaded:\\n{:}\".format(new_dataset.X))\n self.logger.info(\"Labels loaded:\\n{:}\".format(new_dataset.Y))\n self.logger.info(\n \"Mapping back:\\n{:}\".format(new_dataset.header.idx_mapping))\n\n self.assertTrue(new_dataset.X.issparse)\n self.assertTrue(new_dataset.Y.isdense)\n self.assertTrue(new_dataset.header.idx_mapping.isdense)\n\n # non-zero elements should be unchanged\n self.assertEqual(self.patterns.nnz, new_dataset.X.nnz)\n new_nnz_data = new_dataset.X.nnz_data\n self.assertFalse((self.patterns.nnz_data != new_nnz_data.sort()).any())\n\n # With idx_mapping we should be able to reconstruct original data\n original = CArray.zeros(self.patterns.shape, sparse=True)\n original[:, new_dataset.header.idx_mapping] = new_dataset.X\n self.assertFalse((self.patterns != original).any())\n\n # Cleaning test file\n try:\n fm.remove_file(test_file)\n except (OSError, IOError) as e:\n if e.errno != 2:\n raise e",
"def testsvm_step1(self, models_name, sample_name, output_name):\n args = self.get_file_args(models_name)\n args += self.get_commonlib()\n\n h_args = self.mapreduce_core(sample_name=sample_name,\n output_name=output_name,\n exe_file=self.exe_testsvm1,\n is_cat=True,\n args=args)\n return h_args",
"def main():\r\n args = Parameters().parse()\r\n # #\r\n # args.method = 'student_res18_pre'\r\n args.method = 'student_esp_d'\r\n args.dataset = 'camvid_light'\r\n args.data_list = \"/ssd/yifan/SegNet/CamVid/test.txt\"\r\n args.data_dir = \"/ssd/yifan/\"\r\n args.num_classes = 11\r\n # args.method='psp_dsn_floor'\r\n args.restore_from = \"./checkpoint/Camvid/ESP/base_57.8.pth\"\r\n # args.restore_from=\"/teamscratch/msravcshare/v-yifan/ESPNet/train/0.4results_enc_01_enc_2_8/model_298.pth\"\r\n # args.restore_from = \"/teamscratch/msravcshare/v-yifacd n/sd_pytorch0.5/checkpoint/snapshots_psp_dsn_floor_1e-2_40000_TEACHER864/CS_scenes_40000.pth\"\r\n # args.restore_from = \"/teamscratch/msravcshare/v-yifan/sd_pytorch0.5/checkpoint/snapshots_psp_dsn_floor_1e-2_40000_TEACHER5121024_esp/CS_scenes_40000.pth\"\r\n # args.data_list = '/teamscratch/msravcshare/v-yifan/deeplab_v3/dataset/list/cityscapes/train.lst'\r\n args.batch_size = 1\r\n print(\"Input arguments:\")\r\n for key, val in vars(args).items():\r\n print(\"{:16} {}\".format(key, val))\r\n\r\n h, w = map(int, args.input_size.split(','))\r\n input_size = (h, w)\r\n\r\n print(args)\r\n output_path = args.output_path\r\n if not os.path.exists(output_path):\r\n os.makedirs(output_path)\r\n # args.method='psp_dsn'\r\n deeplab = get_segmentation_model(args.method, num_classes=args.num_classes)\r\n\r\n ignore_label = 255\r\n id_to_trainid = {-1: ignore_label, 0: ignore_label, 1: ignore_label, 2: ignore_label,\r\n 3: ignore_label, 4: ignore_label, 5: ignore_label, 6: ignore_label,\r\n 7: 0, 8: 1, 9: ignore_label, 10: ignore_label, 11: 2, 12: 3, 13: 4,\r\n 14: ignore_label, 15: ignore_label, 16: ignore_label, 17: 5,\r\n 18: ignore_label, 19: 6, 20: 7, 21: 8, 22: 9, 23: 10, 24: 11, 25: 12, 26: 13, 27: 14,\r\n 28: 15, 29: ignore_label, 30: ignore_label, 31: 16, 32: 17, 33: 18}\r\n\r\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = args.gpu\r\n # args.restore_from=\"/teamscratch/msravcshare/v-yifan/sd_pytorch0.3/checkpoint/snapshots_resnet_psp_dsn_1e-4_5e-4_8_20000_DSN_0.4_769light/CS_scenes_20000.pth\"\r\n # if 'dense' in args.method:\r\n #\r\n if args.restore_from is not None:\r\n saved_state_dict = torch.load(args.restore_from)\r\n c_keys = saved_state_dict.keys()\r\n for i in c_keys:\r\n flag = i.split('.')[0]\r\n if 'module' in flag:\r\n deeplab = nn.DataParallel(deeplab)\r\n deeplab.load_state_dict(saved_state_dict)\r\n if 'module' not in flag:\r\n deeplab = nn.DataParallel(deeplab)\r\n # if 'dense' not in args.method:\r\n # deeplab = nn.DataParallel(deeplab)\r\n model = deeplab\r\n model.eval()\r\n model.cuda()\r\n # args.dataset='cityscapes_light'\r\n testloader = data.DataLoader(get_segmentation_dataset(args.dataset, root=args.data_dir, list_path=args.data_list,\r\n crop_size=(360, 480), mean=IMG_MEAN, scale=False,\r\n mirror=False),\r\n batch_size=args.batch_size, shuffle=False, pin_memory=True)\r\n\r\n data_list = []\r\n confusion_matrix = np.zeros((args.num_classes, args.num_classes))\r\n\r\n palette = get_palette(20)\r\n\r\n image_id = 0\r\n for index, batch in enumerate(testloader):\r\n if index % 100 == 0:\r\n print('%d processd' % (index))\r\n if args.side:\r\n image, label, _, size, name = batch\r\n elif 'sd' in args.dataset:\r\n _, image, label, size, name = batch\r\n else:\r\n image, label, size, name = batch\r\n # print('image name: {}'.format(name))\r\n size = size[0].numpy()\r\n output = predict_esp(model, image)\r\n # seg_pred = np.asarray(np.argmax(output, axis=3), dtype=np.uint8)\r\n result = np.asarray(np.argmax(output, axis=3), 
dtype=np.uint8)\r\n # result=cv2.resize(result, (1024, 1024), interpolation=cv2.INTER_NEAREST)\r\n m_seg_pred = ma.masked_array(result, mask=torch.eq(label, 255))\r\n ma.set_fill_value(m_seg_pred, 20)\r\n seg_pred = m_seg_pred\r\n\r\n for i in range(image.size(0)):\r\n image_id += 1\r\n print('%d th segmentation map generated ...' % (image_id))\r\n args.store_output = 'True'\r\n output_path = './esp_camvid_base/'\r\n if not os.path.exists(output_path):\r\n os.mkdir(output_path)\r\n if args.store_output == 'True':\r\n # print('a')\r\n output_im = PILImage.fromarray(seg_pred[i])\r\n output_im.putpalette(palette)\r\n output_im.save(output_path + '/' + name[i] + '.png')\r\n\r\n seg_gt = np.asarray(label.numpy()[:, :size[0], :size[1]], dtype=np.int)\r\n ignore_index = seg_gt != 255\r\n seg_gt = seg_gt[ignore_index]\r\n seg_pred = seg_pred[ignore_index]\r\n confusion_matrix += get_confusion_matrix(seg_gt, seg_pred, args.num_classes)\r\n\r\n pos = confusion_matrix.sum(1)\r\n res = confusion_matrix.sum(0)\r\n tp = np.diag(confusion_matrix)\r\n\r\n IU_array = (tp / np.maximum(1.0, pos + res - tp))\r\n mean_IU = IU_array.mean()\r\n\r\n print({'meanIU': mean_IU, 'IU_array': IU_array})\r\n\r\n print(\"confusion matrix\\n\")\r\n print(confusion_matrix)",
"def main():\n train_src = read_file(SRC_TRAIN)\n train_tgt = read_file(TRGT_TRAIN)\n val_src = read_file(SRC_VAL)\n val_tgt = read_file(TRGT_VAL)\n # val = read_files(VAL_FILES)\n np.savez(\n DATA_NPZ_NAME, train_src=train_src, train_tgt=train_tgt, val_src=val_src, val_tgt=val_tgt)",
"def LoadSavedModels(main_model_path=\"main_model.pkl\", \n cler_model_path=\"cler_model.pkl\", \n word2vec_path='GoogleNews-vectors-negative300.bin'):\n \n model_main = joblib.load(main_model_path)\n model_cler = joblib.load(cler_model_path)\n word2vec = gensim.models.KeyedVectors.load_word2vec_format(word2vec_path, binary=True)\n\n return model_main,model_cler,word2vec",
"def tolibsvm(df_in, fname_out, gen_file=1):\n if type(df_in) is str:\n df_in = pd.read_csv(df_in)\n df_libsvm = []\n for ind, row in df_in.iterrows():\n row_res = []\n col_ind = 0\n for i in row:\n if col_ind == 0:\n ele = str(i)\n row_res.append(ele)\n col_ind += 1\n continue\n else:\n ele = \"{}:{}\".format(col_ind, i)\n row_res.append(ele)\n col_ind += 1\n df_libsvm.append(\" \".join(row_res))\n if gen_file == 2:\n if \".libsvm\" in fname_out:\n fname = fname_out\n else:\n fname = str(fname_out)+\".libsvm\"\n file_out = open(fname, \"w\")\n for i in df_libsvm:\n print >>file_out, i\n file_out.close()\n return df_libsvm, fname\n elif gen_file == 0:\n return df_libsvm\n elif gen_file == 1:\n if \".libsvm\" in fname_out:\n fname = fname_out\n else:\n fname = str(fname_out)+\".libsvm\"\n file_out = open(fname, \"w\")\n for i in df_libsvm:\n print >>file_out, i\n file_out.close()\n return fname",
"def main(input_filepath, output_model_filepath):\n logger = logging.getLogger(__name__)\n logger.info('training hotel cluster embeddings models')\n\n input_file = os.path.join(input_filepath, 'sentences.pkl')\n output_model_file = os.path.join(output_model_filepath, 'hotelcluster2vec.bin')\n\n train(input_file, output_model_file)",
"def main(dir_models='/Volumes/ext_ssd/jlab/data_imi_10games/saved_models', dir_out='best_models'):\n random.seed(1234)\n\n subdirs = [f for f in os.listdir(dir_models) if os.path.isdir(os.path.join(dir_models, f))]\n\n path_best_models = []\n for subdir in subdirs:\n trial_num = str(random.randint(0,49))\n\n PATH_SUB = os.path.join(dir_models, subdir, trial_num)\n PATH_BEST = os.path.join(PATH_SUB, 'ckpts/best.h5')\n\n # print(PATH_BEST, os.path.join(dir_out, subdir + '_best.h5'))\n shutil.copyfile(PATH_BEST, os.path.join(dir_out, subdir + '_best.h5'))",
"def get_source_vectors(testsmells):\n\n for testsmell in testsmells:\n df = pd.read_csv('data/' + testsmell + '_data.csv')\n df['Vector'] = ''\n\n repnames = df['App'].unique().tolist()\n for repname in repnames:\n print('Processing project \\'' + repname + '\\' for ' + testsmell + '...')\n currdf = df[df['App'] == repname]\n repo = Repo('repositories/' + repname)\n vectors = []\n \n # Get the vectors for each Java file in the dataframe\n for _, row in tqdm(list(currdf.iterrows())): \n try:\n repo.git.checkout(row['CommitSHA'], force=True)\n file_path = 'repositories/' + repname + '/' + row['RelativeTestFilePath']\n vectors.append(get_vector(file_path))\n except GitCommandError as err:\n print('Failed for ' + row['App'] + ':' + row['CommitSHA'])\n print(err)\n vectors.append('')\n \n df.loc[df['App'] == repname, 'Vector'] = vectors # Set the vectors on the dataframe\n \n filename = 'data/' + testsmell + '_vectors.csv'\n df.to_csv(filename, index=False)",
"def create_and_save_model(datapath, test_percentage = 0.2):\r\n \r\n pick_in = open(datapath, \"rb\")\r\n data = pickle.load(pick_in)\r\n pick_in.close()\r\n pick_parameter = open('parameters.data', \"rb\")\r\n parameters = pickle.load(pick_parameter)\r\n pick_parameter.close()\r\n #random.shuffle(keys)\r\n #shuffled_data = [(key, data[key]) for key in keys]\r\n \r\n features = []\r\n labels = []\r\n \r\n # sift/surf return dictonary, while hog returns list\r\n # convert both in same format\r\n if type(data) == dict:\r\n farray = []\r\n for label, label_features in data.items():\r\n for feature in label_features:\r\n farray.append([feature, label])\r\n data = farray\r\n \r\n random.shuffle(data)\r\n\r\n for feature, label in data:\r\n features.append(feature)\r\n labels.append(label)\r\n \r\n \r\n \r\n xtrain, xtest, ytrain, ytest = train_test_split(features, labels, test_size=test_percentage)\r\n \r\n # unpack parameters\r\n model = SVC(**parameters)\r\n model.fit(xtrain, ytrain)\r\n \r\n pick = open('model.data', \"wb\") #save model\r\n pickle.dump(model, pick)\r\n pick.close()\r\n\r\n test_data = list(zip(xtest,ytest))\r\n\r\n pick1 = open('data_test.data', \"wb\") #save test data, so that we don't mix up training and test data\r\n pickle.dump(test_data, pick1)\r\n pick1.close()\r\n\r\n print(\"n_test: \", len(xtest))\r\n print(\"n_train: \", len(xtrain))",
"def generate_artifacts_for_best_svm_model(best_svm, ngram_range, X_train, y_train):\n # transform input data using tfidf and save the tfidf_vectorizer locally\n tfidf_vectorizer = TfidfVectorizer(use_idf=True, ngram_range=ngram_range)\n tfidf_vectorizer_vectors = tfidf_vectorizer.fit_transform(X_train)\n X_train_tfidf = tfidf_vectorizer.transform(X_train)\n with open(os.path.join(os.path.dirname(__file__), 'tfidf_vectorizer.pkl'), 'wb') as f:\n pickle.dump(tfidf_vectorizer, f)\n\n # train and save the model locally\n best_svm_model = best_svm.fit(X_train_tfidf, y_train)\n with open(os.path.join(os.path.dirname(__file__), 'best_svm.pkl'), 'wb') as f:\n pickle.dump(best_svm_model, f)",
"def main():\r\n # assert tf.__version__[0] == \"2\"\r\n\r\n \"\"\" Load Config \"\"\"\r\n with open('./config/config_origin.json', 'r') as f:\r\n CONFIG = json.load(f)\r\n BATCH_SIZE = CONFIG[\"BATCH_SIZE\"]\r\n ROOT_PATH = CONFIG[\"ROOT_PATH\"]\r\n TRAIN_DATA_DIR = CONFIG[\"TRAIN_DATA_DIR\"]\r\n TEST_DATA_DIR = CONFIG[\"TEST_DATA_DIR\"]\r\n TRAIN_DATA_DIR = os.path.join(ROOT_PATH, TRAIN_DATA_DIR)\r\n TEST_DATA_DIR = os.path.join(ROOT_PATH, TEST_DATA_DIR)\r\n MODEL_CKPT = CONFIG[\"MODEL_CKPT\"]\r\n\r\n \"\"\" Prepare Model \"\"\"\r\n n = 6 # order of ResNetv2\r\n version = 2\r\n depth = model_depth(n, version)\r\n MODEL_TYPE = 'ResNet%dv%d' % (depth, version)\r\n SAVES_DIR = \"models-%s/\" % MODEL_TYPE\r\n SAVES_DIR = os.path.join(ROOT_PATH, SAVES_DIR)\r\n MODEL_CKPT = os.path.join(SAVES_DIR, MODEL_CKPT)\r\n\r\n # Features directory\r\n FEATURE_DIR = os.path.join(ROOT_PATH, \"features\")\r\n FEATURE_DIR = os.path.join(FEATURE_DIR, \"models-%s/\" % MODEL_TYPE)\r\n if not os.path.exists(FEATURE_DIR):\r\n os.mkdir(FEATURE_DIR)\r\n\r\n if not os.path.exists(SAVES_DIR):\r\n os.mkdir(SAVES_DIR)\r\n model = resnet_v2(input_shape=INPUT_SHAPE, depth=depth, num_classes=2)\r\n model.compile(loss='categorical_crossentropy',\r\n optimizer=Adam(learning_rate=lr_schedule(TRAINING_EPOCHS)),\r\n metrics=METRICS)\r\n # model.summary()\r\n print(MODEL_TYPE)\r\n\r\n \"\"\" Load Weights \"\"\"\r\n model_ckpt_file = os.path.join(SAVES_DIR, MODEL_CKPT)\r\n if os.path.exists(model_ckpt_file):\r\n print(\"Model ckpt found! Loading...:%s\" % model_ckpt_file)\r\n model.load_weights(model_ckpt_file)\r\n\r\n \"\"\" Extract Testing Data \"\"\"\r\n _train_filenames = os.listdir(os.path.join(TRAIN_DATA_DIR, \"bad_1\"))\r\n train_bad_df = pd.DataFrame({\r\n 'filename': _train_filenames\r\n })\r\n n_bad_samples = train_bad_df.shape[0]\r\n train_bad_df.to_csv(os.path.join(\r\n FEATURE_DIR, \"bad_samples_list.csv\"), index=False)\r\n\r\n \"\"\" Extract good samples \"\"\"\r\n _train_filenames = os.listdir(os.path.join(TRAIN_DATA_DIR, \"good_0\"))\r\n train_good_df = pd.DataFrame({\r\n 'filename': _train_filenames\r\n })\r\n n_good_samples = train_good_df.shape[0]\r\n train_good_df.to_csv(os.path.join(\r\n FEATURE_DIR, \"good_samples_list.csv\"), index=False)\r\n\r\n \"\"\" Create bad sample validation generator \"\"\"\r\n train_bad_datagen = ImageDataGenerator(rescale=1./255)\r\n train_bad_generator = train_bad_datagen.flow_from_dataframe(\r\n train_bad_df,\r\n os.path.join(TRAIN_DATA_DIR, \"bad_1\"),\r\n x_col='filename',\r\n y_col=None,\r\n class_mode=None,\r\n target_size=IMAGE_SIZE,\r\n color_mode=\"grayscale\",\r\n batch_size=BATCH_SIZE,\r\n shuffle=False\r\n )\r\n\r\n \"\"\" Create bad sample validation generator \"\"\"\r\n train_good_datagen = ImageDataGenerator(rescale=1./255)\r\n train_good_generator = train_good_datagen.flow_from_dataframe(\r\n train_good_df,\r\n os.path.join(TRAIN_DATA_DIR, \"good_0\"),\r\n x_col='filename',\r\n y_col=None,\r\n class_mode=None,\r\n target_size=IMAGE_SIZE,\r\n color_mode=\"grayscale\",\r\n batch_size=BATCH_SIZE,\r\n shuffle=False\r\n )\r\n\r\n \"\"\" Extractor \"\"\"\r\n extractor = Model(\r\n model.inputs, model.layers[-2].output) # flatten_2 (Flatten) (None, 12544)\r\n # features = extractor.predict(data)\r\n\r\n \"\"\" Extract train set 的特征 \"\"\"\r\n import time\r\n # bad samples\r\n start = time.perf_counter()\r\n print(\"Start extracting bad samples...\")\r\n features = extractor.predict_generator(\r\n train_bad_generator, steps=np.ceil(n_bad_samples / BATCH_SIZE),\r\n 
workers=4, verbose=1)\r\n print(\"features.shape:\", features.shape) # (16/32/etc, 12544)\r\n np.save(os.path.join(FEATURE_DIR, \"features_train_bad.npy\"), features)\r\n\r\n elapsed = (time.perf_counter() - start)\r\n print(\"Prediction time used:\", elapsed)\r\n # TODO 用 pandas 存储\r\n # good samples\r\n start = time.perf_counter()\r\n print(\"Start extracting good samples...\")\r\n features = extractor.predict_generator(\r\n train_good_generator, steps=np.ceil(n_good_samples / BATCH_SIZE),\r\n workers=4, verbose=1)\r\n print(\"features.shape:\", features.shape) # (16/32/etc, 12544)\r\n np.save(os.path.join(FEATURE_DIR, \"features_train_good.npy\"), features)\r\n\r\n elapsed = (time.perf_counter() - start)\r\n print(\"Prediction time used:\", elapsed)"
]
| [
"0.7015092",
"0.6091238",
"0.5931611",
"0.5814341",
"0.58115715",
"0.57931757",
"0.5764446",
"0.5748602",
"0.5714243",
"0.5709069",
"0.57070225",
"0.5673651",
"0.5672113",
"0.56681716",
"0.5613454",
"0.5541235",
"0.5527485",
"0.5524118",
"0.55085415",
"0.55060285",
"0.55043477",
"0.5488681",
"0.5481609",
"0.54800475",
"0.54708296",
"0.5456624",
"0.5454788",
"0.54532605",
"0.5450901",
"0.5440298"
]
| 0.7519468 | 0 |
Search the nodes with the lowest f scores first. You specify the function f(node) that you want to minimize; for example, if f is a heuristic estimate to the goal, then we have greedy best first search; if f is node.depth then we have breadth-first search. | def best_first_graph_search(problem, f):
f = memoize(f, 'f')
return graph_search(problem, PriorityQueue(min, f)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def best_first_graph_search(self, problem, f):\n f = memoize(f, 'f')\n # Set starting node\n node = SearchNode(problem.initial)\n # If the goal is reached, return the resulting node\n if problem.goal_test(node.state):\n return node\n\n # Set priority queue to organize nodes\n # in order of lowest f\n frontier = PriorityQueue(min, f)\n # Append the first node\n frontier.append(node)\n # Initialize empty set\n explored = set()\n # While the frontier is not empty\n while frontier:\n # Get the first node with lowest f\n node = frontier.pop()\n # Check if node is goal\n if problem.goal_test(node.state):\n return node\n # Add the state to the explored set\n explored.add(tuple(node.state))\n # For every child in the expanded node\n for child in node.expand(problem):\n # If the child is not a repeat child append it\n if child.state not in explored and child not in frontier:\n frontier.append(child)\n # If the child is in the frontier\n # This statement basically just filters out children that\n # have the same state but lower path costs\n elif child in frontier:\n # Select that child\n incumbent = frontier[child]\n # If one child is has a lower path cost\n if f(child) < f(incumbent):\n # Remove the child that is farther\n del frontier[incumbent]\n frontier.append(child)\n return None",
"def best_first_graph_search(problem, f, display=False):\n\tf = memoize(f, 'f')\n\tnode = Node(problem.initial)\n\tfrontier = PriorityQueue('min', f)\n\tfrontier.append(node)\n\texplored = set()\n\twhile frontier:\n\t\tnode = frontier.pop()\n\t\tif problem.goal_test(node.state):\n\t\t\tif display:\n\t\t\t\tprint(len(explored), \"paths have been expanded and\", len(frontier), \"paths remain in the frontier\")\n\t\t\treturn node\n\t\texplored.add(node.state)\n\t\tfor child in node.expand(problem):\n\t\t\tif child.state not in explored and child not in frontier:\n\t\t\t\tfrontier.append(child)\n\t\t\telif child in frontier:\n\t\t\t\tif f(child) < frontier[child]:\n\t\t\t\t\tdel frontier[child]\n\t\t\t\t\tfrontier.append(child)\n\treturn None",
"def get_node_with_lowest_fn(nodes) :\r\n\r\n next_nodes = [nodes[0]] #The nodes having the lowest f(n) value\r\n min_fn = next_nodes[0].fn_value\r\n\r\n for a in range(1, nodes.__len__()) :\r\n if(nodes[a].fn_value < min_fn) :\r\n next_nodes.clear()\r\n next_nodes.append(nodes[a])\r\n elif(nodes[a].fn_value == min_fn) :\r\n next_nodes.append(nodes[a])\r\n\r\n return next_nodes",
"def bestFirstSearch(problem, heuristic=nullHeuristic):\n \"*** YOUR CODE HERE ***\"\n fringe = util.PriorityQueueWithFunction(lambda node: heuristic(node.state, problem))\n return GraphSearch(problem, 'befs').search(fringe)",
"def best_first_graph_search_show_frontier(problem, f,showFrontier = True):\n f = memoize(f, 'f')\n node = Node(problem.initial)\n frontier = PriorityQueue('min', f)\n frontier.append(node)\n explored = set()\n while frontier:\n\n print(\"Explored ==>\",explored) \n print(\"Frontier ==> \",frontier.heap)\n print()\n node = frontier.pop()\n print(\"Current ==> \",node.state)\n print(\"Eval Function ==> \",f(node))\n \n \n if problem.goal_test(node.state):\n return node\n\n explored.add(node.state)\n for child in node.expand(problem):\n if child.state not in explored and child not in frontier:\n frontier.append(child)\n elif child in frontier:\n if f(child) < frontier[child]:\n del frontier[child]\n frontier.append(child)\n \n return None",
"def recursive_best_first_search(problem, h=None):\n h = memoize(h or problem.h, 'h')\n\n infinity = 999999999\n\n\n def RBFS(problem, node, flimit):\n if problem.goal_test(node.state):\n return node, 0 # (The second value is immaterial)\n successors = node.expand(problem)\n if len(successors) == 0:\n return None, infinity\n for s in successors:\n s.f = max(s.path_cost + h(s), node.f)\n while True:\n # Order by lowest f value\n successors.sort(key=lambda x: x.f)\n best = successors[0]\n if best.f > flimit:\n return None, best.f\n if len(successors) > 1:\n alternative = successors[1].f\n else:\n alternative = infinity\n result, best.f = RBFS(problem, best, min(flimit, alternative))\n if result is not None:\n return result, best.f\n\n node = Node(problem.initial)\n node.f = h(node)\n result, bestf = RBFS(problem, node, infinity)\n return result",
"def get_smallest_f_cost_unvisited_node(self):\n node_list = []\n for column in self.grid:\n for node in column:\n if node.pos in self.unvisited_pos:\n node_list.append(node)\n min_f_cost_node = min(node_list, key=lambda x: x.g_cost)\n min_f_cost_list = []\n for column in self.grid:\n for node in column:\n if (\n node.f_cost == min_f_cost_node.f_cost\n and node.pos in self.unvisited_pos\n ):\n min_f_cost_list.append(node)\n return min_f_cost_node, len(min_f_cost_list)",
"def best_first_graph_search_for_vis(problem, f):\n\n # we use these two variables at the time of visualisations\n iterations = 0\n all_node_colors = []\n node_colors = {k: 'white' for k in problem.graph.nodes()}\n\n f = memoize(f, 'f')\n node = Node(problem.initial)\n\n node_colors[node.state] = \"red\"\n iterations += 1\n all_node_colors.append(dict(node_colors))\n\n if problem.goal_test(node.state):\n node_colors[node.state] = \"green\"\n iterations += 1\n all_node_colors.append(dict(node_colors))\n return iterations, all_node_colors, node\n\n frontier = PriorityQueue('min', f)\n frontier.append(node)\n\n node_colors[node.state] = \"orange\"\n iterations += 1\n all_node_colors.append(dict(node_colors))\n\n explored = set()\n while frontier:\n node = frontier.pop()\n\n node_colors[node.state] = \"red\"\n iterations += 1\n all_node_colors.append(dict(node_colors))\n\n if problem.goal_test(node.state):\n node_colors[node.state] = \"green\"\n iterations += 1\n all_node_colors.append(dict(node_colors))\n return iterations, all_node_colors, node\n\n explored.add(node.state)\n for child in node.expand(problem):\n if child.state not in explored and child not in frontier:\n frontier.append(child)\n node_colors[child.state] = \"orange\"\n iterations += 1\n all_node_colors.append(dict(node_colors))\n elif child in frontier:\n incumbent = frontier[child]\n # incumbent = frontier.get_item(child) # if utils modified\n if f(child) < f(incumbent):\n del frontier[incumbent]\n frontier.append(child)\n node_colors[child.state] = \"orange\"\n iterations += 1\n all_node_colors.append(dict(node_colors))\n\n node_colors[node.state] = \"gray\"\n iterations += 1\n all_node_colors.append(dict(node_colors))\n return None",
"def node_with_min_fscore(open_set, f_cost): # open_set is a set (of cell) and f_cost is a dict (with cells as keys)\n f_cost_open = dict([a for a in f_cost.items() if a[0] in open_set])\n return min(f_cost_open, key=f_cost_open.get)",
"def node_with_min_fscore(open_set, f_cost): # open_set is a set (of cell) and f_cost is a dict (with cells as keys)\n f_cost_open = dict([a for a in f_cost.items() if a[0] in open_set])\n return min(f_cost_open, key=f_cost_open.get)",
"def searchTreeF(node, d):\n if isinstance(node, DecisionTree):\n if node.i == 999: return node.mostCommon()\n if d[node.i] < node.v:\n return searchTreeF(node.lt, d)\n else:\n return searchTreeF(node.gt, d)\n else:\n return node",
"def fval_function(sN, weight):\n#IMPLEMENT\n \n #Many searches will explore nodes (or states) that are ordered by their f-value.\n #For UCS, the fvalue is the same as the gval of the state. For best-first search, the fvalue is the hval of the state.\n #You can use this function to create an alternate f-value for states; this must be a function of the state and the weight.\n #The function must return a numeric f-value.\n #The value will determine your state's position on the Frontier list during a 'custom' search.\n #You must initialize your search engine object as a 'custom' search engine if you supply a custom fval function.\n return 0",
"def bestAtDepthR(self,depth=0,scoreFunc=None) :\n scoreFunc = scoreFunc if scoreFunc != None else lambda g : g.leafScore()\n min_score = ACG.inf\n min_R = None\n for g in self.R() :\n if depth > 0 :\n g = g.bestAtDepthL(depth=depth-1,scoreFunc=scoreFunc)\n score = scoreFunc(g)\n if score < min_score :\n min_score = score\n min_R = g\n return min_R if min_R != None else self",
"def get_best_pred_finger(self,f):\n return min(self.best_finger_pred[f],\\\n key=lambda kn:dist_ident(kn.ident,self.get_finger_pred_loc(f)))",
"def minimax(node,depth):\n if node.isLeaf():\n return node.evaluate(),None\n elif node.isMax:\n max_score = float(\"-inf\")\n max_path = None\n for C in node.children():\n score,path = minimax(C,depth+1)\n if score > max_score:\n max_score = score\n max_path = C.name,path\n return max_score,max_path\n else:\n min_score = float(\"inf\")\n min_path = None\n for C in node.children():\n score,path = minimax(C,depth+1)\n if score < min_score:\n min_score = score\n min_path = C.name,path\n return min_score,min_path",
"def a_star_search(problem, heuristic=null_heuristic):\n fringe = util.PriorityQueueWithFunction(lambda x: x.get_cost() + heuristic(x.get_node(), problem))\n return general_search(problem, fringe)",
"def aStarSearch(problem, heuristic=nullHeuristic):\n \"*** YOUR CODE HERE ***\"\n # Initialize data structures\n parent_node = {}\n path_to_node = {}\n priority_queue = util.PriorityQueue()\n\n p_c = 0.5\n h_c = 1 - p_c\n\n # Get the start node\n start_node = problem.getStartState()\n parent_node[start_node] = None\n path_to_node[start_node] = []\n priority_queue.update(start_node, 0)\n\n #goal_found = False\n\n while not priority_queue.isEmpty():\n # Get the next node\n node_to_expand = priority_queue.pop()\n # Check if goal state is reached\n if problem.isGoalState(node_to_expand):\n break\n next_nodes = problem.getSuccessors(node_to_expand)\n path_to_parent = path_to_node[node_to_expand]\n\n for one_node in next_nodes:\n point, move, cost = one_node\n curr_path = path_to_node[node_to_expand] + [move]\n curr_cost = problem.getCostOfActions(curr_path)\n heuristic_cost = heuristic(point, problem)\n # Check if current node already exists in the previously visited nodes\n if point in path_to_node:\n prev_cost = problem.getCostOfActions(path_to_node[point])\n if prev_cost > curr_cost:\n path_to_node[point] = curr_path\n priority_queue.update(point, curr_cost + heuristic_cost)\n \n else:\n path_to_node[point] = curr_path\n priority_queue.update(point, curr_cost + heuristic_cost)\n \n # current_cost = problem.getCostOfActions(point) * p_c + heuristic(point, problem) * h_c\n\n print(node_to_expand) \n return path_to_node[node_to_expand]\n \n# nodes_to_expand = set()\n# # get max value node in the fringe node\n# min_val = float(\"inf\")\n# for one_node in fringe_node:\n# # Compute the cost to reach a node\n# total_cost = cost_to_point[one_node] * p_c + heuristic(one_node,problem) * h_c\n# if total_cost < min_val:\n# min_val = total_cost\n# \n# for one_node in fringe_node:\n# # Compute the cost to reach a node\n# total_cost = cost_to_point[one_node] * p_c + heuristic(one_node,problem) * h_c\n# if total_cost == min_val:\n# nodes_to_expand.add(one_node)\n# fringe_node.remove(one_node)\n#\n# # Expand the fringe node \n# for one_node in nodes_to_expand:\n# path_to_parent = path_to_point[one_node]\n# for nxt_node in problem.getSuccessors(one_node):\n# pos = nxt_node[0]\n# mv = nxt_node[1]\n# # check if point already present in path to point\n# prev_cost = float(\"inf\")\n# if pos in cost_to_point:\n# prev_cost = cost_to_point[pos]\n# new_path = path_to_parent + [mv]\n# if prev_cost > problem.getCostOfActions(new_path):\n# path_to_point[pos] = new_path\n# cost_to_point[pos] = problem.getCostOfActions(new_path)\n# fringe_node.append(pos)\n#\n# # Check if destination is reached in the fringe node\n# for one_node in fringe_node:\n# if problem.isGoalState(one_node):\n# final_node = one_node\n# goal_found = True\n# break\n# \n# #print(len(fringe_node))\n# print(final_node)\n# print(path_to_point[final_node])\n# return path_to_point[final_node] \n\n util.raiseNotDefined()",
"def argmin(seq, fn):\n best = seq[0]; best_score = fn(best)\n for x in seq:\n x_score = fn(x)\n if x_score < best_score:\n best, best_score = x, x_score\n return best",
"def aStarSearch(problem, heuristic=nullHeuristic):\n \"*** YOUR CODE HERE ***\"\n class Node:\n def __init__(self, state, parent, action, pathCost):\n self.state = state\n self.parent = parent\n self.action = action\n self.pathCost = pathCost\n\n def solution(self):\n path = list()\n tempNode = self\n while tempNode.state != problem.getStartState():\n path.insert(0, tempNode.action)\n tempNode = tempNode.parent\n return path\n\n def __eq__(self, other):\n if isinstance(other, Node):\n return self.state == other.state\n\n\n def childNode(successor, parent, action, stepCost):\n pathCost = parent.pathCost + stepCost\n child = Node(successor, parent, action, pathCost)\n return child\n\n initialNode = Node(problem.getStartState(), None, None, 0)\n frontier = util.PriorityQueue() #bfs uses a queue\n frontier.push(initialNode, initialNode.pathCost + heuristic(initialNode.state, problem)) #we use f(n) = pathCost + h(n) for the best solution\n explored = set()\n\n while not frontier.isEmpty() :\n nextNode = frontier.pop() #extract from the start of the queue\n if problem.isGoalState(nextNode.state):\n return nextNode.solution()\n explored.add(nextNode.state)\n for successor, action, stepCost in problem.getSuccessors(nextNode.state):\n child = childNode(successor, nextNode, action, stepCost)\n if child.state not in explored:\n frontier.update(child, child.pathCost + heuristic(child.state, problem))\n return []\n util.raiseNotDefined()",
"def solve_best(self, repository, cost_func, verbose=0):\n self.verbose = verbose\n self.max_depth = 0\n best_cost = None\n for solution in self._solve(repository):\n cost = cost_func(**solution)\n if best_cost is None or cost <= best_cost:\n best_cost = cost\n yield solution, cost",
"def aStarSearch(problem, heuristic=nullHeuristic):\n \"*** YOUR CODE HERE ***\"\n queue = util.PriorityQueue() # PriorityQueue for searshing the graph, priorityqueue helps to pop the element with the lowest priority (cost)\n visited = [] # cKepp track of visited nodes\n path = [] # Keep track of the path\n start =problem.getStartState() # The start node\n \n queue.push((start, path,0), 0) # we push (vertex, path , cost from parent to the vertex), \n #priority(which is the cost of getting to the vertex)\n \n while not queue.isEmpty():\n (vrtx, path, costparent) = queue.pop() \n if vrtx not in visited: \n if problem.isGoalState(vrtx):\n return [p[1] for p in path] # return the actions\n visited.append(vrtx) \n for successor in problem.getSuccessors(vrtx):\n gn = successor[2]+ costparent # the real cost from root to the expanded node(successor).\n fn = gn+heuristic(successor[0], problem) # the heursitic from the expanded node to the goal node.\n queue.push((successor[0], path+[successor],gn),fn)# push the noe with f(n) as the priority element.\n \n util.raiseNotDefined()",
"def _optimize_f(self,x0,type,method,**kwargs):\n from scipy.optimize import fmin,fmin_powell\n\n if type == 'min':\n g=lambda *args,**kwargs:self.f(*args,**kwargs)\n elif type == 'max':\n g=lambda *args,**kwargs:-1*self.f(*args,**kwargs)\n elif type == 'root':\n g=lambda *args,**kwargs:np.abs(self.f(*args,**kwargs))\n elif type == 'val':\n val = kwargs.pop('valtofind')\n g=lambda *args,**kwargs:np.abs(self.f(*args,**kwargs)-val)\n elif type == 'saddle':\n raise NotImplementedError\n else:\n raise ValueError('Unrecognized optimization type')\n\n if method == 'fmin':\n res = fmin(g,x0,tuple(self.parvals),**kwargs)\n elif method == 'fmin_powell':\n res = fmin_powell(g,x0,tuple(self.parvals),**kwargs)\n else:\n raise ValueError('Unrecognized method')\n\n self.lastOpt = res\n return res[0]",
"def astar_search(problem, h=None):\n h = h or problem.h\n h = memoize(h, 'h')\n\n def f(n):\n return max(getattr(n, 'f', -infinity), n.path_cost + h(n))\n return best_first_graph_search(problem, f)",
"def add_known_best_finger_pred(self,f,knodes):\n pool = remove_knodes_duplicates(self.best_finger_pred[f] + knodes)\n self.best_finger_pred[f] = heapq.nsmallest(self.fk,pool,key=lambda kn:\\\n (dist_ident(kn.ident,self.get_finger_pred_loc(f)),kn.path_len))",
"def aStarSearch(problem, heuristic=nullHeuristic):\n \"*** YOUR CODE HERE ***\"\n fringe = util.PriorityQueueWithFunction(lambda node: node.path_cost + heuristic(node.state, problem))\n return GraphSearch(problem, 'astar').search(fringe)",
"def argmin(seq, fn):\n best = seq[0]; best_score = fn(best)\n for x in seq:\n x_score = fn(x)\n if x_score < best_score:\n best, best_score = x, x_score\n return best",
"def argmin(seq, fn):\n best = seq[0]; best_score = fn(best)\n for x in seq:\n x_score = fn(x)\n if x_score < best_score:\n best, best_score = x, x_score\n return best",
"def get_next_node() :\r\n\r\n #Checking if any traversible nodes are left\r\n if(open_list.__len__() == 0) :\r\n raise Exception(\"No traversible nodes left\")\r\n\r\n next_nodes = get_node_with_lowest_fn(open_list) #Getting the list of nodes having min. f(n) value\r\n\r\n #In case of multiple nodes, returning the node with lowest h(n) value\r\n if(next_nodes.__len__() > 1) :\r\n return get_node_with_lowest_hn(next_nodes)\r\n\r\n return next_nodes[0]",
"def fval_function(sN, weight):\r\n # IMPLEMENT\r\n\r\n # Many searches will explore nodes (or states) that are ordered by their f-value.\r\n # For UCS, the fvalue is the same as the gval of the state. For best-first search, the fvalue is the hval of the state.\r\n # You can use this function to create an alternate f-value for states; this must be a function of the state and the weight.\r\n # The function must return a numeric f-value.\r\n # The value will determine your state's position on the Frontier list during a 'custom' search.\r\n # You must initialize your search engine object as a 'custom' search engine if you supply a custom fval function.\r\n\r\n\r\n return (1 - weight) * sN.gval + weight * sN.hval",
"def aStarSearch(problem, heuristic=nullHeuristic):\n \"*** YOUR CODE HERE ***\"\n #here again we are making use of the priority queue for storing the frontier nodes\n #created a priority queue for Astarsearch\n neighbourNodes = util.PriorityQueue()\n moves = []\n neighbourNodes.push((problem.getStartState(),moves,0),0)\n seenNodes = set()\n\n while not neighbourNodes.isEmpty():\n currentState, currentActions, currentCost = neighbourNodes.pop()\n if(currentState in seenNodes):\n continue\n if problem.isGoalState(currentState):\n return currentActions\n seenNodes.add(currentState)\n for state, action, cost in problem.getSuccessors(currentState):\n if(state in seenNodes):\n continue\n #here we calculate the hueristic value of visting each nodes\n hvalue = heuristic(state, problem)\n #and while pushing onto the queue we not only have the cost to visit the node\n #but also the heuristic value of that node.\n neighbourNodes.push((state, currentActions+[action], currentCost+cost),currentCost+cost+hvalue)\n return moves"
]
| [
"0.8007521",
"0.74137783",
"0.71993816",
"0.6964788",
"0.65959895",
"0.6396831",
"0.6337583",
"0.6335017",
"0.629833",
"0.629833",
"0.62494665",
"0.6157962",
"0.6094576",
"0.6086956",
"0.60373163",
"0.60041636",
"0.5996339",
"0.5954216",
"0.594967",
"0.594148",
"0.5939211",
"0.59379447",
"0.5934648",
"0.5928193",
"0.588729",
"0.5863301",
"0.5863301",
"0.585573",
"0.5843193",
"0.5824714"
]
| 0.805306 | 0 |
A* search is best-first graph search with f(n) = g(n)+h(n). You need to specify the h function when you call astar_search. | def astar_search(problem, h=None):
h = h or problem.h
h = memoize(h, 'h')
def f(n):
return max(getattr(n, 'f', -infinity), n.path_cost + h(n))
return best_first_graph_search(problem, f) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def astar_search(problem, h=None, display=False):\n\n\th = memoize(h or problem.h, 'h')\n\treturn best_first_graph_search(problem, lambda n: n.path_cost + h(n), display)",
"def aStarSearch(problem, heuristic=nullHeuristic):\n \"*** YOUR CODE HERE ***\"\n fringe = util.PriorityQueueWithFunction(lambda node: node.path_cost + heuristic(node.state, problem))\n return GraphSearch(problem, 'astar').search(fringe)",
"def a_star_search(problem, heuristic=null_heuristic):\n fringe = util.PriorityQueueWithFunction(lambda x: x.get_cost() + heuristic(x.get_node(), problem))\n return general_search(problem, fringe)",
"def aStarSearch(problem, heuristic=nullHeuristic):\n return HeuristicGraphSearch(problem, heuristic).search()",
"def aStarSearch(problem, heuristic=nullHeuristic):\n \"*** YOUR CODE HERE ***\"\n util.raiseNotDefined()",
"def aStarSearch(problem, heuristic=nullHeuristic):\n \"*** YOUR CODE HERE ***\"\n util.raiseNotDefined()",
"def bestFirstSearch(problem, heuristic=nullHeuristic):\n \"*** YOUR CODE HERE ***\"\n fringe = util.PriorityQueueWithFunction(lambda node: heuristic(node.state, problem))\n return GraphSearch(problem, 'befs').search(fringe)",
"def aStarSearch(problem, heuristic=nullHeuristic):\n \"*** YOUR CODE HERE ***\"\n print(\"\\t===========================================\")\n print(\"\\t Processing ... Please Wait for 11 seconds!\")\n print(\"\\t===========================================\")\n startState = problem.getStartState();\n fringe = util.PriorityQueue()\n costs = 0 \n visitedNodes = []\n actions = [] \n if ( problem.isGoalState(startState) == True):\n return actions\n else:\n newFringeItem = (startState , actions , costs)\n fringe.push(newFringeItem,costs)\n while(fringe.isEmpty() == False ):\n #f(x) = h(x) + g(x)\n currentState , actions , costs = fringe.pop()\n if ( problem.isGoalState(currentState) == True):\n #print(\"Final Actions : \" + str(actions)) \n \"\"\"\n If you want the Analyzer Class analizes the chosen path and heuristic , \n Uncomment these two lines of code otherwise leave it be commented cause it increases the run time by 2 seconds.\n \"\"\"\n \"\"\"Start : Analyzer Properties \"\"\"\n #analyzer = Analyzer(problem,actions)\n #analyzer.start()\n \"\"\"End : Analyzer Properties \"\"\"\n return actions\n else:\n if(not currentState in visitedNodes ):\n visitedNodes.append(currentState)\n currentNodeSuccessors = problem.getSuccessors(currentState)\n for node in currentNodeSuccessors :\n state , action , stateCost = node\n heuristicAmount = heuristic(state , problem)\n newFringeItem = state , actions + [action] , costs + stateCost\n priority = costs + heuristicAmount\n fringe.push( newFringeItem , priority )\n \n util.raiseNotDefined()",
"def a_star_search(problem, heuristic=null_heuristic):\r\n \"*** YOUR CODE HERE ***\"\r\n fringe = util.PriorityQueue()\r\n path = set()\r\n final = []\r\n acts = dict()\r\n state = problem.get_start_state()\r\n fringe.push(state, 0)\r\n\r\n while (True):\r\n state = fringe.pop()\r\n path.add(state)\r\n states = problem.get_successors(state)\r\n acts[state] = states[:]\r\n if problem.is_goal_state(state):\r\n break\r\n\r\n states = problem.get_successors(state)\r\n # push into fringe\r\n for stat in states:\r\n if stat[0] not in path:\r\n \"\"\"\r\n it does worse in corners problems, to work better needs heavy huristic, not worth in\r\n in corners problem expandend nodes grow expo\r\n all others are better\r\n counter = 0 # in some situation it helps, in some it doesnt\r\n #print(stat[0].pieces)\r\n for x in stat[0].pieces[0]:\r\n if x:\r\n counter += 1\r\n \"\"\"\r\n counter = 0\r\n fringe.push(stat[0], stat[2] + counter + heuristic(stat[0], problem)) # problem.get_cost_of_actions([stat[1]])\r\n\r\n while (True):\r\n\r\n for key, val in acts.items():\r\n for va in val:\r\n if va[0] == state:\r\n final.append(va[1])\r\n state = key\r\n break\r\n else:\r\n continue\r\n break\r\n if state == problem.get_start_state():\r\n break\r\n\r\n final.reverse()\r\n\r\n return final",
"def astar_search_show_frontier(problem, h=None):\n h = memoize(h or problem.h, 'h')\n return best_first_graph_search_show_frontier(problem, lambda n: n.path_cost + h(n))",
"def aStarSearch(problem, heuristic=nullHeuristic):\n \"*** YOUR CODE HERE ***\"\n #here again we are making use of the priority queue for storing the frontier nodes\n #created a priority queue for Astarsearch\n neighbourNodes = util.PriorityQueue()\n moves = []\n neighbourNodes.push((problem.getStartState(),moves,0),0)\n seenNodes = set()\n\n while not neighbourNodes.isEmpty():\n currentState, currentActions, currentCost = neighbourNodes.pop()\n if(currentState in seenNodes):\n continue\n if problem.isGoalState(currentState):\n return currentActions\n seenNodes.add(currentState)\n for state, action, cost in problem.getSuccessors(currentState):\n if(state in seenNodes):\n continue\n #here we calculate the hueristic value of visting each nodes\n hvalue = heuristic(state, problem)\n #and while pushing onto the queue we not only have the cost to visit the node\n #but also the heuristic value of that node.\n neighbourNodes.push((state, currentActions+[action], currentCost+cost),currentCost+cost+hvalue)\n return moves",
"def aStarSearch(problem, heuristic=nullHeuristic):\n \"*** YOUR CODE HERE ***\"\n queue = util.PriorityQueue() # PriorityQueue for searshing the graph, priorityqueue helps to pop the element with the lowest priority (cost)\n visited = [] # cKepp track of visited nodes\n path = [] # Keep track of the path\n start =problem.getStartState() # The start node\n \n queue.push((start, path,0), 0) # we push (vertex, path , cost from parent to the vertex), \n #priority(which is the cost of getting to the vertex)\n \n while not queue.isEmpty():\n (vrtx, path, costparent) = queue.pop() \n if vrtx not in visited: \n if problem.isGoalState(vrtx):\n return [p[1] for p in path] # return the actions\n visited.append(vrtx) \n for successor in problem.getSuccessors(vrtx):\n gn = successor[2]+ costparent # the real cost from root to the expanded node(successor).\n fn = gn+heuristic(successor[0], problem) # the heursitic from the expanded node to the goal node.\n queue.push((successor[0], path+[successor],gn),fn)# push the noe with f(n) as the priority element.\n \n util.raiseNotDefined()",
"def aStarSearch(problem, heuristic=nullHeuristic):\n \"*** YOUR CODE HERE ***\"\n def djikstra(Estado_Inicial):\n from util import PriorityQueue\n Fila_Estados = PriorityQueue() # cria fila de prioridade\n Fila_Estados.push((Estado_Inicial, []), 0) # insere tupla do estado inicial e vetor, com prioridade zero\n Distancia = dict() # cria dicionário para guardar distancia (afim de comparação)\n Distancia[Estado_Inicial] = 0 # seta distância inicial = 0\n while(not Fila_Estados.isEmpty()): # enquanto fila não está vazia,\n Estado_Atual, Caminho = Fila_Estados.pop() # desempilha estado atual e caminho até então\n if problem.isGoalState(Estado_Atual): # se estado atual é o desejado,\n return Caminho # retorna caminho\n for (Sucessor, Acao, Custo) in problem.getSuccessors(Estado_Atual): # verifica sucessores\n if Sucessor not in Distancia or Distancia[Sucessor] > Distancia[Estado_Atual] + Custo + heuristic(Sucessor, problem):\n Distancia[Sucessor] = Distancia[Estado_Atual] + Custo + heuristic(Sucessor, problem) # atualiza distância de acordo com sucessor\n Fila_Estados.push((Sucessor, Caminho + [Acao]), Distancia[Sucessor])\n return djikstra(problem.getStartState()) # recursividade",
"def best_first_graph_search(problem, f):\n f = memoize(f, 'f')\n return graph_search(problem, PriorityQueue(min, f))",
"def aStarSearch(problem, heuristic=nullHeuristic):\n\t\"*** YOUR CODE HERE ***\"\n\n\t# Create the priority queue, and visited array to keep track of visited nodes.\n\tdfsStack = util.PriorityQueue()\n\tvisited = []\n\t# Get the first state in the graph, push to the priority queue\n\tfirst = problem.getStartState()\n\tdfsStack.push([first, [], 0], 0)\n\n\t# While the priority queue is not empty, pop the first node from the priority queue, and check if that state\n # is the goal state. If so, return the actions for that node. Otherwise, append that state\n # to the visited array, get its successors, and push them to the priority queue.\n\twhile not dfsStack.isEmpty():\n\t\tNewNode = dfsStack.pop()\n\t\tif((problem.isGoalState(NewNode[0]) == True)):\n\t\t\treturn NewNode[1]\n\t\tif(NewNode[0] not in visited):\n\t\t\tvisited.append(NewNode[0])\n\t\t\tfor NextNode in problem.getSuccessors(NewNode[0]):\n\t\t\t\tif NextNode[0] not in visited:\n\t\t\t\t\tcumulativeCost = NextNode[2] + NewNode[2]\n\t\t\t\t\theuristicCost = cumulativeCost + heuristic(NextNode[0], problem)\n\t\t\t\t\tdfsStack.push((NextNode[0], NewNode[1] + [NextNode[1]], cumulativeCost), heuristicCost)",
"def aStarSearch(problem, heuristic=nullHeuristic):\n \"*** YOUR CODE HERE ***\"\n\n #Creamos las estructuras de datos necesarias (priority queue y set)\n openNodes = util.PriorityQueue()\n closedNodes = set([])\n\n #Guardamos el nodo inicial\n node = Node(problem.getStartState(), '', 0, None)\n\n #Calculamos funcion heuristica y el coste acumulado para sacar la funcion de evaluacion del nodo inicial\n fn = problem.getCostOfActions(node.path) + heuristic(node.name, problem);\n\n #Lo metemos en la cola con su funcion de evaluacion como prioridad\n openNodes.push(node, fn)\n\n #Iteramos para cada nodo\n while True:\n if openNodes.isEmpty():\n break #ERROR: throw exception\n else :\n #sacamos el nodo de arriba de la cola\n node = openNodes.pop()\n if problem.isGoalState(node.name): #Comprobamos si el nodo es Goal. Si lo es terminamos.\n break\n else: #Expandimos los nodos sucesores del nodo si no estan en closed\n if nodeIsClosed(node, closedNodes) is False:\n for successor in problem.getSuccessors(node.name):\n n, p, c = successor\n succNode = Node(n, p, c, node)\n if nodeIsClosed(succNode, closedNodes) is False:\n fn = problem.getCostOfActions(findPath(succNode)) + heuristic(succNode.name, problem);\n openNodes.push(succNode, fn)\n #Metemos el nodo en closed\n closedNodes.add(node)\n\n #Devolvemos el camino al Goal\n return findPath(node)",
"def aStarSearch(problem, heuristic=nullHeuristic):\n \"*** YOUR CODE HERE ***\"\n class Node:\n def __init__(self, state, parent, action, pathCost):\n self.state = state\n self.parent = parent\n self.action = action\n self.pathCost = pathCost\n\n def solution(self):\n path = list()\n tempNode = self\n while tempNode.state != problem.getStartState():\n path.insert(0, tempNode.action)\n tempNode = tempNode.parent\n return path\n\n def __eq__(self, other):\n if isinstance(other, Node):\n return self.state == other.state\n\n\n def childNode(successor, parent, action, stepCost):\n pathCost = parent.pathCost + stepCost\n child = Node(successor, parent, action, pathCost)\n return child\n\n initialNode = Node(problem.getStartState(), None, None, 0)\n frontier = util.PriorityQueue() #bfs uses a queue\n frontier.push(initialNode, initialNode.pathCost + heuristic(initialNode.state, problem)) #we use f(n) = pathCost + h(n) for the best solution\n explored = set()\n\n while not frontier.isEmpty() :\n nextNode = frontier.pop() #extract from the start of the queue\n if problem.isGoalState(nextNode.state):\n return nextNode.solution()\n explored.add(nextNode.state)\n for successor, action, stepCost in problem.getSuccessors(nextNode.state):\n child = childNode(successor, nextNode, action, stepCost)\n if child.state not in explored:\n frontier.update(child, child.pathCost + heuristic(child.state, problem))\n return []\n util.raiseNotDefined()",
"def AStar_search(problem, heuristic=nullHeuristic):\n\n def getRoute(node):\n route = []\n route.insert(0, node.state)\n while node.action != -1:\n node = node.parent_node\n route.insert(0, node.state)\n return route\n\n def function(state):\n h = heuristic(state, problem)\n g = state.path_cost\n return g + h\n\n frontier = util.PriorityQueueWithFunction(function)\n explored = set()\n start_node = Node(problem.getStartState(), -1, 0.0, -1, 1)\n frontier.push(start_node)\n while not frontier.isEmpty():\n choice = frontier.pop()\n if choice.state not in explored:\n if problem.isGoalState(choice.state):\n return getRoute(choice)\n successors = problem.getSuccessors(choice.state)\n for successor in successors:\n node = Node(successor[0], successor[1], choice.path_cost +\n successor[2], choice, choice.depth+1)\n frontier.push(node)\n explored.add(choice.state)\n # util.raiseNotDefined()",
"def aStarSearch(problem, heuristic=nullHeuristic):\n stack = PriorityQueue()\n\n visited = []\n parent_dict = dict()\n start_state = problem.getStartState()\n stack.push(start_state, 0)\n actions_dict = dict()\n final_actions = []\n discovered = [problem.getStartState]\n cost_dict = dict()\n h_dict = dict()\n g_dict = dict()\n\n h_dict[start_state] = heuristic(start_state, problem)\n g_dict[start_state] = 0\n cost_dict[start_state] = 0\n parent_dict[start_state] = (420, 420)\n cost_dict[(420, 420)] = 0\n\n if problem.isGoalState(problem.getStartState()):\n return []\n\n while not stack.isEmpty():\n current_state = stack.pop()\n\n if current_state not in visited:\n\n visited.append(current_state)\n\n if problem.isGoalState(current_state):\n break\n successors = problem.getSuccessors(current_state)\n for s in successors:\n if s[0] not in visited:\n if s[0] not in cost_dict:\n h_dict[s[0]] = heuristic(s[0], problem)\n g_dict[s[0]] = g_dict[current_state] + s[2]\n cost_dict[s[0]] = g_dict[s[0]] + h_dict[s[0]]\n stack.push(s[0], cost_dict[s[0]])\n parent_dict[s[0]] = current_state\n actions_dict[(current_state, s[0])] = s[1]\n discovered.append(s[0])\n elif heuristic(s[0],problem) + g_dict[current_state] + s[2] < cost_dict[s[0]]:\n h_dict[s[0]] = heuristic(s[0], problem)\n g_dict[s[0]] = g_dict[current_state] + s[2]\n cost_dict[s[0]] = g_dict[s[0]] + h_dict[s[0]]\n stack.push(s[0], cost_dict[s[0]])\n parent_dict[s[0]] = current_state\n actions_dict[(current_state, s[0])] = s[1]\n\n while current_state is not start_state:\n parent = parent_dict[current_state]\n final_actions.append(actions_dict[parent, current_state])\n current_state = parent\n\n final_actions.reverse()\n return final_actions",
"def aStarSearch(problem, heuristic=nullHeuristic):\n \"*** YOUR CODE HERE ***\"\n from util import PriorityQueue\n pq = PriorityQueue()\n # visited = []\n mapper = {}\n costs = {}\n start = problem.getStartState()\n mapper[start] = None\n costs[start] = 0\n pq.push(start, 0)\n\n while not (pq.isEmpty()):\n # print costs\n point = pq.pop()\n if problem.isGoalState(point):\n current = point\n l = []\n while mapper[current] != None:\n tup = mapper[current]\n l.append(tup[1])\n current = tup[0]\n l.reverse()\n print l\n return l\n for child in problem.getSuccessors(point):\n if not child[0] in mapper:\n cost = costs[point] + child[2]\n if (child not in costs) or (cost < costs[child[0]]):\n costs[child[0]] = cost\n full_cost = cost + heuristic(child[0], problem)\n pq.push(child[0], full_cost)\n mapper[child[0]] = point, child[1]",
"def aStarSearch(problem, heuristic=nullHeuristic):\n \"*** YOUR CODE HERE ***\"\n from game import Actions\n\n waiting_list = util.PriorityQueue()\n COSTS = {}\n start_state = problem.getStartState()\n COSTS[start_state] = 0\n waiting_list.push(start_state,0)\n parents = {}\n \n while not waiting_list.isEmpty():\n q_state = waiting_list.pop()\n if problem.isGoalState(q_state):\n target_state = q_state\n break\n for child in problem.getSuccessors(q_state):\n n_cost = COSTS[q_state] + child[2]\n \n if child[0] not in COSTS or n_cost < COSTS[q_state]:\n COSTS[child[0]] = n_cost\n prior = n_cost + heuristic(child[0], problem)\n waiting_list.push(child[0], prior)\n parents[child[0]] = q_state\n\n sequence = []\n prev_state = target_state\n while target_state in parents.keys():\n target_state = parents[target_state]\n direction = Actions.vectorToDirection([prev_state[0] - target_state[0], prev_state[1] - target_state[1]])\n prev_state = target_state\n sequence.append(direction)\n \n return sequence[::-1]",
"def aStarSearch(problem, heuristic=nullHeuristic):\n \"*** YOUR CODE HERE ***\"\n # Initialize data structures\n parent_node = {}\n path_to_node = {}\n priority_queue = util.PriorityQueue()\n\n p_c = 0.5\n h_c = 1 - p_c\n\n # Get the start node\n start_node = problem.getStartState()\n parent_node[start_node] = None\n path_to_node[start_node] = []\n priority_queue.update(start_node, 0)\n\n #goal_found = False\n\n while not priority_queue.isEmpty():\n # Get the next node\n node_to_expand = priority_queue.pop()\n # Check if goal state is reached\n if problem.isGoalState(node_to_expand):\n break\n next_nodes = problem.getSuccessors(node_to_expand)\n path_to_parent = path_to_node[node_to_expand]\n\n for one_node in next_nodes:\n point, move, cost = one_node\n curr_path = path_to_node[node_to_expand] + [move]\n curr_cost = problem.getCostOfActions(curr_path)\n heuristic_cost = heuristic(point, problem)\n # Check if current node already exists in the previously visited nodes\n if point in path_to_node:\n prev_cost = problem.getCostOfActions(path_to_node[point])\n if prev_cost > curr_cost:\n path_to_node[point] = curr_path\n priority_queue.update(point, curr_cost + heuristic_cost)\n \n else:\n path_to_node[point] = curr_path\n priority_queue.update(point, curr_cost + heuristic_cost)\n \n # current_cost = problem.getCostOfActions(point) * p_c + heuristic(point, problem) * h_c\n\n print(node_to_expand) \n return path_to_node[node_to_expand]\n \n# nodes_to_expand = set()\n# # get max value node in the fringe node\n# min_val = float(\"inf\")\n# for one_node in fringe_node:\n# # Compute the cost to reach a node\n# total_cost = cost_to_point[one_node] * p_c + heuristic(one_node,problem) * h_c\n# if total_cost < min_val:\n# min_val = total_cost\n# \n# for one_node in fringe_node:\n# # Compute the cost to reach a node\n# total_cost = cost_to_point[one_node] * p_c + heuristic(one_node,problem) * h_c\n# if total_cost == min_val:\n# nodes_to_expand.add(one_node)\n# fringe_node.remove(one_node)\n#\n# # Expand the fringe node \n# for one_node in nodes_to_expand:\n# path_to_parent = path_to_point[one_node]\n# for nxt_node in problem.getSuccessors(one_node):\n# pos = nxt_node[0]\n# mv = nxt_node[1]\n# # check if point already present in path to point\n# prev_cost = float(\"inf\")\n# if pos in cost_to_point:\n# prev_cost = cost_to_point[pos]\n# new_path = path_to_parent + [mv]\n# if prev_cost > problem.getCostOfActions(new_path):\n# path_to_point[pos] = new_path\n# cost_to_point[pos] = problem.getCostOfActions(new_path)\n# fringe_node.append(pos)\n#\n# # Check if destination is reached in the fringe node\n# for one_node in fringe_node:\n# if problem.isGoalState(one_node):\n# final_node = one_node\n# goal_found = True\n# break\n# \n# #print(len(fringe_node))\n# print(final_node)\n# print(path_to_point[final_node])\n# return path_to_point[final_node] \n\n util.raiseNotDefined()",
"def aStarSearch(problem, heuristic=nullHeuristic):\n \"*** YOUR CODE HERE ***\"\n from util import PriorityQueue\n import math\n\n frontier = PriorityQueue()\n explored = []\n actions = []\n\n class node:\n def __init__(self, path, dad, action):\n self.path = path\n self.dad = dad\n self.action = action\n h = heuristic(path,problem.goal)\n if dad == None:\n self.g=0\n else:\n self.g = dad.g + heuristic(dad.path,path)\n self.cost = round(self.g + h,1)\n\n start = node(problem.getStartState(),None,'')\n frontier.push(start,start.cost)\n\n while frontier.isEmpty() == False:\n path = frontier.pop()\n successors = problem.getSuccessors(path.path)\n explored.append(path)\n for vertex in successors:\n achou = False\n for path_ex in explored:\n if vertex[0] == path_ex.path:\n achou = True\n\n if achou == False:\n successor = node(vertex[0],path,vertex[1])\n frontier.push(successor,successor.cost)\n if problem.isGoalState(successor.path):\n while len(explored) > 0:\n ant = explored.pop()\n if ant.path == successor.dad.path:\n actions.append(successor.action)\n successor = ant\n actions.reverse()\n return actions",
"def a_star_search(initial_state, heuristic_function):\n open_priorityqueue = []\n open_list = []\n list_of_processed_nodes = []\n num_unprocessed_nodes = 0\n num_unconsidered_children = 0\n\n initial_node = AStarNode(state=initial_state)\n heapq.heappush(open_priorityqueue, (initial_node.f, initial_node))\n open_list.append(initial_node)\n goal_state_found = False\n goal_node = None\n\n while len(open_list) > 0 and not goal_state_found:\n best_node_cost, best_node = heapq.heappop(open_priorityqueue)\n open_list.remove(best_node)\n list_of_processed_nodes.append(best_node)\n\n if best_node.state.goal_state_reached():\n print \"Goal state reached with node index {0}\".format(best_node.index)\n goal_state_found = True\n goal_node = best_node\n break\n\n list_of_children_nodes, num_unconsidered_children = generate_children_nodes(\n curr_node=best_node, list_of_processed_nodes=list_of_processed_nodes,\n running_count_of_children_dups=num_unconsidered_children, a_star_search=True\n )\n \n for i, child_node in enumerate(list_of_children_nodes):\n evaluate_child_node(\n parent_node=best_node, child_node=child_node,\n heuristic_function=heuristic_function\n )\n if (child_node not in open_list) and (child_node not in list_of_processed_nodes):\n heapq.heappush(open_priorityqueue, (child_node.f, child_node))\n open_list.append(child_node)\n else:\n open_matches = [n for n in open_list if child_node == n]\n closed_matches = [n for n in list_of_processed_nodes if child_node == n]\n matches = open_matches + closed_matches\n seen_node = matches[0]\n #If it's in open or closed, check if new path is better than prev path\n if (child_node.f < seen_node.f):\n seen_node.parent_index = child_node.parent_index\n seen_node.g = child_node.g\n seen_node.h = child_node.h\n seen_node.f = child_node.f\n children_nodes_of_child = get_all_nodes_children(\n node=child_node,\n list_of_potential_children_nodes=open_list+list_of_processed_nodes\n )\n #print len(children_nodes_of_child)\n #pdb.set_trace()\n for c in children_nodes_of_child:\n c_old_f = c.f\n evaluate_child_node(\n parent_node=child_node,\n child_node=c,\n heuristic_function=heuristic_function\n )\n if c in open_list:\n open_priorityqueue.remove((c_old_f, c))\n open_list.remove(c)\n heapq.heappush(open_priorityqueue, (c.f, c))\n open_list.append(c)\n else:\n list_of_processed_nodes.remove(c)\n list_of_processed_nodes.append(c)\n\n if len(open_list) == 0 and not goal_state_found:\n print '*'*40\n print 'NO SOLUTION PATH FOUND'\n print '*'*40\n sys.exit(0)\n\n return goal_node, list_of_processed_nodes",
"def aStarSearch(problem, heuristic=nullHeuristic):\n visited = []\n solution = []\n intialCost = 0\n priorityQueue = util.PriorityQueue()\n priorityQueue.push((problem.getStartState(),solution,intialCost),intialCost)\n \n while not priorityQueue.isEmpty():\n coord, solution, totalStep = priorityQueue.pop()\n if problem.isGoalState(coord):\n return solution\n if not coord in visited:\n visited+=[coord]\n for position, direction, step in problem.getSuccessors(coord):\n newSolution = solution+[direction]\n g = totalStep + step\n newTotalCost = g + heuristic(position, problem)\n priorityQueue.push((position, newSolution, g), newTotalCost)",
"def aStarSearch(problem, heuristic=nullHeuristic):\n \n pq = util.PriorityQueue()\n start = problem.getStartState()\n pq.push(start,heuristic(start,problem))\n cost_so_far = {}\n cost_so_far[start] = 0\n came_from = {}\n came_from[start] = (None,None)\n actions =[]\n\n while not pq.isEmpty() :\n current=pq.pop()\n if problem.isGoalState(current) :\n break\n neighbours = problem.getSuccessors(current)\n for (next,action,cost) in neighbours :\n new_cost = cost_so_far[current] + cost\n if next not in cost_so_far or new_cost < cost_so_far[next] :\n cost_so_far[next] = new_cost\n priority = new_cost + heuristic(next,problem)\n pq.push(next, priority)\n came_from[next] = (current,action)\n\n # exiting the while loop when current == goalstate , now time to trace back !\n while current != start :\n parent,action = came_from[current]\n actions.append(action)\n current = parent\n actions.reverse() \n return actions",
"def recursive_best_first_search(problem, h=None):\n h = memoize(h or problem.h, 'h')\n\n infinity = 999999999\n\n\n def RBFS(problem, node, flimit):\n if problem.goal_test(node.state):\n return node, 0 # (The second value is immaterial)\n successors = node.expand(problem)\n if len(successors) == 0:\n return None, infinity\n for s in successors:\n s.f = max(s.path_cost + h(s), node.f)\n while True:\n # Order by lowest f value\n successors.sort(key=lambda x: x.f)\n best = successors[0]\n if best.f > flimit:\n return None, best.f\n if len(successors) > 1:\n alternative = successors[1].f\n else:\n alternative = infinity\n result, best.f = RBFS(problem, best, min(flimit, alternative))\n if result is not None:\n return result, best.f\n\n node = Node(problem.initial)\n node.f = h(node)\n result, bestf = RBFS(problem, node, infinity)\n return result",
"def astar(G,s,g,cost=(lambda v,w:1),heuristic=(lambda v:0),verbose=1):\n if not callable(g):\n gtest = lambda x,goal=g: x==g\n else:\n gtest = g\n d = dict((v,float('inf')) for v in G.nodes())\n p = dict((v,None) for v in G.nodes())\n d[s] = 0\n Q = [(0,0,s)] #each element is a tuple (f,-c,v) with f=c + heuristic(v), c=cost from start, v=vertex\n nnodes = 0\n while len(Q) > 0:\n f,minus_c,v = heapq.heappop(Q) #get the element in the queue with the least value of c\n nnodes += 1\n if gtest(v):\n #found a path\n if verbose: print(\"A* succeeded in\",nnodes,\"iterations\")\n return predecessor_traverse(p,s,v),d,p\n for w in G.neighbors(v):\n dcand = d[v] + cost(v,w) #this is the cost of going through v to w\n if dcand < d[w]:\n #going through v is optimal\n #if the predecessor of w is not None, then we'll have to adjust the heap\n if p[w] is not None:\n Q = [(f,c,x) for (f,c,x) in Q if x is not w]\n heapq.heapify(Q)\n d[w] = dcand\n p[w] = v\n #put w back on the queue, with the heuristic value as its priority\n heapq.heappush(Q,(dcand+heuristic(w),-dcand,w))\n #no path found\n if verbose: print(\"A* failed in\",nnodes,\"iterations\")\n return None,d,p",
"def aStarSearch(problem, heuristic=nullHeuristic):\n \"*** YOUR CODE HERE ***\"\n\n from game import Directions\n s = Directions.SOUTH\n w = Directions.WEST\n n = Directions.NORTH\n e = Directions.EAST\n\n result = []\n qu = util.PriorityQueue()\n visited = set([])\n current = (problem.getStartState(), \"\", 0)\n qu.update(current, 0)\n costs = {}\n parents = {}\n parents[problem.getStartState()] = (problem.getStartState(), \"\")\n\n while not qu.isEmpty():\n cost, current = qu.pop()\n visited.add(current[0])\n\n if problem.isGoalState(current[0]):\n result = current[0]\n break\n\n for each in problem.getSuccessors(current[0]):\n if each[0] not in visited:\n qu.update(each, cost + each[2] + heuristic(each[0], problem))\n if each[0] not in costs:\n costs[each[0]] = cost + each[2]\n parents[each[0]] = (current[0], each[1])\n elif costs[each[0]] > cost + each[2] + heuristic(each[0], problem):\n costs[each[0]] = cost + each[2] + heuristic(each[0], problem)\n parents[each[0]] = (current[0], each[1])\n\n path = []\n while parents[result][0] != result:\n path.append(parents[result][1])\n result = parents[result][0]\n\n path.reverse()\n result = []\n for each in path:\n if each == \"South\":\n result.append(s)\n elif each == \"West\":\n result.append(w)\n elif each == \"North\":\n result.append(n)\n elif each == \"East\":\n result.append(e)\n\n return result\n util.raiseNotDefined()\n\n util.raiseNotDefined()",
"def a_star_gs(self):\n print('Performing A*GS\\n')\n\n frontier = PriorityFrontier()\n\n initial_node = SearchNode(self.initial_state)\n initial_heuristic = self.get_heuristic(self.initial_state) + initial_node.path_cost\n frontier.insert(initial_node, initial_heuristic)\n\n visited_nodes = set()\n \n num_expanded_nodes = 0\n \n while True:\n if frontier.is_empty():\n # Search failure\n print('Empty frontier.')\n return AStarResult(failure=True)\n \n # Get the next leaf node from the frontier\n leaf_node = frontier.pop()\n \n if not leaf_node:\n # Search failure\n print('Popped all the frontier nodes.')\n return AStarResult(failure=True)\n \n # Check for the goal state\n if self.check_goal_state(leaf_node.state):\n # Search success\n # Return final state and list of actions along path to the goal\n # as part of the AStarResult class solution member\n action_path = self.get_action_path(leaf_node)\n return AStarResult(solution=Solution(final_state=leaf_node.state, actions=action_path), \n num_expanded_nodes=num_expanded_nodes, max_depth=len(action_path) - 1)\n\n # Add this node to the visited nodes set\n visited_nodes.add(leaf_node)\n \n # Generate all possible actions for the given state\n actions = self.get_actions(leaf_node.state)\n \n # Create search nodes from the generated actions\n for action in actions:\n # Generate a new state from the given action\n new_state = self.get_result(leaf_node.state, action)\n \n # Create a new search node with the created state\n new_node = SearchNode(new_state, leaf_node, action, path_cost=leaf_node.path_cost + 1)\n \n num_expanded_nodes += 1\n\n # If this node has already been visited, ignore it\n if new_node in visited_nodes:\n continue\n\n # Get the new node's heuristic\n new_heuristic = self.get_heuristic(new_state) + new_node.path_cost\n \n # Check for any nodes with the same state as new_state and with better heuristic values that \n # have yet to be visited in the frontier before adding new_node\n if new_node in frontier:\n frontier_node = frontier.peek_node(new_node)\n frontier_heuristic = frontier.peek_heuristic(new_node)\n\n if frontier_heuristic <= new_heuristic:\n # The original heuristic was less than or equal to the new node\n # Disregard the new node\n continue\n \n else:\n # The new node's heuristic is larger\n # Remove the original node from the frontier\n frontier.remove_node(frontier_node)\n\n # Add the new node to the frontier\n frontier.insert(new_node, new_heuristic)"
]
| [
"0.7837965",
"0.72131985",
"0.7186974",
"0.7146829",
"0.7014595",
"0.7014595",
"0.68833905",
"0.6877281",
"0.6810793",
"0.67949086",
"0.67546475",
"0.67528677",
"0.67514026",
"0.67317224",
"0.67040306",
"0.66801614",
"0.66776395",
"0.660388",
"0.65700275",
"0.64990413",
"0.6486967",
"0.64842033",
"0.64822155",
"0.64455193",
"0.6445388",
"0.6438087",
"0.6434775",
"0.642536",
"0.64163274",
"0.64141375"
]
| 0.8199077 | 0 |
This helper function validates inputs to check that they are either scalars or arrays, and that any arrays share the same shape. It either raises an error or returns the common shape (or 1 if all arguments are scalar). | def check_input_shapes(*args):
# Collect the shapes of the inputs
shapes = set()
# DESIGN NOTES - currently allow:
# - scalars,
# - 0 dim ndarrays (also scalars but packaged differently)
# - 1 dim ndarrays with only a single value
for val in args:
if isinstance(val, np.ndarray):
# Note that 0-dim ndarrays (which are scalars) pass through as do
# one dimensional arrays with a single value (also a scalar)
if not(val.ndim == 0 or val.shape == (1,)):
shapes.add(val.shape)
# elif isinstance(val, Series):
# # Note that 0-dim ndarrays (which are scalars) pass through
# if val.ndim > 0:
# shapes.add(val.shape)
elif val is None or isinstance(val, (float, int, np.generic)):
pass # No need to track scalars and optional values pass None
else:
raise ValueError(f'Unexpected input to check_input_shapes: {type(val)}')
# shapes can be an empty set (all scalars) or contain one common shape
# otherwise raise an error
if len(shapes) > 1:
raise ValueError('Inputs contain arrays of different shapes.')
if len(shapes) == 1:
return shapes.pop()
return 1 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def common_shape(arrays):\n arrays = iter(arrays)\n shape = next(arrays).shape\n for array in arrays:\n shape = tuple(a if a == b else None\n for a, b in zip(shape, array.shape))\n return shape",
"def _get_consistent_shape(images: Iterable):\n dim0s = []\n dim1s = []\n\n for img in images:\n dim0s.append(img.shape[0])\n dim1s.append(img.shape[1])\n\n assert len(set(dim0s)) == 1 and len(set(dim1s)) == 1, 'Inconsistent shapes.'\n\n return dim0s[0], dim1s[0]",
"def standardize_single_array(x):\n if x is None:\n return None\n if tensor_util.is_tensor(x):\n x_shape_ndims = array_ops.rank(x)\n else:\n x_shape_ndims = len(x.shape)\n\n if (x_shape_ndims == 1 and (expected_shape is None or len(expected_shape) != 1)):\n if tensor_util.is_tensor(x):\n x = array_ops.expand_dims(x, axis=1)\n else:\n x = np.expand_dims(x, 1)\n return x",
"def right_shape(func, args, expected):\n \n if args is None:\n assert(np.shape(func())==expected)\n else:\n assert(np.shape(func(*args))==expected)",
"def check_shapes(arrs):\r\n shps = [i.shape for i in arrs]\r\n eq = np.all(np.array([shps[0] == i for i in shps[1:]]))\r\n err = \"Arrays arr not of the same shape...\"\r\n if not eq:\r\n raise ValueError(\"{}\\n{}\".format(err, shps))",
"def as_same_dimension(*arrays):\n ndim = arrays[0].ndim\n for a in arrays:\n if a.ndim == ndim:\n continue\n # XXX could try broadcasting here\n raise ValueError(\"Invalid array dimensions: %s vs %s\" % (ndim, a.ndim))\n return arrays",
"def check_array_shape(logger, arr, name, expected_shape):\n shape = arr.shape\n check_array_ndim(arr, name, len(expected_shape))\n for i in range(len(shape)):\n check_array_dim(logger, arr, name, expected_shape[i], i)\n\n return arr",
"def check_input_shape(self, op, block):\n\n ipt_name = op.input(op.input_names[0])\n ipt_shape = block.var(ipt_name).shape\n for i in ipt_shape:\n if i < 0:\n warning_msg = (\n f\"Input {ipt_name}(shape={ipt_shape}) has unkown dimension shapes. \"\n f\"Specifying static values may improve performance\"\n )\n warnings.warn(warning_msg)",
"def have_same_shapes(array1, array2):\n return array1.shape == array2.shape",
"def validate_common(ndarray, name):\n\tvalidate_ndarray(ndarray,(np.float, np.int), (2,) , name)",
"def _is_scalar_from_shape(shape):\n return _logical_equal(_ndims_from_shape(shape), 0)",
"def _is_all_input_shape_generalize(input_shape_tuple):\n for elem in input_shape_tuple:\n if not is_shape_unknown(elem.shape):\n return False\n return True",
"def _is_scalar(shape):\n return F.shape_mul(shape) == 1",
"def _ExtractInputShapes(inputs):\n if context.executing_eagerly():\n return array_ops.shape_n(inputs)\n sizes = []\n fully_known = True\n for x in inputs:\n input_shape = array_ops.shape(x)\n if not isinstance(input_shape,\n tensor.Tensor) or input_shape.op.type != \"Const\":\n fully_known = False\n break\n sizes.append(input_shape)\n\n if fully_known:\n return sizes\n else:\n return array_ops.shape_n(inputs)",
"def standardize_single_array(x, expected_shape=None):\n if x is None:\n return None\n\n if is_composite_or_composite_value(x):\n return x\n\n if isinstance(x, int):\n raise ValueError(\n 'Expected an array data type but received an integer: {}'.format(x))\n\n if (x.shape is not None and len(x.shape) == 1 and\n (expected_shape is None or len(expected_shape) != 1)):\n if tensor_util.is_tf_type(x):\n x = array_ops.expand_dims(x, axis=1)\n else:\n x = np.expand_dims(x, 1)\n return x",
"def _ndims_from_shape(shape):\n if shape.get_shape().ndims not in (None, 1):\n raise ValueError(\"input is not a valid shape: not 1D\")\n if not shape.dtype.is_integer:\n raise TypeError(\"input is not a valid shape: wrong dtype\")\n if shape.get_shape().is_fully_defined():\n return constant_op.constant(shape.get_shape().as_list()[0])\n return array_ops.shape(shape)[0]",
"def assert_equal_shapes(numpy_arrays: list):\n\n if len(numpy_arrays) < 2:\n return\n\n shapes = np.asarray([np.shape(_arr) for _arr in numpy_arrays]).astype(float)\n mean = np.mean(shapes, axis=0)\n for i in range(len(shapes)):\n shapes[i, :] = shapes[i, :] - mean\n\n if not np.sum(np.abs(shapes)) <= 1e-5:\n raise AssertionError(\"The given volumes did not all have the same\"\n \" dimensions. Please double check the simulation\"\n f\" parameters. Called from {inspect.stack()[1].function}\")",
"def _check_same_shape(preds: Tensor, target: Tensor) ->None:\n if preds.shape != target.shape:\n raise RuntimeError(f'Predictions and targets are expected to have the same shape, but got {preds.shape} and {target.shape}.')",
"def _check_data_shape_to_num_outputs(preds: Tensor, target: Tensor, num_outputs: int) ->None:\n if preds.ndim > 2 or target.ndim > 2:\n raise ValueError(f'Expected both predictions and target to be either 1- or 2-dimensional tensors, but got {target.ndim} and {preds.ndim}.')\n if num_outputs == 1 and preds.ndim != 1 or num_outputs > 1 and num_outputs != preds.shape[1]:\n raise ValueError(f'Expected argument `num_outputs` to match the second dimension of input, but got {num_outputs} and {preds.shape[1]}.')",
"def check_array_lengths(inputs, targets, weights=None):\n\n def is_tensor_or_composite_tensor(x):\n return tensor_util.is_tf_type(x) or is_composite_or_composite_value(x)\n\n def set_of_lengths(x):\n # Returns a set with the variation between\n # different shapes, with None => 0\n if x is None:\n return {}\n else:\n return set([\n y.shape[0]\n for y in x\n if y is not None and not is_tensor_or_composite_tensor(y)\n ])\n\n set_x = set_of_lengths(inputs)\n set_y = set_of_lengths(targets)\n set_w = set_of_lengths(weights)\n if len(set_x) > 1:\n raise ValueError('All input arrays (x) should have '\n 'the same number of samples. Got array shapes: ' +\n str([x.shape for x in inputs]))\n if len(set_y) > 1:\n raise ValueError('All target arrays (y) should have '\n 'the same number of samples. Got array shapes: ' +\n str([y.shape for y in targets]))\n if set_x and set_y and list(set_x)[0] != list(set_y)[0]:\n raise ValueError('Input arrays should have '\n 'the same number of samples as target arrays. '\n 'Found ' + str(list(set_x)[0]) + ' input samples '\n 'and ' + str(list(set_y)[0]) + ' target samples.')\n if len(set_w) > 1:\n raise ValueError('All sample_weight arrays should have '\n 'the same number of samples. Got array shapes: ' +\n str([w.shape for w in weights]))\n if set_y and set_w and list(set_y)[0] != list(set_w)[0]:\n raise ValueError('Sample_weight arrays should have '\n 'the same number of samples as target arrays. Got ' +\n str(list(set_y)[0]) + ' input samples and ' +\n str(list(set_w)[0]) + ' target samples.')",
"def really1d(arr):\n if np.ndim(arr) != 1:\n return False\n # Empty list or array\n if len(arr) == 0:\n return True\n if np.any(np.vectorize(np.ndim)(arr)):\n return False\n return True",
"def get_shape(x):\n if isinstance(x, list) and len(x) > 0:\n shapes = [get_shape(subx) for subx in x]\n if any([s != shapes[0] for s in shapes[1:]]):\n raise ValueError('Parameter dimension not consistent: {}'.format(x))\n return (len(x), ) + shapes[0]\n else:\n if hasattr(x, '_shape_tuple'):\n return x._shape_tuple() # method to return the shape as a tuple\n elif hasattr(x, 'shape'):\n return tuple(x.shape)\n else:\n return ()",
"def _assert_is_batched(self, *arrays):\n shape_list = []\n for array in arrays:\n if isinstance(array, tf.Tensor):\n shape_list.append(array.shape.as_list())\n else:\n shape_list.append(np.shape(array))\n # All arrays should have at least two dimensions.\n assert all([len(shape) >= 2 for shape in shape_list])\n # All arrays should have the same batch size.\n assert len(set([shape[0] for shape in shape_list])) == 1",
"def test_convolve_input_dim_check(self, case, fn, x_shape, y_shape):\n x = torch.rand(*x_shape, dtype=self.dtype, device=self.device)\n y = torch.rand(*y_shape, dtype=self.dtype, device=self.device)\n\n message = [\n \"The operands must be the same dimension\",\n \"Leading dimensions of x and y are not broadcastable\",\n ][case]\n with self.assertRaisesRegex(ValueError, message):\n fn(x, y)",
"def infer_shape(inputs, mod=None):\n out_type = infer_type(inputs, mod=mod)\n checked_type = out_type.checked_type\n if hasattr(checked_type, 'shape'):\n # Regular operator that outputs tensors\n return get_const_tuple(out_type.checked_type.shape)\n # The return type is not a tensor, for example List\n return checked_type",
"def _check_shape(shape):\n if type(shape) == int:\n shape = (shape, shape)\n check_odd(shape, 'psf shape')\n return shape",
"def check_consistent_length(arrays: Sequence[npt.ArrayLike]) -> None:\n lengths = [_num_samples(X) for X in arrays if X is not None]\n uniques = np.unique(lengths)\n if len(uniques) > 1:\n raise ValueError(\n \"Found input variables with inconsistent numbers of\" \" samples: %r\" % [int(length) for length in lengths]\n )",
"def _check_shape(placeholder_shape, data_shape):\n\n return True",
"def get_shape(x):\n\n return None if jnp.isscalar(x) else x.shape",
"def nd_shape_checking(x, y, mvaxis, traxis):\n assert x.ndim == y.ndim\n dims = np.delete(np.arange(x.ndim), -2)\n assert all([x.shape[k] == y.shape[k] for k in dims])"
]
| [
"0.6624878",
"0.65816003",
"0.6564006",
"0.64985836",
"0.63906205",
"0.63877285",
"0.63808954",
"0.63576496",
"0.6357112",
"0.6344511",
"0.6341772",
"0.62508506",
"0.62365884",
"0.62330407",
"0.61754423",
"0.61516166",
"0.61161005",
"0.6115013",
"0.60989213",
"0.60769963",
"0.6073869",
"0.60554945",
"0.60365415",
"0.60357904",
"0.60346633",
"0.5974759",
"0.59523916",
"0.5919766",
"0.58980244",
"0.5897824"
]
| 0.82600343 | 0 |
Helper function to create a simple table of attribute mean, min, max and nan count from an object for use in summarize function. | def summarize_attrs(obj, attrs, dp=2, repr_head=True):
ret = []
for attr in attrs:
data = getattr(obj, attr)
ret.append([attr,
np.round(np.nanmean(data), dp),
np.round(np.nanmin(data), dp),
np.round(np.nanmax(data), dp),
np.count_nonzero(np.isnan(data))])
hdrs = ['Attr', 'Mean', 'Min', 'Max', 'NaN']
if repr_head:
print(obj)
print(tabulate.tabulate(ret, headers=hdrs)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def summary(df):\n summary_list = []\n print 'SHAPE', df.shape\n \n for i in df.columns:\n vals = df[i] \n if df[i].dtype == 'O':\n try:\n most_frequent = Counter(df[i].tolist()).most_common(1)\n uniq = vals.nunique()\n except TypeError:\n most_frequent = 'NA'\n uniq = 'NA'\n summary_list.append([i,\n vals.dtype, \n 'NA', \n 'NA', \n most_frequent,\n uniq, \n sum(pd.isnull(vals)),\n sum(pd.isnull(vals))/(1.0*len(df))])\n elif df[i].dtype == '<M8[ns]':\n most_frequent = Counter(df[i].tolist()).most_common(1)\n summary_list.append([i,\n vals.dtype, \n vals.min(), \n vals.max(), \n most_frequent,\n vals.nunique(), \n sum(pd.isnull(vals)),\n sum(pd.isnull(vals))/(1.0*len(df))])\n else:\n summary_list.append([i,\n vals.dtype, \n vals.min(), \n vals.max(), \n vals.mean(),\n vals.nunique(), \n sum(pd.isnull(vals)),\n sum(pd.isnull(vals))/(1.0*len(df))])\n return pd.DataFrame(summary_list, columns=['col','datatype','min','max','mean_or_most_common','num_uniq','null_count','null_pct'])",
"def basic_stats_builtin(x):\n return {\"Minimum: \": min(x), \"Maximum: \": max(x), \"Sum: \": sum(x),\n \"Mean: \": statistics.mean(x)}",
"def summary(aggregate):\n (airport, (count, total, square, minimum, maximum)) = aggregate\n\n try:\n mean = total / float(count)\n stddev = math.sqrt((square-(total**2)/count)/count-1)\n\n return (airport, (count, mean, stddev, minimum, maximum))\n except Exception:\n return (airport, (count, None, None, minimum, maximum))",
"def _summary(obj):\n return obj.summary",
"def _summarize(obj, fields):\n for name in fields:\n attr = getattr(obj, name)\n if attr is None:\n continue\n elif isinstance(attr, datetime):\n attr = attr.isoformat()\n yield f\"{name}: {repr(attr)}\"",
"def _summarize(obj, fields):\n for name in fields:\n attr = getattr(obj, name)\n if attr is None:\n continue\n elif isinstance(attr, datetime):\n attr = attr.isoformat()\n yield f\"{name}: {repr(attr)}\"",
"def median_table(obj: dict) -> Tuple[str]:\n try:\n columns = list(obj.values())[0].keys()\n header = \"| |{}\".format(''.join([f\"{i}|\" for i in columns]))\n alignment = f\"\"\"|:---|{''.join(['---:|']*len(columns))}\"\"\"\n header = f\"{header}\\n{alignment}\"\n \n rows = []\n for k, v in obj.items():\n cell_1 = f'|{k}|'\n row_values = ''.join([f\"{round(i[2], 2)}|\" for i in v.values()])\n row = f\"{cell_1}{row_values}\"\n rows.append(row)\n \n table = \"{}\\n{}\".format(header, '\\n'.join(rows))\n return table, None\n \n except Exception as e:\n return None, e",
"def table_summary():\n \n t = dict()\n t['name'] = get_names()\n t['Name'] = [get_properties(name)['label'] for name in t['name']]\n N = len(t['name'])\n \n # host\n t['host'] = ['Sagittarius', 'Sagittarius', 'none', 'Gaia-Sausage-Enceladus', 'Sagittarius', 'Sequoia / Arjuna / I\\'itoi', 'Sequoia / Arjuna', np.nan, np.nan, 'Sequoia / Arjuna', 'Gaia-Sausage-Enceladus', 'Sequoia / Arjuna', 'Helmi / Wukong', 'Helmi / Wukong', 'Sagittarius', 'in situ / Helmi / Wukong', 'Helmi / Wukong', 'Cetus', 'Cetus', 'Sagittarius', 'Sequoia / Arjuna / I\\'itoi', 'Cetus', 'Sequoia / Arjuna / I\\'itoi']\n \n # progenitor\n t['progenitor'] = [np.nan, np.nan, 'itself', 'NGC 5139', 'NGC 4590', np.nan, 'NGC 3201', '(Wukong / Helmi)', '(Wukong / Helmi)', np.nan, np.nan, np.nan, np.nan, 'NGC 5024', np.nan, 'NGC 5272', 'NGC 5024', 'NGC 5824', 'NGC 5824', np.nan, np.nan, np.nan, np.nan]\n \n # progenitor type\n t['type'] = ['DG' if name in ['elqui', 'indus', 'jhelum'] else 'GC' for name in t['name']]\n \n # metallicity\n t['feh'] = [-2.4, -2.4, -2.2, -1.5, -2.16, -2.3, -1.5, -2.1, -2.1, -1.6, -1.95, -1.6, -2.7, np.nan, -1.7, -1.1, -2.7, -1.9, np.nan, np.nan, -2.2, np.nan, -1.9]\n \n # associations\n t['friends'] = ['ATLAS', 'Aliqa Uma', np.nan, np.nan, np.nan, np.nan, np.nan, 'Jhelum', 'Indus', np.nan, np.nan, np.nan, np.nan, 'Sylgr', np.nan, np.nan, 'Ravi', 'Turbio', 'Triangulum', np.nan, np.nan, np.nan, np.nan]\n \n tout = Table(t)\n tout.pprint()\n tout.write('../data/stream_origin.fits', overwrite=True)",
"def summarize(self):\n if self.__total_count == 0:\n return \"(count=0)\"\n\n # noinspection PyStringFormat\n return \"(count=%ld,avg=%.2lf,min=%.2lf,max=%.2lf,median=%.2lf)\" % (\n self.count(),\n self.average(),\n self.min(),\n self.max(),\n self.estimate_median(),\n )",
"def show_stats(x):\n print(\"min =\", x.min())\n print(\"max =\", x.max())\n print(\"median =\", np.median(x))\n print(\"average =\", x.mean())\n print(\"std =\", x.std())",
"def basic_stats_custom(x):\n def my_min(x):\n m = x[0]\n for elem in x:\n if elem < m:\n m = elem\n else:\n pass\n return m\n\n def my_max(x):\n m = x[0]\n for elem in x:\n if elem > m:\n m = elem\n else:\n pass\n return m\n\n def my_sum(x):\n acc = 0\n for elem in x:\n acc += elem\n return acc\n\n def my_len(x):\n acc = 0\n for elem in x:\n acc += 1\n return acc\n\n def my_mean(x):\n \"\"\"Return the mean of numbers in list. Since the task is not to use\n builtin functions, length is also defined anew (prior to this\n function definition).\n\n \"\"\"\n return my_sum(x) / my_len(x)\n return {\"Minimum: \": my_min(x), \"Maximum: \": my_max(x), \"Sum: \":\n my_sum(x), \"Mean: \": my_mean(x)}",
"def summarize(dataset):\n summaries = [(np.mean(attribute), np.std(attribute)) for attribute in zip(*dataset)]\n\n return summaries",
"def descriptive_stats(array, verbose=True, label='', mean=False, plot=False):\n if mean:\n mean_ = np.mean(array)\n median = np.median(array)\n mini = np.min(array)\n maxi = np.max(array)\n first_qu = np.percentile(array, 25)\n third_qu = np.percentile(array, 75)\n\n if verbose:\n if mean:\n label += 'min={:.1f} / 1st QU={:.1f} / ave={:.1f} / med={:.1f} / '\n label += '3rd QU={:.1f} / max={:.1f}'\n print(label.format(mini, first_qu, mean_, median, third_qu, maxi))\n else:\n label += 'min={:.1f} / 1st QU={:.1f} / med={:.1f} / 3rd QU={:.1f} '\n label += '/ max={:.1f}'\n print(label.format(mini, first_qu, median, third_qu, maxi))\n\n if plot:\n boxplot(array, vert=False, meanline=mean, showfliers=True, sym='.')\n\n if mean:\n return mini, first_qu, mean_, median, third_qu, maxi\n else:\n return mini, first_qu, median, third_qu, maxi",
"def summaryone(x):\n print 'mean and std are ',np.mean(x), np.std(x)\n print 'max and min are ',np.max(x), np.min(x)\n print 'the range is ',np.max(x)-np.min(x)",
"def dictionary_of_metrics(items):\n \n n = len(items)\n average = round(np.mean(items), 2)\n median = round(np.median(items), 2)\n variance = round((sum((items-np.mean(items))**2))/(n-1), 2)\n standard_dev = round(((sum((items-np.mean(items))**2))/(n-1))**(1/2), 2)\n minimum = round(min(items), 2)\n maximum = round(max(items), 2)\n \n return {'mean':average,'median':median,'var':variance,'std':standard_dev,'min':minimum,'max':maximum}\n pass",
"def __profile_to_table(data):\n output = [[\"condition\", \"mean\", \"min\", \"max\"]]\n order = data[\"order\"]\n\n for o in order:\n try:\n values = data[\"data\"][o]\n output.append(\n [o, str(mean(values)), str(min(values)), str(max(values))]\n )\n except Exception as e:\n print(e)\n\n return \"\\n\".join([\"\\t\".join(l) for l in output])",
"def summarize_features(data, include_nan = True, print_vals = True):\n if(include_nan):\n # calculate the different measures including nan values\n means = np.mean(data, axis=0)\n medians = np.median(data, axis=0)\n stds = np.std(data, axis=0)\n mins = np.min(data, axis=0)\n maxs = np.max(data, axis=0)\n else:\n # calculate the different measures discarding nan values\n means = np.nanmean(data, axis=0)\n medians = np.nanmedian(data, axis=0)\n stds = np.nanstd(data, axis=0)\n mins = np.nanmin(data, axis=0)\n maxs = np.nanmax(data, axis=0)\n \n if(print_vals):\n # print the values obtained\n print()\n if(include_nan):\n print(\"summary variables, where nan values are not ignored:\")\n else:\n print(\"summary variables, where nan values are ignored:\")\n for idx, mean in enumerate(means):\n print(\"feature {idx}: mean={m:.3f} std={s:.3f} median={me:.3f} min={mi:.3f} max={ma:.3f}.\".format(\n idx=idx, m=mean, s=stds[idx], me=medians[idx], mi=mins[idx], ma=maxs[idx]))\n print()\n return means, stds, medians, mins, maxs",
"def CustomMetrics(InputDataframe):\r\n \r\n DataframeMetrics = pandas.DataFrame(index=InputDataframe.columns,columns='ColumnDatatypes #UniqueValues NullValuesPresent #NullValues'.split())\r\n Columns = list(InputDataframe.columns)\r\n \r\n for Values in Columns: \r\n DataframeMetrics['ColumnDatatypes'][Values] = InputDataframe[Values].dtypes\r\n DataframeMetrics['#UniqueValues'][Values] = InputDataframe[Values].nunique()\r\n DataframeMetrics['#NullValues'][Values] = InputDataframe[Values].isnull().sum()\r\n DataframeMetrics['NullValuesPresent'][Values] = InputDataframe[Values].isnull().values.any()\r\n \r\n print('The dimensions of the input dataframe are: {} rows by {} columns.'.format(len(InputDataframe.index), len(InputDataframe.columns)))\r\n return DataframeMetrics",
"def statistics_from_array(x: numpy.ndarray):\n try:\n return x.mean(), x.std(), x.max(), x.min()\n except AttributeError:\n return numpy.nan, numpy.nan, numpy.nan, numpy.nan",
"def summarize(self):\n return self.df.describe()",
"def information(self):\n \n \n x = list(zip(self.df.count(), self.df.dtypes, (self.df.isnull().sum() / self.df.shape[0])))\n y = dict(zip(self.df.columns, x))\n return pd.DataFrame(y, index=['Number of Values', 'Data Type', 'Percent Missing']).transpose()",
"def ensure_default_properties(obj):\n obj = ee.Dictionary(obj)\n default_properties = ee.Dictionary({\"mean\": -9999,\"count\": -9999,\"sum\":-9999})\n return default_properties.combine(obj)",
"def extract_summary(\n self, mean=True, max=True, min=True, ignore_sessions=False, *args, **kwargs\n ):\n out = self.__class__().__finalize__(self)\n if ignore_sessions == False:\n out.sessions = np.unique(self.sessions)\n if mean:\n new = self.extract_mean(ignore_sessions=ignore_sessions, *args, **kwargs)\n out = out.append(new, axis=1)\n # for attr_name in ['au_columns', 'emotion_columns', 'facebox_columns', 'landmark_columns', 'facepose_columns', 'gaze_columns', 'time_columns']:\n # if new.__getattr__(attr_name):\n # new_attr = new.__getattr__(attr_name)\n # out.__setattr__(attr_name, new_attr)\n if max:\n new = self.extract_max(ignore_sessions=ignore_sessions, *args, **kwargs)\n out = out.append(new, axis=1)\n # for attr_name in ['au_columns', 'emotion_columns', 'facebox_columns', 'landmark_columns', 'facepose_columns', 'gaze_columns', 'time_columns']:\n # if out.__getattr__(attr_name) and new.__getattr__(attr_name):\n # new_attr = out.__getattr__(attr_name) + new.__getattr__(attr_name)\n # out.__setattr__(attr_name, new_attr)\n if min:\n new = self.extract_min(ignore_sessions=ignore_sessions, *args, **kwargs)\n out = out.append(new, axis=1)\n for attr_name in [\n \"au_columns\",\n \"emotion_columns\",\n \"facebox_columns\",\n \"landmark_columns\",\n \"facepose_columns\",\n \"gaze_columns\",\n \"time_columns\",\n ]:\n if self.__getattr__(attr_name):\n new_attr = []\n if mean:\n new_attr.extend(\n [\"mean_\" + attr for attr in self.__getattr__(attr_name)]\n )\n if max:\n new_attr.extend(\n [\"max_\" + attr for attr in self.__getattr__(attr_name)]\n )\n if min:\n new_attr.extend(\n [\"min_\" + attr for attr in self.__getattr__(attr_name)]\n )\n out.__setattr__(attr_name, new_attr)\n return out",
"def stats(self, ops=(min, max, np.median, sum)):\n names = [op.__name__ for op in ops]\n ops = [_zero_on_type_error(op) for op in ops]\n columns = [[op(column) for op in ops] for column in self.columns]\n table = self._with_columns(columns)\n stats = table._unused_label('statistic')\n table[stats] = names\n table.move_to_start(stats)\n return table",
"def print_summary(stim_table):\n print(\n '{:<20}{:>15}{:>15}\\n'.format('Colname', 'No. conditions', 'Mean N/cond')\n )\n for colname in stim_table.columns:\n conditions, occurrences = np.unique(\n np.nan_to_num(stim_table[colname]), return_counts = True\n )\n print(\n '{:<20}{:>15}{:>15.1f}'.format(\n colname, len(conditions), np.mean(occurrences)\n )\n )",
"def dictionary_of_metrics(items):\n\n 'Initialize dict'\n d = {}\n\n # Add 'mean' key to the dict with the value of the mean calculate by using\n # np.mean rounded to 2 decimal places\n d['mean'] = round(np.mean(items), 2)\n\n # Add 'median' key to the dict with the value of the median calculate by\n # using np.median rounded to 2 decimal places\n d['median'] = round(np.median(items), 2)\n\n # Add 'var' key to the dict with the value of the varience calculate by\n # using np.var rounded to 2 decimal places\n d['var'] = round(np.var(items, ddof=1), 2)\n\n # Add 'std' key to the dict with the value of the standard deviation\n # calculate by using np.std to 2 decimal places\n d['std'] = round(np.std(items, ddof=1), 2)\n\n # Add 'min' key to the dict with the value of the minimum calculate by\n # using np.min to 2 decimal places\n d['min'] = round(np.min(items), 2)\n\n # Add 'max' key to the dict with the value of the maximum calculate by\n # using np.max to 2 decimal places\n d['max'] = round(np.max(items), 2)\n\n # returns dictionary, d\n return d",
"def _calculate_stats(values, factor=1):\n result = {'min': min(values) * factor,\n 'max': max(values) * factor,\n 'sum': sum(values) * factor,\n 'mean': 0,\n 'stddev': 0}\n\n if values:\n mean = sum(values) / float(len(values))\n result['mean'] = factor * mean\n result['stddev'] = (\n factor * math.sqrt((1.0 / (len(values) - 1))\n * sum((x - mean) ** 2 for x in values)))\n\n return result",
"def _array_stats(data):\n statdict = {}\n statdict['Minimum'] = np.ma.min(data)\n statdict['Maximum'] = np.ma.max(data)\n statdict['Mean'] = np.ma.mean(data)\n statdict['Median'] = np.ma.median(data)\n statdict['Standard_Deviation'] = np.ma.std(data)\n statdict['Variance'] = np.ma.var(data)\n\n return statdict",
"def test_summarize(self):\n measurement = self.measurement(self.metric())\n self.assertEqual(\n {\n \"count\": {\"value\": None, \"status\": None},\n \"start\": measurement[\"start\"],\n \"end\": measurement[\"end\"],\n },\n measurement.summarize(),\n )",
"def summarize_as_table(self):\n h = human_readable_size\n h_throughput = human_readable_throughput\n table = [\n ['Total Time (seconds)', '%.3f' % self.total_time,\n self.std_dev_total_time],\n ['Maximum Memory', h(self.max_memory), h(self.std_dev_max_memory)],\n ['Maximum CPU (percent)', '%.1f' % self.max_cpu,\n self.std_dev_max_cpu],\n ['Maximum Sent Throughput', h_throughput(self.max_sent_throughput),\n h_throughput(self.max_sent_throughput)],\n ['Maximum Recv Throughput', h_throughput(self.max_recv_throughput),\n h_throughput(self.max_recv_throughput)],\n ['Average Memory', h(self.average_memory),\n h(self.std_dev_average_memory)],\n ['Average CPU (percent)', '%.1f' % self.average_cpu,\n self.std_dev_average_cpu],\n ['Average Sent Throughput',\n h_throughput(self.average_sent_throughput),\n h_throughput(self.average_sent_throughput)],\n ['Average Recv Throughput',\n h_throughput(self.average_recv_throughput),\n h_throughput(self.average_recv_throughput)],\n ]\n return tabulate(\n table,\n headers=[\n 'Metric over %s run(s)' % (self.total_files),\n 'Mean',\n 'Standard Deviation'\n ],\n tablefmt=\"grid\"\n )"
]
| [
"0.6258448",
"0.6139533",
"0.6088402",
"0.5927841",
"0.5914616",
"0.5914616",
"0.58495516",
"0.5825066",
"0.5823076",
"0.58073944",
"0.58008504",
"0.5683719",
"0.5677983",
"0.5675944",
"0.56542504",
"0.56505287",
"0.5644049",
"0.5593806",
"0.5590825",
"0.5570467",
"0.5566631",
"0.5546246",
"0.5536537",
"0.552716",
"0.55242825",
"0.5515214",
"0.5490905",
"0.54767746",
"0.5460087",
"0.5451349"
]
| 0.7691301 | 0 |
tests passing 1D data into sigma_points | def test_julier_sigma_points_1D():
kappa = 0.
sp = JulierSigmaPoints(1, kappa)
#ukf = UKF(dim_x=1, dim_z=1, dt=0.1, hx=None, fx=None, kappa=kappa)
Wm, Wc = sp.weights()
assert np.allclose(Wm, Wc, 1e-12)
assert len(Wm) == 3
mean = 5
cov = 9
Xi = sp.sigma_points(mean, cov)
xm, ucov = unscented_transform(Xi, Wm, Wc, 0)
# sum of weights*sigma points should be the original mean
m = 0.0
for x, w in zip(Xi, Wm):
m += x*w
assert abs(m-mean) < 1.e-12
assert abs(xm[0] - mean) < 1.e-12
assert abs(ucov[0,0]-cov) < 1.e-12
assert Xi.shape == (3,1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_simplex_sigma_points_1D():\n\n sp = SimplexSigmaPoints(1)\n\n #ukf = UKF(dim_x=1, dim_z=1, dt=0.1, hx=None, fx=None, kappa=kappa)\n\n Wm, Wc = sp.weights()\n assert np.allclose(Wm, Wc, 1e-12)\n assert len(Wm) == 2\n\n mean = 5\n cov = 9\n\n Xi = sp.sigma_points(mean, cov)\n xm, ucov = unscented_transform(Xi, Wm, Wc, 0)\n\n # sum of weights*sigma points should be the original mean\n m = 0.0\n for x, w in zip(Xi, Wm):\n m += x*w\n\n assert abs(m-mean) < 1.e-12\n assert abs(xm[0] - mean) < 1.e-12\n assert abs(ucov[0,0]-cov) < 1.e-12\n\n assert Xi.shape == (2,1)",
"def test_random_points(self, data):\n pts = Points(data)\n\n if pts.data.size == 0:\n assert pts.data.shape == (0, 2)\n return\n\n np.testing.assert_array_equal(pts.data, data)\n assert pts.data.ndim == data.ndim\n assert pts._view_data.ndim == data.ndim\n assert pts.data.size == data.size\n assert len(pts.selected_data) == 0",
"def sigma_points(self, x, P):\n \n \n assert x.ndim == 2 and x.shape[1], \"x must be a column vector\"\n \n n = self.n\n \n if np.isscalar(P):\n P = np.eye(n)*P\n else:\n P = np.asarray(P)\n \n lambda_ = self.alpha**2 * (n + self.kappa) - n\n U = self.sqrt((lambda_ + n)*P)\n \n sigmas = np.zeros((n, 2*n+1))\n x0 = x[:, 0]\n sigmas[:,0] = x0\n for k in range(n):\n sigmas[:, k+1] = self.subtract(x0, -U[k])\n sigmas[:, n+k+1] = self.subtract(x0, U[k])\n \n return sigmas",
"def test_points_calculation(self):\n\n assert self.test_shape.points == [\n (1030.0, 525.0),\n (1030.0, 475.0),\n (970.0, 475.0),\n (970.0, 525.0),\n ]",
"def test_sigma_plot():\n\n x = np.array([[1, 2]])\n P = np.array([[2, 1.2],\n [1.2, 2]])\n kappa = .1\n\n # if kappa is larger, than points shoudld be closer together\n\n sp0 = JulierSigmaPoints(n=2, kappa=kappa)\n sp1 = JulierSigmaPoints(n=2, kappa=kappa*1000)\n sp2 = MerweScaledSigmaPoints(n=2, kappa=0, beta=2, alpha=1e-3)\n sp3 = SimplexSigmaPoints(n=2)\n\n w0, _ = sp0.weights()\n w1, _ = sp1.weights()\n w2, _ = sp2.weights()\n w3, _ = sp3.weights()\n\n Xi0 = sp0.sigma_points(x, P)\n Xi1 = sp1.sigma_points(x, P)\n Xi2 = sp2.sigma_points(x, P)\n Xi3 = sp3.sigma_points(x, P)\n\n assert max(Xi1[:,0]) > max(Xi0[:,0])\n assert max(Xi1[:,1]) > max(Xi0[:,1])\n\n if DO_PLOT:\n plt.figure()\n for i in range(Xi0.shape[0]):\n plt.scatter((Xi0[i,0]-x[0, 0])*w0[i] + x[0, 0],\n (Xi0[i,1]-x[0, 1])*w0[i] + x[0, 1],\n color='blue', label='Julier low $\\kappa$')\n\n for i in range(Xi1.shape[0]):\n plt.scatter((Xi1[i, 0]-x[0, 0]) * w1[i] + x[0,0],\n (Xi1[i, 1]-x[0, 1]) * w1[i] + x[0,1],\n color='green', label='Julier high $\\kappa$')\n # for i in range(Xi2.shape[0]):\n # plt.scatter((Xi2[i, 0] - x[0, 0]) * w2[i] + x[0, 0],\n # (Xi2[i, 1] - x[0, 1]) * w2[i] + x[0, 1],\n # color='red')\n for i in range(Xi3.shape[0]):\n plt.scatter((Xi3[i, 0] - x[0, 0]) * w3[i] + x[0, 0],\n (Xi3[i, 1] - x[0, 1]) * w3[i] + x[0, 1],\n color='black', label='Simplex')\n\n stats.plot_covariance_ellipse([1, 2], P)",
"def _sample(self, points: Iterable[float]) -> np.array:\n pass",
"def guassian_point_process(x0, y0, xSigma, ySigma, nPoints):\n x = np.random.normal(loc=x0, scale=xSigma, size=(nPoints,))\n y = np.random.normal(loc=y0, scale=ySigma, size=(nPoints,))\n return x, y",
"def test_two_round_data_points(self):\r\n values = [2, 3]\r\n expect_mean_result = 2.5\r\n expected_sd_result = .5\r\n result = multipoint_mean_sd(values)\r\n\r\n self.assertEqual(expect_mean_result, result['mean_result'])\r\n self.assertEqual(expected_sd_result, result['sd_result'])",
"def ScalePoints(points, sigma = 0.02):\n assert(points.shape[1]==3)\n\n scale = np.random.uniform(1-sigma, 1+sigma)\n scale_matrix = np.array([[scale, 0, 0],\n [0, scale, 0],\n [0, 0, scale]])\n scaled = np.dot(points, scale_matrix)\n\n return scaled",
"def test_point_with_zero_value_is_good():\n point = np.array([0, 20])\n image_dimensions = np.array([100, 100])\n assert point_within_dimensions(point, image_dimensions)",
"def test_pointnum1():\n shape = paramak.CapsuleVacuumVessel(outer_start_point=(0, 0), radius=300, thickness=10)\n assert len(shape.points) == 12\n assert len(shape.processed_points) == 13",
"def test_sample(self):\n seed = 5\n space = Space()\n probs = (0.1, 0.2, 0.3, 0.4)\n categories = (\"asdfa\", 2, 3, 4)\n dim1 = Categorical(\"yolo\", OrderedDict(zip(categories, probs)), shape=(2, 2))\n space.register(dim1)\n dim2 = Integer(\"yolo2\", \"uniform\", -3, 6)\n space.register(dim2)\n dim3 = Real(\"yolo3\", \"norm\", 0.9)\n space.register(dim3)\n\n point = space.sample(seed=seed)\n rng = check_random_state(seed)\n test_point = [\n dict(\n yolo=dim1.sample(seed=rng)[0],\n yolo2=dim2.sample(seed=rng)[0],\n yolo3=dim3.sample(seed=rng)[0],\n )\n ]\n assert len(point) == len(test_point) == 1\n assert len(point[0].params) == len(test_point[0]) == 3\n assert np.all(point[0].params[\"yolo\"] == test_point[0][\"yolo\"])\n assert point[0].params[\"yolo2\"] == test_point[0][\"yolo2\"]\n assert point[0].params[\"yolo3\"] == test_point[0][\"yolo3\"]\n\n points = space.sample(2, seed=seed)\n rng = check_random_state(seed)\n points1 = dim1.sample(2, seed=rng)\n points2 = dim2.sample(2, seed=rng)\n points3 = dim3.sample(2, seed=rng)\n test_points = [\n dict(yolo=points1[0], yolo2=points2[0], yolo3=points3[0]),\n dict(yolo=points1[1], yolo2=points2[1], yolo3=points3[1]),\n ]\n assert len(points) == len(test_points) == 2\n for i in range(2):\n assert len(points[i].params) == len(test_points[i]) == 3\n assert np.all(points[i].params[\"yolo\"] == test_points[i][\"yolo\"])\n assert points[i].params[\"yolo2\"] == test_points[i][\"yolo2\"]\n assert points[i].params[\"yolo3\"] == test_points[i][\"yolo3\"]",
"def test_randn_normal_distribution():\n\n seed = 28041995\n pts = 10**5\n alpha = 0.05\n expected_mean = 0.0\n expected_var = 1.0\n\n dpnp.random.seed(seed)\n res = dpnp.asnumpy(dpnp.random.randn(pts))\n var = numpy.var(res)\n mean = numpy.mean(res)\n assert math.isclose(var, expected_var, abs_tol=0.03)\n assert math.isclose(mean, expected_mean, abs_tol=0.03)",
"def sigma_points(xm, P, kappa):\n n = xm.size\n Xi = np.zeros((n, 2 * n + 1)) # sigma points = col of Xi\n W = np.zeros(2 * n + 1)\n Xi[:, 0, None] = xm\n W[0] = kappa / (n + kappa)\n\n U = cholesky((n + kappa) * P) # U'*U = (n+kappa)*P\n\n for k in range(n):\n Xi[:, k + 1, None] = xm + U[k, None, :].T # row of U\n W[k + 1] = 1 / (2 * (n + kappa))\n\n for k in range(n):\n Xi[:, n + k + 1, None] = xm - U[k, None, :].T\n W[n + k + 1] = 1 / (2 * (n + kappa))\n\n return Xi, W",
"def test_many_round_data_points(self):\r\n values = [1, 1, 3, 5, 8, 3, 9, 2, 6, 2]\r\n expect_mean_result = 4\r\n expected_sd_result = 2.72029\r\n result = multipoint_mean_sd(values)\r\n\r\n self.assertEqual(expect_mean_result, result['mean_result'])\r\n self.assertAlmostEqual(expected_sd_result, result['sd_result'], 4)",
"def test_pointnum2():\n shape = paramak.CapsuleVacuumVessel(outer_start_point=(100, -100), radius=400, thickness=25)\n assert len(shape.points) == 12\n assert len(shape.processed_points) == 13",
"def test_data():\n x = np.array([8, 67, 79, 10, 52, 53, 98, 34, 15, 58], dtype=float)\n y = np.array([24, 87, 48, 94, 98, 66, 14, 24, 60, 16], dtype=float)\n z = np.array([0.064, 4.489, 6.241, 0.1, 2.704, 2.809, 9.604, 1.156,\n 0.225, 3.364], dtype=float)\n\n return x, y, z",
"def generate_2D_gaussian(num_points, mu, sigma=np.eye(2)):\n\tdata_set = np.array([])\n\tfor i in range(len(mu)):\n\t\tdata = np.random.multivariate_normal(mean=mu[i], cov=sigma, size=num_points)\n\t\tif data_set.size == 0:\n\t\t\tdata_set = data\n\t\telse:\n\t\t\tdata_set = np.concatenate([data_set,data])\n\treturn data_set",
"def test_pooled_sd(self):\r\n exp = pooled_standard_deviation(self.pooled_sd_input_1)\r\n self.assertEqual(self.pooled_sd_result, exp)",
"def fill_features(input_point, sigma=0.1):\n # to fill in f, array of features w shape (863, 1)\n f = np.zeros(X.shape[0])\n # iterate over every example to compare\n for i in range(m):\n f[i] = gaussian_kernel(input_point, X[i], sigma=sigma)\n return f",
"def test_many_decimal_data_points(self):\r\n values = [3.14, 42, 2.718281, 1.41421, 10]\r\n expect_mean_result = 11.854498\r\n expected_sd_result = 15.36621\r\n result = multipoint_mean_sd(values)\r\n\r\n self.assertAlmostEqual(expect_mean_result, result['mean_result'], 4)\r\n self.assertAlmostEqual(expected_sd_result, result['sd_result'], 4)",
"def data_preprocessing(points):\n mean_coords = points.mean(0)\n points -= mean_coords\n \n max_norm = np.max(np.linalg.norm(points, axis = 1))\n points /= max_norm\n\n return points, mean_coords, max_norm",
"def test_data_is_scaled():\n atom = ATOMClassifier(X_bin, y_bin, random_state=1)\n atom.run(\"SGD\")\n assert sum(atom.sgd.predict(X_bin)) > 0 # Always 0 if not scaled",
"def test_processed_points_calculation(self):\n\n assert self.test_shape.processed_points == [\n (1030.0, 525.0, \"straight\"),\n (1030.0, 475.0, \"straight\"),\n (970.0, 475.0, \"straight\"),\n (970.0, 525.0, \"straight\"),\n (1030.0, 525.0, \"straight\"),\n ]",
"def JitterPoints(points, sigma=0.01): \n C = 3\n assert(points.shape[1] == C)\n\n randJitters = np.random.uniform(-sigma, sigma, size = points.shape)\n return points + randJitters",
"def test_scaling():\n rng = np.random.RandomState(42)\n shape = (400, 10)\n u = rng.standard_normal(size=shape)\n mean = 100 * rng.uniform(size=shape[1]) + 1\n Y = u + mean\n Y_, mean_ = mean_scaling(Y)\n assert_almost_equal(Y_.mean(0), 0, 5)\n assert_almost_equal(mean_, mean, 0)\n assert Y.std() > 1",
"def test_shape_inputs(self, eta, cutpoints, sigma, expected):\n categorical = _OrderedProbit.dist(\n eta=eta,\n cutpoints=cutpoints,\n sigma=sigma,\n )\n p = categorical.owner.inputs[3].eval()\n assert p.shape == expected",
"def select_sigma_points_square_root(\n self,\n input_mean: torch.Tensor,\n input_scale_tril: types.ScaleTrilTorch,\n ) -> torch.Tensor:\n\n N, dim = input_mean.shape\n assert input_scale_tril.shape == (N, dim, dim)\n assert dim == self._dim\n\n # Compute matrix root, offsets for sigma points\n #\n # Note that we offset with the row vectors, so we need an upper-triangular\n # cholesky decomposition [1].\n #\n # [1] https://www.cs.ubc.ca/~murphyk/Papers/Julier_Uhlmann_mar04.pdf\n matrix_root = np.sqrt(dim + self._lambd) * input_scale_tril.transpose(-1, -2)\n assert matrix_root.shape == (N, dim, dim)\n\n sigma_point_offsets = input_mean.new_zeros((N, 2 * dim + 1, dim))\n sigma_point_offsets[:, 1 : 1 + dim] = matrix_root\n sigma_point_offsets[:, 1 + dim :] = -matrix_root\n\n # Create & return matrix of sigma points\n sigma_points: torch.Tensor = input_mean[:, None, :] + sigma_point_offsets\n assert sigma_points.shape == (N, 2 * dim + 1, dim)\n return sigma_points",
"def dataset_handling_with_standardisation(init_data):\n #\n ##Maximum number of points = 72 , keep around 80 values for even number\n max_len = 80\n ##Fluxes, Standardisation is done over 1 type of feature\n data = init_data.loc[:, [u'fluxes_0', u'fluxes_1', u'fluxes_2', u'fluxes_3', u'fluxes_4', u'fluxes_5']].values\n zp_array_flux = []\n for dat in data:\n n_data = []\n for ii in range(len(dat)):\n n_data = np.append(n_data, np.pad(dat[ii], (0, max_len * 5 - len(dat[ii])), 'constant', constant_values=0))\n n_data = QuantileTransformer(output_distribution='uniform').fit_transform(n_data.reshape(-1, 1)).flatten()\n zp_array_flux.append(n_data)\n zp_array_flux = np.array(zp_array_flux)\n print(zp_array_flux.shape)\n\n ##Fluxerrors, Standardisation is done over 1 type of feature\n data = init_data.loc[:,\n [u'fluxerrs_0', u'fluxerrs_1', u'fluxerrs_2', u'fluxerrs_3', u'fluxerrs_4', u'fluxerrs_5']].values\n zp_array_flux_error = []\n for dat in data:\n n_data = []\n for ii in range(len(dat)):\n n_data = np.append(n_data, np.pad(dat[ii], (0, max_len * 5 - len(dat[ii])), 'constant', constant_values=0))\n n_data = QuantileTransformer(output_distribution='uniform').fit_transform(n_data.reshape(-1, 1)).flatten()\n zp_array_flux_error.append(n_data)\n zp_array_flux_error = np.array(zp_array_flux_error)\n print(zp_array_flux_error.shape)\n\n ##Time, Standardisation is done over 1 type of feature\n data = init_data.loc[:, [u'mjds_0', u'mjds_1', u'mjds_2', u'mjds_3', u'mjds_4', u'mjds_5']].values\n zp_array_mjds = []\n for dat in data:\n n_data = []\n for ii in range(len(dat)):\n n_data = np.append(n_data, np.pad(dat[ii], (0, max_len * 5 - len(dat[ii])), 'constant', constant_values=0))\n n_data = QuantileTransformer(output_distribution='uniform').fit_transform(n_data.reshape(-1, 1)).flatten()\n zp_array_mjds.append(n_data)\n zp_array_mjds = np.array(zp_array_mjds)\n print(zp_array_mjds.shape)\n\n ##Concatenating everything\n zp_data = np.c_[zp_array_flux, zp_array_flux_error, zp_array_mjds]\n\n ##Adding redshift info// Gal pos info might be necessary to remove\n zp_data = np.c_[\n zp_data, init_data.loc[:, [u'gal_b', u'gal_l', u'hostgal_photoz', u'hostgal_photoz_err', u'hostgal_specz', u'mwebv']].values]\n print(zp_data.shape)\n\n ##Load labels and convert to integer\n labels = init_data.loc[:, [u'target']].values\n labels = labels.flatten()\n labels_name = np.array([6, 15, 16, 42, 52, 53, 62, 64, 65, 67, 88, 90, 92, 95, 99])\n [np.place(labels, labels == labels_name[i], [i]) for i in range(len(labels_name))]\n\n return [zp_data, labels]",
"def test_scalar_index(self):\n dset = self.f.create_dataset('x', shape=(), dtype='f')\n out = dset[...]\n self.assertIsInstance(out, np.ndarray)\n self.assertEqual(out.shape, ())"
]
| [
"0.7060399",
"0.6355961",
"0.6252628",
"0.61673874",
"0.6088816",
"0.60293305",
"0.601793",
"0.60165256",
"0.5899794",
"0.5886947",
"0.58542943",
"0.5822399",
"0.5793257",
"0.57819784",
"0.5781244",
"0.57780117",
"0.57685053",
"0.5725073",
"0.571579",
"0.570493",
"0.56952965",
"0.56875825",
"0.56870466",
"0.56832576",
"0.5670829",
"0.566915",
"0.56534487",
"0.56378925",
"0.56271833",
"0.55985475"
]
| 0.68443924 | 1 |
tests passing 1D data into sigma_points | def test_simplex_sigma_points_1D():
sp = SimplexSigmaPoints(1)
#ukf = UKF(dim_x=1, dim_z=1, dt=0.1, hx=None, fx=None, kappa=kappa)
Wm, Wc = sp.weights()
assert np.allclose(Wm, Wc, 1e-12)
assert len(Wm) == 2
mean = 5
cov = 9
Xi = sp.sigma_points(mean, cov)
xm, ucov = unscented_transform(Xi, Wm, Wc, 0)
# sum of weights*sigma points should be the original mean
m = 0.0
for x, w in zip(Xi, Wm):
m += x*w
assert abs(m-mean) < 1.e-12
assert abs(xm[0] - mean) < 1.e-12
assert abs(ucov[0,0]-cov) < 1.e-12
assert Xi.shape == (2,1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_julier_sigma_points_1D():\n\n kappa = 0.\n sp = JulierSigmaPoints(1, kappa)\n\n #ukf = UKF(dim_x=1, dim_z=1, dt=0.1, hx=None, fx=None, kappa=kappa)\n\n Wm, Wc = sp.weights()\n assert np.allclose(Wm, Wc, 1e-12)\n assert len(Wm) == 3\n\n mean = 5\n cov = 9\n\n Xi = sp.sigma_points(mean, cov)\n xm, ucov = unscented_transform(Xi, Wm, Wc, 0)\n\n # sum of weights*sigma points should be the original mean\n m = 0.0\n for x, w in zip(Xi, Wm):\n m += x*w\n\n assert abs(m-mean) < 1.e-12\n assert abs(xm[0] - mean) < 1.e-12\n assert abs(ucov[0,0]-cov) < 1.e-12\n\n assert Xi.shape == (3,1)",
"def test_random_points(self, data):\n pts = Points(data)\n\n if pts.data.size == 0:\n assert pts.data.shape == (0, 2)\n return\n\n np.testing.assert_array_equal(pts.data, data)\n assert pts.data.ndim == data.ndim\n assert pts._view_data.ndim == data.ndim\n assert pts.data.size == data.size\n assert len(pts.selected_data) == 0",
"def sigma_points(self, x, P):\n \n \n assert x.ndim == 2 and x.shape[1], \"x must be a column vector\"\n \n n = self.n\n \n if np.isscalar(P):\n P = np.eye(n)*P\n else:\n P = np.asarray(P)\n \n lambda_ = self.alpha**2 * (n + self.kappa) - n\n U = self.sqrt((lambda_ + n)*P)\n \n sigmas = np.zeros((n, 2*n+1))\n x0 = x[:, 0]\n sigmas[:,0] = x0\n for k in range(n):\n sigmas[:, k+1] = self.subtract(x0, -U[k])\n sigmas[:, n+k+1] = self.subtract(x0, U[k])\n \n return sigmas",
"def test_points_calculation(self):\n\n assert self.test_shape.points == [\n (1030.0, 525.0),\n (1030.0, 475.0),\n (970.0, 475.0),\n (970.0, 525.0),\n ]",
"def test_sigma_plot():\n\n x = np.array([[1, 2]])\n P = np.array([[2, 1.2],\n [1.2, 2]])\n kappa = .1\n\n # if kappa is larger, than points shoudld be closer together\n\n sp0 = JulierSigmaPoints(n=2, kappa=kappa)\n sp1 = JulierSigmaPoints(n=2, kappa=kappa*1000)\n sp2 = MerweScaledSigmaPoints(n=2, kappa=0, beta=2, alpha=1e-3)\n sp3 = SimplexSigmaPoints(n=2)\n\n w0, _ = sp0.weights()\n w1, _ = sp1.weights()\n w2, _ = sp2.weights()\n w3, _ = sp3.weights()\n\n Xi0 = sp0.sigma_points(x, P)\n Xi1 = sp1.sigma_points(x, P)\n Xi2 = sp2.sigma_points(x, P)\n Xi3 = sp3.sigma_points(x, P)\n\n assert max(Xi1[:,0]) > max(Xi0[:,0])\n assert max(Xi1[:,1]) > max(Xi0[:,1])\n\n if DO_PLOT:\n plt.figure()\n for i in range(Xi0.shape[0]):\n plt.scatter((Xi0[i,0]-x[0, 0])*w0[i] + x[0, 0],\n (Xi0[i,1]-x[0, 1])*w0[i] + x[0, 1],\n color='blue', label='Julier low $\\kappa$')\n\n for i in range(Xi1.shape[0]):\n plt.scatter((Xi1[i, 0]-x[0, 0]) * w1[i] + x[0,0],\n (Xi1[i, 1]-x[0, 1]) * w1[i] + x[0,1],\n color='green', label='Julier high $\\kappa$')\n # for i in range(Xi2.shape[0]):\n # plt.scatter((Xi2[i, 0] - x[0, 0]) * w2[i] + x[0, 0],\n # (Xi2[i, 1] - x[0, 1]) * w2[i] + x[0, 1],\n # color='red')\n for i in range(Xi3.shape[0]):\n plt.scatter((Xi3[i, 0] - x[0, 0]) * w3[i] + x[0, 0],\n (Xi3[i, 1] - x[0, 1]) * w3[i] + x[0, 1],\n color='black', label='Simplex')\n\n stats.plot_covariance_ellipse([1, 2], P)",
"def _sample(self, points: Iterable[float]) -> np.array:\n pass",
"def guassian_point_process(x0, y0, xSigma, ySigma, nPoints):\n x = np.random.normal(loc=x0, scale=xSigma, size=(nPoints,))\n y = np.random.normal(loc=y0, scale=ySigma, size=(nPoints,))\n return x, y",
"def test_two_round_data_points(self):\r\n values = [2, 3]\r\n expect_mean_result = 2.5\r\n expected_sd_result = .5\r\n result = multipoint_mean_sd(values)\r\n\r\n self.assertEqual(expect_mean_result, result['mean_result'])\r\n self.assertEqual(expected_sd_result, result['sd_result'])",
"def ScalePoints(points, sigma = 0.02):\n assert(points.shape[1]==3)\n\n scale = np.random.uniform(1-sigma, 1+sigma)\n scale_matrix = np.array([[scale, 0, 0],\n [0, scale, 0],\n [0, 0, scale]])\n scaled = np.dot(points, scale_matrix)\n\n return scaled",
"def test_point_with_zero_value_is_good():\n point = np.array([0, 20])\n image_dimensions = np.array([100, 100])\n assert point_within_dimensions(point, image_dimensions)",
"def test_pointnum1():\n shape = paramak.CapsuleVacuumVessel(outer_start_point=(0, 0), radius=300, thickness=10)\n assert len(shape.points) == 12\n assert len(shape.processed_points) == 13",
"def test_sample(self):\n seed = 5\n space = Space()\n probs = (0.1, 0.2, 0.3, 0.4)\n categories = (\"asdfa\", 2, 3, 4)\n dim1 = Categorical(\"yolo\", OrderedDict(zip(categories, probs)), shape=(2, 2))\n space.register(dim1)\n dim2 = Integer(\"yolo2\", \"uniform\", -3, 6)\n space.register(dim2)\n dim3 = Real(\"yolo3\", \"norm\", 0.9)\n space.register(dim3)\n\n point = space.sample(seed=seed)\n rng = check_random_state(seed)\n test_point = [\n dict(\n yolo=dim1.sample(seed=rng)[0],\n yolo2=dim2.sample(seed=rng)[0],\n yolo3=dim3.sample(seed=rng)[0],\n )\n ]\n assert len(point) == len(test_point) == 1\n assert len(point[0].params) == len(test_point[0]) == 3\n assert np.all(point[0].params[\"yolo\"] == test_point[0][\"yolo\"])\n assert point[0].params[\"yolo2\"] == test_point[0][\"yolo2\"]\n assert point[0].params[\"yolo3\"] == test_point[0][\"yolo3\"]\n\n points = space.sample(2, seed=seed)\n rng = check_random_state(seed)\n points1 = dim1.sample(2, seed=rng)\n points2 = dim2.sample(2, seed=rng)\n points3 = dim3.sample(2, seed=rng)\n test_points = [\n dict(yolo=points1[0], yolo2=points2[0], yolo3=points3[0]),\n dict(yolo=points1[1], yolo2=points2[1], yolo3=points3[1]),\n ]\n assert len(points) == len(test_points) == 2\n for i in range(2):\n assert len(points[i].params) == len(test_points[i]) == 3\n assert np.all(points[i].params[\"yolo\"] == test_points[i][\"yolo\"])\n assert points[i].params[\"yolo2\"] == test_points[i][\"yolo2\"]\n assert points[i].params[\"yolo3\"] == test_points[i][\"yolo3\"]",
"def test_randn_normal_distribution():\n\n seed = 28041995\n pts = 10**5\n alpha = 0.05\n expected_mean = 0.0\n expected_var = 1.0\n\n dpnp.random.seed(seed)\n res = dpnp.asnumpy(dpnp.random.randn(pts))\n var = numpy.var(res)\n mean = numpy.mean(res)\n assert math.isclose(var, expected_var, abs_tol=0.03)\n assert math.isclose(mean, expected_mean, abs_tol=0.03)",
"def sigma_points(xm, P, kappa):\n n = xm.size\n Xi = np.zeros((n, 2 * n + 1)) # sigma points = col of Xi\n W = np.zeros(2 * n + 1)\n Xi[:, 0, None] = xm\n W[0] = kappa / (n + kappa)\n\n U = cholesky((n + kappa) * P) # U'*U = (n+kappa)*P\n\n for k in range(n):\n Xi[:, k + 1, None] = xm + U[k, None, :].T # row of U\n W[k + 1] = 1 / (2 * (n + kappa))\n\n for k in range(n):\n Xi[:, n + k + 1, None] = xm - U[k, None, :].T\n W[n + k + 1] = 1 / (2 * (n + kappa))\n\n return Xi, W",
"def test_many_round_data_points(self):\r\n values = [1, 1, 3, 5, 8, 3, 9, 2, 6, 2]\r\n expect_mean_result = 4\r\n expected_sd_result = 2.72029\r\n result = multipoint_mean_sd(values)\r\n\r\n self.assertEqual(expect_mean_result, result['mean_result'])\r\n self.assertAlmostEqual(expected_sd_result, result['sd_result'], 4)",
"def test_pointnum2():\n shape = paramak.CapsuleVacuumVessel(outer_start_point=(100, -100), radius=400, thickness=25)\n assert len(shape.points) == 12\n assert len(shape.processed_points) == 13",
"def test_data():\n x = np.array([8, 67, 79, 10, 52, 53, 98, 34, 15, 58], dtype=float)\n y = np.array([24, 87, 48, 94, 98, 66, 14, 24, 60, 16], dtype=float)\n z = np.array([0.064, 4.489, 6.241, 0.1, 2.704, 2.809, 9.604, 1.156,\n 0.225, 3.364], dtype=float)\n\n return x, y, z",
"def generate_2D_gaussian(num_points, mu, sigma=np.eye(2)):\n\tdata_set = np.array([])\n\tfor i in range(len(mu)):\n\t\tdata = np.random.multivariate_normal(mean=mu[i], cov=sigma, size=num_points)\n\t\tif data_set.size == 0:\n\t\t\tdata_set = data\n\t\telse:\n\t\t\tdata_set = np.concatenate([data_set,data])\n\treturn data_set",
"def test_pooled_sd(self):\r\n exp = pooled_standard_deviation(self.pooled_sd_input_1)\r\n self.assertEqual(self.pooled_sd_result, exp)",
"def fill_features(input_point, sigma=0.1):\n # to fill in f, array of features w shape (863, 1)\n f = np.zeros(X.shape[0])\n # iterate over every example to compare\n for i in range(m):\n f[i] = gaussian_kernel(input_point, X[i], sigma=sigma)\n return f",
"def test_many_decimal_data_points(self):\r\n values = [3.14, 42, 2.718281, 1.41421, 10]\r\n expect_mean_result = 11.854498\r\n expected_sd_result = 15.36621\r\n result = multipoint_mean_sd(values)\r\n\r\n self.assertAlmostEqual(expect_mean_result, result['mean_result'], 4)\r\n self.assertAlmostEqual(expected_sd_result, result['sd_result'], 4)",
"def data_preprocessing(points):\n mean_coords = points.mean(0)\n points -= mean_coords\n \n max_norm = np.max(np.linalg.norm(points, axis = 1))\n points /= max_norm\n\n return points, mean_coords, max_norm",
"def test_data_is_scaled():\n atom = ATOMClassifier(X_bin, y_bin, random_state=1)\n atom.run(\"SGD\")\n assert sum(atom.sgd.predict(X_bin)) > 0 # Always 0 if not scaled",
"def test_processed_points_calculation(self):\n\n assert self.test_shape.processed_points == [\n (1030.0, 525.0, \"straight\"),\n (1030.0, 475.0, \"straight\"),\n (970.0, 475.0, \"straight\"),\n (970.0, 525.0, \"straight\"),\n (1030.0, 525.0, \"straight\"),\n ]",
"def JitterPoints(points, sigma=0.01): \n C = 3\n assert(points.shape[1] == C)\n\n randJitters = np.random.uniform(-sigma, sigma, size = points.shape)\n return points + randJitters",
"def test_scaling():\n rng = np.random.RandomState(42)\n shape = (400, 10)\n u = rng.standard_normal(size=shape)\n mean = 100 * rng.uniform(size=shape[1]) + 1\n Y = u + mean\n Y_, mean_ = mean_scaling(Y)\n assert_almost_equal(Y_.mean(0), 0, 5)\n assert_almost_equal(mean_, mean, 0)\n assert Y.std() > 1",
"def test_shape_inputs(self, eta, cutpoints, sigma, expected):\n categorical = _OrderedProbit.dist(\n eta=eta,\n cutpoints=cutpoints,\n sigma=sigma,\n )\n p = categorical.owner.inputs[3].eval()\n assert p.shape == expected",
"def select_sigma_points_square_root(\n self,\n input_mean: torch.Tensor,\n input_scale_tril: types.ScaleTrilTorch,\n ) -> torch.Tensor:\n\n N, dim = input_mean.shape\n assert input_scale_tril.shape == (N, dim, dim)\n assert dim == self._dim\n\n # Compute matrix root, offsets for sigma points\n #\n # Note that we offset with the row vectors, so we need an upper-triangular\n # cholesky decomposition [1].\n #\n # [1] https://www.cs.ubc.ca/~murphyk/Papers/Julier_Uhlmann_mar04.pdf\n matrix_root = np.sqrt(dim + self._lambd) * input_scale_tril.transpose(-1, -2)\n assert matrix_root.shape == (N, dim, dim)\n\n sigma_point_offsets = input_mean.new_zeros((N, 2 * dim + 1, dim))\n sigma_point_offsets[:, 1 : 1 + dim] = matrix_root\n sigma_point_offsets[:, 1 + dim :] = -matrix_root\n\n # Create & return matrix of sigma points\n sigma_points: torch.Tensor = input_mean[:, None, :] + sigma_point_offsets\n assert sigma_points.shape == (N, 2 * dim + 1, dim)\n return sigma_points",
"def dataset_handling_with_standardisation(init_data):\n #\n ##Maximum number of points = 72 , keep around 80 values for even number\n max_len = 80\n ##Fluxes, Standardisation is done over 1 type of feature\n data = init_data.loc[:, [u'fluxes_0', u'fluxes_1', u'fluxes_2', u'fluxes_3', u'fluxes_4', u'fluxes_5']].values\n zp_array_flux = []\n for dat in data:\n n_data = []\n for ii in range(len(dat)):\n n_data = np.append(n_data, np.pad(dat[ii], (0, max_len * 5 - len(dat[ii])), 'constant', constant_values=0))\n n_data = QuantileTransformer(output_distribution='uniform').fit_transform(n_data.reshape(-1, 1)).flatten()\n zp_array_flux.append(n_data)\n zp_array_flux = np.array(zp_array_flux)\n print(zp_array_flux.shape)\n\n ##Fluxerrors, Standardisation is done over 1 type of feature\n data = init_data.loc[:,\n [u'fluxerrs_0', u'fluxerrs_1', u'fluxerrs_2', u'fluxerrs_3', u'fluxerrs_4', u'fluxerrs_5']].values\n zp_array_flux_error = []\n for dat in data:\n n_data = []\n for ii in range(len(dat)):\n n_data = np.append(n_data, np.pad(dat[ii], (0, max_len * 5 - len(dat[ii])), 'constant', constant_values=0))\n n_data = QuantileTransformer(output_distribution='uniform').fit_transform(n_data.reshape(-1, 1)).flatten()\n zp_array_flux_error.append(n_data)\n zp_array_flux_error = np.array(zp_array_flux_error)\n print(zp_array_flux_error.shape)\n\n ##Time, Standardisation is done over 1 type of feature\n data = init_data.loc[:, [u'mjds_0', u'mjds_1', u'mjds_2', u'mjds_3', u'mjds_4', u'mjds_5']].values\n zp_array_mjds = []\n for dat in data:\n n_data = []\n for ii in range(len(dat)):\n n_data = np.append(n_data, np.pad(dat[ii], (0, max_len * 5 - len(dat[ii])), 'constant', constant_values=0))\n n_data = QuantileTransformer(output_distribution='uniform').fit_transform(n_data.reshape(-1, 1)).flatten()\n zp_array_mjds.append(n_data)\n zp_array_mjds = np.array(zp_array_mjds)\n print(zp_array_mjds.shape)\n\n ##Concatenating everything\n zp_data = np.c_[zp_array_flux, zp_array_flux_error, zp_array_mjds]\n\n ##Adding redshift info// Gal pos info might be necessary to remove\n zp_data = np.c_[\n zp_data, init_data.loc[:, [u'gal_b', u'gal_l', u'hostgal_photoz', u'hostgal_photoz_err', u'hostgal_specz', u'mwebv']].values]\n print(zp_data.shape)\n\n ##Load labels and convert to integer\n labels = init_data.loc[:, [u'target']].values\n labels = labels.flatten()\n labels_name = np.array([6, 15, 16, 42, 52, 53, 62, 64, 65, 67, 88, 90, 92, 95, 99])\n [np.place(labels, labels == labels_name[i], [i]) for i in range(len(labels_name))]\n\n return [zp_data, labels]",
"def test_scalar_index(self):\n dset = self.f.create_dataset('x', shape=(), dtype='f')\n out = dset[...]\n self.assertIsInstance(out, np.ndarray)\n self.assertEqual(out.shape, ())"
]
| [
"0.68443924",
"0.6355961",
"0.6252628",
"0.61673874",
"0.6088816",
"0.60293305",
"0.601793",
"0.60165256",
"0.5899794",
"0.5886947",
"0.58542943",
"0.5822399",
"0.5793257",
"0.57819784",
"0.5781244",
"0.57780117",
"0.57685053",
"0.5725073",
"0.571579",
"0.570493",
"0.56952965",
"0.56875825",
"0.56870466",
"0.56832576",
"0.5670829",
"0.566915",
"0.56534487",
"0.56378925",
"0.56271833",
"0.55985475"
]
| 0.7060399 | 0 |
should work like a linear KF if problem is linear | def test_linear_1d():
def fx(x, dt):
F = np.array([[1., dt],
[0, 1]], dtype=float)
return np.dot(F, x)
def hx(x):
return np.array([x[0]])
dt = 0.1
points = MerweScaledSigmaPoints(2, .1, 2., -1)
kf = UKF(dim_x=2, dim_z=1, dt=dt, fx=fx, hx=hx, points=points)
kf.x = np.array([1, 2])
kf.P = np.array([[1, 1.1],
[1.1, 3]])
kf.R *= 0.05
kf.Q = np.array([[0., 0], [0., .001]])
z = np.array([2.])
kf.predict()
kf.update(z)
zs = []
for i in range(50):
z = np.array([i+randn()*0.1])
zs.append(z)
kf.predict()
kf.update(z)
print('K', kf.K.T)
print('x', kf.x) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def f(y):\n \n\n k = 1.0\n return y*(1-y)",
"def test_linear_2d_simplex():\n\n\n def fx(x, dt):\n F = np.array([[1, dt, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, dt],\n [0, 0, 0, 1]], dtype=float)\n\n return np.dot(F, x)\n\n def hx(x):\n return np.array([x[0], x[2]])\n\n\n dt = 0.1\n points = SimplexSigmaPoints(n=4)\n kf = UKF(dim_x=4, dim_z=2, dt=dt, fx=fx, hx=hx, points=points)\n\n\n kf.x = np.array([-1., 1., -1., 1])\n kf.P*=0.0001\n #kf.R *=0\n #kf.Q\n\n zs = []\n for i in range(20):\n z = np.array([i+randn()*0.1, i+randn()*0.1])\n zs.append(z)\n\n Ms, Ps = kf.batch_filter(zs)\n smooth_x, _, _ = kf.rts_smoother(Ms, Ps, dt=dt)\n\n if DO_PLOT:\n zs = np.asarray(zs)\n\n #plt.plot(zs[:,0])\n plt.plot(Ms[:,0])\n plt.plot(smooth_x[:,0], smooth_x[:,2])\n\n print(smooth_x)",
"def test_linear_2d_merwe():\n\n\n def fx(x, dt):\n F = np.array([[1, dt, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, dt],\n [0, 0, 0, 1]], dtype=float)\n\n return np.dot(F, x)\n\n def hx(x):\n return np.array([x[0], x[2]])\n\n\n dt = 0.1\n points = MerweScaledSigmaPoints(4, .1, 2., -1)\n kf = UKF(dim_x=4, dim_z=2, dt=dt, fx=fx, hx=hx, points=points)\n\n\n kf.x = np.array([-1., 1., -1., 1])\n kf.P*=0.0001\n #kf.R *=0\n #kf.Q\n\n zs = []\n for i in range(20):\n z = np.array([i+randn()*0.1, i+randn()*0.1])\n zs.append(z)\n\n Ms, Ps = kf.batch_filter(zs)\n smooth_x, _, _ = kf.rts_smoother(Ms, Ps, dt=dt)\n\n if DO_PLOT:\n plt.figure()\n zs = np.asarray(zs)\n plt.plot(zs[:,0], marker='+')\n plt.plot(Ms[:,0], c='b')\n plt.plot(smooth_x[:,0], smooth_x[:,2], c='r')\n print(smooth_x)",
"def spline_linear(x, f, x_k, x_ki):\n A = (x_ki - x) / (x_ki - x_k)\n B = (x - x_k) / (x_ki - x_k)\n \n return A*f(x_k) + B*f(x_ki)",
"def F(self, (k,t), (j,x), **params):\n return 0.*x",
"def f(X_,K_):\r\n return max(exp(X_)-K_,0)",
"def _fv(self):\n return self.beta * (self.x ** self.c)",
"def f(k):\n return k * k * k * k * pk(k, suppression) * spherical_jn(0, k * r)",
"def fit_full(self, K):\r\n pass",
"def kf_algorithm(self, u, y):\n # For the linear filter, x_estimated_state is the difference to the operating point\n cov_matrix_before = self.cov_matrix\n # 0. Calculate difference to operating point\n u = u - self.u_op\n if self.model_type == ModelType.EASY:\n x_est_before = self.x_estimated_state - self.operating_point[0:6].reshape((6, 1))\n # x_est_before.reshape((6, 1))\n else:\n x_est_before = self.x_estimated_state - self.operating_point.reshape((8, 1))\n # x_est_before.reshape((8, 1))\n if self.nOutputs == 3:\n y = y - self.operating_point[0:3].reshape(3, 1)\n elif self.nOutputs == 5:\n y = y - np.concatenate((self.operating_point[0:3], self.operating_point[6:8])).reshape(5, 1)\n # x_est_before = self.x_estimated_state - self.operating_point\n # 1. Prediction\n # predict the state by using the linearized system at the fixed operating point\n v_s = u[0][0] + u[1][0]\n v_d = u[0][0] - u[1][0]\n x_est_predict = self.Ak @ x_est_before + self.Bk @ u\n # predict the new covariance\n cov_matrix_predict = (self.Ak @ cov_matrix_before @ np.transpose(self.Ak)\n + self.Bk @ self.N @ np.transpose(self.Bk))\n # 2. Update\n # compute kalman gain\n Kl = (cov_matrix_predict @ np.transpose(self.Ck) @\n np.linalg.inv(self.Ck @ cov_matrix_predict @ np.transpose(self.Ck) + self.W))\n # update state\n if self.nOutputs == 3:\n y_est = x_est_predict[0:3,]\n elif self.nOutputs == 5:\n y_est = np.concatenate((x_est_predict[0:3], x_est_predict[6:8]))\n x_est_update = x_est_predict + Kl @ (y - y_est)\n # update covariance matrix (identity matrix must have as many lines as the Kalman gain\n cov_matrix_update = (np.eye(np.size(Kl, 0)) - Kl @ self.Ck) @ cov_matrix_predict\n # add again the operating point\n if self.model_type == ModelType.EASY:\n x_estimated_state = x_est_update + self.operating_point[0:6].reshape((6, 1))\n # self.x_estimated_state = x_estimated_state.reshape((1, 6))[0]\n else:\n x_estimated_state = x_est_update + self.operating_point.reshape((8, 1))\n # self.x_estimated_state = x_estimated_state.reshape((1, 8))[0]\n\n if self.should_check_limits:\n # check if the update step state needs to be changed because of limit crossing\n # if that is the case, correct the state and set the state of the simulation accordingly\n corrected_state = self.heliSim.get_limited_state_and_change_state_without_sim(np.transpose(x_estimated_state)[0],\n self.model_type)\n x_estimated_state = np.resize(corrected_state, (self.nStateVars, 1))\n self.x_estimated_state = x_estimated_state\n self.cov_matrix = cov_matrix_update\n # print(\"------\")\n # print(cov_matrix_predict)\n return x_estimated_state",
"def f(k):\n return k * k * k * pk(k, suppression) * spherical_jn(1, k * r)",
"def test_linear_2d_merwe_column():\n\n\n def fx(x, dt):\n F = np.array([[1, dt, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, dt],\n [0, 0, 0, 1]], dtype=float)\n\n return np.dot(F, x)\n\n def hx(x):\n return np.array([x[0], x[2]])\n\n\n dt = 0.1\n points = MerweScaledSigmaPoints2(4, .1, 2., -1)\n kf = UKF2(dim_x=4, dim_z=2, dt=dt, fx=fx, hx=hx, points=points)\n\n\n kf.x = np.array([[-1., 1., -1., 1]]).T\n kf.P*=0.0001\n #kf.R *=0\n #kf.Q\n\n zs = []\n for i in range(20):\n z = np.array([[i+randn()*0.1],\n [i+randn()*0.1]])\n zs.append(z)\n\n Ms, Ps = kf.batch_filter(zs)\n smooth_x, _, _ = kf.rts_smoother(Ms, Ps, dt=dt)\n\n if DO_PLOT:\n plt.figure()\n zs = np.asarray(zs)\n plt.plot(zs[:,0], marker='+', c='b')\n plt.plot(Ms[:,0], c='b')\n plt.plot(smooth_x[:,0], smooth_x[:,2], c='r')\n print(smooth_x)",
"def f(k):\n return k * k * k * k * pk(k, suppression) * spherical_jn(2, k * r)",
"def l1(P, q):\n\n m, n = P.size\n\n # Solve equivalent LP \n #\n # minimize [0; 1]' * [u; v]\n # subject to [P, -I; -P, -I] * [u; v] <= [q; -q]\n #\n # maximize -[q; -q]' * z \n # subject to [P', -P']*z = 0\n # [-I, -I]*z + 1 = 0 \n # z >= 0 \n \n c = matrix(n*[0.0] + m*[1.0])\n h = matrix([q, -q])\n\n def Fi(x, y, alpha = 1.0, beta = 0.0, trans = 'N'): \n if trans == 'N':\n # y := alpha * [P, -I; -P, -I] * x + beta*y\n u = P*x[:n]\n y[:m] = alpha * ( u - x[n:]) + beta*y[:m]\n y[m:] = alpha * (-u - x[n:]) + beta*y[m:]\n\n else:\n # y := alpha * [P', -P'; -I, -I] * x + beta*y\n y[:n] = alpha * P.T * (x[:m] - x[m:]) + beta*y[:n]\n y[n:] = -alpha * (x[:m] + x[m:]) + beta*y[n:]\n\n\n def Fkkt(W): \n\n # Returns a function f(x, y, z) that solves\n #\n # [ 0 0 P' -P' ] [ x[:n] ] [ bx[:n] ]\n # [ 0 0 -I -I ] [ x[n:] ] [ bx[n:] ]\n # [ P -I -W1^2 0 ] [ z[:m] ] = [ bz[:m] ]\n # [-P -I 0 -W2 ] [ z[m:] ] [ bz[m:] ]\n #\n # On entry bx, bz are stored in x, z.\n # On exit x, z contain the solution, with z scaled (W['di'] .* z is\n # returned instead of z). \n\n d1, d2 = W['d'][:m], W['d'][m:]\n D = 4*(d1**2 + d2**2)**-1\n A = P.T * spdiag(D) * P\n lapack.potrf(A)\n\n def f(x, y, z):\n\n x[:n] += P.T * ( mul( div(d2**2 - d1**2, d1**2 + d2**2), x[n:]) \n + mul( .5*D, z[:m]-z[m:] ) )\n lapack.potrs(A, x)\n\n u = P*x[:n]\n x[n:] = div( x[n:] - div(z[:m], d1**2) - div(z[m:], d2**2) + \n mul(d1**-2 - d2**-2, u), d1**-2 + d2**-2 )\n\n z[:m] = div(u-x[n:]-z[:m], d1)\n z[m:] = div(-u-x[n:]-z[m:], d2)\n\n return f\n\n\n # Initial primal and dual points from least-squares solution.\n\n # uls minimizes ||P*u-q||_2; rls is the LS residual.\n uls = +q\n lapack.gels(+P, uls)\n rls = P*uls[:n] - q \n\n # x0 = [ uls; 1.1*abs(rls) ]; s0 = [q;-q] - [P,-I; -P,-I] * x0\n x0 = matrix( [uls[:n], 1.1*abs(rls)] ) \n s0 = +h\n Fi(x0, s0, alpha=-1, beta=1) \n\n # z0 = [ (1+w)/2; (1-w)/2 ] where w = (.9/||rls||_inf) * rls \n # if rls is nonzero and w = 0 otherwise.\n if max(abs(rls)) > 1e-10: \n w = .9/max(abs(rls)) * rls\n else: \n w = matrix(0.0, (m,1))\n z0 = matrix([.5*(1+w), .5*(1-w)])\n\n dims = {'l': 2*m, 'q': [], 's': []}\n sol = solvers.conelp(c, Fi, h, dims, kktsolver = Fkkt, \n primalstart={'x': x0, 's': s0}, dualstart={'z': z0})\n return sol['x'][:n]",
"def f(k):\n return k * k * pk(k, suppression) * spherical_jn(0, k * r)",
"def _m_to_F_on_basis(self, la):\n Sym = self._kBoundedRing.ambient()\n kB = Sym.kBoundedSubspace(self.k, t=1)\n h = kB.khomogeneous()\n ks = kB.kschur()\n return sum( h(ks(x)).coefficient(la) * self(x) for x in PartitionsGreatestLE(sum(la), self.k))",
"def VFI(method) :\n iteration=0 # Iteration Counter\n converged = 0 # Convergence Flag|\n \n#----- Initial Settings \n v_update = zeros(n_grid)\n v_func = empty(n_grid)\n k_next_vec = empty(n_grid)\n run_time = empty(2)\n \n def obj(k_next) :\n \"\"\"\n This function is used in value function iteration.\n It represents the objective function to be maximized for one node (state) of current capitals.\n Resulting value is maximized one corresponding to next period's capital as a maximizer. \n Next period's value is computed by interpolation.\n \n Input : k_next (next period's capital)\n \n Output : value_vec (maximized value resulting from choosing optimal capital in the next period)\n \"\"\" \n \n if method==1 :\n value_vec = -1 * (log(A*k_current**(alpha) - k_next) + beta*linear_interp(k_grid,v_update,k_next))\n elif method==2 :\n value_vec = -1 * (log(A*k_current**(alpha) - k_next) + beta*quad_interp(k_grid,v_update,k_next))\n elif method==3 :\n value_vec = -1 * (log(A*k_current**(alpha) - k_next) + beta*cubic_interp(k_grid,v_update,k_next))\n \n return value_vec\n\n#----- Value function iteration\n start = time.time() # start time\n while converged==0 :\n index = 0\n for k_current in k_grid :\n k_next = fminbound(obj,k_grid[0],k_grid[-1])\n v_func[index] = (-1) * obj(k_next)\n k_next_vec[index] = k_next\n index = index + 1\n dist = abs(max(v_func - v_update))\n if dist<tol :\n converged = 1\n v_k, g_k = v_func, k_next_vec\n v_update = v_func\n print \"Iteration : \",iteration,\"\",\"Distance : \",dist # convergence process\n iteration = iteration + 1\n v_func = empty(n_grid) \n k_next_vec = empty(n_grid)\n \n end = time.time() # end time\n run_time[0],run_time[1] = runtime_cal(start,end) # total running time\n \n return v_k, g_k, run_time, iteration",
"def F(cst, x):\n [u0, v0, u1, v1, u2, v2, coeffs] = cst\n [u, v, g1, g2, g3] = x\n a = g1*u1 - u0\n b = g2*u2 - u0\n c = g3*u - u0\n l = g1*v1 - v0 \n m = g2*v2 - v0\n n = g3*v - v0\n r = g1 - 1\n s = g2 - 1\n t = g3 - 1\n return np.array([\n coeffs[0]*(a**2-l**2) + 2*coeffs[1]*(a*b-l*m) + coeffs[2]*(b**2-m**2) + 2*coeffs[3]*(a*c-l*n) + 2*coeffs[4]*(b*c-m*n) + c**2 - n**2,\n coeffs[0]*(l**2-r**2) + 2*coeffs[1]*(l*m-r*s) + coeffs[2]*(m**2-s**2) + 2*coeffs[3]*(l*n-r*t) + 2*coeffs[4]*(m*n-s*t) + n**2 - t**2,\n coeffs[0]*a*l + coeffs[1]*(l*b+m*a) + coeffs[2]*m*b + coeffs[3]*(l*c+n*a) + coeffs[4]*(m*c+b*n) + c*n,\n coeffs[0]*a*r + coeffs[1]*(r*b+s*a) + coeffs[2]*s*b + coeffs[3]*(r*c+t*a) + coeffs[4]*(s*c+b*t) + c*t,\n coeffs[0]*r*l + coeffs[1]*(l*s+m*r) + coeffs[2]*m*s + coeffs[3]*(l*t+n*r) + coeffs[4]*(m*t+s*n) + t*n \n ])",
"def kA_func(self):\n\n i1 = self.inl[0].to_flow()\n i2 = self.inl[1].to_flow()\n o1 = self.outl[0].to_flow()\n o2 = self.outl[1].to_flow()\n\n T_i1 = T_bp_p(i1)\n T_i2 = T_mix_ph(i2, T0=self.inl[1].T.val_SI)\n T_o1 = T_mix_ph(o1, T0=self.outl[0].T.val_SI)\n T_o2 = T_mix_ph(o2, T0=self.outl[1].T.val_SI)\n\n if T_i1 <= T_o2 and not self.inl[0].T.val_set:\n T_i1 = T_o2 + 0.5\n if T_i1 <= T_o2 and not self.outl[1].T.val_set:\n T_o2 = T_i1 - 0.5\n\n if T_o1 <= T_i2 and not self.outl[0].T.val_set:\n T_o1 = T_i2 + 1\n if T_o1 <= T_i2 and not self.inl[1].T.val_set:\n T_i2 = T_o1 - 1\n\n td_log = ((T_o1 - T_i2 - T_i1 + T_o2) /\n np.log((T_o1 - T_i2) / (T_i1 - T_o2)))\n\n return i1[0] * (o1[2] - i1[2]) + self.kA.val * td_log",
"def kf_simple(obs, obs_model):\r\n\r\n x = np.stack((obs_model, np.ones(len(obs_model))), axis=1)\r\n param_dim = x.shape[1]\r\n\r\n # parameters of Kalman filter\r\n delta = 0.0001 # large delta gives quicker change in beta.\r\n Vw = delta / (1 - delta) * np.eye(param_dim)\r\n Ve = 0.001\r\n\r\n y_hat = np.full(obs.shape[0], np.nan) # measurement prediction\r\n e = y_hat.copy() # measurement error\r\n Q = y_hat.copy() # variance-covariance matrix of e\r\n\r\n # For clarity, we denote R(t|t) by P(t). Initialize R, P and beta.\r\n R = np.zeros((param_dim, param_dim)) # variance-covariance matrix of beta: R(t|t-1)\r\n P = R.copy() # variance-covariance matrix of beta: R(t|t)\r\n beta = np.full((param_dim, x.shape[0]), np.nan)\r\n\r\n # Initialize to zero\r\n beta[:, 0] = 0\r\n\r\n # Given initial beta and R (and P)\r\n for t in range(len(obs)):\r\n if t > 0:\r\n beta[:, t] = beta[:, t - 1]\r\n R = P + Vw\r\n\r\n y_hat[t] = np.dot(x[t, :], beta[:, t])\r\n Q[t] = np.dot(x[t, :], np.dot(R, x[t, :])) + Ve\r\n e[t] = obs[t] - y_hat[t] # measurement prediction error\r\n K = np.dot(x[t, :], R) / Q[t] # Kalman gain\r\n beta[:, t] = beta[:, t] + np.dot(K, e[t]) # State update. Equation 3.11\r\n P = R - np.dot(np.dot(K.reshape(-1, 1), x[t, :].reshape(-1, 1).T), R) # State covariance update. Equation 3.12\r\n\r\n return beta, e, Q",
"def V_fit(x, a, b, c, d, e, f):\n x1 = x[0] # I\n x2 = x[1] # dT\n m = (a * x1 ** 2 + b * x1 + c)\n b = (d * x1 ** 2 + e * x1 + f)\n return m * x2 + b",
"def derivert(f, k):\r\n \r\n return(k*f)",
"def finite_diff(F, x0, v0, dt, M, K, C, T):\r\n\r\n ### INITIAL PARAMETERS ####\r\n\r\n # defining the number of steps of analysis = Ns\r\n Ns = int(T/dt)+1\r\n # step t0 (initial acceleration)\r\n ngl = np.shape(F)[0] # captures the number of degrees of freedom\r\n\r\n ### MODELLING THE DISPLACEMENTS ###\r\n\r\n x_before = np.zeros((ngl,1))\r\n # matrix that indicates the displacements, in each degree of freedom, along the time of \r\n # duration of analysis. Each column is a time step\r\n x = np.zeros((ngl, Ns))\r\n x[:,0] = x0[:,0]\r\n\r\n ### SOLVING INITIAL STEP ###\r\n\r\n # initial Force F0 is equivalent to the first column of the matrix of load vectors F along time\r\n aux1 = np.zeros((ngl,1))\r\n aux1[:,0] = np.copy(F[:,0])\r\n aux2 = aux1 - np.dot(C,v0) - np.dot(K,x0)\r\n a0 = np.dot(la.inv(M),aux2)\r\n # step t-1 (before initial condition)\r\n x_before = dt*dt*a0/2 - dt*v0 + x0 \r\n # step t+1 (after initial condition)\r\n C1 = M / (dt*dt) + C / (2*dt)\r\n C2 = K - 2*M / (dt*dt)\r\n C3 = M / (dt*dt) - C / (2*dt)\r\n aux3 = aux1 - np.dot(C2, x0) - np.dot(C3, x_before)\r\n x[:,1] = np.dot(la.inv(C1), aux3[:,0])\r\n\r\n ### INTEGRATING ALONG THE DURATION OS ANALYSIS ###\r\n\r\n i = 0\r\n aux4 = np.zeros((ngl,1))\r\n aux5 = np.zeros((ngl,1))\r\n aux6 = np.zeros((ngl,1))\r\n aux7 = np.zeros((ngl,1))\r\n for i in range(1,Ns-1):\r\n aux4[:,0] = np.copy(F[:,i])\r\n aux5[:,0] = np.copy(x[:,i])\r\n aux6[:,0] = np.copy(x[:,i-1])\r\n aux7[:,0] = np.copy(x[:,i+1])\r\n aux7 = np.dot(la.inv(C1), aux4 - np.dot(C2,aux5) - np.dot(C3,aux6))\r\n x[:,i+1] = np.copy(aux7[:,0])\r\n return x",
"def Kernel(x, y):\n\n Result = (np.dot(x_train[x, :], x_train[y, :])+1)**5 # Polynomial\n #Result = (np.dot(x_train[x, :], x_train[y, :])+1) # Linear\n #Gaussian\n \"\"\"\n sigma = 1\n if np.ndim(x_train[x, :]) == 1 and np.ndim(x_train[y, :]) == 1:\n Result = np.exp(- (np.linalg.norm(x_train[x, :] - x_train[y, :], 2)) ** 2 / (2 * sigma ** 2))\n elif (np.ndim(x_train[x, :]) > 1 and np.ndim(x_train[y, :]) == 1) or (np.ndim(x_train[x, :]) == 1 and np.ndim(x_train[y, :]) > 1):\n Result = np.exp(- (np.linalg.norm(x_train[x, :] - x_train[y, :], 2, axis=1) ** 2) / (2 * sigma ** 2))\n elif np.ndim(x_train[x, :]) > 1 and np.ndim(x_train[y, :]) > 1:\n Result = np.exp(- (np.linalg.norm(x[:, np.newaxis] - y[np.newaxis, :], 2, axis=2) ** 2) / (2 * sigma ** 2))\n \"\"\"\n return Result",
"def __call__(self,F):\n if self.initialized == False:\n raise Exception(\"Please run initialize method before calling or use the MoDeLoss wrapper.\")\n self.a0 = 1/2 * view(tf.reduce_sum(F*self.dm,axis=-1),(-1,1)) #integrate over mbins\n fit = tf.broadcast_to(self.a0,F.shape)# make boradcastable\n if self.order>0:\n self.a1 = 3/2 * view(tf.reduce_sum(F*self.m*self.dm,axis=-1),(-1,1))\n if self.max_slope is not None:\n fit = fit + self.max_slope*tf.tanh(self.a1/self.max_slope)*self.m\n else:\n fit = fit + self.a1*self.m\n if self.order>1:\n p2 = (3*self.m**2-1)*0.5\n self.a2 = 5/2 * view(tf.reduce_sum(F*p2*self.dm,axis=-1),(-1,1))\n if self.monotonic:\n fit = fit + self.a1/3.*tf.tanh(self.a2/(self.a1/3.+self.eps))*p2\n else:\n fit = fit+ self.a2*p2\n return fit",
"def K(self, X, X2, target):\r\n # model : a * dy/dt + b * y = U\r\n #lu=sqrt(3)/theta1 ly=1/theta2 theta2= a/b :thetay sigma2=1/(2ab) :sigmay\r\n\r\n X,slices = X[:,:-1],index_to_slices(X[:,-1])\r\n if X2 is None:\r\n X2,slices2 = X,slices\r\n else:\r\n X2,slices2 = X2[:,:-1],index_to_slices(X2[:,-1])\r\n\r\n\r\n #rdist = X[:,0][:,None] - X2[:,0][:,None].T\r\n rdist = X - X2.T\r\n ly=1/self.lengthscaleY\r\n lu=np.sqrt(3)/self.lengthscaleU\r\n #iu=self.input_lengthU #dimention of U\r\n\r\n Vu=self.varianceU\r\n Vy=self.varianceY\r\n\r\n # kernel for kuu matern3/2\r\n kuu = lambda dist:Vu * (1 + lu* np.abs(dist)) * np.exp(-lu * np.abs(dist))\r\n\r\n # kernel for kyy\r\n k1 = lambda dist:np.exp(-ly*np.abs(dist))*(2*lu+ly)/(lu+ly)**2\r\n k2 = lambda dist:(np.exp(-lu*dist)*(ly-2*lu+lu*ly*dist-lu**2*dist) + np.exp(-ly*dist)*(2*lu-ly) ) / (ly-lu)**2\r\n k3 = lambda dist:np.exp(-lu*dist) * ( (1+lu*dist)/(lu+ly) + (lu)/(lu+ly)**2 )\r\n kyy = lambda dist:Vu*Vy*(k1(dist) + k2(dist) + k3(dist))\r\n\r\n\r\n # cross covariance function\r\n kyu3 = lambda dist:np.exp(-lu*dist)/(lu+ly)*(1+lu*(dist+1/(lu+ly)))\r\n\r\n # cross covariance kyu\r\n kyup = lambda dist:Vu*Vy*(k1(dist)+k2(dist)) #t>0 kyu\r\n kyun = lambda dist:Vu*Vy*(kyu3(dist)) #t<0 kyu\r\n\r\n # cross covariance kuy\r\n kuyp = lambda dist:Vu*Vy*(kyu3(dist)) #t>0 kuy\r\n kuyn = lambda dist:Vu*Vy*(k1(dist)+k2(dist)) #t<0 kuy\r\n\r\n for i, s1 in enumerate(slices):\r\n for j, s2 in enumerate(slices2):\r\n for ss1 in s1:\r\n for ss2 in s2:\r\n if i==0 and j==0:\r\n target[ss1,ss2] = kuu(np.abs(rdist[ss1,ss2]))\r\n elif i==0 and j==1:\r\n #target[ss1,ss2] = np.where( rdist[ss1,ss2]>0 , kuyp(np.abs(rdist[ss1,ss2])), kuyn(np.abs(rdist[s1[0],s2[0]]) ) )\r\n target[ss1,ss2] = np.where( rdist[ss1,ss2]>0 , kuyp(np.abs(rdist[ss1,ss2])), kuyn(np.abs(rdist[ss1,ss2]) ) )\r\n elif i==1 and j==1:\r\n target[ss1,ss2] = kyy(np.abs(rdist[ss1,ss2]))\r\n else:\r\n #target[ss1,ss2] = np.where( rdist[ss1,ss2]>0 , kyup(np.abs(rdist[ss1,ss2])), kyun(np.abs(rdist[s1[0],s2[0]]) ) )\r\n target[ss1,ss2] = np.where( rdist[ss1,ss2]>0 , kyup(np.abs(rdist[ss1,ss2])), kyun(np.abs(rdist[ss1,ss2]) ) )\r\n\r\n #KUU = kuu(np.abs(rdist[:iu,:iu]))\r\n\r\n #KYY = kyy(np.abs(rdist[iu:,iu:]))\r\n\r\n #KYU = np.where(rdist[iu:,:iu]>0,kyup(np.abs(rdist[iu:,:iu])),kyun(np.abs(rdist[iu:,:iu]) ))\r\n\r\n #KUY = np.where(rdist[:iu,iu:]>0,kuyp(np.abs(rdist[:iu,iu:])),kuyn(np.abs(rdist[:iu,iu:]) ))\r\n\r\n #ker=np.vstack((np.hstack([KUU,KUY]),np.hstack([KYU,KYY])))\r\n\r\n #np.add(ker, target, target)\r",
"def solve_LF(self):\n self.u = zeros(self.N)\n self.u[0] = self.u0\n self.u[1] = self.u1\n u = self.u\n f= self.f\n dt = self.dt\n t = self.t\n N = self.N\n for n in xrange(1,N-1):\n u[n+1] = 2*dt*f(u[n],t[n]) + u[n-1]\n #return t,u",
"def verhulst(nb_init, t0, tf, eps, methode, gamma, K) :\n f=lambda y,t : gamma*y*(1-y/K)\n Y=meth_epsilon(nb_init, t0, tf, eps, f, methode)\n return Y",
"def f(self, X):\n\n return (X[0])**3 - (X[1])**2 + 1",
"def solve(self):"
]
| [
"0.6229389",
"0.6101894",
"0.6101571",
"0.6099031",
"0.6074229",
"0.6072425",
"0.59670156",
"0.59268576",
"0.5925382",
"0.5912769",
"0.5893324",
"0.58837515",
"0.588084",
"0.5869752",
"0.5866996",
"0.58545065",
"0.5852562",
"0.5850432",
"0.58104014",
"0.58003116",
"0.57745236",
"0.57729554",
"0.5769224",
"0.57634664",
"0.576057",
"0.5751261",
"0.57502437",
"0.57405585",
"0.5730808",
"0.57303995"
]
| 0.68978345 | 0 |
batch filter should accept missing data with None in the measurements | def test_batch_missing_data():
def fx(x, dt):
F = np.array([[1, dt, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, dt],
[0, 0, 0, 1]], dtype=float)
return np.dot(F, x)
def hx(x):
return np.array([x[0], x[2]])
dt = 0.1
points = MerweScaledSigmaPoints(4, .1, 2., -1)
kf = UKF(dim_x=4, dim_z=2, dt=dt, fx=fx, hx=hx, points=points)
kf.x = np.array([-1., 1., -1., 1])
kf.P*=0.0001
zs = []
for i in range(20):
z = np.array([i+randn()*0.1, i+randn()*0.1])
zs.append(z)
zs[2] = None
Rs = [1]*len(zs)
Rs[2] = None
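    # zs[2] is a deliberately missing measurement; batch_filter is expected to
    # skip the None entry during its update step (Rs mirrors the gap but is not passed here)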
Ms, Ps = kf.batch_filter(zs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def filter(self):\n self.data = self.data.loc[~self.data.isnull().any(1),:]",
"def exclude_empty_feats(self):\n for dataset in self:\n dataset.dropna(axis=1, how=\"all\", inplace=True)",
"def test_filter_function_none(self):\n self.es.register_filter(lambda x: False, ftype='none')\n self.assertTrue(self.es.streamfilter(self.data))\n self.es.register_filter(lambda x: True, ftype='none')\n self.assertFalse(self.es.streamfilter(self.data))",
"def run_batch_filter(self):\n if self.speed_data is None and self.flow_data is None and self.traveltime_data is None:\n print(\n 'Warning: The measurement data must be set before running the batch filter: use function self.set_meas_data()')\n\n # =======================================================================\n # the initial ensembles, which should have been set externally\n X_init = np.matrix(np.zeros((self.dim_state, self.num_ensembles)))\n print(\n 'Setting initial ensembles: rho {0}; qin {1}; qout {2}'.format(self.init_rho, self.init_qin, self.init_qout))\n for ens in range(0, self.num_ensembles):\n X_init[self.x_index['density'][0]:\n self.x_index['density'][self.num_cells - 1], ens] = self.init_rho\n X_init[self.x_index['qin'], ens] = self.init_qin\n X_init[self.x_index['qout'], ens] = self.init_qout\n\n # print('setted qin {0}; qout {1}'.format(X_init[self.x_index['qin'], ens], X_init[self.x_index['qout'], ens] ))\n # add noise to each ensemble\n X_init[:, ens] += np.matrix(np.random.multivariate_normal(\n np.zeros(self.dim_state), self.Q)).reshape((self.dim_state, 1))\n\n self.set_initial_ensembles(X_init)\n\n # =======================================================================\n # DEBUG\n # save the qin and qout in the corresponding probe data\n # save the initial state\n if self.__debug:\n self.qin_f.append(np.squeeze(np.array(self.X_f[self.x_index['qin'], :])).tolist())\n self.qin_a.append(np.squeeze(np.array(self.X_a[self.x_index['qin'], :])).tolist())\n self.qin_obs.append(np.nan)\n\n self.qout_f.append(np.squeeze(np.array(self.X_f[self.x_index['qout'], :])).tolist())\n self.qout_a.append(np.squeeze(np.array(self.X_a[self.x_index['qout'], :])).tolist())\n self.qout_obs.append(np.nan)\n\n # The enKF runs at the finest time grid\n # for each step, update the system\n for step in range(0, self.num_steps):\n\n # update status\n sys.stdout.write('\\r')\n sys.stdout.write('Status: filtering step {0}/{1}'.format(step, self.num_steps))\n sys.stdout.flush()\n # print('Status: filtering step {0}'.format(step))\n\n cur_time = (step + 1) * self.dur_steps\n\n # get the effective measurement\n eff_flow, eff_speed, eff_traveltime = self.__get_eff_meas(cur_time)\n\n # build the observation index\n self.y_index, self.dim_obs, y_obs, cov_noise = self.__build_obs_index(eff_flow, eff_speed, eff_traveltime)\n\n # update the estimate for this step\n est_state = self.update_estimate(y_obs, cov_noise, cur_time)\n\n # =======================================================================\n # DEBUG\n # save the qin and qout in the corresponding probe data\n # save the updated state\n if self.__debug:\n self.qin_f.append(np.squeeze(np.array(self.X_f[self.x_index['qin'], :])).tolist())\n self.qin_a.append(np.squeeze(np.array(self.X_a[self.x_index['qin'], :])).tolist())\n if 'flow' in self.y_index.keys() and self.__debug_entrance_sensor in self.y_index['flow'].keys():\n self.qin_obs.append(y_obs[self.y_index['flow'][self.__debug_entrance_sensor]])\n # print('y_index[flow]:{0}'.format(self.y_index['flow'].keys()))\n # print('y_obs[ y_index[flow][entrance] ]:{0}'.format(\n # y_obs[ self.y_index['flow'][self.__debug_entrance_sensor]],\n # self.__debug_entrance_sensor))\n else:\n self.qin_obs.append(np.nan)\n\n self.qout_f.append(np.squeeze(np.array(self.X_f[self.x_index['qout'], :])).tolist())\n self.qout_a.append(np.squeeze(np.array(self.X_a[self.x_index['qout'], :])).tolist())\n if 'flow' in self.y_index.keys() and self.__debug_exit_sensor in self.y_index['flow'].keys():\n 
self.qout_obs.append(y_obs[self.y_index['flow'][self.__debug_exit_sensor]])\n else:\n self.qout_obs.append(np.nan)\n # =======================================================================\n # save the estimated state\n self.est_state_all[:, step] = est_state\n\n # decouple and save into self.est_density, self.est_speed, self.est_queue, self.est_traveltime\n self.est_density[:, step] = est_state[0:self.num_cells, 0]\n\n # the speed is computed using the fundamental diagram\n for cell_id in range(0, self.num_cells):\n # use the static FD at this step\n self.est_speed[cell_id, step] = self.__rho2v(self.vm_cells[cell_id, 0], self.beta_cells[cell_id, 0],\n self.rhoc_cells[cell_id, 0], self.wc_cells[cell_id, 0],\n self.est_density[cell_id, step])\n\n # REMARK: the queue and travel time a post-processed from the speed field.\n # They are computed in cross_evaluation class for all algorithms\n # the queue length starts from the first cell with speed below queue_threshold to the end of road\n # index = (self.est_speed[:, step] <= self.queue_threshold)\n #\n # # filter out the outliers\n # index_smoothed = deepcopy(index)\n # outlier_max = 3\n # counter = 0\n # for i in range(0, len(index)):\n #\n # if index[i] == True:\n # # trigger the coutner\n # counter += 1\n # elif index[i] == False and counter != 0:\n # if counter <= outlier_max:\n # # found outliers\n # index_smoothed[ i-counter : i ] = False\n # # reset counter\n # counter = 0\n #\n # # if i != 0 and i != len(index)-1:\n # # if sum( index[i-1:i+3] ) >=2:\n # # index_smoothed[i] = True\n # # else:\n # # index_smoothed[i] = False\n # # elif i == 0:\n # # if sum(index[0: 5] ) >= 3:\n # # index_smoothed[i] = True\n # # else:\n # # index_smoothed[i] = False\n # # elif i == len(index)-1:\n # # if sum(index[ i-4 :len(index)]) >= 3:\n # # index_smoothed[i] = True\n # # else:\n # # index_smoothed[i] = False\n #\n # if sum(index_smoothed) <= 3: # use 4 to suppress false alarms\n # # if less or equal then 2 cells are in congestion, it may be caused by noise.\n # self.est_queue[step] = 0\n # else:\n # # if step > 105 and step < 115:\n # # print(sum(index_smoothed))\n # # print(index_smoothed)\n # # print(index)\n #\n # self.est_queue[step] = \\\n # self.len_cells*( self.num_cells - np.argmax(index_smoothed) )\n # # try:\n # # first_cong_cell_id = [x[0] for x in enumerate( self.est_speed[:,step] ) if x[1] < self.queue_threshold][0]\n # # except IndexError:\n # # # no congested cell\n # # first_cong_cell_id = self.num_cells\n # # # the estimated queue length\n # # self.est_queue[step] = self.len_cells*( self.num_cells - first_cong_cell_id )\n #\n # # the travel time estimate is computed by summing up the travel time in each cell\n # self.est_traveltime[step] = np.sum(self.len_cells/self.est_speed[:,step])\n\n\n # =======================================================================\n # DEBUG\n # plot the update\n if self.__debug:\n plot_len = 19\n # qin\n if False:\n if not np.isnan(self.qin_obs[-1]):\n fig1 = plt.figure(figsize=(10, 5), dpi=100)\n ax1 = fig1.add_subplot(111)\n positions_f = np.arange(0, len(self.qin_f)) - 0.1\n positions_a = np.arange(0, len(self.qin_a)) + 0.1\n positions_obs = np.arange(0, len(self.qin_obs))\n # predicted as red\n bp = ax1.boxplot(self.qin_f[-plot_len:],\n positions=positions_f[-plot_len:], widths=0.15,\n patch_artist=False)\n for box in bp['boxes']:\n # change outline color\n box.set(color='#FF4633', linewidth=1)\n # change fill color\n # box.set( facecolor = '#FF4633' )\n # corrected as green\n bp = 
ax1.boxplot(self.qin_a[-plot_len:],\n positions=positions_a[-plot_len:], widths=0.15, patch_artist=False)\n for box in bp['boxes']:\n # change outline color\n box.set(color='#07891B', linewidth=1)\n # change fill color\n # box.set( facecolor = '#07891B' )\n # measurement as blue\n ax1.scatter(positions_obs[-plot_len:], self.qin_obs[-plot_len:], color='b', marker='o', s=40,\n label='Observation')\n ax1.set_title('qin')\n # x_ticks = np.arange(0, len(self.qin_f))\n # ax1.set_xticks(x_ticks[-plot_len:])\n plt.show()\n\n # qout\n if False:\n if not np.isnan(self.qout_obs[-1]):\n fig2 = plt.figure(figsize=(10, 5), dpi=100)\n ax2 = fig2.add_subplot(111)\n positions_f = np.arange(0, len(self.qout_f)) - 0.1\n positions_a = np.arange(0, len(self.qout_a)) + 0.1\n positions_obs = np.arange(0, len(self.qout_obs))\n # predicted as red\n bp = ax2.boxplot(self.qout_f[-plot_len:], positions=positions_f[-plot_len:], widths=0.18,\n patch_artist=True)\n for box in bp['boxes']:\n # change outline color\n box.set(color='#7570b3', linewidth=1)\n # change fill color\n box.set(facecolor='#FF4633')\n # corrected as green\n bp = ax2.boxplot(self.qout_a[-plot_len:], positions=positions_a[-plot_len:], widths=0.18,\n patch_artist=True)\n for box in bp['boxes']:\n # change outline color\n box.set(color='#7570b3', linewidth=1)\n # change fill color\n box.set(facecolor='#07891B')\n # measurement as blue\n ax2.scatter(positions_obs[-plot_len:], self.qout_obs[-plot_len:], color='b', marker='o', s=30,\n label='Observation')\n ax2.set_title('qout')\n # x_ticks = np.arange(0, len(self.qout_f))\n # ax2.set_xticks(x_ticks[-plot_len:])\n\n plt.show()\n\n # plot the estimated qin and qout\n if self.__debug:\n if True:\n qin = np.squeeze(np.array(self.est_state_all[self.x_index['qin'], :]))\n qin_meas = np.array(self.qin_obs)[1:]\n print(len(qin), len(qin_meas))\n fig1 = plt.figure(figsize=(10, 5), dpi=100)\n ax1 = fig1.add_subplot(111)\n t = np.arange(len(qin))\n ax1.plot(t, qin, 'r-', label='Estimated')\n not_nan = ~np.isnan(qin_meas)\n ax1.plot(t[not_nan], qin_meas[not_nan], 'b', label='Measured')\n ax1.legend()\n ax1.grid(True)\n ax1.set_title('qin')\n\n plt.draw()\n\n if True:\n qout = np.squeeze(np.array(self.est_state_all[self.x_index['qout'], :]))\n qout_meas = np.array(self.qout_obs)[1:]\n fig2 = plt.figure(figsize=(10, 5), dpi=100)\n ax2 = fig2.add_subplot(111)\n t = np.arange(len(qout))\n ax2.plot(t, qout, 'r-', label='Estimated')\n not_nan = ~np.isnan(qout_meas)\n ax2.plot(t[not_nan], qout_meas[not_nan], 'b', label='Measured')\n ax2.set_title('qout')\n ax2.legend()\n ax2.grid(True)\n plt.draw()",
"def test_filter_with_empty_filters(mockdata, qfilter):\n assert len(qfilter.filter(mockdata)) == 100",
"def discard_none_targets(dataset):\r\n indices = []\r\n for (ii,sample) in enumerate(dataset):\r\n target = sample[1]\r\n if target is not None:\r\n indices.append(ii)\r\n\r\n return Subset(dataset,indices)",
"def removeIncompleteSamples(data):\n print(\"Removing incomplete samples...\")\n\n M = len(data)\n N = data[0].shape[0]\n samples_to_remove = []\n for n in range(N):\n for m in range(M):\n if pd.isnull(data[m].iloc[n][0]):\n samples_to_remove.append(n)\n break\n\n if len(samples_to_remove) > 0:\n print(\"A total of \" + str(len(samples_to_remove)) + \" sample(s) have at least a missing view and will be removed\")\n\n data_filt = [None]*M\n samples_to_keep = np.setdiff1d(range(N),samples_to_remove)\n for m in range(M):\n data_filt[m] = data[m].iloc[samples_to_keep]\n\n return data_filt",
"def setup_no_data_values(input_dataset, options):\n in_nodata = []\n if options.srcnodata:\n nds = list(map(float, options.srcnodata.split(',')))\n if len(nds) < input_dataset.RasterCount:\n in_nodata = (nds * input_dataset.RasterCount)[:input_dataset.RasterCount]\n else:\n in_nodata = nds\n else:\n for i in range(1, input_dataset.RasterCount+1):\n raster_no_data = input_dataset.GetRasterBand(i).GetNoDataValue()\n if raster_no_data is not None:\n in_nodata.append(raster_no_data)\n\n if options.verbose:\n print(\"NODATA: %s\" % in_nodata)\n\n return in_nodata",
"def filter_nan_samples(self, train_x, train_y):\n\n n_samples = train_x.shape[0]\n if n_samples != train_y.shape[0]:\n raise ValueError(\"x and y sample lengths don't match\")\n\n validity_array = np.zeros(n_samples)\n for i in range(n_samples):\n x_sample = train_x[i, :]\n y_sample = train_y[i, :]\n validity_array[i] = np.isfinite(x_sample).all() and np.isfinite(y_sample).all()\n\n mask = np.where(validity_array)[0]\n\n return train_x[mask, :], train_y[mask, :]",
"def test_non_finite_filter_1D(fitter, weights):\n\n x = np.array([1, 2, 3, 4, 5, 6, 7, 8])\n y = np.array([9, np.nan, 11, np.nan, 13, np.nan, 15, np.inf])\n\n m_init = models.Gaussian1D()\n fit = fitter()\n\n if weights is not None:\n weights[[1, 4]] = np.nan\n\n with pytest.warns(\n AstropyUserWarning,\n match=r\"Non-Finite input data has been removed by the fitter\",\n ):\n fit(m_init, x, y, filter_non_finite=True, weights=weights)",
"def filter_data(data: AnnData) -> None:\n\n assert \"passed_qc\" in data.obs\n data._inplace_subset_obs(data.obs[\"passed_qc\"].values)\n data._inplace_subset_var((data.var[\"n_cells\"] > 0).values)\n logger.info(\n \"After filteration, {nc} cells and {ng} genes are kept. Among {ng} genes, {nrb} genes are robust.\".format(\n nc=data.shape[0], ng=data.shape[1], nrb=data.var[\"robust\"].sum()\n )\n )",
"def collate_fn_remove_corrupted(batch):\n # Filter out all the Nones (corrupted examples)\n batch = list(filter(lambda x: x is not None, batch))\n return batch",
"def clean_data(x, null, drop_thresh):\n # Do not modify the original dataset\n x_clean = np.copy(x)\n \n # Vector holding, for each feature, the fraction of datapoints with a null value\n null_frac = (1/x_clean.shape[0]) * np.sum(x_clean==null, axis=0)\n # Boolean vector holding, for each feature, whether or not it needs to be kept\n column_to_keep = null_frac <= drop_thresh\n \n # Drop bad columns\n x_clean = x_clean[:, column_to_keep]\n \n # Vector of (list of) indices of columns where there are still null values\n columns_to_interpolate = np.argwhere(np.any(x_clean==null, axis=0))\n \n # For each of those columns, find the mean of non-null values, and substitute it to null values\n for col_list in columns_to_interpolate:\n # Extrapolate only entry of col_list\n col = col_list[0]\n \n # Boolean vector holding, for each row, whether or not it has a \"null\" entry at position \"col\"\n row_non_null = x_clean[:, col] != null\n # Find mean\n interpolation = np.mean(x_clean[row_non_null, col])\n # Substitute it to null values\n row_null = np.logical_not(row_non_null)\n x_clean[row_null, col] = interpolation\n \n return x_clean",
"def test_no_filter(self):\r\n\r\n d1 = {\"% IDENTITY\": \"97.6\"}\r\n d2 = {\"% IDENTITY\": \"0.0\"}\r\n d3 = {\"% IDENTITY\": \"100.0\"}\r\n\r\n self.assertTrue(no_filter(d1))\r\n self.assertTrue(no_filter(d2))\r\n self.assertTrue(no_filter(d3))",
"def missing_values():\n print('Missings in the train data:', train_data.isnull().sum())",
"def testPluginContainsNan(self):\n schema = self.dataset.makeMinimalSchema()\n task = lsst.meas.base.SingleFrameMeasurementTask(schema=schema, config=self.config)\n exposure, cat = self.dataset.realize(noise=100.0, schema=schema, randomSeed=2)\n source = cat[0]\n exposure.getMaskedImage().getImage().getArray()[int(source.getY()), int(source.getX())] = np.nan\n task.run(cat, exposure)\n self.assertTrue(source.get(self.algName + \"_flag\"))\n self.assertTrue(source.get(self.algName + \"_flag_containsNan\"))\n self.assertFalse(source.get(self.algName + \"_flag_edge\"))",
"def interpolate_none(self):\n\n # Reset processed data\n self.u_processed_mps = np.copy(self.u_mps)\n self.v_processed_mps = np.copy(self.v_mps)\n self.u_processed_mps[self.valid_data[0, :] == False] = np.nan\n self.v_processed_mps[self.valid_data[0, :] == False] = np.nan",
"def test_block_missing_batch(self):\n pass",
"def test_filter_sequence_false(self):\n self.es.register_filter(bar=list('baz'))\n self.assertFalse(self.es.streamfilter(self.data))",
"def filter_nb(a, filter_func_nb, *args):\n out = a.astype(np.float_)\n\n for col in range(out.shape[1]):\n idxs = np.flatnonzero(~np.isnan(a[:, col]))\n for i in idxs:\n if not filter_func_nb(i, col, a[i, col], *args):\n out[i, col] = np.nan\n return out",
"def filter_has_batch(self, queryset, name, value):\n q = Q(batch=None) | Q(batch='')\n\n if str2bool(value):\n return queryset.exclude(q)\n else:\n return queryset.filter(q)",
"def filter_data(self,tod,sel,medfilt_size):\n \n filters = np.zeros((tod.shape[0],tod.shape[1],tod.shape[2],int(np.sum(sel))))\n for ifeed in tqdm(self.feedlist,desc=f'{self.name}:filters:{self.source}'):\n feed_tod = tod[ifeed,...] \n for isb in range(tod.shape[1]):\n for ichan in range(tod.shape[2]):\n z = feed_tod[isb,ichan,sel]\n bad = np.where(np.isnan(z))[0]\n if len(bad) == len(z):\n continue\n if len(bad) > 0:\n good = np.where(np.isfinite(z))[0]\n \n nearest = [good[np.argmin(np.abs(good-b))] for b in bad]\n z[bad] = z[nearest]\n filters[ifeed,isb,ichan,:] = median_filter(z,medfilt_size)\n \n return filters",
"def empty_filter(item, *args, **kwargs):\n return True",
"def where_not_null(self, fields) -> \"SampleDataSet\":\n return SampleDataSet(self._data.dropna(subset=fields))",
"def remove_none_from_arrays(self):\r\n\r\n is_nan = numpy.isnan(self.y_values) # array of booleans, element is True if the corresponding element in\r\n # self.y_values is None\r\n\r\n self.x_values = self.x_values[numpy.logical_not(is_nan)]\r\n self.y_values = self.y_values[numpy.logical_not(is_nan)] # replace all None elements\r",
"def filter_empty_genes(data, *extra_data):\n gene_sums = np.array(utils.matrix_sum(data, axis=0)).reshape(-1)\n keep_genes_idx = gene_sums > 0\n data = select.select_cols(data, *extra_data, idx=keep_genes_idx)\n return data",
"def testNoneCanAppearInData(self):\n # Buffer makes comparison difficult because min/max aren't A & 9\n self.chart.auto_scale.buffer = 0\n self.AddToChart(self.chart, [1, None, 3])\n self.assertEqual(self.Param('chd'), 's:A_9')",
"def all_minimal():\n results = DatalabData.filter_minimal(None, None, None, False)\n return results",
"def sample(self):\n if self.do_filter:\n if len(self.t_filtered) > 0:\n yield self.t_filtered.popleft(), self.x_filtered.popleft()\n else:\n yield None, None\n else: # let's not filter\n if len(self.t) > 0:\n yield self.t.popleft(), self.x.popleft()\n else:\n yield None, None",
"def valid_minibatches(self):\n batch_size = self.params['batch_size']\n start_index = 0\n while start_index + batch_size < 500:\n end_index = start_index + batch_size\n yield self.input[start_index:end_index], self.y[start_index:end_index]\n start_index = end_index"
]
| [
"0.67278403",
"0.6175362",
"0.6124427",
"0.6085084",
"0.60801446",
"0.60551065",
"0.60479623",
"0.5973895",
"0.5940673",
"0.5825715",
"0.58188224",
"0.5808652",
"0.58032894",
"0.5801553",
"0.5752523",
"0.5751148",
"0.57105243",
"0.5690395",
"0.5634014",
"0.56285006",
"0.5626388",
"0.56168395",
"0.5616288",
"0.5609732",
"0.56046486",
"0.558592",
"0.5581215",
"0.5571128",
"0.5568803",
"0.556617"
]
| 0.6651685 | 1 |
plot overscan in 9x2 plots with 16 channels | def plot_overscan(overscan, img, TITLE, OUT_DIR):
fig = plt.figure(figsize=(20, 20))
gs0 = gridspec.GridSpec(3, 3)
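    # 3x3 grid of devices; each cell is split into two side-by-side panels
    # (segments 10-17 on the left, segments 0-7 on the right), with each trace
    # vertically offset by a multiple of 500 so the segments do not overlap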
for i, f in enumerate(img):
x = f.dev_index % 3
gs = gridspec.GridSpecFromSubplotSpec(
1, 2, wspace=0, subplot_spec=gs0[f.dev_index])
ax2 = plt.subplot(gs[0, 0])
for j in range(9, 17):
plt.plot(overscan[i, j - 1] + 500 *
(j - 8), label='seg' + str(j + 1))
plt.legend(fontsize=6, loc='upper center', ncol=4)
if x != 0:
ax2.set_yticklabels([])
plt.grid()
plt.xlim(0, 2100)
plt.ylim(0, 4500)
ax2.set_title(f.dev_name + ' (seg 10-17)')
ax1 = plt.subplot(gs[0, 1])
for j in range(1, 9):
plt.plot(overscan[i, j - 1] + 500 * j, label='seg' + str(j - 1))
plt.legend(fontsize=6, loc='upper center', ncol=4)
if x != 2:
ax1.set_yticklabels([])
if x == 2:
ax1.yaxis.tick_right()
plt.grid()
plt.xlim(0, 2100)
plt.ylim(0, 4500)
ax1.set_title(f.dev_name + ' (seg 0-7)')
fig.suptitle('Overscan ' + TITLE, y=0.94, size=20)
plt.subplots_adjust(wspace=0.05)
plt.savefig(OUT_DIR + TITLE + '_spatial.png')
plt.close(fig) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def plot_sample(x):\n plt.imshow(x[:,:,0])\n plt.title(\"gasf\")\n plt.colorbar()\n plt.show()\n\n plt.imshow(x[:,:,1])\n plt.title(\"gadf\")\n plt.colorbar()\n plt.show()\n\n plt.imshow(x[:,:,2])\n plt.title(\"mtf\")\n plt.colorbar()\n plt.show()",
"def plot_channels(self, data_array):\n\n plt.figure()\n for p in range(1, 7):\n plt.subplot(6, 1, p)\n plt.plot(data_array[p-1, :])\n\n plt.draw()\n plt.show()\n return",
"def plot_digits():\n digits = load_digits()\n for i in range(25):\n plt.subplot(5, 5, i + 1)\n plt.imshow(digits.images[i], cmap='binary')\n plt.axis('off')\n\n plt.show()",
"def visualize_scan(self):\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n ax.scatter(self.p1_points[:, 0], self.p1_points[:, 1], self.p1_points[:, 2], c='r')\n ax.scatter(self.p2_points[:, 0], self.p2_points[:, 1], self.p2_points[:, 2], c='g')\n ax.scatter(self.p3_points[:, 0], self.p3_points[:, 1], self.p3_points[:, 2], c='b')\n ax.scatter(self.p4_points[:, 0], self.p4_points[:, 1], self.p4_points[:, 2])\n\n ax.set_xlabel('x')\n ax.set_ylabel('y')\n ax.set_zlabel('z')\n plt.show()",
"def plot_autocorrs(self, axis=0, n_rows=4, n_cols=8):\n self.current_plot = 'multi'\n self.ax_zoomed = False\n \n bls = self.uv.d_uv_data['BASELINE']\n\n # Extract the relevant baselines using a truth array\n # bls = bls.tolist()\n bl_ids = set([256*i + i for i in range(1, n_rows * n_cols + 1)])\n bl_truths = np.array([(b in bl_ids) for b in bls])\n \n #print self.uv.d_uv_data['DATA'].shape\n #x_data = self.d_uv_data['DATA'][bl_truths,0,0,:,0,axis] # Baselines, freq and stokes\n #x_cplx = x_data[:,:,0] + 1j * x_data[:,:,1]\n\n x_cplx = self.stokes[axis][bl_truths]\n\n\n \n # Plot the figure\n #print self.uv.n_ant\n fig = self.sp_fig\n figtitle = '%s %s: %s -- %s'%(self.uv.telescope, self.uv.instrument, self.uv.source, self.uv.date_obs)\n for i in range(n_rows):\n for j in range(n_cols):\n ax = fig.add_subplot(n_rows, n_cols, i*n_cols + j +1)\n ax.set_title(self.uv.d_array_geometry['ANNAME'][i*n_cols + j], fontsize=10)\n #ax.set_title(\"%s %s\"%(i, j))\n \n x = x_cplx[i*n_cols+j::self.uv.n_ant]\n \n if self.scale_select.currentIndex() == 0 or self.scale_select.currentIndex() == 1:\n if x.shape[0] == self.uv.n_ant:\n self.plot_spectrum(ax, x, label_axes=False)\n else:\n self.plot_spectrum(ax, x, stat='max', label_axes=False)\n self.plot_spectrum(ax, x, stat='med', label_axes=False)\n self.plot_spectrum(ax, x, stat='min', label_axes=False)\n else:\n self.plot_spectrum(ax, x, label_axes=False)\n self.updateFreqAxis(ax)\n \n if i == n_rows-1:\n ax.set_xlabel('Freq')\n if j == 0:\n ax.set_ylabel('Amplitude')\n \n plt.ticklabel_format(style='sci', axis='y', scilimits=(0,0))\n plt.tick_params(axis='both', which='major', labelsize=10)\n plt.tick_params(axis='both', which='minor', labelsize=8)\n plt.xticks(rotation=30)\n \n plt.subplots_adjust(left=0.05, right=0.98, top=0.95, bottom=0.1, wspace=0.3, hspace=0.45)\n return fig, ax",
"def plot_overscan_diff(overscan, img, TITLE, OUT_DIR):\n fig = plt.figure(figsize=(20, 20))\n gs0 = gridspec.GridSpec(3, 3)\n\n for i, f in enumerate(img):\n x = f.dev_index % 3\n\n gs = gridspec.GridSpecFromSubplotSpec(\n 1, 2, wspace=0, subplot_spec=gs0[f.dev_index])\n ax2 = plt.subplot(gs[0, 0])\n for j in range(9, 17):\n plt.plot(overscan[i, j - 1] - overscan[i, 15] +\n 500 * (j - 8), label='seg' + str(j + 1))\n plt.legend(fontsize=6, loc='upper center', ncol=4)\n if(x != 0):\n ax2.set_yticklabels([])\n\n plt.grid()\n plt.xlim(0, 2100)\n plt.ylim(0, 4500)\n ax2.set_title(f.dev_name + ' (seg 10-17)')\n\n ax1 = plt.subplot(gs[0, 1])\n for j in range(1, 9):\n plt.plot(overscan[i, j - 1] - overscan[i, 7] +\n 500 * j, label='seg' + str(j - 1))\n plt.legend(fontsize=6, loc='upper center', ncol=4)\n if(x != 2):\n ax1.set_yticklabels([])\n if(x == 2):\n ax1.yaxis.tick_right()\n plt.grid()\n plt.xlim(0, 2100)\n plt.ylim(0, 4500)\n ax1.set_title(f.dev_name + ' (seg 0-7)')\n #\tax1.set_title('S-'+f[7:9]+' (seg 0-7)')\n\n fig.suptitle('Overscan (diff) ' + TITLE, y=0.94, size=20)\n plt.subplots_adjust(wspace=0.05)\n plt.savefig(OUT_DIR + TITLE + '_diff_spatial.png')\n plt.close(fig)",
"def plot(self, n_confs):\n \n import pandas as pd\n import numpy as np\n import matplotlib as mpl\n mpl.use('Agg')\n import matplotlib.pyplot as plt\n import csv\n \n n_iter = len(self.plot_data)\n \n data = np.ndarray((n_iter, n_confs+1))\n data[:,0] = [i[0] for i in self.plot_data]\n data[:,1:] = [i[1].detach().cpu().numpy() for i in self.plot_data]\n\n df=pd.DataFrame(data)\n names = ['iter']\n for i in range(n_confs): names.append(f'c{i+1}')\n df.columns = names\n df.to_csv(f\"{self.plot_name}.tab\", sep=\"\\t\", quoting=csv.QUOTE_NONE) \n\n d = data[:,1:].reshape(-1)\n d = d[~np.isnan(d)]\n mine = d.min() - 0.01\n for i in range(n_confs): \n data[:,i+1] -= mine\n \n df=pd.DataFrame(data)\n names = ['iter']\n for i in range(n_confs): names.append(f'c{i+1}')\n df.columns = names\n \n colors = (0,0,0)\n area = 10\n \n # Plot\n fig = plt.figure(figsize=(15, 15))\n ax = fig.add_subplot(1,1,1)\n for i in range(n_confs):\n ax.plot('iter', f'c{i+1}', data=df)\n ax.set_yscale('log')\n\n plt.xlabel('iter')\n plt.ylabel('loss')\n plt.savefig(f'{self.plot_name}.png')",
"def show(self):\n \n \n \n \n \n \n r = 4\n f, axarr = plt.subplots(r, r, figsize=(8,8))\n counter = 0\n for i in range(r):\n for j in range(r):\n temp = self.x[counter,:]\n counter += 1\n img = self.x[counter,:]\n axarr[i][j].imshow(img)\n #######################################################################\n # #\n # #\n # TODO: YOUR CODE HERE #\n # #\n # #\n #######################################################################",
"def gridPlot16(img_stack):\r\n F = plt.figure(figsize = (30,30))\r\n F.subplots_adjust(left = 0.05, right = 0.95)\r\n grid = ImageGrid(F, 142, nrows_ncols = (4,4), axes_pad = 0.0, share_all = True,\r\n label_mode = \"L\", cbar_location = \"top\", cbar_mode = \"single\")\r\n \r\n i = 0\r\n for img in img_stack[0:16]:\r\n im = grid[i].imshow(img, interpolation = \"nearest\", vmin = 0, vmax = 255)\r\n i += 1 \r\n grid.cbar_axes[0].colorbar(im)\r\n plt.savefig('gplot16.png')\r\n if 'gplot16.png' in os.listdir():\r\n plt.savefig('gplot16_2.png')\r\n# for cax in grid.cbar_axes:\r\n# cax.toggle_label(False)\r\n return",
"def plot_slice(self,res):\n x = np.linspace(0,1,res)\n y = np.linspace(0,1,res)\n X,Y = np.meshgrid(x,y)\n plt.figure()\n ax = plt.axes(projection = '3d')\n ax.plot_surface(X,Y,abs(self.psi)[:,:,math.floor(res/2)])\n plt.show()",
"def plot_chans(freq=True):\n f,ax = plt.subplots(4,3)\n for ant in range(12):\n snap.write_int('rst',1)\n snap.write_int('antenna',ant)\n snap.write_int('rst',0)\n\n time.sleep(ACC_LEN/(512*200e6)*1e3)\n arr = struct.unpack('>256Q',snap.read('spectrum',8*256))\n \n ax[ant%4][int(ant/4)].semilogy(FREQ,arr,'.-',lw=1)\n ax[ant%4][int(ant/4)].set_xlim(FREQ.max(), FREQ.min())\n ax[ant%4][int(ant/4)].set_title('Antenna %s'%ANT_LABELS[ant])\n plt.show()",
"def _plot_rawdata(self):\n fig, ax = plt.subplots(1, 1)\n ax.imshow(self.data, origin='top')\n ax.set_title('Gauss-Legendre Quadrature Grid')\n ax.set_xlabel('longitude index')\n ax.set_ylabel('latitude index')\n fig.tight_layout(pad=0.5)\n return fig,ax",
"def test_chroma_plot(self):\n plt.xlabel('chromaticity x')\n plt.ylabel('chromaticity y')\n plt.title(\"Standard Gamut\")\n plt.axis([-0.1, 0.8, -0.4, 0.65])\n plt.grid(True)\n mplh.plot_spectrum_locus_76()\n mplh.plot_colorspace_gamut(colorspaces.ACES, lines_color=\"c\",\n upvp_conversion=True)\n mplh.plot_colorspace_gamut(colorspaces.REC709, lines_color=\"m\",\n upvp_conversion=True)\n plt.legend(loc=4)\n if DISPLAY:\n plt.show()\n plt.clf()\n plt.close()",
"def show_filters(self):\n w_mat = np.transpose(self.sess.run(self.W_fc1))\n\n plt.figure(figsize=(10,10), facecolor='w', edgecolor='w')\n plot_positions = [(0,0),(0,1),(1,0),(1,1)]\n for ch in range(self.n_input_channels):\n grid,_ = ia.image_grid_RGB( w_mat,\n n_channels=self.n_input_channels,\n image_size=(self.y_res,self.x_res), n_x=6, n_y=6,\n channel_order=(ch,ch,ch), amplitude_scaling=(1,1,1),\n line_color=1, auto_scale=True, return_borders=False )\n colormax = np.abs(grid).max()\n with sns.axes_style(\"white\"):\n ax = plt.subplot2grid( (2,2), plot_positions[ch] )\n ax.imshow( grid[:,:,0], interpolation='nearest',\n cmap=plt.get_cmap('seismic'),\n clim=(-1*colormax,colormax) )\n ax.set_title(\"Hidden units, channel {}\".format(ch))\n plt.axis('tight')\n plt.axis('off')\n plt.tight_layout()",
"def plotgrid(data,d=10,shape=(30,30)):\n ion()\n gray()\n clf()\n for i in range(min(d*d,len(data))):\n subplot(d,d,i+1)\n row = data[i]\n if shape is not None: row = row.reshape(shape)\n imshow(row)\n ginput(1,timeout=0.1)",
"def plot_channels(dat, chanaxis=-1, otheraxis=-2):\n ax = []\n n_channels = dat.data.shape[chanaxis]\n for i, chan in enumerate(dat.axes[chanaxis]):\n if i == 0:\n a = plt.subplot(10, n_channels / 10 + 1, i + 1)\n else:\n a = plt.subplot(10, n_channels / 10 + 1, i + 1, sharex=ax[0], sharey=ax[0])\n ax.append(a)\n x, y = dat.axes[otheraxis], dat.data.take([i], chanaxis)\n a.plot(dat.axes[otheraxis], dat.data.take([i], chanaxis).squeeze())\n a.set_title(chan)\n plt.axvline(x=0)\n plt.axhline(y=0)",
"def plot_wav(decomp):\n \n plt.figure(figsize=(10,10))\n gs = GridSpec(4, 4)\n \n ax = plt.subplot(gs[0, 0])\n plt.imshow(decomp[0])\n plt.xticks([])\n plt.yticks([])\n \n ax = plt.subplot(gs[1,0])\n plt.imshow(decomp[1][0])\n plt.xticks([])\n plt.yticks([])\n \n ax = plt.subplot(gs[0, 1])\n plt.imshow(decomp[1][1])\n plt.xticks([])\n plt.yticks([])\n \n ax = plt.subplot(gs[1, 1])\n plt.imshow(decomp[1][2])\n plt.xticks([])\n plt.yticks([])\n \n ax = plt.subplot(gs[2:,:2])\n plt.imshow(decomp[2][0])\n plt.xticks([])\n plt.yticks([])\n \n ax = plt.subplot(gs[:2,2:])\n plt.imshow(decomp[2][1])\n plt.xticks([])\n plt.yticks([])\n \n ax = plt.subplot(gs[2:,2:])\n plt.imshow(decomp[2][2])\n plt.xticks([])\n plt.yticks([])\n \n plt.tight_layout()\n \n return",
"def plot_bp_exptimes(self, plot_spectrum = True, title = None, ylims = (1.0, 1e7),\n cc = [\"C0\", \"C2\", \"C3\"], iremove = []):\n\n # Reshape exposure times\n tmp = self.tpbpcs_rect.T\n\n # Calculate clean spectrum\n output = self.complete_spectrum_time()\n spectrum = output[2]\n\n fig, ax2 = plt.subplots(figsize = (16,5))\n\n if title is not None:\n ax2.set_title(title)\n\n icount = 0\n for ichan in range(len(CHANNELS)):\n\n data = []\n positions = []\n widths = []\n\n for j in range(len(self.bp_names[self.bp_chan == ichan])):\n\n nanmask = np.isfinite(tmp[icount,:])\n\n data.append(tmp[icount,nanmask])\n positions.append(np.mean(spectrum[0][icount]))\n widths.append(spectrum[0][icount][-1] - spectrum[0][icount][0] + np.mean(spectrum[1][icount][:]))\n color1 = cc[ichan]\n\n comp_str = \"$%i \\%%$\" %(100.*self.frac_bias_bp[icount])\n comp_str2 = \"$\\mathbf{%i {\\%%}}$\" %(100.*self.frac_bias_bp[icount])\n comp_str3 = \"$\\mathbf{%i}$\" %(100.*self.frac_bias_bp[icount])\n #ax2.text(positions[j], np.median(tmp[icount,:]) + 5.*np.std(tmp[icount,:]), comp_str2,\n # ha = \"center\", va = \"top\", fontsize = 12, color = \"w\")\n q_l, q_50, q_h, q_m, q_p = nsig_intervals(tmp[icount,nanmask], intvls=[0.25, 0.5, 0.75])\n #ax2.text(positions[j], ylims[1], comp_str2,\n # ha = \"center\", va = \"top\", color = color1, fontsize = 12)\n ax2.text(positions[j], q_50 + q_p, comp_str3,\n ha = \"center\", va = \"bottom\", color = color1)\n\n #ax2.plot(self.bandpasses[icount], [q_50, q_50], color = color1, zorder = 120, ls = \"dashed\")\n\n icount += 1\n\n positions = np.array(positions)\n widths = np.array(widths)\n bp1 = ax2.boxplot(data, sym = '', widths = widths, showfliers = False,\n boxprops = {\"color\" : color1, \"alpha\" : 0.5},\n whiskerprops = {\"color\" : color1, \"linewidth\" : 2.0},\n capprops = {\"color\" : color1, \"linewidth\" : 0.0},\n medianprops = {\"color\" : \"w\", \"linewidth\" : 2.0},\n patch_artist=True, positions = positions, whis = [5, 95]);\n\n for patch in bp1['boxes']:\n patch.set_facecolor(color1)\n\n if plot_spectrum:\n\n ax = ax2.twinx()\n ax2.set_zorder(100)\n ax2.patch.set_visible(False)\n\n ax.set_xlabel(\"Wavelength [$\\mu$m]\")\n ax.set_ylabel(r\"Planet-Star Flux Ratio ($\\times 10^{-10}$)\", rotation = 270, labelpad = 25)\n for i in range(len(self.bp_names)):\n if i not in iremove:\n pass\n #ax.plot(spectrum[0][i], 1e10*spectrum[3][i], \"o\", ms = 4.0, alpha = 0.65, color = \"w\", zorder = 80)\n #ax.errorbar(spectrum[0][i], 1e10*spectrum[3][i], yerr=1e10*spectrum[4][i], fmt = \"o\", ms = 2.0, alpha = 0.65, color = \"k\", zorder = 80)\n #ax.axvspan(drmA.bandpasses[i][0], drmA.bandpasses[i][1], alpha = 0.2, color = cc[drmA.bp_chan[i]])\n\n self.cn.telescope.lammin = 0.2\n self.cn.telescope.lammax = 2.0\n self.cn.telescope.resolution = 140.\n # Re-do count rate calcs for true Earth spectrum\n self.cn.run_count_rates(self.AHR, self.LAMHR, self.FSTAR)\n l1, = ax.plot(self.cn.lam, 1e10*self.cn.Cratio, color = \"purple\", zorder = 0, lw = 4.0, alpha = 1.)\n l2, = ax.plot(self.cn.lam, 1e10*self.cn.Cratio, color = \"w\", zorder = 0, lw = 2.0, alpha = 0.65)\n ax.set_ylim(bottom=0.0)\n ax.legend([(l1, l2)], [(\"Modern Earth\")], framealpha = 0.0)\n\n # Label Molecules\n ax.text(0.27, 1.55, \"O$_3$\", ha = \"center\", va = \"center\")\n ax.text(0.6, 1.25, \"O$_3$\", ha = \"center\", va = \"center\")\n ax.text(0.68, 1.35, \"O$_2$\", ha = \"center\", va = \"center\")\n ax.text(0.76, 1.45, \"O$_2$\", ha = \"center\", va = \"center\")\n ax.text(0.96, 1.45, \"H$_2$O\", ha = 
\"center\", va = \"center\")\n ax.text(1.15, 1.45, \"H$_2$O\", ha = \"center\", va = \"center\")\n ax.text(1.4, 1.45, \"H$_2$O\", ha = \"center\", va = \"center\")\n ax.text(1.9, 1.45, \"H$_2$O\", ha = \"center\", va = \"center\")\n ax.text(1.6, 1.25, \"CO$_2$\", ha = \"center\", va = \"center\")\n\n ax2.set_ylabel(\"Science Time [hrs]\")\n #ax2.set_title(r\"All %i targets (S/N$\\approx$%i)\" %(Ndraw, wantSNR))\n ax2.set_yscale(\"log\")\n\n ax2.set_xlabel(\"Wavelength [$\\mu$m]\")\n ax2.set_ylim(bottom = ylims[0], top = ylims[1])\n\n ax2.set_xticks([0.2, 0.4, 0.6, 0.8, 1.0, 1.2, 1.4, 1.6, 1.8, 2.0])\n ax2.set_xticklabels([\"$0.2$\", \"$0.4$\", \"$0.6$\", \"$0.8$\", \"$1.0$\", \"$1.2$\", \"$1.4$\", \"$1.6$\", \"$1.8$\", \"$2.0$\"])\n ax2.set_xlim(0.1, 2.1)\n #ax2.set_xlim(0.4, 1.0)\n\n #fig.savefig(\"/Users/Jake/Dropbox/Astronomy/UW/Astrobio/Research Rotation/LUVOIR/figures/drm_bp10_science_time_%s.pdf\" %drm.architecture, bbox_inches = \"tight\")\n\n return fig",
"def xx_plot(epoch, model, features, filters, figname, fgal=0.5):\n # fetch Stripe 82 data\n X, Xcov = fetch_prepped_s82data(epoch, fgal, features, filters)\n Xcoadd, Xcoaddcov = fetch_prepped_s82data(epoch, fgal, features,\n filters, use_single=False)\n N = 20000\n X = X[:N]\n Xcov = Xcov[:N]\n Xcoadd = Xcoadd[:N]\n Xcoaddcov = Xcoaddcov[:N]\n\n # unpickle the XD model\n if type(model) == str: \n f = open(model, 'rb')\n model = cPickle.load(f)\n f.close()\n\n # Calculate the posteriors, draw samples\n a, m, v = model.posterior(X, Xcov)\n posts = np.zeros_like(X)\n for i in range(X.shape[0]):\n posts[i] = model.sample(a[i], m[i], v[i], size=1)\n\n lo = [0.01, 0.02, 0.06]\n hi = [0.99, 0.96, 0.98]\n idx = [0, 1, 4]\n bins = [100, 100, 300]\n label = ['psfmag $r$', 'modelmag $u-g$', 'modelmag $i-z$']\n N = len(idx)\n fs = 5\n lsize = 20\n f = pl.figure(figsize=(N * fs, 2 * fs))\n pl.subplots_adjust(wspace=0.3)\n for i in range(N):\n x = X[:, idx[i]]\n y = Xcoadd[:, idx[i]]\n p = posts[:, idx[i]]\n ind = (y > -999) & (Xcoaddcov[:, idx[i]][:, idx[i]] < 10.)\n x = x[ind]\n y = y[ind]\n p = p[ind]\n ax = pl.subplot(2, N, i + 1)\n v = np.sort(x)\n mn, mx = v[np.int(lo[i] * x.shape[0])], v[np.int(hi[i] * x.shape[0])]\n hist2d(x, y, ax=ax, bins=bins[i], plot_contours=True,\n plot_datapoints=True)\n pl.plot([mn, mx], [mn, mx], 'r', lw=2)\n pl.ylabel('Coadd ' + label[i], fontsize=lsize)\n pl.xlabel('Single Epoch ' + label[i], fontsize=lsize)\n pl.xlim(mn, mx)\n pl.ylim(mn, mx)\n ax = pl.subplot(2, N, i + 4)\n hist2d(p, y, ax=ax, bins=bins[i], plot_contours=True,\n plot_datapoints=True)\n pl.plot([mn, mx], [mn, mx], 'r', lw=2)\n pl.xlim(mn, mx)\n pl.ylim(mn, mx)\n pl.ylabel('Coadd ' + label[i], fontsize=lsize)\n pl.xlabel('XD Posterior ' + label[i], fontsize=lsize)\n f.savefig(figname, bbox_inches='tight')",
"def rateplot2d(M:b2.SpikeMonitor, from_:float, to:float, wh:tuple):\n im1 = M.i[np.where(np.logical_and(from_<M.t/b2.ms, M.t/b2.ms<to))[0]]\n m = np.max(im1)\n idx = np.concatenate([np.bincount(im1), np.zeros(wh[0]*wh[1]-m-1)])\n idx = idx.reshape(wh[0],wh[1])\n return plt.imshow(idx)",
"def plot_data(heart_filt, pace_filt):\n\n plt.figure(1)\n plt.plot(heart_filt, pace_filt)\n plt.show()",
"def visualize_model(self, ax):\n rs = self.w[1:,:].reshape(28, 28, 10, order='F')\n rs2 = np.transpose(rs, axes=[1,0,2])\n ax.imshow(rs2.reshape(28, -1, order='F'), cmap='bone')",
"def _plot_wires(ax, hot, gnd, v, **kw):\n #get x and y coordinates\n L = len(hot)\n x = np.array([c.x for c in hot + gnd])\n y = np.array([c.y for c in hot + gnd])\n #calculate the scaling factor\n scale = _fields_plots_xs_wireperc*max(np.absolute(v))/max(np.absolute(y))\n if('scale' in kw):\n if(kw['scale'] is False):\n scale = 1.0\n #plot\n if(hot):\n kw['H'].append(ax.plot(x[:L], scale*y[:L], 'ko')[0])\n kw['L'].append('Conductors')\n if(gnd):\n kw['H'].append(ax.plot(x[L:], scale*y[L:], 'o', color='gray')[0])\n kw['L'].append('Grounded Conductors')",
"def gridPlot12(img_stack):\r\n F = plt.figure(figsize = (30,30))\r\n F.subplots_adjust(left = 0.05, right = 0.95)\r\n grid = ImageGrid(F, 142, nrows_ncols = (3,4), axes_pad = 0.0, share_all = True,\r\n label_mode = \"L\", cbar_location = \"top\", cbar_mode = \"single\")\r\n \r\n i = 0\r\n for img in img_stack[0:12]:\r\n im = grid[i].imshow(img, interpolation = \"nearest\", vmin = 0, vmax = 255)\r\n i += 1 \r\n grid.cbar_axes[0].colorbar(im)\r\n plt.savefig('gplot12.png')\r\n# for cax in grid.cbar_axes:\r\n# cax.toggle_label(False)\r\n return",
"def plot_spectra_from_scan(scan, name, direct_file, grism_file):\n\n tr_scan = np.transpose(scan)\n column_sums = [sum(scan_col) for scan_col in tr_scan[5:-5]]\n x = np.arange(len(column_sums))\n wv = convert_rows_to_wv(direct_file, grism_file, x)\n plt.plot(wv, column_sums)\n plt.savefig('{0}/spectra/{1}_spectrum.png'.format('/'.join(name.split('/')[:-1]), name.split('/')[-1]))\n plt.clf()\n return wv, column_sums",
"def visulize_5(X):\n fig, axes1 = plt.subplots(5,5,figsize=(3,3))\n for j in range(5):\n for k in range(5):\n i = np.random.choice(range(len(X)))\n axes1[j][k].set_axis_off()\n axes1[j][k].imshow(X[:,i].reshape(32, 32, 3))\n plt.show()",
"def Plot_Spectrum(Path,borne1 = 0,borne2 = 0) :\n x,y=[],[]\n fs = open(Path, 'r') \n#index_array = 0\n while 1: \n txt = fs.readline()\n if txt =='': \n break\n x.append(float(txt[0:9]))\n y.append(float(txt[10:17]))\n #x[index_array],y[index_array] = float(txt[0:9]),float(txt[10:17])\n #index_array = index_array+1\n \n fs.close()\n x = np.array(x)\n y = np.array(y)\n if ((borne1 == 0) & (borne2 == 0)) :\n pass \n else :\n index_ok = ((x<borne2) & (x>borne1))\n x = x[index_ok]\n y = y[index_ok]\n plt.figure(1)\n plt.plot(x,y)\n plt.xlabel(r\"Nombre d'onde $(cm^{-1})$\")",
"def show_digit( Pixels ):\n from matplotlib import pyplot as plt\n print(Pixels.shape)\n Patch = Pixels.reshape((8,8))\n plt.figure(1, figsize=(4,4))\n plt.imshow(Patch, cmap=plt.cm.gray_r, interpolation='nearest') # plt.cm.gray_r # plt.cm.hot\n plt.show()",
"def receptive_fields_visualization(W):\n W = W.cpu()\n \n hidden_dim = int(np.sqrt(W.shape[1]))\n side_dim = 10\n indices = [np.random.randint(0,W.shape[0]) for _ in range(side_dim**2)]\n \n fig = plt.figure(figsize=(10,10))\n for i in range(len(indices)):\n ax = fig.add_subplot(side_dim, side_dim, i+1, xticks = [], yticks = [])\n ax.imshow(W[i,:].view(hidden_dim, hidden_dim),cmap = 'gray')\n plt.subplots_adjust(wspace=0.01, hspace=0.01)\n #end\n \n plt.show()\n plt.close('all')",
"def plot5(self):\n\n cond = ((self.ds.freq<32.6) & (self.ds.freq>17))\n freq = self.ds.freq[cond]\n power = self.ds.power[cond]\n\n # the modes for KIC 9205705\n m = pd.read_csv('/home/mxs191/Desktop/MathewSchofield/TRG/GetData/Modes/modes_9205705.csv')\n m1 = [17.65, 20.6, 23.7, 26.9, 30.1] # l=1\n\n plt.rc('font', size=18)\n fig, ax = plt.subplots()\n plt.plot(freq, power, zorder=1, alpha=0.4)\n\n # NOTE: annotate mode angular degrees\n plt.scatter(m['f0'].as_matrix(), np.full(len(m), 150000), c='k', zorder=2, s=80)\n plt.scatter(m['f2'].as_matrix(), np.full(len(m), 130000), c='mediumseagreen', zorder=2, s=80, marker='^')\n plt.scatter(m1, np.full(len(m1), 140000), c='grey', zorder=2, s=80, marker='v')\n\n # NOTE: plot envelope\n numax = info['numax'].as_matrix() # mu Hz\n env_width = 0.66 * numax**0.88\n plt.plot(freq, 40004*np.exp( -( (freq-24.7)**2 / (2*7.**2) ) ), c='k', linestyle='--')\n\n # NOTE: annotate envelope\n style = dict(size=16, color='k')\n ax.text(24.1, 49167, r\"$\\nu_{\\rm max}$\", color='k', size=18)\n ax.text(24.1, 20994, r\"$\\Gamma_{\\rm env}$\", color='k', size=18)\n ax.text(23, 162944, r\"$\\Delta \\nu$\", **style)\n plt.annotate(s='', xy=(25.3, 158610), xytext=(21.91, 158610),\n arrowprops=dict(arrowstyle='<->')) # dnu\n plt.annotate(s='', xy=((24.7-env_width/2.), 15861), xytext=((24.7+env_width/2.), 15861),\n arrowprops=dict(arrowstyle='<->')) # env width\n\n ax.set_xlabel(r'Frequency ($\\rm \\mu Hz$)')\n ax.set_ylabel(r'PSD ($\\rm ppm^{2} \\, \\mu Hz^{-1}$)')\n plt.xlim(17, 32.6)\n plt.ylim(17, 195181)\n plt.tight_layout()\n plt.show()\n fig.savefig(os.getcwd() + '/DetTest1_plots/Plot5_ps_' + str(self.ds.epic) + '.pdf')"
]
| [
"0.62094134",
"0.6168367",
"0.6072057",
"0.60688096",
"0.6013233",
"0.5994714",
"0.59258676",
"0.59078777",
"0.58876216",
"0.5884491",
"0.5867321",
"0.5860733",
"0.5855114",
"0.5851817",
"0.5822021",
"0.5821232",
"0.582071",
"0.580893",
"0.58087957",
"0.57978433",
"0.57874113",
"0.57843983",
"0.57820237",
"0.57658774",
"0.5763544",
"0.57618374",
"0.575674",
"0.5723777",
"0.57225436",
"0.57183015"
]
| 0.6649696 | 0 |
plot overscan with the 7th / 17th channel subtracted | def plot_overscan_diff(overscan, img, TITLE, OUT_DIR):
fig = plt.figure(figsize=(20, 20))
gs0 = gridspec.GridSpec(3, 3)
for i, f in enumerate(img):
x = f.dev_index % 3
gs = gridspec.GridSpecFromSubplotSpec(
1, 2, wspace=0, subplot_spec=gs0[f.dev_index])
ax2 = plt.subplot(gs[0, 0])
for j in range(9, 17):
plt.plot(overscan[i, j - 1] - overscan[i, 15] +
500 * (j - 8), label='seg' + str(j + 1))
plt.legend(fontsize=6, loc='upper center', ncol=4)
if(x != 0):
ax2.set_yticklabels([])
plt.grid()
plt.xlim(0, 2100)
plt.ylim(0, 4500)
ax2.set_title(f.dev_name + ' (seg 10-17)')
ax1 = plt.subplot(gs[0, 1])
for j in range(1, 9):
plt.plot(overscan[i, j - 1] - overscan[i, 7] +
500 * j, label='seg' + str(j - 1))
plt.legend(fontsize=6, loc='upper center', ncol=4)
if(x != 2):
ax1.set_yticklabels([])
if(x == 2):
ax1.yaxis.tick_right()
plt.grid()
plt.xlim(0, 2100)
plt.ylim(0, 4500)
ax1.set_title(f.dev_name + ' (seg 0-7)')
# ax1.set_title('S-'+f[7:9]+' (seg 0-7)')
fig.suptitle('Overscan (diff) ' + TITLE, y=0.94, size=20)
plt.subplots_adjust(wspace=0.05)
plt.savefig(OUT_DIR + TITLE + '_diff_spatial.png')
plt.close(fig) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def overviewCommand(self):\n plt.figure(11)\n plt.clf()\n ax = plt.subplot(211)\n plt.plot(self.raw['OPDC'].data.field('TIME'),\n 1e6*self.raw['OPDC'].data.field('FUOFFSET'),\n color='r', label='FUOFFSET',\n linewidth=1, alpha=1) \n plt.plot(self.raw['OPDC'].data.field('TIME'),\n 1e6*(self.raw['OPDC'].data.field(self.DLtrack)-\n self.raw['OPDC'].data.field('PSP')),\n color='r', linewidth=3, alpha=0.5,\n label=self.DLtrack+'-PSP')\n plt.legend()\n plt.subplot(212, sharex=ax)\n plt.plot(self.raw['OPDC'].data.field('TIME'),\n 1e6*self.raw['OPDC'].data.field('FUOFFSET')-\n 1e6*(self.raw['OPDC'].data.field(self.DLtrack)-\n self.raw['OPDC'].data.field('PSP')),\n color='k', label='$\\Delta$',\n linewidth=1, alpha=1) \n \n signal = self.raw['OPDC'].data.field('FUOFFSET')\n plt.figure(12)\n plt.clf()\n ax2 = plt.subplot(111)\n Fs = 1e6/np.diff(self.raw['OPDC'].data.field('TIME')).mean()\n print Fs\n ax2.psd(signal[:50000], NFFT=5000, Fs=Fs, label='FUOFFSET',scale_by_freq=0)\n plt.legend()",
"def plot_channel_offset(fig_name):\n file1 = 'log/influence/channel/table_left_150cm.csv'\n df = utils.read_file_rssi(file1, correct_index=True)\n df[colums].plot()\n plt.title(file1)\n plt.xlabel('Time: s')\n plt.ylabel('RSSI: dBm')\n if not os.path.exists(dir_fig):\n os.makedirs(dir_fig)\n plt.savefig(dir_fig + '/' + fig_name + '1.png')\n file2 = 'log/influence/channel/table_left_50cm.csv'\n df = utils.read_file_rssi(file2, correct_index=True)\n df[colums].plot()\n plt.title(file2)\n plt.xlabel('Time: s')\n plt.ylabel('RSSI: dBm')\n plt.savefig('figures/' + fig_name + '2.png')",
"def plot_overscan(overscan, img, TITLE, OUT_DIR):\n fig = plt.figure(figsize=(20, 20))\n gs0 = gridspec.GridSpec(3, 3)\n\n for i, f in enumerate(img):\n x = f.dev_index % 3\n\n gs = gridspec.GridSpecFromSubplotSpec(\n 1, 2, wspace=0, subplot_spec=gs0[f.dev_index])\n ax2 = plt.subplot(gs[0, 0])\n for j in range(9, 17):\n plt.plot(overscan[i, j - 1] + 500 *\n (j - 8), label='seg' + str(j + 1))\n plt.legend(fontsize=6, loc='upper center', ncol=4)\n if x != 0:\n ax2.set_yticklabels([])\n\n plt.grid()\n plt.xlim(0, 2100)\n plt.ylim(0, 4500)\n ax2.set_title(f.dev_name + ' (seg 10-17)')\n\n ax1 = plt.subplot(gs[0, 1])\n for j in range(1, 9):\n plt.plot(overscan[i, j - 1] + 500 * j, label='seg' + str(j - 1))\n plt.legend(fontsize=6, loc='upper center', ncol=4)\n if x != 2:\n ax1.set_yticklabels([])\n if x == 2:\n ax1.yaxis.tick_right()\n plt.grid()\n plt.xlim(0, 2100)\n plt.ylim(0, 4500)\n ax1.set_title(f.dev_name + ' (seg 0-7)')\n\n fig.suptitle('Overscan ' + TITLE, y=0.94, size=20)\n plt.subplots_adjust(wspace=0.05)\n plt.savefig(OUT_DIR + TITLE + '_spatial.png')\n plt.close(fig)",
"def index_shift_plot():\n file = \"Data/matfiles/20131221.mat\"\n object = MatReader(file)\n thing = object.shifted == False\n assert thing, \"shifter must be commented out\"\n \n start = 1920 #index of 16 minutes\n stop = 3120 #index of 26 minutes\n \n NeA = object.NeA[start:stop]\n NeB = object.NeB[start:stop]\n NeC = object.NeC[start:stop]\n NeA = object.meanie(NeA, 5)\n NeB = object.meanie(NeB, 5)\n NeC = object.meanie(NeC, 5)\n \n secondsA = object.secondsA[start:stop]\n secondsB = object.secondsB[start:stop]\n secondsC = object.secondsC[start:stop]\n \n plt.plot(secondsB, NeB, \"r\")\n plt.plot(secondsB, NeA, \"g\")\n plt.plot(secondsB, NeC, \"b\")\n plt.xlabel(\"Seconds since midnight UTC of satellite B\")\n plt.ylabel(\"Electron density [cm$^{-3}$]\")\n plt.legend([\"Satellite B\", \"Satellite A\", \"Satellite C\"])\n plt.title(\"Electron density data before index-shift\")\n plt.savefig(\"Figures/preshift_example.pdf\")\n plt.show()\n \n object.shifter()\n NeA = object.NeA[start:stop]\n NeB = object.NeB[start:stop]\n NeC = object.NeC[start:stop]\n NeA = object.meanie(NeA, 5)\n NeB = object.meanie(NeB, 5)\n NeC = object.meanie(NeC, 5)\n \n secondsA = object.secondsA[start:stop]\n secondsB = object.secondsB[start:stop]\n secondsC = object.secondsC[start:stop]\n \n plt.plot(secondsB, NeB, \"r\")\n plt.plot(secondsB, NeA, \"g\")\n plt.plot(secondsB, NeC, \"b\")\n plt.xlabel(\"Seconds since midnight UTC of satellite B\")\n plt.ylabel(\"Electron density [cm$^{-3}$]\")\n plt.legend([\"Satellite B\", \"Satellite A\", \"Satellite C\"])\n plt.title(\"Electron density data after index-shift\")\n plt.savefig(\"Figures/postshift_example.pdf\")\n plt.show()",
"def plot_overscan_variation(t_lst, overscan_lst, figfile):\n \n # Quality check plot of the mean overscan value over time \n fig = plt.figure(figsize=(8,6), dpi=150)\n ax2 = fig.add_axes([0.1,0.60,0.85,0.35])\n ax1 = fig.add_axes([0.1,0.15,0.85,0.35])\n #conversion of the DATE-string to a number\n date_lst = [dateutil.parser.parse(t) for t in t_lst]\n datenums = mdates.date2num(date_lst)\n\n ax1.plot_date(datenums, overscan_lst, 'r-', label='mean')\n ax2.plot(overscan_lst, 'r-', label='mean')\n for ax in fig.get_axes():\n leg = ax.legend(loc='upper right')\n leg.get_frame().set_alpha(0.1)\n ax1.set_xlabel('Time')\n ax2.set_xlabel('Frame')\n ax1.set_ylabel('Overscan mean ADU')\n ax2.set_ylabel('Overscan mean ADU')\n # adjust x and y limit\n y11,y12 = ax1.get_ylim()\n y21,y22 = ax2.get_ylim()\n z1 = min(y11,y21)\n z2 = max(y21,y22)\n ax1.set_ylim(z1,z2)\n ax2.set_ylim(z1,z2)\n ax2.set_xlim(0, len(overscan_lst)-1)\n # adjust rotation angle of ticks in time axis\n plt.setp(ax1.get_xticklabels(),rotation=30)\n\n # save figure\n fig.savefig(figfile)\n plt.close(fig)",
"def plot_data(heart_filt, pace_filt):\n\n plt.figure(1)\n plt.plot(heart_filt, pace_filt)\n plt.show()",
"def plot_bp_exptimes(self, plot_spectrum = True, title = None, ylims = (1.0, 1e7),\n cc = [\"C0\", \"C2\", \"C3\"], iremove = []):\n\n # Reshape exposure times\n tmp = self.tpbpcs_rect.T\n\n # Calculate clean spectrum\n output = self.complete_spectrum_time()\n spectrum = output[2]\n\n fig, ax2 = plt.subplots(figsize = (16,5))\n\n if title is not None:\n ax2.set_title(title)\n\n icount = 0\n for ichan in range(len(CHANNELS)):\n\n data = []\n positions = []\n widths = []\n\n for j in range(len(self.bp_names[self.bp_chan == ichan])):\n\n nanmask = np.isfinite(tmp[icount,:])\n\n data.append(tmp[icount,nanmask])\n positions.append(np.mean(spectrum[0][icount]))\n widths.append(spectrum[0][icount][-1] - spectrum[0][icount][0] + np.mean(spectrum[1][icount][:]))\n color1 = cc[ichan]\n\n comp_str = \"$%i \\%%$\" %(100.*self.frac_bias_bp[icount])\n comp_str2 = \"$\\mathbf{%i {\\%%}}$\" %(100.*self.frac_bias_bp[icount])\n comp_str3 = \"$\\mathbf{%i}$\" %(100.*self.frac_bias_bp[icount])\n #ax2.text(positions[j], np.median(tmp[icount,:]) + 5.*np.std(tmp[icount,:]), comp_str2,\n # ha = \"center\", va = \"top\", fontsize = 12, color = \"w\")\n q_l, q_50, q_h, q_m, q_p = nsig_intervals(tmp[icount,nanmask], intvls=[0.25, 0.5, 0.75])\n #ax2.text(positions[j], ylims[1], comp_str2,\n # ha = \"center\", va = \"top\", color = color1, fontsize = 12)\n ax2.text(positions[j], q_50 + q_p, comp_str3,\n ha = \"center\", va = \"bottom\", color = color1)\n\n #ax2.plot(self.bandpasses[icount], [q_50, q_50], color = color1, zorder = 120, ls = \"dashed\")\n\n icount += 1\n\n positions = np.array(positions)\n widths = np.array(widths)\n bp1 = ax2.boxplot(data, sym = '', widths = widths, showfliers = False,\n boxprops = {\"color\" : color1, \"alpha\" : 0.5},\n whiskerprops = {\"color\" : color1, \"linewidth\" : 2.0},\n capprops = {\"color\" : color1, \"linewidth\" : 0.0},\n medianprops = {\"color\" : \"w\", \"linewidth\" : 2.0},\n patch_artist=True, positions = positions, whis = [5, 95]);\n\n for patch in bp1['boxes']:\n patch.set_facecolor(color1)\n\n if plot_spectrum:\n\n ax = ax2.twinx()\n ax2.set_zorder(100)\n ax2.patch.set_visible(False)\n\n ax.set_xlabel(\"Wavelength [$\\mu$m]\")\n ax.set_ylabel(r\"Planet-Star Flux Ratio ($\\times 10^{-10}$)\", rotation = 270, labelpad = 25)\n for i in range(len(self.bp_names)):\n if i not in iremove:\n pass\n #ax.plot(spectrum[0][i], 1e10*spectrum[3][i], \"o\", ms = 4.0, alpha = 0.65, color = \"w\", zorder = 80)\n #ax.errorbar(spectrum[0][i], 1e10*spectrum[3][i], yerr=1e10*spectrum[4][i], fmt = \"o\", ms = 2.0, alpha = 0.65, color = \"k\", zorder = 80)\n #ax.axvspan(drmA.bandpasses[i][0], drmA.bandpasses[i][1], alpha = 0.2, color = cc[drmA.bp_chan[i]])\n\n self.cn.telescope.lammin = 0.2\n self.cn.telescope.lammax = 2.0\n self.cn.telescope.resolution = 140.\n # Re-do count rate calcs for true Earth spectrum\n self.cn.run_count_rates(self.AHR, self.LAMHR, self.FSTAR)\n l1, = ax.plot(self.cn.lam, 1e10*self.cn.Cratio, color = \"purple\", zorder = 0, lw = 4.0, alpha = 1.)\n l2, = ax.plot(self.cn.lam, 1e10*self.cn.Cratio, color = \"w\", zorder = 0, lw = 2.0, alpha = 0.65)\n ax.set_ylim(bottom=0.0)\n ax.legend([(l1, l2)], [(\"Modern Earth\")], framealpha = 0.0)\n\n # Label Molecules\n ax.text(0.27, 1.55, \"O$_3$\", ha = \"center\", va = \"center\")\n ax.text(0.6, 1.25, \"O$_3$\", ha = \"center\", va = \"center\")\n ax.text(0.68, 1.35, \"O$_2$\", ha = \"center\", va = \"center\")\n ax.text(0.76, 1.45, \"O$_2$\", ha = \"center\", va = \"center\")\n ax.text(0.96, 1.45, \"H$_2$O\", ha = 
\"center\", va = \"center\")\n ax.text(1.15, 1.45, \"H$_2$O\", ha = \"center\", va = \"center\")\n ax.text(1.4, 1.45, \"H$_2$O\", ha = \"center\", va = \"center\")\n ax.text(1.9, 1.45, \"H$_2$O\", ha = \"center\", va = \"center\")\n ax.text(1.6, 1.25, \"CO$_2$\", ha = \"center\", va = \"center\")\n\n ax2.set_ylabel(\"Science Time [hrs]\")\n #ax2.set_title(r\"All %i targets (S/N$\\approx$%i)\" %(Ndraw, wantSNR))\n ax2.set_yscale(\"log\")\n\n ax2.set_xlabel(\"Wavelength [$\\mu$m]\")\n ax2.set_ylim(bottom = ylims[0], top = ylims[1])\n\n ax2.set_xticks([0.2, 0.4, 0.6, 0.8, 1.0, 1.2, 1.4, 1.6, 1.8, 2.0])\n ax2.set_xticklabels([\"$0.2$\", \"$0.4$\", \"$0.6$\", \"$0.8$\", \"$1.0$\", \"$1.2$\", \"$1.4$\", \"$1.6$\", \"$1.8$\", \"$2.0$\"])\n ax2.set_xlim(0.1, 2.1)\n #ax2.set_xlim(0.4, 1.0)\n\n #fig.savefig(\"/Users/Jake/Dropbox/Astronomy/UW/Astrobio/Research Rotation/LUVOIR/figures/drm_bp10_science_time_%s.pdf\" %drm.architecture, bbox_inches = \"tight\")\n\n return fig",
"def plot_rssi_variation(fig_name):\n dir = \"log/heatmap\"\n pattern = r'(\\d{1,2}).csv$'\n utils.construct_heatmap_set(dir, pattern)\n df = pd.read_csv('data/df.csv', sep=';')\n df_point100 = df[[index == 100 for index in df['COUNTER FLAG']]]\n df_point100.index = np.arange(df_point100.shape[0]) / 10\n df_point100[colums].plot()\n plt.xlabel('Time: s')\n plt.ylabel('RSSI: dBm')\n plt.title(fig_name)\n if not os.path.exists(dir_fig):\n os.makedirs(dir_fig)\n plt.savefig(dir_fig + '/' + fig_name + '.png')",
"def notch_filter_raw_plot(data, fs, fc):\n b, a = sp.iirnotch(w0=fc / fs * 2, Q=100)\n w, h = sp.freqz(b, a)\n f = w / np.pi * fs / 2\n plt.figure()\n plt.plot(f, 10 * np.log10(abs(h)))\n plt.xlabel('frequency (Hz)')\n plt.ylabel('Magnitude (dB)')\n plt.title('frequency response of notch filter at 50Hz')\n plt.grid()\n\n data1 = sp.filtfilt(b, a, data)\n return data1",
"def plot_eeg(Data,start_sec = 0, window_size = 10, amp = 200, figure_size = (15,8),\n dpi=600, detrend = True, envelope=False, plot_bad = False, exclude = [], grid=True, \n xtickspace = 1,saveplot = None, subplot = None ,spines = ['left', 'bottom'],time_out = False, common_ref=False, **kwargs):\n #geting data from Data_dict\n data = Data.data\n time_vec = Data.time_vec\n sample_rate = Data.sample_rate\n ch_labels = Data.ch_labels\n if plot_bad:\n badch = np.array([],dtype=int) # a empty array \n else:\n badch = Data.bad_channels\n \n if type(exclude) == list:\n for item in exclude:\n if type(item) == str:\n idx = [i for i,x in enumerate(ch_labels) if x == item]\n badch = sorted(set(np.append(badch,idx)))\n elif type(item) == int:\n idx = item\n badch = sorted(set(np.append(badch,idx)))\n\n elif type(exclude) == str:\n idx = [i for i,x in enumerate(ch_labels) if x == exclude]\n badch = sorted(set(np.append(badch,idx)))\n elif type(exclude) == int:\n idx = exclude\n badch = sorted(set(np.append(badch,idx)))\n \n # Transforming the start_sec in points\n start_sec *= sample_rate\n start_sec = int(start_sec)\n # Transforming the window_size in points\n window_size *= sample_rate\n window_size = int(window_size)\n if subplot == None: \n # Creating the figure \n f = plt.figure(figsize=figure_size,dpi=dpi)\n # creating the axes\n sp = f.add_subplot(111)\n else:\n sp = subplot\n # creating a vector with the desired index\n time_window = np.arange(start_sec, start_sec + window_size)\n # declaring tick variables\n yticklocs = []\n yticklabel = [] \n ch_l = 1\n if len(data.shape) == 1:\n # in the axes, plot the raw signal for each channel with a amp diference \n if detrend:\n sp.plot(time_vec[time_window],(ch_l)*amp + sig.detrend(data[time_window]),**kwargs)\n else:\n sp.plot(time_vec[time_window],(ch_l)*amp + data[time_window],**kwargs)\n if envelope:\n sp.plot(time_vec[time_window],(ch_l)*amp + np.abs(sig.hilbert(data[time_window])),**kwargs)\n # appeng the channel label and the tick location\n if ch_labels is None:\n yticklabel.append(ch_l) \n else:\n yticklabel.append(ch_labels[0])\n yticklocs.append((ch_l)*amp)\n else:\n # Loop to plot each channel\n for ch in [x for x in range(data.shape[1]) if x not in badch]:\n # in the axes, plot the raw signal for each channel with a amp diference \n if detrend:\n sp.plot(time_vec[time_window],(ch_l)*amp + sig.detrend(data[time_window,ch]),**kwargs)\n else:\n sp.plot(time_vec[time_window],(ch_l)*amp + data[time_window,ch],**kwargs)\n if envelope:\n sp.plot(time_vec[time_window],(ch_l)*amp + np.abs(sig.hilbert(data[time_window,ch])),**kwargs)\n # appeng the channel label and the tick location\n if ch_labels is None:\n yticklabel.append(ch_l) \n else:\n yticklabel.append(ch_labels[ch])\n \n yticklocs.append((ch_l)*amp)\n ch_l += 1\n if common_ref: \n sp.plot(time_vec[time_window],(ch_l)*amp + Data.common_ref[time_window],**kwargs)\n yticklocs.append((ch_l)*amp)\n yticklabel.append('common_ref')\n\n\n adjust_spines(sp, spines)\n if len(spines) > 0:\n # changing the x-axis (label, limit and ticks)\n plt .xlabel('time (s)', size = 16)\n #xtickslocs = np.linspace(int(time_vec[time_window[0]]),int(time_vec[time_window[-1]]),int(window_size/(sample_rate*xtickspace)),endpoint=True)\n \n xtickslocs = np.arange(math.ceil(time_vec[time_window[0]]),math.ceil(time_vec[time_window[-1]]+xtickspace),xtickspace) \n xtickslabels = ['']*len(xtickslocs)\n for x in np.arange(0,len(xtickslocs),10):\n xtickslabels[x] = xtickslocs[x]\n plt.xticks(xtickslocs,xtickslabels,size = 
16)\n # changing the y-axis\n plt.yticks(yticklocs, yticklabel, size=16)\n \n if grid: \n ax = plt.gca()\n ax.xaxis.grid(True)\n \n sp.set_xlim(time_vec[time_window[0]],time_vec[time_window[-1]]+np.diff(time_vec[time_window[0:2]]))\n #sp.set_ylim(0,(ch_l)*amp)\n if time_out:\n return time_vec[time_window[0]], time_vec[time_window[-1]]+np.diff(time_vec[time_window[0:2]]), sp\n if saveplot != None:\n if type(saveplot) == str: \n plt.savefig(saveplot, bbox_inches='tight')\n else:\n raise Exception('saveplot should be a string')",
"def rateplot2d(M:b2.SpikeMonitor, from_:float, to:float, wh:tuple):\n im1 = M.i[np.where(np.logical_and(from_<M.t/b2.ms, M.t/b2.ms<to))[0]]\n m = np.max(im1)\n idx = np.concatenate([np.bincount(im1), np.zeros(wh[0]*wh[1]-m-1)])\n idx = idx.reshape(wh[0],wh[1])\n return plt.imshow(idx)",
"def plot_spectrumxichange(self):\n countgood = 0 ; countbad = 0\n for idata in self.datarg:\n if idata[-1, 0] == 1.: \n self.fig.axes[0].plot(idata[0:,0], idata[0: ,1] ,'b') \n countgood += 1\n print countgood , 'good solution'\n else: \n self.fig.axes[0].plot(idata[0:,0], idata[0: ,1] ,'r') \n print countbad, 'bad solution'\n countbad += 1\n print 'We found %g good solutions and %g tda startdistributions that broke down before xi = 1, we hope that\\'s what you expected' %(countgood,countbad)\n #Create custom artistsr[goodline,badline],['solution','breakdown']\n goodline = pl.Line2D((0,1),(0,0), color='b') \n badline = pl.Line2D((0,1),(0,0), color='r')\n self.layout(self.reader.depvar['depvar'] , r'energy spectrum (a.u.)' , tit = r'All tda start distributions $\\xi$' , legendhand = [goodline , badline] , legendlab = ['solution', 'breakdown'] )\n self.savefig('xispec')",
"def plot_diff(self):\n if not(self.is_attribute(\"time\") & self.is_attribute(\"intensity_up\") & \n self.is_attribute(\"intensity_up_sigma\") &\n self.is_attribute(\"intensity_down\") & \n self.is_attribute(\"intensity_down_sigma\") &\n self.is_attribute(\"intensity_up_total\") &\n self.is_attribute(\"intensity_down_total\")):\n return\n fig, ax = plt.subplots()\n ax.set_title(\"Polarized intensity: I_up - I_down\")\n ax.set_xlabel(\"Time (microseconds)\")\n ax.set_ylabel('Intensity')\n \n np_time = numpy.array(self.time, dtype=float)\n np_up = numpy.array(self.intensity_up, dtype=float)\n np_sup = numpy.array(self.intensity_up_sigma, dtype=float)\n np_up_mod = numpy.array(self.intensity_up_total, dtype=float)\n np_down = numpy.array(self.intensity_down, dtype=float)\n np_sdown = numpy.array(self.intensity_down_sigma, dtype=float)\n np_down_mod = numpy.array(self.intensity_down_total, dtype=float)\n np_diff = np_up - np_down\n np_diff_mod = np_up_mod - np_down_mod\n np_sdiff = numpy.sqrt(numpy.square(np_sup)+numpy.square(np_sdown))\n\n ax.plot([np_time.min(), np_time.max()], [0., 0.], \"b:\")\n ax.plot(np_time, np_diff_mod, \"k-\",\n label=\"model\")\n ax.errorbar(np_time, np_diff, yerr=np_sdiff, fmt=\"ko\", alpha=0.2,\n label=\"experiment\")\n\n y_min_d, y_max_d = ax.get_ylim()\n param = y_min_d-(np_diff-np_diff_mod).max()\n\n ax.plot([np_time.min(), np_time.max()], [param, param], \"k:\")\n ax.plot(np_time, np_diff-np_diff_mod+param, \"r-\", alpha=0.7,\n label=\"difference\")\n ax.legend(loc='upper right')\n fig.tight_layout()\n return (fig, ax)",
"def stack_plot(self, nrow=6, show=True):\n import matplotlib.pyplot as plt\n import matplotlib.gridspec as gridspec\n import matplotlib as mpl\n mpl.rcParams['font.family'] = 'stixgeneral'\n mpl.rcParams['font.size'] = 15.\n # Check for spec\n gdiline = []\n for iline in self._abslines:\n if isinstance(iline.analy['spec'],Spectrum1D):\n gdiline.append(iline)\n nplt = len(gdiline)\n if nplt == 0:\n print(\"Load spectra into the absline.analy['spec']\")\n return\n # Setup plot\n nrow = min(nplt,nrow)\n ncol = nplt // nrow + (nplt % nrow > 0)\n plt.clf()\n gs = gridspec.GridSpec(nrow, ncol)\n ymnx = (-0.1,1.1)\n\n for qq,iline in enumerate(gdiline):\n ax = plt.subplot(gs[qq%nrow, qq//nrow])\n # Plot\n velo = iline.analy['spec'].relative_vel((1+iline.attrib['z'])*iline.wrest)\n ax.plot(velo, iline.analy['spec'].flux, 'k-', linestyle='steps-mid')\n ax.plot(velo, iline.analy['spec'].sig, 'r:')\n # Lines\n ax.plot([0]*2, ymnx, 'g--')\n # Axes\n ax.set_xlim(self.vlim.value)\n ax.set_ylim(ymnx)\n ax.minorticks_on()\n if ((qq+1) % nrow == 0) or ((qq+1) == nplt):\n ax.set_xlabel('Relative Velocity (km/s)')\n else:\n ax.get_xaxis().set_ticks([])\n # Label\n ax.text(0.1, 0.1, iline.data['name'], transform=ax.transAxes, ha='left', va='center', fontsize='x-large')#, bbox={'facecolor':'white'})\n\n plt.tight_layout(pad=0.2,h_pad=0.,w_pad=0.1)\n if show:\n plt.show()\n plt.close()",
"def find_chart():\r\n ###############################################\r\n # Read values of S/N\r\n sn = np.loadtxt(outtable, usecols=(14,))\r\n xs, ys = np.loadtxt(outtable, usecols=(1, 2)).T\r\n specs = np.loadtxt(outtable, usecols=(0,), dtype=str)\r\n ###############################################\r\n # Find good (and bad) regions according to S/N\r\n good = np.where(((~np.isnan(sn)) & (sn >= sn_cut)))[0]\r\n bad = np.where((sn < sn_cut))[0]\r\n ###############################################\r\n # Filter arrays for S/N\r\n sn = sn[good]\r\n xs = xs[good]\r\n ys = ys[good]\r\n specs = specs[good].tolist()\r\n specs = [x.replace(\".fits\", \"\")[1:] for x in specs]\r\n ###############################################\r\n # Set limits for the plot\r\n norm = Normalize(0, 1)\r\n ###############################################\r\n # Set colormap\r\n # cmap = brewer2mpl.get_map('YlGnBu', 'sequential', 5).mpl_colormap\r\n # Produces a collection of polygons with colors according to S/N values\r\n coll = PolyCollection(polygons_bins[good], array=np.ones_like(sn),\r\n cmap=\"gray\", edgecolors='0.5', norm=norm)\r\n ###############################################\r\n # Initiate figure and axis for matplotlib\r\n fig = plt.figure(figsize=(6.25, 6))\r\n gs = gridspec.GridSpec(1, 1)\r\n gs.update(left=0.08, right=0.985, bottom=0.08, top=0.985, hspace=0.05,\r\n wspace=0.06)\r\n ax = plt.subplot(gs[0])\r\n ###############################################\r\n # Draw the polygons\r\n draw_map(fig, ax, coll)\r\n ###############################################\r\n # Add contours according to V-band image\r\n draw_contours(\"vband\", fig, ax)\r\n ###############################################\r\n for x, y, spec in zip(xs, ys, specs):\r\n ax.text(x, y, spec, fontsize=10)\r\n # Write labels\r\n xylabels(ax)\r\n ##############################################\r\n # Save the figure\r\n plt.show()\r\n plt.savefig(\"figs/find_chart.pdf\")\r\n return",
"def plot5(self):\n\n cond = ((self.ds.freq<32.6) & (self.ds.freq>17))\n freq = self.ds.freq[cond]\n power = self.ds.power[cond]\n\n # the modes for KIC 9205705\n m = pd.read_csv('/home/mxs191/Desktop/MathewSchofield/TRG/GetData/Modes/modes_9205705.csv')\n m1 = [17.65, 20.6, 23.7, 26.9, 30.1] # l=1\n\n plt.rc('font', size=18)\n fig, ax = plt.subplots()\n plt.plot(freq, power, zorder=1, alpha=0.4)\n\n # NOTE: annotate mode angular degrees\n plt.scatter(m['f0'].as_matrix(), np.full(len(m), 150000), c='k', zorder=2, s=80)\n plt.scatter(m['f2'].as_matrix(), np.full(len(m), 130000), c='mediumseagreen', zorder=2, s=80, marker='^')\n plt.scatter(m1, np.full(len(m1), 140000), c='grey', zorder=2, s=80, marker='v')\n\n # NOTE: plot envelope\n numax = info['numax'].as_matrix() # mu Hz\n env_width = 0.66 * numax**0.88\n plt.plot(freq, 40004*np.exp( -( (freq-24.7)**2 / (2*7.**2) ) ), c='k', linestyle='--')\n\n # NOTE: annotate envelope\n style = dict(size=16, color='k')\n ax.text(24.1, 49167, r\"$\\nu_{\\rm max}$\", color='k', size=18)\n ax.text(24.1, 20994, r\"$\\Gamma_{\\rm env}$\", color='k', size=18)\n ax.text(23, 162944, r\"$\\Delta \\nu$\", **style)\n plt.annotate(s='', xy=(25.3, 158610), xytext=(21.91, 158610),\n arrowprops=dict(arrowstyle='<->')) # dnu\n plt.annotate(s='', xy=((24.7-env_width/2.), 15861), xytext=((24.7+env_width/2.), 15861),\n arrowprops=dict(arrowstyle='<->')) # env width\n\n ax.set_xlabel(r'Frequency ($\\rm \\mu Hz$)')\n ax.set_ylabel(r'PSD ($\\rm ppm^{2} \\, \\mu Hz^{-1}$)')\n plt.xlim(17, 32.6)\n plt.ylim(17, 195181)\n plt.tight_layout()\n plt.show()\n fig.savefig(os.getcwd() + '/DetTest1_plots/Plot5_ps_' + str(self.ds.epic) + '.pdf')",
"def plot_range_scan(self, x, scan, max_range=None):\n N = len(scan)\n t = np.linspace(0, 2*np.pi, N)\n \n plt.subplot(121)\n self.plot()\n plt.title(\"Ground Truth Position\")\n plt.scatter([x[0]], [x[1]])\n plt.plot([x[0], x[0]+2], [x[1], x[1]], 'C0')\n plt.plot([x[0], x[0]], [x[1], x[1]+2], 'C1')\n plt.subplot(122)\n plt.scatter([0], [0])\n plt.plot(scan*np.cos(t), scan*np.sin(t))\n if max_range:\n plt.xlim([-max_range, max_range])\n plt.ylim([-max_range, max_range])\n plt.title(\"Range Scan\")",
"def app_SN_animated_gradient_plot(self):\n print('this option is yet to be implemented')",
"def plot(self):\n\n # initialize outside the loop to avoid memory leak\n\n plot_a = None\n\n # initial plotting scales\n vmin = 0\n vmax = 0\n pmin = 0\n pmax = 0\n\n sr = self.dio.get_properties(self.channel)['samples_per_second']\n\n if self.control.verbose:\n print 'sample rate: ', sr\n\n # initial time info\n display_lag = 60\n b = self.dio.get_bounds(self.channel)\n\n if self.control.verbose:\n print 'data bounds: ', b\n\n if self.control.start:\n dtst0 = dateutil.parser.parse(self.control.start)\n st0 = (dtst0 - datetime.datetime(1970, 1,\n 1, tzinfo=pytz.utc)).total_seconds()\n st0 = int(st0 * sr)\n else:\n st0 = int(b[0])\n\n if self.control.end:\n dtst0 = dateutil.parser.parse(self.control.end)\n et0 = (dtst0 - datetime.datetime(1970, 1,\n 1, tzinfo=pytz.utc)).total_seconds()\n et0 = int(et0 * sr)\n else:\n et0 = int(b[1])\n\n if self.control.verbose:\n\n print 'start sample st0: ', st0\n print 'end sample et0: ', et0\n\n blocks = self.control.bins * self.control.frames\n\n samples_per_stripe = self.control.num_fft * \\\n self.control.integration * self.control.decimation\n total_samples = blocks * samples_per_stripe\n\n if total_samples > (et0 - st0):\n print 'Insufficient samples for %d samples per stripe and %d blocks between %ld and %ld' % (samples_per_stripe, blocks, st0, et0)\n return\n\n stripe_stride = (et0 - st0) / blocks\n\n bin_stride = stripe_stride / self.control.bins\n\n start_sample = st0\n\n print 'first ', start_sample\n\n # get metadata\n # this could be done better to ensure we catch frequency or sample rate\n # changes\n mdt = self.dio.read_metadata(st0, et0, self.channel)\n try:\n md = mdt[mdt.keys()[0]]\n cfreq = md['center_frequencies'].ravel()[self.sub_channel]\n except (IndexError, KeyError):\n cfreq = 0.0\n\n if self.control.verbose:\n print 'processing info : ', self.control.frames, self.control.bins, samples_per_stripe, bin_stride\n\n for p in numpy.arange(self.control.frames):\n sti_psd_data = numpy.zeros(\n [self.control.num_fft, self.control.bins], numpy.float)\n sti_times = numpy.zeros([self.control.bins], numpy.complex128)\n\n for b in numpy.arange(self.control.bins):\n\n if self.control.verbose:\n print 'read vector :', self.channel, start_sample, samples_per_stripe\n\n d_vec = self.dio.read_vector(\n start_sample, samples_per_stripe, self.channel)\n data = d_vec[:, self.sub_channel]\n\n if self.control.decimation > 1:\n data = scipy.signal.decimate(data, self.control.decimation)\n sample_freq = sr / self.control.decimation\n else:\n sample_freq = sr\n\n if self.control.mean:\n detrend_fn = matplotlib.mlab.detrend_mean\n else:\n detrend_fn = matplotlib.mlab.detrend_none\n\n try:\n psd_data, freq_axis = matplotlib.mlab.psd(\n data, NFFT=self.control.num_fft, Fs=float(sample_freq), detrend=detrend_fn, scale_by_freq=False)\n except:\n traceback.print_exc(file=sys.stdout)\n\n sti_psd_data[:, b] = numpy.real(\n 10.0 * numpy.log10(numpy.abs(psd_data) + 1E-12))\n\n sti_times[b] = start_sample / sr\n\n start_sample += stripe_stride\n\n # Now Plot the Data\n ax = self.subplots[p]\n\n # determine image x-y extent\n extent = (\n 0,\n self.control.bins,\n numpy.min(freq_axis) / 1e3,\n numpy.max(freq_axis) / 1e3,\n )\n\n # determine image color extent in log scale units\n Pss = sti_psd_data\n vmin = numpy.real(numpy.median(Pss) - 6.0)\n vmax = numpy.real(numpy.median(\n Pss) + (numpy.max(Pss) - numpy.median(Pss)) * 0.61803398875 + 50.0)\n\n if self.control.zaxis:\n vmin = int(string.split(self.control.zaxis, ':')[0])\n vmax = 
int(string.split(self.control.zaxis, ':')[1])\n else:\n vmin = numpy.real(numpy.median(Pss) - 6.0)\n vmax = numpy.real(numpy.median(\n Pss) + (numpy.max(Pss) - numpy.median(Pss)) * 0.61803398875 + 50.0)\n\n im = ax.imshow(sti_psd_data, cmap='jet', origin='lower', extent=extent,\n interpolation='nearest', vmin=vmin, vmax=vmax, aspect='auto')\n\n ax.set_ylabel('f (kHz)', fontsize=8)\n\n # plot dates\n\n tick_spacing = numpy.arange(\n self.control.bins / 8, self.control.bins, self.control.bins / 8)\n ax.set_xticks(tick_spacing)\n tick_labels = []\n\n for s in tick_spacing:\n tick_time = sti_times[s]\n\n if tick_time == 0:\n tick_string = ''\n else:\n gm_tick_time = time.gmtime(numpy.real(tick_time))\n tick_string = '%02d:%02d:%02d' % (\n gm_tick_time[3], gm_tick_time[4], gm_tick_time[5])\n tick_labels.append(tick_string)\n\n ax.set_xticklabels(tick_labels)\n\n # set the font sizes\n tl = ax.get_xticklabels()\n\n for tk in tl:\n tk.set_size(8)\n del tl\n\n tl = ax.get_yticklabels()\n\n for tk in tl:\n tk.set_size(8)\n del tl\n\n print 'last ', start_sample\n\n # create a time stamp\n start_time = st0 / sr\n srt_time = time.gmtime(start_time)\n sub_second = int(round((start_time - int(start_time)) * 100))\n\n timestamp = \"%d-%02d-%02d %02d:%02d:%02d.%02d UT\" % (srt_time[0], srt_time[\n 1], srt_time[2], srt_time[3], srt_time[4], srt_time[5], sub_second)\n\n self.f.suptitle('%s %s %4.2f MHz (%s)' % (\n self.control.title, timestamp, cfreq / 1E6, self.control.path), fontsize=10)\n\n # ax.legend(fontsize=8)\n ax.set_xlabel('time (UTC)', fontsize=8)\n\n # fixup ticks\n\n tl = ax.get_xticklabels()\n for tk in tl:\n tk.set_size(8)\n del tl\n tl = ax.get_yticklabels()\n for tk in tl:\n tk.set_size(8)\n del tl\n\n self.gridspec.update()\n\n self.f.tight_layout()\n\n self.f.subplots_adjust(top=0.95, right=0.88)\n cax = self.f.add_axes([0.9, 0.12, 0.02, 0.80])\n self.f.colorbar(im, cax=cax)\n if self.control.outname:\n fname, ext = os.path.splitext(self.control.outname)\n if ext == '':\n ext = '.png'\n print \"Save plot as {}\".format(fname+ext)\n matplotlib.pyplot.savefig(fname+ext)\n if self.control.appear or not self.control.outname:\n print \"Show plot\"\n matplotlib.pyplot.show()",
"def refractive_index(self):\n wd = np.arange(80,820,10)\n nd = self.boundary.imat.refractive_index(wd) \n\n plt.plot(wd, nd)\n\n return wd, nd",
"def apolco(a,minfeh=-3,out=None) :\n apo=np.where((a['TELESCOPE'] == 'apo25m') & (a['RV_FEH']>minfeh) )[0]\n fig=vscat(a[apo],marker='o',density=True)\n lco=np.where((a['TELESCOPE'] == 'lco25m') & (a['RV_FEH']>minfeh) )[0]\n vscat(a[lco],fig=fig,ls=':',marker='+',density=True)\n if out is not None : \n fig[0].savefig(out+'_1.png')\n plt.close()\n i1,i2=match.match(a['APOGEE_ID'][apo],a['APOGEE_ID'][lco])\n print('matched {:d} stars'.format(len(i1)))\n fig,ax=plots.multi(1,2)\n #plots.plotp(ax[0,0],a['SNR'][apo[i1]],a['VHELIO_AVG'][apo[i1]]-a['VHELIO_AVG'][lco[i2]],yr=[-3,3],yt=r'$\\Delta$ VHELIO_AVG',xt='S/N')\n #plots.plotp(ax[0,1],a['SNR'][apo[i1]],a['VHELIO_AVG'][apo[i1]]-a['VHELIO_AVG'][lco[i2]],yr=[-50,50],yt=r'$\\Delta$ VHELIO_AVG',xt='S/N')\n #plots.plotp(ax[1,0],a['SNR'][apo[i1]],a['VSCATTER'][apo[i1]]-a['VSCATTER'][lco[i2]],yr=[-0.5,0.5],yt=r'$\\Delta$ VSCATTER',xt='S/N')\n #plots.plotp(ax[1,1],a['SNR'][apo[i1]],a['VSCATTER'][apo[i1]]-a['VSCATTER'][lco[i2]],yr=[-5,5],yt=r'$\\Delta$ VSCATTER',xt='S/N')\n ax[0].hist(a['VHELIO_AVG'][apo[i1]]-a['VHELIO_AVG'][lco[i2]],bins=np.arange(-0.5,0.5,0.02),histtype='step')\n ax[0].set_xlabel(r'$\\Delta$ VHELIO_AVG')\n ax[1].hist(a['VSCATTER'][apo[i1]]-a['VSCATTER'][lco[i2]],bins=np.arange(-0.25,0.25,0.01),histtype='step')\n ax[1].set_xlabel(r'$\\Delta$ VSCATTER')\n if out is not None : \n fig.savefig(out+'_2.png')\n plt.close()",
"def traces(mndata,Params,srate,imagepath):\n\t#plot high gamma traces\n\t#data should be bandpassed (todo)\n\t#resample to srate\n\tst = resample(Params[\"st\"],srate)\n\ten = resample(Params[\"en\"],srate)\n\tbl_en = resample(Params[\"bl_en\"],srate)\n\tbl_st = resample(Params[\"bl_st\"],srate)\n\tplot_tp = resample(Params[\"plot\"],srate)\n\tcue = resample(500,srate)\n\t\n\tcolors = ['red','orange','green','blue']\n\tx = np.array(range(st,en+1))\n\tf, (ax,ax2) = plt.subplots(1,2, sharex = False)\n\tax.axhline(y = 0,color = 'k',linewidth=2)\n\tax.axvline(x = 0,color='k',linewidth=2)\n\tax.axvline(x = cue,color = 'gray',linewidth = 2)\n\tax.axvline(x = cue+cue,color = 'gray',linewidth = 2)\n\tax.axvspan(cue, cue+cue, facecolor='0.5', alpha=0.25,label = 'cue')\n\n\tfor j in range(len(Params[\"conditions\"])):\n\t\tcondition = Params['conditions'][j]\n\t\ty = mndata[condition]['data']\n\t\tax.plot(x,y, label = condition,linewidth = 2,color = colors[j])\n\t\n\tax.set_ylim((-30,85))\n\tax.set_xlim(st,en)\n\tax.legend()\n\tax.xaxis.set_ticklabels(['', '0', '','500', '', '1000', '', '1500', '', '2000','','2500','', '3000'],minor=False)\n\tax.xaxis.set_ticks(range(st,en,plot_tp))\n\n\tax.set_xlabel(\"time (ms)\")\n\tax.set_ylabel(\"% change baseline\")\n\tax.set_title('Analytic Amplitude - High Gamma (70-150Hz)', fontsize = 18)\n\n\t#plot brain with elec location\n\t#brain = plt.imread(imagepath)\n\t#aa = pylab.mean(brain,2)\n\t#ax2.imshow(aa)\n\t#a2.gray()\n\n\t#brain = Image.open(imagepath)\n\t#ax2.set_axis_off()\n\t#im = plt.imshow(brain, origin = 'lower')\n\n\t#brain = _png.read_png(imagepath)\n\t#imagebox = OffsetImage(brain,zoom =5)\n\t#ab = AnnotationBbox(imagebox,)\n\n\tim = Image.open(imagepath)\n\tax2.imshow(im,aspect = 'auto',origin = 'lower')\n\tax2.set_xlim((0,750))\n\tax2.set_title('Electrode Location',fontsize = 18)\n\n\n\n\treturn f, (ax, ax2)",
"def mwion_scan():\n fig, ax = plt.subplots()\n # Data from 2018-09-27, using the SFIP\n fname = \"4_mwion_blnk.txt\" # -180 GHz\n folder = os.path.join(\"..\", \"2018-09-29\")\n fname = os.path.join(folder, fname)\n data = pd.read_csv(fname, sep=\"\\t\", comment=\"#\")\n data['r'] = data['s1']/data['s2']\n data['f'] = np.power(10, data['d']/20) # field equivalent\n data.sort_values(by='f', inplace=True)\n data.plot(x='f', y='r', marker='v', ax=ax, label=\"-180 GHz\")\n return",
"def n27_and_sidebands():\n fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(4.5, 4))\n # n=26 through n=29\n folder = os.path.join(\"..\", \"..\", \"2018-09-06\")\n fname = \"1_dye_fscan.txt\"\n fname = os.path.join(folder, fname)\n data = pmu.fscan_import(fname)\n ax.axhline(0, color='grey')\n data.plot(x='fpoly', y='sig', label=\"MW Off\", c='k', ax=ax)\n # sidebands\n folder = os.path.join(\"..\", \"..\", \"2018-09-09\")\n fname = \"1_freq_dye.txt\"\n fname = os.path.join(folder, fname)\n data = pmu.fscan_import(fname)\n data['asig'] = data['sig'] - 0.3\n ax.axhline(-0.3, color='grey')\n data.plot(x='fpoly', y='asig', label=\"MW On\", c='k', ax=ax)\n # pretty figure\n ax.legend().remove()\n ax.set_ylabel(r\"$e^-$ Signal\")\n ax.set_yticks([])\n ax.set_xlabel(\"Frequency (GHz from Limit)\")\n ax.set_xticks([-4863, -4511, -4195, -3908])\n ax.text(-4400, -0.15, \"MW On\")\n ax.text(-4400, 0.3, \"MW Off\")\n # save\n fig.tight_layout()\n fig.savefig(\"n27_and_sidebands.pdf\")\n return",
"def uninformativePlot(self):\n\n self.initPlotY()\n for iPlot in range(len(self.plotFineX)):\n thisX = self.plotFineX[iPlot]\n\n self.plotFineY[iPlot] = thisX*0. + 1.",
"def vscat(a,fig=None,ls=None,marker='o',nmin=2,mhmin=-3,density=False,out=None) :\n if fig == None : fig,ax=plots.multi(4,6,hspace=0.001,wspace=0.4,figsize=(12,8))\n else : fig,ax=fig\n tbins=[3000,3500,4000,4500,5500,8000,30000] \n hbins=[8,11,12,13,15]\n try: snr = a['SNREV']\n except: snr=a['SNR']\n j=np.where(snr > 300) [0]\n snr[j] = 300\n for i in range(len(tbins)-1) :\n ax[i,0].text(0.9,0.9,'{:d}<=RV_TEFF<{:d}'.format(tbins[i],tbins[i+1]),ha='right',transform=ax[i,0].transAxes,fontsize=8)\n for j in range(len(hbins)-1) :\n ax[0,j].set_title('{:d}<=H<{:d}'.format(hbins[j],hbins[j+1]))\n gd = np.where((a['RV_TEFF']>=tbins[i]) & (a['RV_TEFF']<tbins[i+1]) &\n (a['H']>=hbins[j]) & (a['H']<hbins[j+1]) &\n (a['NVISITS']>nmin) & (a['RV_FEH']>mhmin) & (a['VSCATTER'] > 0)) [0]\n print(tbins[i],tbins[i+1],hbins[j],hbins[j+1],nmin,len(gd))\n try :\n #plots.plotc(ax[i,2],snr[gd],a['VSCATTER'][gd],a['RV_FEH'][gd],marker=marker,xr=[0,310],yr=[0,1],xt='S/N',yt='VSCATTER')\n ax[i,j].hist(a['VSCATTER'][gd],bins=np.arange(0,1,0.01),ls=ls,histtype='step',color=colors[j],normed=density)\n ax[i,j].set_xlabel('VSCATTER (km/s)')\n ax[i,j].plot([0.1,0.1],ax[i,j].get_ylim())\n #ax[i,1].hist(a['VSCATTER'][gd],bins=np.arange(0,1,0.01),histtype='step',cumulative=True,normed=True,ls=ls,color=colors[j])\n #ax[i,1].set_xlabel('VSCATTER')\n except : pass\n\n if out is not None : \n fig.savefig(out+'.png')\n plt.close()\n\n fig.suptitle('NVISITS>{:d} [M/H]>{:6.2f}'.format(nmin,mhmin))\n return fig,ax",
"def plotCombinedIncludingOceanFloors(self):\n self.CombinedPlotHelper(minc=70,maxc=170,num=50)",
"def _hold_bounds(self):\n adc_channel = self.graph_renderer.channels[0]\n if self.sx2 > adc_channel.size():\n self.anchored = True\n\n if self.anchored:\n # anchor right side of the window to the last graph sample. so the graph always animates, grows out from\n # the right side of the window. (anchor sx2 to adc_channel.size())\n dx = self.sx2 - adc_channel.size()\n dxw = self.wsx2 - adc_channel.size()\n self.sx1 -= dx\n self.sx2 -= dx\n self.wsx1 -= dxw\n self.wsx2 -= dxw\n\n # eliminate integer overflow problems. only allow indices smaller than a 32bit integer value. and then divide\n # it by four just to be sure.. maybe it's not necessary, but maybe there are some other tricks used in the\n # graph rendering..\n bound = 0xffffffff / 4\n # hmm. this allows only 12 days of data with ~960Hz. time to go 64bit?\n self.sx1 = max(self.sx1, -bound)\n self.sy1 = max(self.sy1, -bound)\n self.sx1 = min(self.sx1, bound)\n self.sy1 = min(self.sy1, bound)\n self.sx2 = max(self.sx2, -bound)\n self.sy2 = max(self.sy2, -bound)\n self.sx2 = min(self.sx2, bound)\n self.sy2 = min(self.sy2, bound)\n self.wsx1 = max(self.wsx1, -bound)\n self.wsy1 = max(self.wsy1, -bound)\n self.wsx1 = min(self.wsx1, bound)\n self.wsy1 = min(self.wsy1, bound)\n self.wsx2 = max(self.wsx2, -bound)\n self.wsy2 = max(self.wsy2, -bound)\n self.wsx2 = min(self.wsx2, bound)\n self.wsy2 = min(self.wsy2, bound)\n\n # limit horizontal zoom to 2 samples. can't zoom in anymore if less than one sample stays on screen.\n # don't have time to implement and test line segment cutting, if one sample is outside the window, and another\n # is inside.\n if self.wsx2 - self.wsx1 < 2.:\n self.wsx2 = self.wsx1 + 2.\n if self.sx2 - self.sx1 < 2.:\n self.sx2 = self.sx1 + 2.\n\n #\n # limit vertical movement and vertical zoom\n #\n\n val_min = adc_channel.value_min\n val_max = adc_channel.value_max\n\n # allow offset of this percent/100 of the screen\n overlap = .30\n\n # top of the screen has smaller sample values than bottom of the screen. inverted graph.\n # sy1 is top pixel, sy2 bottom. bottom-left coordinat is (0, 0)\n if self.sy1 < self.sy2:\n val_top = val_min + (self.wsy1 - self.wsy2) * overlap\n val_bottom = val_max - (self.wsy1 - self.wsy2) * overlap\n if self.wsy1 < val_top:\n self.wsy2 -= self.wsy1 - val_top\n self.wsy1 = val_top\n if self.wsy2 > val_bottom:\n self.wsy1 += val_bottom - self.wsy2\n self.wsy2 = val_bottom\n if self.wsy1 < val_top:\n self.wsy1 = val_top\n if self.wsy2 > val_bottom:\n self.wsy2 = val_bottom\n else:\n val_bottom = val_min - (self.wsy1 - self.wsy2) * overlap\n val_top = val_max + (self.wsy1 - self.wsy2) * overlap\n if self.wsy1 > val_top:\n self.wsy2 -= self.wsy1 - val_top\n self.wsy1 = val_top\n if self.wsy2 < val_bottom:\n self.wsy1 += val_bottom - self.wsy2\n self.wsy2 = val_bottom\n if self.wsy1 > val_top:\n self.wsy1 = val_top\n if self.wsy2 < val_bottom:\n self.wsy2 = val_bottom",
"def plot_slice(self,res):\n x = np.linspace(0,1,res)\n y = np.linspace(0,1,res)\n X,Y = np.meshgrid(x,y)\n plt.figure()\n ax = plt.axes(projection = '3d')\n ax.plot_surface(X,Y,abs(self.psi)[:,:,math.floor(res/2)])\n plt.show()",
"def plot_path_loss_experimental(fig_name):\n plt.figure()\n N = 26\n dist = 0.1 * np.arange(N) + 0.5\n pattern = r'(\\d{1,2}).csv$'\n dir1 = \"log/path loss/PIFA\"\n files = utils.walk_files(dir1)\n rssi_mean = np.zeros(26)\n for file in files:\n match_obj = re.search(pattern, file)\n rssi = utils.read_file_rssi(file, 4, correct_index=True)\n index = int(match_obj.groups()[0]) - 1\n rssi_mean[index] = np.mean(rssi['RSSI LEFT_ORIGIN'])\n plt.plot(dist[2:], rssi_mean[2:])\n plt.xlabel('Distance: m')\n plt.ylabel('RSSI: dBm')\n plt.title(fig_name)\n if not os.path.exists(dir_fig):\n os.makedirs(dir_fig)\n plt.savefig(dir_fig + '/' + fig_name + '.png')"
]
| [
"0.59074193",
"0.5813279",
"0.57293385",
"0.5697952",
"0.5669091",
"0.56296587",
"0.5561183",
"0.5508389",
"0.5485753",
"0.5481958",
"0.5460965",
"0.5353382",
"0.5347737",
"0.53302866",
"0.53153086",
"0.5289892",
"0.5282458",
"0.52795255",
"0.52742654",
"0.5266973",
"0.5266588",
"0.5266553",
"0.5261955",
"0.5253285",
"0.5251115",
"0.52461135",
"0.5234854",
"0.5223465",
"0.52140045",
"0.52007526"
]
| 0.6187088 | 0 |
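A minimal usage sketch for the plot_overscan_diff snippet in the record above. Everything here is an assumption added for illustration: the SimpleNamespace stand-ins for the image objects (only dev_index and dev_name are read by the function), the (n_devices, 16, 2100) overscan shape, and the matplotlib/numpy imports that the snippet relies on but does not show.

# Hypothetical driver for plot_overscan_diff; names and shapes are assumptions.
import numpy as np
import matplotlib.pyplot as plt          # the snippet uses plt at module level
import matplotlib.gridspec as gridspec   # and gridspec for the 3x3 layout
from types import SimpleNamespace

# Nine devices on the 3x3 grid the function lays out, 16 segments each,
# 2100 overscan columns per segment (matching the xlim used in the plot).
img = [SimpleNamespace(dev_index=i, dev_name='S-%02d' % i) for i in range(9)]
overscan = np.random.normal(1000.0, 5.0, size=(len(img), 16, 2100))

plot_overscan_diff(overscan, img, 'example_run', './')  # writes ./example_run_diff_spatial.png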
plot gains with respect to the reference gain, where the reference gain is given by its index => gains[gain_ref] | def plot_gains(gains, gain_ref, TITLES, OUT_DIR):
# print 'directory: %s' % OUT_DIR
# print 'TITLES:%s', TITLES
gain_ref_np = np.array(gains[gain_ref].gain)
ratios = []
for gain in gains:
gain_np = np.array(gain.gain)
dim = (min(gain_ref_np.shape[0], gain_np.shape[0]),
min(gain_ref_np.shape[1], gain_np.shape[1])
)
# print 'dim = ', dim
ratios.append(gain_np[0:dim[0], 0:dim[1]] / gain_ref_np[0:dim[0], 0:dim[1]])
# print 'Ratios = ', ratios
    rows = 2 * ((len(ratios) - 1) // 6 + 1)  # two rows of axes (one histogram pair) per group of six ratios
cmap = plt.get_cmap('gnuplot')
colors = [cmap(i) for i in np.linspace(0, 1, len(ratios))]
fig, axes = plt.subplots(nrows=rows, ncols=6)
fig.set_size_inches(20,20)
axfl = axes.flatten()
for i, ratio in enumerate(ratios):
# print 'Plotting %s', TITLES[i]
        j = (i // 6) * 12 + i % 6  # top-row axis; the wide-range histogram sits six axes below
ax = axfl[j]
ax2 = axfl[j+6]
ax.hist(np.reshape(ratio, -1), 20, range=(0.9, 1.1), facecolor=colors[i])
ax.set_title(TITLES[i], size=20)
ax2.hist(np.reshape(ratio, -1), 50, range=(0., 2.), facecolor=colors[i])
fig.suptitle("Gains with ref gain '%s'" % TITLES[gain_ref], y=0.95, size=25)
# fig.tight_layout()
plt.savefig(OUT_DIR + 'gain.png')
plt.close(fig) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def graphe_gains(res_alea, res_glou, res_glou_e, res_ucb):\n T = np.arange(res_alea.size)\n fig, ax = plt.subplots()\n ax.grid(True)\n plt.xlabel(\"T\")\n plt.ylabel(\"Gain\")\n \n ax.plot(T, res_alea, label = 'aléatoire') \n ax.plot(T, res_glou, label = 'glouton')\n ax.plot(T, res_glou_e, label = 'e-glouton')\n ax.plot(T, res_ucb, label = 'UCB')\n ax.legend(loc = \"upper left\")\n plt.title(\"Gains des 4 algorithmes par rapport à T\")",
"def plot_gain(df, outcome_col='y', treatment_col='w', treatment_effect_col='tau',\n steps=100, normalize=False, random_seed=42, figsize=(8, 8)):\n\n cumgain = get_cumgain(df, outcome_col, treatment_col, treatment_effect_col, steps, normalize, random_seed)\n\n cumgain.plot(figsize=figsize)\n plt.xlabel('Fraction of Population')\n plt.ylabel('Cumulative Gain')",
"def draw_bonus_loss(loss):\n f, ax = plt.subplots()\n vertices = np.arange(10, 50)\n ax.plot(vertices, loss[10:], 'b', label='Loss')\n plt.xlabel('Rounds')\n plt.ylabel('Hinge Loss')\n plt.title('Hinge Loss: l = 10, m = 20, n = 40')\n plt.legend(loc='upper left')\n plt.grid(True)\n plt.show()",
"def plot_cumulative_gain(y_true, y_probas, title='Cumulative Gains Curve',\n ax=None, figsize=None, title_fontsize=\"large\",\n text_fontsize=\"medium\"):\n y_true = np.array(y_true)\n y_probas = np.array(y_probas)\n\n classes = np.unique(y_true)\n if len(classes) != 2:\n raise ValueError('Cannot calculate Cumulative Gains for data with '\n '{} category/ies'.format(len(classes)))\n\n # Compute Cumulative Gain Curves\n percentages, gains1 = cumulative_gain_curve(y_true, y_probas[:, 0],\n classes[0])\n percentages, gains2 = cumulative_gain_curve(y_true, y_probas[:, 1],\n classes[1])\n percentages, gains3 = cumulative_gain_curve(y_true, y_true,\n classes[0])\n percentages, gains4 = cumulative_gain_curve(y_true, y_true,\n classes[1])\n\n if ax is None:\n fig, ax = plt.subplots(1, 1, figsize=figsize)\n\n ax.set_title(title, fontsize=title_fontsize)\n\n ax.plot(percentages, gains1, lw=3, label='Class {} (pred)'.format(classes[0]))\n ax.plot(percentages, gains2, lw=3, label='Class {} (pred)'.format(classes[1]))\n #ax.plot(percentages, gains3, lw=3, label='Class {} (true)'.format(classes[0]))\n ax.plot(percentages, gains4, lw=3, label='Class {} (true)'.format(classes[1]))\n\n ax.set_xlim([0.0, 1.0])\n ax.set_ylim([0.0, 1.1])\n\n ax.plot([0, 1], [0, 1], 'k--', lw=2, label='Baseline')\n\n ax.set_xlabel('Percentage of sample', fontsize=text_fontsize)\n ax.set_ylabel('Gain', fontsize=text_fontsize)\n ax.tick_params(labelsize=text_fontsize)\n ax.grid('on')\n ax.legend(loc='lower right', fontsize=text_fontsize)\n plt.show()\n return ax",
"def plot_graph(costs):\n plt.figure()\n for i in range(len(np.array(costs).T)):\n plt.plot(np.array(costs)[:, i])\n plt.title(\"Costs\")\n plt.show()",
"def gain(self, value: int):\n self._gain = value",
"def produce_cgchart(ytrue, ypred):\n\n yprobas = np.append((1-ypred).reshape(-1,1), ypred.reshape(-1,1), axis=1)\n # 0's and 1's\n print(yprobas.shape)\n areas = plot_cumulative_gain(ytrue, yprobas)",
"def decay_rate_plot(Lmax=1000, p1=database['K+'], p=75,\r\n target_rate=53957518.001, decay_region=65):\r\n L_range = np.linspace(0, Lmax, 1000)\r\n rates, new_rates = [], []\r\n for L in L_range:\r\n rates.append(decay_rate(L, p1, p, target_rate, decay_region)*1e-6)\r\n new_rates.append(decay_rate(L, p1, p, target_rate*4, decay_region)*1e-6)\r\n fig = plt.figure(figsize=[10, 5])\r\n ax = fig.add_subplot(1, 1, 1)\r\n ax.plot(L_range, rates, 'g', lw=2, label='current beam intensity')\r\n ax.plot(L_range, new_rates, 'b', lw=2, label='proposed beam intensity')\r\n ax.set_xlim(0, Lmax)\r\n ax.set_ylim(0)\r\n ax.set_xlabel('Distance between target and decay region / m', fontsize=20)\r\n text = f'{p1.name} decays in decay region / MHz'\r\n ax.set_ylabel(text, fontsize=20)\r\n ax.axvline(102.4, color='k', lw=2, label='current setup')\r\n ax.legend(fontsize=20)\r\n plt.show()\r\n return",
"def plot_graph(error_rates, avg_hits):\n plt.xlabel(\"Error rates (σ)\")\n plt.ylabel(\"Average pins hit\")\n plt.plot(error_rates, avg_hits)\n plt.show()",
"def plot_costs(self, threshold=0):\n epochs_range = np.arange(threshold, len(self.costs), 1)\n plt.plot(epochs_range, self.costs[threshold:], color='green', marker='o')\n plt.title('Cost function plot. Eta={:.2f} Lambda={:2.2f}'.format(self.eta, self.lambda_r))\n plt.xlabel('Epochs')\n plt.ylabel('Cost')\n plt.grid(True)\n plt.show()",
"def plotModel(self, name):\n g = Digraph('G', filename = name + '.gv')\n\n for prevChord in self.chain:\n for chord in self.chain[prevChord]:\n g.edge(prevChord, chord, label=\"%.2f\" % self.chain[prevChord][chord])\n\n g.view()",
"def plot5(self):\n\n cond = ((self.ds.freq<32.6) & (self.ds.freq>17))\n freq = self.ds.freq[cond]\n power = self.ds.power[cond]\n\n # the modes for KIC 9205705\n m = pd.read_csv('/home/mxs191/Desktop/MathewSchofield/TRG/GetData/Modes/modes_9205705.csv')\n m1 = [17.65, 20.6, 23.7, 26.9, 30.1] # l=1\n\n plt.rc('font', size=18)\n fig, ax = plt.subplots()\n plt.plot(freq, power, zorder=1, alpha=0.4)\n\n # NOTE: annotate mode angular degrees\n plt.scatter(m['f0'].as_matrix(), np.full(len(m), 150000), c='k', zorder=2, s=80)\n plt.scatter(m['f2'].as_matrix(), np.full(len(m), 130000), c='mediumseagreen', zorder=2, s=80, marker='^')\n plt.scatter(m1, np.full(len(m1), 140000), c='grey', zorder=2, s=80, marker='v')\n\n # NOTE: plot envelope\n numax = info['numax'].as_matrix() # mu Hz\n env_width = 0.66 * numax**0.88\n plt.plot(freq, 40004*np.exp( -( (freq-24.7)**2 / (2*7.**2) ) ), c='k', linestyle='--')\n\n # NOTE: annotate envelope\n style = dict(size=16, color='k')\n ax.text(24.1, 49167, r\"$\\nu_{\\rm max}$\", color='k', size=18)\n ax.text(24.1, 20994, r\"$\\Gamma_{\\rm env}$\", color='k', size=18)\n ax.text(23, 162944, r\"$\\Delta \\nu$\", **style)\n plt.annotate(s='', xy=(25.3, 158610), xytext=(21.91, 158610),\n arrowprops=dict(arrowstyle='<->')) # dnu\n plt.annotate(s='', xy=((24.7-env_width/2.), 15861), xytext=((24.7+env_width/2.), 15861),\n arrowprops=dict(arrowstyle='<->')) # env width\n\n ax.set_xlabel(r'Frequency ($\\rm \\mu Hz$)')\n ax.set_ylabel(r'PSD ($\\rm ppm^{2} \\, \\mu Hz^{-1}$)')\n plt.xlim(17, 32.6)\n plt.ylim(17, 195181)\n plt.tight_layout()\n plt.show()\n fig.savefig(os.getcwd() + '/DetTest1_plots/Plot5_ps_' + str(self.ds.epic) + '.pdf')",
"def figure_10_12_b():\n xs = np.arange(-6,6,0.1)\n plt.plot(xs,sigmoid(xs))\n x=2.5\n plt.scatter(x,sigmoid(x))\n plt.plot(xs,logistic_lower_bound(xs,x))\n plt.show()",
"def gaindb(self, value):\n self._logger.debug(\"setting gain: %7.2f\", value)\n self._gaindb = value\n self._update()",
"def front_column_model_p_gain():",
"def decay_proportion_plot(Lmax=1000, p1=database['K+'], p=75, target_rate=53957518.001):\r\n L_range = np.linspace(0, 1000, 10000)\r\n prop = []\r\n for L in L_range:\r\n prop.append(decay_proportion(L, p1, p, target_rate))\r\n# charac_L = p*c*(p1.tau*1e-3/c)/p1.mass\r\n fig = plt.figure(figsize=[12, 3])\r\n ax = fig.add_subplot(1, 1, 1)\r\n ax.plot(L_range, prop, 'r', lw=2)\r\n ax.set_xlim(0, Lmax)\r\n ax.set_ylim(0)\r\n ax.set_xlabel('Target Distance', fontsize=20)\r\n ax.set_ylabel(r'$K^+$ flux', fontsize=20)\r\n# ax.xaxis.set_major_locator(plt.MultipleLocator(charac_L/4))\r\n# ax.xaxis.set_minor_locator(plt.MultipleLocator(charac_L/20))\r\n# ax.xaxis.set_major_formatter(plt.FuncFormatter(multiple_formatter_non_int(1, charac_L, 'L_{K^+}')))\r\n ax.set_xticks([0])\r\n ax.set_yticks([target_rate])\r\n ax.yaxis.set_major_locator(plt.MultipleLocator(target_rate/1))\r\n ax.yaxis.set_minor_locator(plt.MultipleLocator(target_rate/1))\r\n ax.yaxis.set_major_formatter(plt.FuncFormatter(multiple_formatter_non_int(1, target_rate, 'R_t')))\r\n ax.legend(fontsize=20)\r\n ax.minorticks_off()\r\n# ax.grid()\r\n plt.show()\r\n return",
"def plot_loss_curve(num_epochs, losses):\n plt.xlabel('Epochs')\n plt.ylabel('Loss') \n plt.title('Loss Curve') \n plt.plot(range(num_epochs), losses)\n plt.show()",
"def grb030329(ax, col, legend):\n z = 0.1686\n d = Planck15.luminosity_distance(z=z).cgs.value\n\n # LOW FREQUENCY\n\n # Berger: this is the best frequency to pick from this paper\n t = np.array(\n [0.58, 1.05, 2.65, 3.57, 4.76, 6.89, 7.68, 9.49, 11.90, \n 12.69, 14.87, 16.66, 18.72, 20.58, 25.70, 28.44, 31.51, \n 33.58, 36.52, 42.55, 44.55, 59.55, 66.53]) / (1+z)\n f = np.array(\n [3.50, 1.98, 8.50, 6.11, 9.68, 15.56, 12.55, 13.58, 17.70, \n 17.28, 19.15, 17.77, 15.92, 16.08, 15.34, 12.67, 13.55, \n 13.10, 10.64, 8.04, 8.68, 4.48, 4.92])\n nu = np.array([8.5E9]*len(f))\n\n # Van der Horst: best frequency is 2.3 GHz\n t = np.append(t, np.array([268.577, 306.753, 365.524, 420.168, 462.078, \n 583.683, 743.892, 984.163]) / (1+z))\n f = np.append(\n f, np.array([1613, 1389, 871, 933, 707, 543, 504, 318]) * 1E-3)\n nu = np.append(nu, np.array([2.3E9]*8))\n lum = plot_line(ax, d, t, nu*f, 'GRB030329', 'GRB', col, legend)\n ax.text(t[6]*1.05, lum[10]*1.05, 'GRB030329', fontsize=11,\n verticalalignment='bottom',\n horizontalalignment='left')",
"def plot(self, show=True):\n xs, ys = zip(*[(float(ix)/self.sample_rate, val)\n for ix, val in enumerate(self.samples)])\n plt.plot(xs, ys)\n if show:\n plt.show()",
"def plot_model_rates(class_name, model, ax):\n true_positives, totals = model.range_metrics[class_name]\n prob_rates = model.class_prob_rates[class_name]\n\n bins = np.arange(5)\n\n # color bars based on freq.\n # norm = plt.Normalize(0, max(totals))\n # colors = mpl.cm.Blues(norm(totals))\n\n ax.bar(bins, prob_rates, color=P_BAR_COLOR, edgecolor=BAR_EDGE_COLOR)\n ax.set_ylim(0, 1)\n for axis in ['top', 'bottom', 'left', 'right']:\n ax.spines[axis].set_linewidth(1.5)\n\n index = 0\n\n for xy in zip(np.arange(5), prob_rates):\n # Get class count of current index\n count = str(totals[index])\n loc = list(xy)\n # lower annotation, so its not out of the plot for large bars\n if loc[1] > .9:\n xy = tuple([loc[0], loc[1] - .1])\n y_val = xy[1]\n ax.annotate(count, xy=xy, textcoords='data', ha='center',\n va='bottom', fontsize=8)\n index += 1",
"def gain2dB(gain):\n dB = 20*math.log(gain)\n return dB",
"def plot_forgetting_curve(\n lags: Union[List, np.ndarray],\n forgetting_curve: np.ndarray,\n ax: plt.Axes = None,\n **kwargs,\n) -> None:\n if ax is None:\n fig, ax = plt.subplots()\n ax.plot(lags, forgetting_curve, **kwargs)\n ax.set_xlabel(\"$k$\")\n ax.set_ylabel(r\"$MC_k$\")",
"def traces(mndata,Params,srate,imagepath):\n\t#plot high gamma traces\n\t#data should be bandpassed (todo)\n\t#resample to srate\n\tst = resample(Params[\"st\"],srate)\n\ten = resample(Params[\"en\"],srate)\n\tbl_en = resample(Params[\"bl_en\"],srate)\n\tbl_st = resample(Params[\"bl_st\"],srate)\n\tplot_tp = resample(Params[\"plot\"],srate)\n\tcue = resample(500,srate)\n\t\n\tcolors = ['red','orange','green','blue']\n\tx = np.array(range(st,en+1))\n\tf, (ax,ax2) = plt.subplots(1,2, sharex = False)\n\tax.axhline(y = 0,color = 'k',linewidth=2)\n\tax.axvline(x = 0,color='k',linewidth=2)\n\tax.axvline(x = cue,color = 'gray',linewidth = 2)\n\tax.axvline(x = cue+cue,color = 'gray',linewidth = 2)\n\tax.axvspan(cue, cue+cue, facecolor='0.5', alpha=0.25,label = 'cue')\n\n\tfor j in range(len(Params[\"conditions\"])):\n\t\tcondition = Params['conditions'][j]\n\t\ty = mndata[condition]['data']\n\t\tax.plot(x,y, label = condition,linewidth = 2,color = colors[j])\n\t\n\tax.set_ylim((-30,85))\n\tax.set_xlim(st,en)\n\tax.legend()\n\tax.xaxis.set_ticklabels(['', '0', '','500', '', '1000', '', '1500', '', '2000','','2500','', '3000'],minor=False)\n\tax.xaxis.set_ticks(range(st,en,plot_tp))\n\n\tax.set_xlabel(\"time (ms)\")\n\tax.set_ylabel(\"% change baseline\")\n\tax.set_title('Analytic Amplitude - High Gamma (70-150Hz)', fontsize = 18)\n\n\t#plot brain with elec location\n\t#brain = plt.imread(imagepath)\n\t#aa = pylab.mean(brain,2)\n\t#ax2.imshow(aa)\n\t#a2.gray()\n\n\t#brain = Image.open(imagepath)\n\t#ax2.set_axis_off()\n\t#im = plt.imshow(brain, origin = 'lower')\n\n\t#brain = _png.read_png(imagepath)\n\t#imagebox = OffsetImage(brain,zoom =5)\n\t#ab = AnnotationBbox(imagebox,)\n\n\tim = Image.open(imagepath)\n\tax2.imshow(im,aspect = 'auto',origin = 'lower')\n\tax2.set_xlim((0,750))\n\tax2.set_title('Electrode Location',fontsize = 18)\n\n\n\n\treturn f, (ax, ax2)",
"def set_gain(self, *args):\n return _uhd_swig.usrp_sink_set_gain(self, *args)",
"def plot_precision_recall_curve(precisions, recalls):\n # No need to edit this code.\n plt.figure(figsize=(20, 20))\n plt.plot(recalls, precisions)\n plt.xlabel(\"Recall\")\n plt.ylabel(\"Precision\")\n plt.xlim([0.8, 1.0])\n plt.ylim([0.8, 1.0])\n plt.savefig(\"precision_recall_curve.png\")",
"def gain(self):\r\n \r\n for node in self.G.nodes():\r\n # Get number of nodes connected on same and other partition\r\n movForce, retForce = self.nodeForces(node)\r\n nodeGain = movForce-retForce\r\n\r\n #Fill list of Nodes with gains\r\n self.gainOrder.append((nodeGain,node))\r\n \r\n self.gainOrder.sort(key=lambda r: r[0])\r\n self.keys = [r[1] for r in self.gainOrder]",
"def plot_cost(self):\n steps = np.arange(len(self.cost_values))\n plt.plot(steps, self.cost_values, '-o')\n plt.xlabel(\"Steps\")\n plt.ylabel(\"Cost value\")\n plt.title(\"Cost value per step using Gradient Descent\")\n plt.show()",
"def get_gains(self):\n return tuple([lib.is_SetHWGainFactor(self.hcam,0x8000+i,0)/100 for i in range(4)])",
"def graph_coherence(coherence_values):\n limit=50; start=5; step=5;\n x = range(start, limit, step)\n plt.plot(x, coherence_values)\n plt.xlabel(\"Num Topics\")\n plt.ylabel(\"Coherence score\")\n plt.legend((\"coherence_values\"), loc='best')\n plt.show()\n\n # Print the coherence scores \n for m, cv in zip(x, coherence_values):\n print(\"Num Topics =\", m, \" has Coherence Value of\", round(cv, 4))",
"def _plot_wires(ax, hot, gnd, v, **kw):\n #get x and y coordinates\n L = len(hot)\n x = np.array([c.x for c in hot + gnd])\n y = np.array([c.y for c in hot + gnd])\n #calculate the scaling factor\n scale = _fields_plots_xs_wireperc*max(np.absolute(v))/max(np.absolute(y))\n if('scale' in kw):\n if(kw['scale'] is False):\n scale = 1.0\n #plot\n if(hot):\n kw['H'].append(ax.plot(x[:L], scale*y[:L], 'ko')[0])\n kw['L'].append('Conductors')\n if(gnd):\n kw['H'].append(ax.plot(x[L:], scale*y[L:], 'o', color='gray')[0])\n kw['L'].append('Grounded Conductors')"
]
| [
"0.63260645",
"0.60909754",
"0.59331757",
"0.58492154",
"0.5663414",
"0.557176",
"0.5538274",
"0.54979277",
"0.5453396",
"0.54062104",
"0.53628033",
"0.5208242",
"0.51532966",
"0.515238",
"0.5149669",
"0.5138719",
"0.5089669",
"0.50747716",
"0.5045793",
"0.50385696",
"0.50305974",
"0.5022003",
"0.5010838",
"0.50080943",
"0.4997691",
"0.4994243",
"0.49908397",
"0.4981601",
"0.4976533",
"0.49649495"
]
| 0.68738127 | 0 |
Get an API event instance | def get_api_event(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def retrieve(cls, event_id):\n return Event(Requester.get(cls.endpoint + '/' + event_id))",
"def apigw_event():\n with open(\"events/event.json\") as json_file:\n return json.load(json_file)",
"def get_event(self):\r\n return self.events[0]",
"def event_object(self):\n return gevent.event.Event()",
"def event(self, event_id):\r\n return e.Event(self, event_id)",
"def get_one_event(cls, event_id):\n try:\n event = events_coll.find_one({\"_id\": ObjectId(event_id)})\n return cls(**event)\n except Exception as e:\n print(e)",
"def get_event(self, instance, feed=None):\n if feed is None:\n feed = self.feed\n if self.client is None:\n self.get_client()\n event_id = CalendarEvent.objects.get_event_id(instance, feed)\n try:\n event = self.client.events().get(calendarId=feed, eventId=event_id).execute()\n except Exception:\n event = None\n return event",
"def get_evg(request: Request) -> EvergreenApi:\n return request.app.state.evg_api",
"def event(self, id):\r\n return Event(self, id)",
"def get_event(self, uuid):\n return Event.deserialize(self._get_single('events', {'uuid': uuid}))",
"def get_event(self, eventid):\n return self.s.query(Event).get(eventid)",
"def event(self):\n return self.events[0]",
"def get_event():\n json_data = request.args or {}\n return make_response(jsonify({ \"data\" : Event.get_events(json_data)}))",
"def create_event() -> abc.Event:\n return get_asynclib().Event()",
"def get_event(self):\n return self.keys.events.get()",
"def getEvent(self, timeout=None):\n socks = self.poller.poll(timeout)\n if not socks:\n return\n msg = socks[0][0].recv()\n d = self.mh.unserialize(msg)\n e = Event.fromDict(d)\n if self.store:\n _id = self.store.addEvent(e)\n e.id = _id\n return e",
"def event(self) -> object:\n return self._event",
"def get_event(event_id):\n try:\n return Event.objects.get(id=event_id)\n except ObjectDoesNotExist:\n raise ObjectDoesNotFound(\n 'There is no event with id={}.'.format(event_id))",
"def get(self, request, group):\n event = group.get_latest_event()\n\n try:\n return client.get('/events/{}/'.format(event.id), request.user, request.auth)\n except client.ApiError as e:\n return Response(e.body, status=e.status)",
"def get(self, request):\n return self.serviceHandler.getEvent(request.data)",
"def event(self, event_name):\r\n return Event(self, event_name)",
"def get_event(self):\n # type: (...) -> Optional[Event]\n if self.type == \"event\" and self.payload.json is not None:\n return self.payload.json\n return None",
"def get_object(self, *args, **kwargs):\n\t\n #Setting the test_id\n\ttest_id = self.kwargs['test_id']\n try:\n return api.nova.server_get(self.request, test_id)\n except Exception:\n redirect = reverse(\"horizon:rally_dashboard:events:index\")\n msg = _('Unable to retrieve instance details.')\n exceptions.handle(self.request, msg, redirect=redirect)",
"def get_event(self, param):\n\n if param is None:\n return None\n if isinstance(param, str):\n url = self.build_url(\n self._endpoints.get('get_event').format(id=self.calendar_id,\n ide=param))\n params = None\n by_id = True\n else:\n url = self.build_url(\n self._endpoints.get('get_events').format(id=self.calendar_id))\n params = {'$top': 1}\n params.update(param.as_params())\n by_id = False\n\n response = self.con.get(url, params=params,\n headers={'Prefer': 'outlook.timezone=\"UTC\"'})\n if not response:\n return None\n\n if by_id:\n event = response.json()\n else:\n event = response.json().get('value', [])\n if event:\n event = event[0]\n else:\n return None\n return self.event_constructor(parent=self,\n **{self._cloud_data_key: event})",
"def get_one(self, id):\n rpc_ilog = objects.event_log.get_by_uuid(\n pecan.request.context, id)\n\n return EventLog.convert_with_links(rpc_ilog)",
"def fusion_api_get_events(self, uri=None, param='', api=None, headers=None):\n return self.event.get(uri=uri, api=api, headers=headers, param=param)",
"def test_get_event(self):\n event = Event(self.client, 123, {})\n\n self.assertEqual(event.action, \"ticket_create\")\n self.assertEqual(event.created, datetime(2018, 1, 1, 0, 1, 1))\n self.assertEqual(event.duration, 300.56)\n self.assertIsNotNone(event.entity)\n self.assertEqual(event.id, 123)\n self.assertEqual(event.message, \"None\")\n self.assertIsNone(event.percent_complete)\n self.assertIsNone(event.rate)\n self.assertTrue(event.read)\n self.assertIsNotNone(event.secondary_entity)\n self.assertTrue(event.seen)\n self.assertIsNone(event.status)\n self.assertIsNone(event.time_remaining)\n self.assertEqual(event.username, \"exampleUser\")",
"def get():\n return jsonify({'events': 'Events API'}), 200",
"def get_event(self):\n # struct inotify_event {\n # int wd;\n # uint32_t mask;\n # uint32_t cookie;\n # uint32_t len;\n # char * name; };\n buffer_len = len(self.__buffer)\n if buffer_len < 16:\n return\n wd, mask, cookie, length = struct.unpack('iIII', self.__buffer[0:16])\n\n if 16 + length <= buffer_len:\n name = self.__buffer[16:16+length].rstrip('\\0')\n self.__buffer = self.__buffer[16+length:]\n return Event(wd, mask, cookie, name)",
"def getevent(self, name):\n return self.events[name.lower()]"
]
| [
"0.7332259",
"0.6719658",
"0.66772777",
"0.6642309",
"0.6639862",
"0.656849",
"0.64558864",
"0.6429285",
"0.63894624",
"0.637635",
"0.63647616",
"0.6342903",
"0.6329512",
"0.6308655",
"0.6297398",
"0.6271047",
"0.6254106",
"0.6243177",
"0.62060595",
"0.6162659",
"0.6148124",
"0.6139616",
"0.61324716",
"0.61305404",
"0.612287",
"0.60903734",
"0.6062731",
"0.5999972",
"0.59901226",
"0.5987743"
]
| 0.80483943 | 0 |
Returns the next count number for the given metric/variant (rotates every few calls) | def _get_metric_count(cls, metric, variant, next=True):
counters = cls._metric_counters
key = '%s_%s' % (metric, variant)
try:
cls._metric_counters_lock.acquire()
value = counters.get(key, -1)
if next:
value = counters[key] = value + 1
return value
finally:
cls._metric_counters_lock.release() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def next(self):\n self.lock.acquire()\n self.count += self.step;\n result = self.count\n self.lock.release()\n return result",
"def next_num(cls):\r\n cls.num += 1\r\n return cls.num",
"def get_next(current):\n return 0.5 * (current + n / current)",
"def next_int(self):\n self.innovation_number += 1\n return self.innovation_number",
"def _get_counter(metric: str) -> int:\n if metric not in db:\n db[metric] = 0\n return db[metric]",
"def counter(self) -> int:",
"def counter(self) -> int:",
"def counter(self, value: int, /) -> None:",
"def _good_turing_new_c(self, count: int) -> float:\n next_count_index = count + 1\n next_count: Optional[float] = None\n if next_count_index not in self.count_map:\n # this happens when N_{c+1} is 0\n # this can make the total probability not equal to 1\n next_count = 0.\n else:\n next_count = float(self.count_map[next_count_index])\n\n new_count: Optional[float] = None\n new_count = (count + 1) * next_count / self.count_map[count]\n return new_count",
"def get_next_sample(self):",
"def fget(self):\n if not hasattr(self, \"_n\"):\n self._n = 0\n self._n += 1\n return self._n",
"def get_next(self) -> int:\n return self._current * self._step + self._offset",
"def counter(): # Local function\n nonlocal count\n if count < n:\n count += 1\n return count",
"def get_and_increment(name, counter=defaultdict(int)):\n n = counter[name]\n counter[name] = n + 1\n return n",
"def _get_next_sequence_number(self):\n cur = self._next_sequence_number\n self._next_sequence_number += 1\n return cur",
"def getNextOrderNum(cur,vID):\n orderNum = execute_query(cur,\"\"\"SELECT Count(*) FROM OpenTasks where vID = ?\"\"\", [vID])[0][0]\n orderNum = int(orderNum) + 1\n return orderNum",
"def updateCount(self, cardVal, nextVal, counts):\n higher, lower, tie = counts\n comp = dnUtil.compareValue(cardVal, nextVal)\n if comp == 0:\n tie -= 1\n elif comp < 0:\n higher -= 1\n else:\n lower -= 1\n return (higher, lower, tie)",
"def next ( num = 1 ) :\n return run ( num )",
"def next_num():\r\n CHModuleFactory.num += 1\r\n return CHModuleFactory.num",
"def next_num():\r\n CHModuleFactory.num += 1\r\n return CHModuleFactory.num",
"def _increment_counter(metric: str):\n if metric not in db:\n db[metric] = 0\n db[metric] += 1",
"def next_sequence_num(buf=[0]): # use list as default value to make sure it is\n # initialized only once\n val = buf[0]\n buf[0] += 1\n return val",
"def next(self) -> int:\n self.idx += 1\n return self.m[self.idx]",
"def next(self) -> int:\n self.idx += 1\n return self.m[self.idx]",
"def increment_counter(self) -> None:",
"def sample_count(self):",
"def increment(val):\n return coerce_to_int(val) + 1",
"def get_next(self) -> int:\n cur_next = self._bin_iter.get_next()\n\n return self._intvs.get_next(cur_next, self.even)",
"def count_one_round(self):\n\t\tself.round_count+=1\n\t\treturn self.round_count",
"def inc_count(self, metric, value, tags):\n self.increment(metric, value, tags=tags)\n self.increment('%s.count' % metric, tags=tags)"
]
| [
"0.63280904",
"0.6065394",
"0.59277976",
"0.5897081",
"0.5783465",
"0.57659507",
"0.57659507",
"0.5717719",
"0.5647151",
"0.5628614",
"0.55990857",
"0.55796415",
"0.5523331",
"0.5496951",
"0.5465766",
"0.54415905",
"0.5427532",
"0.54228556",
"0.54203236",
"0.54203236",
"0.54045165",
"0.5397315",
"0.53898364",
"0.53898364",
"0.53727096",
"0.53567886",
"0.53408873",
"0.5283877",
"0.52816564",
"0.5261361"
]
| 0.7852118 | 0 |
proc_df takes a data frame df and splits off the response variable, and changes the df into an entirely numeric dataframe. For each column of df which is not in skip_flds nor in ignore_flds, na values are replaced by the median value of the column. | def proc_df(df, y_fld=None, skip_flds=None, ignore_flds=None, do_scale=False, na_dict=None,
preproc_fn=None, max_n_cat=None, subset=None, mapper=None):
if not ignore_flds: ignore_flds=[]
if not skip_flds: skip_flds=[]
if subset: df = get_sample(df,subset)
else: df = df.copy()
ignored_flds = df.loc[:, ignore_flds]
df.drop(ignore_flds, axis=1, inplace=True)
if preproc_fn: preproc_fn(df)
if y_fld is None: y = None
else:
if not is_numeric_dtype(df[y_fld]): df[y_fld] = pd.Categorical(df[y_fld]).codes
y = df[y_fld].values
skip_flds += [y_fld]
df.drop(skip_flds, axis=1, inplace=True)
if na_dict is None: na_dict = {}
else: na_dict = na_dict.copy()
na_dict_initial = na_dict.copy()
for n,c in df.items(): na_dict = fix_missing(df, c, n, na_dict)
if len(na_dict_initial.keys()) > 0:
df.drop([a + '_na' for a in list(set(na_dict.keys()) - set(na_dict_initial.keys()))], axis=1, inplace=True)
if do_scale: mapper = scale_vars(df, mapper)
for n,c in df.items(): numericalize(df, c, n, max_n_cat)
df = pd.get_dummies(df, dummy_na=True)
df = pd.concat([ignored_flds, df], axis=1)
res = [df, y, na_dict]
if do_scale: res = res + [mapper]
return res | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def proc_df(df, y_fld=None, skip_flds=None, ignore_flds=None, do_scale=False, na_dict=None,\n preproc_fn=None, max_n_cat=None, subset=None, mapper=None):\n if not ignore_flds: ignore_flds=[]\n if not skip_flds: skip_flds=[]\n if subset: df = get_sample(df,subset)\n else: df = df.copy()\n ignored_flds = df.loc[:, ignore_flds]\n df.drop(ignore_flds, axis=1, inplace=True)\n if preproc_fn: preproc_fn(df)\n if y_fld is None: y = None\n else:\n if not is_numeric_dtype(df[y_fld]): df[y_fld] = df[y_fld].cat.codes\n y = df[y_fld].values\n skip_flds += [y_fld]\n df.drop(skip_flds, axis=1, inplace=True)\n\n if na_dict is None: na_dict = {}\n for n,c in df.items(): na_dict = fix_missing(df, c, n, na_dict)\n if do_scale: mapper = scale_vars(df, mapper)\n for n,c in df.items(): numericalize(df, c, n, max_n_cat)\n df = pd.get_dummies(df, dummy_na=True)\n df = pd.concat([ignored_flds, df], axis=1)\n res = [df, y, na_dict]\n if do_scale: res = res + [mapper]\n return res",
"def transform(self, df):\n numerical = self.find_numerical(df)\n # filna with median\n df_ = df.copy()\n for key in numerical:\n df_[key].fillna(df_[key].median(), inplace=True)\n return StandardScaler().fit_transform(np.asarray(df_[numerical]))",
"def preprocess(df): \n \n df.drop_duplicates(subset=df.columns[0], inplace=True) #drop duplicate gene_names. \n df.set_index(keys=df.columns[0], inplace=True)\n df.dropna(how='all', inplace=True) #drop rows with all NAs\n df2 = df.select_dtypes(include=['float64']) + 0.001 #select numbers in DataFrame \n \n return df2",
"def preprocess(df: pd.DataFrame, _map: Optional[Callable[[T], Any]] = None) -> pd.DataFrame:\n assert not isinstance(df, type(None))\n\n if not _map:\n _map = tokenize_stem_stop\n\n # remove or fill rows that have no data, i.e NaN\n nonempty_df = fill_empty(df)\n\n # Map each remaining row to stemmed tokens\n processed_df = nonempty_df.apply(_map)\n\n return processed_df",
"def pre_process_data_set(df):\n df.replace([np.inf, -np.inf], np.nan)\n df[df == np.inf] = np.nan\n df = remove_bad_columns(df)\n df = fill_na(df)\n df = convert_factorial_to_numerical(df)\n\n # Remove columns only containing 0\n df = df[(df.T != 0).any()]\n return df",
"def do_preprocess_on_segment_raw(seg_raw_df):\n sigma = 2\n median_kernel_size = 5\n print \"=======================start preprocessing segment raw dataframe=================\"\n print \"parameters: \" + \"gaussian filter sigma: %.2f, median kernel size: %.2f\" % (sigma, median_kernel_size)\n pp_df = seg_raw_df.copy(deep=True)\n df_mean = pp_df[s_info.raw_value_names].mean()\n df_std = pp_df[s_info.raw_value_names].std()\n pp_df[s_info.raw_value_names] = pp_df.groupby(s_info.segment_col)[s_info.raw_value_names].transform(sp_signal.medfilt, median_kernel_size)\n pp_df[s_info.raw_value_names] = (pp_df[s_info.raw_value_names] - df_mean)/df_std\n pp_df[s_info.raw_value_names] = pp_df.groupby(s_info.segment_col)[s_info.raw_value_names].transform(gaussian_filter1d, sigma=sigma, axis=0, order=0, mode='reflect')\n return pp_df",
"def _dataframe_preprocess(self):\n # 1. add baisc feature like date, time in day, ....\n if self.data_type != 'porto':\n self.df['TIMESTAMP'] = self.df.apply(lambda df: df['TIMESTAMPS'][0], axis=1)\n self.df['TIME'] = pd.to_datetime(self.df['TIMESTAMP'], unit='s', utc=True)\n \n self.df.TIME = self.df.TIME.dt.tz_convert(self.timezone)\n # 2. group df for specific driver analysis\n self.grouped_df = self.df.groupby('LABEL')\n if self.count_od_info:\n if 'SD' not in self.df.columns:\n self._add_OD_info()\n self.grouped_od = self.df.groupby('SD')",
"def preprocess(self, df):\n print(\"Started Processing....\")\n # binary conversion\n df.replace(to_replace=\"yes\", value=1, inplace=True)\n df.replace(to_replace=\"no\", value=0, inplace=True)\n\n # replace unknowns with nan\n df = df.replace(to_replace=\"unknown\", value=np.nan)\n # getting the list of columns with nan\n ml = df.columns[df.isna().any()].tolist()\n\n for item in ml:\n # getting the ratio of the index labels\n val = pd.DataFrame(df[item].value_counts(normalize=True))\n\n # index labels in a list\n valr = val.index.tolist()\n # drc.index = valr\n # columns values in a list\n valc = val[item].tolist()\n # replacing the nan values with ratio\n df[item] = df[item].fillna(pd.Series(np.random.choice(valr, p=valc, size=len(df))))\n\n # dependent variable\n dfy = df.iloc[:, -1]\n # independent variable\n dfx = df.iloc[:, :-1]\n\n # converting categorical data to numerical\n dfx = pd.get_dummies(dfx)\n\n # normalizing\n dfx = (dfx - dfx.min()) / (dfx.max() - dfx.min())\n\n dxdy = pd.concat([dfx, dfy], axis=1)\n\n # class balancing\n sm = RandomOverSampler(random_state=42)\n dfx, dfy = sm.fit_sample(dxdy.iloc[:, :-1], dxdy.iloc[:, -1])\n\n # converting to dataframe\n dfx = pd.DataFrame(dfx, columns=dxdy.iloc[:, :-1].columns.values)\n\n # dimensionality reduction\n pca = PCA(n_components=33)\n dfx = pca.fit_transform((dfx))\n\n print(\"Processing Done\")\n\n return dfx, dfy",
"def pre_get_data(df):\n\n df_len = len(df.iloc[0, :]) - 1\n\n select_cols = []\n\n for i in range(df_len): #Get Columns that contain number values\n\n if type(df.iloc[0, i + 1]) is np.float64:\n if math.isnan(df.iloc[0, i + 1]) == False:\n select_cols.append(i + 1)\n elif type(df.iloc[0, i + 1]) is np.float:\n if math.isnan(df.iloc[0, i + 1]) == False:\n select_cols.append(i + 1)\n\n\n res_df = df.iloc[:, select_cols]\n\n list_pop = list(res_df)\n list_res = ['B_F1_Bool_Result', 'Event_Date', 'B_WClass']\n list_pop.pop()\n\n for item in list_pop:\n if \"F1\" in item:\n aa = item\n bb = aa.replace(\"F1\", \"F2\")\n if bb in list_pop:\n cc = aa.replace(\"F1\", \"F12\")\n df[cc] = df[aa] - df[bb]\n list_res.append(cc)\n\n elif \"F2\" not in item:\n list_res.append(item)\n\n\n\n bw = df['B_WClass']\n i = -1\n j = df.columns.get_loc('B_WClass')\n\n for item in bw:\n i = i + 1\n if item != item:\n df.iloc[i, j] = np.nan\n else:\n df.iloc[i, j] = get_weight[item]\n\n df['B_WClass'] = df['B_WClass'].astype(float)\n res_df = df[list_res]\n\n return res_df",
"def preprocess(df):\n drop_cols = ['duration_ms', 'key', 'mode', 'time_signature', 'popularity', 'tempo']\n drop_cols += ['track_id', 'track_name', 'artist_name']\n for col in drop_cols:\n if col in list(df.columns):\n df = df.drop(columns=col)\n return df",
"def preprocess_pipeline(self):\n if self.trim_size > 0:\n self.df = trim_initial_timeseries(\n df=self.df,\n trim_size=self.trim_size,\n aornums=self.df.aornum.unique()\n )\n\n if self.timebinsize > 0:\n med_df, std_df = bin_df_time(self.df, timebinsize=self.timebinsize)\n\n # Option 1\n self.df = med_df.copy()\n\n \"\"\"\n # Option 2\n self.df = med_df.copy()\n for colname in df.columns:\n if 'noise' in colname:\n std_colname = colname.replace('noise', 'flux')\n self.df[colname] = std_df[std_colname]\n \"\"\"\n\n del med_df, std_df\n\n if self.df is None:\n tso_data = load_from_wanderer(\n planet_name=self.planet_name,\n channel=self.channel,\n aor_dir=self.aor_dir,\n aper_key='gaussian_fit_annular_mask_rad_2.5_0.0',\n centering_key=self.centering_key\n )\n else:\n tso_data = load_from_df(\n self.df,\n aper_key=self.aper_key,\n centering_key=self.centering_key\n )\n\n isfinite = np.isfinite(tso_data.times)\n isfinite = np.bitwise_and(isfinite, np.isfinite(tso_data.fluxes))\n isfinite = np.bitwise_and(isfinite, np.isfinite(tso_data.flux_errs))\n # isfinite = np.bitwise_and(isfinite, np.isfinite(tso_data.aornums))\n isfinite = np.bitwise_and(isfinite, np.isfinite(tso_data.ycenters))\n isfinite = np.bitwise_and(isfinite, np.isfinite(tso_data.xcenters))\n isfinite = np.bitwise_and(isfinite, np.isfinite(tso_data.npix))\n\n times = tso_data.times[isfinite]\n fluxes = tso_data.fluxes[isfinite]\n flux_errs = tso_data.flux_errs[isfinite]\n aornums = tso_data.aornums[isfinite]\n ycenters = tso_data.ycenters[isfinite]\n xcenters = tso_data.xcenters[isfinite]\n npix = tso_data.npix[isfinite]\n\n med_flux = np.median(fluxes)\n flux_errs = flux_errs / med_flux\n fluxes = fluxes / med_flux\n\n arg_times = times.argsort()\n fluxes = fluxes[arg_times]\n flux_errs = flux_errs[arg_times]\n aornums = aornums[arg_times]\n times = times[arg_times]\n ycenters = ycenters[arg_times]\n xcenters = xcenters[arg_times]\n npix = npix[arg_times]\n\n if self.standardise_centers:\n # Center by assuming eclipse is near center\n ycenter = (ycenter - ycenter.mean()) / ycenter.std()\n xcenter = (xcenter - xcenter.mean()) / xcenter.std()\n\n if self.standardise_times:\n # Center by assuming eclipse is near center\n times = times - times.mean()\n\n if self.standardise_fluxes:\n # Center by assuming eclipse is near center\n med_flux = np.median(fluxes)\n std_flux = scale.mad(fluxes)\n\n idxkeep = np.abs(fluxes - med_flux) < self.n_sig * std_flux\n\n self.tso_data = ExoplanetTSOData(\n times=times[idxkeep],\n fluxes=fluxes[idxkeep],\n flux_errs=flux_errs[idxkeep],\n aornums=aornums[idxkeep],\n ycenters=ycenters[idxkeep],\n xcenters=xcenters[idxkeep],\n npix=npix[idxkeep]\n )\n\n # # TODO: Confirm if this is still required\n # self.tso_data.times = self.tso_data.times\n # self.tso_data.fluxes = self.tso_data.fluxes\n # self.tso_data.flux_errs = self.tso_data.flux_errs\n # self.tso_data.aornums = self.tso_data.aornums",
"def preprocess(df):\n df[\"distance\"] = compute_distance(df)\n X_train = df[[\"distance\"]]\n y_train = df[\"fare_amount\"]\n return X_train, y_train",
"def preprocess_feature(df):",
"def preprocess(\n self, data: dd.DataFrame, prep_fn: Callable = preprocessing_flow\n ) -> dd.DataFrame:\n self.data_divisions = data.divisions\n self.ts, series_ddf = distributed_preprocess(\n data, self.flow_config, self.client, prep_fn\n )\n return series_ddf",
"def preprocess_dataframe(self, dataframe):\n return dataframe",
"def preprocess_raw_data(df):\n def convert_date_to_datetime(_df):\n return _df.assign(Date=pd.to_datetime(_df['Date']))\n\n def fill_missing_miles_with_zero(_df):\n return _df.fillna({'Miles': 0})\n\n def filter_dates_prior_to_today(_df):\n return _df[_df['Date'] < datetime.datetime.today()]\n\n def calculate_rolling_averages(_df):\n _df['MA_10day'] = _df['Miles'].rolling(window=10).mean().fillna(0)\n _df['MA_30day'] = _df['Miles'].rolling(window=30).mean().fillna(0)\n return _df.sort_values('Date')\n\n pipeline = [\n convert_date_to_datetime,\n fill_missing_miles_with_zero,\n filter_dates_prior_to_today,\n calculate_rolling_averages,\n ]\n for func in pipeline:\n df = func(df)\n\n df['date_str_label'] = df['Date'].dt.strftime('%b-%d')\n\n return df",
"def clean_up_df(df):\n df['Age'] = df['Age'].fillna(df['Age'].median())\n df['Gender'] = df['Sex'].map({'female':0, 'male':1}).astype(int)\n df['Family'] = df['Parch'] + df['SibSp']\n df['Fare'] = df['Fare'].fillna(df['Fare'].mean())\n df = df.drop(['SibSp','Parch','Sex','Name','Cabin','Embarked','Ticket'],axis=1)\n return df",
"def process_chunked_df(df,float_id):\n df.loc[:, df.columns != 'JULD'] = df.loc[:, df.columns != 'time'].apply(pd.to_numeric, errors='ignore',downcast='signed')\n\n \"\"\"adds depth as column\"\"\"\n df[\"depth\"] = df['PRES']\n\n \"\"\"adds float ID column from float_path_name\"\"\"\n df[\"float_id\"] = int(float_id)\n\n \"\"\"rename ST cols\"\"\"\n df = rename_bgc_cols(df)\n\n \"\"\"drops any invalid ST rows\"\"\"\n df = df.dropna(subset=['time', 'lat','lon','depth'])\n \"\"\"adds climatology day,month,week,doy columns\"\"\"\n df = data.add_day_week_month_year_clim(df)\n \"\"\"reorders bgc_df with ST index leading followed by float_id and cycle\"\"\"\n df = reorder_bgc_data(df)\n \"\"\"strips any whitespace from col values\"\"\"\n df = cmn.strip_whitespace_data(df)\n \"\"\"removes comma delmim from sci cols\"\"\"\n df = replace_comm_delimiter(df)\n \"\"\"removes any inf vals\"\"\"\n df = df.replace([np.inf, -np.inf], np.nan) \n return df",
"def Median_Forecast(df_test):\n\n df_pred = pd.DataFrame(\n {\"Median Forecast\": np.median(df_test, axis=1)},\n index=df_test.index\n )\n\n return df_pred",
"def preprocess_data(dframe,out_file=constants.PROCESSED_DATA_FILE):\n dframe = dframe.copy()\n # removing dimensions with std. dev. < 0.01 as these are almost constant columns\n dframe = dframe.loc[:, dframe.std() > .01]\n\n # removing dimensions that have low absolute correlation with target column as these are possibly noise\n dframe = dframe.loc[:, abs(dframe.corr()[constants.RESULT_COLUMN_NAME]) > .05]\n\n # sorting the dataframe columns in descending order based on their absolute correlation with target variable\n dframe = dframe.iloc[:, np.argsort(-abs(dframe.corr()[constants.RESULT_COLUMN_NAME]))]\n headers = get_headers(dframe)\n # print(headers)\n # standardising the dataframe so that all columns have mean 0 and std = 1\n dframe[headers[1:]] = preprocessing.StandardScaler().fit_transform(dframe[headers[1:]])\n # print(dframe.describe())\n\n dframe.to_pickle(out_file)\n return dframe",
"def clean_dataframe(self, df_in , what = ''): \n \n if what == 'era5fb': # cleaning the era5 feedback only \n df = df_in[np.isfinite(df_in['obsvalue@body'])]\n try: \n df = df.loc[ df['vertco_type@body'] != 2 ] \n except:\n pass\n df = df.reindex()\n df = df[np.isfinite(df['vertco_reference_1@body'])]\n #print('check lengths: ' , len(df_in) , len(df) )\n new_ind = np.array ( range(len(df))) \n df['index'] =new_ind\n df = df.set_index('index')\n \n else: \n ### check if can be optimized ???\n df = df_in.loc[ df_in['z_coordinate_type'] != 2 ] # case where the levels are given in terms of geopotential only (pressure not available)\n \n df = df.loc[ (df['observation_value'] != -99999.0) \n & (df['observation_value'] != -999.0) \n & (df['observation_value'] != -9999) \n & (df['observation_value'] != -9999.0) \n & (df['observation_value'] != -999.9) \n & (df['observation_value'] != -8888 )\n & (df['observation_value'] != -8888.0 )\n \n #& (df['z_coordinate_type'] != 2) \n & (df['z_coordinate'] != -99999.0) \n & (df['z_coordinate'] != -9999.0 )\n & (df['z_coordinate'] != 999 )\n & (df['z_coordinate'] != 999.0 )\n \n \n ] #cleaning the values \n #clean = clean.loc[ (clean['z_coordinate_type'] != 2)] #cleaning the values\n #clean = clean.loc[ (clean['z_coordinate'] != -99999.0 )] #cleaning the values\n \n df = df[np.isfinite(df['observation_value'])] # excluding nan values \n df = df[np.isfinite(df['z_coordinate'])]\n \n return df",
"def preprocess(input_df, init_tables=None):\n \n df = input_df.copy()\n if init_tables is None:\n tables = {}\n else:\n tables = init_tables\n\n # Downsample, keep entries where player is rusher \n #df = df[df['NflIdRusher']==df['NflId']]\n #df.reset_index(drop=True, inplace=True)\n\n # Handle 50 YardLine by filling in the null 'FieldPositions' with the value in 'PossessionTeam'\n df.FieldPosition.fillna(df.PossessionTeam, inplace=True)\n\n # Clean defenders in box - fill nan with median (i.e. 7), and bump 1 or 2 (few samples) up to 3\n df.DefendersInTheBox.fillna(7, inplace=True)\n df.DefendersInTheBox.replace(to_replace=[1, 2], value=3, inplace=True)\n\n # Group rare position values - change 'CB', 'DE', 'DT' (few samples) to 'Other'\n df.Position.replace(to_replace=['CB', 'DE', 'DT'], value='Other', inplace=True)\n\n # Fix inconsistent naming\n map_abbr = {'ARI': 'ARZ', 'BAL': 'BLT', 'CLE': 'CLV', 'HOU': 'HST'} \n for abb in df['PossessionTeam'].unique():\n map_abbr[abb] = abb\n df['HomeTeamAbbr'] = df['HomeTeamAbbr'].map(map_abbr)\n df['VisitorTeamAbbr'] = df['VisitorTeamAbbr'].map(map_abbr)\n df['PossessionTeam'] = df['PossessionTeam'].map(map_abbr)\n\n # Find which team is on defense\n df['DefenseTeam'] = np.where(df['HomeTeamAbbr'] == df['PossessionTeam'], df['VisitorTeamAbbr'], df['HomeTeamAbbr'])\n\n # Calculate team's average offensive yards\n if init_tables is None:\n yards_avg_offense = df[['PossessionTeam','Yards']].groupby(['PossessionTeam']).mean()\n yards_avg_offense = yards_avg_offense.rename(columns={\"Yards\": \"YardsAvgOffense\"}).reset_index()\n tables['yards_avg_offense'] = yards_avg_offense\n else:\n yards_avg_offense = tables['yards_avg_offense']\n df = pd.merge(df, yards_avg_offense, how='left', on='PossessionTeam')\n df.YardsAvgOffense.fillna(4, inplace=True)\n\n # Calculate team's average yards allowed\n if init_tables is None:\n yards_avg_defense = df[['DefenseTeam','Yards']].groupby(['DefenseTeam']).mean()\n yards_avg_defense = yards_avg_defense.rename(columns={\"Yards\": \"YardsAvgDefense\"}).reset_index()\n tables['yards_avg_defense'] = yards_avg_defense\n else:\n yards_avg_defense = tables['yards_avg_defense']\n df = pd.merge(df, yards_avg_defense, how='left', on='DefenseTeam')\n df.YardsAvgDefense.fillna(4, inplace=True)\n\n # Calculate yards remaining to touchdown\n df['YardsRemaining'] = 100 - df.YardLine[df.FieldPosition == df.PossessionTeam]\n df.YardsRemaining.fillna(df.YardLine, inplace=True)\n\n # Calculate rusher carries\n if init_tables is None:\n carries = df[['PlayId', 'NflIdRusher', 'DisplayName']].groupby(['DisplayName', 'NflIdRusher']).agg('count').reset_index()\n carries.rename(columns={'PlayId':'Carries'}, inplace=True)\n tables['carries'] = carries\n else:\n carries = tables['carries']\n df = df.merge(carries[['NflIdRusher', 'Carries']], how='left', on='NflIdRusher')\n df.Carries.fillna(0, inplace=True)\n\n # Calculate rusher mean, max, min yards\n if init_tables is None:\n player_yards = df[['Yards', 'NflIdRusher', 'DisplayName']].groupby(['DisplayName', 'NflIdRusher']).agg(['mean', 'max', 'min'])['Yards'].reset_index()\n player_yards.rename(columns={'mean':'RusherMeanYards', 'max':'RusherMaxYards', 'min':'RusherMinYards'}, inplace=True)\n tables['player_yards'] = player_yards\n else:\n player_yards = tables['player_yards']\n df = df.merge(player_yards[['NflIdRusher', 'RusherMeanYards', 'RusherMaxYards', 'RusherMinYards']], how='left', on='NflIdRusher')\n df.RusherMeanYards.fillna(4, inplace=True)\n df.RusherMaxYards.fillna(99, 
inplace=True)\n df.RusherMinYards.fillna(-15, inplace=True)\n\n return (df, tables)",
"def data_process(df_toprocess=None, cutoff=0.2, bv_cutoff=0.15, catalog=None):\n\n print \"Selecting objects..\"\n df_toprocess['sigma_pi/pi'] = df_toprocess.loc[:, 'parallax_error'].astype(float) / df_toprocess.loc[:, 'parallax']\\\n .astype(float)\n print \"..Done\\nCutoff at relative parallax error of %s\\n----------\" % cutoff\n\n # only take objects with relative parallax error < cutoff\n df_toprocess = df_toprocess.loc[df_toprocess.loc[:, 'parallax'] /\n df_toprocess.loc[:, 'parallax_error'] > 1. / cutoff]\n\n print catalog\n if catalog is None:\n print \"Replacing whitespace with nan\"\n df_toprocess = df_toprocess.replace(' ', np.nan) # some cells are ' ' instead of nan\n\n print \"Converting BTmag and VTmag to floats..\"\n df_toprocess.BTmag = df_toprocess.BTmag.astype(float)\n df_toprocess.VTmag = df_toprocess.VTmag.astype(float)\n # Some values are NaN:\n print \"Removing objects with missing BT or VT measurements..\"\n df_toprocess = df_toprocess[df_toprocess.BTmag.notnull()]\n df_toprocess = df_toprocess[df_toprocess.VTmag.notnull()]\n\n print \"Computing B-V and M_V..\"\n df_toprocess['B_V'] = df_toprocess.BTmag - df_toprocess.VTmag\n df_toprocess['M_V'] = df_toprocess.VTmag - 5. * (np.log10(1000. / df_toprocess.parallax) - 1.)\n\n print \"Converting sigma BT and sigma VT to float..\"\n df_toprocess.e_BTmag = df_toprocess.e_BTmag.astype(float)\n df_toprocess.e_VTmag = df_toprocess.e_VTmag.astype(float)\n\n print \"Computing sigma B-V..\"\n df_toprocess['e_B_V'] = np.sqrt(df_toprocess.e_BTmag.pow(2)+df_toprocess.e_VTmag.pow(2))\n\n print \"Applying selection on sigma BT-VT < %s..\" % bv_cutoff\n df_toprocess = df_toprocess[df_toprocess.e_B_V < bv_cutoff]\n\n if catalog == 'xmatch_TGAS_Simbad.csv':\n df_toprocess = df_toprocess.loc[(df_toprocess['J'] < 11.) & (df_toprocess['K'] < 11.)]\n print \"min in J: %s\" % np.max(df_toprocess['J'])\n print \"max in J: %s\" % np.min(df_toprocess['J'])\n df_toprocess.insert(10, 'B_V', df_toprocess.loc[:, 'B'] - df_toprocess.loc[:, 'V'])\n\n df_toprocess.insert(10, 'J_K', df_toprocess.loc[:, 'J'] - df_toprocess.loc[:, 'K'])\n df_toprocess.insert(10, 'M_G', df_toprocess.loc[:, 'phot_g_mean_mag'] - 5. *\n (np.log10(1000. / df_toprocess.loc[:, 'parallax']) - 1.))\n df_toprocess.insert(10, 'M_J', df_toprocess.loc[:, 'J'] - 5. *\n (np.log10(1000. / df_toprocess.loc[:, 'parallax']) - 1.))\n df_toprocess.insert(10, 'M_K', df_toprocess.loc[:, 'K'] - 5. *\n (np.log10(1000. / df_toprocess.loc[:, 'parallax']) - 1.))\n\n if catalog == 'xmatch_TGAS_VSX.csv':\n df_toprocess = df_toprocess[df_toprocess.V == 0]\n print \"%s objects selected\" % len(df_toprocess)\n print \"..Done\\n----------\"\n return df_toprocess",
"def fill_nan_in_numeric(df):\n print(\" --- Filling NaN in Numerics.\")\n thresh = get_min_filled_threshold(df)\n columns = df.columns\n numerical = [x for x in columns if x.startswith('n_')]\n # fill NaN with mean or median, based on std dev\n for col in numerical:\n filled = get_non_missing_count(df[col])\n if filled < thresh:\n df[col] = df[col].fillna(-1)\n else:\n std = df[col].std()\n if std < 1:\n mean = df[col].mean()\n df[col] = df[col].fillna(mean)\n else:\n median = df[col].median()\n df[col] = df[col].fillna(mean)\n\n print(\" --- Finished filling NaN in Numerics.\")\n return df",
"def normalize(df, excludes):\n\n result = df.copy()\n for feature_name in df.columns:\n if feature_name in excludes:\n continue\n try:\n max_value = df[feature_name].max()\n min_value = df[feature_name].min()\n if max_value == min_value:\n min_value = 0\n result[feature_name] = (df[feature_name] - min_value) / (max_value - min_value)\n result[feature_name] = result[feature_name].apply(lambda x: round(abs(x), 4))\n except:\n LOGGER.error(f'Error normalizing feature: {feature_name}')\n raise RuntimeError(f'Error normalizing feature: {feature_name}')\n return result",
"def imputer(dataframe,columns=[], type='median',frame_type='spark'):\n if frame_type == 'spark':\n from pyspark.sql.functions import avg, lit, when, col\n\n df = dataframe\n for column in columns:\n if type == 'median':\n # Greenwald-Khanna algorithm for finding quanitiles\n median = df.approxQuantile(column, [0.5], 0.25)[0] # relative error - .25 is a measure of how accurate the number will be higher will be more expensive\n df = df.withColumn(column,\n when(col(column).isNull(), lit(median))\n .otherwise(df[column]))\n elif type == 'mean':\n #get the first element from list\n mean = df.select(avg(column)).rdd.flatMap(list).collect()[0]\n print(mean)\n df = df.withColumn(column,\n when(col(column).isNull(), lit(mean))\n .otherwise(df[column]))\n else:\n raise Exception('Type not supported. Please use a supported type.')\n return df\n else:\n from sklearn.preprocessing import Imputer\n\n df = None\n if frame_type == 'h2o':\n # convert to pandas\n df = dataframe.as_data_frame()\n elif frame_type == 'pandas':\n df = dataframe\n\n for column in columns:\n imputer = None\n if type == 'median':\n imputer = Imputer(missing_values='NaN', #numpy nissing values\n strategy=\"mean\",\n axis=0) #impute columns\n elif type == 'mean':\n imputer = Imputer(missing_values='NaN', #numpy nissing values\n strategy=\"median\",\n axis=0) #impute columns\n elif type == 'most_frequent':\n imputer = Imputer(missing_values='NaN', #numpy nissing values\n strategy=\"most_frequent\",\n axis=0) #impute columns\n else:\n raise Exception('Type not supported. Please use a supported type.')\n\n df[column] = imputer.fit_transform(df[column])\n if frame_type == 'h2o':\n import h2o\n print('Converting to H2OFrame ...')\n # convert train back to h2o\n df = h2o.H2OFrame(df)\n print('Done.')\n return df\n else:\n return df",
"def _adjust_fdr(df: pd.DataFrame) -> pd.DataFrame:\r\n\r\n df = df.sort_values(by='p').reset_index(drop=True)\r\n df['q'] = df.p * len(df.index) / (df.index + 1)\r\n df['q'] = df.q.mask(df.q > 1.0, 1.0)\r\n\r\n return df",
"def preprocess(df):\n # there were very few missing records for these columns, thus\n # we drop them to prevent biasing the data with imputation\n # (only 12 rows were removed)\n subset = ['num-of-doors', 'bore', 'stroke', 'horsepower', 'peak-rpm', 'price']\n df = df.dropna(subset = subset)\n\n # normalized-losses, drop due to lots of missing values\n # make drop due to having too many distinct categorical values\n df = df.drop(['normalized-losses', 'make'], axis = 1)\n\n # extract the response/output column\n response_col = 'price'\n y = df[response_col].values\n df = df.drop(response_col, axis = 1)\n\n # categorical variables: one-hot-encode them\n # the numeric variables are the columns excluding\n # the categorical variables and the variables that\n # have been detected to have multicollinearity using\n # VIF (variance inflation factor)\n cat_cols = ['symboling', 'fuel-type', 'aspiration', 'num-of-doors',\n 'body-style', 'drive-wheels', 'engine-location', 'fuel-system',\n 'engine-type', 'num-of-cylinders']\n for col in cat_cols:\n df[col] = df[col].astype('category')\n\n dummied = pd.get_dummies(df[cat_cols], drop_first = True)\n\n # use sorted set to ensure the consistency of the column order\n collinear_cols = SortedSet(['city-mpg', 'curb-weight', 'horsepower', 'length', 'width'])\n num_cols = list(SortedSet(df.columns) - SortedSet(cat_cols) - collinear_cols)\n X = pd.concat([df[num_cols], dummied], axis = 1)\n\n # standardize the numeric columns using the training set\n X_train, X_test, y_train, y_test = train_test_split(X.values, y,\n test_size = 0.2,\n random_state = 4321)\n scaler = StandardScaler()\n X_train[:, :len(num_cols)] = scaler.fit_transform(X_train[:, :len(num_cols)])\n X_test[:, :len(num_cols)] = scaler.transform(X_test[:, :len(num_cols)])\n return X_train, X_test, y_train, y_test, X.columns",
"def pre_process(self, frame: pd.DataFrame) -> pd.DataFrame:\n # frame = rows_filtering(frame)\n # frame = feature_dropping(frame)\n # frame = feature_values_fixing(frame)\n\n # frame = extreme_values_handling(frame, [])\n # missing_value_imputation(frame, [])\n\n # data_type_conversion(frame)\n # frame = feature_engineering(frame, self.GENERATE_USER_FEATURES)\n # feature_renaming(frame)\n\n return frame",
"def preprocess_dataset(dataset=None, remove_missing=60, remove_empty_rows=True):\n print('feature size before dropping:{}'.format(dataset.shape[1]))\n dataset_after_drop = dataset.dropna(thresh=dataset.shape[0]*remove_missing/100, how='all',axis=1)\n print('feature size after dropping:{}'.format(dataset_after_drop.shape[1]))\n print('row size before dropping:{}'.format(dataset_after_drop.shape[0]))\n if remove_empty_rows is True:\n df_final = dataset_after_drop.dropna(inplace=False).reset_index (drop=True)\n print('row size after dropping:{}'.format(df_final.shape[0]))\n print('---------------')\n print('final shape:{}'.format(df_final.shape))\n return df_final\n else:\n return dataset_after_drop"
]
| [
"0.66418713",
"0.6127512",
"0.58669835",
"0.5857657",
"0.5837059",
"0.5729272",
"0.5686927",
"0.5652169",
"0.56378347",
"0.5603194",
"0.55321705",
"0.5485073",
"0.5433233",
"0.5423102",
"0.5404316",
"0.5354289",
"0.5354275",
"0.53387",
"0.53330916",
"0.5324305",
"0.5293128",
"0.5267691",
"0.52475715",
"0.5240045",
"0.5238525",
"0.5218842",
"0.51772374",
"0.5176492",
"0.5150858",
"0.51311535"
]
| 0.6647287 | 0 |
Sort standings_rows according to the subclass rank method. | def sort_standings_rows(self, standings_rows, heat_games, players, rank_finals=False):
non_finals_sort_key_fn = self.get_standings_row_sort_key_fn()
self.calculate_secondary_rank_values(standings_rows, heat_games, players)
standings_rows.sort(key=non_finals_sort_key_fn, reverse=True)
if rank_finals:
# If someone has played in a final or third-place playoff then we
# fix their position accordingly.
relocate_indices_to = []
for (i, s) in enumerate(standings_rows):
if len(s.finals_form) >= 3 and s.finals_form[2] != '-':
fixed_pos = finals_form_to_position(s.finals_form)
if fixed_pos:
relocate_indices_to.append((i, fixed_pos))
relocate_row_to = []
for (i, fixed_pos) in reversed(relocate_indices_to):
relocate_row_to.append((standings_rows[i], fixed_pos))
del standings_rows[i]
for (s, fixed_pos) in sorted(relocate_row_to, key=lambda x : x[1]):
assert(fixed_pos >= 1 and fixed_pos <= 8)
standings_rows.insert(fixed_pos - 1, s)
if rank_finals:
sort_key_fn = lambda s : (s.finals_points, non_finals_sort_key_fn(s))
else:
sort_key_fn = non_finals_sort_key_fn
prev_s = None
pos = 0
joint = 0
for s in standings_rows:
if prev_s and sort_key_fn(prev_s) == sort_key_fn(s):
joint += 1
else:
pos += joint + 1
joint = 0
s.position = pos
prev_s = s
standings_rows.sort(key=lambda s : (s.position, s.name)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _sort(self):\n self.rows.sort(key=lambda x: (x['PERC1'], x['EQ'], x['PASS'], x['W2']),\n reverse=True)\n\n rank = 0\n prev_perc = 0\n prev_rank = 0\n for row in self.rows:\n if row[\"NR\"] == 0:\n # Something has already populated NR as 0 - so we set rank as\n # 0 too\n row['_RANK'] = 0\n row['_NR'] = 0\n continue\n\n # Increment our count\n rank += 1\n if row['PERC1'] == prev_perc:\n row['NR'] = \"\"\n row['_NR'] = prev_rank # I.e. joint 6th will be 6 here\n row['_RANK'] = rank # I.e. joint 6th could be 7, or 8 etc. here\n else:\n row['NR'] = rank\n row['_NR'] = rank\n row['_RANK'] = rank\n prev_perc = row['PERC1']\n prev_rank = rank",
"def set_rank_order(self):\n for k in self._run:\n self._run[k].sort(key=lambda x:x.get_rank(),reverse=False)\n tot_res = len(self._run[k])\n for r in self._run[k]:\n r.set_score(tot_res - int(r.get_rank()) + 1)\n print r.get_str()",
"def diversified_ranking(self):\n self.error_throw('rank')\n instance = Instance(self.table_name)\n instance.addTable(Table(instance,False,'','')) # 'False'->transformed '',''->no describe yet\n if self.import_method == 'mysql': instance = self.mysql_handle(instance)\n elif self.import_method == 'csv': instance = self.csv_handle(instance)\n\n self.rank_partial(instance)\n\n self.rank_method = methods_of_ranking[3] # = 'diversified_ranking'",
"def _rank(self):\r\n return sorted(self.player_points.items(),key=lambda x:x[1],reverse=True)",
"def rank(self, current_order_by_value: Comparable, current_row_number: int) -> int:",
"def ranking_metric(df, method, phenoPos, phenoNeg, classes, ascending): \n \n A = phenoPos\n B = phenoNeg\n df2 = df.T \n df2['class'] = classes\n df_mean= df2.groupby('class').mean().T\n df_std = df2.groupby('class').std().T \n #exclude any zero stds.\n df_mean = df_mean[df_std.sum(axis=1) !=0]\n df_std = df_std[df_std.sum(axis=1) !=0]\n \n if method == 'signal_to_noise':\n sr = (df_mean[A] - df_mean[B])/(df_std[A] + df_std[B])\n elif method == 't_test':\n sr = (df_mean[A] - df_mean[B])/ np.sqrt(df_std[A]**2/len(df_std)+df_std[B]**2/len(df_std) )\n elif method == 'ratio_of_classes':\n sr = df_mean[A] / df_mean[B]\n elif method == 'diff_of_classes':\n sr = df_mean[A] - df_mean[B]\n elif method == 'log2_ratio_of_classes':\n sr = np.log2(df_mean[A] / df_mean[B])\n else:\n logging.error(\"Please provide correct method name!!!\") \n sys.exit()\n sr.sort_values(ascending=ascending, inplace=True)\n df3 = sr.to_frame().reset_index()\n df3.columns = ['gene_name','rank']\n df3['rank2'] = df3['rank']\n\n return df3",
"def order_players_by_initial_rank(self):\n pass",
"def _compute_ranks(df, lower_better=True):\n # return df.rank(axis=1, numeric_only=True, ascending=lower_better)\n return df.rank(axis=1, numeric_only=True, ascending=lower_better, method='min')",
"def _rank_stations_by_distance_and_quality(lat, lon):\n\n station_ranking = rank_stations(lat, lon)\n station_ranking['enumerated_quality'] = station_ranking['rough_quality'].map(QUALITY_SORT)\n station_ranking = station_ranking.sort_values(by=['distance_meters', 'enumerated_quality'])\n return station_ranking",
"def run_sort_home_by_score(self):\n self.homes = self.python_sort(self.homes)",
"def sort_ranking_dict(self):\n\n # reset self.ranking_dict to empty dict (if sorted tuple)\n self.ranking_dict = {}\n\n # create ranking dict with player and grand total score\n for j, player in enumerate(self._players_list):\n ranking_name, ranking_score = \\\n self._players_list[j].get_name_and_grand_total_score()\n self.ranking_dict[ranking_name] = ranking_score\n\n # reverse sort ranking dict by grand total (returns list)\n self.ranking_dict = sorted(self.ranking_dict.items(),\n key=lambda x: x[1], reverse=True)",
"def __rank__(self) -> int:",
"def partial_order(self):\n self.error_throw('rank')\n instance = Instance(self.table_name)\n instance.addTable(Table(instance,False,'','')) # 'False'->transformed '',''->no describe yet\n if self.import_method == 'mysql': instance = self.mysql_handle(instance)\n elif self.import_method == 'csv': instance = self.csv_handle(instance)\n \n self.rank_partial(instance)\n\n self.rank_method = methods_of_ranking[2] # = 'partial_order'",
"def _add_ranks(standings, key):\n prev_key = None\n current_rank = 0\n for i, team in enumerate(standings, start=1):\n this_key = key(team)\n if this_key != prev_key:\n current_rank = i\n prev_key = this_key\n team.rank = current_rank",
"def playerStandings():\n\n \n cursor.execute(\"select * from players\")\n player_data = cursor.fetchall()\n wins_sorted = []\n\n for tup_index in range(len(player_data)):\n #the %s is about 400 ns faster than %d for integer substitution\n cursor.execute(\"select count(winnerid) from matches where winnerid = %s\" % player_data[tup_index][0])\n numMatchesWon = cursor.fetchone()[0]\n\n cursor.execute(\"select count(loserid) from matches where loserid = %s\" % player_data[tup_index][0])\n numMatchesLost = cursor.fetchone()[0]\n\n numMatchesPlayed = numMatchesWon + numMatchesLost\n\n wins_sorted.append(int(numMatchesWon))\n player_data[tup_index] += int(numMatchesWon),\n player_data[tup_index] += int(numMatchesPlayed),\n \n wins_sorted.sort(reverse=True)\n player_data_sorted_bywins = []\n \n #this is how im sorting the data from the database by wins, I'm hoping that this was supposed to be done with python code and not sql\n for w in wins_sorted:\n for tup_ind in range(len(player_data)):\n if player_data[tup_ind][2] == w:\n player_data_sorted_bywins.append(player_data[tup_ind])\n del player_data[tup_ind]\n break\n \n return player_data_sorted_bywins",
"def rank_drawr_property(final_spreadsheet_df, pg_network_n1_names):\n prop_spreadsheet_df = final_spreadsheet_df.loc[pg_network_n1_names]\n prop_spreadsheet_df.iloc[:, :-1] = prop_spreadsheet_df.iloc[:, :-1].apply(\n lambda x: (x - prop_spreadsheet_df['base']).sort_values(ascending=0).index.values)\n prop_spreadsheet_df = prop_spreadsheet_df.drop('base', 1)\n\n return prop_spreadsheet_df",
"def sort(self): # sort all entries to make room for new ones, determine best and worst\n ns = self.num_stored.value\n ys = np.asarray(self.ys[:ns])\n yi = ys.argsort()\n sortRuns = []\n for i in range(len(yi)):\n y = ys[yi[i]]\n xs = self.get_x(yi[i])\n sortRuns.append((y, xs))\n numStored = min(len(sortRuns),int(0.9*self.capacity)) # keep 90% best \n for i in range(numStored):\n self.replace(i, sortRuns[i][0], sortRuns[i][1])\n self.num_sorted.value = numStored \n self.num_stored.value = numStored \n return numStored",
"def sort_data(self):\n\n # zips the game_list and game_Scores, sorts the result by scores, and then puts them back.\n self.game_list, self.game_scores = zip(*sorted(zip(self.game_list, self.game_scores), key=lambda pair: pair[1]))",
"def sort_cards(self):\n self.cards.sort(key=operator.attrgetter('persona', 'rank'))\n self.update_position()",
"def get_ranks(d): \n raise NotImplementedError(\"Problem 3 Incomplete\")",
"def sort_by_reranker_scores(self):\n self.parses.sort(key=lambda parse: (parse.reranker_score,\n parse.parser_score),\n reverse=True)",
"def _rank(measure):\n sort_idx = np.argsort(-measure)\n ranks = np.empty(len(measure), int)\n ranks[sort_idx] = np.arange(1, len(measure)+1)\n return ranks",
"def set_subhead_ranking(self):\n # remove dirty rows\n # TODO: determine why the \"Col8\" values appear in rows in the first place\n self.subhead = self.subhead[self.subhead.Col8 != \"Col8\"]\n # cast all to int so that we can take `.max`\n self.subhead[\"Col8\"] = self.subhead[\"Col8\"].astype(\"int32\")\n\n self.subhead[\"ranking_score\"] = (\n self.subhead[\"Col8\"].max() - self.subhead[\"Col8\"]\n )",
"def sort_table(table, sats_table):",
"def print_end_of_round_rankings(self):\n print('\\nFINAL SCORES')\n print('-'*12)\n for k, v in enumerate(self.ranking_dict):\n print(f\"{k+1} {v[0]}: {v[1]}\")\n print('\\n')",
"def sort(self):\r\n\t\tif ScoreOpt.isGroupVassals():\r\n\t\t\tself._playerScores.sort(lambda x, y: cmp(x.sortKey(), y.sortKey()))\r\n\t\t\tself._playerScores.reverse()\r\n\t\tmaxPlayers = ScoreOpt.getMaxPlayers()\r\n\t\tif maxPlayers > 0 and len(self._playerScores) > maxPlayers:\r\n\t\t\tself._playerScores = self._playerScores[len(self._playerScores) - maxPlayers:]",
"def rank(self):\n\n if self._rank >= 0:\n return self._rank\n\n reduced, operations = self.to_row_echelon()\n non_leading_rows = 0\n for i in range(self.rows, 0, -1):\n if not reduce(lambda x,y: x or y, reduced.row(i)):\n non_leading_rows += 1\n else:\n break\n\n self._rank = self.rows - non_leading_rows\n return self._rank",
"def scoreGame(self):\n # create valueLs[card1,card2,...], pass it to sumHandReturnPoints(valueLs) or twoCardReturnPoints(valueLs)\n scoreLs = []\n ### Score of row\n for rowKey in self.table:\n valueLs = self.table[rowKey]\n points = self.sumHandReturnPoints(valueLs)\n scoreLs.append(points)\n\n ### Score of 4-card column\n for offset in range(0,3): # 0,1,2\n tmpLs = []\n for rowKey in self.table:\n valueLs = self.table[rowKey]\n if len(valueLs) == 5:\n iterStart = 1\n else:\n iterStart = 0\n card = valueLs[iterStart+offset]\n tmpLs.append(card)\n points = self.sumHandReturnPoints(tmpLs)\n scoreLs.append(points) \n\n ### Score of 2-card column\n #(1) 1st column\n valueLs1 = self.table['row1']\n valueLs2 = self.table['row2']\n tmpLs = []\n tmpLs.append(valueLs1[0].get_rank())\n tmpLs.append(valueLs2[0].get_rank())\n points = self.twoCardReturnPoints(tmpLs)\n scoreLs.append(points)\n #(2) 5th column\n valueLs3 = self.table['row1']\n valueLs4 = self.table['row2']\n tmpLs = []\n tmpLs.append(valueLs3[-1].get_rank())\n tmpLs.append(valueLs4[-1].get_rank())\n points = self.twoCardReturnPoints(tmpLs)\n scoreLs.append(points) \n\n ### Add up scoreLs\n sumPoints = 0\n for points in scoreLs:\n sumPoints += points\n return sumPoints",
"def rank(self, mode='range'):\n\n df = self.__df_timings\n multiindex = self.multiindex\n\n if multiindex:\n raise Exception('Ranking is supported for groupings.')\n\n if mode == 'range':\n R = np.arange(1, len(df) + 1)\n elif mode == 'constant':\n R = np.ones(len(df))\n elif mode == 'index':\n idx = np.array(df.index)\n d = idx.dtype\n if np.issubdtype(d, np.floating) or np.issubdtype(d, np.integer):\n R = df.index.values\n else:\n raise ValueError(\"Dataframe index is not int or float. Hence, 'index' is an invalid option as mode.\")\n else:\n raise ValueError(\"Invalid option as mode.\")\n\n df_ranked = df.iloc[:, R.dot(df).argsort()[::-1]]\n df[:] = df_ranked\n df.columns = df_ranked.columns\n return",
"def place_at_splits(data):\n groups = defaultdict(list)\n for runner_idx, runner in enumerate(data):\n splits = runner['splits']\n for split in splits:\n split['runner_idx'] = runner_idx\n groups[split['split_dist']].append(split)\n\n ranks = []\n srt_keys = sorted(groups, key=groups.get)\n for key in srt_keys:\n group = groups[key]\n srt_group = sorted(group, key=lambda t: t['split_mins'])\n ranked_group = []\n for rank, split in enumerate(srt_group):\n split['rank'] = rank\n ranked_group.append(split)\n ranks.append(ranked_group)\n\n return data, ranks"
]
| [
"0.6434854",
"0.62527853",
"0.6219509",
"0.6213747",
"0.6067628",
"0.59437746",
"0.5909843",
"0.5891942",
"0.5752573",
"0.5714053",
"0.5691639",
"0.56828415",
"0.5677474",
"0.56695354",
"0.5645508",
"0.5575053",
"0.5563753",
"0.55051386",
"0.5498401",
"0.5484112",
"0.54697895",
"0.54637724",
"0.5462235",
"0.54506433",
"0.5449182",
"0.54430187",
"0.5425528",
"0.5421485",
"0.53905106",
"0.5374772"
]
| 0.7152437 | 0 |
One waypoint must exist in the database | def test_create(self):
self.assertTrue(WayPoint.objects.exists()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_waypoint(self, waypoint):\n connection = self.__create_connection()\n try:\n waypoint_list = list(waypoint)\n key = self.__compound_key(waypoint)\n waypoint_list.insert(0, key)\n\n keyed_waypoint = tuple(waypoint_list)\n\n sql = ''' INSERT INTO waypoints(waypoint_id, x, y, z, distance, heading, visit_count)\n VALUES(?,?,?,?,?,?,?) '''\n cur = connection.cursor()\n cur.execute(sql, keyed_waypoint)\n connection.commit()\n cur.close()\n return\n except sqlite3.Error as e:\n print(e)\n finally:\n connection.close()",
"def test_points_exists(self):\n self.assertEqual(Destination.objects.filter(name='testWithin')[0].point,\n self.test_point_inside)\n self.assertEqual(Destination.objects.filter(name='testWithout')[0].point,\n self.test_point_outside)",
"def process_waypoint(self, waypoint: Waypoint) -> Union[Trip, None]:\n ...",
"def add_waypoint(self, waypoint):\n self.drone.add_waypoint(waypoint)",
"def passed_waypoint(self, waypoint_num):\n bools = self.ros_node.get_data('/diff_drive/waypoints_achieved', simple_data = False)\n # Waits for the data\n if bools is not None:\n if len(bools.bools) >= waypoint_num:\n return bools.bools[waypoint_num -1]\n \n rospy.logerr_throttle(15, \"Checking Waypoint Failed. Did not find a waypoint with the number '%s' in the path\" %(waypoint_num))\n return False\n else:\n return False",
"def test_find_closest_waypoints_no_position(self):\n planner = WaypointPlanner(make_example_base_waypoints())\n self.assertIsNone(planner.find_closest_waypoints(1))",
"def test_relation_way_not_inserted():\n park = query_row(db_conf, 'osm_landusages', -9001)\n assert park['type'] == 'park'\n assert park['name'] == 'rel 9001'\n assert query_row(db_conf, 'osm_landusages', 9009) == None\n\n park = query_row(db_conf, 'osm_landusages', -9101)\n assert park['type'] == 'park'\n assert park['name'] == 'rel 9101'\n assert query_row(db_conf, 'osm_landusages', 9109) == None\n\n scrub = query_row(db_conf, 'osm_landusages', 9110)\n assert scrub['type'] == 'scrub'",
"def create_checkpoint(self, checkpoint):\n connection = self.__create_connection()\n sql = ''' INSERT INTO checkpoints(waypoint_x, waypoint_y, safe_options)\n VALUES(?,?,?) '''\n try:\n cur = connection.cursor()\n cur.execute(sql, checkpoint)\n connection.commit()\n cur.close()\n return\n except sqlite3.Error as e:\n print(e)\n finally:\n if connection:\n connection.close()",
"def test_single_node_ways_not_inserted():\n assert not query_row(db_conf, 'osm_roads', 30001)\n assert not query_row(db_conf, 'osm_roads', 30002)\n assert not query_row(db_conf, 'osm_roads', 30003)",
"def test_relation_with_gap():\n park = query_row(db_conf, 'osm_landusages', -7301)\n assert park['geometry'].is_valid, park",
"def test_relation_way_inserted():\n park = query_row(db_conf, 'osm_landusages', -8001)\n assert park['type'] == 'park'\n assert park['name'] == 'rel 8001'\n assert query_row(db_conf, 'osm_roads', 8009)[\"type\"] == 'residential'",
"def waypoint_add_rel(self):\n pass",
"def point_exists(self, point):\n qs = LocationPoint.objects.raw(\"\"\"\n SELECT * FROM script_execution_manager_locationpoint\n WHERE st_dwithin(\n thegeometry,\n st_transform(\n st_setsrid(\n st_point({point.x}, {point.y}), {point.srid}),\n 4326\n ),\n -- This should be approximately one meter.\n -- See: http://stackoverflow.com/a/8477438/198050\n -- 0.00001\n -- Gerrit Hendriksen ([email protected]) says\n -- 8*10e-6 is approximately one meter.\n 8.181818181818181e-06\n )\n \"\"\".format(point=point)\n )\n\n res = sum(1 for result in qs)\n return qs[0] if res else False",
"def __init__(self, waypoints: Tuple[Waypoint]):\n self._waypoints = waypoints",
"def __init__(self, waypoints: Tuple[Waypoint]):\n self._waypoints = waypoints",
"def check_reached_waypoint_goal(self):\n return self.control_instance.check_reached_waypoint_goal()",
"def waypoint_add_global(self):\n pass",
"def test_missing_link(self):\n pfs_file = os.path.join(\"tests\", \"data\", \"positionfixes.csv\")\n pfs = ti.read_positionfixes_csv(pfs_file, sep=\";\", tz=\"utc\", index_col=\"id\", crs=\"epsg:4326\")\n _, sp = pfs.as_positionfixes.generate_staypoints(\n method=\"sliding\", gap_threshold=1e6, dist_threshold=0, time_threshold=0\n )\n warn_string = \"No locations can be generated, returning empty locs.\"\n with pytest.warns(UserWarning, match=warn_string):\n sp, _ = sp.as_staypoints.generate_locations(\n method=\"dbscan\", epsilon=1e18, num_samples=1000, agg_level=\"user\"\n )\n\n assert pd.isna(sp[\"location_id\"]).any()",
"def try_waypoints(waypoint_data, current_point, segmented_points):\n\n # waypoint_data will be a list [waypoint_n, ... , waypoint_w]\n # where waypoint_n ... w is (lat, lng)\n\n # store the waypoints retreived and compare their crime_index\n # ret [{dicte}, {dictw}]\n waypoint_geohash_data_all = get_position_geohash(waypoint_data)\n crime_index_storage = []\n for data in waypoint_geohash_data_all:\n crime_index_storage.append(data['crime_index'])\n crime_index_storage.append(current_point['crime_index'])\n\n lowest_crime_index = min(*crime_index_storage)\n\n # check and assemble dict for lowest_crime_index waypoint\n generate_waypoint(lowest_crime_index,\n waypoint_geohash_data_all,\n segmented_points)",
"def waypoint_callback(self, wp):\n if self.trajectory_constructed == False: \n NextwpPosition = np.array([wp.position.x, wp.position.y, wp.position.z])\n NextwpOrientation = np.array([wp.orientation.x, wp.orientation.y, wp.orientation.z, wp.orientation.w])\n self.pc_x, self.pc_y, self.pc_z, self.seg_times, self.traj_t0 = self.make_trajectory(NextwpPosition, NextwpOrientation) \n self.trajectory_constructed = True",
"def save_journey():\n destination = request.form.get('destination_id', ''), request.form.get('destination_name', '')\n origin = request.form.get('origin_id', ''), request.form.get('origin_name', '')\n if '' not in destination or '' not in origin:\n trip_db: Cache = g.trip_db\n trip_db.read_db()\n trip_db.write_db((origin, destination))\n print(trip_db.data)\n return redirect('/')",
"def requires_route(self) -> bool:\n return self.goal.is_specific()",
"def places_create_one():\n if request.method == 'POST':\n place = request.json['place']\n place = get_place_data(place)\n place = filter_essential_data(place)\n save_place(place)\n data = is_exists(place['local_name'])\n if data is False:\n return jsonify({'response': 'Not save.'}), 406\n return jsonify(place), 200",
"def _update_next_waypoint(self):\n if not self.base_waypoints:\n #rospy.logwarn(\"Waypoints not updated: base_waypoints not available yet.\")\n return False\n\n if not self.current_pose:\n #rospy.logwarn(\"Waypoints not updated: current_pose not available yet.\")\n return False\n\n # Get ego car variables\n ego_x = self.current_pose.position.x\n ego_y = self.current_pose.position.y\n ego_theta = math.atan2(self.current_pose.orientation.y, self.current_pose.orientation.x)\n\n # If I do have a next_waypoint, I will start looking from it, and stop looking\n # as soon as get a local minimum. Otherwise I will do a full search across the whole track\n t = time.time()\n wp = None\n yaw = 0\n dist = 1000000 # Long number\n if self.next_waypoint:\n idx_offset = self.next_waypoint\n full_search = False\n else:\n idx_offset = 0\n full_search = True\n num_base_wp = len(self.base_waypoints)\n\n for i in range(num_base_wp):\n idx = (i + idx_offset)%(num_base_wp)\n wp_x = self.base_waypoints[idx].pose.pose.position.x\n wp_y = self.base_waypoints[idx].pose.pose.position.y\n wp_d = math.sqrt((ego_x - wp_x)**2 + (ego_y - wp_y)**2)\n\n if wp_d < dist:\n dist = wp_d\n wp = idx\n if debugging:\n # Angle betwee car heading and waypoint heading\n yaw = math.atan2(wp_y - ego_y, wp_x - ego_x) - ego_theta\n elif not full_search:\n # Local minimum. If the waypoint makes sense, just use it and break\n if dist < max_local_distance:\n break; # Found a point\n else:\n # Seem to have lost track. Do search again\n rospy.logwarn(\"Waypoint updater lost track (local min at %.1f m after %d waypoints). Going back to full search.\", dist, i+1)\n full_search = True\n\n if debugging:\n rospy.loginfo(\"New next wp [%d] -> (%.1f,%.1f) after searching %d points in %fs\", wp, dist * math.cos(yaw), dist * math.sin(yaw), i, time.time()-t)\n\n if wp is None:\n rospy.logwarn(\"Waypoint updater did not find a valid waypoint\")\n return False\n\n self.next_waypoint = wp\n return True",
"def process_waypoint(self, waypoint: Waypoint) -> Union[Trip, None]:\n\n # ignore the first entry, just remember it for further compares\n if not self.prev_point:\n self.prev_point = waypoint\n return None\n\n if self.is_driving(self.prev_point, waypoint):\n if not self.start_point:\n # indicates trip start\n self.start_point = self.prev_point\n else:\n # indicates trip finish\n if self.start_point:\n d = self.calc_distance(self.start_point, self.prev_point)\n trip = Trip(d, self.start_point, self.prev_point)\n self.start_point = None\n return trip\n self.prev_point = waypoint\n return None",
"def exists(self, proxy):\n return not self.database.zscore(self.key, proxy) == None",
"def __spur_on_if_needed(self):\n if len(self.waypoints) < 2:\n return\n next_speed = (get_waypoint_speed(self.waypoints[0]) +\n get_waypoint_speed(self.waypoints[1])) / 2.0\n set_waypoint_speed(self.waypoints[0], next_speed)",
"def __init__(self, name, waypoints, position2d_proxy, waypoint_distance_tolerance):\n\n self.name = name\n self.waypoints = waypoints\n self.pp = position2d_proxy\n self.waypoint_distance_tolerance = waypoint_distance_tolerance\n\n self.active_waypoint_index = 0\n self.active_waypoint = self.waypoints[self.active_waypoint_index]\n self.first_update = True\n self.finished = False\n self.last_read = None",
"def test_does_not_have_geocode(self):\n name = Name.objects.create(name=\"Test Name\", name_type=Name.PERSONAL)\n assert not name.has_geocode()",
"def test_abort_route_when_id_route_not_exist(self):\n\n pass"
]
| [
"0.6968115",
"0.62368494",
"0.5923784",
"0.5820203",
"0.5715904",
"0.5578013",
"0.54665357",
"0.5460138",
"0.5361839",
"0.5331722",
"0.53265536",
"0.53263104",
"0.52776194",
"0.5273881",
"0.5273881",
"0.5269537",
"0.52435905",
"0.52151394",
"0.51992184",
"0.51812834",
"0.51444995",
"0.5133487",
"0.51296896",
"0.5126657",
"0.5121618",
"0.5120873",
"0.50900865",
"0.5086035",
"0.5060944",
"0.5054893"
]
| 0.68909466 | 1 |
Returns True if max episode steps have been reached. | def is_terminal(self) -> bool:
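    # Terminal when the zero-based time index reaches the last allowed step (max_episode_steps - 1)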
return self.time_index == self.max_episode_steps - 1 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _check_episode_end_condition(self):\n vehicles = self._get_available_vehicles()\n if np.sum(vehicles == 0) < self.episode_threshold:\n return True\n else:\n return False",
"def _exceeded_maximum_iteration(self) -> bool:\n if self.iteration >= self._maxiter:\n logger.warning(\n f\"Reached the maximum number of iterations \"\n f\"*{self._maxiter}*. Did not converge\"\n )\n return True\n\n else:\n return False",
"def exceeded(self) -> bool:\n if self.max_frames is not None:\n return self.current > self.max_frames\n if self.max_samples is not None:\n return self.current > self.max_samples\n if self.max_duration is not None:\n return self.current > self.max_duration\n return False",
"def exceeded_max(self):\n return self.total_max is not None and self.counter > self.total_max",
"def _check_episode_start_condition(self):\n vehicles = self._get_available_vehicles()\n if np.sum(vehicles == 0) >= self.episode_threshold:\n return True\n else:\n return False",
"def is_moving(self):\n return self.steps < self.max_steps",
"def episode_end(self):\n return self.game.is_episode_finished()",
"def is_terminal(self, state: EnvState, params: EnvParams) -> bool:\n # Check number of steps in episode termination condition\n done_steps = state.step >= params.max_steps_in_episode\n return done_steps",
"def max_steps(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"max_steps\")",
"def if_end(self, **kwargs):\n\n index = self.get('_index')\n\n if index and index >= len(self.steps)-1:\n return True # all steps have been used\n\n return False",
"def _compute_is_terminal(self):\n new_score = self.episode_qualities[-1]\n # by default the episode will terminate when all samples are labelled\n done = LalEnv._compute_is_terminal(self)\n # it also terminates when a quality reaches a predefined level\n if new_score >= self.target_quality:\n done = True\n return done",
"def max_steps(self) -> int:\n return pulumi.get(self, \"max_steps\")",
"def get_max_time_steps (self):\n return self.degreedays.thawing.num_timesteps",
"def is_done(self, agent, world) -> bool:\n if self.steps_from_last_reset / self.num_agents > self.episode_length:\n return True\n return False",
"def running(self):\r\n return self.__maxlen__ > 0",
"def is_exceeded(self):\n\n if self.stopwatch.check_time() > self.duration:\n self.stopwatch.start()\n self.num_processed_this_interval = 0\n return False\n\n return self.num_processed_this_interval >= self.max_per_interval",
"def _test_max_simulation_step(self):\n previous_step = self.program.steps[0]\n previous_pb_frame = self.program.steps[0].playback_frames[0]\n for step in self.program.steps:\n for index, pb_frame in enumerate(step.playback_frames):\n if self.program.simulation_type == InstructionListJointsFlags.TimeBased:\n msg = f\"Step {step.name} playback frame {index}, time_step {pb_frame.time_step} not in 'max_time_step' bounds\"\n self.assertLessEqual(pb_frame.time_step, self.program.max_time_step, msg)\n else:\n move_type = step.move_type if index != 0 else previous_step.move_type\n if move_type == MoveType.Joint:\n msg_deg = f\"Step {step.name} (Joint) playback frame {index}, deg_step {pb_frame.deg_step} not in 'max_deg_step' bounds\"\n\n # Check if value given in list result is smaller than max for simulation\n self.assertLessEqual(pb_frame.deg_step, self.program.max_deg_step, msg_deg)\n\n # Check if actual step is smaller than max for simulation\n actual_deg_step = max([abs(j_a[0] - j_b[0]) for j_a, j_b\n in zip(pb_frame.joints.rows, previous_pb_frame.joints.rows)])\n self.assertLessEqual(actual_deg_step, self.program.max_deg_step, msg_deg)\n else:\n msg_mm = f\"Step {step.name} (Frame )playback frame {index}, mm_step {pb_frame.mm_step} not in 'max_mm_step' bounds\"\n\n # Check if value given in list result is smaller than max for simulation\n self.assertLessEqual(pb_frame.mm_step, self.program.max_mm_step, msg_mm)\n\n # Check if actual step is smaller than max for simulation\n actual_mm_step = sqrt(sum([(c_a[0] - c_b[0]) * (c_a[0] - c_b[0]) for c_a, c_b\n in zip(pb_frame.coords.rows, previous_pb_frame.coords.rows)]))\n self.assertLessEqual(actual_mm_step, self.program.max_mm_step, msg_mm)\n\n previous_pb_frame = pb_frame\n previous_step = step",
"def get_max_steps(self):\n return float(self.trainer_parameters[\"max_steps\"])",
"def get_max_steps(self):\n return float(self.trainer_parameters['max_steps'])",
"def is_end_episode():\n return patterns_pb2.Condition(is_end_episode=True, eq=1)",
"def _termination(self):\n if self._never_terminate:\n return False\n\n if self._counter >= self._max_steps:\n return True\n\n return self.is_fallen() # terminates automatically when in fallen state",
"def is_eval_epoch(cur_epoch):\n return (cur_epoch + 1) % cfg.TRAIN.EVAL_PERIOD == 0 or (\n cur_epoch + 1\n ) == cfg.OPTIM.MAX_EPOCH",
"def stop(self):\n return not self.iteration < self.options['max_iters']",
"def _compute_is_terminal(self):\n # by default the episode will terminate when all samples are labelled\n done = LalEnv._compute_is_terminal(self)\n # it also terminates when self.n_horizon datapoints were labelled\n if np.size(self.indeces_known) == self.n_horizon:\n done = True\n return done",
"def close_to_exceeding(self) -> bool:\n mean = self.current / self.num_cuts\n if self.max_frames is not None:\n return self.current + mean > self.max_frames\n if self.max_samples is not None:\n return self.current + mean > self.max_samples\n if self.max_duration is not None:\n return self.current + mean > self.max_duration\n return False",
"def get_max_episode_len(path):\n path = path.replace('data/', '')\n path = path.replace('goals/', '')\n task = tasks.names[path]()\n max_steps = task.max_steps - 1 # Remember, subtract one!\n return max_steps",
"def has_more_trials(self) -> bool:\r\n raise NotImplementedError",
"def _isDone(self):\n return self.steps >= self.max_steps or len(self.food_ids) <= 0",
"def is_done(self):\n return True if self.t >= self.max_ep_len else False",
"def get_max_steps(self) -> float:\n return float(self.trainer_parameters[\"max_steps\"])"
]
| [
"0.71122867",
"0.6849744",
"0.6711568",
"0.670124",
"0.6694763",
"0.6669541",
"0.6615247",
"0.6608392",
"0.6584162",
"0.6422713",
"0.6394887",
"0.63653255",
"0.63096106",
"0.6282592",
"0.6261041",
"0.62465924",
"0.6240609",
"0.62317926",
"0.62271726",
"0.61191446",
"0.6112608",
"0.6107988",
"0.61069274",
"0.61062336",
"0.6105967",
"0.610538",
"0.60790324",
"0.6070181",
"0.60597533",
"0.60496426"
]
| 0.69028705 | 1 |
Round the value x down to the nearest time step interval. | def _round(self, x):
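    # Snap x down to the nearest multiple of minutes_per_step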
return x - x % self.minutes_per_step | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def round_down(x):\n return int(math.floor(x / 10.0)) * 10",
"def round_down(x, sig=2):\n dig = pow(10., sig - int(math.floor(math.log10(abs(x)))) - 1)\n return math.floor(x * dig) / dig",
"def downround(x, base):\n return base * math.floor(float(x) / base)",
"def round_down(x, m):\n return int(m * round(float(x) / m))",
"def upround(x, base):\n return base * math.ceil(float(x) / base)",
"def round_up(x, sig=2):\n dig = pow(10., sig - int(math.floor(math.log10(abs(x)))) - 1)\n return math.ceil(x * dig) / dig",
"def _round_to_nearest_multiple_down(x, n=5):\n return n * math.floor(float(x) / n)",
"def roundUP(x):\n\treturn int(ceil(x / 10.0)) * 10",
"def round(self, x):\n fraction, scaled_x, scale = self._get_fraction(x)\n\n if fraction < self.minimum_stochastic_distance or 1-fraction <self.minimum_stochastic_distance:\n result = round(x,self.precision)\n\n else:\n rounddown = fraction < self.random_generator.random()\n if rounddown:\n result = math.floor(scaled_x) / scale\n else:\n result = math.ceil(scaled_x) / scale\n\n self._record_roundoff_error(x, result)\n return result",
"def _round_to_nearest_multiple_up(x, n=5):\n return n * math.ceil(float(x) / n)",
"def round(x):\n return int(x + copysign(0.5, x))",
"def unit_step(x):\n if x < 0:\n return 0\n\n return 1",
"def custom_round(x):\n if x >= 1:\n x = 1\n else:\n x = 0\n return x",
"def roundto(x, to=10.0):\n if to and not math.isnan(x):\n return int(round(x / to)) * to\n else:\n return x",
"def decimal_ceil(x):\n int_x = int(x)\n if x - int_x == 0:\n return int_x\n return int_x + 1",
"def ceil(x):\n # if x is within MACHINE_EPS of an integer, return that integer\n if abs(x - round(x)) < MACHINE_EPS:\n return round(x)\n # otherwise, return the ceiling of x\n return math.ceil(x)",
"def iround(x):\n return ipart(x + 0.5)",
"def round_to_nearest_60(x):\r\n return int(60 * round(float(x) / 60))",
"def roundup100(x):\n\treturn int(math.ceil(x / 100.0)) * 100",
"def RoundUp(value, boundary):\n return (value + boundary - 1) & ~(boundary - 1)",
"def divide_and_round_up(x, y):\n return ((x - 1) // y) + 1",
"def round_to(x, y):\n return round(x, -int(floor(log10(abs(y)))))",
"def floor(x: float) -> int:\n return int(x) if x - int(x) >= 0 else int(x) - 1",
"def step(self) -> float:\n step = DEFAULT_STEP\n value_range = abs(self.max_value - self.min_value)\n if value_range != 0:\n while value_range <= step:\n step /= 10.0\n return step",
"def roundup_int(x, m):\n\treturn int(math.ceil(x / float(m))) * m",
"def ceil(x):\n return 0.0",
"def round_to_n(x, n=8):\n n = 1 + n - int(np.floor(np.log10(abs(x) + .1)))\n\n return round(x, n)",
"def scale_to_start(x):\n x = (x + eps) / (x[0] + eps)\n return x",
"def trunc(x):\n return 0",
"def round_down(val):\n floor_val = val\n\n try:\n if not is_empty(val):\n float_val = float(val)\n floor_val = math.floor(float_val)\n except Exception as e:\n pass\n\n return floor_val"
]
| [
"0.75671387",
"0.7262019",
"0.7236685",
"0.6981943",
"0.68482816",
"0.6823797",
"0.6816983",
"0.6760523",
"0.66808796",
"0.6487473",
"0.64819694",
"0.64758486",
"0.6460304",
"0.63035595",
"0.62998134",
"0.62797904",
"0.6267597",
"0.62645566",
"0.62511766",
"0.62149465",
"0.6169211",
"0.6167921",
"0.61407125",
"0.60860395",
"0.60840124",
"0.60833603",
"0.6072973",
"0.6062129",
"0.60045964",
"0.59496784"
]
| 0.77484167 | 0 |
Disconnect user using Facebook | def fbdisconnect():
facebook_id = login_session['facebook_id']
url = 'https://graph.facebook.com/%s/permissions' % facebook_id
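    # Deleting the permissions object on the Graph API revokes this app's authorization for the user
    # (Facebook normally expects an access token on this call)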
h = httplib2.Http()
result = h.request(url, 'DELETE')[1]
del login_session['facebook_id']
return "you have been logged out" | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fbdisconnect():\n\n facebook_id = login_session['facebook_id']\n # The access token must me included to successfully logout\n access_token = login_session['access_token']\n # Only disconnect a connected user.\n if access_token is None:\n response = make_response(json.dumps('Current user not connected.'),\n 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n if login_session['provider'] == 'google':\n gdisconnect()\n del login_session['gplus_id']\n\n url = 'https://graph.facebook.com/%s/permissions?access_token=%s' % (\n facebook_id, access_token)\n h = httplib2.Http()\n result = h.request(url, 'DELETE')[1]\n return \"you have been logged out\"",
"def gdisconnect():\n\n # Only disconnect a connected user.\n access_token = login_session.get('access_token')\n if access_token is None:\n response = make_response(json.dumps('Current user not connected.'),\n 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n if login_session['provider'] == 'facebook':\n fbdisconnect()\n del login_session['facebook_id']\n\n url = 'https://accounts.google.com/o/oauth2/revoke?token=%s' % access_token\n h = httplib2.Http()\n result = h.request(url, 'GET')[0]\n if result['status'] == '200':\n response = make_response(json.dumps('Successfully disconnected.'), 200)\n response.headers['Content-Type'] = 'application/json'\n return response\n else:\n response = make_response(\n json.dumps('Failed to revoke token for given user.', 400))\n response.headers['Content-Type'] = 'application/json'\n return response",
"def disconnect():\n\n if 'provider' in login_session:\n if login_session['provider'] == 'google':\n gdisconnect()\n del login_session['gplus_id']\n if login_session['provider'] == 'facebook':\n fbdisconnect()\n del login_session['facebook_id']\n del login_session['access_token']\n del login_session['username']\n del login_session['email']\n del login_session['picture']\n del login_session['user_id']\n del login_session['provider']\n flash(\"You have successfully been logged out.\")\n return redirect(url_for('showCategories'))\n else:\n flash(\"You were not logged in\")\n return redirect(url_for('showCategories'))",
"def fb_deauth(self, request):\n signed_request = request.data.get('signed_request')\n if signed_request:\n parsed_signed_request = facebook_controller.parse_signed_request(signed_request)\n facebook_user_id = parsed_signed_request.get('user_id')\n if facebook_user_id:\n facebook_controller.delete_linked_facebook_account(facebook_user_id)\n return Response('OK')",
"def disconnect():\n if 'provider' in login_session:\n if login_session['provider'] == 'google':\n gdisconnect()\n del login_session['gplus_id']\n del login_session['access_token']\n del login_session['username']\n del login_session['email']\n del login_session['picture']\n del login_session['user_id']\n del login_session['provider']\n flash(\"You have successfully been logged out.\")\n return redirect(url_for('showCategories'))\n else:\n flash(\"You were not logged in\")\n return redirect(url_for('showCategories'))",
"def gdisconnect():\n try:\n access_token = login_session['credentials']\n except KeyError:\n flash('Failed to get access token')\n return redirect(url_for('home'))\n print(\"User's name was {}.\".format(login_session['name']))\n if access_token is None:\n print('Access Token is None')\n response = make_response(\n json.dumps('Current user not connected.'), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n del login_session['credentials']\n del login_session['user_id']\n del login_session['name']\n del login_session['email']\n print('Successfully logged out.')\n flash('Successfully logged out.')\n return redirect(url_for('home'))",
"def gdisconnect():\r\n # only disconnect a connected user\r\n credentials = login_session.get('credentials')\r\n if credentials is None:\r\n response = make_response(json.dumps(\r\n 'Current user not connected.'), 401)\r\n response.headers['Content-Type'] = 'application/json'\r\n return response\r\n # Execute HTTP GET request to revoke current token\r\n access_token = credentials.access_token\r\n url = 'https://accounts.google.com/o/oauth2/revoke?token=%s' % access_token\r\n h = httplib2.Http()\r\n result = h.request(url, 'GET')[0]\r\n if result['status'] == '200':\r\n del login_session['access_token']\r\n del login_session['gplus_id']\r\n del login_session['username']\r\n del login_session['email']\r\n del login_session['picture']\r\n response = make_response(json.dumps('Successfully disconnected.'), 200)\r\n response.headers['Content-Type'] = 'application/json'\r\n return response\r\n else:\r\n response = make_response(json.dumps(\r\n 'Failed to revoke token for given user.', 400))\r\n response.headers['Content-Type'] = 'application/json'\r\n return response",
"def disconnect(self, login_session):\n\n # Only disconnect a connected user.\n credentials = login_session.get('credentials')\n\n if 'gplus_id' in login_session:\n del login_session['gplus_id']\n if 'credentials' in login_session:\n del login_session['credentials']\n\n if credentials is None:\n response = make_response(\n json.dumps('Current user not connected.'), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n access_token = credentials.access_token\n url = 'https://accounts.google.com/o/oauth2/revoke?token=%s' % access_token\n h = httplib2.Http()\n result = h.request(url, 'GET')[0]\n if result['status'] != '200':\n # For whatever reason, the given token was invalid.\n response = make_response(\n json.dumps('Failed to revoke token for given user.', 400))\n response.headers['Content-Type'] = 'application/json'\n return response\n else:\n return \"You have been logged out.\"",
"def disconnect():\n\n\tglob.tokens.deleteToken(glob.tokens.getTokenFromUserID(999))",
"def gdisconnect():\n # Only disconnect a connected user.\n access_token = login_session.get('access_token')\n if access_token is None:\n print 'Access Token is None'\n response = make_response(json.dumps('Current user not \\\n connected.'), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n print 'In gdisconnect access token is %s', access_token\n print 'User name is: '\n print login_session['username']\n url = 'https://accounts.google.com/o/oauth2/revoke?token=%s' \\\n % login_session['access_token']\n h = httplib2.Http()\n result = h.request(url, 'GET')[0]\n print 'result is '\n print result\n if result['status'] == '200':\n del login_session['access_token']\n del login_session['gplus_id']\n del login_session['username']\n del login_session['email']\n del login_session['picture']\n response = make_response(json.dumps('Successfully disconnected.'), 200)\n response.headers['Content-Type'] = 'application/json'\n return response\n else:\n response = make_response(json.dumps\n ('Failed to revoke token for given user.',\n 400))\n response.headers['Content-Type'] = 'application/json'\n return response",
"def gdisconnect():\n\n # Only disconnect a connected user.\n access_token = login_session.get('access_token')\n if access_token is None:\n response = make_response(\n json.dumps('Current user not connected.'), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n url = 'https://accounts.google.com/o/oauth2/revoke?token=%s' % access_token\n h = httplib2.Http()\n result = h.request(url, 'GET')[0]\n\n if result['status'] == '200':\n # Reset the user's sesson.\n del login_session['access_token']\n del login_session['gplus_id']\n del login_session['username']\n del login_session['email']\n\n return redirect(url_for('showSports'))\n else:\n # For whatever reason, the given token was invalid.\n response = make_response(\n json.dumps('Failed to revoke token for given user.', 400))\n response.headers['Content-Type'] = 'application/json'\n return response",
"def gdisconnect():\r\n access_token = login_session.get('access_token')\r\n if access_token is None:\r\n print('Access Token is None')\r\n flash('Current user not connected.')\r\n return redirect(url_for('showCategories'))\r\n # print('Got access token for the user: {}'.\r\n # format(login_session['username']))\r\n url = 'https://accounts.google.com/o/oauth2/revoke?token=%s' %\\\r\n login_session['access_token']\r\n h = httplib2.Http()\r\n result = h.request(url, 'GET')[0]\r\n # print('Access token revoke result:{}'.format(result))\r\n if result['status'] == '200':\r\n del login_session['access_token']\r\n del login_session['gplus_id']\r\n del login_session['username']\r\n del login_session['email']\r\n del login_session['picture']\r\n flash('Successfully logged out')\r\n return redirect(url_for('showCategories'))\r\n else:\r\n flash('Failed to revoke token for given user.')\r\n return redirect(url_for('showCategories'))",
"def disconnect():\n\n # Only disconnect a connected user.\n credentials = session.get('credentials')\n if credentials is None:\n response = make_response(json.dumps('Current user not connected.'), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n # Execute HTTP GET request to revoke current token.\n access_token = credentials.access_token\n url = 'https://accounts.google.com/o/oauth2/revoke?token=%s' % access_token\n h = httplib2.Http()\n result = h.request(url, 'GET')[0]\n\n if result['status'] == '200':\n # Reset the user's session.\n del session['credentials']\n response = make_response(json.dumps('Successfully disconnected.'), 200)\n response.headers['Content-Type'] = 'application/json'\n return response\n else:\n # For whatever reason, the given token was invalid.\n response = make_response(\n json.dumps('Failed to revoke token for given user.', 400))\n response.headers['Content-Type'] = 'application/json'\n return response",
"def gdisconnect():\n access_token = login_session['access_token']\n print 'In gdisconnect access token is %s', access_token\n print 'User name is: '\n print login_session['username']\n if access_token is None:\n print 'Access Token is None'\n response = make_response(\n json.dumps('Current user not connected.'),\n 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n url = 'https://accounts.google.com/o/oauth2/revoke?token=%s' \\\n % login_session['access_token']\n h = httplib2.Http()\n result = h.request(url, 'GET')[0]\n print 'result is '\n print result\n if result['status'] == '200':\n del login_session['access_token']\n del login_session['gplus_id']\n del login_session['username']\n del login_session['email']\n del login_session['picture']\n flash('Successfully disconnected.', 'alert-success')\n return redirect(url_for('showStyles'))\n else:\n response = make_response(json.dumps(\n 'Failed to revoke token for given user.',\n 400))\n response.headers['Content-Type'] = 'application/json'\n return response",
"def gdisconnect():\n # Verify that the nonce received is valid.\n if request.args.get('state') != login_session['state']:\n response = make_response(\n json.dumps({'error': 'Invalid state parameter'}), 401\n )\n response.headers['Content-Type'] = 'application/json'\n return response\n # Only disconnect a connected user.\n access_token = login_session.get('access_token')\n if access_token is None:\n response = make_response(\n json.dumps({'error': 'Current user not connected.'}), 404\n )\n response.headers['Content-Type'] = 'application/json'\n return response\n\n # Execute HTTP GET request to revoke current token.\n url = 'https://accounts.google.com/o/oauth2/revoke?token=%s' % access_token\n h = httplib2.Http()\n result = h.request(url, 'GET')[0]\n\n if result['status'] == '200':\n # Reset the user's session\n del login_session['provider']\n del login_session['access_token']\n del login_session['gplus_id']\n del login_session['username']\n del login_session['email']\n del login_session['picture']\n # Our response will include a new nonce.\n state = get_new_state()\n login_session['state'] = state\n response = make_response(\n json.dumps({'success': 'User disconnected', 'nonce': login_session['state']}), 200\n )\n response.headers['Content-Type'] = 'application/json'\n return response\n elif result['status'] == '400':\n del login_session['provider']\n del login_session['access_token']\n del login_session['gplus_id']\n del login_session['username']\n del login_session['email']\n del login_session['picture']\n # Our response will include a new nonce.\n state = get_new_state()\n login_session['state'] = state\n response = make_response(\n json.dumps({'success': 'User was already disconnected', 'nonce': login_session['state']}), 200\n )\n response.headers['Content-Type'] = 'application/json'\n return response\n else:\n response = make_response(\n json.dumps(\"Error: \"+result['status']), 401\n )\n response.headers['Content-Type'] = 'application/json'\n return response",
"def gdisconnect():\n\n access_token = login_session.get('access_token')\n if access_token is None:\n response = make_response(json.dumps(\n 'Current user not connected.'\n ), 401\n )\n response.headers['Content-Type'] = 'application/json'\n return response\n url = 'https://accounts.google.com/o/oauth2/revoke?token=%s'\\\n % login_session['access_token']\n h = httplib2.Http()\n result = h.request(url, 'GET')[0]\n if result['status'] == '200':\n del login_session['access_token']\n del login_session['gplus_id']\n # del login_session['username']\n # del login_session['email']\n # del login_session['picture']\n response = make_response(json.dumps('Successfully disconnected.'), 200)\n response.headers['Content-Type'] = 'application/json'\n return response\n else:\n response = make_response(json.dumps(\n 'Failed to revoke token for given user.', 400\n )\n )\n response.headers['Content-Type'] = 'application/json'\n return response",
"def gdisconnect():\n\taccess_token = session.get('access_token')\n\tuname = session.get('username')\n\n\tif not access_token:\n\t\tresponse = make_response(\n\t\t\tjson.dumps('Current user not connected.'), 401)\n\t\tresponse.headers['Content-Type'] = 'application/json'\n\t\treturn response\n\n\turl = 'https://accounts.google.com/o/oauth2/revoke?token=%s' % access_token\n\th = httplib2.Http()\n\tresult = h.request(url, 'GET')[0]\n\n\tif result['status'] != '200':\n\t\t# For whatever reason, the given token was invalid.\n\t\tresponse = make_response(\n\t\t\tjson.dumps('Failed to revoke token for given user.'), 400)\n\t\tresponse.headers['Content-Type'] = 'application/json'\n\t\t[session.pop(k, None) for k, _ in session.items()]\n\t\treturn response\n\t#Clearing out session data\n\t[session.pop(k, None) for k, _ in session.items()]\n\treturn redirect(request.referrer)",
"def gdisconnect():\n\n # Only disconnect the connected user.\n access_token = login_session.get('access_token')\n if access_token is None:\n response = make_response(\n json.dumps('Current user not connected.'), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n url = 'https://accounts.google.com/o/oauth2/revoke?token=%s' % access_token\n h = httplib2.Http()\n result = h.request(url, 'GET')[0]\n\n if result['status'] == '200':\n response = make_response(json.dumps('Successfully disconnected.'), 200)\n response.headers['Content-Type'] = 'application/json'\n return response\n else:\n response = make_response(\n json.dumps('Failed to revoke token for given user.'), 400)\n response.headers['Content-Type'] = 'application/json'\n return response",
"def logout_with_google():\n access_token = login_session.get('access_token')\n if access_token is None:\n print 'Access Token is None'\n response = make_response(\n json.dumps('Current user not connected.'), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n print 'In gdisconnect access token is %s', access_token\n print 'User name is: '\n print login_session['username']\n url = 'https://accounts.google.com/o/oauth2/revoke?token=%s' \\\n % login_session['access_token']\n h = httplib2.Http()\n result = h.request(url, 'GET')[0]\n print 'result is '\n print result\n if result['status'] == '200':\n del login_session['access_token']\n del login_session['gplus_id']\n del login_session['username']\n del login_session['user_id']\n del login_session['email']\n del login_session['picture']\n response = make_response(json.dumps('Successfully disconnected.'), 200)\n response.headers['Content-Type'] = 'application/json'\n return redirect(url_for('login'))\n else:\n response = make_response(\n json.dumps('Failed to revoke token for given user.', 400))\n response.headers['Content-Type'] = 'application/json'\n return response",
"def gdisconnect():\n\n if login_session['access_token'] is None:\n response = make_response(json.dumps('User does not login'), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n url = (\n \"https://accounts.google.com/\"\n \"o/oauth2/revoke?token=%s\") % login_session['access_token']\n h = httplib2.Http()\n response = h.request(url, 'GET')\n result = response[0]\n content = response[1]\n if result['status'] == '200':\n del login_session['access_token']\n del login_session['gplus_id']\n del login_session['username']\n return redirect(url_for('home'))\n else:\n del login_session['access_token']\n del login_session['gplus_id']\n del login_session['username']\n response = make_response(json.dumps('Fail to logout', 400))\n response.headers['Content-Type'] = 'application/json'\n return response",
"def logout_user():\n pass",
"def disconnect(request, backend, association_id=None):\n backend = get_backend(backend, request, request.path)\n if not backend:\n return HttpResponseServerError('Incorrect authentication service')\n backend.disconnect(request.user, association_id)\n url = request.REQUEST.get(REDIRECT_FIELD_NAME, '') or \\\n DISCONNECT_REDIRECT_URL or \\\n DEFAULT_REDIRECT\n return HttpResponseRedirect(url)",
"def closeaccount(request):\n get_user_model().objects.get(username=request.user.get_username()).delete()\n return Response({}, status=status.HTTP_200_OK)",
"def _disconnect(remote, *args, **kwargs):\n if not current_user.is_authenticated:\n return current_app.login_manager.unauthorized()\n\n account = RemoteAccount.get(\n user_id=current_user.get_id(), client_id=remote.consumer_key\n )\n if account:\n external_id = account.extra_data.get(\"external_id\")\n\n if external_id:\n oauth_unlink_external_id(dict(id=external_id, method=\"cern_openid\"))\n\n with db.session.begin_nested():\n account.delete()\n\n disconnect_identity(g.identity)",
"def disconnect(self, code):\n try:\n if not self.scope['user'].is_authenticated:\n logger.error('User in not authenticated')\n self.close()\n\n user = Profile.objects.get(user=self.scope['user'])\n group_name = user.group_name\n\n self.channel_layer.group_discard(group_name, self.channel_name)\n except Exception as e:\n logger.error(e)",
"def user_logged_out(self, sender, request, user, **kwargs):",
"def disconnect_user(room: PublicChatRoom, user) -> bool:\n return room.disconnect_user(user)",
"def deleteSocialAuthentication(self, network):\n\t\turl = \"https://habitica.com/api/v3/user/auth/social/\" + network\n\t\treturn(deleteUrl(url, self.credentials))",
"def log_out_user(self):\n flask_login.logout_user()",
"async def logout(self):\n try:\n user = self.request.session.get(\"user\")\n chat = self.request.session.get(\"chat\")\n active_sockets = self.request.app.active_sockets\n active_sockets.get_chat(chat).del_user(user)\n\n self.request.session.pop(\"user\")\n self.request.user = None\n self.request.chat = None\n\n return {\n \"Type\": \"account\",\n \"Command\": \"logout\",\n \"Status\": \"success\"\n }\n except KeyError:\n return {\"Type\": \"account\", \"Command\": \"logout\", \"Status\": \"error\"}"
]
| [
"0.85809785",
"0.7544562",
"0.7427517",
"0.73637265",
"0.72514355",
"0.7159431",
"0.7095894",
"0.7086774",
"0.70851356",
"0.7000922",
"0.6953053",
"0.6939351",
"0.69161266",
"0.6914257",
"0.6896904",
"0.67927265",
"0.6780294",
"0.67707384",
"0.67109525",
"0.6651078",
"0.65444547",
"0.6535357",
"0.65337926",
"0.6417353",
"0.63847345",
"0.6317302",
"0.6310671",
"0.6260238",
"0.6247965",
"0.6245692"
]
| 0.86079866 | 0 |
Generates the hash of the file | def gerar_hash(nome_arquivo):
m = hashlib.sha256()
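    # Read the file as raw bytes and feed its entire contents to the SHA-256 hasher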
arquivo = open(nome_arquivo,'rb').read()
m.update(arquivo)
hash_votos = m.digest()
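    # digest() returns raw bytes, so the output file must be opened in binary mode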
open("hash_votos_cifrados.txt","w").write(hash_votos) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def hash_file(filename):\r\n\r\n # make a hash object\r\n h = hashlib.sha1()\r\n\r\n # open file for reading in binary mode\r\n with open(filename,'rb') as file:\r\n\r\n # loop till the end of the file\r\n chunk = 0\r\n while chunk != b'':\r\n # read only 1024 bytes at a time\r\n chunk = file.read(1024)\r\n h.update(chunk)\r\n\r\n # return the hex representation of digest\r\n return h.hexdigest()",
"def hash_of_file(path):\n with open(path, 'rb') as archive:\n sha = sha256()\n while True:\n data = archive.read(2 ** 20)\n if not data:\n break\n sha.update(data)\n return encoded_hash(sha)",
"def hash(self) -> bytes:",
"def hash_file(filename):\n # make a hash object\n h = hashlib.sha1()\n \n # open file for reading in binary mode\n with open(filename,'rb') as file:\n # loop till the end of the file\n chunk = 0\n while chunk != b'':\n # read only 1024 bytes at a time\n chunk = file.read(1024)\n h.update(chunk)\n # return the hex representation of digest\n return h.hexdigest()",
"def hash_file(filename):\n # make a hash object\n h = hashlib.sha1()\n \n # open file for reading in binary mode\n with open(filename,'rb') as file:\n # loop till the end of the file\n chunk = 0\n while chunk != b'':\n # read only 1024 bytes at a time\n chunk = file.read(1024)\n h.update(chunk)\n # return the hex representation of digest\n return h.hexdigest()",
"def _hash_file_content(self, path):\n hasher = hashlib.sha1()\n with open(path, 'rb') as file:\n buffer = file.read(self.hash_block_size)\n while len(buffer) > 0:\n hasher.update(buffer)\n buffer = file.read(self.hash_block_size)\n return hasher.hexdigest()",
"def hash_file(filename):\n # make a hash object\n h = hashlib.sha1()\n # open file for reading in binary mode\n with open(filename,'rb') as file:\n # loop till the end of the file\n chunk = 0\n while chunk != b'':\n # read only 1024 bytes at a time\n chunk = file.read(1024)\n h.update(chunk)\n # return the hex representation of digest\n return h.hexdigest()",
"def hash_file(file_to_hash):\n print(\"Hashing \" + file_to_hash + \"...\")\n hash_algorithm = hashlib.sha256()\n file = open(file_to_hash, 'rb')\n while True:\n contents = file.read(65536)\n if not contents:\n break\n hash_algorithm.update(contents)\n hash_str = hash_algorithm.hexdigest()\n return hash_str",
"def hash_file(filename):\n\n # make a hash object\n h = hashlib.sha256()\n\n # open file for reading in binary mode\n with open(filename,'rb') as file:\n\n # loop till the end of the file\n chunk = 0\n while chunk != b'':\n # read only 1024 bytes at a time\n chunk = file.read(1024)\n h.update(chunk)\n\n # return the hex representation of digest\n return h.hexdigest()",
"def computeHash(infile):\n f = open(infile, 'rb')\n buffer = f.read()\n f.close()\n return hashlib.sha1(buffer).hexdigest()",
"def file_hash(filepath: Path):\n hsh = hashlib.sha256()\n b = bytearray(128 * 1024)\n mv = memoryview(b)\n with Path(filepath).open(\"rb\", buffering=0) as f:\n for n in iter(lambda: f.readinto(mv), 0):\n hsh.update(mv[:n])\n return hsh.hexdigest()",
"def hashfile(file):\n\n hasher = hashlib.sha256()\n\n with open(file, 'rb') as afile:\n buf = afile.read(BLOCKSIZE)\n hasher.update(buf)\n\n return(hasher.hexdigest())",
"def _calculate_hash(self, file_object):\n hasher = self.hashlib()\n for chunk in self.iterchunks(file_object):\n hasher.update(chunk)\n return hasher.hexdigest()",
"def hashing(file,pp):\n\n def myhash(instring):\n # sdbm hash\n res = 0\n for t in instring:\n res = (ord(t) + (res<<6) + (res<<16) - res) % 2**32\n return res\n\n return hex(myhash(file.replace('\\\\','/')+\":\"+pp))",
"def hash_file(filename):\n\n # make a hash object\n h = hashlib.sha1()\n\n # open file for reading in binary mode\n with open(filename, 'rb') as file:\n # loop till the end of the file\n chunk = 0\n while chunk != b'':\n # read only 1024 bytes at a time\n chunk = file.read(1024)\n h.update(chunk)\n\n # return the hex representation of digest\n return h.hexdigest()",
"def hash_from_file(file_path):\r\n return hash_from_code(open(file_path, 'rb').read())",
"def hash(path):\n\n with open(path, 'r') as file:\n return hashlib.sha1(file.read()).hexdigest()",
"def gdsii_hash(filename, engine=None):\n with open(filename, 'rb') as fin:\n data = fin.read()\n contents = []\n start = pos = 0\n while pos < len(data):\n size, rec = struct.unpack('>HH', data[pos:pos + 4])\n if rec == 0x0502:\n start = pos + 28\n elif rec == 0x0700:\n contents.append(data[start:pos])\n pos += size\n h = hashlib.sha1() if engine is None else engine\n for x in sorted(contents):\n h.update(x)\n return h.hexdigest()",
"def hash_file(file_name):\n BLOCKSIZE = 65536\n hasher = hashlib.sha1()\n with open(file_name, 'rb') as afile:\n buf = afile.read(BLOCKSIZE)\n while len(buf) > 0:\n hasher.update(buf)\n buf = afile.read(BLOCKSIZE)\n return(hasher.hexdigest())",
"def hashFile(path: str) -> str:\n\tif not os.path.exists(path):\n\t\traise FileNotFoundError\n\n\thasher = hashlib.sha1()\n\tblock_sz = 8192\n\twith open(path, 'rb') as f:\n\t\tbuf = f.read(block_sz)\n\t\twhile len(buf) > 0:\n\t\t\thasher.update(buf)\n\t\t\tbuf = f.read(block_sz)\n\treturn str(hasher.hexdigest())",
"def get_hash(file_buffer):\n data = file_buffer.read()\n hasher = sha1()\n hasher.update(data)\n return hasher.hexdigest()",
"def _hash_file(fpath, algorithm='sha256', chunk_size=65535):\n if (algorithm == 'sha256') or (algorithm == 'auto' and len(hash) == 64):\n hasher = hashlib.sha256()\n else:\n hasher = hashlib.md5()\n\n with open(fpath, 'rb') as fpath_file:\n for chunk in iter(lambda: fpath_file.read(chunk_size), b''):\n hasher.update(chunk)\n\n return hasher.hexdigest()",
"def compute_hash(fileName):\n m = hashlib.sha1()\n try:\n fd = open(fileName,\"rb\")\n except IOError:\n print (\"Unable to open the file in readmode:\", fileName)\n return\n content = fd.readlines()\n fd.close()\n for eachLine in content:\n m.update(eachLine)\n return m.hexdigest()",
"def hash_file(method, path):\n f = open(path, \"rb\")\n h = method()\n while True:\n buf = f.read(BUFSIZE)\n if not buf:\n break\n h.update(buf)\n return h.hexdigest()",
"def hash(self):\n return self.audio_file.hash() + self.transcript_file.hash()",
"def _hash_file(fpath, algorithm='sha256', chunk_size=65535):\n if (algorithm is 'sha256') or (algorithm is 'auto' and len(hash) is 64):\n hasher = hashlib.sha256()\n else:\n hasher = hashlib.md5()\n\n with open(fpath, 'rb') as fpath_file:\n for chunk in iter(lambda: fpath_file.read(chunk_size), b''):\n hasher.update(chunk)\n\n return hasher.hexdigest()",
"def hashfile(self, afile):\n # encode_buffer = False\n\n buf = afile.read(self.blocksize)\n while buf:\n # Need to use string-escape for Python 2 non-unicode strings. For\n # Python 2 unicode strings and all Python 3 strings, we need to use\n # unicode-escape. The effect of them is the same.\n if isinstance(buf, str):\n buf = buf.encode('unicode-escape')\n\n self.hasher.update(buf)\n buf = afile.read(self.blocksize)\n return self.hasher.hexdigest()",
"def hashFile(filename):\n\tblocks = []\n\twith open(filename, 'rb') as f:\n\t\tblock = f.read(1024)\n\t\twhile block:\n\t\t\tblocks.append(block)\n\t\t\tblock = f.read(1024)\n\t\n\tprevHash = b''\n\tfor block in reversed(blocks):\n\t\thash = sha256(block + prevHash)\n\t\tprevHash = hash\n\treturn prevHash",
"def hash(self) -> str:\r\n ...",
"def get_file_hash(fname, hash_length):\n hash_sha = hashlib.sha256()\n with open(fname, 'rb') as infile:\n for chunk in iter(lambda: infile.read(4096), b''):\n hash_sha.update(chunk)\n hash_sha = hash_sha.hexdigest()\n hash_sha = int(hash_sha, 16) % (2 ** (4 * hash_length))\n return hex_encode(hash_sha, hash_length)"
]
| [
"0.74447024",
"0.7410846",
"0.7404686",
"0.73769",
"0.73769",
"0.7374401",
"0.7373445",
"0.73704106",
"0.7331975",
"0.7331069",
"0.7302976",
"0.73004663",
"0.7294757",
"0.7269015",
"0.719902",
"0.7176025",
"0.71684515",
"0.71463585",
"0.7139593",
"0.71361864",
"0.71261925",
"0.71216714",
"0.71133465",
"0.71064925",
"0.7103185",
"0.70876306",
"0.7073054",
"0.7070615",
"0.70672053",
"0.7065373"
]
| 0.82218 | 0 |
Generates the secret AES key | def gera_chave():
AES_tamanho_chave = 32
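    # 32 bytes from os.urandom correspond to a 256-bit AES key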
chave_aes_secreta = os.urandom(AES_tamanho_chave)
return chave_aes_secreta | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def cambiar_escena(self, escena):\n\t\t# Reemplazo directo\n\t\tself.escena = escena\n\t\t# Reiniciar la ventana con el tamaño de la nueva escena\n\t\tprint(\"Iniciando nuevo contexto OpenGL...\")\n\t\tv_ancho, v_alto = escena.tam\n\t\topciones = OPENGL | DOUBLEBUF\n\t\tif escena.pant_compl:\n\t\t\topciones |= FULLSCREEN\n\t\tpygame.display.set_mode((v_ancho, v_alto), opciones)\n\t\t# Título por defecto de la ventana\n\t\tpygame.display.set_caption(escena.nombre)\n\t\t# Reiniciar OpenGL\n\t\tself.gl_ini(v_ancho, v_alto)\n\t\t# Darle los datos del núcleo a la ventana\n\t\tself.escena.nucleo = self\n\t\tself.escena.eventos = self.mapa_eve\n\t\tglClearColor(*escena.color)\n\t\t# Ejecutar la lógica inicial de la escena\n\t\tprint(\"Iniciando escena...\")\n\t\tself.escena.logica_ini()",
"def artPuttyCtx(*args, accopacity: bool=False, activeListChangedProc: Union[AnyStr, bool]=\"\",\n afterStrokeCmd: Union[AnyStr, bool]=\"\", alphaclamp: Union[AnyStr, bool]=\"none\",\n alphaclamplower: Union[float, bool]=0.0, alphaclampupper: Union[float,\n bool]=1.0, attrSelected: Union[AnyStr, bool]=\"\", autosmooth: bool=False,\n beforeStrokeCmd: Union[AnyStr, bool]=\"\", brushStrength: Union[float, bool]=1.0,\n brushalignment: bool=True, brushfeedback: bool=True, clamp: Union[AnyStr,\n bool]=\"none\", clamplower: Union[float, bool]=0.0, clampupper: Union[float,\n bool]=1.0, clear: bool=True, collapsecvtol: Union[float, bool]=0.005,\n colorAlphaValue: Union[float, bool]=0.0, colorRGBAValue: Union[List[float,\n float, float, float], bool]=None, colorRGBValue: Union[List[float, float,\n float], bool]=None, colorRamp: Union[AnyStr, bool]=\"\", colorfeedback:\n bool=False, colorfeedbackOverride: bool=False, colorrangelower: Union[float,\n bool]=0.0, colorrangeupper: Union[float, bool]=1.0, dataTypeIndex: Union[int,\n bool]=0, disablelighting: bool=False, dispdecr: bool=True, dispincr: bool=True,\n dragSlider: AnyStr=\"\", duringStrokeCmd: Union[AnyStr, bool]=\"\", dynclonemode:\n bool=True, erasesrfupd: bool=True, exists: bool=True, expandfilename: bool=True,\n exportaspectratio: Union[float, bool]=0.0, exportfilemode: Union[AnyStr,\n bool]=\"luminance/rgb\", exportfilesave: AnyStr=\"\", exportfilesizex: Union[int,\n bool]=0, exportfilesizey: Union[int, bool]=0, exportfiletype: Union[AnyStr,\n bool]=\"\", filterNodes: bool=True, history: bool=True, image1: Union[AnyStr,\n bool]=\"\", image2: Union[AnyStr, bool]=\"\", image3: Union[AnyStr, bool]=\"\",\n importfileload: AnyStr=\"\", importfilemode: Union[AnyStr, bool]=\"alpha\",\n importreassign: bool=False, interactiveUpdate: bool=True, invertrefvector:\n bool=False, lastRecorderCmd: Union[AnyStr, bool]=\"\", lastStampName:\n Union[AnyStr, bool]=\"\", lowerradius: Union[float, bool]=0.0, makeStroke:\n Union[int, List[int], bool]=0, mappressure: Union[AnyStr, bool]=\"none\",\n maxdisp: Union[float, bool]=1.0, maxvalue: Union[float, bool]=1.0, minvalue:\n Union[float, bool]=0.0, mouldtypehead: Union[AnyStr, bool]=\"\", mouldtypemouse:\n Union[AnyStr, bool]=\"push\", mouldtypetail: Union[AnyStr, bool]=\"\", name:\n AnyStr=\"\", objattrArray: Union[AnyStr, bool]=\"\", opacity: Union[float,\n bool]=1.0, outline: bool=True, outwhilepaint: bool=False, paintNodeArray:\n Union[AnyStr, bool]=\"\", paintattrselected: AnyStr=\"\", paintmode: Union[AnyStr,\n bool]=\"screen\", paintoperationtype: Union[AnyStr, bool]=\"Paint\", pickColor:\n bool=True, pickValue: bool=True, playbackCursor: Union[List[float, float],\n List[List[float, float]], bool]=None, playbackPressure: Union[float,\n List[float], bool]=0.0, polecv: bool=True, preserveclonesource: bool=True,\n profileShapeFile: Union[AnyStr, bool]=\"\", projective: bool=False, radius:\n Union[float, bool]=1.0, rampMaxColor: Union[List[float, float, float],\n bool]=None, rampMinColor: Union[List[float, float, float], bool]=None, record:\n bool=True, reflection: bool=False, reflectionaboutorigin: bool=True,\n reflectionaxis: Union[AnyStr, bool]=\"x\", refsurface: bool=True, refvector:\n Union[AnyStr, bool]=\"normal\", refvectoru: Union[float, bool]=0.0, refvectorv:\n Union[float, bool]=0.0, screenRadius: Union[float, bool]=0.0,\n selectclonesource: bool=True, selectedattroper: Union[AnyStr, bool]=\"absolute\",\n showactive: bool=True, smoothiters: Union[int, bool]=3, stampDepth: Union[float,\n bool]=0.0, 
stampProfile: Union[AnyStr, bool]=\"\", stampSpacing: Union[float,\n bool]=1.0, stitchcorner: bool=True, stitchedgeflood: bool=True, stitchtype:\n Union[AnyStr, bool]=\"position\", strokesmooth: Union[AnyStr, bool]=\"\",\n surfaceConformedBrushVertices: bool=True, tablet: bool=True, tangentOutline:\n bool=True, toolOffProc: Union[AnyStr, bool]=\"\", toolOnProc: Union[AnyStr,\n bool]=\"\", updateerasesrf: bool=True, updaterefsrf: bool=True, useColorRamp:\n bool=True, useMaxMinColor: bool=True, usepressure: bool=False, value:\n Union[float, bool]=0.0, whichTool: Union[AnyStr, bool]=\"\", worldRadius:\n Union[float, bool]=0.0, q=True, query=True, e=True, edit=True,\n **kwargs)->Union[None, Any]:\n pass",
"def operate_cipher(self):",
"def artUserPaintCtx(*args, accopacity: bool=False, activeListChangedProc: Union[AnyStr,\n bool]=\"\", afterStrokeCmd: Union[AnyStr, bool]=\"\", alphaclamp: Union[AnyStr,\n bool]=\"none\", alphaclamplower: Union[float, bool]=0.0, alphaclampupper:\n Union[float, bool]=1.0, attrSelected: Union[AnyStr, bool]=\"\",\n beforeStrokeCmd: Union[AnyStr, bool]=\"\", brushalignment: bool=True,\n brushfeedback: bool=True, chunkCommand: Union[AnyStr, bool]=\"\", clamp:\n Union[AnyStr, bool]=\"none\", clamplower: Union[float, bool]=0.0, clampupper:\n Union[float, bool]=1.0, clear: bool=True, colorAlphaValue: Union[float,\n bool]=0.0, colorRGBAValue: Union[List[float, float, float, float],\n bool]=None, colorRGBValue: Union[List[float, float, float], bool]=None,\n colorRamp: Union[AnyStr, bool]=\"\", colorfeedback: bool=False,\n colorfeedbackOverride: bool=False, colorrangelower: Union[float, bool]=0.0,\n colorrangeupper: Union[float, bool]=1.0, dataTypeIndex: Union[int, bool]=0,\n disablelighting: bool=False, dragSlider: AnyStr=\"\", duringStrokeCmd:\n Union[AnyStr, bool]=\"\", dynclonemode: bool=True, exists: bool=True,\n expandfilename: bool=True, exportaspectratio: Union[float, bool]=0.0,\n exportfilemode: Union[AnyStr, bool]=\"luminance/rgb\", exportfilesave:\n AnyStr=\"\", exportfilesizex: Union[int, bool]=0, exportfilesizey: Union[int,\n bool]=0, exportfiletype: Union[AnyStr, bool]=\"\", filterNodes: bool=True,\n finalizeCmd: Union[AnyStr, bool]=\"\", fullpaths: bool=False,\n getArrayAttrCommand: Union[AnyStr, bool]=\"\", getSurfaceCommand:\n Union[AnyStr, bool]=\"\", getValueCommand: Union[AnyStr, bool]=\"\", history:\n bool=True, image1: Union[AnyStr, bool]=\"\", image2: Union[AnyStr, bool]=\"\",\n image3: Union[AnyStr, bool]=\"\", importfileload: AnyStr=\"\", importfilemode:\n Union[AnyStr, bool]=\"alpha\", importreassign: bool=False, initializeCmd:\n Union[AnyStr, bool]=\"\", interactiveUpdate: bool=True, lastRecorderCmd:\n Union[AnyStr, bool]=\"\", lastStampName: Union[AnyStr, bool]=\"\", lowerradius:\n Union[float, bool]=0.0, makeStroke: Union[int, List[int], bool]=0,\n mappressure: Union[AnyStr, bool]=\"none\", maxvalue: Union[float, bool]=1.0,\n minvalue: Union[float, bool]=0.0, name: AnyStr=\"\", objattrArray:\n Union[AnyStr, bool]=\"\", opacity: Union[float, bool]=1.0, outline: bool=True,\n outwhilepaint: bool=False, paintNodeArray: Union[AnyStr, bool]=\"\",\n paintattrselected: AnyStr=\"\", paintmode: Union[AnyStr, bool]=\"screen\",\n paintoperationtype: Union[AnyStr, bool]=\"Paint\", pickColor: bool=True,\n pickValue: bool=True, playbackCursor: Union[List[float, float],\n List[List[float, float]], bool]=None, playbackPressure: Union[float,\n List[float], bool]=0.0, preserveclonesource: bool=True, profileShapeFile:\n Union[AnyStr, bool]=\"\", projective: bool=False, radius: Union[float,\n bool]=1.0, rampMaxColor: Union[List[float, float, float], bool]=None,\n rampMinColor: Union[List[float, float, float], bool]=None, record:\n bool=True, reflection: bool=False, reflectionaboutorigin: bool=True,\n reflectionaxis: Union[AnyStr, bool]=\"x\", screenRadius: Union[float,\n bool]=0.0, selectclonesource: bool=True, selectedattroper: Union[AnyStr,\n bool]=\"absolute\", setArrayValueCommand: Union[AnyStr, bool]=\"\",\n setValueCommand: Union[AnyStr, bool]=\"\", showactive: bool=True, stampDepth:\n Union[float, bool]=0.0, stampProfile: Union[AnyStr, bool]=\"\", stampSpacing:\n Union[float, bool]=1.0, strokesmooth: Union[AnyStr, bool]=\"\",\n surfaceConformedBrushVertices: bool=True, tablet: 
bool=True,\n tangentOutline: bool=True, toolCleanupCmd: Union[AnyStr, bool]=\"\",\n toolOffProc: Union[AnyStr, bool]=\"\", toolOnProc: Union[AnyStr, bool]=\"\",\n toolSetupCmd: Union[AnyStr, bool]=\"\", useColorRamp: bool=True,\n useMaxMinColor: bool=True, usepressure: bool=False, value: Union[float,\n bool]=0.0, whichTool: Union[AnyStr, bool]=\"\", worldRadius: Union[float,\n bool]=0.0, q=True, query=True, e=True, edit=True, **kwargs)->Union[AnyStr,\n Any]:\n pass",
"def mode_crypt_cramershoup(stdscr, message=None):\n loop = True\n cursor = 0\n while loop:\n show_key_choices(stdscr, cursor, message)\n key = stdscr.getkey()\n loop = False\n cs = CramerShoup()\n if key == '1' or (key == '\\n' and cursor == 1):\n key_size = choose_keys_size(stdscr)# choose the size of key [256,512,1024]\n stdscr.clear()\n stdscr.addstr(\"Création des clés de chiffrement ...\\n\\n\")\n stdscr.refresh()\n cs.generate_keys(key_size)\n stdscr.addstr(\"Vos clés ont été générés dans keys/\\n\")\n stdscr.refresh()\n napms(2000)\n mode_crypt_cramershoup(stdscr, \"Les clés ont été générés\\n\")\n \n elif key == '2' or (key == '\\n' and cursor == 2):\n # chiffre avec la clé privé (la clé privé contient la clé publique)\n key_file_name = input_user(stdscr, \"Veuiller entrer l'enplacement de la clé public. Ctrl + G pour confirmer\")\n try:\n cs.read_key(key_file_name)\n except IOError:\n # cannot open the file\n mode_crypt_cramershoup(stdscr, \"Impossible de lire la clé dans le fichier {}\".format(key_file_name))\n return\n file_name = input_user(stdscr, \"Clé chargé avec succès.\\n Veuillez entrer le nom du fichier à chiffrer\")\n try:\n file = open(file_name)\n file.close()\n except IOError:\n mode_crypt_cramershoup(stdscr, \"Impossible d'ouvrir le fichier {}\".format(file_name))\n return\n # si le fichier est un pgm, on laisse le choix à l'utilisateur\n pgm = False\n if re.match('.+\\.pgm.*', file_name) is not None:\n pgm = choix_mode_PGM(stdscr)\n \n # on chiffre le fichier\n stdscr.clear()\n stdscr.addstr(\"En cours de chiffrement ...\\n\")\n stdscr.refresh()\n wrap = None\n if pgm:\n wrap = PGMEncrypter(file_name, cs, cs.bit_size//(2*8), file_name + \".crypted\", 4*cs.bit_size//8)\n else:\n wrap = BlockFileEncrypter(file_name, cs, cs.bit_size//(2*8), file_name + \".crypted\", 4*cs.bit_size//8)\n wrap.crypt_to_out()\n stdscr.addstr(\"Votre fichier {} a été chiffré :) !\".format(file_name), curses.color_pair(3))\n stdscr.refresh()\n napms(1000)\n menu(stdscr)\n elif key == 'm' or (key == '\\n' and cursor == 3):\n menu(stdscr)\n elif key == 'KEY_UP' and cursor > 1:\n cursor -= 1\n loop = True\n elif key == 'KEY_DOWN' and cursor < 3:\n cursor += 1\n loop = True\n else:\n loop = True",
"def cipher_feedback(self):",
"def encrypt_data ( aes_key, data ) :\n salt = Crypto.Random.new( ).read( Crypto.Cipher.AES.block_size )\n cipher = Crypto.Cipher.AES.new( aes_key, Crypto.Cipher.AES.MODE_CFB, salt )\n encrypted_data = cipher.encrypt( data )\n\n return encode_data( salt + encrypted_data )",
"def artSetPaintCtx(*args, accopacity: bool=False, afterStrokeCmd: Union[AnyStr, bool]=\"\",\n beforeStrokeCmd: Union[AnyStr, bool]=\"\", brushalignment: bool=True,\n brushfeedback: bool=True, clear: bool=True, dragSlider: AnyStr=\"\",\n dynclonemode: bool=True, exists: bool=True, expandfilename: bool=True,\n exportaspectratio: Union[float, bool]=0.0, exportfilemode: Union[AnyStr,\n bool]=\"luminance/rgb\", exportfilesave: AnyStr=\"\", exportfilesizex: Union[int,\n bool]=0, exportfilesizey: Union[int, bool]=0, exportfiletype: Union[AnyStr,\n bool]=\"\", history: bool=True, image1: Union[AnyStr, bool]=\"\", image2:\n Union[AnyStr, bool]=\"\", image3: Union[AnyStr, bool]=\"\", importfileload:\n AnyStr=\"\", importfilemode: Union[AnyStr, bool]=\"alpha\", importreassign:\n bool=False, lastRecorderCmd: Union[AnyStr, bool]=\"\", lastStampName:\n Union[AnyStr, bool]=\"\", lowerradius: Union[float, bool]=0.0, makeStroke:\n Union[int, List[int], bool]=0, mappressure: Union[AnyStr, bool]=\"none\",\n name: AnyStr=\"\", objectsetnames: Union[AnyStr, bool]=\"\", opacity:\n Union[float, bool]=1.0, outline: bool=True, outwhilepaint: bool=False,\n paintmode: Union[AnyStr, bool]=\"screen\", paintoperationtype: Union[AnyStr,\n bool]=\"Paint\", pickColor: bool=True, pickValue: bool=True, playbackCursor:\n Union[List[float, float], List[List[float, float]], bool]=None,\n playbackPressure: Union[float, List[float], bool]=0.0, preserveclonesource:\n bool=True, profileShapeFile: Union[AnyStr, bool]=\"\", projective: bool=False,\n radius: Union[float, bool]=1.0, record: bool=True, reflection: bool=False,\n reflectionaboutorigin: bool=True, reflectionaxis: Union[AnyStr, bool]=\"x\",\n screenRadius: Union[float, bool]=0.0, selectclonesource: bool=True,\n setcolorfeedback: bool=True, setdisplaycvs: bool=True, setopertype:\n Union[AnyStr, bool]=\"add\", settomodify: Union[AnyStr, bool]=\"\", showactive:\n bool=True, stampDepth: Union[float, bool]=0.0, stampProfile: Union[AnyStr,\n bool]=\"\", stampSpacing: Union[float, bool]=1.0, strokesmooth: Union[AnyStr,\n bool]=\"\", surfaceConformedBrushVertices: bool=True, tablet: bool=True,\n tangentOutline: bool=True, usepressure: bool=False, worldRadius: Union[float,\n bool]=0.0, q=True, query=True, e=True, edit=True, **kwargs)->Union[None,\n Any]:\n pass",
"def aes_encrypt(self, buff):\n start = time.time()\n message = buff.encode()\n raw = pad(message)\n cipher = AES.new(\"DESCRYPTDESCRYPT\", AES.MODE_CBC, iv())\n enc = cipher.encrypt(raw)\n end = time.time()\n print \"Encrypt time: {0:.10f}\".format((end - start))\n return base64.b64encode(enc).decode('utf-8')",
"def cx():",
"def aes(encrypt, key, data):\n cipher = AES.new(key, AES.MODE_CBC, get_zero_vector(16))\n if encrypt:\n return cipher.encrypt(data)\n else:\n return cipher.decrypt(data)",
"def artAttrPaintVertexCtx(*args, accopacity: bool=False, activeListChangedProc: Union[AnyStr,\n bool]=\"\", afterStrokeCmd: Union[AnyStr, bool]=\"\", alphaclamp:\n Union[AnyStr, bool]=\"none\", alphaclamplower: Union[float, bool]=0.0,\n alphaclampupper: Union[float, bool]=1.0, attrSelected: Union[AnyStr,\n bool]=\"\", beforeStrokeCmd: Union[AnyStr, bool]=\"\", brushalignment:\n bool=True, brushfeedback: bool=True, clamp: Union[AnyStr,\n bool]=\"none\", clamplower: Union[float, bool]=0.0, clampupper:\n Union[float, bool]=1.0, clear: bool=True, colorAlphaValue:\n Union[float, bool]=0.0, colorRGBAValue: Union[List[float, float,\n float, float], bool]=None, colorRGBValue: Union[List[float, float,\n float], bool]=None, colorRamp: Union[AnyStr, bool]=\"\", colorfeedback:\n bool=False, colorfeedbackOverride: bool=False, colorrangelower:\n Union[float, bool]=0.0, colorrangeupper: Union[float, bool]=1.0,\n dataTypeIndex: Union[int, bool]=0, disablelighting: bool=False,\n dragSlider: AnyStr=\"\", duringStrokeCmd: Union[AnyStr, bool]=\"\",\n dynclonemode: bool=True, exists: bool=True, expandfilename: bool=True,\n exportaspectratio: Union[float, bool]=0.0, exportfilemode:\n Union[AnyStr, bool]=\"luminance/rgb\", exportfilesave: AnyStr=\"\",\n exportfilesizex: Union[int, bool]=0, exportfilesizey: Union[int,\n bool]=0, exportfiletype: Union[AnyStr, bool]=\"\", filterNodes:\n bool=True, history: bool=True, image1: Union[AnyStr, bool]=\"\",\n image2: Union[AnyStr, bool]=\"\", image3: Union[AnyStr, bool]=\"\",\n importfileload: AnyStr=\"\", importfilemode: Union[AnyStr,\n bool]=\"alpha\", importreassign: bool=False, interactiveUpdate:\n bool=True, lastRecorderCmd: Union[AnyStr, bool]=\"\", lastStampName:\n Union[AnyStr, bool]=\"\", lowerradius: Union[float, bool]=0.0,\n makeStroke: Union[int, List[int], bool]=0, mappressure: Union[AnyStr,\n bool]=\"none\", maxvalue: Union[float, bool]=1.0, minvalue: Union[float,\n bool]=0.0, name: AnyStr=\"\", objattrArray: Union[AnyStr, bool]=\"\",\n opacity: Union[float, bool]=1.0, outline: bool=True, outwhilepaint:\n bool=False, paintComponent: Union[int, bool]=0, paintNodeArray:\n Union[AnyStr, bool]=\"\", paintNumChannels: Union[int, bool]=0,\n paintRGBA: bool=False, paintVertexFace: bool=False,\n paintattrselected: AnyStr=\"\", paintmode: Union[AnyStr, bool]=\"screen\",\n paintoperationtype: Union[AnyStr, bool]=\"Paint\", pickColor: bool=True,\n pickValue: bool=True, playbackCursor: Union[List[float, float],\n List[List[float, float]], bool]=None, playbackPressure: Union[float,\n List[float], bool]=0.0, preserveclonesource: bool=True,\n profileShapeFile: Union[AnyStr, bool]=\"\", projective: bool=False,\n radius: Union[float, bool]=1.0, rampMaxColor: Union[List[float, float,\n float], bool]=None, rampMinColor: Union[List[float, float, float],\n bool]=None, record: bool=True, reflection: bool=False,\n reflectionaboutorigin: bool=True, reflectionaxis: Union[AnyStr,\n bool]=\"x\", screenRadius: Union[float, bool]=0.0, selectclonesource:\n bool=True, selectedattroper: Union[AnyStr, bool]=\"absolute\",\n showactive: bool=True, stampDepth: Union[float, bool]=0.0,\n stampProfile: Union[AnyStr, bool]=\"\", stampSpacing: Union[float,\n bool]=1.0, strokesmooth: Union[AnyStr, bool]=\"\",\n surfaceConformedBrushVertices: bool=True, tablet: bool=True,\n tangentOutline: bool=True, toolOffProc: Union[AnyStr, bool]=\"\",\n toolOnProc: Union[AnyStr, bool]=\"\", useColorRamp: bool=True,\n useMaxMinColor: bool=True, usepressure: bool=False, value:\n Union[float, bool]=0.0, 
vertexColorRange: bool=False,\n vertexColorRangeLower: Union[float, bool]=0.0, vertexColorRangeUpper:\n Union[float, bool]=1.0, whichTool: Union[AnyStr, bool]=\"\",\n worldRadius: Union[float, bool]=0.0, q=True, query=True, e=True,\n edit=True, **kwargs)->Union[None, Any]:\n pass",
"def savekey(comp):\n with open('clave.key', mode='w') as key:\n key.write(str(comp[0])+'\\n'+str(comp[2]))\n\n with open('clave.cr', mode='w') as pub:\n pub.write(str(comp[1])+'\\n'+str(comp[2]))",
"def aes_encrypt(mode, aes_key, aes_iv, *data):\n encryptor = Cipher(\n algorithms.AES(aes_key), mode(aes_iv), backend=default_backend()\n ).encryptor()\n\n result = None\n for value in data:\n result = encryptor.update(value)\n encryptor.finalize()\n\n return result, None if not hasattr(encryptor, \"tag\") else encryptor.tag",
"def val_start(self):\n self.img_enc.eval()\n self.txt_enc.eval()",
"def new(key,mode=MODE_ECB,IV=None,counter=None,segment_size=None):\n return AES(key,mode,IV,counter,segment_size)",
"def perform_aes_algorithm(plaintext, key):\n if len(key) == 32:\n print('C.1 AES-128 (Nk=4, Nr=10)\\n')\n elif len(key) == 48:\n print('\\nC.2 AES-192 (Nk=6, Nr=12)\\n')\n else:\n print('\\nC.3 AES-256 (Nk=8, Nr=14)\\n')\n\n print('{:<19} {:}'.format('PLAINTEXT:', plaintext))\n print('{:<19} {:}\\n'.format('KEY:', key))\n\n print('CIPHER (ENCRYPT):')\n ciphertext = encrypt(plaintext, key, verbose=True)\n\n print('\\nINVERSE CIPHER (DECRYPT):')\n decrypt(ciphertext, key, verbose=True)",
"def aes_encrypt(data, key):\r\n cipher = aes_cipher_from_key(key)\r\n padded_data = pad(data)\r\n return cipher.encrypt(padded_data)",
"def encrypt_aes(msg, key, iv):\r\n #start timer\r\n start = timeit.default_timer()\r\n\r\n #converting key to bytes from hex\r\n key = bytes.fromhex(key)\r\n msg = pad(msg)\r\n obj = AES.new(key, AES.MODE_CBC, iv)\r\n ciphertxt = obj.encrypt(msg)#ciphertxt will be in 'bytes'\r\n\r\n #converting ciphertxt into hexadecimal\r\n ciphertxt = ciphertxt.hex()\r\n\r\n print(\"Ciper is: \",ciphertxt)\r\n\r\n #stop timer\r\n stop = timeit.default_timer()\r\n print('Encryption Running Time: ', stop-start)\r\n \r\n return ciphertxt",
"def renderizar(self):\n\t\t# Limpiar la pantalla\n\t\tglClear(GL_COLOR_BUFFER_BIT)\n\t\t# Renderizar la escena\n\t\tself.escena.renderizar()\n\t\t# Renderizar los buffers a la pantalla\n\t\tpygame.display.flip()",
"def savecomp(comp):\n with open('componentes.gcl', mode='w') as key:\n key.write(str(comp[3])+'\\n'+str(comp[4])+'\\n'+str(comp[5]))",
"def cooked_mode(self) -> ContextManager[None]:",
"def artAttrCtx(*args, accopacity: bool=False, activeListChangedProc: Union[AnyStr, bool]=\"\",\n afterStrokeCmd: Union[AnyStr, bool]=\"\", alphaclamp: Union[AnyStr, bool]=\"none\",\n alphaclamplower: Union[float, bool]=0.0, alphaclampupper: Union[float, bool]=1.0,\n attrSelected: Union[AnyStr, bool]=\"\", beforeStrokeCmd: Union[AnyStr, bool]=\"\",\n brushalignment: bool=True, brushfeedback: bool=True, clamp: Union[AnyStr,\n bool]=\"none\", clamplower: Union[float, bool]=0.0, clampupper: Union[float,\n bool]=1.0, clear: bool=True, colorAlphaValue: Union[float, bool]=0.0,\n colorRGBAValue: Union[List[float, float, float, float], bool]=None,\n colorRGBValue: Union[List[float, float, float], bool]=None, colorRamp:\n Union[AnyStr, bool]=\"\", colorfeedback: bool=False, colorfeedbackOverride:\n bool=False, colorrangelower: Union[float, bool]=0.0, colorrangeupper:\n Union[float, bool]=1.0, dataTypeIndex: Union[int, bool]=0, disablelighting:\n bool=False, dragSlider: AnyStr=\"\", duringStrokeCmd: Union[AnyStr, bool]=\"\",\n dynclonemode: bool=True, exists: bool=True, expandfilename: bool=True,\n exportaspectratio: Union[float, bool]=0.0, exportfilemode: Union[AnyStr,\n bool]=\"luminance/rgb\", exportfilesave: AnyStr=\"\", exportfilesizex: Union[int,\n bool]=0, exportfilesizey: Union[int, bool]=0, exportfiletype: Union[AnyStr,\n bool]=\"\", filterNodes: bool=True, history: bool=True, image1: Union[AnyStr,\n bool]=\"\", image2: Union[AnyStr, bool]=\"\", image3: Union[AnyStr, bool]=\"\",\n importfileload: AnyStr=\"\", importfilemode: Union[AnyStr, bool]=\"alpha\",\n importreassign: bool=False, interactiveUpdate: bool=True, lastRecorderCmd:\n Union[AnyStr, bool]=\"\", lastStampName: Union[AnyStr, bool]=\"\", lowerradius:\n Union[float, bool]=0.0, makeStroke: Union[int, List[int], bool]=0, mappressure:\n Union[AnyStr, bool]=\"none\", maxvalue: Union[float, bool]=1.0, minvalue:\n Union[float, bool]=0.0, name: AnyStr=\"\", objattrArray: Union[AnyStr, bool]=\"\",\n opacity: Union[float, bool]=1.0, outline: bool=True, outwhilepaint: bool=False,\n paintNodeArray: Union[AnyStr, bool]=\"\", paintattrselected: AnyStr=\"\", paintmode:\n Union[AnyStr, bool]=\"screen\", paintoperationtype: Union[AnyStr, bool]=\"Paint\",\n pickColor: bool=True, pickValue: bool=True, playbackCursor: Union[List[float,\n float], List[List[float, float]], bool]=None, playbackPressure: Union[float,\n List[float], bool]=0.0, preserveclonesource: bool=True, profileShapeFile:\n Union[AnyStr, bool]=\"\", projective: bool=False, radius: Union[float, bool]=1.0,\n rampMaxColor: Union[List[float, float, float], bool]=None, rampMinColor:\n Union[List[float, float, float], bool]=None, record: bool=True, reflection:\n bool=False, reflectionaboutorigin: bool=True, reflectionaxis: Union[AnyStr,\n bool]=\"x\", screenRadius: Union[float, bool]=0.0, selectclonesource: bool=True,\n selectedattroper: Union[AnyStr, bool]=\"absolute\", showactive: bool=True,\n stampDepth: Union[float, bool]=0.0, stampProfile: Union[AnyStr, bool]=\"\",\n stampSpacing: Union[float, bool]=1.0, strokesmooth: Union[AnyStr, bool]=\"\",\n surfaceConformedBrushVertices: bool=True, tablet: bool=True, tangentOutline:\n bool=True, toolOffProc: Union[AnyStr, bool]=\"\", toolOnProc: Union[AnyStr,\n bool]=\"\", useColorRamp: bool=True, useMaxMinColor: bool=True, usepressure:\n bool=False, value: Union[float, bool]=0.0, whichTool: Union[AnyStr, bool]=\"\",\n worldRadius: Union[float, bool]=0.0, q=True, query=True, e=True, edit=True,\n **kwargs)->Union[AnyStr, Any]:\n pass",
"def greasePencilCtx(*args, autoCreateFrames: bool=True, canDraw: bool=True, createOrEditFrame:\n Union[int, bool]=0, exists: bool=True, exportArchive: List[AnyStr,\n AnyStr]=None, fileTextureSize: Union[int, bool]=0, greasePencilType:\n Union[int, bool]=0, image1: Union[AnyStr, bool]=\"\", image2: Union[AnyStr,\n bool]=\"\", image3: Union[AnyStr, bool]=\"\", importArchive: AnyStr=\"\",\n makeStroke: Union[int, List[int], bool]=0, removeFrame: int=0,\n resetBrushes: bool=True, rgbcolor: Union[List[float, float, float],\n bool]=None, sequenceNodeName: Union[AnyStr, bool]=\"\", q=True, query=True,\n e=True, edit=True, **kwargs)->Union[None, Any]:\n pass",
"def aes_ecb(key):\n return AES.new(key, AES.MODE_ECB)",
"def curveEditorCtx(*args, direction: Union[int, bool]=0, exists: bool=True, history: bool=True,\n image1: Union[AnyStr, bool]=\"\", image2: Union[AnyStr, bool]=\"\", image3:\n Union[AnyStr, bool]=\"\", name: AnyStr=\"\", relativeTangentSize: Union[float,\n bool]=4, title: Union[AnyStr, bool]=\"\", q=True, query=True, e=True,\n edit=True, **kwargs)->Union[AnyStr, Any]:\n pass",
"def aes_key_wrap(self, kek: bytes, key_to_wrap: bytes) -> bytes:\n return keywrap.aes_key_wrap(kek, key_to_wrap, default_backend())",
"def __init__(self, width, height):\r\n super().__init__(width, height)\r\n \r\n self.file = open(\"drawing.c\", \"w\")\r\n self.held_keys = set()\r\n arcade.set_background_color(arcade.color.WHITE)\r\n self.cursor = [Cursor()]\r\n self.record = []",
"def artAttrSkinPaintCtx(*args, accopacity: bool=False, activeListChangedProc: Union[AnyStr,\n bool]=\"\", afterStrokeCmd: Union[AnyStr, bool]=\"\", alphaclamp:\n Union[AnyStr, bool]=\"none\", alphaclamplower: Union[float, bool]=0.0,\n alphaclampupper: Union[float, bool]=1.0, attrSelected: Union[AnyStr,\n bool]=\"\", beforeStrokeCmd: Union[AnyStr, bool]=\"\", brushalignment:\n bool=True, brushfeedback: bool=True, clamp: Union[AnyStr, bool]=\"none\",\n clamplower: Union[float, bool]=0.0, clampupper: Union[float, bool]=1.0,\n clear: bool=True, colorAlphaValue: Union[float, bool]=0.0,\n colorRGBAValue: Union[List[float, float, float, float], bool]=None,\n colorRGBValue: Union[List[float, float, float], bool]=None, colorRamp:\n Union[AnyStr, bool]=\"\", colorfeedback: bool=False,\n colorfeedbackOverride: bool=False, colorrangelower: Union[float,\n bool]=0.0, colorrangeupper: Union[float, bool]=1.0, dataTypeIndex:\n Union[int, bool]=0, disablelighting: bool=False, dragSlider: AnyStr=\"\",\n duringStrokeCmd: Union[AnyStr, bool]=\"\", dynclonemode: bool=True,\n exists: bool=True, expandfilename: bool=True, exportaspectratio:\n Union[float, bool]=0.0, exportfilemode: Union[AnyStr,\n bool]=\"luminance/rgb\", exportfilesave: AnyStr=\"\", exportfilesizex:\n Union[int, bool]=0, exportfilesizey: Union[int, bool]=0,\n exportfiletype: Union[AnyStr, bool]=\"\", filterNodes: bool=True,\n history: bool=True, image1: Union[AnyStr, bool]=\"\", image2:\n Union[AnyStr, bool]=\"\", image3: Union[AnyStr, bool]=\"\", importfileload:\n AnyStr=\"\", importfilemode: Union[AnyStr, bool]=\"alpha\", importreassign:\n bool=False, influence: Union[AnyStr, bool]=\"\", interactiveUpdate:\n bool=True, lastRecorderCmd: Union[AnyStr, bool]=\"\", lastStampName:\n Union[AnyStr, bool]=\"\", lowerradius: Union[float, bool]=0.0,\n makeStroke: Union[int, List[int], bool]=0, mappressure: Union[AnyStr,\n bool]=\"none\", maxvalue: Union[float, bool]=1.0, minvalue: Union[float,\n bool]=0.0, name: AnyStr=\"\", objattrArray: Union[AnyStr, bool]=\"\",\n opacity: Union[float, bool]=1.0, outline: bool=True, outwhilepaint:\n bool=False, paintNodeArray: Union[AnyStr, bool]=\"\", paintSelectMode:\n Union[int, bool]=0, paintattrselected: AnyStr=\"\", paintmode:\n Union[AnyStr, bool]=\"screen\", paintoperationtype: Union[AnyStr,\n bool]=\"Paint\", pickColor: bool=True, pickValue: bool=True,\n playbackCursor: Union[List[float, float], List[List[float, float]],\n bool]=None, playbackPressure: Union[float, List[float], bool]=0.0,\n preserveclonesource: bool=True, profileShapeFile: Union[AnyStr,\n bool]=\"\", projective: bool=False, radius: Union[float, bool]=1.0,\n rampMaxColor: Union[List[float, float, float], bool]=None,\n rampMinColor: Union[List[float, float, float], bool]=None, record:\n bool=True, reflection: bool=False, reflectionaboutorigin: bool=True,\n reflectionaxis: Union[AnyStr, bool]=\"x\", screenRadius: Union[float,\n bool]=0.0, selectclonesource: bool=True, selectedattroper: Union[AnyStr,\n bool]=\"absolute\", showactive: bool=True, skinPaintMode: Union[int,\n bool]=0, stampDepth: Union[float, bool]=0.0, stampProfile: Union[AnyStr,\n bool]=\"\", stampSpacing: Union[float, bool]=1.0, strokesmooth:\n Union[AnyStr, bool]=\"\", surfaceConformedBrushVertices: bool=True,\n tablet: bool=True, tangentOutline: bool=True, toolOffProc: Union[AnyStr,\n bool]=\"\", toolOnProc: Union[AnyStr, bool]=\"\", useColorRamp: bool=True,\n useMaxMinColor: bool=True, usepressure: bool=False, value: Union[float,\n bool]=0.0, whichTool: Union[AnyStr, 
bool]=\"\", worldRadius: Union[float,\n bool]=0.0, xrayJoints: bool=True, q=True, query=True, e=True, edit=True,\n **kwargs)->Union[None, Any]:\n pass",
"def encryption_form():\n \n def encryption_key_form():\n \"\"\"Load or generate AES-128 encryption key.\"\"\"\n sg.theme('DarkBlue') \n layout = [\n [\n sg.Text(\"Encryption Key\"),\n sg.Input(size=(100, 1), enable_events=True, key=\"file\"),\n sg.FileBrowse(file_types=((\"Key Files\", \"*.key\"),)),\n sg.Button(\"Ok\"),\n sg.FileSaveAs(\"Generate Key\", file_types=((\"Key Files\", \"*.key\"),), target = \"file\")\n ]\n ]\n window = sg.Window('Encrypted Image Viewer', layout, resizable=True)\n while True:\n event, values = window.read()\n print(event)\n if event == \"Exit\" or event == sg.WIN_CLOSED:\n sys.exit()\n if event == \"file\":\n key_file = values[\"file\"]\n print(values)\n print(key_file)\n if event == \"Ok\" and key_file:\n window.close()\n return key_file\n if values[\"Generate Key\"]:\n window.close()\n key = secrets.token_bytes(32)\n key = base64.urlsafe_b64encode(key)\n with open(values[\"Generate Key\"], 'wb') as f:\n f.write(key)\n return key_file\n \n key_file = encryption_key_form()\n \n sg.theme('DarkBlue') \n layout = [\n [\n sg.Text(\"Image Files\"),\n sg.Input(size=(50, 1), enable_events=True, key=\"file\"),\n sg.FilesBrowse(file_types=((\"Images\", \"*.png\"),(\"Images\", \"*.jpg\"), (\"Images\", \"*.jpeg\"), (\"Images\", \"*.webp\"), (\"Images\", \"*.jfif\") )),\n sg.Button(\"Encrypt\"),\n ]\n ]\n window = sg.Window('Encrypt Images', layout, resizable=True)\n while True:\n event, values = window.read()\n if event == \"Exit\" or event == sg.WIN_CLOSED:\n sys.exit()\n if event == \"file\":\n img_list = values[\"file\"].split(';')\n if event == \"Encrypt\" and key_file and img_list:\n key = open(key_file, \"rb\")\n key = key.read()\n f = Fernet(key)\n \n # encrypt every image and save as \".enc\" file\n for img_name in img_list:\n img = Image.open(img_name, mode='r')\n img_bytes = io.BytesIO()\n img.save(img_bytes, format='PNG')\n img_bytes = img_bytes.getvalue()\n\n img_enc = f.encrypt(img_bytes)\n\n filename = img_name[:-img_name[::-1].index(\".\")-1]+ \".enc\"\n\n with open(filename, 'wb') as file:\n file.write(img_enc)\n answer = sg.popup_yes_no('Done! Any more files?')\n if answer == \"Yes\":\n continue\n else:\n window.close()\n selection_form()"
]
| [
"0.551257",
"0.5442712",
"0.53087753",
"0.5194765",
"0.5139646",
"0.50706965",
"0.4979023",
"0.49139866",
"0.49010268",
"0.4873511",
"0.4871852",
"0.48144704",
"0.47974575",
"0.47648686",
"0.47554955",
"0.4750068",
"0.47483042",
"0.47270516",
"0.4726411",
"0.4709047",
"0.47041312",
"0.46991143",
"0.46959677",
"0.46865574",
"0.4651795",
"0.46225145",
"0.46074396",
"0.45833418",
"0.45720586",
"0.45655447"
]
| 0.624345 | 0 |
Returns all nodes of the graph visited using BFS | def bfs(graph,start):
    #keeps track of nodes to be visited
    queue = []
    #keeps track of nodes already visited
    explored = []
    queue.append(start)
    while queue:
        #remove first node from queue
        curr_node = queue.pop(0)
        #check if node is visited
        if curr_node not in explored:
            explored.append(curr_node)
            adjacent_nodes = graph[curr_node]
            #add adjacent nodes to queue
            for i in adjacent_nodes:
                queue.append(i)
    return explored | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_visited_nodes(self):\n return self.visited_nodes",
"def bfs(self, start):\n output_list = []\n queue = Queue()\n queue.put(start)\n visited = set(start)\n while not queue.empty():\n current_node = queue.get()\n output_list.append(current_node)\n visited.add(current_node)\n for node in self.__graph[current_node].neighbours:\n if node.name not in visited:\n queue.put(node.name)\n return output_list",
"def bfs_visited(ugraph, start_node):\n visited = set([start_node])\n queue = deque([start_node])\n while queue:\n node = queue.popleft()\n for neighbor in ugraph[node]:\n if neighbor not in visited:\n visited.add(neighbor)\n queue.append(neighbor)\n return visited",
"def bfs_visited(ugraph, start_node):\r\n queue = deque()\r\n visited = set() #Set is enough here.\r\n visited.add(start_node)\r\n queue.append(start_node)\r\n while len(queue) != 0:\r\n temp_node = queue.popleft()\r\n for neighbor in ugraph[temp_node]: #In graph theory, neighborhood is \r\n if neighbor not in visited: #well defined, so could be used directely.\r\n visited.add(neighbor)\r\n queue.append(neighbor)\r\n return visited",
"def bfs(graph, start):\n visited, queue = set(), [start]\n while queue:\n node = queue.pop(0)\n if node not in visited:\n visited.add(node)\n # Add all the adjacent unvisited nodes to the queue\n queue.extend(graph[node] - visited)\n return visited",
"def bfs(g: nx.Graph, start_node: Any) -> list:\r\n\tx = [start_node]\r\n\tqueue = [start_node]\r\n\ttracks = {node: [] for node in g.nodes}\r\n\twhile queue:\r\n\t\telement = queue.pop(0)\r\n\t\ty = list(g.neighbors(element))\r\n\t\tfor node in y:\r\n\t\t\tif node not in x:\r\n\t\t\t\tx.append(node)\r\n\t\t\t\tqueue.append(node)\r\n\t\t\t\ttracks[node].extend((*tracks[element], element))\r\n\treturn x",
"def bfs_visited(ugraph, start_node):\n \n visited = set([start_node])\n cola = deque([start_node])\n \n while len(cola)>0:\n node = cola.popleft() \n for neigh in ugraph[node]:\n if not neigh in visited:\n visited.add(neigh)\n cola.append(neigh)\n \n return visited",
"def bfs_iterative(graph,start):\n\tvisited = set()\n\twatched = set()\n\tnodes_queue = [start] # List that helps as queue\n\twatched.add(start)\n\t\n\twhile nodes_queue:\n\t\tcurrent_node = nodes_queue.pop(0)\n\n\t\tprint(\"visiting\",current_node)\n\t\tvisited.add(current_node)\n\t\t\n\t\tfor adjacent_node in graph[current_node]:\n\t\t\tif (adjacent_node not in watched) and (adjacent_node not in visited):\n\t\t\t\tnodes_queue.append(adjacent_node)\n\t\t\t\t#path.add(adjacent_node)",
"def bfs(g: nx.Graph, start_node: Hashable) -> List[Hashable]:\n list_ = list(g.neighbors(start_node))\n len_graph = g.number_of_nodes()\n list2 = [start_node]\n while len(list2) < len_graph:\n for i in range(len(list_) - 1):\n if list_[0] not in list2:\n list2.append(list_[0])\n list_ += list(g.neighbors(list_[0]))\n list_.remove(list_[0])\n # nx.draw(g, with_labels=True)\n # plt.show()\n return list2",
"def bfs(graph, start_vertex):\n\n queue = deque()\n queue.appendleft(start_vertex)\n explored_vertices = [start_vertex]\n\n while len(queue) != 0:\n vertex = queue.pop()\n neighbours = graph.neighbours(vertex)\n for neighbour in neighbours:\n if neighbour not in explored_vertices:\n explored_vertices.append(neighbour)\n queue.appendleft(neighbour)\n\n return explored_vertices",
"def find_path_all_bfs(graph,start,end):\n\tvisited = set()\n\twatched = set()\n\tpaths = []\n\n\twatched.add(start)\n\n\tnodes_queue = [(start,[start])]\n\twhile nodes_queue:\n\t\tcurrent_node, path = nodes_queue.pop(0)\n\n\t\tvisited.add(current_node)\n\n\t\tif (current_node == end):\n\t\t\tpaths.append(path)\n\n\t\tfor adjacent_node in graph[current_node]:\n\t\t\tif (adjacent_node not in watched) and (adjacent_node not in visited):\n\t\t\t\tnodes_queue.append((adjacent_node, path+[adjacent_node]))\n\n\treturn paths",
"def bfs_visited(ugraph, start_node):\n\tqueue = []\n\tvisited = [start_node]\n\tqueue.append(start_node)\n\twhile queue:\n\t\tcurrent = queue.pop(0)\n\t\tfor content in ugraph[current]:\n\t\t\tif content not in visited:\n\t\t\t\tvisited.append(content)\n\t\t\t\tqueue.append(content)\n\treturn set(visited)",
"def breadth_first_traversal(self, start):\n visited = []\n visited.append(start)\n start_visited = visited\n while True:\n temp = []\n for node_ in start_visited:\n for i in self.neighbors(node_):\n if i not in visited:\n visited.append(i)\n temp.append(i)\n start_visited = temp\n if not temp:\n break\n return visited",
"def bfs(self, start_node: int, flag: bool) :\n for n in self.dw_graph.get_all_v().values():\n n.visited = False\n queue = [self.dw_graph.nodes[start_node]]\n self.dw_graph.nodes[start_node].visited = True\n node_list = [start_node]\n while queue:\n current = queue.pop()\n if not flag:\n for e in self.dw_graph.all_out_edges_of_node(current.node_id).keys():\n if not self.dw_graph.nodes[e].visited:\n self.dw_graph.nodes[e].visited = True\n queue.append(self.dw_graph.nodes[e])\n node_list.append(e)\n else:\n for e in self.dw_graph.all_in_edges_of_node(current.node_id).keys():\n if not self.dw_graph.nodes[e].visited:\n self.dw_graph.nodes[e].visited = True\n queue.append(self.dw_graph.nodes[e])\n node_list.append(e)\n\n return node_list",
"def order_bfs(self) -> List[Nodo]:\n\n # Nodos por buscar, es una cola\n pending: List[Nodo] = [self.root]\n # Nodos ya visitados\n visited: List[Nodo] = []\n\n # Mientras la cola tenga items\n while len(pending) > 0:\n # Procesar el primer elemento\n curr = pending.pop(0)\n visited.append(curr)\n\n # Agregar los hijos no visitados del nodo a la cola\n for child in curr.children:\n if child in visited:\n continue\n pending.append(child)\n\n return visited",
"def bfs(self, startNode):\n queue = Queue()\n\n # Mark all the nodes as not visited\n visited = {}\n for node in self.getNodes():\n visited[node] = False\n\n queue.enqueue(startNode)\n\n while not queue.isEmpty():\n s = queue.dequeue()\n visited[s] = True\n print s,\n\n # enqueue all the adjacent vertices to s\n # if they've not already been visited\n\n for adjacentNode in self.getAdjacent(s):\n if visited[adjacentNode] is False:\n queue.enqueue(adjacentNode)\n visited[adjacentNode] = True",
"def nodes(self, visited=None):\n if (not visited):\n visited = set()\n if (self not in visited):\n visited.update([self])\n for node in self.parents():\n visited.update(node.nodes(visited=visited))\n for node in self.children():\n visited.update(node.nodes(visited=visited))\n return visited",
"def BFS(self, start_vertex, verbose=True):\n if not self.contains(start_vertex):\n return None\n traversal = []\n visited = set()\n for vertex in self.vertices():\n if vertex not in visited:\n self._BFS(vertex, visited, traversal.append)\n if verbose:\n print('BFS(Graph) =', traversal)\n return traversal",
"def bfs(graph, start_node):\n start_node.distance = 0\n start.set_predecessor(None)\n queue = list()\n queue.append(start_node)\n while (len(queue) > 0):\n current_vertex = queue.pop()\n current_vertex.setState = \"visiting\"\n for vertex in current_vertex.links():\n if (vertex.getState == \"unvisited\"):\n vertex.setState == \"tobevisited\"\n vertex.set_predecessor(current_vertex)\n vertex.distance = current_vertex.distance + 1\n queue.append(vertex)\n current_vertex.setState = \"visited\"",
"def bfs(graph, i):\n visited = set()\n\n unexplored = deque()\n unexplored.append(i)\n\n while unexplored:\n curr = unexplored.popleft()\n visited.add(curr)\n edges = graph[curr]\n\n for edge in edges:\n if edge in visited:\n continue\n else:\n unexplored.appendleft(edge)\n\n return visited",
"def get_bfs(self, s):\n # create a queue for BFS\n queue = []\n # mark all the vertices as not visited\n visited = [False] * (len(self.graph))\n # mark the start node as visited and enqueue it\n visited[s] = True\n queue.append(s)\n results = []\n\n while queue:\n # dequeue a vertex from queue and append to results.\n p = queue.pop(0)\n results.append(p)\n # get all adjacent vertices of the dequeued vertex s,\n # and for any unvisited adjacent, mark it visited and enqueue it.\n for v in self.graph[p]:\n if visited[v] is False:\n visited[v] = True\n queue.append(v)\n\n return results",
"def bfs(get_neighbors, source, target):\n\n parents = {}\n visited = set()\n queue = collections.deque()\n queue.append(source)\n while queue:\n vertex = queue.popleft()\n if vertex == target:\n return _backtrack(target, lambda v: parents.get(v))\n if vertex not in visited:\n visited.add(vertex)\n for neighbor in filter(lambda n: n not in visited, get_neighbors(vertex)):\n queue.append(neighbor)\n parents[neighbor] = vertex\n return []",
"def bfs(self, start, end):\n\n queue = [start]\n parent = dict()\n\n # Initialize parent dictionary\n for v in iter(self._reachable): parent[v] = None\n parent[start] = start\n\n while len(queue) > 0:\n (x, y) = queue.pop(0)\n if (x, y) == end: break\n\n for v in self.get_reachables(x, y):\n if parent[v] is not None: \n # Vertex v already visited\n continue\n parent[v] = (x, y)\n queue.append(v)\n\n # Reconstruct path\n path = [end]\n vertex = end\n\n while parent[vertex] != vertex:\n if parent[vertex] is None: return []\n path.append(parent[vertex])\n vertex = parent[vertex]\n\n path.reverse()\n return path",
"def bfs(self, starting_vertex, destination_vertex): # great if you know to result is somewhere close to the root/start\n visited = set() # create an empty 'set' to store visisted vertex, set sorts \n\n q = Queue() # create an empty Queue\n q.enqueue([starting_vertex]) # set the starting_vertex with enqueue \n\n while q.size() > 0:\n path = q.dequeue() # dequeue and store first path\n v = path[-1] # store the vertex from the end of path \n\n if v == destination_vertex: # if v is equal to the destination_vertex\n return path # return the path \n\n if v not in visited: # if v has not been visited yet \n visited.add(v) # add v to the vistied set \n\n for neighbor in self.vertices[v]: # loop through the neighbors \n path_copy = list(path) # make a copy of the path \n path_copy.append(neighbor) # append each neighbor to the back of the path copy \n q.enqueue(path_copy) # enqueue the path copy to the queue ",
"def get_unvisited_nodes(self, graph, solution):\n nodes = []\n for node in graph[solution.current]:\n if node not in solution:\n nodes.append(node)\n return nodes",
"def dfs(visited: list, graph: AdjList, node: int):\n if node not in visited:\n visited.append(node)\n for neighbour in graph[node]:\n dfs(visited, graph, neighbour)",
"def BFSUtility(obj,visited,vertex):\n stack = []\n subGraph = []\n stack.insert(0,vertex)\n visited[vertex] = True\n while(stack):\n subGraph.append(stack.pop())\n for nbrVertex in obj.adjList[subGraph[-1]]:\n if visited[nbrVertex]:\n continue\n stack.insert(0,nbrVertex)\n visited[stack[0]] = True\n return subGraph",
"def dfs_helper(self, start_node):\n ret_list = [start_node.value]\n\n start_node.visited = True\n for edge in start_node.edges:\n if not edge.node_to.visited:\n edge.node_to.visited = True\n ret_list.extend(self.dfs_helper(edge.node_to))\n\n return ret_list",
"def get_bfs(self)->list:\n\t\tqueue=[]\n\t\tbfs=[]\n\t\tqueue.append(self)\n\t\twhile(len(queue)>0):\n\t\t\tnode=queue.pop(0)\n\t\t\tbfs.append(node.data)\n\t\t\tif(node.right!=None):\n\t\t\t\tqueue.append(node.right)\n\t\t\tif(node.left!=None):\n\t\t\t\tqueue.append(node.left)\n\t\treturn bfs",
"def nodes(self):\n return list(self.__graph.keys())"
]
| [
"0.7565556",
"0.7559426",
"0.74942124",
"0.7450504",
"0.7416849",
"0.7399297",
"0.73478484",
"0.7344478",
"0.7339527",
"0.7230581",
"0.7224638",
"0.72207165",
"0.72174174",
"0.7151831",
"0.71481955",
"0.7136827",
"0.70414394",
"0.6941996",
"0.6924116",
"0.6856379",
"0.68324786",
"0.67971104",
"0.67659444",
"0.6740058",
"0.66890484",
"0.6686683",
"0.6663107",
"0.6656791",
"0.6653968",
"0.66396284"
]
| 0.76385623 | 0 |
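Illustrative only, not part of the dataset rows above: a minimal usage sketch of the bfs(graph, start) document in the preceding record. The adjacency-list literal is hypothetical and serves only to show the expected call pattern and visit order.

# Minimal sketch, assuming the bfs(graph, start) definition from the record above.
# The graph below is a hypothetical adjacency list, not taken from the dataset.
graph = {
    'A': ['B', 'C'],
    'B': ['D'],
    'C': ['D'],
    'D': [],
}
print(bfs(graph, 'A'))  # expected BFS visit order: ['A', 'B', 'C', 'D']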
Create an empty Queue. Test that its size is 0. | def test_new_queue_is_empty(self):
    queue = Queue_()
    self.assertTrue(queue.empty())
    self.assertEqual(queue.size(), 0) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def make_empty_queue():\n return Queue(0, None, None)",
"def empty_queue():\n return Queue()",
"def empty_queue():\n return Queue()",
"def test_size_empty(the_queue):\n assert the_queue.size() == 0",
"def test_size_of_new_queue():\n queue = Queue()\n assert queue.size() == 0",
"def test_can_instantiate_empty_queue(empty_queue):\n assert isinstance(empty_queue, Queue)",
"def test_is_empty(self):\n queue = Queue()\n self.assertEqual(queue.is_empty(), True)\n queue.enqueue(1)\n self.assertEqual(queue.is_empty(), False)",
"def test_EmptyQueue(self):\n genFn = Mock(return_value=None)\n \n wrapper = KaoGenerator(genFn)\n self.assertEqual(len(wrapper._queue), 0)",
"def test_initialization_with_empty_list_first_node_check():\n queue = Queue([])\n assert queue._queue.first_node is None",
"def test_initialization_with_empty_list_last_node_check():\n queue = Queue([])\n assert queue._queue.last_node is None",
"def test_for_size_0_when_empty(new_empty_deque):\n assert new_empty_deque.size() == 0",
"def test_dequeue_empty(self):\n \n r = self.store.dequeue('/queue/nonexist')\n assert r is None\n \n assert self.store.has_frames('/queue/nonexist') == False\n assert self.store.size('/queue/nonexist') == 0",
"def testQueueisEmpty(self):\n self.mgr.isGoproBusy = True\n self.mgr.processMsgQueue()\n self.assertFalse( self.mgr.isGoproBusy )",
"def test_empty_deque_is_empty(empty_deque):\n assert empty_deque.size() == 0",
"def test_size(self):\n queue = Queue()\n self.assertEqual(queue.size(), 0)\n queue.enqueue(1)\n self.assertEqual(queue.size(), 1)",
"def verify_queue_empty(self):\n self.assert_sample_queue_size(DataParticleType.VELOCITY_PARTICLE, 0)\n self.assert_sample_queue_size(DataParticleType.TIME_PARTICLE, 0)",
"def empty(self) -> bool: \n if(self.queue is not None and len(self.queue) > 0):\n print(\"len > 0\" )\n return False\n else:\n print(\"len = 0\" )\n return True",
"def test_tx_queue_emptying(self):\n\n self._serial_handler.tx_queue.put((0,'test'))\n\n # Should write the message and then empty the queue\n self._serial_handler._write()\n\n self.assertEquals(self._serial_handler.tx_queue.qsize(), 0)",
"def assert_empty(self):\n if self._queue:\n raise CallQueueNotEmpty(\n \"Queue is not empty; {0} expected calls remaining.\"\n .format(len(self._queue))\n )",
"def empty(self):\r\n return self.queue == []\r\n\r\n\r\n\r\n # Your MyQueue object will be instantiated and called as such:\r\n # obj = MyQueue()\r\n # obj.push(x)\r\n # param_2 = obj.pop()\r\n # param_3 = obj.peek()\r\n # param_4 = obj.empty()\r",
"def test_empty_queue(self):\n max_hour_count = (None, None, None)\n result = feature_5(self.deque,\n self.heap,\n self.expected_dict,\n self.top_n,\n max_hour_count,\n self.time_rollover_queue)\n self.assertEqual(len(self.deque), 1)\n self.assertIsNone(result)\n self.assertEqual(self.deque[0], (self.datetime_obj, self.timestamp))",
"def test_enqueue(self):\n queue = Queue()\n self.assertEqual(queue.size(), 0)\n queue.enqueue(1)\n queue.enqueue(2)\n queue.enqueue(3)\n self.assertEqual(queue.size(), 3)",
"def test_new_deque_exists(new_empty_deque):\n assert new_empty_deque.size() == 0",
"def test_peak_empty_queue_returns_none():\n queue = Queue()\n assert queue.peak() is None",
"def test_deque_creation(dq):\n assert dq._length == 0",
"def empty_queue(queue):\n return queue.front is None",
"def is_empty(self):\n return len(self.the_queue) == 0",
"def is_empty(self):\n return len(self.queue) == 0",
"def is_empty(self):\n return len(self.queue) == 0",
"def is_empty(self):\n return self.queue == []"
]
| [
"0.87515324",
"0.8171699",
"0.8171699",
"0.81615436",
"0.80206966",
"0.75779504",
"0.7575396",
"0.7412317",
"0.7373005",
"0.73489606",
"0.7348045",
"0.7347103",
"0.7224042",
"0.7193373",
"0.7141106",
"0.71210796",
"0.7055048",
"0.7046142",
"0.7018612",
"0.6972072",
"0.6963029",
"0.69029814",
"0.68892145",
"0.68610996",
"0.68322206",
"0.6829427",
"0.67547417",
"0.6731816",
"0.6731816",
"0.6722622"
]
| 0.8647534 | 1 |
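Illustrative only, not part of the dataset rows above: a minimal stub of the Queue_ interface that test_new_queue_is_empty in the preceding record exercises. The real class under test is not included in the record, so this stub is an assumption.

# Minimal sketch of an interface satisfying the test above: empty() and size().
# Hypothetical stub; the dataset record does not ship the actual Queue_ class.
class Queue_:
    def __init__(self):
        self._items = []

    def empty(self):
        return len(self._items) == 0

    def size(self):
        return len(self._items)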