query (string, lengths 9 to 9.05k) | document (string, lengths 10 to 222k) | metadata (dict) | negatives (sequence, length 30) | negative_scores (sequence, length 30) | document_score (string, lengths 4 to 10) | document_rank (string, 2 classes)
---|---|---|---|---|---|---
Given a list of parsed scene_items (a plain list of dicts), provide methods for rendering that data. timesigniture is only used for debug printing. | def __init__(self, scene_items, timesigniture=DEFAULT_TIMESIGNITURE_):
self.scene_items = scene_items
self.total_beats = sum(scene_item['duration'] for scene_item in self.scene_items)
self.timesigniture = timesigniture | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_items_function(self):\n ars = self.ar[2009][11]['general']\n self.assertEqual(list(ars.items()), [('LastLine', ['20091202000343', '1011585', '206082338', '54716901457']), ('FirstTime', ['20091101000237']), ('LastTime', ['20091130234113']), ('LastUpdate', ['20091201094510', '1011585', '0', '886950', '70062', '54572']), ('TotalVisits', ['1475']), ('TotalUnique', ['547']), ('MonthHostsKnown', ['397']), ('MonthHostsUnknown', ['196'])])",
"def parse_items(self):",
"def process_items():\n global HAS_WATCH\n global HAS_FIRST_AID_KIT\n global HAS_FLASHLIGHT\n global HAS_RAINCOAT\n global HAS_COMPASS\n global HAS_BEARTRAP\n\n if \"Watch\" in ITEMS:\n HAS_WATCH = True\n if \"First Aid Kit\" in ITEMS:\n HAS_FIRST_AID_KIT = True\n if \"Flashlight\" in ITEMS:\n HAS_FLASHLIGHT = True\n if \"Raincoat\" in ITEMS:\n HAS_RAINCOAT = True\n if \"Compass\" in ITEMS:\n HAS_COMPASS = True\n if \"Bear Trap\" in ITEMS:\n HAS_BEARTRAP = True\n\n # Stupid little hack to provide 'immediate updates/effect' of having the below items\n if HAS_WATCH:\n update_title_area(\" Day: %d Time: %d:00 \" % (DAY, TIME))\n if HAS_COMPASS:\n DISCOVERED[ZERO_BASE_PLYR_POS] = \"Y\"",
"def _parse_audit_items(self, items, function_name):\n for item in items:\n yield {\n \"snippet\": item[\"node\"][\"snippet\"],\n \"selector\": item[\"node\"][\"selector\"],\n \"colors\": self._extract_hex_codes(item[\"node\"][\"explanation\"]),\n \"pipeline\": [function_name],\n # path is in the format \"1,HTML,1,BODY,0,DIV,...\"\n # we only need to keep the numbers (as integers)\n \"path\": tuple(int(i) for i in item[\"node\"][\"path\"].split(\",\")[::2]),\n }",
"def parse_scene_order(self, data, timesigniture):\n if not data:\n return ()\n\n num_scenes = len(data)\n\n def attempt_parse_key_timecode(value):\n if not value:\n return value\n try:\n return float(value)\n except (ValueError, TypeError):\n pass\n try:\n return timecode_to_beat(value, timesigniture)\n except (AssertionError, ValueError, AttributeError):\n pass\n return value\n # Surface the original key value in the dict (useful for debugging)\n for key, value in data.items():\n if value:\n value['key'] = key\n data_float_indexed = {attempt_parse_key_timecode(k): v for k, v in data.items()}\n assert len(data_float_indexed) == num_scenes\n sorted_keys = sorted(data_float_indexed.keys())\n assert len(sorted_keys) == num_scenes\n\n def normalise_duration(index):\n \"\"\"\n Convert any time code or alias to a linear float value. e.g.\n '1.2' parses to -> 1.5\n 'match_next' resolves to -> 4.0\n \"\"\"\n key = sorted_keys[index]\n item = data_float_indexed[key]\n if not item:\n item = {'duration': 'auto'}\n data_float_indexed[key] = item\n duration = attempt_parse_key_timecode(item.get('duration'))\n if duration == 'match_next':\n duration = normalise_duration(index+1)\n if duration == 'match_prev':\n duration = normalise_duration(index-1)\n if isinstance(duration, str) and duration.startswith('match '):\n duration = normalise_duration(sorted_keys.index(float(duration.strip('match '))))\n if (not duration or duration == 'auto') and index < len(sorted_keys)-1:\n duration = sorted_keys[index+1] - key\n if not isinstance(duration, float):\n #log.info('Unparsed duration: {0}'.format(duration))\n duration = self.DEFAULT_DURATION\n if duration != item.get('duration'):\n item['duration'] = duration\n return duration\n for index in range(len(sorted_keys)):\n normalise_duration(index)\n scene_items = []\n for key in sorted_keys:\n scene_item = data_float_indexed[key]\n assert scene_item and scene_item.get('duration') >= 0, \"All scene must have durations. Something has failed in parsing. {0}:{1}\".format(key, scene_item)\n scene_items.append(scene_item)\n return scene_items",
"def log_items(items):\n\tif len(items) < max_print:\n\t\tlogging.info(\"ITEMS : %s\", json.dumps(items))",
"def items():",
"def report_dump_runinfo(dump_items):\n runinfo_lines = [\"name:%s; status:%s; updated:%s\" %\n (item.name(), item.status(), item.updated())\n for item in dump_items]\n runinfo_lines.reverse()\n txt_content = \"\\n\".join(runinfo_lines)\n content = {}\n content['txt'] = txt_content + \"\\n\"\n # {\"jobs\": {name: {\"status\": stuff, \"updated\": stuff}}, othername: {...}, ...}\n content_json = {\"jobs\": {}}\n for item in sorted(dump_items, reverse=True, key=lambda job: job.name()):\n content_json[\"jobs\"][item.name()] = {'status': item.status(), 'updated': item.updated()}\n content['json'] = json.dumps(content_json)\n return content",
"def rapl_timeline():\n\n return [{ \"timestamp\": \"2021-10-05T09:14:58.226\", \"sensor\": \"toto\", \"target\": \"all\", \"groups\": { \"rapl\": { \"0\": { \"7\": { \"RAPL_ENERGY_PKG\": 5558763520.0, \"time_enabled\": 1000770053.0, \"time_running\": 1000770053.0 } } } } },\n { \"timestamp\": \"2021-10-05T09:14:59.226\", \"sensor\": \"toto\", \"target\": \"all\", \"groups\": { \"rapl\": { \"0\": { \"7\": { \"RAPL_ENERGY_PKG\": 4777050112.0, \"time_enabled\": 2001065535.0, \"time_running\": 2001065535.0 } } } } },\n { \"timestamp\": \"2021-10-05T09:15:00.227\", \"sensor\": \"toto\", \"target\": \"all\", \"groups\": { \"rapl\": { \"0\": { \"7\": { \"RAPL_ENERGY_PKG\": 6847987712.0, \"time_enabled\": 3001449088.0, \"time_running\": 3001449088.0 } } } } },\n { \"timestamp\": \"2021-10-05T09:15:01.227\", \"sensor\": \"toto\", \"target\": \"all\", \"groups\": { \"rapl\": { \"0\": { \"7\": { \"RAPL_ENERGY_PKG\": 5054922752.0, \"time_enabled\": 4001882359.0, \"time_running\": 4001882359.0 } } } } },\n { \"timestamp\": \"2021-10-05T09:15:02.228\", \"sensor\": \"toto\", \"target\": \"all\", \"groups\": { \"rapl\": { \"0\": { \"7\": { \"RAPL_ENERGY_PKG\": 5434507264.0, \"time_enabled\": 5002352709.0, \"time_running\": 5002352709.0 } } } } }\n ]",
"def process_item(self, item):\n entries = self.compat.process_entries(item)\n try:\n pd = PhaseDiagram(entries)\n analyzer = PDAnalyzer(pd)\n\n docs = []\n\n for e in entries:\n (decomp, ehull) = \\\n analyzer.get_decomp_and_e_above_hull(e)\n\n d = {\"material_id\": e.entry_id}\n d[\"thermo\"] = {}\n d[\"thermo\"][\"formation_energy_per_atom\"] = pd.get_form_energy_per_atom(e)\n d[\"thermo\"][\"e_above_hull\"] = ehull\n d[\"thermo\"][\"is_stable\"] = e in pd.stable_entries\n if d[\"thermo\"][\"is_stable\"]:\n d[\"thermo\"][\"eq_reaction_e\"] = analyzer.get_equilibrium_reaction_energy(e)\n d[\"thermo\"][\"decomposes_to\"] = [{\"material_id\": de.entry_id,\n \"formula\": de.composition.formula,\n \"amount\": amt}\n for de, amt in decomp.items()]\n d[\"thermo\"][\"entry\"] = e.as_dict()\n d[\"thermo\"][\"explanation\"] = self.compat.get_explanation_dict(e)\n docs.append(d)\n except PhaseDiagramError as p:\n self.__logger.warning(\"Phase diagram error: {}\".format(p))\n return []\n\n return docs",
"def get_info(data):\n # type: (dict) -> dict\n item = data.get(\"item\", {})\n plot = item.get(\"summary\", data.get(\"description\"))\n if plot and isinstance(plot, list):\n plot = plot[0]\n # TODO : some-kind of duration calculation...\n return {\n \"title\": item.get(\"title\", \"\").title(),\n \"plot\": plot,\n \"year\": extract_year(item.get(\"date\", \"\")),\n \"genre\": item.get(\"genre\")\n }",
"def scene_to_text(scenes):\n scene_text_dict = []\n scene_text_list = []\n for i, scene in enumerate(scenes):\n if len(scene['frame_data']) == 0:\n break\n scene_image = Image.fromarray(scene['frame_data'])\n str_text = pytesseract.image_to_string(scene_image)\n #list_text = list(filter(('').__ne__, re.split(\" |\\n|, |. |:|.\\n|\\x0c\", str_text)))\n list_text = list(filter(('').__ne__, re.split(\" |\\n\", str_text)))\n bag_of_word = collections.Counter(list_text)\n scene_text_dict.append(\n {'start': scene['start'], \n 'end': scene['end'], \n 'bag_of_word': dict(bag_of_word)\n })\n scene_text_list.append(list_text)\n return scene_text_dict, scene_text_list",
"def serializeItemsData(items, highlight=False):\n from debra.models import ProductModelShelfMap\n #items = items.filter(added_datetime__gte=datetime.date.today()-datetime.timedelta(days=30))\n # unordered_pair = list(items.values_list('added_datetime', 'id'))\n unordered_pair = []\n\n for item in items:\n unordered_pair.append((item.added_datetime, item.id))\n\n unordered_pair.sort()\n unordered_pair.reverse()\n ids = [x[1] for x in unordered_pair[:60]]\n items = ProductModelShelfMap.objects.select_related(\n 'product_model__brand').filter(id__in=ids)\n items_data = []\n prod_model_existing = set()\n for item in items:\n if item.product_model.name in prod_model_existing:\n continue\n prod_model_existing.add(item.product_model.name)\n item_data = {\n \"name\": item.product_model.name,\n \"img_url_feed_view\": item.product_model.img_url,\n \"img_url_panel_view\": item.img_url_panel_view,\n }\n if highlight:\n item_data[\"highlight\"] = True\n if item.product_model.brand:\n item_data[\"brand\"] = item.product_model.brand.name\n items_data.append(item_data)\n return items_data",
"def _item_to_elements_parser(self, item):\n elements = {}\n\n ####### Sad solution - look for better one. #######\n items = [\"data\", \"img\", \"title\", \"link\", \"price\"]\n values = (\"item.p.string.strip()\", 'item.img[\"src\"]', 'item.img[\"alt\"]',\n '''item.find(\"a\", {\"class\":\"detailsLink\"})['href']''',\n '''item.find('strong').string.strip()''')\n for key, value in zip(items, values):\n\n # CONVERT TIME\n # if key == \"data\":\n # try:\n # print (time.strptime(eval(value), \"%d %b\"))\n # except Exception as error:\n # print (error) # time data '5 paz' does not match format '%d %b'\n\n try:\n elements.update({key:eval(value)})\n except (TypeError, AttributeError):\n elements.update({key:None})\n\n\n # print()\n # for key, val in elements.items():\n # print (key, val)\n # print()\n ###################################################\n return elements",
"def extract_scene_info(self) -> None:\n records = [\n (self.level5data.get(\"sample\", rec[\"first_sample_token\"])[\"timestamp\"], rec)\n for rec in self.level5data.scene\n ]\n\n entries = []\n for start_time, record in sorted(records):\n start_time = (\n self.level5data.get(\"sample\", record[\"first_sample_token\"])[\"timestamp\"]\n / 1000000\n )\n\n token = record[\"token\"]\n name = record[\"name\"]\n date = datetime.utcfromtimestamp(start_time)\n host = \"-\".join(record[\"name\"].split(\"-\")[:2])\n first_sample_token = record[\"first_sample_token\"]\n\n entries.append((host, name, date, token, first_sample_token))\n\n self.df = pd.DataFrame(\n entries,\n columns=[\"host\", \"scene_name\", \"date\", \"scene_token\", \"first_sample_token\"],\n )\n host_count_df = self.df.groupby(\"host\")[\"scene_token\"].count()\n print(\"the number of host\", host_count_df)",
"def transform_item(item):\n\n if isinstance(item, dict):\n new = {}\n for k, v in item.items():\n # Replace hyphens with underscores for BigQuery compatibility\n k = k.replace(\"-\", \"_\")\n\n # Get inner array for date parts\n if k == \"date_parts\":\n v = v[0]\n if None in v:\n # \"date-parts\" : [ [ null ] ]\n v = []\n elif k == \"award\":\n if isinstance(v, str):\n v = [v]\n elif k == \"date_time\":\n try:\n datetime.strptime(v, \"%Y-%m-%dT%H:%M:%SZ\")\n except ValueError:\n v = \"\"\n\n new[k] = transform_item(v)\n return new\n elif isinstance(item, list):\n return [transform_item(i) for i in item]\n else:\n return item",
"def test_items(self):\n obs = self.tester.items()\n self.assertTrue(isinstance(obs, Iterable))\n exp = [('1.SKB1.640202', PrepSample('1.SKB1.640202', self.tester)),\n ('1.SKB2.640194', PrepSample('1.SKB2.640194', self.tester)),\n ('1.SKB3.640195', PrepSample('1.SKB3.640195', self.tester)),\n ('1.SKB4.640189', PrepSample('1.SKB4.640189', self.tester)),\n ('1.SKB5.640181', PrepSample('1.SKB5.640181', self.tester)),\n ('1.SKB6.640176', PrepSample('1.SKB6.640176', self.tester)),\n ('1.SKB7.640196', PrepSample('1.SKB7.640196', self.tester)),\n ('1.SKB8.640193', PrepSample('1.SKB8.640193', self.tester)),\n ('1.SKB9.640200', PrepSample('1.SKB9.640200', self.tester)),\n ('1.SKD1.640179', PrepSample('1.SKD1.640179', self.tester)),\n ('1.SKD2.640178', PrepSample('1.SKD2.640178', self.tester)),\n ('1.SKD3.640198', PrepSample('1.SKD3.640198', self.tester)),\n ('1.SKD4.640185', PrepSample('1.SKD4.640185', self.tester)),\n ('1.SKD5.640186', PrepSample('1.SKD5.640186', self.tester)),\n ('1.SKD6.640190', PrepSample('1.SKD6.640190', self.tester)),\n ('1.SKD7.640191', PrepSample('1.SKD7.640191', self.tester)),\n ('1.SKD8.640184', PrepSample('1.SKD8.640184', self.tester)),\n ('1.SKD9.640182', PrepSample('1.SKD9.640182', self.tester)),\n ('1.SKM1.640183', PrepSample('1.SKM1.640183', self.tester)),\n ('1.SKM2.640199', PrepSample('1.SKM2.640199', self.tester)),\n ('1.SKM3.640197', PrepSample('1.SKM3.640197', self.tester)),\n ('1.SKM4.640180', PrepSample('1.SKM4.640180', self.tester)),\n ('1.SKM5.640177', PrepSample('1.SKM5.640177', self.tester)),\n ('1.SKM6.640187', PrepSample('1.SKM6.640187', self.tester)),\n ('1.SKM7.640188', PrepSample('1.SKM7.640188', self.tester)),\n ('1.SKM8.640201', PrepSample('1.SKM8.640201', self.tester)),\n ('1.SKM9.640192', PrepSample('1.SKM9.640192', self.tester))]\n # Creating a list and looping over it since unittest does not call\n # the __eq__ function on the objects\n for o, e in zip(sorted(list(obs)), sorted(exp)):\n self.assertEqual(o, e)",
"def do_scenes(self, line):\n\n print 'List of Scenes \\n'\n print 'ID\\tName'\n\n for index, scene in enumerate(self.huuey.scenes):\n print u\"{index}\\t{unique}\".format(index=index+1, unique=scene)",
"def process(data):\n items = data.get('items', [])\n logging.info('- processing %d items', len(items))\n return [_flatten_dimensions(t['properties']['dimensions']) for t in items]",
"def test_items(self):\n obs = self.tester.items()\n self.assertTrue(isinstance(obs, Iterable))\n exp = [('1.SKB1.640202', Sample('1.SKB1.640202', self.tester)),\n ('1.SKB2.640194', Sample('1.SKB2.640194', self.tester)),\n ('1.SKB3.640195', Sample('1.SKB3.640195', self.tester)),\n ('1.SKB4.640189', Sample('1.SKB4.640189', self.tester)),\n ('1.SKB5.640181', Sample('1.SKB5.640181', self.tester)),\n ('1.SKB6.640176', Sample('1.SKB6.640176', self.tester)),\n ('1.SKB7.640196', Sample('1.SKB7.640196', self.tester)),\n ('1.SKB8.640193', Sample('1.SKB8.640193', self.tester)),\n ('1.SKB9.640200', Sample('1.SKB9.640200', self.tester)),\n ('1.SKD1.640179', Sample('1.SKD1.640179', self.tester)),\n ('1.SKD2.640178', Sample('1.SKD2.640178', self.tester)),\n ('1.SKD3.640198', Sample('1.SKD3.640198', self.tester)),\n ('1.SKD4.640185', Sample('1.SKD4.640185', self.tester)),\n ('1.SKD5.640186', Sample('1.SKD5.640186', self.tester)),\n ('1.SKD6.640190', Sample('1.SKD6.640190', self.tester)),\n ('1.SKD7.640191', Sample('1.SKD7.640191', self.tester)),\n ('1.SKD8.640184', Sample('1.SKD8.640184', self.tester)),\n ('1.SKD9.640182', Sample('1.SKD9.640182', self.tester)),\n ('1.SKM1.640183', Sample('1.SKM1.640183', self.tester)),\n ('1.SKM2.640199', Sample('1.SKM2.640199', self.tester)),\n ('1.SKM3.640197', Sample('1.SKM3.640197', self.tester)),\n ('1.SKM4.640180', Sample('1.SKM4.640180', self.tester)),\n ('1.SKM5.640177', Sample('1.SKM5.640177', self.tester)),\n ('1.SKM6.640187', Sample('1.SKM6.640187', self.tester)),\n ('1.SKM7.640188', Sample('1.SKM7.640188', self.tester)),\n ('1.SKM8.640201', Sample('1.SKM8.640201', self.tester)),\n ('1.SKM9.640192', Sample('1.SKM9.640192', self.tester))]\n # Creating a list and looping over it since unittest does not call\n # the __eq__ function on the objects\n for o, e in zip(sorted(list(obs)), sorted(exp)):\n self.assertEqual(o, e)",
"def draw_items_value(content):\n draw_data = {}\n ####\n #Not want to get the last running time data. That makes fault\n if \":\" in content[0]:\n return draw_data\n ####\n draw_data[\"CONFIG\"] = content[0]\n for line in content:\n if \":\" in line:\n item_and_value = strip_data(line)\n draw_data[item_and_value[0]] = item_and_value[1]\n return draw_data",
"def joinData(item_list):\n\n t_1 = datetime.now()\n news_dict = {}\n ln_item_list = len(item_list)\n for i, r in enumerate(item_list):\n str_date = r[\"date\"].strftime(\"%Y-%m\")\n if str_date not in news_dict:\n news_dict[str_date] = \"\"\n news_dict[str_date] += \" %s\" % r[\"text\"]\n print (i * 100.) / ln_item_list, datetime.now() - t_1\n return news_dict",
"def _iter_items(data_sequence):\n for time, element in data_sequence:\n for item in element:\n yield time, item",
"def process_scene_data(self, scene, data, tmp_dir):\n pass",
"def find_sequence_items(data):\n results = []\n cnt = 1\n seqs = SeqIO.parse(StringIO(data), 'fasta')\n for seq in seqs:\n results.append({\n 'idx': cnt,\n 'name': seq.name,\n 'sequence': str(seq.seq)\n })\n cnt += 1\n SequenceListItems.verify_unique_names(results)\n return results",
"def print_items(items): \n print(items)",
"def on_new_json_items(self, items_params, new_items=None):\n if self.json_progress_message_bar:\n self.json_progress.setValue(self.json_progress.value() + 1)\n if new_items:\n if KEY_ESRI_GEOMETRY_POLYGON in self.geometries and self.geometries[KEY_ESRI_GEOMETRY_POLYGON].is_checked:\n for polygon in new_items[KEY_POLYGON]:\n self.write_to_file(FILE_POLYGON, u\"\\n\")\n if self.written_first_polygon:\n self.write_to_file(FILE_POLYGON, u\",\")\n else:\n self.written_first_polygon = True\n self.write_to_file(FILE_POLYGON, polygon)\n if KEY_ESRI_GEOMETRY_POLYLINE in self.geometries and self.geometries[KEY_ESRI_GEOMETRY_POLYLINE].is_checked:\n for line in new_items[KEY_LINE]:\n self.write_to_file(FILE_LINE, u\"\\n\")\n if self.written_first_line:\n self.write_to_file(FILE_LINE, u\",\")\n else:\n self.written_first_line = True\n self.write_to_file(FILE_LINE, line)\n if KEY_ESRI_GEOMETRY_POINT in self.geometries and self.geometries[KEY_ESRI_GEOMETRY_POINT].is_checked:\n for point in new_items[KEY_POINT]:\n self.write_to_file(FILE_POINTS, u\"\\n\")\n if self.written_first_point:\n self.write_to_file(FILE_POINTS, u\",\")\n else:\n self.written_first_point = True\n self.write_to_file(FILE_POINTS, point)\n if KEY_ESRI_GEOMETRY_MULTI_POINT in self.geometries and\\\n self.geometries[KEY_ESRI_GEOMETRY_MULTI_POINT].is_checked:\n for point in new_items[KEY_MULTI_POINT]:\n self.write_to_file(FILE_POINTS, u\"\\n\")\n if self.written_first_point:\n self.write_to_file(FILE_POINTS, u\",\")\n else:\n self.written_first_point = True\n self.write_to_file(FILE_POINTS, point)\n\n self.on_new_json_task_complete()",
"def GetItemsAtTime(self, time_elapsed):\n items = []\n\n if self.data == None:\n raise Exception('TimelineData: Trying to GetState when data==None')\n\n # Go through each of our items\n for item in self.data:\n # Ignore items that cant be retrieved by time_elapsed\n if 'start' not in item or 'duration' not in item:\n #print 'TimelineData: Skipping Item: %s: %s' % (self.path, item)\n continue\n\n # If time_elapsed is between start and end of this item\n if time_elapsed >= item['start'] and time_elapsed < item['start'] + item['duration']:\n print 'TimelineData: Found Item: %s: %s' % (self.path, item)\n items.append(item)\n else:\n #print 'TimelineData: Unmatched Item: %s: %s' % (self.path, item)\n pass\n\n return items",
"def test_filter_data_by_race():\n data = race.filter_data_by_race(random.randint(1, 3))\n assert len(data) == 11\n assert type(data) == list\n for datum in data:\n assert type(datum) == dict",
"def read_item(data: DataModel) -> Dict:\n convertor = Convertor(data)\n return {'output': convertor.get_humanized_data()}"
] | [
"0.5740542",
"0.56621575",
"0.5524638",
"0.5478715",
"0.54087716",
"0.53677016",
"0.53536004",
"0.52064437",
"0.51765895",
"0.5169679",
"0.51359755",
"0.51294976",
"0.5126739",
"0.5117502",
"0.51156247",
"0.511032",
"0.49845767",
"0.49424937",
"0.49424642",
"0.49300858",
"0.48953164",
"0.48901466",
"0.48895273",
"0.4884608",
"0.4816748",
"0.48146847",
"0.48141533",
"0.48104206",
"0.48055044",
"0.48018357"
] | 0.64365894 | 0 |
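Each row above pairs a natural-language query with one positive document and 30 mined negative documents plus their similarity scores, and the metadata declares a (query, document, negatives) triplet objective. The sketch below is a minimal, hypothetical example of turning one such row into triplet training examples; the `row` dict, its field names, and the `mine_triplets` helper simply mirror the schema shown in this table and are not part of any published loader.

```python
# Minimal sketch: build (anchor, positive, negative) triplets from one row
# shaped like the rows in this table. Illustrative only.

def mine_triplets(row, max_negatives=5):
    """Yield (query, positive_doc, negative_doc) tuples for triplet training."""
    anchor = row["query"]
    positive = row["document"]
    # Keep the hardest negatives first: a higher score means the negative is
    # more similar to the query, and therefore a harder training example.
    ranked = sorted(
        zip(row["negatives"], row["negative_scores"]),
        key=lambda pair: float(pair[1]),
        reverse=True,
    )
    for negative, _score in ranked[:max_negatives]:
        yield anchor, positive, negative


# Example usage with a row following the schema above:
row = {
    "query": "Method that returns the rest energy of the particle.",
    "document": "def RestEnergy(self): ...",
    "negatives": ["def energy(self): ...", "def get_energy(self): ..."],
    "negative_scores": ["0.7668576", "0.7595888"],
}
for triplet in mine_triplets(row, max_negatives=2):
    print(triplet)
```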
Return a list of all live Python objects, not including the list itself. | def get_all_objects():
gc.collect()
gcl = gc.get_objects()
olist = []
seen = {}
# Just in case:
seen[id(gcl)] = None
seen[id(olist)] = None
seen[id(seen)] = None
# _getr does the real work.
_getr(gcl, olist, seen)
return olist | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_all_objects():\n gcl = gc.get_objects()\n olist = []\n seen = {}\n # Just in case:\n seen[id(gcl)] = None\n seen[id(olist)] = None\n seen[id(seen)] = None\n # _getr does the real work.\n _getr(gcl, olist, seen)\n return olist",
"def get_all_objects():\n gcl = gc.get_objects()\n olist = []\n seen = {}\n # Just in case:\n seen[id(gcl)] = None\n seen[id(olist)] = None\n seen[id(seen)] = None\n # _getr does the real work.\n _getr(gcl, olist, seen)\n return olist",
"def _get_all_tracked_objects(self):\n all = []\n for obj in gc.get_objects():\n if any([mod.is_module_object(obj) for mod in self.tracked_modules]):\n all.append(TrackedObject(obj))\n return all",
"def obj_list(self):\n return self._obj_list",
"def objects (self):\n return InternalObjectList (self)",
"def objects(self):\n\t\treturn self._objects",
"def getinstances(cls):\n\t\t\tdead = set()\n\t\t\tfor ref in cls._instances:\n\t\t\t\tobj = ref()\n\t\t\t\tif obj is not None:\n\t\t\t\t\tyield obj\n\t\t\t\telse:\n\t\t\t\t\tdead.add(ref)\n\t\t\tcls._instances -= dead",
"def all_objects(self) -> List[StorageObject]:\n return [item for item in self._store.values()]",
"def get_downstream_objects(obj):\n # gcl = gc.get_objects()\n olist = []\n seen = {}\n # Just in case:\n # seen[id(gcl)] = None\n seen[id(olist)] = None\n seen[id(seen)] = None\n # _getr does the real work.\n _getr([obj], olist, seen)\n return olist",
"def all(self):\n return self.__objects",
"def all(self):\n return self.__objects",
"def all(self):\n return self.__objects",
"def all(self):\n return self.__objects",
"def all(self):\n return self.__objects",
"def all(self):\n return self.__objects",
"def objects(self):\r\n return self._objects",
"def all(self):\n return (self.__objects)",
"def get_all_cached_instances(cls):\n return list(cls.__dbclass__.__instance_cache__.values())",
"def get_objects(self):\n return self._objects",
"def list(self):\n return self.cell.objects+self.cell.tempObjects",
"def list(self) -> list:\n return list(self)",
"def all(self):\n return list(self)",
"def get_greenlets(cls):\n return { obj for obj in gc.get_objects() if isinstance(obj, greenlet) and not obj.dead }",
"def hbObjects(self):\r\n return self.__hbObjs",
"def list(self):\n return self._observe_list",
"def list_objects(self, path):\n return [x for x in self.list_objects_generator(path)]",
"def zombies(self):\r\n # replace with an actual generator\r\n return (zombie for zombie in self._zombie_list)",
"def get_leaks(self):\n _run_garbage_collection()\n\n remaining_objects = self._get_all_tracked_objects()\n remaining_objects = self._remove_initial_objects_from_list(remaining_objects)\n\n return remaining_objects",
"def GetObjects(self): \r\n return self.model.GetObjects()",
"def watch_list(self) -> list:\n return []"
] | [
"0.736436",
"0.729643",
"0.6785282",
"0.6629329",
"0.65524584",
"0.6406",
"0.6404724",
"0.63662046",
"0.6298751",
"0.62828684",
"0.62828684",
"0.62828684",
"0.62828684",
"0.62828684",
"0.62828684",
"0.626183",
"0.6255329",
"0.6242195",
"0.61547345",
"0.6152571",
"0.6122982",
"0.6087435",
"0.6062149",
"0.6041642",
"0.6034878",
"0.60320884",
"0.5966505",
"0.5945022",
"0.5939678",
"0.5932139"
] | 0.7565201 | 0 |
Method that returns the rest energy of the particle. | def RestEnergy(self):
return (self.restMass * const.speed_of_light * const.speed_of_light) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def energy(self):\n energy = -0.5*np.sum(self.phi)+0.5*np.sum(self.mass*np.sqrt(self.particles.momentum[:,0]**2+self.particles.momentum[:,1]**2)**2)\n return energy",
"def TotalEnergy(self):\n return (math.sqrt((Particle.RestEnergy(self) ** 2)\n + (np.linalg.norm(Particle.Momentum(self)) * const.speed_of_light) ** 2))",
"def get_energy(self):\n return self.momentum*self.momentum/(2*self.mass)",
"def calcEnergy(self):\n speed_light = constants.physical_constants[\"speed of light in vacuum\"][0]#m/sec by default\n if self.mass is None:\n raise CoordinateVector(\"The particle mass needs to be specified to calculate the energy.\")\n return speed_light*math.sqrt(self.p*self.p + (self.mass*speed_light)**2)",
"def get_energy(self):\r\n return self._energy",
"def getEnergy(self):\n if not hasattr(self,\"energy\"):\n self.energy = self.calcEnergy()\n return self.energy",
"def energy(self):\n return self._energy",
"def energy(self):\n return self.mc.energy(self.chain)",
"def KineticEnergy(self):\n return Particle.TotalEnergy(self) - Particle.RestEnergy(self)",
"def energy(self) -> Union[int, float]:\n return self.proto.energy",
"def energy(self) -> Union[int, float]:\n return self.proto.energy",
"def get_total_energy_produced (self):\n return self.pre_intertie_generation[:self.actual_project_life]",
"def E(self):\n return self.generic_getter(get_energy, \"E\", \"convert_energy\")",
"def energy(self):\n self.E = - np.sum(self.phi) + 0.5 * self.mass * np.sqrt((self.v_x ** 2 + self.v_y **2))",
"def energy(self):\n return self._accelerator.energy",
"def energy_func(self):\n i = self.inl[0].to_flow()\n o = self.outl[0].to_flow()\n\n T_m = (T_mix_ph(i, T0=self.inl[0].T.val_SI) +\n T_mix_ph(o, T0=self.outl[0].T.val_SI)) / 2\n\n return (i[0] * (o[2] - i[2]) -\n self.A.val * (\n self.E.val * self.eta_opt.val -\n (T_m - self.Tamb.val_SI) * self.lkf_lin.val -\n self.lkf_quad.val * (T_m - self.Tamb.val_SI) ** 2))",
"def energy(self, r):\n sigma = self.params['sigma']\n epsilon = self.params['epsilon']\n s = sigma / r\n s6 = s**6; s12 = s6 * s6\n pot = 4.0 * epsilon * (s12 - s6)\n return pot",
"def energy_func(self):\n i = self.inl[0].to_flow()\n o = self.outl[0].to_flow()\n\n T_m = (T_mix_ph(i, T0=self.inl[0].T.val_SI) +\n T_mix_ph(o, T0=self.outl[0].T.val_SI)) / 2\n\n iam = (\n 1 - self.iam_1.val * abs(self.aoi.val) -\n self.iam_2.val * self.aoi.val ** 2)\n\n return (i[0] * (o[2] - i[2]) -\n self.A.val * (\n self.E.val * self.eta_opt.val * self.doc.val ** 1.5 * iam -\n (T_m - self.Tamb.val_SI) * self.c_1.val -\n self.c_2.val * (T_m - self.Tamb.val_SI) ** 2))",
"def getEnergy(self):\n energy = 0.0\n\n for i in range(0, self.nPoints):\n energy += self.tDomain[i] ** 2\n\n energy /= self.nPoints\n return energy",
"def getEnergyEvolution(self):\n\n\t\tEBefore = [0.5*np.sum(i**2)/self.__Nparticles for i in self.__XPEnsembleBefore]\n\t\tEAfter = [0.5*np.sum(i**2)/self.__Nparticles for i in self.__XPEnsembleAfter]\n\n\t\treturn EBefore, EAfter",
"def get_energy():\n\n # open the psi4 log file\n with open('output.dat', 'r') as log:\n lines = log.readlines()\n\n # find the total converged energy\n for line in lines:\n if 'Total Energy =' in line:\n energy = float(line.split()[3])\n break\n else:\n raise EOFError('Cannot find energy in output.dat file.')\n\n return energy",
"def get_energy():\n\n # open the psi4 log file\n with open(\"output.dat\", \"r\") as log:\n for line in log:\n if \"Total Energy =\" in line:\n return float(line.split()[3])\n\n raise EOFError(\"Cannot find energy in output.dat file.\")",
"def compute_energy(self):\n energy = 0.5 * self.masses * np.sum(self.velocities * self.velocities, axis=1)\n avg_energy = np.mean(energy) # average kinetic energy of all particles\n return avg_energy",
"def get_e(self):\n return self.e_min + self.e_ * self.e_range",
"def potentialEnergy(self):\n return 0.5*(pdist(self.positions)**2).sum()",
"def total_energy(self):\n return self._total_energy",
"def energy(ps):\n return kinetic_energy(ps) + potential_energy(ps)",
"def get_energy(self):\n return self.bot_client.send_command(_Command.GetEnergy)",
"def energy(self):\n e = 0\n\n restoration = RestorationModel(self.graph_damaged)\n restoration.run(self.state)\n restoration_graphs = restoration.get_restoration_graphs()\n restoration_times = restoration.get_restoration_times()\n restoration_costs = restoration.get_restoration_costs()\n\n damaged = []\n damaged.append(get_delta(self.no_damage, self.initial_damage))\n\n sim_results = Parallel(n_jobs=4)(delayed(parallel_model)(\n graph, self.od_graph, self.od_matrix) for graph in restoration_graphs[:-1])\n for values in sim_results:\n damaged.append(get_delta(self.no_damage, values))\n\n for idx, values in enumerate(damaged):\n dt = restoration_times[idx] if idx == 0 else restoration_times[idx] - \\\n restoration_times[idx-1]\n e += sum(restoration_costs[idx]) + dt * (self.day_factor * values[2] * np.sum(self.mu*self.xi) +\n values[3] * np.sum(self.mu * (self.nu * self.F_w + self.rho)) + values[4] * self.upsilon)\n with open(self.fdir+'energy.csv', 'a') as f:\n f.write('\\n'+str(e))\n\n return e",
"def energy_func(self):\n return (\n self.inl[0].m.val_SI * (\n self.outl[0].h.val_SI - self.inl[0].h.val_SI) +\n self.inl[1].m.val_SI * (\n self.outl[1].h.val_SI - self.inl[1].h.val_SI))"
] | [
"0.7668576",
"0.7595888",
"0.7482029",
"0.7454896",
"0.74329084",
"0.74117565",
"0.7405934",
"0.7287761",
"0.7283499",
"0.70380706",
"0.70380706",
"0.6973054",
"0.69607884",
"0.6895652",
"0.67723006",
"0.6750274",
"0.67392176",
"0.67377317",
"0.6689237",
"0.6675212",
"0.66727716",
"0.662599",
"0.6617034",
"0.65936416",
"0.6591",
"0.6571902",
"0.65373886",
"0.6530975",
"0.6525246",
"0.6520253"
] | 0.7802598 | 0 |
Method that returns Beta (velocity/speed of light) as a float | def BetaVelocity(self):
return np.linalg.norm(self.velocity) / const.speed_of_light | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def betaT(self):\n if self.maTail > 1:\n return 0\n else:\n return sqrt(1 - self.maTail**2)",
"def getBeta(self, alpha):\n return 2.0*(2.0-alpha) + -4.0*np.sqrt(1.0-alpha)",
"def beta(self):\n eTheta = self.eTheta()\n cosOmg = np.cos(self.omega())\n return self.a1()/c.c*(1-eTheta**2)**0.5*cosOmg",
"def B(alpha: float, beta: float) -> float:\n return math.gamma(alpha) * math.gamma(beta) / math.gamma(alpha + beta)",
"def _beta(self):\n return _handle_ab(self.solution, self.use_const)[1]",
"def beta(self):\n return self._beta",
"def beta(self):\n return self._beta",
"def B(alpha, beta):\n return math.gamma(apha) * math.gamma(beta) / math.gamma(alpha + beta)",
"def B(alpha, beta):\n return math.gamma(alpha) * math.gamma(beta) / math.gamma(alpha + beta)",
"def B(alpha, beta):\n return math.gamma(alpha) * math.gamma(beta) / math.gamma(alpha + beta)",
"def B(alpha, beta):\n return math.gamma(alpha) * math.gamma(beta) / math.gamma(alpha + beta)",
"def betaW(self):\n if self.maCruise > 1:\n return 0\n else:\n return sqrt(1 - self.maCruise**2)",
"def get_beta(self):\n\n return np.matmul(self.rotation_x, self.beta_z)",
"def getBeta(self):\n\t\treturn self.relativistic_beta",
"def beta_r(r):\n return 0.",
"def pvalue_beta(self):\n return self._pvalue_beta",
"def _get_alpha_beta(self, a, b):\n beta = a / b\n alpha = a * beta\n return alpha, beta",
"def beta_factor(mol_data, ephemobj):\n # imported here to avoid circular dependency with activity.gas\n from .core import photo_timescale\n from ...data import Ephem\n\n if not isinstance(ephemobj, Ephem):\n raise ValueError('ephemobj must be a `sbpy.data.ephem` instance.')\n if not isinstance(mol_data, Phys):\n raise ValueError('mol_data must be a `sbpy.data.phys` instance.')\n\n orb = ephemobj\n delta = (orb['delta']).to('m')\n r = (orb['r'])\n\n if not isinstance(mol_data['mol_tag'][0], str):\n cat = JPLSpec.get_species_table()\n mol = cat[cat['TAG'] == mol_data['mol_tag'][0]]\n name = mol['NAME'].data[0]\n\n else:\n name = mol_data['mol_tag'][0]\n\n timescale = photo_timescale(name)\n\n if timescale.ndim != 0:\n # array\n timescale = timescale[0]\n\n beta = (timescale) * r**2\n\n return beta",
"def beta(self):\n return self[1::2]",
"def _tstat_beta(self):\n return _handle_ab(self._tstat_all, self.use_const)[1]",
"def beta_r(r):\n return 1.",
"def beta(theta, a, b):\n B = math.gamma(a) * math.gamma(b) / math.gamma(a + b)\n return (theta ** (a - 1)) * ((1 - theta) ** (b - 1)) / B",
"def getGamma(self, alpha, beta):\n return np.power(beta,2.0)/2.0/alpha",
"def brate(self):\n try:\n return self.pos / self.runtime\n except ZeroDivisionError:\n return 0",
"def tstat_beta(self):\n return self._tstat_beta",
"def getB(self):\n return ((self.bPlusbStar() / self.nPos) + (self.bMinusbStar / self.nNeg)) / 2",
"def _pvalue_beta(self):\n return _handle_ab(self._pvalues_all, self.use_const)[1]",
"def _get_alpha_beta(self):\n alpha = tf.nn.softplus(self.alpha_prime)\n beta = -alpha + tf.nn.softplus(self.beta_prime)\n return alpha, beta",
"def get_b(self):\n return ((self.b_plus_bstar / self.n_pos) + (self.b_minus_bstar / self.n_neg)) / 2",
"def B(param):\n return (param.delta + param.nu + param.mu0 - param.beta) * param.A + (param.beta - param.nu) * (param.delta + param.nu + param.mu1) * param.b"
] | [
"0.7552084",
"0.754992",
"0.7535426",
"0.7439897",
"0.74009764",
"0.7362183",
"0.7362183",
"0.71039945",
"0.707216",
"0.707216",
"0.707216",
"0.7020982",
"0.69366413",
"0.6877576",
"0.6866696",
"0.67877996",
"0.6773138",
"0.6760468",
"0.6758375",
"0.67385536",
"0.670362",
"0.67025906",
"0.6696022",
"0.66733295",
"0.66308504",
"0.6612742",
"0.6595469",
"0.6531038",
"0.65085256",
"0.6470471"
] | 0.8313839 | 0 |
Method that returns the Lorentz Factor of the particle. | def LorentzFactor(self):
# Use of abs() and x ** 0.5 provides a more stable calculation of lorentz
# factor than math.sqrt() at high velocities.
return 1 / abs( 1 - Particle.BetaVelocity(self) * Particle.BetaVelocity(self))**0.5 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def L(self) -> float:\n return self._L",
"def Lorentz(x, x0, A, B, d):\n return B + A / (((x - x0) / d) ** 2 + 1)",
"def lorentz(x, gamma):\n return 1 / cs.pi * 0.5 * gamma / ((0.5 * gamma**2) + x**2)",
"def relu(z: float) -> float:\n return z if z > 0 else 0.01 * z",
"def lorentz(x, x0, gamma): \n return (0.5/pi) * gamma / ((x-x0)**2 + 0.25 * gamma**2)",
"def lp_factor(self):\n num = 1 + np.cos(2 * self.angle) ** 2\n den = np.cos(self.angle) * np.sin(self.angle) ** 2\n return num / den",
"def lfn(self):\n if self.precision:\n return self.evaluations.exposedWing.edges[1].point1.z - self.evaluations.chordIntersected.edges[1].length\n else:\n return (self.acW + self.longPosW) / 2 # first guess for a faster evaluation",
"def Luminosity(self, z, f=1., dnu=1000.):\n ld = self.Luminosity_Distance(z)\n ld2 = ld * ld\n lum = f * self.Jy2CGS * dnu * self.MHz2Hz * 4 * np.pi * ld2\n return lum",
"def lorentzian(self, params):\n height, width, c_freq = params\n return height / (1.0+ (4.0 / width**2)*(self.freqs - c_freq)**2)",
"def lorentzian(self, params):\n height, width, frequency = params\n\n return height / (1.0+ (4.0 / width**2)*(self.freq - frequency)**2)",
"def calc_lamb(self, x_surface, geom):\n\n return self.rfl",
"def get_lz(self):\r\n return self.dz * self.nz - self.oz",
"def fun_lorentzian(p,r):\n return p[1] / ((r/p[0])**2 + 1)",
"def relu_prime(z: float) -> float:\n return 1.0 if z > 0 else 0.0",
"def L(self, t):\n return (3e-16 * self.pulsar.tau_0 * self.pulsar.L_0 *\n (t / (t + self.pulsar.tau_0)))",
"def calcLorentzGammaFromMomentum(self,direction):\n if self.mass is None:\n raise CoordinateVector(\"The particle mass needs to be specified to calculate the lorentz gamma.\")\n if direction not in self.x.order: \n raise CoordinateVector(\"The direction, \"+str(direction)+ \" needs to be one of \" +\",\".join(self.x.order) + \" to calculated the lorentz gamma.\")\n speed_light = constants.physical_constants[\"speed of light in vacuum\"][0]#m/sec by default\n return math.sqrt(1 + (getattr(self.p,direction)/(self.mass*speed_light))**2)",
"def get_coeff(self):\n return bernoulli(self.degree+1) / factorial(self.degree + 1)",
"def omLz(self,z):\n return self.omL/(self.omL + self.omR*(1.0 + z)**2 + self.om0*(1.0 + z)**3)",
"def lorentz_func(x, center, width):\n return 1/np.pi*width/2 *1/((x-center)**2+(x/width)**2)",
"def lorentzian(x, x0=0.0, fwhm=1.0, ampl=1.0):\n return ampl * (1 + 4 * ((x - x0) / fwhm) ** 2) ** (-1)",
"def lvec(self):\n lv = ROOT.TLorentzVector()\n# if self.pt < 0 or abs(self.eta) > 6:\n# raise Exception(\"Invalid values for TLorentzVector\")\n lv.SetPtEtaPhiM(self.pt, self.eta, self.phi, self.mass)\n# if abs(lv.Pt()) > 100000 or abs(lv.Eta()) > 100000:\n# raise Exception(\"Invalid values for TLorentzVector\")\n return lv",
"def log_likelihood_z_lognormal(self, std=1.0):\n #return self.log_det_Jxz - self.dim * tf.log(std) - (0.5 / (std**2)) * tf.reduce_sum(self.output_z**2, axis=1)\n from deep_boltzmann.util import logreg\n logz = logreg(self.output_z, a=0.001, tf=True)\n ll = self.log_det_Jxz \\\n - (0.5 / (std**2)) * tf.reduce_sum(logz**2, axis=1) \\\n - tf.reduce_sum(logz, axis=1)\n return ll",
"def z_score(self, x):\n return (x - self.n) / self.p",
"def lnprobability(self):\n return",
"def loevinger_coeff(self):\n a, c, d, b = self.to_ccw()\n p1, q1 = a + b, c + d\n p2, q2 = a + c, b + d\n n = p1 + q1\n\n cov = self.covar()\n\n if n == 0:\n return np.nan\n elif a == n or d == n:\n # only one (diagonal) cell is non-zero\n return 0.5\n elif cov == 0.0:\n return 0.0\n else:\n return _div(cov, min(p1 * q2, p2 * q1))",
"def get_Lf(self):\n return 0",
"def get_z(self) -> int:\n return self.__z",
"def luminosity_distance(self, z):\n da = self.angular_diameter_distance(z)\n dl = da*(1.+z)**2.\n return(dl)",
"def calcLorentzGammaFromVelocity(self,direction):\n if direction not in self.v.order: \n raise CoordinateVector(\"The direction, \"+str(direction)+ \" needs to be one of \" +\",\".join(self.x.order) + \" to calculated the lorentz gamma.\")\n speed_light = constants.physical_constants[\"speed of light in vacuum\"][0]#m/sec by default\n return math.sqrt(1 /(1 - (getattr(self.v,direction)/speed_light)**2))",
"def L(n):\n\tif (n==0):\n\t\treturn lambda x: 1.0\n\n\telif (n==1):\n\t\treturn lambda x: x\n\n\telse:\n\t\treturn lambda x: ( (2.0*n-1.0) * x * L(n-1)(x)-(n-1) * L(n-2)(x) ) / n"
] | [
"0.65476906",
"0.65036815",
"0.6469627",
"0.638012",
"0.6348522",
"0.6347106",
"0.6341828",
"0.6334289",
"0.6333599",
"0.63280964",
"0.62715966",
"0.62416214",
"0.6189796",
"0.61773413",
"0.61520237",
"0.6139124",
"0.6098995",
"0.60558134",
"0.60534275",
"0.5988797",
"0.59480953",
"0.5939049",
"0.59071845",
"0.58696145",
"0.58549404",
"0.5834981",
"0.58342516",
"0.5827706",
"0.5827286",
"0.5825165"
] | 0.8493009 | 0 |
Method that returns the relativistic momentum of the particle | def Momentum(self):
return (np.multiply(Particle.LorentzFactor(self)
, np.array(self.velocity,dtype=float))* self.restMass) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getMomentum(self):\n return self.p",
"def calcMomentumFromVelocity(self):\n if self.mass is None:\n raise CoordinateVector(\"The particle mass needs to be specified to calculate the particle momentum from velocity.\")\n values = {}\n for direction in self.v.order:\n gamma = self.calcLorentzGammaFromVelocity(direction)\n values[direction] = getattr(self.v,direction)*gamma*self.mass\n self.setMomentum(Cartesian3DVector(**values))\n return self.getMomentum()",
"def linear_momentum(self):\r\n return self.mass * self.vel",
"def calcVelocityFromMomentum(self):\n if self.mass is None:\n raise CoordinateVector(\"The particle mass needs to be specified to calculate the particle velocity from momentum.\")\n values = {}\n for direction in self.p.order:\n gamma = self.calcLorentzGammaFromMomentum(direction)\n values[direction] = getattr(self.p,direction)/(gamma*self.mass)\n self.setVelocity(Cartesian3DVector(**values))\n return self.getVelocity()",
"def momentum (self):\n\n for planet in self.planets: #this loop takes a 'planet' from 'self.planets' and computes it linear momentum.\n planet.momentum = planet.mass * planet.velocity #Each body's resulting momentum is updated to the body's information defined in the Particle class.",
"def get_velocity(self):\n return self.momentum/self.mass",
"def RelativisticMass(self):\n return Particle.LorentzFactor(self) * self.restMass",
"def totalmomentum (self):\n tot_p=0.\n for planet in self.planets: #this loop takes each 'planet' momentum in 'self.planets' and sums them.\n tot_p += planet.momentum #'tot_p' is the resulting vector of all momenta vectors.\n total_mom = np.linalg.norm(tot_p) #the 'total_mom' is the total linear momentum's magnitude, which is conserved.\n return (total_mom)",
"def getFinalMomentum(self):\n return self.final_p_MeV",
"def calcMomentum(self):\n # start conditions\n if not self.quiet:\n fs = u'''Calculating momentum gain.\n Peak field: {self.rf_peak_field:.3f} MV/m\n Phase: {self.phase:.1f}°'''\n print(fs.format(**locals()))\n\n # Fortran method (0.8 ms to run cf 11 ms for Python code)\n self.t_array, self.gamma_dash_array, self.gamma_array, self.beta_array, self.p_array = calcMomentum.calcmomentum(self.freq, self.phase, self.gamma_start, self.dz, self.gamma_tilde_dash, self.phase_offset)\n # print(self.gamma_dash_array)\n self.final_p_MeV = self.p_array[-1] * -1e-6 * epsilon_e\n\n if not self.quiet:\n print(u'Final momentum: {:.3f} MeV/c'.format(self.final_p_MeV))\n self.calc_level = CALC_MOM",
"def angular_momentum(self):\n cart = self.represent_as(coord.CartesianRepresentation)\n return cart.pos.cross(cart.vel).xyz",
"def totalmass_comvelocity(particle_list):\r\n total_momentum = sum([particle.linear_momentum()\r\n for particle in particle_list])\r\n total_mass = sum([particle.mass for particle in particle_list])\r\n\r\n return total_mass, total_momentum / total_mass",
"def calcEnergy(self):\n speed_light = constants.physical_constants[\"speed of light in vacuum\"][0]#m/sec by default\n if self.mass is None:\n raise CoordinateVector(\"The particle mass needs to be specified to calculate the energy.\")\n return speed_light*math.sqrt(self.p*self.p + (self.mass*speed_light)**2)",
"def get_momentum(self, iteration: int) -> float:\n cycle_perc = iteration / self.full_cycle\n\n # Normal cycle\n # Increase\n if cycle_perc <= 0.5:\n # starts @ 0 for baseline momentum\n momentum = self.init_momentum + iteration * self.momentum_decrement\n\n # Decrease\n elif 0.5 < cycle_perc <= 1:\n momentum = self.init_momentum + (\n self.full_cycle - iteration) * self.momentum_decrement\n\n # Tail cycle | cycle_perc > 1\n else:\n momentum = self.init_momentum\n\n return momentum",
"def dispersion(self, p):\n return p**2 / (2*self.mass)",
"def getMomentumMap(self):\n return self.p_array * -1e-6 * epsilon_e",
"def update_moments_r(self):\n denominator = np.exp(self.gamma_r[:, None] * self.constellation\n - self.Sigma_r[:, None] * np.power(self.constellation, 2) /2 )\n nominator1 = np.exp(self.gamma_r[:, None] * self.constellation\n - self.Sigma_r[:, None] * np.power(self.constellation, 2) /2 ) * self.constellation\n \n nominator2 = np.exp(self.gamma_r[:, None] * self.constellation\n - self.Sigma_r[:, None] * np.power(self.constellation, 2) /2) * np.power(self.constellation, 2)\n try:\n \n moment1 = nominator1.sum(axis=1) / denominator.sum(axis=1)\n moment2 = nominator2.sum(axis=1) / denominator.sum(axis=1)\n assert np.all(np.logical_not(np.isnan(moment1))) and np.all(np.logical_not(np.isnan(moment2)))\n except:\n print(\"Oops! That was no valid number. Try again...\")\n\n \n self.mu = moment1\n return moment1, moment2",
"def particleMass(self):\n return self.params['particleMass']",
"def gradient_descent_momentum(self):\n return self._gradient_descent_momentum",
"def first_moment(self, mass, z=None):\n return 1.0",
"def escaped_momentum(self):\r\n position, velocity,escaped_particles,impact,collision,mom = self.box_collision_info()\r\n\r\n for i in xrange(1,self.n):\r\n velocity[np.logical_not(impact)] = velocity[np.logical_not(\r\n impact)]\r\n momentum = self.m*velocity\r\n abs_momentum = np.sum(np.sqrt(momentum[:,0]**2 + momentum[:,1]**2\r\n + momentum[:,2]**2))/2\r\n force = abs_momentum/self.dt\r\n\r\n return abs_momentum, force",
"def estimate(self):\n mu = self.mean()\n var = np.average((self.particles - mu) ** 2, weights=self.weights, axis=0)\n\n return mu, var",
"def calP(self):\n N = len(self.listOfParticles)\n m = self.listOfParticles[0].m\n vsum = 0\n for particle in self.listOfParticles:\n vsum += particle.V.len()\n A = np.pi*self.R**2\n F = 0.5 * A * (2*self.R) * m * N * vsum**2\n return F",
"def _velocity_position(self, particle, dim, p_nd):\n\n new_velocity = (self.w * particle.velocity[dim]) \\\n + (self.c1 *\n (particle.pbest_position[dim] - particle.position[dim])) \\\n + (self.c2 * (self.gbest_position[dim] - particle.position[dim])) \\\n + (self.c3 * (p_nd - particle.position[dim]))\n\n new_velocity = min(\n self._vmax,\n max(-self._vmax, new_velocity)\n )\n\n new_position = min(\n self._bounds[1],\n max(self._bounds[0], particle.position[dim] + new_velocity)\n )\n\n return new_velocity, new_position",
"def setMomentum(self,p):\n if p is None:\n self.p = Cartesian3DVector()\n else:\n if isinstance(p,Cartesian3DVector):\n self.p = Cartesian3DVector(p.x,p.y,p.z)\n else:\n raise CoordinateVector(\"Initializing a particle with the incorrect momentum vector type.\")",
"def get_energy(self):\n return self.momentum*self.momentum/(2*self.mass)",
"def particleCharge(self):\n return self.params['particleCharge']",
"def relative_velocity(self):\n return self.base_platform.relative_velocity",
"def getMomentumGradient(self):\n dphi = 0.5\n orig_phase = self.phase\n p0 = self.phaseToMomentum(orig_phase - dphi / 2)\n p1 = self.phaseToMomentum(orig_phase + dphi / 2)\n self.setRFPhase(orig_phase)\n return (p1 - p0) / dphi",
"def steel_total_moment(self,strain_dis,na_z):\r\n\t\ttotal_moment = 0.0\r\n\t\tfor steel in self.reinforcement:\r\n\t\t\tstrain = np.interp(steel[0], self.mesh_center,strain_dis)\r\n\t\t\tforce = (self.steel(strain)-self.concrete(strain))*steel[1]\r\n\t\t\ttotal_moment = total_moment + force*(steel[0]-na_z)\r\n\t\treturn total_moment"
] | [
"0.74322164",
"0.73959345",
"0.73226726",
"0.72440016",
"0.69533795",
"0.69061005",
"0.6754555",
"0.665831",
"0.6585926",
"0.6467245",
"0.6458798",
"0.64507335",
"0.6449878",
"0.64052224",
"0.63391316",
"0.63144106",
"0.6260632",
"0.61805636",
"0.61436313",
"0.6129185",
"0.61285204",
"0.61268336",
"0.61111414",
"0.6062954",
"0.60131866",
"0.60126275",
"0.6002262",
"0.5959174",
"0.59279263",
"0.5922599"
] | 0.75053847 | 0 |
Method that returns the electric field from the particle that affects another particle. | def GenerateElectricField(self, affectedParticle):
return self.electricField.GenerateField(affectedParticle) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def electric_field(self, xyz):\n dxyz = self.vector_distance(xyz)\n r = spatial.repeat_scalar(self.distance(xyz))\n kr = self.wavenumber*r\n ikr = 1j * kr\n\n front_term = (\n (1j * self.omega * self.mu * self.moment) / (4. * np.pi * r**2) *\n (ikr + 1) * np.exp(-ikr)\n )\n return front_term * self.cross_orientation(dxyz) / r",
"def compute_electric_field(self):\n self.set_grid()\n rho = self.grid.distribute(self.bunch.positions)\n rho *= self.bunch.line_charge_density * 4 # unknown origin\n phi = self.solver.get_potential(rho, self.bunch.line_charge_density)\n Ex, Ey = self.grid.gradient(-phi)\n self.fields[:, 0] = self.grid.interpolate(Ex, self.bunch.positions)\n self.fields[:, 1] = self.grid.interpolate(Ey, self.bunch.positions)",
"def electric_field(self, xyz):\n dxyz = self.vector_distance(xyz)\n r = spatial.repeat_scalar(self.distance(xyz))\n kr = self.wavenumber * r\n ikr = 1j * kr\n\n front_term = (\n (self.current * self.length) / (4 * np.pi * self.sigma * r**3) *\n np.exp(-ikr)\n )\n symmetric_term = (\n spatial.repeat_scalar(self.dot_orientation(dxyz)) * dxyz *\n (-kr**2 + 3*ikr + 3) / r**2\n )\n oriented_term = (\n (kr**2 - ikr - 1) *\n np.kron(self.orientation, np.ones((dxyz.shape[0], 1)))\n )\n return front_term * (symmetric_term + oriented_term)",
"def electric_field(self, xyz):\n\n xyz = check_xyz_dim(xyz)\n if np.any(xyz[..., -1] > 0):\n raise ValueError(\n f\"z value must be less than or equal to 0 in a halfspace, got {(xyz[..., -1])}\"\n )\n\n e = self._primary.electric_field(xyz) + self._image.electric_field(xyz)\n return e",
"def GenerateMagneticField(self, affectedParticle):\n return self.magneticField.GenerateField(affectedParticle)",
"def field ( self , xyz ) :\n return self._ilhcbmagnet.fieldVector ( xyz )",
"def _evaluate_electric(snapshot, params):\n positions = snapshot.particles.position\n charges = snapshot.particles.charge\n E_field = params\n energies = -charges * np.dot(positions, E_field)\n forces = np.outer(charges, E_field)\n return forces, energies",
"def electric_field(self, xyz_m, xyz_n=None):\n\n xyz_m = check_xyz_dim(xyz_m)\n if np.any(xyz_m[..., -1] > 0):\n raise ValueError(\n f\"z value must be less than or equal to 0 in a halfspace, got {(xyz_m[..., -1])}\"\n )\n\n if xyz_n is not None:\n xyz_n = check_xyz_dim(xyz_n)\n if np.any(xyz_n[..., -1] > 0):\n raise ValueError(\n f\"z value must be less than or equal to 0 in a halfspace, got {(xyz_n[..., -1])}\"\n )\n\n em = self._a.electric_field(xyz_m) - self._b.electric_field(xyz_m)\n\n if xyz_n is not None:\n en = self._a.electric_field(xyz_n) - self._b.electric_field(xyz_n)\n e = em - en\n return e\n else:\n return em",
"def compute_rf_field(self, r):\r\n\t\tE = np.zeros((3))\r\n\t\tfor nam, e in self.rf_electrode_list:\r\n\t\t\tE += e.compute_electric_field(r)\r\n\t\treturn E",
"def calcEnergy(self):\n speed_light = constants.physical_constants[\"speed of light in vacuum\"][0]#m/sec by default\n if self.mass is None:\n raise CoordinateVector(\"The particle mass needs to be specified to calculate the energy.\")\n return speed_light*math.sqrt(self.p*self.p + (self.mass*speed_light)**2)",
"def ee_radius_diffraction(self, energy=FIRST_AIRY_ENCIRCLED):\n return _inverse_analytic_encircled_energy(self.fno, self.wavelength, energy)",
"def F(self):\n return self.generic_getter(get_F_potential, \"F\", \"convert_energy\")",
"def external_field(self):\n # TODO: return curl(A) for non-homogeneous external_field\n A = self.external_vector_potential\n if A is not None:\n Ax, Ay = A\n # TODO: check expression below\n return (- np.diff(Ax, axis=1) * cfg.idy\n + np.diff(Ay, axis=0) * cfg.idx)\n else:\n return None",
"def particleCharge(self):\n return self.params['particleCharge']",
"def getValueFromFieldname(self,fieldname):\n if hasattr(self,fieldname): #Standard attributes.\n value = getattr(self,fieldname)\n if not isinstance(value,Cartesian3DVector):\n return value\n if fieldname == \"E\": #Interprets E as energy\n return self.getEnergy()\n momentum_direction = fieldname.replace(\"p\",\"\")\n velocity_direction = fieldname.replace(\"v\",\"\")\n if fieldname.startswith(\"p\") and momentum_direction in [\"x\",\"y\",\"z\"]:\n return getattr(self.p,momentum_direction)\n if fieldname.startswith(\"v\") and velocity_direction in [\"x\",\"y\",\"z\"]:\n return getattr(self.v,velocity_direction)\n elif fieldname in [\"x\",\"y\",\"z\"]:\n return getattr(self.x,fieldname)\n raise Exception(\"The given field, \"+fieldname+\", is not defined for the particle.\")",
"def energy(self):\n energy = -0.5*np.sum(self.phi)+0.5*np.sum(self.mass*np.sqrt(self.particles.momentum[:,0]**2+self.particles.momentum[:,1]**2)**2)\n return energy",
"def current_density(self, xyz):\n\n j = self.electric_field(xyz) / self.rho\n return j",
"def current_density(self, xyz):\n return self.sigma * self.electric_field(xyz)",
"def current_density(self, xyz):\n return self.sigma * self.electric_field(xyz)",
"def getEnergyEvolution(self):\n\n\t\tEBefore = [0.5*np.sum(i**2)/self.__Nparticles for i in self.__XPEnsembleBefore]\n\t\tEAfter = [0.5*np.sum(i**2)/self.__Nparticles for i in self.__XPEnsembleAfter]\n\n\t\treturn EBefore, EAfter",
"def getPeakMagneticField(self):\n return self.solenoid.getPeakMagneticField()",
"def value(self):\n nd1 = super().nd1()\n nd2 = super().nd2()\n f1 = nd1 * self.s\n f2 = nd2 * self.x * math.e ** (-self.rf * self.t)\n return f1 - f2",
"def potentialEnergy(self):\n return 0.5*(pdist(self.positions)**2).sum()",
"def energy_func(self):\n return (\n self.inl[0].m.val_SI * (\n self.outl[0].h.val_SI - self.inl[0].h.val_SI) +\n self.inl[1].m.val_SI * (\n self.outl[1].h.val_SI - self.inl[1].h.val_SI))",
"def __call__(self, x):\n\n self.dbeads.q = x\n e = self.dforces.pot # Energy\n g = -self.dforces.f # Gradient\n\n return e, g",
"def compute_magnetic_field(self, coords, params=None, basis=\"rpz\"):\n return self._scalar * self._field.compute_magnetic_field(coords, params, basis)",
"def get_E(self):\r\n return self.Real.E, self.Ideal.E",
"def get_E(self):\r\n return self.Real.E, self.Ideal.E",
"def energy_func(self):\n i = self.inl[0].to_flow()\n o = self.outl[0].to_flow()\n\n T_m = (T_mix_ph(i, T0=self.inl[0].T.val_SI) +\n T_mix_ph(o, T0=self.outl[0].T.val_SI)) / 2\n\n iam = (\n 1 - self.iam_1.val * abs(self.aoi.val) -\n self.iam_2.val * self.aoi.val ** 2)\n\n return (i[0] * (o[2] - i[2]) -\n self.A.val * (\n self.E.val * self.eta_opt.val * self.doc.val ** 1.5 * iam -\n (T_m - self.Tamb.val_SI) * self.c_1.val -\n self.c_2.val * (T_m - self.Tamb.val_SI) ** 2))",
"def current_density(self, xyz_m, xyz_n=None):\n\n j = self.electric_field(xyz_m, xyz_n=xyz_n) / self.rho\n return j"
] | [
"0.6565109",
"0.6523056",
"0.6255628",
"0.62081003",
"0.61325175",
"0.6086595",
"0.5954075",
"0.5868522",
"0.58501065",
"0.5768421",
"0.5700194",
"0.5639009",
"0.5562006",
"0.55513227",
"0.55293006",
"0.54763085",
"0.5419618",
"0.53979456",
"0.53979456",
"0.53847796",
"0.53787816",
"0.53736144",
"0.536788",
"0.53602344",
"0.53591824",
"0.53238064",
"0.5322031",
"0.5322031",
"0.5321132",
"0.5288567"
] | 0.72869897 | 0 |
Method that returns the magnetic field from the particle that affects another particle. | def GenerateMagneticField(self, affectedParticle):
return self.magneticField.GenerateField(affectedParticle) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def magnetic_field(self, xyz):\n dxyz = self.vector_distance(xyz)\n r = spatial.repeat_scalar(self.distance(xyz))\n kr = self.wavenumber * r\n ikr = 1j*kr\n\n front_term = (\n self.current * self.length / (4 * np.pi * r**2) * (ikr + 1) *\n np.exp(-ikr)\n )\n return -front_term * self.cross_orientation(dxyz) / r",
"def magnetisation(field):\n norm_field = df.Field(field.mesh, dim=1, value=(field.norm.array != 0))\n volume = df.integral(norm_field * df.dV, direction='xyz')\n return df.integral(field * df.dV / volume, direction='xyz')",
"def field ( self , xyz ) :\n return self._ilhcbmagnet.fieldVector ( xyz )",
"def magnetic_field(self, xyz):\n dxyz = self.vector_distance(xyz)\n r = spatial.repeat_scalar(self.distance(xyz))\n kr = self.wavenumber*r\n ikr = 1j*kr\n\n front_term = self.moment / (4. * np.pi * r**3) * np.exp(-ikr)\n symmetric_term = (\n spatial.repeat_scalar(self.dot_orientation(dxyz)) * dxyz *\n (-kr**2 + 3*ikr + 3) / r**2\n )\n oriented_term = (\n (kr**2 - ikr - 1) *\n np.kron(self.orientation, np.ones((dxyz.shape[0], 1)))\n )\n\n return front_term * (symmetric_term + oriented_term)",
"def attraction(self, other: Body) -> Vector:\n dist = self.position - other.position\n dist_modsq = dist.lensq\n dist_unit = dist / math.sqrt(dist_modsq) # Unit vector\n G = 6.674384e-11\n force_mod = G * self.mass * other.mass / dist_modsq\n return dist_unit * force_mod",
"def compute_magnetic_field(self, coords, params=None, basis=\"rpz\"):\n return self._scalar * self._field.compute_magnetic_field(coords, params, basis)",
"def getMagneticField(self, z):\n return float(self.solenoid.B_interp(z))",
"def compute_magnetic_field(self, coords, params={}, basis=\"rpz\"):",
"def gravitation_force(self, other):\n force = ((CONSTANTS.G * self.mass * other.mass) /\n (self.distance(other) ** 2))\n return force",
"def m_field(self):\n grad = np.gradient(self.A)\n\n B_x = grad[1] - grad[2]\n B_y = - grad[2] - grad[0]\n B_z = - grad[0] - grad[1]\n return (B_x, B_y, B_z)",
"def GenerateElectricField(self, affectedParticle):\n return self.electricField.GenerateField(affectedParticle)",
"def getPeakMagneticField(self):\n return self.solenoid.getPeakMagneticField()",
"def force(particle1, particle2):\n position1 = particle1.position\n position2 = particle2.position\n\n distance_12 = np.sqrt((position1.x - position2.x)**2 +\n (position1.y - position2.y)**2 +\n (position1.z - position2.z)**2)\n\n return G*particle1.mass*particle2.mass/distance_12**2",
"def getMagneticFieldMap(self):\n return self.solenoid.B_interp(self.z_array)",
"def getMagFlux(self):\n return self.magflux",
"def compute_magnetic_field(self, coords, params=None, basis=\"rpz\"):\n assert basis.lower() in [\"rpz\", \"xyz\"]\n if isinstance(coords, Grid):\n coords = coords.nodes\n coords = jnp.atleast_2d(coords)\n if basis == \"xyz\":\n coords = xyz2rpz(coords)\n bp = self._B0 * self._R0 / coords[:, 0]\n brz = jnp.zeros_like(bp)\n B = jnp.array([brz, bp, brz]).T\n if basis == \"xyz\":\n B = rpz2xyz_vec(B, phi=coords[:, 1])\n\n return B",
"def compute_magnetic_field(self, coords, params=None, basis=\"rpz\"):\n assert basis.lower() in [\"rpz\", \"xyz\"]\n if isinstance(coords, Grid):\n coords = coords.nodes\n coords = jnp.atleast_2d(coords)\n if basis == \"xyz\":\n coords = xyz2rpz(coords)\n bz = self._B0 * jnp.ones_like(coords[:, 2])\n brp = jnp.zeros_like(bz)\n B = jnp.array([brp, brp, bz]).T\n if basis == \"xyz\":\n B = rpz2xyz_vec(B, phi=coords[:, 1])\n\n return B",
"def compute_magnetic_field(self, coords, params=None, basis=\"rpz\"):\n assert basis.lower() in [\"rpz\", \"xyz\"]\n if isinstance(coords, Grid):\n coords = coords.nodes\n coords = jnp.atleast_2d(coords)\n if basis == \"xyz\":\n coords = xyz2rpz(coords)\n\n R, phi, Z = coords.T\n r = jnp.sqrt((R - self._R0) ** 2 + Z**2)\n theta = jnp.arctan2(Z, R - self._R0)\n br = -r * jnp.sin(theta)\n bp = jnp.zeros_like(br)\n bz = r * jnp.cos(theta)\n bmag = self._B0 * self._iota / self._R0\n B = bmag * jnp.array([br, bp, bz]).T\n if basis == \"xyz\":\n B = rpz2xyz_vec(B, phi=coords[:, 1])\n\n return B",
"def __call__(self, coords, params={}, basis=\"rpz\"):\n return self.compute_magnetic_field(coords, params, basis)",
"def Force_on_aircraft_in_body_reference_frame(m, V_B, V_dot_B, omega_B):\n return m * (V_dot_B + omega_B.cross(V_B))",
"def _get_deform_field_dm(self):\n self.deformationStrength = self.deformrandomstate.rand()\n adr = [w // d + 4 for w, d in zip(self.w, self.deform)]\n deformshape = [len(self.w)] + adr\n tmp = np.zeros([4] * (len(self.w) - 1) + [len(self.w)] + self.w)\n\n if np.isscalar(self.deformSigma):\n myDeformSigma = np.array(len(self.w), self.deformSigma)\n else:\n myDeformSigma = np.asarray(self.deformSigma)\n\n strngs = [self.deformrandomstate.normal(0, myDeformSigma[i], deformshape[1:]) * self.deformationStrength\n for i in range(len(myDeformSigma))]\n tdf = np.asarray(strngs, dtype=np.float32)\n\n if self.truncated_deform:\n upperBound = 3 * myDeformSigma\n for i in range(len(myDeformSigma)):\n overshoot_coordinates = np.where(np.abs(tdf[i]) > upperBound[i])\n while len(overshoot_coordinates[0]):\n tdf[i][overshoot_coordinates] = np.float32(self.deformrandomstate.normal(0, myDeformSigma[i], len(\n overshoot_coordinates[0])) * self.deformationStrength)\n overshoot_coordinates = np.where(np.abs(tdf[i]) > upperBound[i])\n\n # logging.getLogger('data').info('truncated deformation field')\n\n def cint(x, pnm1, pn, pnp1, pnp2):\n return 0.5 * (\n x * ((2 - x) * x - 1) * pnm1 + (x * x * (3 * x - 5) + 2) * pn + x * ((4 - 3 * x) * x + 1) * pnp1 + (\n x - 1) * x * x * pnp2)\n\n r = [np.asarray([x * 1.0 / self.deform[i] - x // self.deform[i] for x in range(self.w[i])]).reshape(\n [self.w[i] if t == i + 1 else 1 for t in range(len(self.w) + 1)]) for i in range(len(self.w))]\n d = [np.asarray([x // self.deform[i] for x in range(self.w[i])]).reshape(\n [self.w[i] if t == i else 1 for t in range(len(self.w))]) for i in range(len(self.w))]\n\n if len(self.w) == 3:\n for i in range(4):\n for j in range(4):\n xx = d[0] + i\n yy = d[1] + j\n zz = d[2] + 1\n tmp[i, j] = cint(r[2], tdf[:, xx, yy, zz - 1], tdf[:, xx, yy, zz], tdf[:, xx, yy, zz + 1],\n tdf[:, xx, yy, zz + 2])\n for i in range(4):\n tmp[i, 0] = cint(r[1], tmp[i, 0], tmp[i, 1], tmp[i, 2], tmp[i, 3])\n return cint(r[0], tmp[0, 0], tmp[1, 0], tmp[2, 0], tmp[3, 0])\n\n elif len(self.w) == 2:\n for j in range(4):\n xx = d[0] + j\n yy = d[1] + 1\n tmp[j] = cint(r[1], tdf[:, xx, yy - 1], tdf[:, xx, yy], tdf[:, xx, yy + 1], tdf[:, xx, yy + 2])\n return cint(r[0], tmp[0], tmp[1], tmp[2], tmp[3])\n\n else:\n raise Exception('only implemented for 2d and 3d case. feel free to contribute')",
"def compute_magnetic_field(self, coords, params=None, basis=\"rpz\"):\n assert basis.lower() in [\"rpz\", \"xyz\"]\n if isinstance(coords, Grid):\n coords = coords.nodes\n coords = jnp.atleast_2d(coords)\n if basis == \"xyz\":\n coords = xyz2rpz(coords)\n Rq, phiq, Zq = coords.T\n\n if (params is None) or (len(params) == 0):\n params = self._params\n r, p, z = coords.T\n funR = lambda x: self._potential(x, p, z, **params)\n funP = lambda x: self._potential(r, x, z, **params)\n funZ = lambda x: self._potential(r, p, x, **params)\n br = Derivative.compute_jvp(funR, 0, (jnp.ones_like(r),), r)\n bp = Derivative.compute_jvp(funP, 0, (jnp.ones_like(p),), p)\n bz = Derivative.compute_jvp(funZ, 0, (jnp.ones_like(z),), z)\n B = jnp.array([br, bp / r, bz]).T\n if basis == \"xyz\":\n B = rpz2xyz_vec(B, phi=coords[:, 1])\n return B",
"def magnetometer(self):\n self._mag[X] = twos_comp(self.i2c_bus.read_byte_data(self.addr, OUT_X_H_M) << 8 | \n self.i2c_bus.read_byte_data(self.addr, OUT_X_L_M), 16)\n self._mag[Y] = twos_comp(self.i2c_bus.read_byte_data(self.addr, OUT_Y_H_M) << 8 | \n self.i2c_bus.read_byte_data(self.addr, OUT_Y_L_M), 16)\n self._mag[Z] = twos_comp(self.i2c_bus.read_byte_data(self.addr, OUT_Z_H_M) << 8 | \n self.i2c_bus.read_byte_data(self.addr, OUT_Z_L_M), 16)\n\n return vector(self._mag)",
"def magnetic_flux(self, *args):\n\t\tarea = self.area(*args)\n\t\tfield = self.los_corr(*args)\n\t\tif isinstance(args[0], np.ndarray):\n\t\t\tself.mgnt_flux = area*field\n\t\treturn area*field",
"def get_force(self):\n # @todo: Probably need to check the state of the landing gear for this (e.g. are they on the track?).\n # Note: you can get the state of the landing gear by going through self.sim \n return 0.0",
"def get_M(self):\n return self.get_par('MORB')",
"def compute_magnetic_field(self, coords, params=None, basis=\"rpz\"):\n if params is None:\n params = [None] * len(self._fields)\n if isinstance(params, dict):\n params = [params]\n B = 0\n for i, field in enumerate(self._fields):\n B += field.compute_magnetic_field(coords, params[i % len(params)], basis)\n return B",
"def calcMagneticFieldMap(self):\n # Normalised b-field (note lower case)\n self.solenoid.calcMagneticFieldMap()\n self.b = lambda z: self.solenoid.B_interp(z) * -e / (2 * m * c)\n self.calc_level = CALC_B_MAP",
"def magnetic_tension(self, method='spectral'):\n import numpy as np\n gradB, B = self.magnetic_gradient_tensor(method=method, return_B=True)\n F = np.zeros_like(B)\n for i in range(3):\n for j in range(3):\n F[j] += B[i] * gradB[i,j]\n return F",
"def m2(self):\n return self.mass[1]"
] | [
"0.67325306",
"0.65006876",
"0.64733946",
"0.64133286",
"0.61525905",
"0.6089795",
"0.594535",
"0.5899009",
"0.58383423",
"0.5798657",
"0.5736236",
"0.57189417",
"0.5650898",
"0.5645273",
"0.5644613",
"0.55965346",
"0.5590425",
"0.5575774",
"0.5534189",
"0.5524397",
"0.5512045",
"0.55114114",
"0.5504112",
"0.54844147",
"0.54765743",
"0.5461062",
"0.54220355",
"0.5379867",
"0.5334562",
"0.53335094"
] | 0.6843076 | 0 |
Returns the initialized component manager. This is used as FastAPI dependency and called for every request. | def get_component_manager(
token: str = Depends(get_api_token),
) -> ComponentOperations:
session = BaseUrlSession(base_url=CONTAXY_API_ENDPOINT)
session.headers = {"Authorization": f"Bearer {token}"}
return ComponentClient(session) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_manager():\n\n return multiprocessing.Manager()",
"def GetManager(self):\r\n\r\n return self.manager",
"def get_manager():\n return __manager__",
"def getManager(self):\n return self._manager",
"def core(self):\n return CoreManager(self)",
"def manager(self):\n if not self._manager:\n self._manager = TwistedEventLoopManager()\n\n return self._manager",
"def manager(self):\n if not self._manager:\n self._manager = TwistedEventLoopManager()\n\n return self._manager",
"def cluster_manager(self):\n # Lazily instantiate the cluster manager the first time it is asked for.\n if not hasattr(self, '_cluster_manager'):\n if self._cluster_engine:\n self._cluster_manager = self._cluster_engine.create_manager(\n self._username,\n self._tenancy\n )\n else:\n self._cluster_manager = None\n # If there is still no cluster manager, clusters are not supported\n if not self._cluster_manager:\n raise errors.UnsupportedOperationError(\n 'Clusters are not supported for this tenancy.'\n )\n return self._cluster_manager",
"def get_instance(cls):\n global FW_MANAGER_API\n if not FW_MANAGER_API:\n FW_MANAGER_API = cls()\n return FW_MANAGER_API",
"def getServiceManager( cHost=\"localhost\", cPort=\"2002\" ):\n global goServiceManager\n global pythonloader\n if not goServiceManager:\n # Get the uno component context from the PyUNO runtime\n oLocalContext = uno.getComponentContext()\n # Create the UnoUrlResolver on the Python side.\n\n goServiceManager=oLocalContext.ServiceManager\n\n return goServiceManager",
"def modules(self):\n return ModuleManager(self)",
"def getAPIsManager(self):\n return self.apisManager",
"def get_manager(api_version=None):\n from manager import get_keystone_manager\n return get_keystone_manager(get_local_endpoint(), get_admin_token(),\n api_version)",
"def create_allcomponents(self):\n\n # we store all components in a list/hash which we iterate for startup/shutdown/dumps debugging, and which can be used to lookup components\n self.components = MDictList()\n\n # setup log manager helper early so that log manager can receive messages (and queue them until startup)\n self.createappendcomp('logmanager', mlogger.MewloLogManager)\n\n # now update site state (log manager should catch this)\n self.set_statelabel(mconst.DEF_SITESTATE_INITIALIZE_START)\n\n # create (non-db-persistent) site settings -- these are set by configuration at runtime\n self.settings = self.createappendcomp('settings', MewloSettings)\n\n # database manager\n self.createappendcomp('dbmanager', mdbmanager_sqlalchemy.MewloDatabaseManagerSqlA)\n\n # component registry\n self.createappendcomp('registrymanager', mregistry.MewloRegistryManager)\n\n # signal dispatcher\n self.createappendcomp('signalmanager', msignal.MewloSignalManager)\n\n # rbac permission manager\n self.createappendcomp('rbacmanager', mrbac.MewloRbacManager)\n\n # create persistent(db) pack settings\n self.createappendcomp('packsettings', mdbsettings_pack.MewloSettingsDb_Pack)\n\n # collection of mewlo addon packs\n self.createappendcomp('packmanager', mpackmanager.MewloPackManager)\n\n # site addon manager\n #self.createappendcomp('siteaddonmanager', msiteaddon.MewloSiteAddonManager)\n\n # route manager\n self.createappendcomp('routemanager', mroute.MewloRouteManager)\n\n # navnode manager\n self.createappendcomp('navnodemanager', mnav.NavNodeManager)\n\n # template manager\n self.createappendcomp('templatemanager', mtemplate.MewloTemplateManager)\n\n # asset and alias manager\n self.createappendcomp('assetmanager', massetmanager.MewloAssetManager)\n\n # template helper (this is available inside template/views and provides helper functions like navigation menus, etc.)\n self.createappendcomp('templatehelper', mtemplatehelper.MewloTemplateHelper)\n\n # session manager\n self.createappendcomp('sessionmanager', msessionmanager.MewloSessionManager)\n\n # verification manager\n self.createappendcomp('verificationmanager', mverificationmanager.MewloVerificationManager)\n\n # user manager\n self.createappendcomp('usermanager', musermanager.MewloUserManager)\n\n # mail manager\n self.createappendcomp('mailmanager', mmailmanager.MewloMailManager)",
"def new_manager() -> SyncManager:\n return Manager()",
"def petsc_manager():\n return PetscManager()",
"def do_component_init(self):\n logger.debug(\"RwdtstaskletPython: do_component_init function called\")\n component_handle = RwTaskletPlugin.ComponentHandle()\n return component_handle",
"def plugins_get_mgr():\n global pluginmgr\n return pluginmgr",
"def get_collection_manager(self, *args, **kwargs):\n return CollectionManager(self, *args, **kwargs)",
"def get_entity_manager(self):\n return self.game.entity_manager",
"def _init_component(self):\n setup_info = self._serializer.read_msg()\n\n pid = os.getpid()\n self._serializer.send_msg({'pid': pid})\n self._create_pidfile(setup_info['pidDir'], pid)\n\n return StormConfig(setup_info['conf']), setup_info['context']",
"def name(self):\n return \"component_manager\"",
"def factory_manager():\n global _FACTORY_MANAGER\n\n if _FACTORY_MANAGER:\n return _FACTORY_MANAGER\n\n _FACTORY_MANAGER = Factories()\n\n return _FACTORY_MANAGER",
"def get_instance(cls):\n global DNS_MANAGER_API\n if not DNS_MANAGER_API:\n DNS_MANAGER_API = cls()\n return DNS_MANAGER_API",
"def _configure_manager(self):\n self._manager = CloudDatabaseManager(self,\n resource_class=CloudDatabaseInstance, response_key=\"instance\",\n uri_base=\"instances\")\n self._flavor_manager = BaseManager(self,\n resource_class=CloudDatabaseFlavor, response_key=\"flavor\",\n uri_base=\"flavors\")\n self._backup_manager = CloudDatabaseBackupManager(self,\n resource_class=CloudDatabaseBackup, response_key=\"backup\",\n uri_base=\"backups\")",
"def get_extension_manager(self):\n return get_extension_manager()",
"def pm(self) -> ControllerPropertyManager:\n return self._component_pm",
"def setup_component(self):\n self.conf, self.context = self._init_component()\n self.initialize()",
"def load(self):\n # Proceed only if singleton instance has been created\n if self.initialized:\n # The cache manager will work on manifest and cache tasks on an\n # in-process basis as load() is only called during startup from\n # the server process.\n if self.is_server_process:\n # Remove all existing manifest files from previous processes\n self._remove_all_manifest_files()\n\n # Start the watchdog if it's not alive, prevents redundant starts\n if not self.observer.is_alive():\n self.observer.start()\n\n # Fetch all component catalog instances and trigger their add to the\n # component cache if this is not already happening (it seems some server\n # test fixtures could be loading the server extensions multiple times).\n if not self.cache_manager.is_refreshing():\n self.refresh()",
"def getServiceManager( cHost=\"localhost\", cPort=\"8100\" ):\n global goServiceManager\n if not goServiceManager:\n # Get the uno component context from the PyUNO runtime\n oLocalContext = uno.getComponentContext()\n # Create the UnoUrlResolver on the Python side.\n oLocalResolver = oLocalContext.ServiceManager.createInstanceWithContext(\n \"com.sun.star.bridge.UnoUrlResolver\", oLocalContext )\n # Connect to the running OpenOffice.org and get its context.\n oContext = oLocalResolver.resolve( \"uno:socket,host=\" + cHost + \",port=\" + cPort + \";urp;StarOffice.ComponentContext\" )\n # Get the ServiceManager object\n goServiceManager = oContext.ServiceManager\n return goServiceManager"
] | [
"0.68361783",
"0.6697513",
"0.66576326",
"0.660257",
"0.6231092",
"0.61051804",
"0.61051804",
"0.6104129",
"0.5990772",
"0.5973677",
"0.5936767",
"0.5933692",
"0.588824",
"0.5797466",
"0.57823783",
"0.5773982",
"0.5772796",
"0.57626957",
"0.5757743",
"0.57147866",
"0.5693136",
"0.56847143",
"0.56443584",
"0.5634734",
"0.5628475",
"0.5609985",
"0.55819297",
"0.5572467",
"0.557196",
"0.5566214"
] | 0.69490683 | 0 |
Get a string for the status overview of the pool and nodes. | def get_pool_overview_string(self, mission):
# get statuses
pool_status, allocation_status, node_status = self.get_pool_status(mission)
s = "Pool status: {}\n".format(pool_status)
s += "Allocation status: {}".format(allocation_status)
if pool_status != "N/A":
other = sum(node_status.values()) - node_status["idle"] - \
node_status["running"] - node_status["unusable"]
s += "\n"
s += "Node status: "
s += "{} idle; ".format(node_status["idle"])
s += "{} running; ".format(node_status["running"])
s += "{} unusable; ".format(node_status["unusable"])
s += "{} other;".format(other)
return s | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def status_str(self, spaced=False):\n if self.args.vverbose:\n ## Print profile of all nodes\n status = self.pool.status(string=True)\n\n elif self.args.verbose:\n ## Print profile of usable nodes\n status = self.pool.status(min_state=PLNodeState.usable, string=True)\n\n else:\n ## Print list of usable nodes\n attribute = \"name\" if self.args.names else \"addr\"\n nodes = self.pool._get(attribute, min_state=PLNodeState.usable)\n if len(nodes) > 0:\n status = \"\\n\".join(nodes)+\"\\n\"\n else:\n status = \"No usable node found.\\n\"\n\n return status",
"def status(self):\n \n tmpl1 = \"\"\"%-20s%-52s[%s]\"\"\"\n tmpl2 = \"\"\"%-20s%-52s\\n\"\"\"\n # print tmpl1 % (\"Machine Name\", \"IP Addresses\", \"Status\")\n # print 80 * \"-\"\n # print self.get_image()\n if self.cloudserver:\n # let's build the IPs first\n status = self.cloudserver.status\n \n else:\n status = \"OFF\"\n\n res2=\"\"\n ip1 = \"%s:%s\" % (self.networks[0], self.ip_addresses[self.networks[0]])\n if len(self.networks) > 1:\n res2 += \"\\n\"\n for network in self.networks[1:]:\n ipstr = \"%s:%s\" % (network, self.ip_addresses[network])\n res2+=tmpl2 % (\"-\", ipstr)\n # print res2\n # if len(self.ip_addresses.keys()) > 1:\n # ip1 = self.ip_addresses.values()[0]\n res1 = tmpl1 % (self.machine_name, ip1, status)\n return res1 + res2",
"def status(ctx):\n return show_network_status()",
"def get_pool_status():\n pools_status = split_status_pools(fork_and_get_output(\"zpool status\".split()))\n pools = []\n for p in pools_status:\n pools.append(status.PoolStatus(p))\n return pools",
"def printStatus(self):\n output = StringIO.StringIO()\n # use a csv writer to write out each row\n writer = csv.writer(output, lineterminator = '\\n')\n \n # write the header\n writer.writerow(['Server','Ping Interval','Status'])\n \n # write out the online servers\n for server, interval in self.online_servers.iteritems():\n writer.writerow([server, interval[1], 'Online'])\n \n # write out the offline servers\n for server, interval in self.offline_servers.iteritems():\n writer.writerow([server, interval[1], 'Offline'])\n \n return output.getvalue()",
"def get_pool_status(self, mission):\n\n # initialize node status\n states = dict(\n idle=0, rebooting=0, reimaging=0, running=0, unusable=0, creating=0,\n starting=0, waiting_for_start_task=0, start_task_failed=0, unknown=0,\n leaving_pool=0, offline=0, preempted=0)\n\n # if the pool does not exist\n if not self.batch_client.pool.exists(pool_id=mission.pool_name):\n return \"N/A\", \"N/A\", states\n\n # get pool info\n the_pool = self.batch_client.pool.get(pool_id=mission.pool_name)\n state = the_pool.state.name\n allocation_state = the_pool.allocation_state.name\n\n # get the list of node at current time point\n # we check the existance of the pool again to avoid coincidence\n if self.batch_client.pool.exists(pool_id=mission.pool_name):\n node_list = self.batch_client.compute_node.list(\n pool_id=mission.pool_name)\n\n # calculate the number of nodes in each status\n for node in node_list:\n states[node.state.name] += 1\n node_list.reset()\n\n return state, allocation_state, states",
"def status(self):\n logging.debug(\"%s entered status\" % self)\n # print_config(self.infra)\n # print self.images\n # headers = [\"Machine Name\", \"Flavor\", \"IP Addresses\", \"Image Name\", \"Status\"]\n # pt = prettytable.PrettyTable(headers)\n # pt.align[\"Machine Name\"]=\"l\"\n # pt.align[\"IP Addresses\"] = \"l\"\n # pt.align[\"Image Name\"] = \"l\"\n # pt.align[\"Status\"] = \"r\"\n \n print \"Checking status of %s\" % self.footprint_name\n # tmpl = \"%(machine_name)-20s%(flavor)5s%(status)-30s\"\n tmpl1 = \"\"\"%-20s%-52s[%s]\"\"\"\n tmpl2 = \"\"\"%-20s%-60s\\n\"\"\"\n print tmpl1 % (\"Machine Name\", \"IP Addresses\", \"Status\")\n print 80 * \"-\"\n \n for machine in self.machines.keys():\n m = self.machines[machine]\n # machine_name = m.machine_name\n # ips = str(m.ip_addresses)\n # flavor = str(m.flavor)\n # img = str(m.image_id)\n # status = str(m.status)\n # pt.add_row([m, ips, status, img, status])\n # print \"FFF\", m, ips, flavor, img, status\n # print tmpl % locals()\n print m.status\n \n return \"%s is currently: %s\" % (self.footprint_name, self.footprint_status)",
"def detailed_status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"detailed_status\")",
"def detailed_status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"detailed_status\")",
"def detailed_status(self) -> str:\n return pulumi.get(self, \"detailed_status\")",
"def status(self):\n if Daemon.status(self) != 0:\n return 1\n \n # Load decoy logger\n self.load_outputs(decoy=True)\n\n # Load node pool & print status\n try:\n self.pool = PLNodePool(self)\n sys.stdout.write(self.status_str())\n except PLNodePoolException:\n sys.stdout.write(\"No node found.\\n\")\n\n return 0",
"def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")",
"def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")",
"def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")",
"def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")",
"def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")",
"def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")",
"def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")",
"def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")",
"def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")",
"def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")",
"def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")",
"def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")",
"def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")",
"def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")",
"def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")",
"def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")",
"def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")",
"def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")",
"def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")"
] | [
"0.78566015",
"0.72541404",
"0.690066",
"0.68025947",
"0.67186147",
"0.6582966",
"0.65206933",
"0.65195024",
"0.65195024",
"0.6434992",
"0.64222366",
"0.6364477",
"0.6364477",
"0.6364477",
"0.6364477",
"0.6364477",
"0.6364477",
"0.6364477",
"0.6364477",
"0.6364477",
"0.6364477",
"0.6364477",
"0.6364477",
"0.6364477",
"0.6364477",
"0.6364477",
"0.6364477",
"0.6364477",
"0.6364477",
"0.6364477"
] | 0.7833251 | 1 |
Get a string for the status overview of the job and tasks. | def get_job_overview_string(self, mission):
# get statuses
job_status, task_status = self.get_job_status(mission)
s = "Job status: {}".format(job_status)
if job_status != "N/A":
s += "\n"
s += "Tasks status: "
s += "{} active; ".format(task_status["active"])
s += "{} running; ".format(task_status["running"])
s += "{} succeeded; ".format(task_status["succeeded"])
s += "{} failed;".format(task_status["failed"])
return s | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_job_status(self):\n total_hits = session.query(BoxHit).filter_by(training_job_id=self.id).count()\n num_hits_left = session.query(BoxHit).filter_by(training_job_id=self.id, outstanding=True).count()\n total_urls = self.num_urls\n num_urls_left = session.query(VideoTrainingURL).filter_by(job=self, processed=False).count()\n faces_obtained = MTurkBox.query.filter_by(label=self.evaluator.target_label, result=True).count()\n return '\\n'.join([\n '------------- Stats for Job ID: %s -------------' % str(self.id) ,\n 'Job for Label : %s' % self.label.name,\n 'Total URLs : %d' % total_urls,\n 'Total HITs : %d' % total_hits,\n 'unprocessed URLS : %d' % num_urls_left,\n 'outstanding Hits : %d' % num_hits_left,\n 'Job Finish Status : %s' % self.finished,\n 'Faces Obtained : %d' % faces_obtained,\n ]) + '\\n'",
"def detailed_status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"detailed_status\")",
"def detailed_status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"detailed_status\")",
"def detailed_status(self) -> str:\n return pulumi.get(self, \"detailed_status\")",
"def get_status_as_string(self):\n if self.downloaded == 0:\n return \"[Starting... ]\"\n return \"[%s, %s, %s]\" % self.get_status()",
"def task_status(self) -> str:\n return self._task_status",
"def status(self) -> str:\n return self._check_job_status()",
"def _get_status(self):\n return u'%s' % (self.get_status_display())",
"def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")",
"def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")",
"def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")",
"def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")",
"def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")",
"def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")",
"def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")",
"def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")",
"def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")",
"def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")",
"def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")",
"def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")",
"def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")",
"def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")",
"def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")",
"def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")",
"def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")",
"def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")",
"def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")",
"def get_status(self, render_via_ajax):\r\n ugettext = self.system.service(self, \"i18n\").ugettext\r\n status_list = []\r\n current_task_human_name = \"\"\r\n for i in xrange(0, len(self.task_xml)):\r\n human_task_name = self.extract_human_name_from_task(self.task_xml[i])\r\n human_task_name = ugettext(human_task_name)\r\n # Extract the name of the current task for screen readers.\r\n if self.current_task_number == i:\r\n current_task_human_name = human_task_name\r\n task_data = {\r\n 'task_number': i + 1,\r\n 'human_task': human_task_name,\r\n 'current': self.current_task_number == i\r\n }\r\n status_list.append(task_data)\r\n\r\n context = {\r\n 'status_list': status_list,\r\n 'grader_type_image_dict': GRADER_TYPE_IMAGE_DICT,\r\n 'legend_list': LEGEND_LIST,\r\n 'render_via_ajax': render_via_ajax,\r\n 'current_task_human_name': current_task_human_name,\r\n }\r\n status_html = self.system.render_template(\r\n \"{0}/combined_open_ended_status.html\".format(self.TEMPLATE_DIR), context\r\n )\r\n\r\n return status_html",
"def status(self) -> str:\n return pulumi.get(self, \"status\")",
"def status(self) -> str:\n return pulumi.get(self, \"status\")"
] | [
"0.74661416",
"0.7176071",
"0.7176071",
"0.71116614",
"0.7109996",
"0.7087766",
"0.7026905",
"0.69023955",
"0.6890844",
"0.6890844",
"0.6890844",
"0.6890844",
"0.6890844",
"0.6890844",
"0.6890844",
"0.6890844",
"0.6890844",
"0.6890844",
"0.6890844",
"0.6890844",
"0.6890844",
"0.6890844",
"0.6890844",
"0.6890844",
"0.6890844",
"0.6890844",
"0.6890844",
"0.68682194",
"0.6858241",
"0.6858241"
] | 0.7895506 | 0 |
Get the status of a mission's storage container. | def get_storage_container_status(self, mission):
if self.storage_client.exists(container_name=mission.container_name):
return "available"
# TODO: calculate space used in the container
return "N/A" | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_storage_container_overview_string(self, mission):\n\n status = self.get_storage_container_status(mission)\n s = \"Storage container status: {}\".format(status)\n return s",
"def container_status(self) -> str:\n return pulumi.get(self, \"container_status\")",
"def container_status(self):\n if self.status == 'complete':\n return 'complete'\n try:\n task_status = self._ecs.describe_tasks(tasks=[self.name])['tasks'][0]['lastStatus']\n return task_status\n except (IndexError, ClientError):\n return 'STOPPED'",
"def getContainerStatus(self,node,vmid):\n data = self.connect('get','nodes/%s/lxc/%s/status/current' % (node,vmid),None)\n return data",
"def status(self) -> Optional[pulumi.Input['StorageSpacesPropertiesStatusArgs']]:\n return pulumi.get(self, \"status\")",
"def get_cont_stat(self, path, request_from_updater = False):\n try:\n self.logger.debug('Called get container stat interface of library')\n container_stat_obj = ContainerStatWithStatus()\n self.__get_container_stat(path, container_stat_obj, request_from_updater)\n status = container_stat_obj.get_return_status()\n self.logger.info(('Status from container library comes '\n 'out to be: %(status)s'),\n {'status' : status})\n if status == OsdExceptionCode.OSD_INTERNAL_ERROR:\n self.logger.debug('Internal error raised from library')\n return HTTPInternalServerError\n elif status == OsdExceptionCode.OSD_FILE_OPERATION_ERROR:\n self.logger.debug('File operatiopn error raised from library')\n return HTTPInternalServerError\n elif status == OsdExceptionCode.OSD_NOT_FOUND:\n self.logger.debug('File not found error raised from library')\n return HTTPNotFound\n else:\n pass\n cont_stat = container_stat_obj.container_stat\n return {'account' : cont_stat.account, \\\n 'container' : cont_stat.container, \\\n 'created_at' : cont_stat.created_at, \\\n 'put_timestamp' : cont_stat.put_timestamp , \\\n 'delete_timestamp' : cont_stat.delete_timestamp, \\\n 'object_count' : cont_stat.object_count, \\\n 'bytes_used' : cont_stat.bytes_used, \\\n 'hash' : cont_stat.hash, 'id' : cont_stat.id, \\\n 'status' : cont_stat.status, \\\n 'status_changed_at' : cont_stat.status_changed_at, \\\n 'metadata' : cont_stat.metadata}\n except Exception as err:\n self.logger.exception(err)\n raise err",
"def get_storage(isamAppliance, statistics_duration, check_mode=False, force=False):\n return isamAppliance.invoke_get(\n \"Retrieving the Storage Usage Statistics\",\n \"/statistics/systems/storage.json{0}\".format(\n tools.create_query_string(\n timespan=statistics_duration)),requires_model=requires_model)",
"def storage_bytes_status(self) -> str:\n return pulumi.get(self, \"storage_bytes_status\")",
"def get_job_status(self, mission):\n\n # initialize task status\n status = dict(active=0, running=0, succeeded=0, failed=0)\n\n # get job status if it exists. Otherwise, return N/A\n try:\n the_job = self.batch_client.job.get(job_id=mission.job_name)\n\n # get counts of tasks in different statuses\n status_counts = self.batch_client.job.get_task_counts(mission.job_name)\n except azure.batch.models.BatchErrorException as err:\n if err.message.value.startswith(\"The specified job does not exist\"):\n return \"N/A\", status\n # raise an exception for other kinds of errors\n raise\n\n # update the dictionary\n status[\"active\"] = status_counts.active\n status[\"running\"] = status_counts.running\n status[\"succeeded\"] = status_counts.succeeded\n status[\"failed\"] = status_counts.failed\n\n return the_job.state.name, status",
"def moc_storage_container(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"moc_storage_container\")",
"def __get_container_stat(self, path, container_stat_obj, request_from_updater = False):\n try:\n self.logger.debug('Get container interface called')\n self.asyn_helper.call(\"get_container_stat\", path, container_stat_obj, request_from_updater)\n except Exception as err:\n self.logger.error(('get_container_stat for %(con_dir)s failed '\n 'close failure: %(exc)s : %(stack)s'),\n {'con_dir' : path,\n 'exc': err, 'stack': ''.join(traceback.format_stack())})\n raise err",
"def get_details(self):\n status = []\n for key, container in self.containers.items():\n container.details = container.daemon.connection.inspect_container(self.config['release_name'])\n status.append(container.details)\n return status",
"def get_pool_status(self, mission):\n\n # initialize node status\n states = dict(\n idle=0, rebooting=0, reimaging=0, running=0, unusable=0, creating=0,\n starting=0, waiting_for_start_task=0, start_task_failed=0, unknown=0,\n leaving_pool=0, offline=0, preempted=0)\n\n # if the pool does not exist\n if not self.batch_client.pool.exists(pool_id=mission.pool_name):\n return \"N/A\", \"N/A\", states\n\n # get pool info\n the_pool = self.batch_client.pool.get(pool_id=mission.pool_name)\n state = the_pool.state.name\n allocation_state = the_pool.allocation_state.name\n\n # get the list of node at current time point\n # we check the existance of the pool again to avoid coincidence\n if self.batch_client.pool.exists(pool_id=mission.pool_name):\n node_list = self.batch_client.compute_node.list(\n pool_id=mission.pool_name)\n\n # calculate the number of nodes in each status\n for node in node_list:\n states[node.state.name] += 1\n node_list.reset()\n\n return state, allocation_state, states",
"def status(self, name=None):\n volume_info = self.cm.find_name(name)\n if volume_info:\n status = volume_info[0]['State']\n else:\n Console.error(\"volume is not existed\")\n return volume_info",
"def get_status(self):\n try:\n c = self._oc_command([\"status\"])\n o = run_cmd(c, return_output=True)\n for line in o.split('\\n'):\n logger.debug(line)\n return o\n except subprocess.CalledProcessError as ex:\n raise ConuException(\"Cannot obtain OpenShift cluster status: %s\" % ex)",
"def status(self) -> pulumi.Output['outputs.VirtualHardDiskStatusResponse']:\n return pulumi.get(self, \"status\")",
"def get_status(self):\n if self.vm.get_cloud_status() != \"ACTIVE\":\n return \"stopped\"\n #wait for the vm to be ready and SSH-able\n self.vm.wait_ready()\n status = self.vm.run_command(\"ctool status\", indent=0, prefix='')\n return status.strip()",
"def getStorageElementStatus( self, elementName, statusType = None, default = None ):\n\n if self.__getMode():\n # We do not apply defaults. If is not on the cache, S_ERROR is returned.\n return self.__getRSSStorageElementStatus( elementName, statusType )\n else:\n return self.__getCSStorageElementStatus( elementName, statusType, default )",
"def _get_status(self):\n if self._state in [\"processed\", \"error\"]:\n return self._state\n \n get_resp = requests.get(self.location, cookies={\"session\": self.session})\n\n self._state = get_resp.json()[\"status\"]\n self.slice_time = get_resp.json()[\"slice_time\"]\n \n return self._state",
"def status(self):\n return self._get(path='status')",
"def get_storage(id):\n url = f\"{BCD_URL}/contract/{NETWORK}/{id}/storage?size=10\"\n js = load_json(url)\n storage = get_storage_internal(js['children'])\n print(storage)\n return storage",
"def status(self):\n self.scion_sh('status')",
"def mesos_status(self, submissionId):\n get_tasks = self.driver.getTasks()['get_tasks']\n task_state = None\n\n tasks = get_tasks['tasks'] + get_tasks.get('completed_tasks')\n tasks_list = list(filter(lambda x: x['task_id']['value'] == submissionId, tasks))\n if len(tasks_list) > 0:\n task = tasks_list[0]\n task_state = task['state']\n self._log.debug(\"Task state = \" + task_state)\n else:\n self._log.debug(\"Task not found\")\n\n return task_state",
"def get_status(self, job_id):\n\n result = self.redis.get('job_status:' + str(job_id))\n return pickle.loads(result) if result else None",
"def put_container(self, filesystem, acc_dir, cont_dir, \\\n account, container, metadata, req):\n try:\n # create path\n path = self.create_path(filesystem, acc_dir, cont_dir, account, container)\n # Remove this after container library update\n self.logger.debug(('PUT container called for path: %(path)s'),\n {'path' : path})\n if not os.path.exists(path):\n os.makedirs(path)\n timestamp = normalize_timestamp(req.headers['x-timestamp'])\n created_at = normalize_timestamp(time.time())\n # create container stat object\n cont_stat = ContainerStat(account, container, created_at, \\\n timestamp, '0', 0, 0, '', str(uuid4()), 'ADDED', '', metadata)\n\t #get component number\n\t component_name = req.headers['x-component-number']\n # call container library to create container\n status_obj = self.__create_cont(path, filesystem, cont_stat, component_name)\n status = status_obj.get_return_status()\n self.logger.info(('Status from container library comes '\n 'out to be: %(status)s'),\n {'status' : status})\n return status, cont_stat\n except Exception as err:\n self.logger.error(('PUT request failed for account/container:'\n ' %(account)s/%(container)s '\n 'close failure: %(exc)s : %(stack)s'),\n {'account' : account, 'container' : container,\n 'exc': err, 'stack': ''.join(traceback.format_stack())})\n raise err",
"def get_job_status(parent_pid, heart_pid):\n status_container = {}\n if parent_pid != -1:\n status_container[\"memory\"] = get_memory_usage(parent_pid, heart_pid)\n status_container[\"cpu_load\"] = get_cpu_load(parent_pid, heart_pid)\n return status_container",
"def disk_encryption_status(self) -> 'outputs.DiskEncryptionStatusResponse':\n return pulumi.get(self, \"disk_encryption_status\")",
"def storage_get(context, storage_id):\n return _storage_get(context, storage_id)",
"def storage_detail(self, storage_id):\n response = self.session.get(self.get_url('newStorageAPI.do'), params={\n 'op': 'getStorageInfo_sacolar',\n 'storageId': storage_id\n })\n\n data = json.loads(response.content.decode('utf-8'))\n return data",
"def lift_container(self) -> TaskStatus:\n\n status, object_id = self._go_to_and_lift(object_ids=self.container_ids, object_type=\"container\",\n stopping_distance=0.3)\n return status"
] | [
"0.7662295",
"0.68757343",
"0.65628034",
"0.61149687",
"0.61096156",
"0.60097855",
"0.59428936",
"0.5903649",
"0.58938575",
"0.5616815",
"0.560521",
"0.5599396",
"0.55950266",
"0.5577565",
"0.55629945",
"0.556032",
"0.5544695",
"0.5521543",
"0.5521207",
"0.551155",
"0.55082923",
"0.54887813",
"0.5452498",
"0.5414804",
"0.53947043",
"0.5363504",
"0.5355417",
"0.5353225",
"0.53408164",
"0.5328964"
] | 0.85038316 | 0 |
Get a string for the status of the storage container. | def get_storage_container_overview_string(self, mission):
status = self.get_storage_container_status(mission)
s = "Storage container status: {}".format(status)
return s | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def container_status(self) -> str:\n return pulumi.get(self, \"container_status\")",
"def storage_bytes_status(self) -> str:\n return pulumi.get(self, \"storage_bytes_status\")",
"def status(self) -> str:\n return pulumi.get(self, \"status\")",
"def status(self) -> str:\n return pulumi.get(self, \"status\")",
"def status(self) -> str:\n return pulumi.get(self, \"status\")",
"def status(self) -> str:\n return pulumi.get(self, \"status\")",
"def status(self) -> str:\n return pulumi.get(self, \"status\")",
"def status(self) -> str:\n return pulumi.get(self, \"status\")",
"def status(self) -> str:\n return pulumi.get(self, \"status\")",
"def status(self) -> str:\n return pulumi.get(self, \"status\")",
"def status(self) -> str:\n return pulumi.get(self, \"status\")",
"def status(self) -> str:\n return pulumi.get(self, \"status\")",
"def status(self) -> str:\n return pulumi.get(self, \"status\")",
"def status(self) -> str:\n return pulumi.get(self, \"status\")",
"def status(self) -> str:\n return pulumi.get(self, \"status\")",
"def status(self) -> str:\n return pulumi.get(self, \"status\")",
"def status(self) -> str:\n return pulumi.get(self, \"status\")",
"def status(self) -> str:\n return pulumi.get(self, \"status\")",
"def status(self) -> str:\n return pulumi.get(self, \"status\")",
"def status(self) -> str:\n return pulumi.get(self, \"status\")",
"def status(self) -> str:\n return pulumi.get(self, \"status\")",
"def status(self) -> str:\n return pulumi.get(self, \"status\")",
"def status(self) -> str:\n return pulumi.get(self, \"status\")",
"def status(self) -> str:\n return pulumi.get(self, \"status\")",
"def status(self) -> str:\n return pulumi.get(self, \"status\")",
"def status(self) -> str:\n return pulumi.get(self, \"status\")",
"def status(self) -> str:\n return pulumi.get(self, \"status\")",
"def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")",
"def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")",
"def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")"
] | [
"0.8081375",
"0.7568696",
"0.72568953",
"0.72568953",
"0.72568953",
"0.72568953",
"0.72568953",
"0.72568953",
"0.72568953",
"0.72568953",
"0.72568953",
"0.72568953",
"0.72568953",
"0.72568953",
"0.72568953",
"0.72568953",
"0.72568953",
"0.72568953",
"0.72568953",
"0.72568953",
"0.72568953",
"0.72568953",
"0.72568953",
"0.72568953",
"0.72568953",
"0.72568953",
"0.72568953",
"0.714917",
"0.714917",
"0.714917"
] | 0.7668451 | 1 |
Shows a simple scatterplot of X, colored by the classes in y. Technically, this shows the 1st three principal components of X if X has more than 3 dimensions. If X only has 2 dimensions, then just a 2dimensional scatterplot is returned. This will not produce a plot for 1 dimensional data. | def plot_data(X, y):
x_dim = X.shape[1]
# Ignore 1 dimensional data
if x_dim == 1:
print("plot_data not gonna bother with 1 dimensional data")
return
# For 2 dimensional data, just plot it
if x_dim == 2:
plt.scatter(X[:,0], X[:,1], c=y)
plt.show()
return
# For at least 4 dimensions, do PCA
if x_dim >= 4:
pca = PCA(n_components=3)
pca.fit(X)
plot_x = pca.transform(X)
else:
plot_x = X
# Assumes y is either 1 or 0
pos_idxs = np.where(y == 1)[0]
neg_idxs = np.where(y == 0)[0]
# Plot the now 3 dimensional data
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
Xs = plot_x[neg_idxs, :]
ax.scatter(Xs[:,0], Xs[:,1], Xs[:,2], color='orange')
Xs = plot_x[pos_idxs, :]
ax.scatter(Xs[:,0], Xs[:,1], Xs[:,2], color='purple')
# Label plot
if x_dim >= 4:
ax.set_title("PCA of Generated Data")
ax.set_xlabel("1st Principal Component")
ax.set_ylabel("2nd Principal Component")
ax.set_zlabel("3rd Principal Component")
else:
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_zticklabels([])
# Display!
plt.show() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def scatter_plot(self):\n\n X = self.reduce_dimension(n_components=2)\n\n plt.figure()\n plt.scatter(X[:,0], X[:,1])\n\n return plt",
"def plot_dataset(X, classes):\n data = pd.DataFrame(X, columns=['x', 'y'])\n data['dataset'] = classes\n sns.lmplot('x', 'y', data=data, hue='dataset', fit_reg=False, size=10,\n palette=sns.color_palette(\"Set3\", 10),\n scatter_kws={\"s\": 75})",
"def scatterPlot2DMiddle(data, title, classes):\n fig = plt.figure(figsize=(8, 8))\n colormap = np.array([\"g\", \"b\"])\n if classes is not None:\n plt.scatter(data[:, 0], data[:, 1], c=colormap[classes], s=0.2)\n else:\n plt.scatter(data[:, 0], data[:, 1], s=1)\n plt.title(title, fontsize=18)\n plt.show()",
"def scatter_plot(x_train, y_train, x_test, y_test, class1, class2):\n train_c0 = x_train[y_train == 0, :]\n train_c1 = x_train[y_train == 1, :]\n test_c0 = x_test[y_test == 0, :]\n test_c1 = x_test[y_test == 1, :]\n fig, a = plt.subplots(1, 2)\n fig.set_size_inches(11, 5)\n a[0].scatter(train_c0[:, 0], train_c0[:, 1], color='green', label=class1)\n a[0].scatter(train_c1[:, 0], train_c1[:, 1], color='red', label=class2)\n a[0].legend()\n a[0].set_title('Train Set')\n a[1].scatter(test_c0[:, 0], test_c0[:, 1], color='green', label=class1)\n a[1].scatter(test_c1[:, 0], test_c1[:, 1], color='red', label=class2)\n a[1].legend()\n a[1].set_title('Test Set')\n plt.show()",
"def visualise_data_set(x_arr, y_arr):\n # Instantiate a PCA object for the sake of easy visualisation\n pca = PCA(n_components=3)\n\n # Fit and transform x to visualise inside a 3D feature space\n x_visualisation = pca.fit_transform(x_arr)\n\n figure = plt.figure()\n axis = Axes3D(figure)\n\n axis.scatter(x_visualisation[y_arr == 0, 0], x_visualisation[y_arr == 0, 1], x_visualisation[y_arr == 0, 2],\n label=\"Class #0\",\n edgecolor=almost_black, facecolor=palette[0], linewidth=0.3, marker=\"o\")\n axis.scatter(x_visualisation[y_arr == 1, 0], x_visualisation[y_arr == 1, 1], x_visualisation[y_arr == 1, 2],\n label=\"Class #1\",\n edgecolor=almost_black, facecolor=palette[2], linewidth=0.3, marker=\"^\")\n axis.set_title(\"PCA to 3 components\")\n\n plt.show()",
"def plot_data(x: np.ndarray, y: np.ndarray) -> None:\n\n _, ax = plt.subplots()\n scatter = ax.scatter(x[:, 0], x[:, 1], c=y, s=40, cmap=plt.cm.RdYlBu)\n legend1 = ax.legend(*scatter.legend_elements(),\n loc=\"lower right\", title=\"Classes\")\n ax.add_artist(legend1)\n plt.xlim((min(x[:, 0]) - 0.1, max(x[:, 0]) + 0.1))\n plt.ylim((min(x[:, 1]) - 0.1, max(x[:, 1]) + 0.1))",
"def plot(self):\n y = self.projection\n mpl.scatter(y[:, 0], y[:, 1], c=self.data_class)\n mpl.show()",
"def plot_iris_dataset(data, classes, feature_names, target_names, title = \"Grafica de las caracteristicas y sus clases\"):\n\n # Tomo las coordenadas de la matriz de datos, es decir, separo coordenadas\n # x e y de una matriz de datos que contiene pares de coordenadas\n data = np.array(data)\n x_values = data[:, 0]\n y_values = data[:, 1]\n\n # Colores que voy a utilizar para cada una de las clases\n colormap = ['orange', 'black', 'green']\n\n # Separacion de indices. Con esto, consigo la lista de los indices de la\n # clase i-esima, cada uno en un vector distinto. Esto lo necesitare para\n # colorear cada clase de un color y ponerle de label el nombre de la planta\n first_class_indexes = np.where(classes == 0)\n second_class_indexes = np.where(classes == 1)\n third_class_indexes = np.where(classes == 2)\n\n # Asi puedo referirme a la primera clase como splitted_indixes[0] en vez\n # de usar el nombre de la variable (para acceder a los indices en el siguiente\n # bucle)\n splitted_indixes = [first_class_indexes, second_class_indexes, third_class_indexes]\n\n # Tomo estos elementos para hacer graficas elaboradas\n fig, ax = plt.subplots()\n\n # Itero sobre las clases\n for index, target_name in enumerate(target_names):\n\n # Tomo las coordenadas de la clase index-esima\n current_x = x_values[splitted_indixes[index]]\n current_y = y_values[splitted_indixes[index]]\n\n # Muestro la clase index-esima, con su color y su etiqueta correspondiente\n ax.scatter(current_x, current_y, c=colormap[index], label=target_name)\n\n # Titulo para la grafica\n plt.title(title)\n\n # Tomo los titulos de las caracteristicas y los asigno al grafico\n # Tomo la idea de: https://scipy-lectures.org/packages/scikit-learn/auto_examples/plot_iris_scatter.html\n x_legend = feature_names[0]\n y_legend = feature_names[1]\n ax.legend()\n\n plt.show()\n wait_for_user_input()",
"def lda_scatter(X,Y, dim3=True):\n # Fit data\n lda = LDA()\n lda.fit(X, Y)\n X_r2 = lda.transform(X) \n\n # 3-D plot\n if dim3:\n fig = pylab.figure()\n ax = Axes3D(fig)\n ax.scatter3D(X_r2[:,0],X_r2[:,1],X_r2[:,2], c=Y)\n \n #2-D plot\n else:\n plt.scatter(X_r2[:,0], X_r2[:,1], c= Y )",
"def visualize_data(data):\n\n # Instantiate a PCA object for the sake of easy visualisation\n pca = PCA(n_components=2)\n\n # Fit and transform x to visualise inside a 2D feature space\n x_vis = pca.fit_transform(data[data.columns[:-1]])\n y = data['Tumor'].as_matrix()\n\n # Plot the original data\n # Plot the two classes\n palette = sns.color_palette()\n\n plt.scatter(x_vis[y == 0, 0], x_vis[y == 0, 1], label=\"Normal\", alpha=0.5,\n edgecolor=ALMOST_BLACK, facecolor=palette[0], linewidth=0.15)\n plt.scatter(x_vis[y == 1, 0], x_vis[y == 1, 1], label=\"Tumor\", alpha=0.5,\n edgecolor=ALMOST_BLACK, facecolor=palette[2], linewidth=0.15)\n\n plt.legend()\n plt.show()",
"def scatter_plot(x, y):\n mpl_fig = plt.figure()\n plt.scatter(x, y)\n return get_div_from_data(mpl_fig)",
"def scatterPlot2DBig(data, title, classes):\n fig = plt.figure(figsize=(15, 15))\n colormap = np.array([\"g\", \"b\"])\n\n if classes is not None:\n plt.scatter(data[:, 0], data[:, 1], c=colormap[classes])\n else:\n plt.scatter(data[:, 0], data[:, 1])\n plt.title(title, fontsize=18)\n plt.show()",
"def plot_classification(X,\n y,\n y_true,\n y_pred,\n metrics=(\"acc\", \"sen\", \"spe\"),\n fig_size=(12, 5),\n fig_show=True,\n save_as=\"figure.pdf\",\n x_label=\"x\",\n y_label=\"y\",\n **plot_kwargs):\n\n # Convert the input data to pd.Series\n if not isinstance(X, pd.Series):\n X = pd.Series(X.reshape((len(X), )))\n if not isinstance(y, pd.Series):\n y = pd.Series(y.reshape((len(y), )))\n if not isinstance(y_true, pd.Series):\n y_true = pd.Series(y_true.reshape((len(y_true), )))\n if not isinstance(y_pred, pd.Series):\n y_pred = pd.Series(y_pred.reshape((len(y_pred), )))\n\n # Compute the classification metrics\n computed_metrics = [(metric, round(classification_metric(metric, y_true, y_pred), 2)) for metric in metrics]\n\n # Prepare the temporary DataFrame\n df = pd.DataFrame({\"X\": X, \"y\": y, \"y_true\": y_true, \"y_pred\": y_pred, \"matches\": y_true == y_pred})\n\n # Create the figure\n fig = plt.figure(figsize=fig_size)\n\n # Plot the true labels scatter-plot\n ax = fig.add_subplot(1, 2, 1)\n sns.scatterplot(x=\"X\", y=\"y\", hue=\"y_true\", data=df, **plot_kwargs)\n\n ax.set_title(\"Ground truth\")\n ax.set_xlabel(x_label)\n ax.set_ylabel(y_label)\n plt.tight_layout()\n\n # Plot the predicted labels scatter-plot\n ax = fig.add_subplot(1, 2, 2)\n sns.scatterplot(x=\"X\", y=\"y\", hue=\"y_pred\", size=\"matches\", data=df, **plot_kwargs)\n\n ax.set_title(\"Predicted ({})\".format(\" \".join([\"{} = {},\".format(m, v) for m, v in computed_metrics])))\n ax.set_xlabel(x_label)\n ax.set_ylabel(\"\")\n plt.tight_layout()\n\n # Store the figure\n if save_as:\n plt.savefig(save_as)\n\n # Show the graph (if enabled)\n if fig_show:\n plt.show()\n else:\n plt.close()",
"def plot(model, samples):\n # compute responsiblity values\n resp = model.predict_proba(samples)\n\n # plot\n plt.axis('equal')\n plt.scatter(samples[:,0], samples[:,1], c=resp)\n plt.show()",
"def vis(X, y = None, vis_noise = False):\n plt.figure()\n\n if y is None:\n plt.scatter(*X.T, s=1)\n else:\n color_noise = (1,1,1)\n if vis_noise:\n color_noise = (0.75, 0.75, 0.75)\n\n color_palette = sns.color_palette('deep', np.max(y).astype(int)+1)\n cluster_colors = [color_palette[y_i] if y_i >= 0\n else color_noise\n for y_i in y]\n\n plt.scatter(*X.T, s=1, c=cluster_colors)\n\n plt.show()",
"def cluster_plot(self):\r\n train = StandardScaler().fit_transform(self.X)\r\n pca = PCA(n_components=3)\r\n pca_component = pca.fit_transform(self.X)\r\n fig = plt.figure(figsize=(10,8))\r\n sns.set_palette(sns.color_palette(\"cubehelix\", 8))\r\n ax = Axes3D(fig)\r\n ax.scatter(pca_component[:,0].tolist(),pca_component[:,1].tolist(),pca_component[:,2].tolist(),c=self.labels,marker='v')\r\n ax.legend()\r\n plt.show()",
"def plot_dataset(features, labels, nb_classes: int) -> None:\n sns.scatterplot(x=features[:, 0], y=features[:, 1], hue=labels, markers=True)\n plt.title(f'Data from {nb_classes} classes')\n save_plot('mock_dataset')",
"def scatterPlot2():\n N = 100\n x = np.random.rand(N)\n y = np.random.rand(N)\n colors = np.random.rand(N)\n\n plt.scatter(x, y, c=colors, alpha=0.5)\n plt.show()",
"def plot(self):\n\t\tif (2 <= len(self.X) <= 3):\n\t\t\tvDataFrame(self.name, self.cursor).scatter(columns = self.X, catcol = \"dbscan_cluster\", max_cardinality = 100, max_nb_points = 10000)\n\t\telse:\n\t\t\traise ValueError(\"Clustering Plots are only available in 2D or 3D\")",
"def plot_coefs(results):\n coefs_noisy = pd.concat([\n arr_to_df(results['obj_noisy'], n_arr, 'obj'),\n vec_to_df(results['dist_obj'], n_arr, 'obj'),\n arr_to_df(results['pos_noisy'], n_arr, 'pos'),\n vec_to_df(results['dist_pos'], n_arr, 'pos'),\n arr_to_df(results['neg_noisy'], n_arr, 'neg'),\n vec_to_df(results['dist_neg'], n_arr, 'neg')\n ])\n\n xlim = (min(n_arr), max(n_arr))\n ylim = (-1.1, 1.1)\n\n g = sns.FacetGrid(coefs_noisy, row = 'id', col = 'component', xlim = xlim,\n ylim = ylim)\n g.map(sns.pointplot, 'n', 'value', order = n_arr)\n g.set_xticklabels(rotation = 45)\n\n for i, val in enumerate(results['obj_true']):\n ax = g.axes[0, i]\n ax.hlines(val, *ax.get_xlim())\n for i, val in enumerate(results['pos_true']):\n ax = g.axes[1, i]\n ax.hlines(0, *ax.get_xlim(), linestyle = '--', color = 'red')\n ax.hlines(val, *ax.get_xlim())\n for i, val in enumerate(results['neg_true']):\n ax = g.axes[2, i]\n ax.hlines(0, *ax.get_xlim(), linestyle = '--', color = 'red')\n ax.hlines(val, *ax.get_xlim())",
"def plot_data(x):\n if DATA_2D:\n plt.scatter(x[:, 0], x[:, 1])\n plt.show()\n else:\n fig = plt.figure()\n ax = Axes3D(fig)\n ax.scatter(x[:, 0], x[:, 1], x[:, 2])\n ax.set_xlabel('X Label')\n ax.set_ylabel('Y Label')\n ax.set_zlabel('Z Label')\n plt.show()",
"def generate_pca(X, y, cols, n_components, **kwargs):\n\n pca = PCA(n_components, **kwargs)\n pca_result = pca.fit_transform(X)\n pca_df = pd.DataFrame(pca_result, columns=cols, index=X.index)\n pca_df['label'] = y\n pca_plot = ggplot(pca_df, aes(x=\"PCA-1\", y=\"PCA-2\", color='label') ) + geom_point(size=100,alpha=0.8) + ggtitle(\"First and Second Principal Components colored by class\")\n return pca_plot",
"def scatterplot(x, y):\n plt.figure(figsize=(14, 8), dpi=80)\n plt.scatter(x[:, 1], y, s=30, c='r', marker='x', linewidths=1)\n plt.grid(True)\n plt.xlim(4, 24)\n plt.ylabel('Profit ($10k)')\n plt.xlabel('Population (10k)')\n plt.show()\n plt.close()",
"def scatter(xarr, yarr, xlbl=None, ylbl=None, pw=600, ph=400):\n p = figure(plot_width=pw, plot_height=ph)\n # Model\n p.circle(xarr, yarr, color='black')#, legend='data')\n # Label\n if xlbl is not None:\n p.xaxis.axis_label = xlbl\n if ylbl is not None:\n p.yaxis.axis_label = ylbl\n # Show\n show(p)",
"def plot_cmatrix_wrapper(y_true, y_pred, classes, **kwargs):\n cm = confusion_matrix(y_true, y_pred)\n plot_confusion_matrix(cm, classes, **kwargs)",
"def plot_data(self):\n if hasattr(self,'data'):\n plt.scatter(*self.data.T)\n plt.show()\n else:\n raise Exception('No 2d data of the instance has been loaded')",
"def _bokeh_confusion_scatter(\n y_true: np.ndarray,\n y_pred: np.ndarray,\n class_names: Sequence[str],\n title_rows: Sequence[str],\n x_label_rotation: Union[str, float] = \"horizontal\",\n y_label_rotation: Union[str, float] = \"vertical\",\n) -> Callable[[], Figure]:\n if len(y_true) != len(y_pred):\n raise ValueError(\"y_true and y_pred must have the same length!\")\n\n def figure() -> Figure:\n\n p = plotting.figure(\n x_range=(-0.5, -0.5 + len(class_names)),\n y_range=(-0.5, -0.5 + len(class_names)),\n plot_height=350,\n plot_width=350,\n tools=TOOLS,\n toolbar_location=TOOLBAR_LOCATION,\n match_aspect=True,\n )\n\n def noise() -> np.ndarray:\n return (np.random.beta(1, 1, size=len(y_true)) - 0.5) * 0.6\n\n p.scatter(\n x=y_true + noise(),\n y=y_pred + noise(),\n size=scatter_plot_circle_size(\n num_points=len(y_true),\n biggest=4.0,\n smallest=1.0,\n use_smallest_when_num_points_at_least=5000,\n ),\n color=DARK_BLUE,\n fill_alpha=SCATTER_CIRCLES_FILL_ALPHA,\n line_alpha=SCATTER_CIRCLES_LINE_ALPHA,\n )\n\n add_title_rows(p, title_rows)\n apply_default_style(p)\n\n p.xaxis.axis_label = \"Ground Truth\"\n p.yaxis.axis_label = \"Prediction\"\n\n arange = np.arange(len(class_names))\n\n p.xaxis.ticker = arange\n p.yaxis.ticker = arange\n\n p.xaxis.major_label_overrides = {i: name for i, name in enumerate(class_names)}\n p.yaxis.major_label_overrides = {i: name for i, name in enumerate(class_names)}\n\n p.xaxis.major_label_orientation = x_label_rotation\n p.yaxis.major_label_orientation = y_label_rotation\n\n # grid between classes, not at classes\n p.xgrid.ticker = arange[0:-1] + 0.5\n p.ygrid.ticker = arange[0:-1] + 0.5\n\n p.xgrid.grid_line_width = 3\n p.ygrid.grid_line_width = 3\n\n # prevent panning to empty regions\n p.x_range.bounds = (-0.5, -0.5 + len(class_names))\n p.y_range.bounds = (-0.5, -0.5 + len(class_names))\n\n return p\n\n return figure",
"def pairplot(data, target_col, columns=None, scatter_alpha='auto',\n scatter_size='auto'):\n if columns is None:\n columns = data.columns.drop(target_col)\n n_features = len(columns)\n fig, axes = plt.subplots(n_features, n_features,\n figsize=(n_features * 3, n_features * 3))\n axes = np.atleast_2d(axes)\n for ax, (i, j) in zip(axes.ravel(),\n itertools.product(range(n_features), repeat=2)):\n legend = i == 0 and j == n_features - 1\n if i == j:\n class_hists(data, columns[i], target_col, ax=ax.twinx())\n else:\n discrete_scatter(data[columns[j]], data[columns[i]],\n c=data[target_col], legend=legend, ax=ax,\n alpha=scatter_alpha,\n s=scatter_size)\n if j == 0:\n ax.set_ylabel(columns[i])\n else:\n ax.set_ylabel(\"\")\n ax.set_yticklabels(())\n if i == n_features - 1:\n ax.set_xlabel(_shortname(columns[j]))\n else:\n ax.set_xlabel(\"\")\n ax.set_xticklabels(())\n despine(fig)\n if n_features > 1:\n axes[0, 0].set_yticks(axes[0, 1].get_yticks())\n axes[0, 0].set_ylim(axes[0, 1].get_ylim())\n return axes",
"def plot_scatter(x, y):\n\tplt.scatter(x, y)",
"def visualise_two_data_sets(x_arr, y_arr, x_arr_two, y_arr_two):\n # Instantiate a PCA object for the sake of easy visualisation\n pca = PCA(n_components=3)\n\n # Fit and transform x to visualise inside a 3D feature space\n x_visualisation = pca.fit_transform(x_arr)\n\n figure = plt.figure()\n axis = Axes3D(figure)\n\n axis.scatter(x_visualisation[y_arr == 0, 0], x_visualisation[y_arr == 0, 1], x_visualisation[y_arr == 0, 2],\n label=\"Class #0\",\n edgecolor=almost_black, facecolor=palette[0], linewidth=0.3, marker=\"o\")\n axis.scatter(x_visualisation[y_arr == 1, 0], x_visualisation[y_arr == 1, 1], x_visualisation[y_arr == 1, 2],\n label=\"Class #1\",\n edgecolor=almost_black, facecolor=palette[2], linewidth=0.3, marker=\"^\")\n axis.set_title(\"PCA to 3 components - data-set 1\")\n\n x_visualisation_two = pca.transform(x_arr_two)\n figure_two = plt.figure()\n axis_two = Axes3D(figure_two)\n axis_two.scatter(x_visualisation_two[y_arr_two == 0, 0], x_visualisation_two[y_arr_two == 0, 1],\n x_visualisation_two[y_arr_two == 0, 2],\n label=\"Class #0\", edgecolor=almost_black,\n facecolor=palette[0], linewidth=0.3, marker=\"o\")\n axis_two.scatter(x_visualisation_two[y_arr_two == 1, 0], x_visualisation_two[y_arr_two == 1, 1],\n x_visualisation_two[y_arr_two == 1, 2],\n label=\"Class #1\", edgecolor=almost_black,\n facecolor=palette[2], linewidth=0.3, marker=\"^\")\n axis_two.set_title(\"PCA to 3 components - data-set 2\")\n\n plt.show()"
] | [
"0.68430007",
"0.66364133",
"0.6596344",
"0.6580677",
"0.65084815",
"0.645392",
"0.6410972",
"0.6298724",
"0.6219223",
"0.621192",
"0.61827666",
"0.61542743",
"0.6125981",
"0.60232717",
"0.59843695",
"0.590425",
"0.59014237",
"0.5894517",
"0.58731264",
"0.58717024",
"0.58680373",
"0.58463746",
"0.58204675",
"0.5802502",
"0.58007133",
"0.5799708",
"0.57966316",
"0.57835287",
"0.5772385",
"0.57693636"
] | 0.7091436 | 0 |
Log and assert based on condition. If condition True, log message as PASS to testcase log file. If condition False, Assert and Print message with status FAIL. | def logfile_assert_message(s, condition, message):
if not condition:
s.log_to_file += now_short() + message + ": FAIL\r\n"
assert 0, message + ": FAIL\r\n"
else:
log_message(s, message + ": PASS") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test(self):\n self.info(\"LOGGING: Testing log messages\")\n self.debug(\"This is a debugging message\")\n self.info(\"This is an informational message\")\n self.warning(\"This is a warning message\")\n self.error(\"This is an error message\")\n self.critical(\"This is a critical message\")\n self.info(\"LOGGING: Testing log messages COMPLETE\")\n return",
"def Assert(condition):\n try:\n assert TestStepsTools.Eval(condition)\n except AssertionError:\n _LOGGER.error('Condition %s is not True', condition)\n raise\n\n return True",
"def do_assert(self, str_arg):\n arg = validateString(str_arg)\n if arg not in ('true', 'false'):\n self.resultFlag = False\n raise ValueError('Bad parameter.')\n if (arg == 'true' and self.resultFlag) or (arg == 'false' and not self.resultFlag):\n printLog(self.threadName + '[ASSERT PASS]', logging.DEBUG)\n self.resultFlag = True\n else:\n # printLog(self.threadName+'[status=%s]' % self.resultFlag)\n printLog(self.threadName + '[ASSERT FAIL!]', logging.DEBUG)\n self.resultFlag = False\n raise AssertionError()",
"def assertTrue(self, statement, message):\n prefix = \"In component %s: \" % self.name\n if not statement:\n error(prefix + str(message))",
"def _assert(condition, message):\n if not condition:\n raise AssertionError(message)",
"def test_assert_truth(self):\n\n # Confused? This video should help:\n #\n # http://bit.ly/about_asserts\n\n self.assertTrue(True) # This should be true",
"def print(self):\n if self.passed():\n self.print_passed()\n else:\n self.print_failed()",
"def print_result(testcase, passed):\n print '{:<6}{}'.format('{}:'.format(testcase), \"allowed\" if passed else \"blocked\")",
"def test_log_success(self, mock_info):\n\n with utils.log_activity(\"for test\"):\n pass\n\n mock_info.assert_any_call(\"[jaxline] %s starting...\", \"for test\")\n mock_info.assert_any_call(\"[jaxline] %s finished.\", \"for test\")",
"def test_02_log_something(self):\n logger = get_logger(self)\n logger.info('Info in test_02')\n logger.debug('Debug in test_02')\n logger.warn('Warn in test_02')\n logfiles = glob.glob(os.path.join(self.LOG_FOLDER,\n '{}*.log'.format(self.scenario)))\n assert logfiles\n print(logfiles)\n for logfile in logfiles:\n with open(logfile) as f:\n for line in f:\n print(line.strip())",
"def unitTest(self, _strMessage=\"\"):\n self.edLogging.unitTest(_strMessage)",
"def test_case_01(self):\n if True:\n self.fail()",
"def print_tcase_success(self,testcaseName,reasonPassed):\n\n # go throuht the test case objects\n\tfor t in self.testcases:\n\t\t\n\t\ttName = t.name\n\t\tif tName == testcaseName:\n\t\t\t#print tName\n\t\t\tt.status = \"Passed\"\n\t\t\tt.reasonPassed = reasonPassed\n return 1\n\tprint_green(\"=\" * 80)\n\ttrace_success(\"TESTCASE: PASSED %s,reason '%s'\"%(testcaseName,reasonPassed))\n\tprint_green(\"=\" * 80)\n \n\traise ViriValuePassedError(\"Testcase '%s' doesnt seem to be run but print success called\"%testcaseName)",
"def ASSERT(self, _strMessage):\n self.edLogging.ASSERT(_strMessage)",
"def test_xfail_with_run_false_and_with_reason():\n pass",
"def test_failed():\n assert False",
"def test_common_case(self):\n loglevel_from_command_line = \"WARNING\"\n assert output(self.msg, \"INFO\", loglevel_from_command_line)",
"def test1(self):\n\n log.info('This is a test')\n self.assertTrue((random.randint(0,9) % 2) == 0)#! /usr/bin/env python",
"def test_condition_split(self):\n self.write_contents(\n 'external/wpt/fail.html.ini', \"\"\"\\\n [fail.html]\n expected:\n if product == \"content_shell\": FAIL\n \"\"\")\n self.update(\n {\n 'run_info': {\n 'product': 'content_shell',\n 'os': 'mac',\n 'port': 'mac12',\n },\n 'results': [{\n 'test': '/fail.html',\n 'status': 'TIMEOUT',\n 'expected': 'FAIL',\n }],\n }, {\n 'run_info': {\n 'product': 'content_shell',\n 'os': 'win',\n 'port': 'win11',\n },\n 'results': [{\n 'test': '/fail.html',\n 'status': 'FAIL',\n 'expected': 'FAIL',\n }],\n }, {\n 'run_info': {\n 'product': 'chrome',\n 'os': 'linux',\n 'port': 'trusty',\n },\n 'results': [{\n 'test': '/fail.html',\n 'status': 'PASS',\n 'expected': 'PASS',\n }],\n },\n overwrite_conditions='yes')\n path = self.finder.path_from_web_tests('external', 'wpt',\n 'fail.html.ini')\n lines = self.tool.filesystem.read_text_file(path).splitlines()\n expected = textwrap.dedent(\"\"\"\\\n [fail.html]\n expected:\n if (product == \"content_shell\") and (os == \"win\"): FAIL\n if (product == \"content_shell\") and (os == \"mac\"): TIMEOUT\n \"\"\")\n # TODO(crbug.com/1299650): The branch order appears unstable, which we\n # should fix upstream to avoid create spurious diffs.\n self.assertEqual(sorted(lines, reverse=True), expected.splitlines())",
"def Checktest(self, expectedoutput):\n\n if expectedoutput == 0:\n result = self.runner.invoke(yoda.cli, [\"setup\", \"check\"])\n self.assertEqual(result.exit_code, 0)\n self.assertIn(\"The configuration file does not exist.\", result.output)\n return\n\n if expectedoutput == 1:\n result = self.runner.invoke(yoda.cli, [\"setup\", \"check\"])\n self.assertEqual(result.exit_code, 0)\n self.assertIn(\"Name: Name\", result.output)\n self.assertIn(\"Email: [email protected]\", result.output)\n self.assertIn(\"Github username: GhUser\", result.output)",
"def _check(self, expected, actual):\n\n assert expected == actual, 'Assert fail. expected={} but actual={}'.format(expected, actual)",
"def self_test(message):\n global failed_tests\n\n if result != correct:\n failed_tests += 1\n print module_banner\n print \"test failed:\", message\n print \" correct:\", correct\n print \" result: \", result\n print",
"def test(did_pass):\r\n linenum = sys._getframe(1).f_lineno # Get the caller's line number\r\n if did_pass:\r\n msg = \"Test at line {0} is ok\".format(linenum)\r\n else:\r\n msg = \"Test at line {0} is FAILED\".format(linenum)\r\n print(msg)",
"def visit_assert(self: Parser, node: doc.Assert) -> None:\n cond = self.eval_expr(node.test)\n msg = self.eval_expr(node.msg)\n frame = T.Assert(cond, msg)\n frame.add_callback(partial(frame.__exit__, None, None, None))\n frame.__enter__()",
"def assert_verbose(actual, expected):\n assert expected == actual, f\"Expected value: {expected}. But actual value is {actual}\"",
"def test_execute_or_bail_ok(self):\n with self.assertLogs(level=\"INFO\") as cm:\n with etl.commands.execute_or_bail(\"unittest\"):\n pass\n self.assertEqual(len(cm.output), 1)\n self.assertTrue(\"finished successfully\" in cm.output[0])",
"def test_level_error(self):\n self.assertEqual(DiscordReportFormatter().format(self.record(loglevel=logging.ERROR)), \":exclamation: **test**\")",
"def test_the_tests():\n\n assert True is True",
"def _run_test(level_name):\n input_data, expected_output = _data_for_level(level_name)\n\n print 'Running test %s' % level_name\n program_output = bundler.bundle_mail(input_data)\n\n passed, message = _verify_output(program_output, expected_output,\n input_data)\n\n if len(message) > 0:\n print ''\n print message\n\n print '----------------------------'\n if passed:\n print 'Success!'\n else:\n print 'Fail'\n\n return passed",
"def _test():\n try:\n print 'Test for Loging'\n # Establish Logging at the beginning of the script\n fh = establish(lvl='DEBUG', logName='TestLog.txt', logPath='', backups=0)\n\n # Supply log functions with message as a STRING\n info('TEST - Info lvl')\n debug('TEST - Debug lvl')\n warning('TEST - Warning lvl')\n error('TEST - Error lvl')\n exception('TEST - Exception. See the exception below this line.')\n info('Would any of this be logged to ArcPy: {0}'.format(_logToArcpyMessagingWindow))\n\n except:\n exception('Error in main function of script')\n print 'ERROR WITH SCRIPT: {0}'.format(traceback.format_exc())\n finally:\n # Ensure to Shut-down the Logging\n info('Script Completed')\n shutdown(fh)\n print 'Test Complete'"
] | [
"0.6288022",
"0.61218476",
"0.6107574",
"0.60947496",
"0.5991102",
"0.5953129",
"0.5885416",
"0.587344",
"0.5861646",
"0.58180374",
"0.5753832",
"0.5752025",
"0.5750852",
"0.5717795",
"0.57175326",
"0.570502",
"0.5684395",
"0.5653539",
"0.5651103",
"0.5643155",
"0.5638683",
"0.5637379",
"0.5637224",
"0.5632616",
"0.56316113",
"0.56235796",
"0.5623042",
"0.5621439",
"0.56190467",
"0.56172097"
] | 0.78782284 | 1 |
Write detailed log file for given test. | def write_test_log(t, output_dir):
if t.log_to_file is not None and hasattr(t, "stop_time"):
filename = type(t).__name__ + "-" + time.strftime("%Y%m%d-%H%M%S") + ".txt"
testtime = t.stop_time - t.start_time
with open(os.path.join(output_dir, filename), "w") as log:
log.write("\t=======================================================")
log.write(f"\n\tTest case ID: {type(t).__name__}")
log.write(f"\n\tTest case Description: {type(t).__doc__}")
log.write("\n\t=======================================================\n")
log.write(t.log_to_file)
log.write("\n\t=======================================================")
log.write(f"\n\t{type(t).__name__} test result: {t.result_grade}")
log.write(f"\n\tTotal test time: {testtime} seconds")
log.write("\n\t=======================================================") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def write_test_log(t, output_dir):\n if t.log_to_file is not None and hasattr(t, \"stop_time\"):\n filename = type(t).__name__ + \"-\" + time.strftime(\"%Y%m%d-%H%M%S\") + \".txt\"\n testtime = t.stop_time - t.start_time\n with open(os.path.join(output_dir, filename), \"w\") as log:\n log.write(\"\\t=======================================================\")\n log.write(\"\\n\\tTest case ID: %s\" % (type(t).__name__))\n log.write(\"\\n\\tTest case Description: %s\" % (type(t).__doc__))\n log.write(\"\\n\\t=======================================================\\n\")\n log.write(t.log_to_file)\n log.write(\"\\n\\t=======================================================\")\n log.write(\"\\n\\t%s test result: %s\" % (type(t).__name__, t.result_grade))\n log.write(\"\\n\\tTotal test time: %s seconds\" % testtime)\n log.write(\"\\n\\t=======================================================\")",
"def _dump_test_parser_log(self):\n\t\tFileSystem.dump_to(self._result_directory_name + \"/\" + \"Test_Parser.log\", self._form_test_parser_log())",
"def writeToLogFile(self, event):\n outPutStr = '{:013}'.format(0)\n logOutPutStr = outPutStr + '\\t' + '{:.2f}'.format (time ()) + '\\t' + event + '\\t' + datetime.fromtimestamp (int (time())).isoformat (' ')\n printOutPutStr = outPutStr + '\\t' + datetime.fromtimestamp (int (time())).isoformat (' ') + '\\t' + event\n print (printOutPutStr)\n if self.logFP is not None:\n self.logFP.write(logOutPutStr + '\\n')\n self.logFP.flush()",
"def write_test(path, test):\n with open(path, 'w') as f:\n f.write('test = ')\n pprint.pprint(test, f, indent=4, width=200, depth=None)",
"def logToFile(output, file): \r\n print( output, file=file )",
"def create_log_file(path):\n with open(path, 'w'):\n pass",
"def write_to_file(output, test_case_name, path):\n path_to_store = OutputWrite.make_test_dir(path, test_case_name)\n time_stamp = OutputWrite.get_time_stamp()\n try:\n LOG.debug('Changing the dir to {0}'.format(path_to_store))\n os.chdir(path_to_store)\n except Exception as _ex_:\n LOG.exception('Error :{0}'.format(_ex_))\n else:\n file_name = os.path.join(path_to_store, test_case_name +\n time_stamp)\n LOG.debug('The file name after joining = {0}'.format(file_name))\n try:\n LOG.debug('Writing Test case output to the file')\n with open(file_name, 'w') as file_obj:\n file_obj.write(output)\n except FileNotFoundError as _ex_:\n LOG.exception('Error : {0}'.format(_ex_))",
"def _log_to_file(self, message):\n if self.log is not None:\n message = \"[%s] %s\" % (datetime.datetime.utcnow().strftime('%H:%M:%S'), message)\n self.log.write(\"%s\\n\" % (message,))\n self.log.flush()\n print message",
"def logsave(self):\n log_file = open(self.conf[\"output_prefix\"] + \"_log.txt\", \"w\")\n try:\n log_file.write(self.log)\n finally:\n log_file.close()",
"def logger_test():\n test_logger = Logger(True)\n test_dir = r'{}/logger_test'.format(os.getcwd())\n header = ['x', 'y', 'z']\n test_logger.new('test', header)\n for i in range(10):\n data = np.random.random((3,))\n test_logger.add('test', data)\n test_logger.save('test', test_dir)",
"def write_to_file(self, *args, **kwargs) -> None:\n with open(self._log_file, 'a') as file:\n print(file=file, *args, **kwargs)",
"def write_to_file(train_file, test_file, log_dict):\n i = 0\n train_events = []\n test_events = []\n\n for key in log_dict:\n trace = log_dict[key]\n if random.randint(0,1) == 0: # Add file to training set with 50% chance\n for e_idx in range(len(trace)):\n train_events.append(\",\".join([str(x) for x in trace[e_idx]]) + \",\" + str(key) + \",0,None\")\n else: # Add file to test set\n if random.randint(0,100) > 50: # No anomaly injection with 50% chance\n for e_idx in range(len(trace)):\n test_events.append(\",\".join([str(x) for x in trace[e_idx]]) + \",\" + str(key) + \",0,None\")\n else: # Anomaly injection\n trace, types = introduce_anomaly(trace, single=False)\n for e_idx in range(len(trace)):\n test_events.append(\",\".join([str(x) for x in trace[e_idx]]) + \",\" + str(key) + \",1,\\\"\" + str(types) + \"\\\"\")\n\n with open(train_file, \"w\") as fout:\n fout.write(\",\".join([\"Time\", \"Activity\", \"Resource\", \"Weekday\", \"Case\", \"Anomaly\", \"Type\"]) + \"\\n\")\n for e in train_events:\n fout.write(e + \"\\n\")\n\n with open(test_file, \"w\") as fout:\n fout.write(\",\".join([\"Time\", \"Activity\", \"Resource\", \"Weekday\", \"Case\", \"Anomaly\", \"Type\"]) + \"\\n\")\n for e in test_events:\n fout.write(e + \"\\n\")",
"def log(self, loginfo):\n logging.basicConfig(level=logging.INFO,\n format='%(asctime)s %(filename)s:%(message)s',\n datefmt='%d %b %Y %H:%M:%S',\n filename=self.logfilepath,\n filemode='w')\n filelog = logging.FileHandler(self.logfilepath)\n logging.getLogger('Functest').addHandler(filelog)\n logging.info(loginfo)",
"def WriteLog(self, content, file_name=None):\n file_path = ''\n if file_name is None:\n file_path = tempfile.NamedTemporaryFile(dir=self.events_dir,\n delete=False).name\n else:\n file_path = os.path.join(self.events_dir, file_name)\n with open(file_path, 'a') as f:\n f.write(content)",
"def save_log(self, test_status: str = Status.FAILED):\n self.__log.close()\n sys.stdout = self.__original_stdout\n if test_status == Status.PASSED and Logger.__KEEP_LOG_FLAG not in sys.argv:\n if os.path.isfile(self.__log_file_path):\n os.remove(self.__log_file_path)\n print(Colors.OKBLUE + \"\\nLog file has been removed\\n\" + Colors.ENDC)\n return\n\n if os.path.isfile(self.__log_file_path):\n print(Colors.OKBLUE + \"\\nLog file has been kept at: {}\\n\".format(self.__log_file_path) + Colors.ENDC)",
"def test_02_log_something(self):\n logger = get_logger(self)\n logger.info('Info in test_02')\n logger.debug('Debug in test_02')\n logger.warn('Warn in test_02')\n logfiles = glob.glob(os.path.join(self.LOG_FOLDER,\n '{}*.log'.format(self.scenario)))\n assert logfiles\n print(logfiles)\n for logfile in logfiles:\n with open(logfile) as f:\n for line in f:\n print(line.strip())",
"def _createLogFile(LogFile,date,LocalPath,ShowTagsResult):\n try:\n LOG = open(LogFile,\"w\")\n if _verbose:\n print(\"Writing Production Host, Location, Release and Tags information in %s\" % LogFile) \n LOG.write(\"These performance tests were executed on host %s and published on %s\" % (HOST,date))\n LOG.write(\"They were run in %s\" % LocalPath)\n LOG.write(\"Results of showtags -r in the local release:\\n%s\" % ShowTagsResult)\n LOG.close()\n except IOError as detail:\n print(\"WARNING: Can't create log file\") \n print(detail)",
"def log_event_to_file(event):\n with open('eventlogs/{}.json'.format(time.time()), 'w') as event_write:\n event_write.write(json_dumpstring(event))\n pass",
"def create_writer(self, session: tf.Session):\n self.logger.writer = tf.summary.FileWriter(str(self.info.summary_path), session.graph)",
"def write_log_to_file(filename, content):\n append_to_file(filename, content)",
"def saveLogFile(self, fname = \"data/status.txt\"):\n with open(fname, 'w') as f:\n f.write(\"<br>\\n\".join(self.logLines))\n self.log(\"wrote \"+fname)",
"def log_to_file(text, status='INFO'):\n outfile = open(LogName, 'a')\n outfile.write(timestamp()+' - '+status+' - '+str(text)+'\\n')\n outfile.close()",
"def printToLogfile (self, text):\n if self.logFile is not None:\n self.logFile.write(text)\n self.logFile.flush()",
"def main(): \n suite = unittest.TestLoader().discover(unitTestDirectory) \n os.chdir(os.path.join(os.getcwd(), unitTestDirectory)) #need to change cwd if the unit test runs files that it doesn't just import\n \n f = open('log_file.txt', 'w')\n testRunner = unittest.TextTestRunner(f, verbosity=2).run(suite) #diverts stderr to the log_file when running the test suite\n f.close()",
"def dump_to_log(self, log_dir, log_filename):\n\t\twith open(os.path.join(log_dir, log_filename), \"w\") as f:\n\t\t\tf.write(\"================ Arguments ==================\\n\")\n\t\t\tfor k, v in vars(self).items():\n\t\t\t\tf.write(\"{} : {}\\n\".format(k, v))\n\t\t\tf.write(\"=============================================\\n\")",
"def log(msg=\"\"):\n print(msg)\n sys.stdout.flush()\n f = open(\"/target/testdriver.log\", \"a\")\n f.write('{:%Y-%m-%d %H:%M:%S.%s} :: '.format(datetime.datetime.now()))\n f.write(f\"{msg}\\n\")\n f.close()",
"def write_tests(project_name, root_dir):\r\n test_path = get_file_path(root_dir, \"tests\", \"%s_tests.py\" % project_name) #Get the path for setup.py\r\n test_content = get_test_text(project_name)\r\n \r\n test_file = open(test_path, 'w')\r\n test_file.write(test_content)\r\n test_file.close()\r\n print_file(test_path)",
"def test_04_logs(self):\n\n file_name = 'train-test.log'\n request_json = {'file':'train-test.log'}\n r = requests.get('http://localhost:{}/logs/{}'.format(port,file_name))\n\n with open(file_name, 'wb') as f:\n f.write(r.content)\n \n self.assertTrue(os.path.exists(file_name))\n\n if os.path.exists(file_name):\n os.remove(file_name)",
"def writeLog(self):\n if self.logBuffer != None and self.logging :\n f = open(self.logfileName, 'w')\n self.logBuffer += \"Final Fitness: %f\\n\" % self.getTotalReward()\n self.logBuffer += \"\\n\"\n f.write(self.logBuffer)\n f.close()",
"def write_log(message: str, base_url, path=\"logs/\"):\n print(message)\n url_filename = url_to_filename(base_url)\n filename = f\"{path}LOG-{url_filename}.txt\"\n\n if os.path.exists(filename):\n append_write = \"a\"\n else:\n append_write = \"w\"\n\n f = open(filename, append_write)\n f.write(message)\n f.close()"
] | [
"0.77759415",
"0.6469761",
"0.6405734",
"0.63734347",
"0.6358866",
"0.6292069",
"0.627844",
"0.6241813",
"0.62395686",
"0.6190359",
"0.618326",
"0.6178866",
"0.6163366",
"0.61453724",
"0.61151177",
"0.6075739",
"0.6044056",
"0.6033274",
"0.5966068",
"0.59578186",
"0.5957704",
"0.59228593",
"0.5915319",
"0.5910582",
"0.5851896",
"0.5845745",
"0.5840753",
"0.5827593",
"0.5812578",
"0.58048826"
] | 0.7797923 | 0 |
Add process time with the log messages. | def extra_log(self, string):
if hasattr(self.parent, "log"):
self.parent.log += f"\r\n[{time.process_time()}] "
self.parent.log += string + "\r\n" | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_line_in_log():\n logging.info(' ' + '-' * 60 + '\\n')",
"def add_log(self, log):\n log = str(datetime.datetime.now()) + \": \"+log+\"\\n\"\n print(log)\n self.logs.append(log)\n if len(self.logs) > 10:\n self.append_to_logfile()",
"def extra_log(self, string):\n if hasattr(self.parent, \"log\"):\n self.parent.log += \"\\r\\n[%s] \" % time.process_time()\n self.parent.log += string + \"\\r\\n\"",
"def writeToLog(self,msg):\n\tlocaltime = \"%s \"%time.strftime(\"%H:%M:%S\",time.localtime())\n\tpid = \"%s \"%self.pid\n self.log.write(pid+localtime+'###### '+msg+'\\n')",
"def log(self, line):\n now = datetime.datetime.now()\n time = datetime.datetime.strftime(now, '(%d %b %Y %H:%M:%S)')\n with open(self.logfile, 'a') as log:\n log.write(time + ' ' + line + '\\n')",
"def log(self, msg):\n current_datetime = self.get_date_time()\n self.file.write(\"%s %s\\n\" % (current_datetime, msg))",
"def log(self, msg=\"\"):\n if len(msg):\n msg = \"[%.03fs] %s\" % (time.time()-self.timeStart, msg)\n print(msg)\n self.logLines.append(msg)",
"def add_message(self, message):\n message_time = t.process_time()\n self.log_cache.append((message, message_time))\n if len(self.log_cache) % 20 == 0:\n self._commit_log_db()",
"def task_display_funny_time():\n print(\"funny time is %s\" % datetime.datetime.now())\n logger.info(\"Hurray its working\")",
"def writeLog(pid):\n\tglobal processes,logfile,strikes,sleep\n\tproc = processes[pid]\n\tlogfile.write('[%s] %d %s %f%%cpu %f%%mem (over %d s): %s\\n'%(time.strftime('%b %d %H:%M:%S'),pid,proc.user,proc.cpu,proc.mem,proc.count*sleep,proc.command))",
"def log_time(name):\n if DEBUG:\n now = time.time()\n logging.debug('emcc step \"%s\" took %.2f seconds', name, now - TimeLogger.last)\n TimeLogger.update()",
"def logprllwrite(self):\n sql = '''select to_char(time_waited, 'FM99999999999999990') retvalue \n from v$system_event se, v$event_name en where se.event(+) \n = en.name and en.name = 'log files parallel write' '''\n self.cur.execute(sql)\n res = self.cur.fetchall()\n for i in res:\n print(i[0])",
"def logStats(self, msg):\n self.logLinesStats.append(msg)",
"async def log_time(self, event):\n sender = await event.get_sender()\n user = utils.get_display_name(sender)\n\n message = event.message\n\n time = message.date.astimezone(self.__to_zone).time().hour\n\n logging.debug(\"Got the following message: \\\"\" + event.raw_text + \"\\\" at time \" + str(time))\n\n self.__contact_times.labels(user).observe(time)",
"def _reportExecTime(self, exec_time, outputFile):\n f=self.openFile(outputFile, \"a\") #open for appending\n f.write ('<font face=\"verdana\" color=\"' + AutoGrader.Const.ANALYTICS_COLOR2 + '\">[Execution Time: ' + format(\"%0.4f\" % exec_time) + ' sec.]</font><br>\\n')\n f.close()",
"def appendMsg(self, msg):\n # self.message += msg\n theTime = self.logger.mytime()\n # self.message += theTime + \" \" + str( msg )\n self.message = str(self.message) + str(theTime) + \" \" + str(msg)",
"def log(message):\n print(\"{0}: {1}\".format(acm.Time.TimeNow(), message))",
"def exec_time_processor(self):\n with open(join(self.logs_dir, \"clock_time.dat\"), 'w') as fh:\n fh.write(\"Time ExecutionTime ClockTime\\n\")\n while True:\n rexp = (yield)\n fh.write(self.time_str + \"\\t\" +\n \"\\t\".join(x for x in rexp.groups()) + \"\\n\")\n self._tick = True",
"def _log(self, runtime, extra):\n\t\tif extra is None:\n\t\t\tdebug(\"Timer - %s took %d ms\" % (self._item, 1000 * runtime))\n\t\telse:\n\t\t\tdebug(\"Timer - %s [%s] took %d ms\" % (self._item, str(extra), 1000 * runtime))\n\t\treturn self",
"def LogProcess(self):\n time = datetime.today().strftime('%a %Y%b%d %X')\n# Get user name.\n f = os.popen(\"whoami\",\"r\")\n user = f.read().strip()\n f.close()\n\n entry = '%s\\t%s\\t%s\\t%s\\n' % (time, self.topdir, user, self.version)\n\n if ismounted(c.exams_file):\n# Append info to the exams file.\n try:\n f = open(c.exams_file,'a+')\n f.seek(0, 2)\n f.write(entry)\n f.close()\n except:\n# Not a huge problem if this doesn't work.\n pass",
"def Log(self, times):\n\n print '--'\n print times.PrettyPrintLog()\n\n return",
"def __call__(self, msg='', total=False):\r\n if not total:\r\n time_lapse = time.time() - self.last_time\r\n full_msg = \"%s: %s\" % (msg, format_time(time_lapse))\r\n else:\r\n # FIXME: Too much logic duplicated\r\n time_lapse = time.time() - self.start_time\r\n full_msg = \"%s: %.2fs, %.1f min\" % (msg, time_lapse,\r\n time_lapse / 60)\r\n print(full_msg, file=sys.stderr)\r\n if self.logfile is not None:\r\n try:\r\n with open(self.logfile, 'a') as f:\r\n print(full_msg, file=f)\r\n except:\r\n \"\"\" Multiprocessing writing to files can create race\r\n conditions. Rather fail silently than crash the\r\n calculation.\r\n \"\"\"\r\n # XXX: We actually need a debug flag to disable this\r\n # silent failure.\r\n self.last_time = time.time()",
"def _log2mylog(self, msg):\n time_str = mod_time.strftime(\n \"%Y-%m-%d %H:%M:%S\", mod_time.localtime(mod_time.time())\n )\n msg = str(msg)\n content = \"%s [%s]\\n\" % (time_str, msg)\n fa = open(self.mylogfile, \"a\")\n fa.write(content)\n fa.close()",
"def log(self, message):\n timestamp = time.strftime(\"[%H:%M:%S]\", time.localtime(time.time()))\n self.file.write('%s %s\\n' % (timestamp, message))\n self.file.flush()",
"def _log_update_time(self, *_):\n import time\n if not hasattr(self, '_time'):\n setattr(self, '_time', time.time())\n _time = time.time()\n debug('Time since last call: {:.6f}s'.format(_time - getattr(self, '_time')))\n setattr(self, '_time', _time)",
"def trace(msg):\n import datetime\n print('[{:%Y-%m-%d %H:%M:%S}]: '.format(datetime.datetime.now()) + msg)",
"def add_log(conn, task, start_time):\n cursor = conn.cursor()\n cursor.execute('INSERT INTO timelogs (task, start_time) VALUES (?, ?);', (task, start_time))",
"def do_timestamp_messages(self, messages):\n timestamp = self.env.now\n self.reception_records[timestamp] = messages\n log.debug(\"{} recorded {}\".format(self, self.reception_records))",
"def print_log (self, n = None):\r\n\t\tif n is None:\r\n\t\t\tn = len(self.log)\r\n\t\t\r\n\t\tfor i in range(-n,0):\r\n\t\t\tprint('@ {0: 8.1f} ms, {1} : {2}'.format(1000*self.log[i]['proctime'], self.log[i]['type'], self.log[i]['desc']) )",
"def internal_event(self):\n # log activity\n self.log_activity(LogEntry(\n sys_time=time(),\n logical_time=self.logical_clock,\n action=\"work\"\n ))"
] | [
"0.67774904",
"0.6688818",
"0.6468294",
"0.64525807",
"0.6431555",
"0.6324461",
"0.6312171",
"0.6251729",
"0.6245165",
"0.62316775",
"0.6187182",
"0.61647475",
"0.6160086",
"0.61188847",
"0.6091051",
"0.60706383",
"0.6057694",
"0.60288566",
"0.60258675",
"0.60083884",
"0.60078716",
"0.5989666",
"0.5983083",
"0.59767365",
"0.59348774",
"0.59277207",
"0.5915972",
"0.59032565",
"0.5900044",
"0.58637893"
] | 0.66905326 | 1 |
Factory for subfield items. | def subfieldFactory(name):
from pythia.pyre.inventory import facility
return facility(name, family="subfield", factory=Subfield) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def subfield():\n return Subfield()",
"def __init__(self, *args, **kwargs):\n super(ListFieldType, self).__init__(*args, **kwargs)\n\n self.item_info = self.field_info.get('items')",
"def create_subspecialty(sub_data):\n return get_or_create_object(sub_data, Subspecialty)",
"def test_customWidgetFactory(self):\n\n value_type = TextLine(__name__='bar')\n self.field = List(__name__='foo', value_type=value_type)\n request = TestRequest()\n\n # set up the custom widget factory and verify that it works\n sw = CustomWidgetFactory(ListSequenceWidget)\n widget = sw(self.field, request)\n assert widget.subwidget is None\n assert widget.context.value_type is value_type\n\n # set up a variant that specifies the subwidget to use and verify it\n class PollOption:\n pass\n ow = CustomWidgetFactory(ObjectWidget, PollOption)\n sw = CustomWidgetFactory(ListSequenceWidget, subwidget=ow)\n widget = sw(self.field, request)\n assert widget.subwidget is ow\n assert widget.context.value_type is value_type",
"def GetSubfieldDef(fielddef):\n\n format_, addrdef, datadef, arraydef, validate, cmd, converter = GetFieldDef(fielddef, fields='format_, addrdef, datadef, arraydef, validate, cmd, converter')\n\n # create new arraydef\n if len(arraydef) > 1:\n arraydef = arraydef[1:]\n else:\n arraydef = None\n\n # create new datadef\n if isinstance(datadef, tuple):\n if cmd is not None:\n datadef = (arraydef, validate, cmd)\n else:\n datadef = (arraydef, validate)\n else:\n datadef = arraydef\n\n # set new field def\n subfielddef = None\n if converter is not None:\n subfielddef = (format_, addrdef, datadef, converter)\n else:\n subfielddef = (format_, addrdef, datadef)\n\n return subfielddef",
"def add_sub_factories(self) -> None:\n for field in get_model_fields(self.model, base=False, foreign=True, m2m=False):\n if not hasattr(self.factory, field.name):\n factory_name = self._get_factory_name_for_model(field.related_model)\n if field.related_model == self.model:\n _factory = SelfFactory(factory=self.factory, required=not field.null)\n else:\n _factory = SubFactory(\n factory=factory_name,\n required=not field.null,\n related_model=field.related_model\n )\n setattr(self.factory, field.name, _factory)",
"def txnSubCollectionFactory(txnSubCollection, txn):\n subCollection = txnSubCollection.cloneMetaData()\n subCollection.append(txn)\n return subCollection",
"def multivalue_field_factory(field_class):\n class NewField(field_class):\n widget = forms.SelectMultiple\n\n def to_python(self, value):\n if not value:\n return []\n return [\n # Only append non-empty values (this avoids e.g. trying to cast '' as an integer)\n super(field_class, self).to_python(v) for v in value if v\n ]\n\n return type('MultiValue{}'.format(field_class.__name__), (NewField,), dict())",
"def test_subwidget(self):\n self.field = List(__name__='foo',\n value_type=TextLine(__name__='bar'))\n request = TestRequest()\n\n class PollOption:\n pass\n ow = CustomWidgetFactory(ObjectWidget, PollOption)\n widget = SequenceWidget(\n self.field, self.field.value_type, request, subwidget=ow)\n assert widget.subwidget is ow",
"def make_instance(self, include_optional):\n # model = rcc.models.crf_item_definition.CRFItemDefinition() # noqa: E501\n if include_optional :\n return CRFItemDefinition(\n item_document_file = rcc.models.file_base64.FileBase64(\n value = '0', \n file_name = '0', \n content_type = '0', \n file_size = 56, ), \n item_data_type = '0', \n measurement_unit_name = '0', \n variable_name = '0', \n label = '0', \n label_plain_text = '0', \n phi_status = True, \n left_alignment = True, \n rc_oid = '0', \n field_width = 56, \n info_text = '0', \n min_value = '0', \n max_value = '0', \n show_validator = True, \n soft_validation = True, \n calc_field_equation = '0', \n custom_info1 = '0', \n custom_info2 = '0', \n warning_when_left_empty = '0', \n stratification_variable = True, \n study_dictionary = '0', \n default_value = '0', \n subject_group = True, \n creation_source = '0', \n promis_oid = '0', \n promis_final_score = True, \n item_fhir_metadata = '0', \n required_query_description = '0', \n crfitem_metadata = rcc.models.crf_item_metadata.CRFItemMetadata(\n item_metadata_oid = '0', \n column_number = 56, \n page_number_label = '0', \n question_number_label = '0', \n left_item_text = '0', \n right_item_text = '0', \n regexp = '0', \n regexp_error_msg = '0', \n ordinal = 56, \n required = True, \n response_layout = '0', \n width_decimal = '0', \n show_item = True, \n code_ref = '0', \n group_oid = '0', \n is_required = True, \n disp_sequence = 56, \n branching_equation = '0', \n crf_version_oid = '0', \n hide_from_survey = True, \n position_row = 56, \n position_column = 56, \n item_data_type = '0', \n measurement_unit_name = '0', \n variable_name = '0', \n label = '0', \n label_plain_text = '0', \n phi_status = True, \n left_alignment = True, \n field_width = 56, \n info_text = '0', \n min_value = '0', \n max_value = '0', \n show_validator = True, \n soft_validation = True, \n calc_field_equation = '0', \n custom_info1 = '0', \n custom_info2 = '0', \n stratification_variable = True, \n show_response_set_value_too = True, \n study_dictionary = '0', \n default_value = '0', \n subject_group = True, \n required_query_description = '0', \n warning_when_left_empty = '0', \n dynamic_list_rs_values_eq = '0', \n dynamic_list_type = '0', \n dynamic_list_no_duplicates = True, \n used_in_dys_fields = True, \n econsent_signature = True, ), \n crfitem_metadata_group = rcc.models.crf_item_metadata_group.CRFItemMetadataGroup(\n crfitem_metadata = [\n rcc.models.crf_item_metadata.CRFItemMetadata(\n item_metadata_oid = '0', \n column_number = 56, \n page_number_label = '0', \n question_number_label = '0', \n left_item_text = '0', \n right_item_text = '0', \n regexp = '0', \n regexp_error_msg = '0', \n ordinal = 56, \n required = True, \n response_layout = '0', \n width_decimal = '0', \n show_item = True, \n code_ref = '0', \n group_oid = '0', \n is_required = True, \n disp_sequence = 56, \n branching_equation = '0', \n crf_version_oid = '0', \n hide_from_survey = True, \n position_row = 56, \n position_column = 56, \n item_data_type = '0', \n measurement_unit_name = '0', \n variable_name = '0', \n label = '0', \n label_plain_text = '0', \n phi_status = True, \n left_alignment = True, \n field_width = 56, \n info_text = '0', \n min_value = '0', \n max_value = '0', \n show_validator = True, \n soft_validation = True, \n calc_field_equation = '0', \n custom_info1 = '0', \n custom_info2 = '0', \n stratification_variable = True, \n show_response_set_value_too = True, \n study_dictionary = '0', \n default_value = '0', \n 
subject_group = True, \n required_query_description = '0', \n warning_when_left_empty = '0', \n dynamic_list_rs_values_eq = '0', \n dynamic_list_type = '0', \n dynamic_list_no_duplicates = True, \n used_in_dys_fields = True, \n econsent_signature = True, )\n ], )\n )\n else :\n return CRFItemDefinition(\n item_document_file = rcc.models.file_base64.FileBase64(\n value = '0', \n file_name = '0', \n content_type = '0', \n file_size = 56, ),\n )",
"def subtable(\n field: str,\n table: Type[ModelledTable],\n subfield: Optional[str] = None,\n pivot: Optional[str] = None,\n selectors: Optional[Dict[str, PrimitiveTypes]] = None,\n) -> Callable[[Type[SecondTable]], Type[SecondTable]]:\n\n if not subfield:\n subfield = field\n\n if not selectors:\n selectors = dict()\n\n sub: SubTable[ModelledTable] = SubTable(table, subfield, pivot, selectors)\n\n def _subtable(cls: Type[SecondTable]) -> Type[SecondTable]:\n \"\"\"Adds a subtable key to a Table\"\"\"\n\n if not issubclass(cls, Table):\n raise Exception(f\"{cls.__name__} is not a sub class of Table\")\n\n subtables: Dict[str, SubTable[ModelledTable]] = getattr(cls, _SUBTABLES, {})\n subtables[field] = sub\n setattr(cls, _SUBTABLES, subtables)\n\n return cls\n\n return _subtable",
"def extend_or_add_fields(cls, subfields, dbmanager, flag_mixin_atroot, propname, proplabel):\n import mdbmodel_fieldset\n if (flag_mixin_atroot):\n # prepare extra fields that will be added at root; this doesnt actually create any prerequisites\n cls.extend_fields(subfields)\n else:\n # add a special sub table that will contain some fields, using a helper class object attached to us\n # create (AND REGISTER) the new helper object\n backrefname = cls.get_dbtablename_pure()\n mdbmodel_fieldset.MewloDbFieldset.make_fieldset_dbobjectclass(cls, propname, proplabel, backrefname, dbmanager, subfields)",
"def __getitem__(self, item: dict) -> 'Field':\n raise NotImplementedError(self)",
"def _subtable(cls: Type[SecondTable]) -> Type[SecondTable]:\n\n if not issubclass(cls, Table):\n raise Exception(f\"{cls.__name__} is not a sub class of Table\")\n\n subtables: Dict[str, SubTable[ModelledTable]] = getattr(cls, _SUBTABLES, {})\n subtables[field] = sub\n setattr(cls, _SUBTABLES, subtables)\n\n return cls",
"def Item(self) -> object:",
"def Item(self) -> object:",
"def _make_subset(cls, name, data, **kwargs):\r\n return cls(name, data, **kwargs)",
"def _create_item(self, item_id: str, data: dict) -> Pipeline:\n return Pipeline(id=item_id, **data)",
"def NewItems(self) -> _n_1_t_7:",
"def _video(self, parent, group):\r\n return ItemFactory.create(\r\n parent_location=parent.location,\r\n category=\"video\",\r\n display_name=\"Group {} Sees This Video\".format(group),\r\n )",
"def _make_item(self, parent_item: FolderTreeItem, folder: dict, level=0) -> FolderTreeItem:\n for sub_folder in folder['folders']:\n sub_folder_item = FolderTreeItem(Folder(sub_folder), parent_item)\n item = self._make_item(sub_folder_item, sub_folder, level + 1)\n parent_item.append_child(item)\n for query in folder['queries']:\n item = QueryTreeItem(Query(query), parent_item)\n parent_item.append_child(item)\n return parent_item",
"def build_schema(self, spec, **kwargs):\n item_body = spec['items']\n item_builder = self.builder_provider.get_builder(item_body['type'])\n return fields.List(item_builder.build_schema(item_body), **self.translate_args(spec, **kwargs))",
"def __init__(self, parent_=None, instance_name_=None, **values):\n self.__parent = parent_\n self.__instance_name = instance_name_\n\n self._factories = {}\n\n for name, field in self._get_fields().items():\n if isinstance(field, fields.Factory):\n # for factory fields, we need to create a new factory with the given factory_type\n value = field.factory_type(field.type, name_=name, parent_instance_=self)\n self._factories[name] = value\n else:\n value = values.get(name, field.from_raw(field.default))\n\n # accept raw as a default value\n # and set inner value, so it should be availale from the start\n setattr(self, f\"__{name}\", value)",
"def item_subadres_adapter(obj, request):\n return {\n 'id': obj.id,\n 'subadres': obj.subadres,\n 'postadres': obj.postadres,\n 'status': {\n 'id': obj.status.id,\n 'naam': obj.status.naam,\n 'definitie': obj.status.definitie\n },\n 'aard': {\n 'id': obj.aard.id,\n 'naam': obj.aard.naam,\n 'definitie': obj.aard.definitie\n },\n 'metadata': {\n 'begin_tijd': obj.metadata.begin_tijd,\n 'begin_datum': obj.metadata.begin_datum,\n 'begin_bewerking': {\n 'id': obj.metadata.begin_bewerking.id,\n 'naam': obj.metadata.begin_bewerking.naam,\n 'definitie': obj.metadata.begin_bewerking.definitie\n },\n 'begin_organisatie': {\n 'id': obj.metadata.begin_organisatie.id,\n 'naam': obj.metadata.begin_organisatie.naam,\n 'definitie': obj.metadata.begin_organisatie.definitie\n }\n }\n }",
"def __init__(self, item_data):\n self.order_item_id = item_data['OrderItemRowId']\n self.quantity = item_data['Quantity']\n self.stock_id = item_data['pkStockItemId']\n self.sku = item_data['SKU']\n self.title = item_data['ItemTitle']\n self.despatch_unit_cost = item_data['DespatchUnitCost']\n self.cost_ex_tax = item_data['CostExTax']\n self.cost_inc_tax = item_data['CostIncTax']\n self.per_unit_inc_tax = item_data['PerUnitIncTax']\n self.per_unit_ex_tax = item_data['PerUnitExTax']\n self.tax_rate = item_data['TaxRate']\n self.total_tax = item_data['TotalTax']\n self.line_discount = item_data['LineDiscount']\n self.tax_cost_inclusive = item_data['TaxCostInclusive']\n self.note = item_data['Note']\n self.parent_item_id = item_data['ParentItemRowId']\n self.has_children = item_data['HasChildren']\n self.child_items = item_data['ChildItems']\n self.has_options = item_data['HasOptions']\n self.options = item_data['Options']",
"def create_sub_time_series_one_item(sub_data: pandas.core.frame.DataFrame, item: str, store: str):\n ## create sub dataset\n sub_data = sub_data[sub_data['SKU'] == item]\n sub_data = sub_data[sub_data['Store'] == store]\n sub_data = sub_data.sort_values(by=\"Date\")\n return sub_data",
"def make(self, item):\n self.name = item.get(\"name\", \"\")\n self.description = item.get(\"description\", \"\")\n self.type = item.get(\"type\", \"filler\")\n if not isinstance(self.type, str) or self.type is None:\n self.usable = NotUsable\n elif len(self.type) > 1:\n self.set_usable(self.type)\n else:\n self.usable = NotUsable",
"def make_field(field):\n\n if \"time\" in field:\n return TimeField(field)\n if \"zd\" in field:\n return RadianField(field)\n else:\n return SimpleField(field)",
"def _create_item(self, parent_location, category, display_name, **kwargs):\n return ItemFactory.create(\n parent_location=parent_location,\n category=category,\n display_name=display_name,\n publish_item=False,\n user_id=self.user.id,\n **kwargs\n )",
"def _create_item(self, category, name, data, metadata, parent_category, parent_name, draft=True, split=True):\r\n location = self.old_course_key.make_usage_key(category, name)\r\n if not draft or category in DIRECT_ONLY_CATEGORIES:\r\n mongo = self.old_mongo\r\n else:\r\n mongo = self.draft_mongo\r\n mongo.create_and_save_xmodule(location, data, metadata, self.runtime)\r\n if isinstance(data, basestring):\r\n fields = {'data': data}\r\n else:\r\n fields = data.copy()\r\n fields.update(metadata)\r\n if parent_name:\r\n # add child to parent in mongo\r\n parent_location = self.old_course_key.make_usage_key(parent_category, parent_name)\r\n if not draft or parent_category in DIRECT_ONLY_CATEGORIES:\r\n mongo = self.old_mongo\r\n else:\r\n mongo = self.draft_mongo\r\n parent = mongo.get_item(parent_location)\r\n parent.children.append(location)\r\n mongo.update_item(parent, self.userid)\r\n # create pointer for split\r\n course_or_parent_locator = BlockUsageLocator(\r\n course_key=self.split_course_key,\r\n block_type=parent_category,\r\n block_id=parent_name\r\n )\r\n else:\r\n course_or_parent_locator = self.split_course_key\r\n if split:\r\n self.split_mongo.create_item(course_or_parent_locator, category, self.userid, block_id=name, fields=fields)"
] | [
"0.7079664",
"0.60948706",
"0.57870966",
"0.5703622",
"0.5617625",
"0.56146836",
"0.56075746",
"0.5595222",
"0.554094",
"0.546303",
"0.54049605",
"0.53629017",
"0.53175354",
"0.5301199",
"0.5221799",
"0.5221799",
"0.51944876",
"0.51886076",
"0.5142889",
"0.5127228",
"0.5126402",
"0.5116837",
"0.5099944",
"0.50934803",
"0.50798714",
"0.5075126",
"0.50698483",
"0.5046927",
"0.5024666",
"0.49899125"
] | 0.7673134 | 0 |
Factory associated with Subfield. | def subfield():
return Subfield() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def subfieldFactory(name):\n from pythia.pyre.inventory import facility\n return facility(name, family=\"subfield\", factory=Subfield)",
"def add_sub_factories(self) -> None:\n for field in get_model_fields(self.model, base=False, foreign=True, m2m=False):\n if not hasattr(self.factory, field.name):\n factory_name = self._get_factory_name_for_model(field.related_model)\n if field.related_model == self.model:\n _factory = SelfFactory(factory=self.factory, required=not field.null)\n else:\n _factory = SubFactory(\n factory=factory_name,\n required=not field.null,\n related_model=field.related_model\n )\n setattr(self.factory, field.name, _factory)",
"def GetSubfieldDef(fielddef):\n\n format_, addrdef, datadef, arraydef, validate, cmd, converter = GetFieldDef(fielddef, fields='format_, addrdef, datadef, arraydef, validate, cmd, converter')\n\n # create new arraydef\n if len(arraydef) > 1:\n arraydef = arraydef[1:]\n else:\n arraydef = None\n\n # create new datadef\n if isinstance(datadef, tuple):\n if cmd is not None:\n datadef = (arraydef, validate, cmd)\n else:\n datadef = (arraydef, validate)\n else:\n datadef = arraydef\n\n # set new field def\n subfielddef = None\n if converter is not None:\n subfielddef = (format_, addrdef, datadef, converter)\n else:\n subfielddef = (format_, addrdef, datadef)\n\n return subfielddef",
"def create_subspecialty(sub_data):\n return get_or_create_object(sub_data, Subspecialty)",
"def subtable(\n field: str,\n table: Type[ModelledTable],\n subfield: Optional[str] = None,\n pivot: Optional[str] = None,\n selectors: Optional[Dict[str, PrimitiveTypes]] = None,\n) -> Callable[[Type[SecondTable]], Type[SecondTable]]:\n\n if not subfield:\n subfield = field\n\n if not selectors:\n selectors = dict()\n\n sub: SubTable[ModelledTable] = SubTable(table, subfield, pivot, selectors)\n\n def _subtable(cls: Type[SecondTable]) -> Type[SecondTable]:\n \"\"\"Adds a subtable key to a Table\"\"\"\n\n if not issubclass(cls, Table):\n raise Exception(f\"{cls.__name__} is not a sub class of Table\")\n\n subtables: Dict[str, SubTable[ModelledTable]] = getattr(cls, _SUBTABLES, {})\n subtables[field] = sub\n setattr(cls, _SUBTABLES, subtables)\n\n return cls\n\n return _subtable",
"def _subtable(cls: Type[SecondTable]) -> Type[SecondTable]:\n\n if not issubclass(cls, Table):\n raise Exception(f\"{cls.__name__} is not a sub class of Table\")\n\n subtables: Dict[str, SubTable[ModelledTable]] = getattr(cls, _SUBTABLES, {})\n subtables[field] = sub\n setattr(cls, _SUBTABLES, subtables)\n\n return cls",
"def subscription_factory_fixture():\n def _factory(capability):\n sub = Subscription()\n sub.capability = capability\n return sub\n return _factory",
"def make_field(field):\n\n if \"time\" in field:\n return TimeField(field)\n if \"zd\" in field:\n return RadianField(field)\n else:\n return SimpleField(field)",
"def test_customWidgetFactory(self):\n\n value_type = TextLine(__name__='bar')\n self.field = List(__name__='foo', value_type=value_type)\n request = TestRequest()\n\n # set up the custom widget factory and verify that it works\n sw = CustomWidgetFactory(ListSequenceWidget)\n widget = sw(self.field, request)\n assert widget.subwidget is None\n assert widget.context.value_type is value_type\n\n # set up a variant that specifies the subwidget to use and verify it\n class PollOption:\n pass\n ow = CustomWidgetFactory(ObjectWidget, PollOption)\n sw = CustomWidgetFactory(ListSequenceWidget, subwidget=ow)\n widget = sw(self.field, request)\n assert widget.subwidget is ow\n assert widget.context.value_type is value_type",
"def test_subwidget(self):\n self.field = List(__name__='foo',\n value_type=TextLine(__name__='bar'))\n request = TestRequest()\n\n class PollOption:\n pass\n ow = CustomWidgetFactory(ObjectWidget, PollOption)\n widget = SequenceWidget(\n self.field, self.field.value_type, request, subwidget=ow)\n assert widget.subwidget is ow",
"def field_subtype(f, default=MISSING, *, unwrap=True):\n return _field_type(f, SUBTYPE, default, unwrap=unwrap)",
"def __init__(self, parent_=None, instance_name_=None, **values):\n self.__parent = parent_\n self.__instance_name = instance_name_\n\n self._factories = {}\n\n for name, field in self._get_fields().items():\n if isinstance(field, fields.Factory):\n # for factory fields, we need to create a new factory with the given factory_type\n value = field.factory_type(field.type, name_=name, parent_instance_=self)\n self._factories[name] = value\n else:\n value = values.get(name, field.from_raw(field.default))\n\n # accept raw as a default value\n # and set inner value, so it should be availale from the start\n setattr(self, f\"__{name}\", value)",
"def get_factory():",
"def childFactory(self, request, childName):\n return None",
"def factory(self):\n raise NotImplementedError()",
"def multivalue_field_factory(field_class):\n class NewField(field_class):\n widget = forms.SelectMultiple\n\n def to_python(self, value):\n if not value:\n return []\n return [\n # Only append non-empty values (this avoids e.g. trying to cast '' as an integer)\n super(field_class, self).to_python(v) for v in value if v\n ]\n\n return type('MultiValue{}'.format(field_class.__name__), (NewField,), dict())",
"def _new_field(self):\n field = self.domain.new_field()\n return field",
"def __createField(self, field):\n name = field['name']\n fType = field['type']\n fieldLength = None\n if 'shape' in name.lower():\n return\n elif \"String\" in fType:\n fieldType = \"TEXT\"\n fieldLength = field['length']\n elif \"Date\" in fType:\n fieldType = \"DATE\"\n elif \"SmallInteger\" in fType:\n fieldType = \"SHORT\"\n elif \"Integer\" in fType:\n fieldType = \"LONG\"\n elif \"Double\" in fType:\n fieldType = \"DOUBLE\"\n elif \"Single\" in fType:\n fieldType = \"FLOAT\"\n else:\n fieldType = \"Unknown\"\n featureClass = self.featureClassLocation + \"\\\\\" + self.name\n validatedName = arcpy.ValidateFieldName(name, self.featureClassLocation)\n arcpy.AddField_management(in_table=featureClass, field_name=name, field_type=fieldType, field_length=fieldLength)",
"def factory(self):\n return self._factory",
"def factory(self):\n return self._factory",
"def extend_or_add_fields(cls, subfields, dbmanager, flag_mixin_atroot, propname, proplabel):\n import mdbmodel_fieldset\n if (flag_mixin_atroot):\n # prepare extra fields that will be added at root; this doesnt actually create any prerequisites\n cls.extend_fields(subfields)\n else:\n # add a special sub table that will contain some fields, using a helper class object attached to us\n # create (AND REGISTER) the new helper object\n backrefname = cls.get_dbtablename_pure()\n mdbmodel_fieldset.MewloDbFieldset.make_fieldset_dbobjectclass(cls, propname, proplabel, backrefname, dbmanager, subfields)",
"def _make_subset(cls, name, data, **kwargs):\r\n return cls(name, data, **kwargs)",
"def factory_method(self):\n pass",
"def factory_method(self):\n pass",
"def txnSubCollectionFactory(txnSubCollection, txn):\n subCollection = txnSubCollection.cloneMetaData()\n subCollection.append(txn)\n return subCollection",
"def getFactorys(self) -> List[ghidra.app.util.viewer.field.FieldFactory]:\n ...",
"def __init__(self, field: str):\n super().__init__()\n self.field = field",
"def get_factory(self):\n\n return Factory(type(self), self.kwargs)",
"def create_field(self, field, dim_translation=None):\n raise NotImplementedError",
"def create_field(dj_field, **kwargs):\n if isinstance(dj_field, dj_models.OneToOneField):\n return field.OneToOne.from_dj_field(dj_field, **kwargs)\n elif isinstance(dj_field, dj_models.ForeignKey):\n return field.ForeignKey.from_dj_field(dj_field, **kwargs)\n elif isinstance(dj_field, dj_models.ManyToManyField):\n return field.ManyToMany.from_dj_field(dj_field, **kwargs)\n else:\n return field.Field.from_dj_field(dj_field, **kwargs)"
] | [
"0.8500797",
"0.66517395",
"0.6441253",
"0.6377734",
"0.59815156",
"0.58264554",
"0.5662838",
"0.5660655",
"0.56087625",
"0.5576944",
"0.5576053",
"0.5484294",
"0.54726523",
"0.5440579",
"0.5439738",
"0.5400487",
"0.528335",
"0.52721506",
"0.52566725",
"0.52566725",
"0.5199021",
"0.5194737",
"0.51229864",
"0.51229864",
"0.51133466",
"0.50936073",
"0.5090678",
"0.50849265",
"0.5080003",
"0.5052508"
] | 0.77598864 | 1 |
Gets the short path name of a given long path. | def get_short_path_name(long_name: str):
output_buf_size = _GetShortPathNameW(long_name, None, 0)
if output_buf_size <= 0:
return None
output_buf = ctypes.create_unicode_buffer(output_buf_size)
needed = _GetShortPathNameW(long_name, output_buf, output_buf_size)
assert 0 < needed < output_buf_size
return output_buf.value | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_short_path_name(long_name):\n output_buf_size = 0\n while True:\n output_buf = ctypes.create_unicode_buffer(output_buf_size)\n needed = _GetShortPathNameW(long_name, output_buf, output_buf_size)\n if output_buf_size >= needed:\n return output_buf.value\n else:\n output_buf_size = needed",
"def get_short_path(content):",
"def get_short_name(self):\n return self.full_name.split(' ')[0]",
"def short_name(self, length: int = 25) -> str:\n dir_name, file_name = os.path.split(self.name)\n file_root, file_ext = os.path.splitext(file_name)\n return (file_root[:length] + '..') if len(file_root) > length else file_root",
"def get_folder_short_name_for_location(self, location):\n _method_name = 'get_folder_short_name_for_location'\n _logger.entering(location.get_folder_path(), class_name=_class_name, method_name=_method_name)\n folder_dict = self.__get_dictionary_for_location(location, False)\n result = ''\n if SHORT_NAME in folder_dict:\n result = folder_dict[SHORT_NAME]\n _logger.exiting(class_name=_class_name, method_name=_method_name, result=result)\n return result",
"def short_filename(path: Path,\n length: int = MAX_FILENAME_LENGTH) -> str:\n shorted_name = Path(path).name\n if len(shorted_name) > length:\n shorted_name = ''.join(\n shorted_name[:length // 2].strip() +\n '...' +\n shorted_name[-length // 2:].strip())\n return shorted_name",
"def full_path_to_name(self):\n return self._full_path_to_name",
"def getShortName(self) -> str:\n return self.short_name",
"def path_name(self):\n return self.full_name",
"def findShortestPath(self):\r\n pass",
"def short_name(self):\n return self.get(\"short_name\", decode=True)",
"def shortname(self):\n return self.get(\"shortName\")",
"def short_name(self) -> str:\n return self.name_components[-1]",
"def shortpath(path):\r\n import os\r\n if path.startswith(base_dir):\r\n return path[len(base_dir) + len(os.path.sep) : ]\r\n return path",
"def _shortpath(abspath):\r\n b = os.path.dirname(os.path.normpath(sys.modules[settings.SETTINGS_MODULE].__file__))\r\n p = os.path.normpath(abspath)\r\n return p[len(os.path.commonprefix([b, p])):]",
"def getLongName(self) -> str:\n return self.long_name",
"def full_name(self):\n path = [str(p) for p in self.path]\n # TODO add further checks, the mapping should only occur on stdlib.\n try:\n path[0] = self._mapping[path[0]]\n except KeyError:\n pass\n for key, repl in self._tuple_mapping.items():\n if tuple(path[:len(key)]) == key:\n path = [repl] + path[len(key):]\n\n return '.'.join(path if path[0] else path[1:])",
"def path_shorten(str_path, length = 80) -> str:\n if length < 0:\n length = os.get_terminal_size().columns + length\n if len(str_path) > length:\n l_parts = list(pathlib.PurePath(str_path).parts)\n l_copy = l_parts.copy()\n max = len(l_parts)\n offset = -1\n center = max // 2\n while len(str_path) > length:\n offset += 1\n l_shorten = [i % (max + 1) for i in range( center - offset,\n center + offset + 1)]\n for prt in l_shorten: l_copy[prt] = '...'\n str_path = str(pathlib.PurePath(*l_copy))\n return str_path",
"def shorten_path(path, length):\n if len(path) < length:\n return path\n if os.path.sep not in path:\n return shorten_string(path, length)\n\n short_base = \"\"\n if path.startswith(os.path.sep):\n short_base = os.path.sep\n path = path[1:]\n parts = path.split(os.path.sep)\n short_base += os.path.sep.join([p[0] for p in parts[:-1]])\n if len(short_base) > length:\n short_base = \"\"\n\n # Shorten the last part:\n short_name = parts[-1]\n last_length = length - len(short_base)\n if short_base:\n last_length = last_length - 1\n short_name = shorten_string(short_name, last_length)\n return os.path.join(short_base, short_name)",
"def get_short_name(self):\n\n return self.name",
"def get_short_name(self):\n return self.name",
"def get_short_name(self):\n return self.name",
"def get_short_name(self):\n return self.name",
"def get_short_name(self):\n return self.name",
"def get_short_name(self):\n return self.name",
"def get_short_name(self):\n return self.name",
"def get_short_name(self):\n return self.name",
"def get_short_name(self):\n return self.name",
"def name_from_path(path):\n return path[0:-3]",
"def get_short_code():\n return rh.get_short_code(request)"
] | [
"0.8374927",
"0.692538",
"0.6779394",
"0.6752157",
"0.6703952",
"0.65418506",
"0.6538999",
"0.6492663",
"0.64758646",
"0.6439164",
"0.63933027",
"0.6386224",
"0.636427",
"0.6346333",
"0.6328241",
"0.63225263",
"0.6192011",
"0.61536336",
"0.6148895",
"0.6138921",
"0.61234444",
"0.61234444",
"0.61234444",
"0.61234444",
"0.61234444",
"0.61234444",
"0.61234444",
"0.61234444",
"0.61044645",
"0.60989374"
] | 0.8415998 | 0 |
Open a jpg file, or merge several jpg files and then open the result | def execute_file(self, event=None):
file_list = self.get_path_list()
print(file_list)
if not file_list:
return
# merge image
        # Fix a memory-leak bug: the previously opened image was not released, so the image opened the second time was still the old one
try:
self.photos.destroy()
except:
pass
self.photos.imgs = file_list
merged_photo = self.photos.merge_photos()
# show image
try:
window.destroy()
except:
import traceback
traceback.print_exc()
window.build_img_canvas()
window.show_img_in_canvas(merged_photo) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def open_frame(path,number):\n num=str(number).zfill(3) #Zero filling\n name = glob.glob(path+\"/*\"+num+\"*\")\n if len(name)==0:\n name = glob.glob(path+\"/\"+str(number)+\".png\")\n if len(name)>1:\n print \"too many matches \",len(name),\" found\"\n name = name[0]\n img = Image.open(name)\n img = np.asarray(img)\n img.setflags(write=1)\n return img",
"def load_jpgs(path, size=(224, 224)):\n fnames = os.listdir(path)\n imgs = []\n i = 0\n if i<1500:\n for f in fnames:\n f= path + '/'+f\n if (os.path.isfile(f) and os.path.getsize(f) > 0):\n if not re.match('.+(jpg|jpeg|JPEG|JPG)', f):\n continue\n try:\n #image = Image.open(os.path.join(path, f))\n image = Image.open(f)\n except OSError:\n continue # ignore corrupt files\n data = list(image.getdata())\n im = Image.new(image.mode, image.size)\n im.putdata(data)\n if im.mode != 'RGB':\n im = im.convert('RGB')\n im = crop_center_or_reshape(im, size)\n img = 2 * (np.asarray(im) / 255) - 1\n #img= np.asarray(im)\n imgs.append(img)\n i= i+1\n\n return np.array(imgs)",
"def main():\n me = SimpleImage(\"images/me.JPG\")\n dinosaur = SimpleImage(\"images/dinosaur.jpg\")\n\n dinosaur.make_as_big_as(me)\n combine = magic(me, dinosaur)\n combine.show()",
"def test_merge_images(self):\n test_folder = base_path +'/test_data/merging_tests/single_merge/'\n # the files are: render1.png and background.jpg\n\n background = Image.open(test_folder+\"background.jpg\")\n foreground = Image.open(test_folder+\"render1.png\")\n output, bbox = mi.merge_images(foreground, background)\n self.assertEqual((300,300),output.size)\n self.assertEqual('JPEG',output.format)",
"def open_pngs_in_dir(out_dir):\n pngs = glob.glob(os.path.join(out_dir, '*png'))\n operating_system = platform.system()\n if 'Windows' in operating_system:\n os.system(\"start \" + \" \".join(pngs))\n elif 'Darwin' in operating_system:\n os.system('open ' + \" \".join(pngs))",
"def im_open(path):\n\n try:\n assert os.path.isdir(path)\n #get file list in directory - glob includes full path\n files = sorted(glob.glob('{}{}*'.format(path,os.sep)), key=sort_key) \n #load the collection\n raw_stack = io.imread_collection(files)\n #turn the collection into a np array and remove extraneous OCT portion from 1025:1083 on x axis. (z,y,x)\n #if .bmp files are open (from pv-oct), the slicing will not affect them, the x-axis is only 540 pixels.\n stack = io.collection.concatenate_images(raw_stack)[:,:,0:1024]\n \n return stack\n\n except AssertionError:\n sys.exit(\"A non-directory object was given to the __open__ function\")",
"def _open_images(training_filenames, path):\n imagePaths=[os.path.join(path,f) for f in training_filenames]\n faces=[]\n for i, imagePath in enumerate(imagePaths):\n faceImg=Image.open(imagePath).convert('L')\n faceNp=np.array(faceImg,'uint8')\n faces.append(faceNp)\n return faces",
"def _open_img(self, img_name):\n try:\n img = Image.open(img_name)\n photo = ImageTk.PhotoImage(img)\n return photo\n except IOError:\n Debug.printi(\"Unable to find image \" + img_name, Debug.Level.ERROR)",
"def openFile(self):\r\n from SXM import FileIO,Data\r\n fname = str(QFileDialog.getOpenFileName(self.widget,self.tr(\"Open File\"), \\\r\n \".\",FileIO.getFilterString(types=(Data.Image,))))\r\n if len(fname) > 0:\r\n root, ext = os.path.splitext(fname)\r\n self.statusBar().showMessage(self.tr(\"Loading data: %1\").arg(fname),2000)\r\n image = FileIO.fromFile(fname)\r\n image.load()\r\n imwin = ImageWindow(self,image)\r\n self.Images.append(imwin)\r\n self.updateImageList()\r\n imwin.windowModality = False\r\n imwin.show()",
"def load_jpg_series(files: List[str]):\n sort_files_by_name(files)\n volume = _load_volume_from_jpg(files)\n return files, volume",
"def JPGtoMatrix(path,w,h):\n listing = os.listdir(path)\n listing.sort()\n count = 0\n docFiles = []\n for infile in listing:\n count = count + 1\n docFiles.append(infile)\n matrix = np.zeros((w*h,count))\n for i in range(len(listing)):\n matrix[:,i]=JPGtoArray(join(path,listing[i]))\n return matrix,listing",
"def test_single_merge(self):\n test_folder = base_path +'/test_data/merging_tests/single_test/'\n # the files are: render1.png and background.jpg\n output_file = os.path.join(test_folder, \"output1.jpg\")\n if(os.path.isfile(output_file)):\n os.unlink(output_file)\n\n mi.add_background(test_folder+\"render1.png\", test_folder+\"background.jpg\", output_file)\n self.assertTrue(os.path.isfile(output_file))\n output = Image.open(output_file)\n self.assertEqual((300,300),output.size)\n self.assertEqual('JPEG',output.format)",
"def openFile(path_name):\n if os.path.isdir(path_name):\n reader = sitk.ImageSeriesReader()\n dicom_names = reader.GetGDCMSeriesFileNames(path_name)\n reader.SetFileNames(dicom_names)\n image_object = reader.Execute()\n \n elif os.path.isfile(path_name):\n image_object = sitk.ReadImage(path_name)\n\n else:\n print(\"Path name wrong.\")\n return None\n\n return image_object",
"def open_images_in(directory):\n\n files = [\n filename\n for filename in os.listdir(directory)\n if \"_\" in filename and not filename.startswith(\"joined\")\n ]\n tiles = []\n if len(files) > 0:\n i = 0\n for file in files:\n pos = get_image_column_row(file)\n im = Image.open(os.path.join(directory, file))\n\n position_xy = [0, 0]\n count = 0\n for a, b in zip(pos, im.size):\n position_xy[count] = a * b\n count = count + 1\n tiles.append(\n Tile(\n image=im,\n position=pos,\n number=i + 1,\n coords=position_xy,\n filename=file,\n )\n )\n i = i + 1\n return tiles",
"def main():\n fg = SimpleImage('image_contest/me.jpg')\n bg = SimpleImage('image_contest/house.png')\n bg.make_as_big_as(fg)\n combined_img = combine(bg, fg)\n combined_img.show()",
"def fileCmd(self):\n filename = askopenfilename() \n self.cnvImgOrig.displayImage(filename)\n self.cnvImgTest.displayImage(filename)",
"def openFiles(self, prog):\n prog = Utilities.normabspath(prog)\n # Open up the new files.\n self.openSourceFile(prog)",
"def combineImages(path=None, imgfiles=None, cols=3, size=300):\n\n font = ImageFont.truetype(\"Arial.ttf\", 15)\n x=size\n w=20\n i=0; j=0\n if imgfiles == None:\n imgfiles = findFiles(path, 'png')\n width = cols*(x+w)\n height = int(math.ceil(float(len(imgfiles))/cols)*x)\n new_im = Image.new('RGBA', (width, height), 'white')\n for f in imgfiles:\n name = os.path.basename(f).split('.')[0]\n if not os.path.exists(f):\n continue\n im = Image.open(f)\n im.thumbnail((x,x))\n new_im.paste(im, (i*x+w,j*x+w))\n draw = ImageDraw.Draw(new_im)\n draw.text((i*x+w,j*x+w), name, (0,0,0), font=font)\n i+=1\n if i>=cols:\n i=0; j+=1\n #new_im.show()\n path = os.path.split(imgfiles[0])[0]\n new_im.save(os.path.join(path,\"summary.png\"))\n return",
"def open_out_dir():\n\n pngs = glob.glob(os.path.join(out_dir, '*png'))\n operating_system = platform.system()\n if 'Windows' in operating_system:\n os.system(\"start \" + \" \".join(pngs))\n elif 'Darwin' in operating_system:\n os.system('open ' + \" \".join(pngs))",
"def convert_files_to_jpeg(joblist, inputpath, tmp_jpeg_folder, poppler_path=None):\n image_list = []\n threadlist = []\n for letter in joblist:\n threadlist.append((inputpath, tmp_jpeg_folder, joblist[letter][0], joblist[letter][-1], letter, poppler_path,))\n\n with concurrent.futures.ProcessPoolExecutor() as executor:\n for _, rv in zip(joblist, executor.map(pdf_to_jpeg, threadlist)):\n for path in rv:\n image_list.append(path)\n\n _, __, tmp_free = shutil.disk_usage(tmp_jpeg_folder)\n if (tmp_free/1000000) < 100:\n return False\n\n image_list.sort()\n return image_list",
"def view_media(self, obj):\n for handle in self.selected_handles():\n ref_obj = self.dbstate.db.get_object_from_handle(handle)\n mpath = media_path_full(self.dbstate.db, ref_obj.get_path())\n open_file_with_default_application(mpath)",
"def merge_chips(images_files, *, win_bounds):\n datasets = [rasterio.open(p) for p in images_files]\n img, _ = rasterio.merge.merge(datasets, bounds=win_bounds, method=mean_merge_method)\n for ds in datasets:\n ds.close()\n return img",
"def add_image(self, f_name,file,new_id):\r\n folder=tempfile.mktemp()\r\n os.mkdir(folder)\r\n datei=open(folder+'/'+f_name,'w+')\r\n datei.write(file.read())\r\n datei.close()\r\n val='' \r\n liste_ext=liste_val\r\n if(self.toolbox.hasProperty('eigene_formate')):\r\n self_val=self.toolbox.getProperty('eigene_formate').split(',')\r\n liste_ext=[]\r\n for x in self_val:\r\n liste_ext.append('_'+x+'.jpeg')\r\n for extension in liste_ext:\r\n #cmd='/usr/bin/convert '+folder+'/'+f_name+' -resize '+extension[1:-4]+'x'+extension[1:-4]+' '+folder+'/'+new_id+extension\r\n cmd='/usr/bin/convert '+folder+'/'+f_name+' -resize '+extension[1:-4]+' '+folder+'/'+new_id+extension\r\n order=os.popen(cmd).read()\r\n kurz_name='_'+str(f_name.split('.')[0])\r\n kurz_name=kurz_name.replace(' ','_')\r\n val=val+self.manage_addImage(id=new_id+kurz_name+extension,file=open(folder+'/'+new_id+extension),title=f_name, precondition='', content_type='',REQUEST=None)+' ' \r\n os.remove(folder+'/'+new_id+extension)\r\n os.remove(folder+'/'+f_name)\r\n os.rmdir(folder)\r\n txt=\"Datei Hochgeladen!<br>\"\r\n #my_root=self.toolbox\r\n #txt+=my_root.id+\"<br>\"\r\n #if(my_root.hasProperty('eigene_formate')):\r\n # txt+=my_root.getProperty('eigene_formate')+\"<br>\"\r\n return txt",
"def createAllImageFiles(poly, name) :\n \n for i in range(len(poly.getPaths())):\n fileName = name + \"_\" + str(i) + \".dot\"\n imgName = name + \"_\" + str(i) + \".jpg\"\n \n Command = \"neato -Tjpeg \" + fileName + \" -o \" + imgName\n run(Command, shell=True)",
"def TextureFiles():\n import shutil\n\n # first convert the .psd files to .png\n\n FbmDir = glo.outputFolder + '.fbm'\n\n for d1, d2, filenames in os.walk(FbmDir):\n for filename in filenames:\n \"\"\"filename: vitrin_diffuse.psd\n \"\"\"\n # print \"TextureFiles():\", filename\n if filename[-4:].upper() == '.PSD':\n #print \" -- FbmDir:\" , FbmDir\n #print \" -- in the if clause with filename:\" , filename\n #print \" -- glo.outputFolder\" , glo.outputFolder\n # FbmDir = '../fbx/simplelifeembedmedia.fbm'\n # filename = 'shelves_light.PSD'\n PsdToPngConverter(FbmDir, filename)\n\n # Move only the .png file to the ../png/ directory\n filename = filename[:-4] + '.png'\n src = os.path.join(FbmDir, filename)\n elif filename[0] != '.':\n src = os.path.join(FbmDir, filename)\n pass\n\n shutil.copy(src, glo.outputFolder)\n print os.path.join(glo.outputFolder, filename), \"\\n\"\n sys.stdout.flush()\n # for d1, d2, files in os.walk(glo.outputFolder):\n # if not filename in files:\n # #print \"moving: \", files, filename, not filename in files\n # shutil.copy(src, glo.outputFolder)\n # print os.path.join(glo.outputFolder, filename), \"\\n\"\n # else:\n # print \"%s/%s already exists. File not moved\" % (glo.outputFolder,filename)",
"def multiopen(files):\n tempfile = write_html(files)\n\n open_in_browser(tempfile)",
"def build_jpeg_preview(self, file_path, cache_path, page_id: int, extension='.jpg', size=(256,256)):\n\n # try:\n # os.mkdir(cache_path.format(d_id=document_id)+'/')\n # except OSError:\n # pass\n\n\n with open(file_path, 'rb') as odt:\n\n file_name = self.get_file_hash(file_path)\n if os.path.exists(\n '{path}{file_name}.pdf'.format(\n path=cache_path,\n file_name=file_name\n )):\n result = open(\n '{path}.pdf'.format(\n path=cache_path + file_name,\n ), 'rb')\n\n else:\n if os.path.exists(cache_path + file_name + '_flag'):\n time.sleep(2)\n self.build_pdf_preview(\n file_path=file_path,\n cache_path=cache_path,\n extension=extension\n )\n else:\n result = file_converter.office_to_pdf(odt, cache_path, file_name)\n\n input_pdf = PdfFileReader(result)\n output_pdf = PdfFileWriter()\n output_pdf.addPage(input_pdf.getPage(int(page_id)))\n output_stream = BytesIO()\n output_pdf.write(output_stream)\n output_stream.seek(0, 0)\n result2 = file_converter.pdf_to_jpeg(output_stream, size)\n\n\n\n file_name = self.get_file_hash(file_path, size)\n\n with open(\n '{path}{file_name}_{page_id}_{extension}'.format(\n file_name=file_name,\n path=cache_path,\n page_id=page_id,\n extension=extension\n ),\n 'wb') \\\n as jpeg:\n buffer = result2.read(1024)\n while buffer:\n jpeg.write(buffer)\n buffer = result2.read(1024)",
"def image_process(image_info):\n path = os.path.join(cfg.IMAGESET, image_info.get(\"index\") + \".jpg\")\n if not os.path.exists(path):\n raise IOError(\"please check your file is not exists: \" + path)\n def load_image(path):\n image = Image.open(path)\n return image\n return load_image(path)",
"def main():\n base_dir = '/home/sjimenez/imagenes_prueba'\n out_dir = '/home/sjimenez/easy_analysis'\n for _, _, files in os.walk(base_dir, topdown=False):\n for f in files:\n print('--------- {} ---------'.format(f))\n act_dir = osp.join(base_dir, f)\n act_im = cv2.imread(act_dir)\n if act_im is not None:\n get_image_stats(act_im, out_dir, f)\n else:\n print('Not able to open the image')",
"def append_component_images(pldm_fw_up_pkg, image_files):\n for image in image_files:\n with open(image, \"rb\") as file:\n for line in file:\n pldm_fw_up_pkg.write(line)"
] | [
"0.58855605",
"0.5591409",
"0.55525833",
"0.5439224",
"0.5414354",
"0.5400621",
"0.5384213",
"0.53814346",
"0.5351015",
"0.5304343",
"0.52945495",
"0.5280849",
"0.52760863",
"0.52742296",
"0.527252",
"0.5259538",
"0.5245993",
"0.52299297",
"0.5225909",
"0.52070266",
"0.51910776",
"0.5189418",
"0.51831883",
"0.5182659",
"0.5180952",
"0.51739705",
"0.51703113",
"0.51187783",
"0.51177955",
"0.50918704"
] | 0.60279435 | 0 |
Scroll canvas horizontally and redraw the image | def __scroll_x(self, *args, **kwargs):
self.canvas_image.xview(*args) # scroll horizontally
self.__show_image() # redraw the image | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __scroll_x(self, *args, **kwargs):\n self.canvas.xview(*args) # scroll horizontally\n self.__show_image() # redraw the image",
"def continuous_scroll(self, context):\n\n self.drawing.redraw_canvas(self.dy)\n \n return True",
"def refresh(self):\n\n # Delete old image (if needed) \n if self.canvas_image_id:\n self.canvas.delete(self.canvas_image_id)\n if debug > 5:\n print \"refresh: New image (x\", self.zoom, \") \", (self.xint, self.yint), (self.canvas[\"width\"], self.canvas[\"height\"]), [self.zoom * s for s in self.isize]\n\n scaled_isize = [self.xint[1] - self.xint[0],\n self.yint[1] - self.yint[0]]\n\n # Create the image for the canvas\n self.image = self.generator_func(self.zoom, self.xint, self.yint)\n self.canvas_image_id = self.canvas.create_image(0, 0, anchor=N+W,\n image=self.image)\n\n # Figure out where scroll bars should be and put them there.\n if self.xint[0] == 0 and int(self.isize[0] * self.zoom) == self.xint[1]:\n self.hscroll.grid_remove()\n else:\n self.hscroll.grid()\n self.hscroll.set(mapped_number(self.xint[0],\n (0, self.isize[0] * self.zoom -1),\n (0, 1)),\n mapped_number(self.xint[1] -1,\n (0, self.isize[0] * self.zoom -1),\n (0, 1)))\n if self.yint[0] == 0 and int(self.isize[1] * self.zoom) == self.yint[1]:\n self.vscroll.grid_remove()\n else:\n self.vscroll.grid()\n self.vscroll.set(mapped_number(self.yint[0],\n (0, self.isize[1] * self.zoom -1),\n (0, 1)),\n mapped_number(self.yint[1] -1,\n (0, self.isize[1] * self.zoom -1),\n (0, 1)))",
"def scrollDown_x(self):\r\n if self.x_stack<self.img.shape[2]-1:\r\n self.x_stack+=1\r\n \r\n self.pixmap5=self.drawPixmap(\"yz\")\r\n self.lbl5.setPixmap(self.pixmap5)\r\n self.pixmap6= self.writeEdge(\"yz\")\r\n self.lbl6.setPixmap(self.pixmap6)\r\n self.x_stack_lbl.setText(str(self.x_stack+1) + '/' + str(self.img.shape[2]))",
"def redraw_canvas(self, dy):\n self.scroll = dy/20\n \n if self.scroll > 0:\n \n if self.min_cairo < -20:\n self.min_cairo = 0 \n self.min_text += 1 \n self.max_text += 1\n \n #When bottom of document is reached stop scrolling\n if self.max_text > self.line_count + 2:\n self.min_cairo = 0\n self.min_text = self.line_count - 50\n self.max_text = self.line_count\n self.scroll = 0\n \n elif self.scroll < 0:\n if self.min_cairo > 0:\n self.min_cairo = -20\n self.min_text -= 1\n self.max_text -=1\n\n #Do not scroll up if already at top of document\n if self.min_text < 0:\n self.min_cairo = 20\n self.min_text = 0\n self.max_text = 50\n self.scroll = 0\n \n #Do the scrolling\n self.min_cairo -= self.scroll\n \n self.max_cairo = self.min_cairo\n self.invalidate_canvas()",
"def scrollUp_x(self):\r\n if self.x_stack>0:\r\n self.x_stack-=1\r\n \r\n self.pixmap5=self.drawPixmap(\"yz\")\r\n self.lbl5.setPixmap(self.pixmap5) \r\n self.pixmap6= self.writeEdge(\"yz\")\r\n self.lbl6.setPixmap(self.pixmap6)\r\n self.x_stack_lbl.setText(str(self.x_stack+1) + '/' + str(self.img.shape[2]))",
"def scrollDown(self):\r\n\r\n if self.z_stack<self.img.shape[0]-1:\r\n self.z_stack+=1\r\n \r\n #self.pixmap=QtGui.QPixmap.fromImage(ImageQt.ImageQt(misc.toimage(self.img[self.z_stack]))).scaled(500,500)\r\n self.pixmap= self.drawPixmap(\"xy\")\r\n self.lbl.setPixmap(self.pixmap)\r\n self.pixmap2= self.writeEdge(\"xy\")\r\n self.lbl2.setPixmap(self.pixmap2)\r\n self.z_stack_lbl.setText(str(self.z_stack+1) + '/' + str(self.img.shape[0]))",
"def __scroll_y(self, *args, **kwargs):\n self.canvas_image.yview(*args) # scroll vertically\n self.__show_image() # redraw the image",
"def scroll(self, delta_x, delta_y):\n if delta_x < 0:\n shift_x = 0\n xend = self.width + delta_x\n dt_x = 1\n else:\n shift_x = self.width - 1\n xend = delta_x - 1\n dt_x = -1\n if delta_y < 0:\n y = 0\n yend = self.height + delta_y\n dt_y = 1\n else:\n y = self.height - 1\n yend = delta_y - 1\n dt_y = -1\n while y != yend:\n x = shift_x\n while x != xend:\n self.format.set_pixel(\n self, x, y, self.format.get_pixel(self, x - delta_x, y - delta_y)\n )\n x += dt_x\n y += dt_y",
"def __scroll_y(self, *args, **kwargs):\n self.canvas.yview(*args) # scroll vertically\n self.__show_image() # redraw the image",
"def draw(self, thing):\n thing.draw(self, Point([2,2]), flip=1)\n\n # configure the scroll region\n bbox = Canvas.bbox(self.canvas, ALL)\n self.canvas.configure(scrollregion=bbox)",
"def __show_image(self):\n box_image = self.canvas_image.coords(self.container) # get image area\n box_canvas = (self.canvas_image.canvasx(0), # get visible area of the canvas\n self.canvas_image.canvasy(0),\n self.canvas_image.canvasx(self.canvas_image.winfo_width()),\n self.canvas_image.canvasy(self.canvas_image.winfo_height()))\n self.box_img_int = tuple(map(int, box_image)) # convert to integer or it will not work properly\n \n # Get scroll region box\n box_scroll = [min(self.box_img_int[0], box_canvas[0]), min(self.box_img_int[1], box_canvas[1]),\n max(self.box_img_int[2], box_canvas[2]), max(self.box_img_int[3], box_canvas[3])]\n # Horizontal part of the image is in the visible area\n if box_scroll[0] == box_canvas[0] and box_scroll[2] == box_canvas[2]:\n box_scroll[0] = self.box_img_int[0]\n box_scroll[2] = self.box_img_int[2]\n # Vertical part of the image is in the visible area\n if box_scroll[1] == box_canvas[1] and box_scroll[3] == box_canvas[3]:\n box_scroll[1] = self.box_img_int[1]\n box_scroll[3] = self.box_img_int[3]\n # Convert scroll region to tuple and to integer\n self.canvas_image.configure(scrollregion=tuple(map(int, box_scroll))) # set scroll region\n x1 = max(box_canvas[0] - box_image[0], 0) # get coordinates (x1,y1,x2,y2) of the image tile\n y1 = max(box_canvas[1] - box_image[1], 0)\n x2 = min(box_canvas[2], box_image[2]) - box_image[0]\n y2 = min(box_canvas[3], box_image[3]) - box_image[1]\n if int(x2 - x1) > 0 and int(y2 - y1) > 0: # show image if it in the visible area\n image = self.__pyramid[max(0, self.__curr_img)].crop( # crop current img from pyramid\n (int(x1 / self.__scale), int(y1 / self.__scale),\n int(x2 / self.__scale), int(y2 / self.__scale)))\n #\n imagetk = ImageTk.PhotoImage(image.resize((int(x2 - x1), int(y2 - y1)), self.__filter))\n self.imageid = self.canvas_image.create_image(max(box_canvas[0], self.box_img_int[0]),\n max(box_canvas[1], self.box_img_int[1]),\n anchor='nw', image=imagetk)\n self.canvas_image.lower(self.imageid) # set image into background\n self.canvas_image.imagetk = imagetk # keep an extra reference to prevent garbage-collection",
"def __show_image(self):\n box_image = self.canvas.coords(self.container) # get image area\n box_canvas = (self.canvas.canvasx(0), # get visible area of the canvas\n self.canvas.canvasy(0),\n self.canvas.canvasx(self.canvas.winfo_width()),\n self.canvas.canvasy(self.canvas.winfo_height()))\n box_img_int = tuple(map(int, box_image)) # convert to integer or it will not work properly\n # Get scroll region box\n box_scroll = [min(box_img_int[0], box_canvas[0]), min(box_img_int[1], box_canvas[1]),\n max(box_img_int[2], box_canvas[2]), max(box_img_int[3], box_canvas[3])]\n # Horizontal part of the image is in the visible area\n if box_scroll[0] == box_canvas[0] and box_scroll[2] == box_canvas[2]:\n box_scroll[0] = box_img_int[0]\n box_scroll[2] = box_img_int[2]\n # Vertical part of the image is in the visible area\n if box_scroll[1] == box_canvas[1] and box_scroll[3] == box_canvas[3]:\n box_scroll[1] = box_img_int[1]\n box_scroll[3] = box_img_int[3]\n # Convert scroll region to tuple and to integer\n self.canvas.configure(scrollregion=tuple(map(int, box_scroll))) # set scroll region\n x1 = max(box_canvas[0] - box_image[0], 0) # get coordinates (x1,y1,x2,y2) of the image tile\n y1 = max(box_canvas[1] - box_image[1], 0)\n x2 = min(box_canvas[2], box_image[2]) - box_image[0]\n y2 = min(box_canvas[3], box_image[3]) - box_image[1]\n if int(x2 - x1) > 0 and int(y2 - y1) > 0: # show image if it in the visible area\n if self.__huge and self.__curr_img < 0: # show huge image\n h = int((y2 - y1) / self.imscale) # height of the tile band\n self.__tile[1][3] = h # set the tile band height\n self.__tile[2] = self.__offset + self.imwidth * int(y1 / self.imscale) * 3\n self.__image.close()\n self.__image = Image.open(self.path) # reopen / reset image\n self.__image.size = (self.imwidth, h) # set size of the tile band\n self.__image.tile = [self.__tile]\n image = self.__image.crop((int(x1 / self.imscale), 0, int(x2 / self.imscale), h))\n else: # show normal image\n image = self.__pyramid[max(0, self.__curr_img)].crop( # crop current img from pyramid\n (int(x1 / self.__scale), int(y1 / self.__scale),\n int(x2 / self.__scale), int(y2 / self.__scale)))\n #\n imagetk = ImageTk.PhotoImage(image.resize((int(x2 - x1), int(y2 - y1)), self.__filter))\n imageid = self.canvas.create_image(max(box_canvas[0], box_img_int[0]),\n max(box_canvas[1], box_img_int[1]),\n anchor='nw', image=imagetk)\n self.canvas.lower(imageid) # set image into background\n if self.last_image_id != 0:\n self.canvas.delete(self.last_image_id)\n\n self.last_image_id = imageid\n self.canvas.imagetk = imagetk # keep an extra reference to prevent garbage-collection",
"def adjustScrolls(self):\n cwidth = self._canvas.winfo_width()\n cheight = self._canvas.winfo_height()\n self._canvas.xview_moveto(0.5*(self.canvwidth-cwidth)/self.canvwidth)\n self._canvas.yview_moveto(0.5*(self.canvheight-cheight)/self.canvheight)\n if cwidth < self.canvwidth or cheight < self.canvheight:\n self.hscroll.grid(padx=1, in_ = self, pady=1, row=1,\n column=0, rowspan=1, columnspan=1, sticky='news')\n self.vscroll.grid(padx=1, in_ = self, pady=1, row=0,\n column=1, rowspan=1, columnspan=1, sticky='news')\n else:\n self.hscroll.grid_forget()\n self.vscroll.grid_forget()",
"def scrollDown_y(self):\r\n if self.y_stack<self.img.shape[1]-1:\r\n self.y_stack+=1\r\n self.pixmap3=self.drawPixmap(\"xz\") \r\n self.lbl3.setPixmap(self.pixmap3)\r\n self.pixmap4= self.writeEdge(\"xz\")\r\n self.lbl4.setPixmap(self.pixmap4)\r\n self.y_stack_lbl.setText(str(self.y_stack+1) + '/' + str(self.img.shape[1]))",
"def draw(self, canvas):\n canvas.delete(\"all\")\n width = canvas.winfo_reqwidth()\n height = canvas.winfo_reqheight()\n\n image = ImageTk.PhotoImage(self.image())\n canvas.create_image(width/2, height/2, image=image)\n canvas.img = image",
"def scrollUp(self):\r\n if self.z_stack>0:\r\n self.z_stack-=1\r\n self.pixmap=self.drawPixmap(\"xy\")\r\n self.lbl.setPixmap(self.pixmap)\r\n self.pixmap2=self.writeEdge(\"xy\")\r\n self.lbl2.setPixmap(self.pixmap2)\r\n\r\n self.z_stack_lbl.setText(str(self.z_stack+1) + '/' + str(self.img.shape[0]))",
"def draw_canvas(self):\n\n self.canvas = Canvas(self)\n self.scrollbar = ttk.Scrollbar(self, orient= VERTICAL,\n command=self.canvas.yview) \n self.canvas.configure(yscrollcommand=self.scrollbar.set)\n \n # make sure to add scrollbar before adding the canvas\n self.scrollbar.pack(side=RIGHT, fill=Y)\n self.canvas.pack(side=TOP, fill=BOTH, expand=1, padx=20, pady=20)\n \n # adding a frame to hold all the widgets, ttk Frame doesn't support\n # background config option \n self.frame = Frame(self.canvas) \n self.canvas.create_window(0,0,window=self.frame, anchor='nw')",
"def draw(self, img):\n self._erase_last_line(self.img)\n\n idxs = np.argwhere(img[:, self._pos] == 0)\n self.prev_y = (idxs.min(), idxs.max())\n\n cv.line(img, (self._pos, 0), (self._pos, self.h), (0, 0, 0), 1)",
"def __reconfig__(self, event):\r\n x, y = event.width//2, event.height//2\r\n self.canvas.config(scrollregion=(-x, -y, x, y))",
"def linux_zoomer_plus(self, event):\n self.canvas.scale(\"all\", event.x, event.y, 1.1, 1.1)\n self.canvas.configure(scrollregion=self.canvas.bbox(\"all\"))",
"def mover_scroll(self, x, y):\n self.scrollx += x\n self.scrolly += y",
"def on_mouse_wheel(self,event,canvas):\n canvas.yview(\"scroll\",-1*event.delta/100,\"units\")",
"def scrollUp_y(self):\r\n if self.y_stack>0:\r\n self.y_stack-=1\r\n \r\n self.pixmap3=self.drawPixmap(\"xz\")\r\n self.lbl3.setPixmap(self.pixmap3)\r\n self.pixmap4= self.writeEdge(\"xz\")\r\n self.lbl4.setPixmap(self.pixmap4)\r\n self.y_stack_lbl.setText(str(self.y_stack+1) + '/' + str(self.img.shape[1]))",
"def move_horizontal(self):\r\n if self.movement == \"horizontal\" and self.flag_move:\r\n self.move_ball()\r\n self.canvas.after(50, self.move_horizontal)",
"def _on_mousewheel(event):\n if event.num == 4 or event.delta > 0:\n canvas.yview_scroll(-1, \"units\" )\n elif event.num == 5 or event.delta < 0:\n canvas.yview_scroll(1, \"units\" )",
"def horizontal_scroll(self, image, padding=True):\n\n image_list = list()\n height = image.size[1]\n\n # Scroll into the blank image.\n if padding:\n for y in range(1,17):\n section = image.crop((0, 0, 8, y))\n print section.width,section.height\n display_section = self.create_blank_image()\n display_section.paste(section, (0, 16 - y, 8, 16))\n image_list.append(display_section)\n return image_list\n\n #Scroll across the input image.\n for y in range(16, height + 1):\n section = image.crop((0, y - 16, 8, y))\n display_section = self.create_blank_image()\n display_section.paste(section, (0, 0, 8, 16))\n image_list.append(display_section)\n\n #Scroll out, leaving the blank image.\n if padding:\n for y in range(height - 15, height + 1):\n section = image.crop((0, y, 8, height))\n display_section = self.create_blank_image()\n display_section.paste(section, (0, 0, 8, 15 - (y - (height - 15))))\n image_list.append(display_section)\n\n #Return the list of images created\n return image_list",
"def __wheel(self, event):\n x = self.canvas_image.canvasx(event.x) # get coordinates of the event on the canvas\n y = self.canvas_image.canvasy(event.y)\n if self.outside(x, y): return # zoom only inside image area\n scale = 1.0\n # Respond to Linux (event.num) or Windows (event.delta) wheel event\n if event.num == 5 or event.delta == -120: # scroll down, smaller\n if round(self.__min_side * self.imscale) < 30: return # image is less than 30 pixels\n self.imscale /= self.__delta\n scale /= self.__delta\n if event.num == 4 or event.delta == 120: # scroll up, bigger\n i = min(self.canvas_image.winfo_width(), self.canvas_image.winfo_height()) >> 1\n if i < self.imscale: return # 1 pixel is bigger than the visible area\n self.imscale *= self.__delta\n scale *= self.__delta\n # Take appropriate image from the pyramid\n k = self.imscale * self.__ratio # temporary coefficient\n self.__curr_img = min((-1) * int(math.log(k, self.__reduction)), len(self.__pyramid) - 1)\n self.__scale = k * math.pow(self.__reduction, max(0, self.__curr_img))\n #\n self.canvas_image.scale('all', x, y, scale, scale) # rescale all objects\n # Redraw some figures before showing image on the screen\n self.redraw_figures() # method for child classes\n self.__show_image()",
"def updatescroll(self):\n if self.node:\n #self.update_idletasks() # Required, else dimension of content may not have been computed ?\n forgetit, forgetit, x1, forgetit = self.bbox(ALL)\n self.sizetree = self.node.sizetree() + (self.winfo_height() / self.nodeheight) - 1\n self.configure(scrollregion = (0, 0, x1, self.sizetree * self.nodeheight))",
"def scroll(self):\n x_position = self._player.get_position()[0]\n half_screen = self._master.winfo_width() / 2\n world_size = self._world.get_pixel_size()[0] - half_screen\n\n # Left side\n if x_position <= half_screen:\n self._view.set_offset((0, 0))\n\n # Between left and right sides\n elif half_screen <= x_position <= world_size:\n self._view.set_offset((half_screen - x_position, 0))\n\n # Right side\n elif x_position >= world_size:\n self._view.set_offset((half_screen - world_size, 0))"
] | [
"0.7781856",
"0.6695677",
"0.6625122",
"0.66034824",
"0.64015204",
"0.6336621",
"0.62105596",
"0.6134658",
"0.6117314",
"0.6087952",
"0.60482293",
"0.5974955",
"0.59536266",
"0.59532565",
"0.5856979",
"0.58364534",
"0.58310264",
"0.58037436",
"0.5784162",
"0.5746283",
"0.5735475",
"0.5700967",
"0.56167144",
"0.5609998",
"0.5547529",
"0.5539105",
"0.55353874",
"0.55134064",
"0.55090874",
"0.546265"
] | 0.78580177 | 0 |
Scroll canvas vertically and redraw the image | def __scroll_y(self, *args, **kwargs):
self.canvas_image.yview(*args) # scroll vertically
self.__show_image() # redraw the image | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __scroll_y(self, *args, **kwargs):\n self.canvas.yview(*args) # scroll vertically\n self.__show_image() # redraw the image",
"def refresh(self):\n\n # Delete old image (if needed) \n if self.canvas_image_id:\n self.canvas.delete(self.canvas_image_id)\n if debug > 5:\n print \"refresh: New image (x\", self.zoom, \") \", (self.xint, self.yint), (self.canvas[\"width\"], self.canvas[\"height\"]), [self.zoom * s for s in self.isize]\n\n scaled_isize = [self.xint[1] - self.xint[0],\n self.yint[1] - self.yint[0]]\n\n # Create the image for the canvas\n self.image = self.generator_func(self.zoom, self.xint, self.yint)\n self.canvas_image_id = self.canvas.create_image(0, 0, anchor=N+W,\n image=self.image)\n\n # Figure out where scroll bars should be and put them there.\n if self.xint[0] == 0 and int(self.isize[0] * self.zoom) == self.xint[1]:\n self.hscroll.grid_remove()\n else:\n self.hscroll.grid()\n self.hscroll.set(mapped_number(self.xint[0],\n (0, self.isize[0] * self.zoom -1),\n (0, 1)),\n mapped_number(self.xint[1] -1,\n (0, self.isize[0] * self.zoom -1),\n (0, 1)))\n if self.yint[0] == 0 and int(self.isize[1] * self.zoom) == self.yint[1]:\n self.vscroll.grid_remove()\n else:\n self.vscroll.grid()\n self.vscroll.set(mapped_number(self.yint[0],\n (0, self.isize[1] * self.zoom -1),\n (0, 1)),\n mapped_number(self.yint[1] -1,\n (0, self.isize[1] * self.zoom -1),\n (0, 1)))",
"def redraw_canvas(self, dy):\n self.scroll = dy/20\n \n if self.scroll > 0:\n \n if self.min_cairo < -20:\n self.min_cairo = 0 \n self.min_text += 1 \n self.max_text += 1\n \n #When bottom of document is reached stop scrolling\n if self.max_text > self.line_count + 2:\n self.min_cairo = 0\n self.min_text = self.line_count - 50\n self.max_text = self.line_count\n self.scroll = 0\n \n elif self.scroll < 0:\n if self.min_cairo > 0:\n self.min_cairo = -20\n self.min_text -= 1\n self.max_text -=1\n\n #Do not scroll up if already at top of document\n if self.min_text < 0:\n self.min_cairo = 20\n self.min_text = 0\n self.max_text = 50\n self.scroll = 0\n \n #Do the scrolling\n self.min_cairo -= self.scroll\n \n self.max_cairo = self.min_cairo\n self.invalidate_canvas()",
"def scrollDown_y(self):\r\n if self.y_stack<self.img.shape[1]-1:\r\n self.y_stack+=1\r\n self.pixmap3=self.drawPixmap(\"xz\") \r\n self.lbl3.setPixmap(self.pixmap3)\r\n self.pixmap4= self.writeEdge(\"xz\")\r\n self.lbl4.setPixmap(self.pixmap4)\r\n self.y_stack_lbl.setText(str(self.y_stack+1) + '/' + str(self.img.shape[1]))",
"def scrollDown(self):\r\n\r\n if self.z_stack<self.img.shape[0]-1:\r\n self.z_stack+=1\r\n \r\n #self.pixmap=QtGui.QPixmap.fromImage(ImageQt.ImageQt(misc.toimage(self.img[self.z_stack]))).scaled(500,500)\r\n self.pixmap= self.drawPixmap(\"xy\")\r\n self.lbl.setPixmap(self.pixmap)\r\n self.pixmap2= self.writeEdge(\"xy\")\r\n self.lbl2.setPixmap(self.pixmap2)\r\n self.z_stack_lbl.setText(str(self.z_stack+1) + '/' + str(self.img.shape[0]))",
"def continuous_scroll(self, context):\n\n self.drawing.redraw_canvas(self.dy)\n \n return True",
"def scrollUp_y(self):\r\n if self.y_stack>0:\r\n self.y_stack-=1\r\n \r\n self.pixmap3=self.drawPixmap(\"xz\")\r\n self.lbl3.setPixmap(self.pixmap3)\r\n self.pixmap4= self.writeEdge(\"xz\")\r\n self.lbl4.setPixmap(self.pixmap4)\r\n self.y_stack_lbl.setText(str(self.y_stack+1) + '/' + str(self.img.shape[1]))",
"def on_mouse_wheel(self,event,canvas):\n canvas.yview(\"scroll\",-1*event.delta/100,\"units\")",
"def draw(self, thing):\n thing.draw(self, Point([2,2]), flip=1)\n\n # configure the scroll region\n bbox = Canvas.bbox(self.canvas, ALL)\n self.canvas.configure(scrollregion=bbox)",
"def __show_image(self):\n box_image = self.canvas_image.coords(self.container) # get image area\n box_canvas = (self.canvas_image.canvasx(0), # get visible area of the canvas\n self.canvas_image.canvasy(0),\n self.canvas_image.canvasx(self.canvas_image.winfo_width()),\n self.canvas_image.canvasy(self.canvas_image.winfo_height()))\n self.box_img_int = tuple(map(int, box_image)) # convert to integer or it will not work properly\n \n # Get scroll region box\n box_scroll = [min(self.box_img_int[0], box_canvas[0]), min(self.box_img_int[1], box_canvas[1]),\n max(self.box_img_int[2], box_canvas[2]), max(self.box_img_int[3], box_canvas[3])]\n # Horizontal part of the image is in the visible area\n if box_scroll[0] == box_canvas[0] and box_scroll[2] == box_canvas[2]:\n box_scroll[0] = self.box_img_int[0]\n box_scroll[2] = self.box_img_int[2]\n # Vertical part of the image is in the visible area\n if box_scroll[1] == box_canvas[1] and box_scroll[3] == box_canvas[3]:\n box_scroll[1] = self.box_img_int[1]\n box_scroll[3] = self.box_img_int[3]\n # Convert scroll region to tuple and to integer\n self.canvas_image.configure(scrollregion=tuple(map(int, box_scroll))) # set scroll region\n x1 = max(box_canvas[0] - box_image[0], 0) # get coordinates (x1,y1,x2,y2) of the image tile\n y1 = max(box_canvas[1] - box_image[1], 0)\n x2 = min(box_canvas[2], box_image[2]) - box_image[0]\n y2 = min(box_canvas[3], box_image[3]) - box_image[1]\n if int(x2 - x1) > 0 and int(y2 - y1) > 0: # show image if it in the visible area\n image = self.__pyramid[max(0, self.__curr_img)].crop( # crop current img from pyramid\n (int(x1 / self.__scale), int(y1 / self.__scale),\n int(x2 / self.__scale), int(y2 / self.__scale)))\n #\n imagetk = ImageTk.PhotoImage(image.resize((int(x2 - x1), int(y2 - y1)), self.__filter))\n self.imageid = self.canvas_image.create_image(max(box_canvas[0], self.box_img_int[0]),\n max(box_canvas[1], self.box_img_int[1]),\n anchor='nw', image=imagetk)\n self.canvas_image.lower(self.imageid) # set image into background\n self.canvas_image.imagetk = imagetk # keep an extra reference to prevent garbage-collection",
"def __show_image(self):\n box_image = self.canvas.coords(self.container) # get image area\n box_canvas = (self.canvas.canvasx(0), # get visible area of the canvas\n self.canvas.canvasy(0),\n self.canvas.canvasx(self.canvas.winfo_width()),\n self.canvas.canvasy(self.canvas.winfo_height()))\n box_img_int = tuple(map(int, box_image)) # convert to integer or it will not work properly\n # Get scroll region box\n box_scroll = [min(box_img_int[0], box_canvas[0]), min(box_img_int[1], box_canvas[1]),\n max(box_img_int[2], box_canvas[2]), max(box_img_int[3], box_canvas[3])]\n # Horizontal part of the image is in the visible area\n if box_scroll[0] == box_canvas[0] and box_scroll[2] == box_canvas[2]:\n box_scroll[0] = box_img_int[0]\n box_scroll[2] = box_img_int[2]\n # Vertical part of the image is in the visible area\n if box_scroll[1] == box_canvas[1] and box_scroll[3] == box_canvas[3]:\n box_scroll[1] = box_img_int[1]\n box_scroll[3] = box_img_int[3]\n # Convert scroll region to tuple and to integer\n self.canvas.configure(scrollregion=tuple(map(int, box_scroll))) # set scroll region\n x1 = max(box_canvas[0] - box_image[0], 0) # get coordinates (x1,y1,x2,y2) of the image tile\n y1 = max(box_canvas[1] - box_image[1], 0)\n x2 = min(box_canvas[2], box_image[2]) - box_image[0]\n y2 = min(box_canvas[3], box_image[3]) - box_image[1]\n if int(x2 - x1) > 0 and int(y2 - y1) > 0: # show image if it in the visible area\n if self.__huge and self.__curr_img < 0: # show huge image\n h = int((y2 - y1) / self.imscale) # height of the tile band\n self.__tile[1][3] = h # set the tile band height\n self.__tile[2] = self.__offset + self.imwidth * int(y1 / self.imscale) * 3\n self.__image.close()\n self.__image = Image.open(self.path) # reopen / reset image\n self.__image.size = (self.imwidth, h) # set size of the tile band\n self.__image.tile = [self.__tile]\n image = self.__image.crop((int(x1 / self.imscale), 0, int(x2 / self.imscale), h))\n else: # show normal image\n image = self.__pyramid[max(0, self.__curr_img)].crop( # crop current img from pyramid\n (int(x1 / self.__scale), int(y1 / self.__scale),\n int(x2 / self.__scale), int(y2 / self.__scale)))\n #\n imagetk = ImageTk.PhotoImage(image.resize((int(x2 - x1), int(y2 - y1)), self.__filter))\n imageid = self.canvas.create_image(max(box_canvas[0], box_img_int[0]),\n max(box_canvas[1], box_img_int[1]),\n anchor='nw', image=imagetk)\n self.canvas.lower(imageid) # set image into background\n if self.last_image_id != 0:\n self.canvas.delete(self.last_image_id)\n\n self.last_image_id = imageid\n self.canvas.imagetk = imagetk # keep an extra reference to prevent garbage-collection",
"def draw_canvas(self):\n\n self.canvas = Canvas(self)\n self.scrollbar = ttk.Scrollbar(self, orient= VERTICAL,\n command=self.canvas.yview) \n self.canvas.configure(yscrollcommand=self.scrollbar.set)\n \n # make sure to add scrollbar before adding the canvas\n self.scrollbar.pack(side=RIGHT, fill=Y)\n self.canvas.pack(side=TOP, fill=BOTH, expand=1, padx=20, pady=20)\n \n # adding a frame to hold all the widgets, ttk Frame doesn't support\n # background config option \n self.frame = Frame(self.canvas) \n self.canvas.create_window(0,0,window=self.frame, anchor='nw')",
"def adjustScrolls(self):\n cwidth = self._canvas.winfo_width()\n cheight = self._canvas.winfo_height()\n self._canvas.xview_moveto(0.5*(self.canvwidth-cwidth)/self.canvwidth)\n self._canvas.yview_moveto(0.5*(self.canvheight-cheight)/self.canvheight)\n if cwidth < self.canvwidth or cheight < self.canvheight:\n self.hscroll.grid(padx=1, in_ = self, pady=1, row=1,\n column=0, rowspan=1, columnspan=1, sticky='news')\n self.vscroll.grid(padx=1, in_ = self, pady=1, row=0,\n column=1, rowspan=1, columnspan=1, sticky='news')\n else:\n self.hscroll.grid_forget()\n self.vscroll.grid_forget()",
"def __reconfig__(self, event):\r\n x, y = event.width//2, event.height//2\r\n self.canvas.config(scrollregion=(-x, -y, x, y))",
"def yview_scroll(self, number, what):\n self.tk.call(self._w, 'yview', 'scroll', number, what)",
"def on_configure(self, event):\n self.testCanvas.configure(scrollregion=self.testCanvas.bbox('all'))\n self.testCanvas.yview_moveto(1)",
"def _on_mousewheel(event):\n if event.num == 4 or event.delta > 0:\n canvas.yview_scroll(-1, \"units\" )\n elif event.num == 5 or event.delta < 0:\n canvas.yview_scroll(1, \"units\" )",
"def __scroll_x(self, *args, **kwargs):\n self.canvas_image.xview(*args) # scroll horizontally\n self.__show_image() # redraw the image",
"def scrollUp(self):\r\n if self.z_stack>0:\r\n self.z_stack-=1\r\n self.pixmap=self.drawPixmap(\"xy\")\r\n self.lbl.setPixmap(self.pixmap)\r\n self.pixmap2=self.writeEdge(\"xy\")\r\n self.lbl2.setPixmap(self.pixmap2)\r\n\r\n self.z_stack_lbl.setText(str(self.z_stack+1) + '/' + str(self.img.shape[0]))",
"def scrollDown_x(self):\r\n if self.x_stack<self.img.shape[2]-1:\r\n self.x_stack+=1\r\n \r\n self.pixmap5=self.drawPixmap(\"yz\")\r\n self.lbl5.setPixmap(self.pixmap5)\r\n self.pixmap6= self.writeEdge(\"yz\")\r\n self.lbl6.setPixmap(self.pixmap6)\r\n self.x_stack_lbl.setText(str(self.x_stack+1) + '/' + str(self.img.shape[2]))",
"def draw(self, img):\n self._erase_last_line(self.img)\n\n idxs = np.argwhere(img[:, self._pos] == 0)\n self.prev_y = (idxs.min(), idxs.max())\n\n cv.line(img, (self._pos, 0), (self._pos, self.h), (0, 0, 0), 1)",
"def Configure_YScroll( self ):\r\n Label(self.frame_scroll).pack( side = TOP )\r\n self.yscroll = Scrollbar( self.frame_scroll )\r\n self.yscroll.config( command = self.Vertical_Scroll )\r\n self.canvas_one.config( yscrollcommand = self.Double_Expand )\r\n self.canvas_two.config( yscrollcommand = self.Double_Expand )",
"def redraw(self):\n self.vispy_viewer.canvas.update()",
"def __scroll_x(self, *args, **kwargs):\n self.canvas.xview(*args) # scroll horizontally\n self.__show_image() # redraw the image",
"def updatescroll(self):\n if self.node:\n #self.update_idletasks() # Required, else dimension of content may not have been computed ?\n forgetit, forgetit, x1, forgetit = self.bbox(ALL)\n self.sizetree = self.node.sizetree() + (self.winfo_height() / self.nodeheight) - 1\n self.configure(scrollregion = (0, 0, x1, self.sizetree * self.nodeheight))",
"def scroll(self, delta_x, delta_y):\n if delta_x < 0:\n shift_x = 0\n xend = self.width + delta_x\n dt_x = 1\n else:\n shift_x = self.width - 1\n xend = delta_x - 1\n dt_x = -1\n if delta_y < 0:\n y = 0\n yend = self.height + delta_y\n dt_y = 1\n else:\n y = self.height - 1\n yend = delta_y - 1\n dt_y = -1\n while y != yend:\n x = shift_x\n while x != xend:\n self.format.set_pixel(\n self, x, y, self.format.get_pixel(self, x - delta_x, y - delta_y)\n )\n x += dt_x\n y += dt_y",
"def on_mousewheel(self, event):\r\n self.container_widgets[\"order_canvas\"].yview_scroll(-1 * int(event.delta / 120), \"units\")\r\n # TODO FIX SCROLLING\r",
"def down():\n global y, canvas # y é modificado\n canvas.create_line(x, y, x, y + 10)\n y += 10",
"def redraw(self):\n self.vispy_widget.canvas.update()",
"def scroll_window(self):\r\n window = tkinter.Frame(self.root)\r\n scroller = tkinter.Scrollbar(self.root, orient=\"vertical\",\r\n command=self.canvas.yview)\r\n self.canvas.configure(yscrollcommand=scroller.set)\r\n\r\n scroller.pack(side=\"right\", fill=\"y\")\r\n self.canvas.pack(side=\"left\", fill=\"both\", expand=True)\r\n self.canvas.create_window((4, 4), window=window, anchor=\"nw\",\r\n tags=\"self.window\")\r\n return window"
] | [
"0.78763294",
"0.7029265",
"0.6990234",
"0.6822299",
"0.67670774",
"0.6654772",
"0.6486098",
"0.63472086",
"0.6317045",
"0.6261508",
"0.62313896",
"0.61648124",
"0.6163939",
"0.613647",
"0.61271584",
"0.61243415",
"0.61053514",
"0.6080344",
"0.60629725",
"0.606112",
"0.6051997",
"0.60451305",
"0.6028399",
"0.60230815",
"0.60148615",
"0.59800255",
"0.59372365",
"0.59371126",
"0.5928361",
"0.57631946"
] | 0.79207057 | 0 |
Checks if the point (x,y) is outside the image area | def outside(self, x, y):
bbox = self.canvas_image.coords(self.container) # get image area
if bbox[0] < x < bbox[2] and bbox[1] < y < bbox[3]:
return False # point (x,y) is inside the image area
else:
return True # point (x,y) is outside the image area | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def outside(self, x, y):\n bbox = self.canvas.coords(self.container) # get image area\n if bbox[0] < x < bbox[2] and bbox[1] < y < bbox[3]:\n return False # point (x,y) is inside the image area\n else:\n return True # point (x,y) is outside the image area",
"def point_in_map(self, x, y):\r\n return 0 <= x < self.width and 0 <= y < self.height and (x,y) not in self.walls",
"def isOutside(self, point):\n return 1-self.isInside(point)",
"def is_out_of_bounds(img_height: int, img_width: int, x: float, y: float, patch_size: int) -> bool:\n patch_half_size_floored = patch_size // 2\n x_low = x - patch_half_size_floored\n x_high = x + patch_half_size_floored\n y_low = y - patch_half_size_floored\n y_high = y + patch_half_size_floored\n\n return x_low < 0 or x_high > img_width or y_low < 0 or y_high > img_height",
"def isOutsideBorder(self):\n if (self.posX < -self.myGalaxy.worldWidth or self.posX > self.myGalaxy.worldWidth or\n self.posY < -self.myGalaxy.worldHeight or self.posY > self.myGalaxy.worldHeight):\n return 1\n return 0",
"def _in_bounds(self, x, y):\r\n return 0 <= x < 8 and 0 <= y < 8",
"def isInsideImage(x, y, nx, ny, imageNx, imageNy):\r\n return ( ((x+nx) < imageNx) and ((y+ny) < imageNy) )",
"def is_point_within(self, x, y):\n return abs(x - self._x_position) <= self._x_length / 2 and abs(y - self._y_position) <= self._y_length / 2",
"def valid_coordinates(self, x, y):\n return ((x >= 0) and (x < self.width) and\n (y >= 0) and (y < self.height))",
"def in_bounds(self, x, y):\n return x >= 0 and x < 8 and y >= 0 and y < 8",
"def isInside(point_x, point_y, area_left, area_top, area_width, area_height):\n return (area_left <= point_x < area_left + area_width) and (area_top <= point_y < area_top + area_height)",
"def contains_point(self, x, y): \r\n n = len(self.points)\r\n inside = False\r\n \r\n x1, y1 = self.points[0]\r\n for i in range(n + 1):\r\n x2, y2 = self.points[i % n]\r\n if y > min(y1, y2):\r\n if y <= max(y1, y2):\r\n if x <= max(x1, x2):\r\n if y1 != y2:\r\n xinters = (y - y1) * (x2 - x1) / (y2 - y1) + x1\r\n if x1 == x2 or x <= xinters:\r\n inside = not inside\r\n x1, y1 = x2, y2\r\n \r\n return inside",
"def __isPointOnArea(self, point, area):\r\n\r\n pointX, pointY = point\r\n areaX,areaY,areaWidth,areaHeight = area\r\n\r\n if (pointX >= areaX and pointX <= areaX+areaWidth) and (pointY >= areaY and pointY <= areaY+areaHeight):\r\n return True\r\n else:\r\n return False",
"def is_inside(self, x: int, y: int) -> bool:\n pass",
"def test_point_within_dimensions_border():\n point = np.array([100, 20])\n image_dimensions = np.array([100, 100])\n assert not point_within_dimensions(point, image_dimensions)",
"def insideArea(point, area):\n x=point.real\n y=point.imag\n n = len(area)\n inside = False\n p1x = area[0].real\n p1y = area[0].imag\n for i in range(1, n + 1):\n p2x = area[i % n].real\n p2y = area[i % n].imag\n if y > min(p1y, p2y):\n if y <= max(p1y, p2y):\n if x <= max(p1x, p2x):\n if p1y != p2y:\n xinters = (y - p1y) * (p2x - p1x) / (p2y - p1y) + p1x\n if p1x == p2x or x <= xinters:\n inside = not inside\n p1x, p1y = p2x, p2y\n return inside",
"def _inside(self, x, y):\n wx, wy, w, h = self._raw_graph_window_dim()\n if wx <= x < wx + w and wy <= y < wy + h:\n return True\n return False",
"def filter_point(x, y, xlower, xupper, ylower, yupper):\n ignore = False\n if (x < xlower or x > xupper or y < ylower or y > yupper):\n ignore = True\n return ignore",
"def isinsidepointXY(x,p):\n \n return dist(x,p) < epsilon",
"def in_area(self, x, y):\n raise NotImplementedError",
"def check_pos(self, x, y):\n if x >= WINDOWWIDTH or y >= WINDOWHEIGHT or x <=0 or y <= 0:\n return True",
"def out_of_bounds(self):\n return not 0 <= self.nodes[0].x < WIDTH * SCALE or not 0 <= self.nodes[0].y < HEIGHT * SCALE",
"def is_point_in_box(x, y, bbox):\n if x < 200 and y < 200:\n return True\n return False",
"def isInternal(self, aPoint):\n if (aPoint.x >= self.pMin.x and aPoint.x <= self.pMax.x) \\\n and (aPoint.y >= self.pMin.y and aPoint.y <= self.pMax.y):\n return True\n else:\n return False",
"def test_inside_image(self):\r\n sample_img = Image(np.zeros((100, 200, 3), dtype=np.uint8))\r\n\r\n # test 4 corners\r\n top_left = Point(y=0, x=0)\r\n bottom_left = Point(y=99, x=0)\r\n top_right = Point(y=0, x=199)\r\n bottom_right = Point(y=99, x=199)\r\n\r\n assert top_left.inside(sample_img)\r\n assert bottom_left.inside(sample_img)\r\n assert top_right.inside(sample_img)\r\n assert bottom_right.inside(sample_img)\r\n\r\n # test out-side point\r\n pt1 = Point(y=-1, x=50)\r\n pt2 = Point(y=100, x=50)\r\n pt3 = Point(y=50, x=-1)\r\n pt4 = Point(y=50, x=200)\r\n\r\n assert not pt1.inside(sample_img)\r\n assert not pt2.inside(sample_img)\r\n assert not pt3.inside(sample_img)\r\n assert not pt4.inside(sample_img)",
"def validate_in(self, xcoord, ycoord):\r\n x = int(xcoord/(self.tr.bd.TILE_WIDTH + self.tr.bd.LINE_WIDTH))\r\n y = int(ycoord/(self.tr.bd.TILE_WIDTH + self.tr.bd.LINE_WIDTH))\r\n if not self.tr.turn_tracker and self.tr.bd.disks[x][y].halo_tag:\r\n return True, x, y\r\n else:\r\n return False, x, y",
"def obstacle_prone_area(self,image):\r\n\r\n start_x=int(self.start[0])\r\n start_y=int(self.start[1])\r\n goal_x=int(self.goal[0])\r\n goal_y=int(self.goal[1])\r\n print(goal_x,goal_y)\r\n if (image[int(self.maximum_size-goal_x),int(goal_y),0]==0) or ((image[int(self.maximum_size-start_x),int(start_y),0]==0)):\r\n #print(1)\r\n return False\r\n else:\r\n #print(2)\r\n return True",
"def check_coord_in_range(self, x, y):\n return 0 <= x < self.cols and 0 <= y < self.lines",
"def is_inside(self, mX, mY, point):\n return (math.sqrt((point[0] - mX) * (point[0] - mX)\n + (point[1] - mY) * (point[1] - mY)) <= 2)",
"def in_display(self, point):\n x, y = point\n if x < 0 or x > self.width or \\\n y < 0 or y > self.height:\n return False\n return True"
] | [
"0.87402356",
"0.7378075",
"0.7371467",
"0.7322463",
"0.7271476",
"0.71669763",
"0.7123005",
"0.70656526",
"0.7022389",
"0.7014238",
"0.696292",
"0.6954585",
"0.69515514",
"0.69209486",
"0.6896473",
"0.68796045",
"0.6856969",
"0.68375915",
"0.67989755",
"0.6798606",
"0.67816234",
"0.6774851",
"0.67660224",
"0.66965127",
"0.66963464",
"0.66877055",
"0.668064",
"0.6679758",
"0.6669126",
"0.664453"
] | 0.8744287 | 0 |
Dummy function to redraw figures in the child classes | def redraw_figures(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def redraw(event):\n if np.size(plt.get_figlabels()):\n #Need to check if figure is closed or not and only then do the following\n #operations. Else, the following operations will create a new figure\n ax.clear()\n drawRectangle(ax)\n fig.canvas.draw()\n else:\n pass",
"def redraw(self):\n raise NotImplementedError()",
"def update_figure(self):\n\n self.draw()",
"def redraw(self, **kwargs):\n #src_dict = self.data_sources\n #self.remove_sources(src_dict.keys())\n self.renderers = {}\n #self.renderers = {}\n self.figure = self.draw_figure(**kwargs)\n #self.add_sources(src_dict)\n # todo does the old figure linger on?\n self.render_sources(self.data_sources)\n self.bk_pane.object = self.figure",
"def redraw(self):\n dummy_figure = plt.figure()\n new_manager = dummy_figure.canvas.manager\n new_manager.canvas.figure = self.figure\n self.figure.set_canvas(new_manager.canvas)\n plt.show(block=False)",
"def plot_refresh():\n figure.canvas.draw()",
"def on_draw(self):\n # draw everything",
"def refresh_self(self) -> None:\n self._logger.debug(\"running\")\n try:\n self.figure.canvas.draw()\n except Exception as e:\n self._logger.exception(\"issue with drawing canvas.\")\n self._logger.debug(\"done\")",
"def _redraw_graph(self) -> None:\n self._clear_drawing()\n self.draw_graph()",
"def _redraw_graph(self) -> None:\n self._clear_drawing()\n self.draw_graph(graph=self.graph, axes=self.subplot)\n self.draw_graph(graph=self.graph2, axes=self.subplot2)\n self.draw_mappings(self.mapping)",
"def redraw_viz():\n\tglobal g_last_draw\n\tif (rospy.Time.now().to_sec() > (refresh_rate + g_last_draw)):\n\t\tg_last_draw = rospy.Time.now().to_sec()\n\t\t# redraw imu box\n\t\tdoDraw()",
"def paint(self):\r\n pass",
"def update_plot():\n pass",
"def repaint(self):\n pass",
"def redraw(self):\n self._create()",
"def draw(self, force=False):\n for child in self.children.values():\n child.draw(force)",
"def _draw_handler(self, bpy_dummy_self, bpy_dummy_context):\r\n self._drawRays()",
"def draw(self):\n for obj in self.objects:\n obj.draw()",
"def on_draw(self):\n self.clear()\n self.manager.draw()",
"def setDrawing(self):\n self.graph_drawing=[]",
"def draw(self):",
"def refresh_svg_canvas(self):\n if self.ui.tabWidget.currentIndex() == 0:\n self.ui.svg_canvas.build_schematic()\n self.ui.svg_canvas.viewport().update()\n elif self.ui.tabWidget.currentIndex() in (1,2):\n self.ui.svg_canvas.build_pcb()\n self.ui.svg_canvas.viewport().update()\n else:\n raise Exception(\"Unknown view to draw\")",
"def plot_finalize():\n global figure\n global axes\n\n plot_refresh()\n plt.ioff()\n plt.show()\n\n figure, axes = None, None",
"def setup_draw(self):\n pass",
"def draw(self): \n [component.draw() for component in self.components]",
"def draw(self): \n [component.draw() for component in self.components]",
"def draw(self): \n [component.draw() for component in self.components]",
"def draw(self): \n [component.draw() for component in self.components]",
"def draw(self): \n [component.draw() for component in self.components]",
"def draw(self): \n [component.draw() for component in self.components]"
] | [
"0.69585615",
"0.68990314",
"0.6801519",
"0.67597973",
"0.6633574",
"0.64148325",
"0.6317707",
"0.6250047",
"0.6198424",
"0.61798114",
"0.61377364",
"0.6077331",
"0.60669315",
"0.60655534",
"0.60549927",
"0.6053331",
"0.60529304",
"0.6050833",
"0.604263",
"0.6037915",
"0.60376084",
"0.601923",
"0.5978028",
"0.5971856",
"0.5968721",
"0.5968721",
"0.5968721",
"0.5968721",
"0.5968721",
"0.5968721"
] | 0.78338176 | 1 |
Repeat the retrieval of the metrics of a metrics context until at least one of the specified metric group names has data. Returns the MetricGroupValues object for the metric group that has data. | def wait_for_metrics(metric_context, metric_groups):
retries = 0
got_data = False
while not got_data:
mr_str = metric_context.get_metrics()
mr = zhmcclient.MetricsResponse(metric_context, mr_str)
for mg_values in mr.metric_group_values:
if mg_values.name in metric_groups:
got_data = True
if DEBUG_METRICS_RESPONSE:
print("Debug: MetricsResponse:")
print(mr_str)
break
if not got_data:
if retries > GET_METRICS_MAX_RETRIES:
return None
time.sleep(GET_METRICS_RETRY_TIME) # avoid hot spin loop
retries += 1
return mg_values | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def result_group(group_id, failures=False, wait=0, count=None, cached=Conf.CACHED):\n if cached:\n return result_group_cached(group_id, failures, wait, count)\n start = time.time()\n if count:\n while 1:\n if count_group(group_id) == count or wait and (time.time() - start) * 1000 >= wait >= 0:\n break\n tile.sleep(0.01)\n while 1:\n r = None#Task.get_result_group(group_id, failures)\n if r:\n return r\n if (time.time() - start) * 1000 >= wait >= 0:\n break\n time.sleep(0.01)",
"def metrics_group():",
"def result_group_cached(group_id, failures=False, wait=0, count=None, broker=None):\n if not broker:\n broker = get_broker()\n start = time.time()\n if count:\n while 1:\n if count_group(group_id) == count or wait and (time.time() - start) * 1000 >= wait >= 0:\n break\n tile.sleep(0.01)\n while 1:\n group_list = broker.cache.get('{}:{}:keys'.format(broker.list_key, group_id))\n if group_list:\n result_list = []\n for task_key in group_list:\n task = signing.SignedPackage.loads(broker.cache.get(task_key))\n if task['success'] or failures:\n result_list.append(task['result'])\n return result_list\n if (time.time() - start) * 1000 >= wait >= 0:\n break\n time.sleep(0.01)",
"def _compute_group_stats():\n group_stats = []\n \n wmt16_group = Group.objects.filter(name='WMT16')\n wmt16_users = _get_active_users_for_group(wmt16_group)\n \n # Aggregate information about participating groups.\n groups = set()\n for user in wmt16_users:\n for group in _identify_groups_for_user(user):\n groups.add(group)\n \n # TODO: move this to property of evaluation group or add dedicated data model.\n # GOAL: should be configurable from within the Django admin backend.\n #\n # MINIMAL: move to local_settings.py?\n #\n # The following dictionary defines the number of HITs each group should\n # have completed during the WMT16 evaluation campaign.\n \n for group in groups:\n _name = group.name\n \n _group_stats = HIT.compute_status_for_group(group)\n _total = _group_stats[0]\n \n if _total > 0 and not _name in GROUP_HIT_REQUIREMENTS.keys():\n _required = 0\n elif _name in GROUP_HIT_REQUIREMENTS.keys():\n _required = GROUP_HIT_REQUIREMENTS[_name]\n _delta = _total - _required\n _data = (_total, _required, _delta)\n \n if _data[0] > 0:\n group_stats.append((_name, _data))\n \n # Sort by number of remaining HITs.\n group_stats.sort(key=lambda x: x[1][2])\n \n # Add totals at the bottom.\n global_total = sum([x[1][0] for x in group_stats])\n global_required = sum([x[1][1] for x in group_stats])\n global_delta = global_total - global_required\n global_data = (global_total, global_required, global_delta)\n group_stats.append((\"Totals\", global_data))\n \n return group_stats",
"def get_group_values(self, group_id:int, group_name:str) -> bool:\n try:\n value_list = self.cursor.execute(f\"SELECT id, name FROM {table_groups} WHERE id={group_id};\").fetchone()\n if not value_list:\n return False\n group_used_id, group_used_name = value_list\n if group_used_name != group_name:\n self.cursor.execute(f\"UPDATE {table_groups} SET name={group_name} WHERE id={group_used_id};\")\n self.connection.commit()\n return True\n except Exception as e:\n msg = f\"We faced problems with checking of the group prensence. Mistake: {e}\"\n self.proceed_error(msg)\n return False",
"def _read_group_fill_results(self, cr, uid, domain, groupby,\n remaining_groupbys, aggregated_fields,\n count_field, read_group_result,\n read_group_order=None, context=None):\n if groupby == 'week_number':\n WEEK_DICT = dict(self.WEEKS)\n for result in read_group_result:\n week = result['week_number']\n result['week_number'] = (week, WEEK_DICT.get(week))\n return super(calendar_event, self)._read_group_fill_results(\n cr, uid, domain, groupby, remaining_groupbys, aggregated_fields,\n count_field, read_group_result, read_group_order, context\n )",
"def metrics(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ElastigroupMultipleMetricsMetricArgs']]]]:\n return pulumi.get(self, \"metrics\")",
"def read_metric_values(self):\n inv_objs = self._inventory_mgr.current_inventory()\n monitored_metrics = self._metric_mgr.get_monitored_metrics()\n perf_manager = self._si.RetrieveServiceContent().perfManager\n for mor in inv_objs.keys():\n for inv_obj in inv_objs[mor]:\n inv_obj_metrics = inv_obj.metric_id_map\n desired_keys = list(set(inv_obj_metrics.keys()) & set(monitored_metrics[mor].keys()))\n if not len(desired_keys) == 0:\n metric_id_objs = [inv_obj_metrics[key] for key in desired_keys]\n query_spec = vim.PerformanceManager.QuerySpec(\n entity=inv_obj.mor, metricId=metric_id_objs,\n intervalId=inv_obj.INSTANT_INTERVAL,\n maxSample=1, format='normal'\n )\n try:\n results = perf_manager.QueryPerf(querySpec=[query_spec])\n except Exception as e:\n self._logger.error(\"Exception while making performance query : {0}\".format(e))\n if results:\n dps = self._parse_query(inv_obj, results, monitored_metrics[mor])\n payload = self._build_payload(dps)\n self._dispatch_metrics(payload)\n else:\n self._logger.warning(\"Empty result from query : {0}\".format(query_spec))",
"def print_metric_groups(cmd_ctx, client, metric_groups, resource_filter):\n\n if not isinstance(metric_groups, (list, tuple)):\n metric_groups = [metric_groups]\n\n properties = {\n 'anticipated-frequency-seconds': MIN_ANTICIPATED_FREQUENCY,\n 'metric-groups': metric_groups,\n }\n mc = client.metrics_contexts.create(properties)\n mg_values = wait_for_metrics(mc, metric_groups)\n filtered_object_values = list() # of MetricObjectValues\n\n if not mg_values:\n\n mg_name = metric_groups[0] # just pick any\n res_class = zhmcclient._metrics._resource_class_from_group(mg_name)\n mg_def = zhmcclient.MetricGroupDefinition(\n name=mg_name, resource_class=res_class, metric_definitions=[])\n\n else:\n\n mg_def = mc.metric_group_definitions[mg_values.name]\n\n filter_cpc = None\n filter_partition = None\n filter_lpar = None\n filter_adapter = None\n filter_nic = None\n for r_class, r_name in resource_filter:\n if r_class == 'cpc' and r_name:\n filter_cpc = client.cpcs.find(name=r_name)\n elif r_class == 'partition' and r_name:\n assert filter_cpc\n filter_partition = filter_cpc.partitions.find(name=r_name)\n elif r_class == 'logical-partition' and r_name:\n assert filter_cpc\n filter_lpar = filter_cpc.lpars.find(name=r_name)\n elif r_class == 'adapter' and r_name:\n assert filter_cpc\n filter_adapter = filter_cpc.adapters.find(name=r_name)\n elif r_class == 'nic' and r_name:\n assert filter_partition\n filter_nic = filter_partition.nics.find(name=r_name)\n\n resource_class = mg_def.resource_class\n\n for ov in mg_values.object_values:\n included = False\n if resource_class == 'cpc':\n if not filter_cpc:\n included = True\n elif ov.resource_uri == filter_cpc.uri:\n included = True\n elif resource_class == 'partition':\n if not filter_cpc:\n included = True\n elif ov.resource.manager.cpc.uri == filter_cpc.uri:\n if not filter_partition:\n included = True\n elif ov.resource_uri == filter_partition.uri:\n included = True\n elif resource_class == 'logical-partition':\n if not filter_cpc:\n included = True\n elif ov.resource.manager.cpc.uri == filter_cpc.uri:\n if not filter_lpar:\n included = True\n elif ov.resource_uri == filter_lpar.uri:\n included = True\n elif resource_class == 'adapter':\n if not filter_cpc:\n included = True\n elif ov.resource.manager.cpc.uri == filter_cpc.uri:\n if not filter_adapter:\n included = True\n elif ov.resource_uri == filter_adapter.uri:\n included = True\n elif resource_class == 'nic':\n if not filter_cpc:\n included = True\n elif ov.resource.manager.partition.manager.cpc.uri == \\\n filter_cpc.uri:\n if not filter_partition:\n included = True\n elif ov.resource.manager.partition.uri == \\\n filter_partition.uri:\n if not filter_nic:\n included = True\n elif ov.resource_uri == filter_nic.uri:\n included = True\n else:\n raise ValueError(\n \"Invalid resource class: {}\".format(resource_class))\n\n if included:\n filtered_object_values.append(ov)\n\n resource_classes = [f[0] for f in resource_filter]\n\n cmd_ctx.spinner.stop()\n print_object_values(filtered_object_values, mg_def, resource_classes,\n cmd_ctx.output_format, cmd_ctx.transpose)\n\n mc.delete()",
"def iter(self):\n if self.setting_group is None:\n raise Exception(\"No Dictionary to read values from\")\n try:\n return self.setting_group[next(self.setting_group_iter)]\n except StopIteration:\n # make sure we understand the run is over\n self.setting_group = None\n self.setting_group_iter = None",
"def _get_group_data(self, group_name):\n if self.plotter.plot_hues is None:\n data = self._get_group_data_without_hue(group_name)\n else:\n data = self._get_group_data_with_hue(group_name)\n\n group_data = remove_null(data)\n\n return group_data",
"def _poll_group(self, group_type, server, obj, name):\n\n # change collection behavior based on the type of group we're dealing\n # with\n if group_type == 'datacenter':\n # find each cluster in the datacenter\n find_children = server.get_clusters\n poll_child = self.poll_cluster\n child_type = 'cluster'\n elif group_type == 'cluster':\n # find each host in the datacenter or cluster\n find_children = server.get_clusters\n find_children = server.get_hosts\n poll_child = self.poll_host\n child_type = 'host'\n\n self.log.debug('start querying %s: %s' % (group_type, name))\n children = find_children(obj)\n self.log.debug('finish querying %s: %s' % (group_type, name))\n\n # initialize some metrics\n cpu_total = cpu_usage = cpu_percent = 0\n mem_total = mem_usage = mem_percent = 0\n vms_total = vms_running = vms_stopped = 0\n child_stats = {}\n\n # iterate over each child node in this object group\n for child_obj, child_name in children.items():\n stats = poll_child(server, child_obj, child_name)\n child_stats[child_name] = stats\n\n # aggregate data from each child to the top level\n cpu_total += stats['cpu_total']\n cpu_usage += stats['cpu_usage']\n\n mem_total += stats['mem_total']\n mem_usage += stats['mem_usage']\n\n vms_total += stats['vms_total']\n vms_running += stats['vms_running']\n vms_stopped += stats['vms_stopped']\n\n # recalculate percentages\n if cpu_total > 0:\n cpu_percent = cpu_usage / float(cpu_total) * 100\n\n if mem_total > 0:\n mem_percent = mem_usage / float(mem_total) * 100\n\n # return the current metrics for this group\n group_stats = {\n 'cpu_total': cpu_total,\n 'cpu_usage': cpu_usage,\n 'cpu_percent': cpu_percent,\n 'mem_total': mem_total,\n 'mem_usage': mem_usage,\n 'mem_percent': mem_percent,\n 'vms_total': vms_total,\n 'vms_running': vms_running,\n 'vms_stopped': vms_stopped,\n child_type: child_stats,\n }\n\n return group_stats",
"def get(self):\n status = ErrorCode.SUCCESS\n try:\n res = []\n cid = self.get_argument('cid', None)\n if not (cid is None):\n res = QueryHelper.get_groups_by_cid(cid, self.db)\n self.write_ret(status,\n dict_=DotDict(res=res))\n except Exception as e:\n logging.exception(\"[UWEB] Get groups failed. Exception: %s\",\n e.args)\n status = ErrorCode.SERVER_BUSY\n self.write_ret(status)",
"def generate_metrics_data(metricsquery: List, resultsquery: Dict, deltaminutes: int = 5, Region_name: str = None) -> Dict:\r\n cloudwatch=client('cloudwatch', region_name=Region_name) \r\n paginator = cloudwatch.get_paginator('get_metric_data')\r\n metricsgroup=grouper(metricsquery)\r\n resultsquery['ApiCalls']=0 \r\n for mqs in metricsgroup:\r\n for response in paginator.paginate(MetricDataQueries=mqs, StartTime=datetime.now()-timedelta(minutes=deltaminutes),EndTime=datetime.now()):\r\n for results in response['MetricDataResults']:\r\n resultsquery[results['Id']].append({'results':results})\r\n resultsquery['ApiCalls']+=1\r\n return resultsquery",
"def collect_metrics(grouped_samples, projroot, tgtdir, ext, grouping=\"sample\"):\n metrics = []\n for item_id, itemlist in grouped_samples.items():\n item = itemlist[0]\n # FIXME: tgtdir should be docroot!\n pfx = os.path.relpath(itemlist[0].prefix(grouping), os.path.dirname(tgtdir))\n mfile = glob.glob(pfx + \".*\" + ext)\n if mfile:\n metrics.append((item_id, mfile[0]))\n return PicardMetricsCollection(metrics)",
"def test_result_group_can_index_into_metrics(\n self, index: int, metric_name: str, result_group: ResultGroup\n ):\n assert result_group.metrics[index].name == metric_name",
"def _get_group_example_data(self, data_group_id: str) -> Dict[\n str, dict\n ]:\n return {\n e['example_id']: self._get_example_data(e['example_id'])\n for e in self.tasks['data_groups'][data_group_id]\n }",
"def _evaluate(dataset: dict, name: str, metrics=None):\n if metrics is None:\n metrics = ['Accuracy', 'AUROC', 'AUPRC', 'Precision', 'Recall', 'F1', 'F2']\n measures = [dataset[metric] for metric in metrics]\n measures.insert(0, name)\n return measures",
"def _retrieve(self):\n all_groups_settings = []\n iam_groups_settings = []\n\n model_manager = self.service_config.model_manager\n scoped_session, data_access = model_manager.get(self.model_name)\n with scoped_session as session:\n for settings in data_access.scanner_fetch_groups_settings(session,\n True):\n email = settings[0].split('group/')[1]\n iam_groups_settings.append(groups_settings.GroupsSettings\n .from_json(email, settings[1]))\n for settings in data_access.scanner_fetch_groups_settings(session,\n False):\n email = settings[0].split('group/')[1]\n all_groups_settings.append(groups_settings.GroupsSettings\n .from_json(email, settings[1]))\n\n return all_groups_settings, iam_groups_settings",
"def get_stats_by_adgroup(\n self, account_id, adgroup_ids=None, batch=False,\n start_time=None, end_time=None):\n args = {}\n if adgroup_ids is not None:\n args['adgroup_ids'] = json.dumps(adgroup_ids)\n if start_time:\n args['start_time'] = self.__parse_time(start_time)\n if end_time:\n args['end_time'] = self.__parse_time(end_time)\n path = 'act_%s/adgroupstats' % account_id\n return self.make_request(path, 'GET', args, batch=batch)",
"def group_get_members(self,groupname):\n\n if not self.check_prereqs():\n raise StopIteration\n\n db = self.env.get_db_cnx()\n cursor = db.cursor()\n\n query=self.create_query(self.sql_group_get_members_query+\" ORDER BY $username_field$\",{'groupname':groupname,'username_field':self.sql_username_field,'groupname_field':self.sql_groupname_field})\n self.log.debug(\"sqlflexibleauthstore: group_get_members: %s\" % (query,))\n\n cursor.execute(query)\n desc=[i[0] for i in cursor.description]\n for row in cursor:\n dictrow=dict(zip(desc,row))\n yield dictrow[self.sql_username_field]",
"def get(self, group) -> Optional[OrderedDict]:\n return self._queue.get(group)",
"def get_values(self, names):\n r = []\n for n in names:\n if n in self.raw_metrics:\n r.append(self.raw_metrics[n])\n else:\n return None\n return r",
"def _parse_varname(self) -> (str, int, dict):\n\n metr_groups = list(globals.metric_groups.keys())\n for g in metr_groups:\n templ_d = globals.var_name_ds_sep[g]\n pattern = '{}{}'.format(globals.var_name_metric_sep[g],\n templ_d if templ_d is not None else '')\n parts = parse(pattern, self.varname)\n\n if parts is not None and parts['metric'] in globals.metric_groups[g]:\n return parts['metric'], g, parts.named\n\n return None, None, None",
"def _get_endpoint_group(self, group_name):\n params = {\n \"name\": group_name\n }\n\n response, err_msg = self.api_call(\"GET\", ENDPOINT_GROUP_URI, params)\n if not err_msg:\n result = response.json()\n if result.get(\"nextPage\"):\n response_next = self.get_next_page(result.get(\"nextPage\"))\n\n return response, err_msg",
"def get_metrics(metric_groups):\n return sorted(m for g in metric_groups for m in INSTANCE_METRIC_GROUP_MAP[g])",
"def get_metric_filter(\n log_group_name,\n filter_name_prefix,\n metric_name,\n metric_namespace,\n):\n paginator = CLIENT.get_paginator(\"describe_metric_filters\")\n response_iterator = paginator.paginate(\n logGroupName=log_group_name,\n filterNamePrefix=filter_name_prefix,\n )\n metric_filters_response = [\n metric_filter\n for response in response_iterator\n for metric_filter in response.get(\"metricFilters\", [])\n ]\n LOGGER.debug(\"metric filters response: %s\", metric_filters_response)\n if not metric_filters_response:\n raise ValueError(\n \"failed to find existing metric filter with \"\n f\"logGroupName: [{log_group_name}], \"\n f\"filterNamePrefix: [{filter_name_prefix}]\"\n )\n # Get the fist metric filter with a matching transformation with the same\n # metricNameSpace and metricName\n # NOTE: There is a chance that there are multiple metric filters since the\n # describe_metric_filters uses a name prefix\n for m_f in metric_filters_response:\n metric_filters = [\n m_f\n for m_t in m_f[\"metricTransformations\"]\n if m_t[\"metricName\"] == metric_name and m_t[\"metricNamespace\"] == metric_namespace\n ]\n if metric_filters:\n break\n\n if not metric_filters:\n raise ValueError(\n \"failed to find existing metric filter with \"\n f\"logGroupName: [{log_group_name}], \"\n f\"filterNamePrefix: [{filter_name_prefix}], \"\n f\"metricName: [{metric_name}], \"\n f\"metricNamespace: [{metric_namespace}]\"\n )\n\n metric_filter_properties = [\n \"filterName\",\n \"filterPattern\",\n \"logGroupName\",\n \"metricTransformations\",\n ]\n # only return the properties that are needed for the put_metric_filter call\n return {k: v for k, v in metric_filters[0].items() if k in metric_filter_properties}",
"def get(self):\n FetchGroupActionObjects.__init__(self)\n kwargs = self.parser.parse_args()\n query = self.notifications_db.construct_lucene_complex_query([\n ('target_role', {'value': self.role}),\n ('targets', {'value': self.username, 'join_operator': 'OR'}),\n ('group', {'value': kwargs['group'], 'join_operator': 'AND'})])\n notifications = self.notifications_db.full_text_search('search', query)\n action_objects = []\n object_type = ''\n for notif in notifications:\n action_objects += notif['action_objects']\n if object_type == '' and notif['object_type'] != '':\n object_type = notif['object_type']\n if object_type == '' or len(action_objects) < 1:\n return {}\n action_objects_results = self.fetch_action_objects(action_objects, object_type, kwargs['page'], kwargs['limit'])\n self.logger.info(\"Fetched group action objects for group %s\" % kwargs['group'])\n return action_objects_results",
"def get_feed_group_data(\n self,\n feed: str,\n group: str,\n since: Optional[datetime.datetime] = None,\n next_token: str = None,\n ) -> GroupData:\n try:\n listing_json, record = self._get_feed_group_data()\n if record.content_type != \"application/x-tar\":\n raise UnexpectedMIMEType(record.content_type)\n return GroupData(\n data=record.content,\n next_token=None,\n since=since,\n record_count=1,\n response_metadata={\n \"checksum\": listing_json.get(\"checksum\"),\n \"built\": listing_json.get(\"built\"),\n \"version\": listing_json.get(\"version\"),\n },\n )\n except (HTTPStatusException, json.JSONDecodeError, UnicodeDecodeError) as e:\n logger.debug(\"Error executing grype DB data download: %s\", e)\n raise e",
"def get_group_members(self, group_key):\n try:\n paged_results = self.repository.members.list(group_key)\n result = api_helpers.flatten_list_results(paged_results, 'members')\n LOGGER.debug('Getting all the members for group_key = %s,'\n ' result = %s', group_key, result)\n return result\n except (errors.HttpError, HttpLib2Error) as e:\n raise api_errors.ApiExecutionError(group_key, e)"
] | [
"0.55873525",
"0.55362415",
"0.54371434",
"0.51211834",
"0.50748",
"0.50633067",
"0.50494426",
"0.50236344",
"0.5014017",
"0.49106795",
"0.4888227",
"0.48790106",
"0.48743725",
"0.4858078",
"0.48575234",
"0.48438308",
"0.48435786",
"0.4804029",
"0.47981688",
"0.47835502",
"0.47701344",
"0.47551236",
"0.4739878",
"0.47212353",
"0.46829647",
"0.46789664",
"0.46770507",
"0.46716234",
"0.4664505",
"0.46313092"
] | 0.72703934 | 0 |
Retrieve and print metric groups. | def print_metric_groups(cmd_ctx, client, metric_groups, resource_filter):
if not isinstance(metric_groups, (list, tuple)):
metric_groups = [metric_groups]
properties = {
'anticipated-frequency-seconds': MIN_ANTICIPATED_FREQUENCY,
'metric-groups': metric_groups,
}
mc = client.metrics_contexts.create(properties)
mg_values = wait_for_metrics(mc, metric_groups)
filtered_object_values = list() # of MetricObjectValues
if not mg_values:
mg_name = metric_groups[0] # just pick any
res_class = zhmcclient._metrics._resource_class_from_group(mg_name)
mg_def = zhmcclient.MetricGroupDefinition(
name=mg_name, resource_class=res_class, metric_definitions=[])
else:
mg_def = mc.metric_group_definitions[mg_values.name]
filter_cpc = None
filter_partition = None
filter_lpar = None
filter_adapter = None
filter_nic = None
for r_class, r_name in resource_filter:
if r_class == 'cpc' and r_name:
filter_cpc = client.cpcs.find(name=r_name)
elif r_class == 'partition' and r_name:
assert filter_cpc
filter_partition = filter_cpc.partitions.find(name=r_name)
elif r_class == 'logical-partition' and r_name:
assert filter_cpc
filter_lpar = filter_cpc.lpars.find(name=r_name)
elif r_class == 'adapter' and r_name:
assert filter_cpc
filter_adapter = filter_cpc.adapters.find(name=r_name)
elif r_class == 'nic' and r_name:
assert filter_partition
filter_nic = filter_partition.nics.find(name=r_name)
resource_class = mg_def.resource_class
for ov in mg_values.object_values:
included = False
if resource_class == 'cpc':
if not filter_cpc:
included = True
elif ov.resource_uri == filter_cpc.uri:
included = True
elif resource_class == 'partition':
if not filter_cpc:
included = True
elif ov.resource.manager.cpc.uri == filter_cpc.uri:
if not filter_partition:
included = True
elif ov.resource_uri == filter_partition.uri:
included = True
elif resource_class == 'logical-partition':
if not filter_cpc:
included = True
elif ov.resource.manager.cpc.uri == filter_cpc.uri:
if not filter_lpar:
included = True
elif ov.resource_uri == filter_lpar.uri:
included = True
elif resource_class == 'adapter':
if not filter_cpc:
included = True
elif ov.resource.manager.cpc.uri == filter_cpc.uri:
if not filter_adapter:
included = True
elif ov.resource_uri == filter_adapter.uri:
included = True
elif resource_class == 'nic':
if not filter_cpc:
included = True
elif ov.resource.manager.partition.manager.cpc.uri == \
filter_cpc.uri:
if not filter_partition:
included = True
elif ov.resource.manager.partition.uri == \
filter_partition.uri:
if not filter_nic:
included = True
elif ov.resource_uri == filter_nic.uri:
included = True
else:
raise ValueError(
"Invalid resource class: {}".format(resource_class))
if included:
filtered_object_values.append(ov)
resource_classes = [f[0] for f in resource_filter]
cmd_ctx.spinner.stop()
print_object_values(filtered_object_values, mg_def, resource_classes,
cmd_ctx.output_format, cmd_ctx.transpose)
mc.delete() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def list_groups(self):\n\n for counter, label in enumerate(self.exp_labels_list):\n print('Key {}: {} \\n'.format(str(counter), label))",
"def metrics_group():",
"def groups(self):\n #return self.get('{}/groups'.format(ApiVersion.A1.value))\n return self.get('{}/groups'.format(ApiVersion.CM1.value))",
"def list_groups(args):\n\n for group in get_groups(args):\n print(group)",
"def list_group():\n data, code, message = FIELD_SERVICE.list_group()\n return __result(data, code, message)",
"def groups(self):\n groups_text = '\\n'\n for group in self.exercise_numbers:\n txt = ' %s:\\t' % group[0]\n for exercise in group[1:]:\n if isinstance(exercise, int):\n txt += '%d. ' % exercise\n else:\n txt += '\\n\\t%s\\n\\t' % exercise\n groups_text += txt + '\\n'\n return groups_text",
"def get_metrics(metric_groups):\n return sorted(m for g in metric_groups for m in INSTANCE_METRIC_GROUP_MAP[g])",
"def list_groups():\n return _list_tindyb_unique_values(\"group\", dbpath=__dbpath__)",
"def get_group_names(self):\r\n return self.groups.keys()",
"def get_group_command(client: MsGraphClient, args: dict) -> tuple[str, dict, dict]:\n group_id = str(args.get('group_id'))\n group = client.get_group(group_id)\n\n group_readable, group_outputs = parse_outputs(group)\n human_readable = tableToMarkdown(name=\"Groups:\", t=group_readable,\n headers=['ID', 'Display Name', 'Description', 'Created Date Time', 'Mail',\n 'Security Enabled', 'Visibility'],\n removeNull=True)\n entry_context = {f'{INTEGRATION_CONTEXT_NAME}(obj.ID === {group_id})': group_outputs}\n return human_readable, entry_context, group",
"def test_get_groups(self):\n pass",
"def test_get_groups(self):\n pass",
"def gather_groups_memory(output_mem):\n groups = get_memory_cgroups()\n p_table = prettytable.PrettyTable(\n ['Group',\n 'Resident Set Size (MiB)'\n ], caching=False)\n p_table.align = 'l'\n p_table.align['Resident Set Size (MiB)'] = 'r'\n\n # Get overall memory summary per group\n total_rss = 0.0\n for group in groups:\n for line in output_mem.split(\"\\n\"):\n if group + \"/memory.stat\" in line:\n total_rss += float(line.split()[1])\n rss_mem = mem_to_mebibytes(line.split()[1])\n MEMORY['cgroups'][group] = rss_mem\n p_table.add_row(\n [group,\n rss_mem or '-',\n ])\n break\n\n # Add overall rss memory\n MEMORY['cgroups']['total_rss'] = mem_to_mebibytes(total_rss)\n p_table.add_row(\n [\"Total cgroup-rss\",\n MEMORY['cgroups']['total_rss'] or '-',\n ])\n return p_table",
"def groups(self):\n return self.get_data(\"groups\")",
"def printUsersInGroup(group) -> None:\n click.echo(tabulate(listUsersInDict(group), headers=\"keys\", tablefmt=\"grid\"))",
"def getGroup():\n\tprint\n\tprint \"Requesting the list of groups for this account\"\n\n\tgroups_result = getResult('/papi/v0/groups')\n\n\treturn (groups_result)",
"def cli(ctx, group_id):\n return ctx.gi.groups.show_group(group_id)",
"def print_list(self):\r\n print(\"Displaying each metric:\")\r\n print(\"======\")\r\n for metric in self.metrics:\r\n metric.whoami()\r\n print(\"======\")\r\n print(self.metrics)\r\n print(\"END\")\r\n print()",
"def get_psample_list_groups(dut):\n return st.show(dut, \"sudo psample --list-groups\", skip_tmpl=True)",
"def __repr__(self):\n return str(self.group)",
"def info_materials_groups_get():\n session = info_map.Session()\n\n mat = aliased(info_map.Material)\n grp = aliased(info_map.Group)\n\n q = session.query(mat.group_id,grp.name).join(grp).distinct()\n groups = [Group(group=row.group_id,name=row.name) for row in q.all()]\n return groups, 200",
"def getGroups():\r\n return Group.getGroups()",
"def output_groups(self) -> List[str]:\n return self._output_groups",
"def get_pingroups(self):\n return self.groups[:]",
"def __show_all_metrics(self):\n for obj in self.metrics_list:\n self.__print_metrics_info(obj.get_name())\n print()",
"def get_cloudwatch_log_groups(global_vars):\n resp_data = {'status': False, 'log_groups':[], 'error_message': ''}\n client = boto3.client('logs')\n try:\n # Lets get all the logs\n resp = client.describe_log_groups( limit = 50 )\n resp_data['log_groups'].extend( resp.get('logGroups') )\n # Check if the results are paginated\n if resp.get('nextToken'):\n while True:\n resp = client.describe_log_groups( nextToken = resp.get('nextToken'), limit = 50 )\n resp_data['log_groups'].extend( resp.get('logGroups') )\n # Check & Break, if the results are no longer paginated\n if not resp.get('nextToken'):\n break\n resp_data['status'] = True\n except Exception as e:\n resp_data['error_message'] = str(e)\n return resp_data",
"def test_grouped(self):\n gfile = grades.writers.GradesFile(self.fname)\n gfile.table.compute_grouped_mean('Group')\n gfile.table_format = 'org'\n self.check_output(self.output_str2, gfile)",
"def test_020_query_groups(self):\n\n testflow.step(\"Querying for groups\")\n assert self.query_cli.run(\n what='group'\n )[0], \"Failed to search for groups\"",
"def stats(self) -> Sequence['outputs.GetSystemGroupsGroupStatResult']:\n return pulumi.get(self, \"stats\")",
"def get(self, *args):\n return _libsbml.ListOfGroups_get(self, *args)"
] | [
"0.72279793",
"0.70860595",
"0.674111",
"0.66355723",
"0.64703333",
"0.6281449",
"0.62582123",
"0.62421024",
"0.62346387",
"0.6213498",
"0.61584836",
"0.61584836",
"0.6125847",
"0.6110818",
"0.61033535",
"0.60848945",
"0.6041761",
"0.6026084",
"0.599124",
"0.5982218",
"0.59800136",
"0.59650046",
"0.5954368",
"0.59489745",
"0.59417105",
"0.5909944",
"0.5909551",
"0.5903865",
"0.5896795",
"0.5892987"
] | 0.73456234 | 0 |
Report usage overview metrics for CPCs. In addition to the command-specific options shown in this help text, the general options (see 'zhmc help') can also be specified right after the 'zhmc' command name. | def metrics_cpc(cmd_ctx, cpc, **options):
cmd_ctx.execute_cmd(lambda: cmd_metrics_cpc(cmd_ctx, cpc, options)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def metrics_channel(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_channel(cmd_ctx, cpc, options))",
"def do_hostinfo(self, args):\n host = opts = None\n if args:\n args = args.split()\n host = args.pop()\n\n if not host:\n print('Usage: hostinfo [-cdmu] host_name_or_ip')\n print(' uptime and load stats returned if no options specified')\n return\n\n try:\n ip = socket.gethostbyname(host)\n except socket.gaierror:\n print('cannot resolve', host, file=sys.stderr)\n return\n\n opts = []\n while args:\n arg = args.pop(0)\n if arg.startswith('--'):\n if arg == '--cpu':\n opts.append('c')\n elif arg == '--disk':\n opts.append('d')\n elif arg == '--memory':\n opts.append('m')\n elif arg == '--uptime':\n opts.append('u')\n else:\n print('unrecognized option:', arg, file=sys.stderr)\n return\n else:\n if arg[0] == '-':\n for ch in arg[1:]:\n if ch in ('cdmu') and ch not in opts:\n opts.append(ch)\n else:\n print('unrecognized option:', ch, file=sys.stderr)\n return\n\n stats = self._qm.get_host_stats(ip)\n\n if not opts:\n # Get uptime and load averages.\n up = stats['uptime']\n load = stats['cpu_load']\n print('Up for %s days, %s hours, %s minutes, '\n 'load averages: %s, %s, %s'\n % (up['days'], up['hours'], up['minutes'], load['one'],\n load['five'], load['fifteen']))\n return\n\n all_stats = []\n for opt in opts:\n if opt == 'd':\n # Get disk usage.\n disks = stats['disk_usage']\n st = ['Disk Usage:']\n for mount, disk_info in disks.viewitems():\n st.append(' Usage for: %s' % mount)\n for k, v in disk_info.viewitems():\n st.append(' %s: %s' % (k, v))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'c':\n # Get CPU load.\n load_stats = stats['cpu_load']\n st = ['CPU Load Average:']\n st.append(' last one minute: %s' % load_stats['one'])\n st.append(' last five minutes: %s' % load_stats['five'])\n st.append(' last fifteen minutes: %s' % load_stats['fifteen'])\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'm':\n # Get Memory Usage.\n memory_usage = stats['memory_usage']\n st = ['Memory usage:']\n for k, v in memory_usage.viewitems():\n st.append(' %s: %s' % (k, v))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'u':\n # Get uptime.\n up = stats['uptime']\n st = ['Uptime:']\n st.append(' Up for %s days, %s hours and %s minutes'\n % (up['days'], up['hours'], up['minutes']))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n\n print('\\n'.join(all_stats))",
"def metrics_proc(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_proc(cmd_ctx, cpc, options))",
"def print_help(self):\r\n\t\ttext = \"\\tName: ml_scikit_OPTICS\"\r\n\t\ttext += \"\\n\\t\\tThis machine learning plugin uses scikit-learn's OPTICS algorithm.\\n\"\r\n\t\ttext += \"\\n\\t\\tOptional Parameters:\"\r\n\t\ttext += \"\\n\\t\\t\\tOPTICS_skip_normalization: Do NOT perform normalization (scaling) of data, skip this step.\"\r\n\t\ttext += \"\\n\\t\\t\\OPTICS_eps: Specify eps parameter (default is 1.0).\"\r\n\t\ttext += \"\\n\\t\\t\\OPTICS_min_samples: Specify min_samples parameter (default is 5).\"\r\n#\r\n# OPTICS (with memory complexity n) is an alternative to DBSCAN (with memory complexity n^2)\r\n# which has time complexity n^2 in general with the default max_eps = np.inf. \r\n# We will set max_eps = eps to reduce the run-time.\r\n#\r\n\t\treturn text",
"def HelpCommand(self, unused_args, unused_sub_opts=None, unused_headers=None,\n unused_debug=None):\n self.OutputUsageAndExit()",
"async def stats(self, ctx):\n if ctx.invoked_subcommand is None:\n await send_cmd_help(ctx)",
"def print_help():\n print \"\"\"\nMeasure Operating System Performance (mosp)\n-------------------------------------------\n\nUse this program to measure and report on operating system\nperformance.\n\nThis code measures operating system performance,\nincluding CPU, memory, disk and network, and\noutputs stats to screen and optionally to file\ntoo for use in performance analysis\n\nUses the psutil library\n\nInstall psutil (Ubuntu) if you don't already have it:\n sudo apt-get install python-dev\n sudo pip install psutil\n\nUsage:\n python mosp.py [options]\n\nExample usage:\n python mosp.py -W -i 2\n\nOptions:\n -h --help Display this help and exit\n -m --max-run-time Maximum time to run for before exiting\n (default is infinite)\n -i --interval Interval between requests in seconds\n (default is 1)\n -w --output-file Specify an output filename\n -W Output results to default filename\n default format is:\n mosp-HOSTNAME-YYYYMMDD-HHMMSS.csv\n -b --output-path Specify path to output file directory\n -j --no-header-row Suppress writing header row into CSV\n -v --version Output version information and exit\n\n \"\"\"\n return()",
"def help(cls, extra_args=None):\n if (_is_text_interface()):\n return _create_text_help_str(cls, cls._TEXT_USAGE)\n else:\n return cls._GRAPHICAL_USAGE",
"def DoHelp(options, args):\n __pychecker__ = 'unusednames=options'\n if len(args) == 1 and args[0] in COMMAND_USAGE_TEXT:\n print(COMMAND_USAGE_TEXT[args[0]])\n else:\n raise gclient_utils.Error(\"unknown subcommand '%s'; see 'gclient help'\" %\n args[0])",
"def metrics_crypto(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_crypto(cmd_ctx, cpc, options))",
"def usage(err=''):\r\n m = '%s\\n' %err\r\n m += 'Default usage is to list Cases closed for the 30 days\\n'\r\n m += '\\n Example:\\n'\r\n m += ' closedcases -n 90 \\n' \r\n m += ' \\n'\r\n# m += ' closedcases -n 60 -s blast5 \\n'\r\n return m",
"def cmd_help(args):",
"def help(self, args):\n print('No commands available for this consumer')",
"def help(self):\n msg = \"`%s' performs the computational aspects of genotyping-by-sequencing.\\n\" % os.path.basename(sys.argv[0])\n msg += \"\\n\"\n msg += \"Usage: %s [OPTIONS] ...\\n\" % os.path.basename(sys.argv[0])\n msg += \"\\n\"\n msg += \"Options:\\n\"\n msg += \" -h, --help\\tdisplay the help and exit\\n\"\n msg += \" -V, --version\\toutput version information and exit\\n\"\n msg += \" -v, --verbose\\tverbosity level (0/default=1/2/3)\\n\"\n msg += \" --proj1\\tname of the project used for steps 1 to 4\\n\"\n msg += \"\\t\\tmention a reference genome only if all samples belong to\\n\"\n msg += \"\\t\\t the same species, and will be mapped to the same ref genome\\n\"\n msg += \" --proj2\\tname of the project used for steps 4 to 8\\n\"\n msg += \"\\t\\tcan be the same as --proj1, or can be different\\n\"\n msg +=\"\\t\\t notably when samples come from different species\\n\"\n msg += \"\\t\\t or if one wants to align reads to different ref genomes\\n\"\n msg += \" --schdlr\\tname of the cluster scheduler (default=SGE)\\n\"\n msg += \" --queue\\tname of the cluster queue (default=normal.q)\\n\"\n msg += \" --resou\\tcluster resources (e.g. 'test' for 'qsub -l test')\\n\"\n msg += \" --rmvb\\tremove bash scripts for jobs launched in parallel\\n\"\n msg += \" --step\\tstep to perform (1/2/3/.../9)\\n\"\n msg += \"\\t\\t1: raw read quality per lane (with FastQC v >= 0.11.2)\\n\"\n msg += \"\\t\\t2: demultiplexing per lane (with demultiplex.py v >= 1.14.0)\\n\"\n msg += \"\\t\\t3: cleaning per sample (with CutAdapt v >= 1.8)\\n\"\n msg += \"\\t\\t4: alignment per sample (with BWA MEM v >= 0.7.12, Samtools v >= 1.3, Picard and R v >= 3)\\n\"\n msg += \"\\t\\t5: local realignment per sample (with GATK v >= 3.5)\\n\"\n msg += \"\\t\\t6: local realignment per genotype (with GATK v >= 3.5)\\n\"\n msg += \"\\t\\t7: variant and genotype calling per genotype (with GATK HaplotypeCaller v >= 3.5)\\n\"\n msg += \"\\t\\t8: variant and genotype calling jointly across genotypes (with GATK GenotypeGVCFs v >= 3.5)\\n\"\n msg += \"\\t\\t9: variant and genotype filtering (with GATK v >= 3.5)\\n\"\n msg += \" --samples\\tpath to the 'samples' file\\n\"\n msg += \"\\t\\tcompulsory for all steps, but can differ between steps\\n\"\n msg += \"\\t\\t e.g. if samples come from different species or are aligned\\n\"\n msg += \"\\t\\t on different ref genomes, different samples file should\\n\"\n msg += \"\\t\\t be used for steps 4-9, representing different subsets of\\n\"\n msg += \"\\t\\t the file used for steps 1-3\\n\"\n msg += \"\\t\\tthe file should be encoded in ASCII\\n\"\n msg += \"\\t\\tthe first row should be a header with column names\\n\"\n msg += \"\\t\\teach 'sample' (see details below) should have one and only one row\\n\"\n msg += \"\\t\\tany two columns should be separated with one tabulation\\n\"\n msg += \"\\t\\tcolumns can be in any order\\n\"\n msg += \"\\t\\trows starting by '#' are skipped\\n\"\n msg += \"\\t\\t12 columns are compulsory (but there can be more):\\n\"\n msg += \"\\t\\t genotype (see details below, e.g. 'Col-0', but use neither underscore '_' nor space ' ' nor dot '.', use dash '-' instead)\\n\"\n msg += \"\\t\\t ref_genome (identifier of the reference genome used for alignment, e.g. 'Atha_v2', but use neither space ' ' nor dot '.'; the full species name, e.g. 'Arabidopsis thaliana', will be present in the file given to --dict)\\n\"\n msg += \"\\t\\t library (e.g. can be the same as 'genotype')\\n\"\n msg += \"\\t\\t barcode (e.g. 
'ATGG')\\n\"\n msg += \"\\t\\t seq_center (e.g. 'Broad Institute', 'GenoToul', etc)\\n\"\n msg += \"\\t\\t seq_platform (e.g. 'ILLUMINA', see SAM format specification)\\n\"\n msg += \"\\t\\t seq_platform_model (e.g. 'HiSeq 2000')\\n\"\n msg += \"\\t\\t flowcell (e.g. 'C5YMDACXX')\\n\"\n msg += \"\\t\\t lane (e.g. '3', can be '31' if a first demultiplexing was done per index)\\n\"\n msg += \"\\t\\t date (e.g. '2015-01-15', see SAM format specification)\\n\"\n msg += \"\\t\\t fastq_file_R1 (filename, one per lane, gzip-compressed)\\n\"\n msg += \"\\t\\t fastq_file_R2 (filename, one per lane, gzip-compressed)\\n\"\n msg += \" --fcln\\tidentifier of a flowcell and lane number\\n\"\n msg += \"\\t\\tformat as <flowcell>_<lane-number>, e.g. 'C5YMDACXX_1'\\n\"\n msg += \"\\t\\tif set, only the samples from this lane will be analyzed\\n\"\n msg += \" --pird\\tpath to the input reads directory\\n\"\n msg += \"\\t\\tcompulsory for steps 1 and 2\\n\"\n msg += \"\\t\\twill be added to the columns 'fastq_file_R*' from the sample file\\n\"\n msg += \"\\t\\tif not set, input read files should be in current directory\\n\"\n msg += \" --enz\\tname of the restriction enzyme\\n\"\n msg += \"\\t\\tcompulsory for step 2\\n\"\n msg += \"\\t\\tdefault=ApeKI\\n\"\n msg += \" --dmxmet\\tmethod used to demultiplex\\n\"\n msg += \"\\t\\tcompulsory for step 2\\n\"\n msg += \"\\t\\tdefault=4c (see the help of demultiplex.py to know more)\\n\"\n msg += \" --subst\\tnumber of substitutions allowed during demultiplexing\\n\"\n msg += \"\\t\\tcompulsory for step 2\\n\"\n msg += \"\\t\\tdefault=2\\n\"\n msg += \" --ensubst\\tenforce the nb of substitutions allowed\\n\"\n msg += \"\\t\\tcompulsory for step 2\\n\"\n msg += \"\\t\\tdefault=lenient/strict\\n\"\n msg += \" --adp\\tpath to the file containing the adapters\\n\"\n msg += \"\\t\\tcompulsory for step 3\\n\"\n msg += \"\\t\\tsame format as FastQC: name<tab>sequence\\n\"\n msg += \"\\t\\tname: at least 'adpR1' (also 'adpR2' if paired-end)\\n\"\n msg += \"\\t\\tsequence: from 5' (left) to 3' (right)\\n\"\n msg += \" --errtol\\terror tolerance to find adapters\\n\"\n msg += \"\\t\\tcompulsory for step 3\\n\"\n msg += \"\\t\\tdefault=0.2\\n\"\n msg += \" --minovl\\tminimum overlap length between reads and adapters\\n\"\n msg += \"\\t\\tcompulsory for step 3\\n\"\n msg += \"\\t\\tdefault=3 (in bases)\\n\"\n msg += \" --minrl\\tminimum length to keep a read\\n\"\n msg += \"\\t\\tcompulsory for step 3\\n\"\n msg += \"\\t\\tdefault=35 (in bases)\\n\"\n msg += \" --minq\\tminimum quality to trim a read\\n\"\n msg += \"\\t\\tcompulsory for step 3\\n\"\n msg += \"\\t\\tdefault=20 (used for both reads if paired-end)\\n\"\n msg += \" --maxNp\\tmaximum percentage of N to keep a read\\n\"\n msg += \"\\t\\tcompulsory for step 3\\n\"\n msg += \"\\t\\tdefault=0.2\\n\"\n msg += \" --ref\\tpath to the prefix of files for the reference genome\\n\"\n msg += \"\\t\\tcompulsory for steps 4, 5, 6, 7, 8, 9\\n\"\n msg += \"\\t\\tshould correspond to the 'ref_genome' column in --samples\\n\"\n msg += \"\\t\\te.g. 
'/data/Atha_v2' for '/data/Atha_v2.fa', '/data/Atha_v2.bwt', etc\\n\"\n msg += \"\\t\\tthese files are produced via 'bwa index ...'\\n\"\n msg += \" --dict\\tpath to the 'dict' file (SAM header with @SQ tags)\\n\"\n msg += \"\\t\\tcompulsory for step 4\\n\"\n msg += \"\\t\\tsee 'CreateSequenceDictionary' in the Picard software\\n\"\n msg += \" --jgid\\tcohort identifier to use for joint genotyping\\n\"\n msg += \"\\t\\tcompulsory for steps 8, 9\\n\"\n msg += \"\\t\\tuseful to launch several, different cohorts in parallel\\n\"\n msg += \" --rat\\trestrict alleles to be of a particular allelicity\\n\"\n msg += \"\\t\\tused in step 9\\n\"\n msg += \"\\t\\tdefault=ALL/BIALLELIC/MULTIALLELIC\\n\"\n msg += \"\\t\\tsee '--restrictAllelesTo' in GATK's SelectVariant\\n\"\n msg += \" --mdp\\tminimum value for DP (read depth; e.g. 10)\\n\"\n msg += \"\\t\\tused in step 9\\n\"\n msg += \"\\t\\tsee GATK's VariantFiltration\\n\"\n msg += \" --mgq\\tminimum value for GQ (genotype quality; e.g. 20)\\n\"\n msg += \"\\t\\tused in step 9\\n\"\n msg += \"\\t\\tsee GATK's VariantFiltration\\n\"\n msg += \" --mnfg\\tmaximum number of filtered genotypes to keep a variant\\n\"\n msg += \"\\t\\tused in step 9\\n\"\n msg += \"\\t\\tsee '--maxFilteredGenotypes' in GATK's SelectVariants\\n\"\n msg += \" --mffg\\tmaximum fraction of filtered genotypes to keep a variant\\n\"\n msg += \"\\t\\tused in step 9\\n\"\n msg += \"\\t\\tsee '--maxFractionFilteredGenotypes' in GATK's SelectVariants\\n\"\n msg += \" --mnnc\\tmaximum number of not-called genotypes to keep a variant\\n\"\n msg += \"\\t\\tused in step 9\\n\"\n msg += \" --mfnc\\tmaximum fraction of not-called genotypes to keep a variant\\n\"\n msg += \"\\t\\tused in step 9\\n\"\n msg += \"\\t\\tsee '--maxNOCALLfraction' in GATK's SelectVariants\\n\"\n msg += \" --fam\\tpath to the file containing pedigree information\\n\"\n msg += \"\\t\\tused in step 9\\n\"\n msg += \"\\t\\tdiscard variants with Mendelian violations (see Semler et al, 2012)\\n\"\n msg += \"\\t\\tshould be in the 'fam' format specified by PLINK\\n\"\n msg += \"\\t\\tvalidation strictness (GATK '-pedValidationType') is set at 'SILENT'\\n\"\n msg += \"\\t\\t allowing some samples to be absent from the pedigree\\n\"\n msg += \" --mvq\\tminimum GQ for each trio member to accept a variant as a Mendelian violation\\n\"\n msg += \"\\t\\tused in step 9 if '--fam' is specified\\n\"\n msg += \"\\t\\tdefault=0\\n\"\n msg += \" --xlssf\\tpath to the file with genotypes to exclude\\n\"\n msg += \"\\t\\tused in step 9 (can be especially useful if '--fam' is specified)\\n\"\n msg += \" --tmpd\\tpath to a temporary directory on child nodes (default=.)\\n\"\n msg += \"\\t\\te.g. 
it can be /tmp or /scratch\\n\"\n msg += \"\\t\\tused in step 4 for 'samtools sort'\\n\"\n msg += \"\\t\\tused in step 7 for 'GATK HaplotypeCaller'\\n\"\n msg += \" --jvmXms\\tinitial memory allocated to the Java Virtual Machine\\n\"\n msg += \"\\t\\tdefault=512m (can also be specified as 1024k, 1g, etc)\\n\"\n msg += \"\\t\\tused in steps 4, 5, 6, 7 and 8 for Picard and GATK\\n\"\n msg += \" --jvmXmx\\tmaximum memory allocated to the Java Virtual Machine\\n\"\n msg += \"\\t\\tdefault=4g\\n\"\n msg += \"\\t\\tused in steps 4, 5, 6, 7 and 8 for Picard and GATK\\n\"\n msg += \" --queue2\\tname of the second cluster queue (default=bigmem.q)\\n\"\n msg += \"\\t\\tused in step 4 for Picard to collect insert sizes\\n\"\n msg += \" --knowni\\tpath to a VCF file with known indels (for local realignment)\\n\"\n msg += \" --known\\tpath to a VCF file with known variants (e.g. from dbSNP)\\n\"\n msg += \" --force\\tforce to re-run step(s)\\n\"\n msg += \"\\t\\tthis removes without warning the step directory if it exists\\n\"\n msg += \"\\n\"\n msg += \"Examples:\\n\"\n msg += \" %s --step 1 --samples samples.txt\\n\" % os.path.basename(sys.argv[0])\n msg += \"\\n\"\n msg += \"Details:\\n\"\n msg += \"This program aims at genotyping a set of 'genotypes' using data from\\n\"\n msg += \"a restriction-assisted DNA sequencing (RAD-seq) experiment, also known\\n\"\n msg += \"as a genotyping-by-sequencing (GBS) experiment.\\n\"\n msg += \"Here, by 'genotype', we mean the entity which is the focus of the\\n\"\n msg += \"study. For instance, it can be a plant variety (or a human being), or\\n\"\n msg += \"the specific clone of a given plant variety (or a specific tumor of a\\n\"\n msg += \"given human being), etc.\\n\"\n msg += \"Importantly, note that the content of the 'genotype' column will\\n\"\n msg += \"be used to set the 'SM' (sample) tag of the 'RG' (read group) header\\n\"\n msg += \"record type of the SAM format (see http://www.htslib.org/). However,\\n\"\n msg += \"internal to this program, the term 'sample' corresponds to the unique\\n\"\n msg += \"quadruplet (genotype,flowcell,lane,barcode) for steps 1 and 2, and to\\n\"\n msg += \"the unique triplet (genotype,flowcell,lane) for the others.\\n\"\n msg += \"Jobs are executed in parallel (--schdlr). Their return status is\\n\"\n msg += \"recorded in a SQLite database which is removed at the end. If a job\\n\"\n msg += \"fails, the whole script stops with an error.\\n\"\n msg += \"\\n\"\n msg += \"Dependencies:\\n\"\n msg += \"Python >= 2.7; Biopython; pyutilstimflutre >= 0.5\\n\"\n msg += \"\\n\"\n msg += \"Report bugs to <[email protected]>.\"\n print(msg); sys.stdout.flush()",
"def usage():\n print(\"[1] Getting help from a cipher \")\n print(\" ---> ./cryptogra.py caesar -h \")\n print(\"\")",
"def command_help(args):\n\tprint_usage()\n\treturn 0",
"def print_usage_command(self):\n print self.get_usage_command()",
"def print_usage_command(self):\n print self.get_usage_command()",
"def help_opt(self):\n print(OPTIONS)",
"def usage():",
"def usage():",
"def help_help(self):\n print(\"List commands or print details about a command\")",
"def _usage_options_example(self):\n pass",
"def treatCmdOpts(argv):\n baseName = os.path.basename(__file__)\n amc.cBaseName = colored(baseName, 'yellow')\n\n helpTxt = amc.cBaseName + ' analyses observation statistics file for selected GNSSs'\n\n # create the parser for command line arguments\n parser = argparse.ArgumentParser(description=helpTxt)\n\n parser.add_argument('--obsstat', help='observation statistics file', type=str, required=True)\n\n parser.add_argument('--freqs', help='select frequencies to use (out of {freqs:s}, default {freq:s})'.format(freqs='|'.join(gfzc.lst_freqs), freq=colored(gfzc.lst_freqs[0], 'green')), default=gfzc.lst_freqs[0], type=str, required=False, action=gco.freqtype_action, nargs='+')\n\n parser.add_argument('--cutoff', help='cutoff angle in degrees (default {mask:s})'.format(mask=colored('0', 'green')), default=0, type=int, required=False, action=gco.cutoff_action)\n\n parser.add_argument('--dbcvs', help='Add information to CVS database (default {cvsdb:s})'.format(cvsdb=colored(gco.CVSDB_OBSTLE, 'green')), required=False, type=str, default=gco.CVSDB_OBSTLE)\n\n parser.add_argument('--plot', help='displays interactive plots (default False)', action='store_true', required=False, default=False)\n\n parser.add_argument('--logging', help='specify logging level console/file (two of {choices:s}, default {choice:s})'.format(choices='|'.join(gco.lst_logging_choices), choice=colored(' '.join(gco.lst_logging_choices[3:5]), 'green')), nargs=2, required=False, default=gco.lst_logging_choices[3:5], action=gco.logging_action)\n\n # drop argv[0]\n args = parser.parse_args(argv[1:])\n\n # return arguments\n return args.obsstat, args.freqs, args.cutoff, args.dbcvs, args.plot, args.logging",
"def help(self):\r\n self._short_help(None, None, None, None)",
"def usage(self):\n\n # header\n self.usage_header()\n\n print _(\"\"\"Screen: %(screen)s\nDescription: %(description)s\n\nUsage: %(app_name)s %(screen)s [options]\"\"\") % {\n 'app_name': constants.App.NAME,\n 'screen': self.name,\n 'description': self.description,\n }\n # any additional info in between (see other classes for reference)\n self._usage_options_example()\n\n #footer\n self.usage_footer()",
"def _cmd_metrics(args):\n if (\n len(args.cnarrays) > 1\n and args.segments\n and len(args.segments) > 1\n and len(args.cnarrays) != len(args.segments)\n ):\n raise ValueError(\n \"Number of coverage/segment filenames given must be \"\n \"equal, if more than 1 segment file is given.\"\n )\n\n cnarrs = map(read_cna, args.cnarrays)\n if args.segments:\n args.segments = map(read_cna, args.segments)\n table = metrics.do_metrics(cnarrs, args.segments, args.drop_low_coverage)\n write_dataframe(args.output, table)",
"def printHelp():\n print(\"amqWorkApiMass.py -n <msgcnt> -b <body> -m <headers> -s <path/to/bodyandheaders>\")",
"def do_overview(self):\n summaries = []\n for name, cmd in self.base.commands.iteritems():\n summaries.append(' %-14s %s\\n' % (name, cmd.get_summary()))\n summaries.sort()\n sys.stdout.write('Usage: %s COMMAND ARGUMENTS...\\n\\n' \\\n 'Available commands:\\n' % (self.base.scriptname, ))\n for line in summaries:\n sys.stdout.write(line)",
"def print_help_info(self, global_options):\r\n usage = ['',\"Type '%s help <subcommand>' for help on a specific subcommand.\" % self.prog_name,'']\r\n usage.append('Available subcommands:')\r\n commands = self.get_commands(global_options).keys()\r\n commands.sort()\r\n for cmd in commands:\r\n usage.append(' %s' % cmd)\r\n return '\\n'.join(usage)"
] | [
"0.6095599",
"0.59995025",
"0.5967684",
"0.58918285",
"0.5879187",
"0.58760214",
"0.58532023",
"0.58369166",
"0.58359265",
"0.5832341",
"0.58278644",
"0.58267254",
"0.582368",
"0.57165456",
"0.5707957",
"0.56925315",
"0.5681756",
"0.5681756",
"0.5672487",
"0.5671114",
"0.5671114",
"0.56505",
"0.56297034",
"0.56186634",
"0.5612054",
"0.5602668",
"0.5588876",
"0.5586467",
"0.5581898",
"0.5570037"
] | 0.60818744 | 1 |
Report usage metrics for active adapters of CPCs in DPM mode. In addition to the command-specific options shown in this help text, the general options (see 'zhmc help') can also be specified right after the 'zhmc' command name. | def metrics_adapter(cmd_ctx, cpc, adapter, **options):
cmd_ctx.execute_cmd(
lambda: cmd_metrics_adapter(cmd_ctx, cpc, adapter, options)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def metrics_channel(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_channel(cmd_ctx, cpc, options))",
"def metrics_proc(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_proc(cmd_ctx, cpc, options))",
"def metrics_networkport(cmd_ctx, cpc, adapter, **options):\n cmd_ctx.execute_cmd(\n lambda: cmd_metrics_networkport(cmd_ctx, cpc, adapter, options))",
"def metrics_cpc(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_cpc(cmd_ctx, cpc, options))",
"def metrics_crypto(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_crypto(cmd_ctx, cpc, options))",
"def do_hostinfo(self, args):\n host = opts = None\n if args:\n args = args.split()\n host = args.pop()\n\n if not host:\n print('Usage: hostinfo [-cdmu] host_name_or_ip')\n print(' uptime and load stats returned if no options specified')\n return\n\n try:\n ip = socket.gethostbyname(host)\n except socket.gaierror:\n print('cannot resolve', host, file=sys.stderr)\n return\n\n opts = []\n while args:\n arg = args.pop(0)\n if arg.startswith('--'):\n if arg == '--cpu':\n opts.append('c')\n elif arg == '--disk':\n opts.append('d')\n elif arg == '--memory':\n opts.append('m')\n elif arg == '--uptime':\n opts.append('u')\n else:\n print('unrecognized option:', arg, file=sys.stderr)\n return\n else:\n if arg[0] == '-':\n for ch in arg[1:]:\n if ch in ('cdmu') and ch not in opts:\n opts.append(ch)\n else:\n print('unrecognized option:', ch, file=sys.stderr)\n return\n\n stats = self._qm.get_host_stats(ip)\n\n if not opts:\n # Get uptime and load averages.\n up = stats['uptime']\n load = stats['cpu_load']\n print('Up for %s days, %s hours, %s minutes, '\n 'load averages: %s, %s, %s'\n % (up['days'], up['hours'], up['minutes'], load['one'],\n load['five'], load['fifteen']))\n return\n\n all_stats = []\n for opt in opts:\n if opt == 'd':\n # Get disk usage.\n disks = stats['disk_usage']\n st = ['Disk Usage:']\n for mount, disk_info in disks.viewitems():\n st.append(' Usage for: %s' % mount)\n for k, v in disk_info.viewitems():\n st.append(' %s: %s' % (k, v))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'c':\n # Get CPU load.\n load_stats = stats['cpu_load']\n st = ['CPU Load Average:']\n st.append(' last one minute: %s' % load_stats['one'])\n st.append(' last five minutes: %s' % load_stats['five'])\n st.append(' last fifteen minutes: %s' % load_stats['fifteen'])\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'm':\n # Get Memory Usage.\n memory_usage = stats['memory_usage']\n st = ['Memory usage:']\n for k, v in memory_usage.viewitems():\n st.append(' %s: %s' % (k, v))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'u':\n # Get uptime.\n up = stats['uptime']\n st = ['Uptime:']\n st.append(' Up for %s days, %s hours and %s minutes'\n % (up['days'], up['hours'], up['minutes']))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n\n print('\\n'.join(all_stats))",
"def metrics_env(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_env(cmd_ctx, cpc, options))",
"def main():\n\n args = parse_args()\n metric_sender = MetricSender(verbose=args.verbose, debug=args.debug)\n\n discovery_key_disk = 'disc.disk'\n interval = 3\n pcp_disk_dev_metrics = ['disk.dev.total', 'disk.dev.avactive']\n item_prototype_macro_disk = '#OSO_DISK'\n item_prototype_key_tps = 'disc.disk.tps'\n item_prototype_key_putil = 'disc.disk.putil'\n\n disk_metrics = pminfo.get_sampled_data(pcp_disk_dev_metrics, interval, 2)\n\n pcp_metrics_divided = {}\n for metric in pcp_disk_dev_metrics:\n pcp_metrics_divided[metric] = {k: v for k, v in disk_metrics.items() if metric in k}\n\n # do TPS checks; use disk.dev.total\n filtered_disk_totals = clean_up_metric_dict(pcp_metrics_divided[pcp_disk_dev_metrics[0]],\n pcp_disk_dev_metrics[0] + '.')\n\n # Add dynamic items\n metric_sender.add_dynamic_metric(discovery_key_disk, item_prototype_macro_disk, filtered_disk_totals.keys())\n\n # calculate the TPS and add them to the ZaggSender\n for disk, totals in filtered_disk_totals.iteritems():\n disk_tps = (totals[1] - totals[0]) / interval\n metric_sender.add_metric({'%s[%s]' % (item_prototype_key_tps, disk): disk_tps})\n\n # do % Util checks; use disk.dev.avactive\n filtered_disk_totals = clean_up_metric_dict(pcp_metrics_divided[pcp_disk_dev_metrics[1]],\n pcp_disk_dev_metrics[1] + '.')\n\n # calculate the % Util and add them to the ZaggSender\n for disk, totals in filtered_disk_totals.iteritems():\n total_active = (float)(totals[1] - totals[0]) / 1000.0\n putil = 100 * total_active / interval\n\n metric_sender.add_metric({'%s[%s]' % (item_prototype_key_putil, disk): putil})\n\n metric_sender.send_metrics()",
"def show(self):\n prev_queries = 0\n prev_cpu_sys = 0\n prev_cpu_user = 0\n \n lines = {\n \"Uptime (seconds)\": \"--\",\n \"Number of queries\": \"--\",\n \"Query per second\": \"--\",\n \"ACL drops\": \"--\",\n \"Dynamic drops\": \"--\",\n \"Rule drops\": \"--\",\n \"CPU Usage (%s)\": \"--\",\n \"Cache hitrate\": \"--\"\n }\n\n while True:\n try:\n # get stats from dnsdist\n stats = Statistics(console=self.console)\n global_stats = stats[\"global\"]\n \n qps = int(global_stats[\"queries\"]) - prev_queries\n prev_queries = int(global_stats[\"queries\"])\n cpu = (int(global_stats[\"cpu-sys-msec\"])+int(global_stats[\"cpu-user-msec\"]) - prev_cpu_sys - prev_cpu_user) / 10\n prev_cpu_sys = int(global_stats[\"cpu-sys-msec\"])\n prev_cpu_user = int(global_stats[\"cpu-user-msec\"])\n \n lines[\"Uptime (seconds)\"] = global_stats[\"uptime\"]\n lines[\"Number of queries\"] = global_stats[\"queries\"]\n lines[\"Query per second\"] = qps\n lines[\"CPU Usage (%s)\"] = cpu\n lines[\"ACL drops\"] = global_stats[\"acl-drops\"]\n lines[\"Rule drops\"] = global_stats[\"rule-drop\"]\n lines[\"Cache hitrate\"] = global_stats[\"cache-hits\"]\n lines[\"Dynamic drops\"] = global_stats[\"dyn-blocked\"]\n\n # reprint the lines \n sys.stdout.write(\"\\033[1mDashboard for dnsdist\\033[0m\\n\")\n sys.stdout.write(\"\\n\")\n sys.stdout.write(\"Global:\\n\")\n for k,v in lines.items():\n sys.stdout.write(\"\\t%s: %s\\n\" % (k,v))\n sys.stdout.write(\"Backends:\\n\")\n for s in stats[\"backends\"]:\n if not len(s[\"name\"]):\n s[\"name\"] = \"--\"\n if not len(s[\"pools\"]):\n s[\"pools\"] = \"--\"\n sys.stdout.write(\"\\t#%s / %s / %s / %s\\n\" % (s[\"#\"],s[\"address\"],s[\"name\"],s[\"pools\"]) )\n sys.stdout.write(\"\\t\\tNumber of queries: %s\\n\" % s[\"queries\"])\n sys.stdout.write(\"\\t\\tQuery per second: %s\\n\" % s[\"qps\"])\n sys.stdout.write(\"\\t\\tNumber of drops: %s\\n\" % s[\"drops\"])\n sys.stdout.write(\"\\n\")\n sys.stdout.write(\"Ctrl+C to exit\\n\")\n \n time.sleep(1)\n \n \n # move up cursor and delete whole line\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\") \n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n for k,v in lines.items():\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\") \n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n for s in stats[\"backends\"]:\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\") \n sys.stdout.write(\"\\x1b[1A\\x1b[2K\") \n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n \n del stats\n except KeyboardInterrupt:\n break",
"async def stats(self, ctx):\n if ctx.invoked_subcommand is None:\n await send_cmd_help(ctx)",
"def show_help(argv=None):\n if argv:\n if \"list_datasets\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"list_datasets\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm ls\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n elif \"search_datasets\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"search_datasets\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm search <keyword>\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n elif \"show_mounts\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"show_mounts\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm ps\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n elif \"mount\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"mount\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm mount <dataset_name> [<mount_path>]\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n elif \"mmount\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"mmount\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm mmount <dataset_name> [<dataset_name> ...]\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n elif \"unmount\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"unmount\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm unmount <mount_id> [<cleanup_flag>]\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n elif \"munmount\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"munmount\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm munmount <mount_id> [<mount_id> ...]\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n elif \"clean\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"clean\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm clean\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n else:\n sdm_util.print_message(\"Unrecognized command\")\n return 1\n else:\n sdm_util.print_message(\"command : sdm <COMMAND> [<COMMAND_SPECIFIC_ARGS> ...]\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(\"Available Commands\")\n\n tbl = PrettyTable()\n tbl.field_names = [\"COMMAND\", \"DESCRIPTION\"]\n for cmd in COMMANDS:\n command, _, desc = cmd\n command_str = \" | \".join(command)\n tbl.add_row([command_str, desc])\n\n sdm_util.print_message(tbl)\n sdm_util.print_message(\"\")\n return 0",
"def main(cls):\n parser = cls.make_argument_parser()\n args = parser.parse_args()\n args.device = make_hoomd_device(args)\n benchmark = cls(**vars(args))\n performance = benchmark.execute()\n\n if args.device.communicator.rank == 0:\n print(f'{numpy.mean(performance)}')",
"def metrics(self, account_id):\n from pureport_client.commands.accounts.metrics import Command\n return Command(self.client, account_id)",
"def usage(err=''):\r\n m = '%s\\n' %err\r\n m += 'Default usage is to list Cases closed for the 30 days\\n'\r\n m += '\\n Example:\\n'\r\n m += ' closedcases -n 90 \\n' \r\n m += ' \\n'\r\n# m += ' closedcases -n 60 -s blast5 \\n'\r\n return m",
"def options():\n print \"\"\"Options summary:\n -h, --help\n -u, --usage\n -v, --verbose <verb_level>\n -e, --endpoint <endpoint>\n -i, --interface-type <iface_type>\n -r, --recursive\n --dbs-conf <conf_file>\n --show-prod\n --show-caf\n --only-subscribed\n --only-custodial\n \"\"\"",
"def usage():\n \n print '-b <bench> the bench to show.'\n print '-c <config> the config to show (GPU, 8888, 565, etc).'\n print '-d <dir> a directory containing bench_r<revision>_<scalar> files.'\n print '-e <file> file containing expected bench values/ranges.'\n print ' Will raise exception if actual bench values are out of range.'\n print ' See bench_expectations.txt for data format and examples.'\n print '-f <revision>[:<revision>] the revisions to use for fitting.'\n print ' Negative <revision> is taken as offset from most recent revision.'\n print '-i <time> the time to ignore (w, c, g, etc).'\n print ' The flag is ignored when -t is set; otherwise we plot all the'\n print ' times except the one specified here.'\n print '-l <title> title to use for the output graph'\n print '-m <representation> representation of bench value.'\n print ' See _ListAlgorithm class in bench_util.py.'\n print '-o <path> path to which to write output; writes to stdout if not specified'\n print '-r <revision>[:<revision>] the revisions to show.'\n print ' Negative <revision> is taken as offset from most recent revision.'\n print '-s <setting>[=<value>] a setting to show (alpha, scalar, etc).'\n print '-t <time> the time to show (w, c, g, etc).'\n print '-x <int> the desired width of the svg.'\n print '-y <int> the desired height of the svg.'\n print '--default-setting <setting>[=<value>] setting for those without.'",
"def treatCmdOpts(argv):\n baseName = os.path.basename(__file__)\n amc.cBaseName = colored(baseName, 'yellow')\n\n helpTxt = amc.cBaseName + ' analyses observation statistics file for selected GNSSs'\n\n # create the parser for command line arguments\n parser = argparse.ArgumentParser(description=helpTxt)\n\n parser.add_argument('--obsstat', help='observation statistics file', type=str, required=True)\n\n parser.add_argument('--freqs', help='select frequencies to use (out of {freqs:s}, default {freq:s})'.format(freqs='|'.join(gfzc.lst_freqs), freq=colored(gfzc.lst_freqs[0], 'green')), default=gfzc.lst_freqs[0], type=str, required=False, action=gco.freqtype_action, nargs='+')\n\n parser.add_argument('--cutoff', help='cutoff angle in degrees (default {mask:s})'.format(mask=colored('0', 'green')), default=0, type=int, required=False, action=gco.cutoff_action)\n\n parser.add_argument('--dbcvs', help='Add information to CVS database (default {cvsdb:s})'.format(cvsdb=colored(gco.CVSDB_OBSTLE, 'green')), required=False, type=str, default=gco.CVSDB_OBSTLE)\n\n parser.add_argument('--plot', help='displays interactive plots (default False)', action='store_true', required=False, default=False)\n\n parser.add_argument('--logging', help='specify logging level console/file (two of {choices:s}, default {choice:s})'.format(choices='|'.join(gco.lst_logging_choices), choice=colored(' '.join(gco.lst_logging_choices[3:5]), 'green')), nargs=2, required=False, default=gco.lst_logging_choices[3:5], action=gco.logging_action)\n\n # drop argv[0]\n args = parser.parse_args(argv[1:])\n\n # return arguments\n return args.obsstat, args.freqs, args.cutoff, args.dbcvs, args.plot, args.logging",
"def metrics_flash(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_flash(cmd_ctx, cpc, options))",
"def _get_metrics_options(metrics):\n metrics_options = []\n if metrics is None:\n metrics = []\n for static_metric in metrics:\n metrics_options += [\n \"-m\",\n static_metric.metric.mp_metric_name,\n str(static_metric.value),\n ]\n return metrics_options",
"def Usage(shorthelp=0, writeto_stdout=0, detailed_error=None,\n exitcode=None, show_cmd=None, show_global_flags=False):\n printer('%s: Incorrect usage; details below.' % show_cmd)\n printer('Correct usage is as follows:')\n printer('')\n for line in (' ' + cmd.__doc__.rstrip()).splitlines():\n printer(line)\n # Print out str(FLAGS) for just the UICmd-specific flags.\n tmp_flags = flags.FlagValues()\n unused_cmd = type(cmd)(show_cmd, tmp_flags)\n prefix = _UICMD_MODULE_NAME + ':\\n'\n flag_str = tmp_flags.ModuleHelp(_UICMD_MODULE_NAME)\n flag_str = flag_str.lstrip()\n if flag_str.startswith(prefix):\n flag_str = flag_str[len(prefix):]\n if flag_str:\n printer('')\n printer('flags:')\n for line in flag_str.splitlines():\n printer(line)\n if detailed_error is not None:\n printer('')\n printer('The incorrect usage is as follows:')\n printer('')\n for line in unicode(detailed_error).splitlines():\n printer(' ' + line)",
"def usage(self, host):",
"def _GenAppcommandsUsage(cmd, printer):\n # pylint: disable=too-many-arguments,unused-argument\n def Usage(shorthelp=0, writeto_stdout=0, detailed_error=None,\n exitcode=None, show_cmd=None, show_global_flags=False):\n \"\"\"A replacement for app.usage.\"\"\"\n printer('%s: Incorrect usage; details below.' % show_cmd)\n printer('Correct usage is as follows:')\n printer('')\n for line in (' ' + cmd.__doc__.rstrip()).splitlines():\n printer(line)\n # Print out str(FLAGS) for just the UICmd-specific flags.\n tmp_flags = flags.FlagValues()\n unused_cmd = type(cmd)(show_cmd, tmp_flags)\n prefix = _UICMD_MODULE_NAME + ':\\n'\n flag_str = tmp_flags.ModuleHelp(_UICMD_MODULE_NAME)\n flag_str = flag_str.lstrip()\n if flag_str.startswith(prefix):\n flag_str = flag_str[len(prefix):]\n if flag_str:\n printer('')\n printer('flags:')\n for line in flag_str.splitlines():\n printer(line)\n if detailed_error is not None:\n printer('')\n printer('The incorrect usage is as follows:')\n printer('')\n for line in unicode(detailed_error).splitlines():\n printer(' ' + line)\n\n return Usage",
"def print_help(self):\r\n\t\ttext = \"\\tName: ml_scikit_OPTICS\"\r\n\t\ttext += \"\\n\\t\\tThis machine learning plugin uses scikit-learn's OPTICS algorithm.\\n\"\r\n\t\ttext += \"\\n\\t\\tOptional Parameters:\"\r\n\t\ttext += \"\\n\\t\\t\\tOPTICS_skip_normalization: Do NOT perform normalization (scaling) of data, skip this step.\"\r\n\t\ttext += \"\\n\\t\\t\\OPTICS_eps: Specify eps parameter (default is 1.0).\"\r\n\t\ttext += \"\\n\\t\\t\\OPTICS_min_samples: Specify min_samples parameter (default is 5).\"\r\n#\r\n# OPTICS (with memory complexity n) is an alternative to DBSCAN (with memory complexity n^2)\r\n# which has time complexity n^2 in general with the default max_eps = np.inf. \r\n# We will set max_eps = eps to reduce the run-time.\r\n#\r\n\t\treturn text",
"def HelpCommand(self, unused_args, unused_sub_opts=None, unused_headers=None,\n unused_debug=None):\n self.OutputUsageAndExit()",
"def print_help():\n print \"\"\"\nMeasure Operating System Performance (mosp)\n-------------------------------------------\n\nUse this program to measure and report on operating system\nperformance.\n\nThis code measures operating system performance,\nincluding CPU, memory, disk and network, and\noutputs stats to screen and optionally to file\ntoo for use in performance analysis\n\nUses the psutil library\n\nInstall psutil (Ubuntu) if you don't already have it:\n sudo apt-get install python-dev\n sudo pip install psutil\n\nUsage:\n python mosp.py [options]\n\nExample usage:\n python mosp.py -W -i 2\n\nOptions:\n -h --help Display this help and exit\n -m --max-run-time Maximum time to run for before exiting\n (default is infinite)\n -i --interval Interval between requests in seconds\n (default is 1)\n -w --output-file Specify an output filename\n -W Output results to default filename\n default format is:\n mosp-HOSTNAME-YYYYMMDD-HHMMSS.csv\n -b --output-path Specify path to output file directory\n -j --no-header-row Suppress writing header row into CSV\n -v --version Output version information and exit\n\n \"\"\"\n return()",
"def help(self, args):\n print('No commands available for this consumer')",
"def print_usage_command(self):\n print self.get_usage_command()",
"def print_usage_command(self):\n print self.get_usage_command()",
"def help_opt(self):\n print(OPTIONS)",
"def usage(msgarg):\n if msgarg:\n sys.stderr.write(\"error: %s\\n\" % msgarg)\n print(\"\"\"\\\n usage: %s [options]\n\n options:\n -d increase debug msg verbosity level\n -c N emit N classes (def: 500) per instances\n -I N emit N instances\n\n \"\"\" % os.path.basename(sys.argv[0]))\n sys.exit(1)"
] | [
"0.6317614",
"0.59251046",
"0.58999896",
"0.58430314",
"0.57148993",
"0.56526655",
"0.547031",
"0.53368515",
"0.5278745",
"0.52745694",
"0.52628577",
"0.51370335",
"0.5132186",
"0.51290196",
"0.512005",
"0.5110605",
"0.51074606",
"0.5080317",
"0.5074927",
"0.50636065",
"0.5045238",
"0.50326115",
"0.5021564",
"0.50138205",
"0.49624792",
"0.49544725",
"0.49428305",
"0.49428305",
"0.4932547",
"0.49250507"
] | 0.643978 | 0 |
Report usage metrics for all channels of CPCs in classic mode. In addition to the command-specific options shown in this help text, the general options (see 'zhmc help') can also be specified right after the 'zhmc' command name. | def metrics_channel(cmd_ctx, cpc, **options):
cmd_ctx.execute_cmd(lambda: cmd_metrics_channel(cmd_ctx, cpc, options)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def stats(caesar, input):\n commands = {}\n users = {}\n channels = {}\n\n ignore = set(['f_note', 'startup', 'message', 'noteuri'])\n for (name, user), count in caesar.stats.items(): \n if name in ignore: continue\n if not user: continue\n\n if not user.startswith('#'): \n try: users[user] += count\n except KeyError: users[user] = count\n else: \n try: commands[name] += count\n except KeyError: commands[name] = count\n\n try: channels[user] += count\n except KeyError: channels[user] = count\n\n comrank = sorted([(b, a) for (a, b) in commands.iteritems()], reverse=True)\n userank = sorted([(b, a) for (a, b) in users.iteritems()], reverse=True)\n charank = sorted([(b, a) for (a, b) in channels.iteritems()], reverse=True)\n\n # most heavily used commands\n creply = 'most used commands: '\n for count, command in comrank[:10]: \n creply += '%s (%s), ' % (command, count)\n caesar.say(creply.rstrip(', '))\n\n # most heavy users\n reply = 'power users: '\n for count, user in userank[:10]: \n reply += '%s (%s), ' % (user, count)\n caesar.say(reply.rstrip(', '))\n\n # most heavy channels\n chreply = 'power channels: '\n for count, channel in charank[:3]: \n chreply += '%s (%s), ' % (channel, count)\n caesar.say(chreply.rstrip(', '))",
"def channel_help(message):\n message.reply(Strings['HELP'].format(config.HELP_URL))",
"def metrics_cpc(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_cpc(cmd_ctx, cpc, options))",
"def metrics_proc(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_proc(cmd_ctx, cpc, options))",
"async def channel_stats(self, ctx, channel: discord.TextChannel = None):\n channel = channel or ctx.channel\n embed = discord.Embed(\n title=f\"Stats for **{channel.name}**\",\n description=f\"{'Category: {}'.format(channel.category.name) if channel.category else 'This channel is not in a category'}\",\n color=discord.Color.blurple(),\n )\n embed.add_field(name=\"Channel Guild\",\n value=ctx.guild.name, inline=False)\n embed.add_field(name=\"Channel Id\", value=channel.id, inline=False)\n embed.add_field(\n name=\"Channel Topic\",\n value=f\"{channel.topic if channel.topic else 'No topic.'}\",\n inline=False,\n )\n embed.add_field(name=\"Channel Position\",\n value=channel.position, inline=False)\n embed.add_field(\n name=\"Channel Slowmode Delay\", value=channel.slowmode_delay, inline=False\n )\n embed.add_field(name=\"Channel is nsfw?\",\n value=channel.is_nsfw(), inline=False)\n embed.add_field(name=\"Channel is news?\",\n value=channel.is_news(), inline=False)\n embed.add_field(\n name=\"Channel Creation Time\", value=channel.created_at, inline=False\n )\n embed.add_field(\n name=\"Channel Permissions Synced\",\n value=channel.permissions_synced,\n inline=False,\n )\n embed.add_field(name=\"Channel Hash\", value=hash(channel), inline=False)\n\n await ctx.message.delete()\n await ctx.send(embed=embed)",
"def help(self, args):\n print('No commands available for this consumer')",
"def metrics_crypto(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_crypto(cmd_ctx, cpc, options))",
"async def stats(self, ctx):\n if ctx.invoked_subcommand is None:\n await send_cmd_help(ctx)",
"def command_help(self, bot, update):\n\n messages = [\n 'Available commands:',\n '/who - Who is Myles?',\n '/where - Where is Myles?',\n '/tweet - What was the last tweet Myles sent?',\n '/photo - What was the last Instagram photo Myles took?',\n '/web - Where can I find Myles on the interwebs?',\n ]\n\n self.send_messages(bot, update, messages)",
"async def serverchart(self, ctx: commands.Context, messages: int = 1000):\n if messages < 5:\n return await ctx.send(\"Don't be silly.\")\n channel_list = []\n blacklisted_channels = await self.config.guild(ctx.guild).channel_deny()\n for channel in ctx.guild.text_channels:\n channel: discord.TextChannel\n if channel.id in blacklisted_channels:\n continue\n if channel.permissions_for(ctx.message.author).read_messages is False:\n continue\n if channel.permissions_for(ctx.guild.me).read_messages is False:\n continue\n channel_list.append(channel)\n\n if len(channel_list) == 0:\n return await ctx.send(\"There are no channels to read... This should theoretically never happen.\")\n\n embed = discord.Embed(\n description=\"Fetching messages from the entire server this **will** take a while.\",\n colour=await self.bot.get_embed_colour(location=ctx.channel),\n )\n global_fetch_message = await ctx.send(embed=embed)\n global_history = []\n\n for channel in channel_list:\n embed = discord.Embed(\n title=f\"Fetching messages from #{channel.name}\",\n description=\"This might take a while...\",\n colour=await self.bot.get_embed_colour(location=channel)\n )\n loading_message = await ctx.send(embed=embed)\n try:\n history = await self.fetch_channel_history(channel, loading_message, messages)\n global_history += history\n await loading_message.delete()\n except discord.errors.Forbidden:\n try:\n await loading_message.delete()\n except discord.NotFound:\n continue\n except discord.NotFound:\n try:\n await loading_message.delete()\n except discord.NotFound:\n continue \n\n msg_data = self.calculate_member_perc(global_history)\n # If no members are found.\n if len(msg_data[\"users\"]) == 0:\n try:\n await global_fetch_message.delete()\n except discord.NotFound:\n pass\n return await ctx.send(f\"Only bots have sent messages in this server... Wauw...\")\n\n top_twenty, others = self.calculate_top(msg_data)\n chart = await self.create_chart(top_twenty, others, ctx.guild)\n\n try:\n await global_fetch_message.delete()\n except discord.NotFound:\n pass\n await ctx.send(file=discord.File(chart, \"chart.png\"))",
"async def help(self, context):\n prefix = config.BOT_PREFIX\n user=context.message.author\n if not isinstance(prefix, str):\n prefix = prefix[0]\n embed = discord.Embed(title=\"Help\", description=\"List of available commands:\", color=0x00FF00)\n for i in self.bot.cogs:\n cog = self.bot.get_cog(i.lower())\n commands = cog.get_commands()\n command_list = [command.name for command in commands if not command.hidden or context.message.author.id in config.OWNERS]\n command_description = [command.help for command in commands if not command.hidden or context.message.author.id in config.OWNERS]\n help_text = '\\n'.join(f'{prefix}{n} - {h}' for n, h in zip(command_list, command_description))\n embed = discord.Embed(title=f\"Commands in {i.capitalize()} Cog\", description=f'```{help_text}```', color=0x00FF00)\n await user.send(embed=embed)\n if not isinstance(context.message.channel, discord.channel.DMChannel):\n await context.send(f\"DM sent to {user.mention}\")\n await context.message.delete()",
"async def help(self, ctx, *cog):\n try:\n if not cog:\n halp = discord.Embed(title='Useless\\' Commands',\n description='Use `!help *category*` to find out more about the commands in them!')\n cogs_desc = ''\n for x in self.bot.cogs:\n cogs_desc = f'{x}'\n cmds = ''\n for cmd in self.bot.get_cog(x).get_commands():\n if not cmd.hidden:\n cmds += f'`{cmd.name}`, '\n if cmds != '':\n halp.add_field(name= cogs_desc,\n value=f'{cmds[0:-2]}',\n inline=False)\n cmds_desc = ''\n for y in self.bot.walk_commands():\n if not y.cog_name and not y.hidden:\n cmds_desc += ('`{}` - {}'.format(y.name, y.help) + '\\n')\n if cmds_desc != '':\n halp.add_field(name='Uncatergorized Commands',\n value=cmds_desc[0:len(cmds_desc) - 1],\n inline=False)\n await ctx.send(embed=halp)\n else:\n if len(cog) > 1:\n halp = discord.Embed(title='Error!',\n description='I can only help with 1 category!',\n color=discord.Color.red())\n await ctx.send(embed=halp)\n else:\n found = False\n for x in self.bot.cogs:\n for y in cog:\n if x == y:\n halp = discord.Embed(\n title=cog[0] + ' Command Listing',\n description=self.bot.cogs[cog[0]].__doc__)\n for c in self.bot.get_cog(y).get_commands():\n if not c.hidden:\n halp.add_field(name=c.name,\n value=c.help,\n inline=False)\n found = True\n if not found:\n halp = discord.Embed(title='Error!',\n description='How do you even use \"' +\n cog[0] + '\"?',\n color=discord.Color.red())\n await ctx.send('', embed=halp)\n\n except:\n print('Pass')\n pass",
"async def managechannels(self, ctx:commands.Context):",
"async def help(self, channel_id, user_infos, user_id, team_id):\n helpMessage = \"Bienvenue dans l'aide du bot MovieQuizz ! \\n\" \\\n \"Ce bot va tester vos connaissances cinématographiques ! \\n\" \\\n \"Les commandes disponibles sont les suivantes : \\n\" \\\n \" - ask : vous questionne à propos d'un film \\n\" \\\n \" - rank : affiche votre position et score \\n\" \\\n \" - ranking : affiche les 10 meilleures joueurs \\n\" \\\n \" - help : Vous connaissez déjà celle-là. \\n\" \\\n \"Amusez-vous bien les lapins !\"\n return await self.sendText(helpMessage, channel_id,user_infos, team_id)",
"def help(bot, sender, sendmsg, label, args):\n\n clist = commands.commands\n csort = sorted(clist.values(), key=lambda c: c.__name__.lower())\n\n if len(args) > 0:\n page = int(args[0]) - 1\n else:\n page = 0\n\n pages = len(clist) // 10 + 1\n\n sendmsg(\"-- Help (Page {} of {}) --\".format(page + 1, pages))\n for i in range(10):\n if i >= len(csort):\n break\n\n command = csort[i + (page * 10)]\n sendmsg(\"{}: {}\".format(command.__name__, command.__doc__))",
"def analyt(analytics):\n API_KEY = secrets.YT_KEY\n youtube = build('youtube', 'v3', developerKey=API_KEY)\n request = youtube.channels().list(\n part='statistics',\n forUsername=analytics\n )\n response = request.execute()\n print(response)",
"def cmd_help(args):",
"def usage(self):\n self._usage1()\n print 'folder COOL_channel COOL_tag ROOT_file'\n self._usage2()",
"async def send_cog_help(self, cog):\n ctx = self.context\n title = cog.qualified_name\n embed = discord.Embed(\n title=title,\n description=cog.description,\n color=discord.Color.blue()\n )\n\n commands = cog.get_commands()\n\n if filtered_commands := await self.filter_commands(commands):\n for command in filtered_commands:\n embed.add_field(name=command, value=command.description or 'Without description')\n\n embed.set_footer(text=f'use {prefixo}help [command] for more information about commands')\n await ctx.reply(embed=embed)",
"def do_config():\n\n tracking = get_tracking()\n for unit in (\"ppm\", \"sec\"):\n\ttunit = unit\n\tif unit == \"sec\":\n\t tunit = \"seconds\"\n\tprint \"multigraph chrony_%s\" % unit\n\tprint \"graph_title NTP (Chrony) Statistics (%s)\" % unit\n\tprint \"graph_vlabel %s\" % unit\n\tprint \"graph_args --base 1000\"\n\tprint \"graph_category time\"\n\tprint \"graph_info NTP (Chrony) tracking statistics (the ones measured in %s)\" % tunit\n\tfor key in tracking[tunit]:\n\t item = tracking[tunit][key]\n\t print \"\"\"%s.label %s\n%s.draw LINE2\n%s.info %s\"\"\" % (key, item[\"label\"], key, key, item[\"label\"])\n\tprint\n return 0",
"def metrics_adapter(cmd_ctx, cpc, adapter, **options):\n cmd_ctx.execute_cmd(\n lambda: cmd_metrics_adapter(cmd_ctx, cpc, adapter, options))",
"def do_hostinfo(self, args):\n host = opts = None\n if args:\n args = args.split()\n host = args.pop()\n\n if not host:\n print('Usage: hostinfo [-cdmu] host_name_or_ip')\n print(' uptime and load stats returned if no options specified')\n return\n\n try:\n ip = socket.gethostbyname(host)\n except socket.gaierror:\n print('cannot resolve', host, file=sys.stderr)\n return\n\n opts = []\n while args:\n arg = args.pop(0)\n if arg.startswith('--'):\n if arg == '--cpu':\n opts.append('c')\n elif arg == '--disk':\n opts.append('d')\n elif arg == '--memory':\n opts.append('m')\n elif arg == '--uptime':\n opts.append('u')\n else:\n print('unrecognized option:', arg, file=sys.stderr)\n return\n else:\n if arg[0] == '-':\n for ch in arg[1:]:\n if ch in ('cdmu') and ch not in opts:\n opts.append(ch)\n else:\n print('unrecognized option:', ch, file=sys.stderr)\n return\n\n stats = self._qm.get_host_stats(ip)\n\n if not opts:\n # Get uptime and load averages.\n up = stats['uptime']\n load = stats['cpu_load']\n print('Up for %s days, %s hours, %s minutes, '\n 'load averages: %s, %s, %s'\n % (up['days'], up['hours'], up['minutes'], load['one'],\n load['five'], load['fifteen']))\n return\n\n all_stats = []\n for opt in opts:\n if opt == 'd':\n # Get disk usage.\n disks = stats['disk_usage']\n st = ['Disk Usage:']\n for mount, disk_info in disks.viewitems():\n st.append(' Usage for: %s' % mount)\n for k, v in disk_info.viewitems():\n st.append(' %s: %s' % (k, v))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'c':\n # Get CPU load.\n load_stats = stats['cpu_load']\n st = ['CPU Load Average:']\n st.append(' last one minute: %s' % load_stats['one'])\n st.append(' last five minutes: %s' % load_stats['five'])\n st.append(' last fifteen minutes: %s' % load_stats['fifteen'])\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'm':\n # Get Memory Usage.\n memory_usage = stats['memory_usage']\n st = ['Memory usage:']\n for k, v in memory_usage.viewitems():\n st.append(' %s: %s' % (k, v))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'u':\n # Get uptime.\n up = stats['uptime']\n st = ['Uptime:']\n st.append(' Up for %s days, %s hours and %s minutes'\n % (up['days'], up['hours'], up['minutes']))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n\n print('\\n'.join(all_stats))",
"def _cmd_metrics(args):\n if (\n len(args.cnarrays) > 1\n and args.segments\n and len(args.segments) > 1\n and len(args.cnarrays) != len(args.segments)\n ):\n raise ValueError(\n \"Number of coverage/segment filenames given must be \"\n \"equal, if more than 1 segment file is given.\"\n )\n\n cnarrs = map(read_cna, args.cnarrays)\n if args.segments:\n args.segments = map(read_cna, args.segments)\n table = metrics.do_metrics(cnarrs, args.segments, args.drop_low_coverage)\n write_dataframe(args.output, table)",
"def print_help(self):\r\n\t\ttext = \"\\tName: ml_scikit_OPTICS\"\r\n\t\ttext += \"\\n\\t\\tThis machine learning plugin uses scikit-learn's OPTICS algorithm.\\n\"\r\n\t\ttext += \"\\n\\t\\tOptional Parameters:\"\r\n\t\ttext += \"\\n\\t\\t\\tOPTICS_skip_normalization: Do NOT perform normalization (scaling) of data, skip this step.\"\r\n\t\ttext += \"\\n\\t\\t\\OPTICS_eps: Specify eps parameter (default is 1.0).\"\r\n\t\ttext += \"\\n\\t\\t\\OPTICS_min_samples: Specify min_samples parameter (default is 5).\"\r\n#\r\n# OPTICS (with memory complexity n) is an alternative to DBSCAN (with memory complexity n^2)\r\n# which has time complexity n^2 in general with the default max_eps = np.inf. \r\n# We will set max_eps = eps to reduce the run-time.\r\n#\r\n\t\treturn text",
"async def help(self, ctx):\n self.log_command_call(\"help\", ctx.message)\n await ctx.send(HELP_TEXT)\n embed_output = create_embed(description=MORE_INFO_TEXT)\n await ctx.send(embed=embed_output)",
"def metrics_env(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_env(cmd_ctx, cpc, options))",
"def command_help(self, *args, **kwargs):\n print(\"Commands available:\\n\")\n for name in dir(self):\n if not name.startswith(\"command_\"):\n continue\n name_clean = name[len(\"command_\"):]\n print(\"%s:\\n - %s\\n\" % (name_clean, getattr(self, name).__doc__.strip()))",
"def DoHelp(options, args):\n __pychecker__ = 'unusednames=options'\n if len(args) == 1 and args[0] in COMMAND_USAGE_TEXT:\n print(COMMAND_USAGE_TEXT[args[0]])\n else:\n raise gclient_utils.Error(\"unknown subcommand '%s'; see 'gclient help'\" %\n args[0])",
"def help(update, context):\n msg = \"\"\n msg += \"\\n/covid 7-Day-Incident per Million\"\n msg += \"\\n/daylio What did I do a year ago today?\"\n msg += \"\\n/f1last Results of the last race\"\n msg += \"\\n/f1stand Driver standings\"\n msg += \"\\n/f1next Time and place of the next race\"\n msg += \"\\n/fuel prices and consump. (args: Xeur Ykm)\"\n msg += \"\\n/ip Outside ip address\"\n msg += \"\\n/rate Exchange rates (args: Xeur/Yhuf)\"\n msg += \"\\n/rss check rss feeds for new content\"\n msg += \"\\n/sun Time of sunrise and sunset\"\n msg += \"\\n/xkcd Sends last comic image and alt\"\n msg.rstrip()\n update.message.reply_text(msg)",
"def main():\n\n args = parse_args()\n metric_sender = MetricSender(verbose=args.verbose, debug=args.debug)\n\n discovery_key_disk = 'disc.disk'\n interval = 3\n pcp_disk_dev_metrics = ['disk.dev.total', 'disk.dev.avactive']\n item_prototype_macro_disk = '#OSO_DISK'\n item_prototype_key_tps = 'disc.disk.tps'\n item_prototype_key_putil = 'disc.disk.putil'\n\n disk_metrics = pminfo.get_sampled_data(pcp_disk_dev_metrics, interval, 2)\n\n pcp_metrics_divided = {}\n for metric in pcp_disk_dev_metrics:\n pcp_metrics_divided[metric] = {k: v for k, v in disk_metrics.items() if metric in k}\n\n # do TPS checks; use disk.dev.total\n filtered_disk_totals = clean_up_metric_dict(pcp_metrics_divided[pcp_disk_dev_metrics[0]],\n pcp_disk_dev_metrics[0] + '.')\n\n # Add dynamic items\n metric_sender.add_dynamic_metric(discovery_key_disk, item_prototype_macro_disk, filtered_disk_totals.keys())\n\n # calculate the TPS and add them to the ZaggSender\n for disk, totals in filtered_disk_totals.iteritems():\n disk_tps = (totals[1] - totals[0]) / interval\n metric_sender.add_metric({'%s[%s]' % (item_prototype_key_tps, disk): disk_tps})\n\n # do % Util checks; use disk.dev.avactive\n filtered_disk_totals = clean_up_metric_dict(pcp_metrics_divided[pcp_disk_dev_metrics[1]],\n pcp_disk_dev_metrics[1] + '.')\n\n # calculate the % Util and add them to the ZaggSender\n for disk, totals in filtered_disk_totals.iteritems():\n total_active = (float)(totals[1] - totals[0]) / 1000.0\n putil = 100 * total_active / interval\n\n metric_sender.add_metric({'%s[%s]' % (item_prototype_key_putil, disk): putil})\n\n metric_sender.send_metrics()"
] | [
"0.59195566",
"0.5586245",
"0.5444794",
"0.54191935",
"0.53922415",
"0.5380868",
"0.53543603",
"0.52876854",
"0.52462244",
"0.51891714",
"0.5112807",
"0.51073605",
"0.50244516",
"0.49929634",
"0.49870348",
"0.4977524",
"0.49535966",
"0.4952735",
"0.4930333",
"0.4928074",
"0.48858336",
"0.4872037",
"0.4856927",
"0.48532456",
"0.48516858",
"0.48491573",
"0.484905",
"0.48335287",
"0.48152322",
"0.47984397"
] | 0.6753259 | 0 |
Report environmental and power consumption metrics for CPCs. In addition to the command-specific options shown in this help text, the general options (see 'zhmc help') can also be specified right after the 'zhmc' command name. | def metrics_env(cmd_ctx, cpc, **options):
cmd_ctx.execute_cmd(lambda: cmd_metrics_env(cmd_ctx, cpc, options)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def metrics_cpc(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_cpc(cmd_ctx, cpc, options))",
"def metrics_channel(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_channel(cmd_ctx, cpc, options))",
"def metrics_proc(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_proc(cmd_ctx, cpc, options))",
"def metrics_crypto(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_crypto(cmd_ctx, cpc, options))",
"def print_help():\n print \"\"\"\nMeasure Operating System Performance (mosp)\n-------------------------------------------\n\nUse this program to measure and report on operating system\nperformance.\n\nThis code measures operating system performance,\nincluding CPU, memory, disk and network, and\noutputs stats to screen and optionally to file\ntoo for use in performance analysis\n\nUses the psutil library\n\nInstall psutil (Ubuntu) if you don't already have it:\n sudo apt-get install python-dev\n sudo pip install psutil\n\nUsage:\n python mosp.py [options]\n\nExample usage:\n python mosp.py -W -i 2\n\nOptions:\n -h --help Display this help and exit\n -m --max-run-time Maximum time to run for before exiting\n (default is infinite)\n -i --interval Interval between requests in seconds\n (default is 1)\n -w --output-file Specify an output filename\n -W Output results to default filename\n default format is:\n mosp-HOSTNAME-YYYYMMDD-HHMMSS.csv\n -b --output-path Specify path to output file directory\n -j --no-header-row Suppress writing header row into CSV\n -v --version Output version information and exit\n\n \"\"\"\n return()",
"def metrics_flash(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_flash(cmd_ctx, cpc, options))",
"def print_help(self):\r\n\t\ttext = \"\\tName: ml_scikit_OPTICS\"\r\n\t\ttext += \"\\n\\t\\tThis machine learning plugin uses scikit-learn's OPTICS algorithm.\\n\"\r\n\t\ttext += \"\\n\\t\\tOptional Parameters:\"\r\n\t\ttext += \"\\n\\t\\t\\tOPTICS_skip_normalization: Do NOT perform normalization (scaling) of data, skip this step.\"\r\n\t\ttext += \"\\n\\t\\t\\OPTICS_eps: Specify eps parameter (default is 1.0).\"\r\n\t\ttext += \"\\n\\t\\t\\OPTICS_min_samples: Specify min_samples parameter (default is 5).\"\r\n#\r\n# OPTICS (with memory complexity n) is an alternative to DBSCAN (with memory complexity n^2)\r\n# which has time complexity n^2 in general with the default max_eps = np.inf. \r\n# We will set max_eps = eps to reduce the run-time.\r\n#\r\n\t\treturn text",
"def phast_cmmd(self):\n temp = '{prog} -R {rho} -C {ecov} -E {elen} -N {chrom} -i MAF {maf} {model} > {wig}\\n'.format(**self.dict)\n return temp.format(fnum=self.fnum)",
"def help_opt(self):\n print(OPTIONS)",
"def treatCmdOpts(argv):\n baseName = os.path.basename(__file__)\n amc.cBaseName = colored(baseName, 'yellow')\n\n helpTxt = amc.cBaseName + ' analyses observation statistics file for selected GNSSs'\n\n # create the parser for command line arguments\n parser = argparse.ArgumentParser(description=helpTxt)\n\n parser.add_argument('--obsstat', help='observation statistics file', type=str, required=True)\n\n parser.add_argument('--freqs', help='select frequencies to use (out of {freqs:s}, default {freq:s})'.format(freqs='|'.join(gfzc.lst_freqs), freq=colored(gfzc.lst_freqs[0], 'green')), default=gfzc.lst_freqs[0], type=str, required=False, action=gco.freqtype_action, nargs='+')\n\n parser.add_argument('--cutoff', help='cutoff angle in degrees (default {mask:s})'.format(mask=colored('0', 'green')), default=0, type=int, required=False, action=gco.cutoff_action)\n\n parser.add_argument('--dbcvs', help='Add information to CVS database (default {cvsdb:s})'.format(cvsdb=colored(gco.CVSDB_OBSTLE, 'green')), required=False, type=str, default=gco.CVSDB_OBSTLE)\n\n parser.add_argument('--plot', help='displays interactive plots (default False)', action='store_true', required=False, default=False)\n\n parser.add_argument('--logging', help='specify logging level console/file (two of {choices:s}, default {choice:s})'.format(choices='|'.join(gco.lst_logging_choices), choice=colored(' '.join(gco.lst_logging_choices[3:5]), 'green')), nargs=2, required=False, default=gco.lst_logging_choices[3:5], action=gco.logging_action)\n\n # drop argv[0]\n args = parser.parse_args(argv[1:])\n\n # return arguments\n return args.obsstat, args.freqs, args.cutoff, args.dbcvs, args.plot, args.logging",
"async def stats(self, ctx):\n if ctx.invoked_subcommand is None:\n await send_cmd_help(ctx)",
"def commandline_options():\n parser = argparse.ArgumentParser(\n description='ocn_diags_generator: CESM wrapper python program for Ocean Diagnostics packages.')\n\n parser.add_argument('--backtrace', action='store_true',\n help='show exception backtraces as extra debugging '\n 'output')\n\n parser.add_argument('--debug', action='store_true',\n help='extra debugging output')\n\n #parser.add_argument('--config', nargs=1, required=True, help='path to config file')\n\n options = parser.parse_args()\n return options",
"def mc(self, *args) -> None:\n env = os.environ.copy()\n env['MC_HOST_minio'] = self.auth_url\n # --config-dir is set just to prevent any config set by the user\n # from interfering with the test.\n try:\n subprocess.run(\n [\n 'mc', '--quiet', '--no-color', f'--config-dir={self.path}',\n *args\n ],\n stdout=subprocess.DEVNULL,\n stderr=subprocess.PIPE,\n env=env,\n encoding='utf-8',\n errors='replace',\n check=True\n )\n except OSError as exc:\n raise MissingProgram(f'mc could not be run: {exc}') from exc\n except subprocess.CalledProcessError as exc:\n raise ProgramFailed(exc.stderr) from exc",
"def main( argv = None ):\n\n if argv == None: argv = sys.argv\n\n # setup command line parser\n parser = E.OptionParser( version = \"%prog version: $Id$\", \n usage = globals()[\"__doc__\"] )\n\n parser.add_option(\"--category\", dest=\"category\", type=\"choice\",\n choices = (\"B\", \"C\"), help=\"supply help\" )\n\n ## add common options (-h/--help, ...) and parse command line \n (options, args) = E.Start( parser, argv = argv )\n\n data = getData(options.stdin)\n if options.category == \"B\":\n options.stdout.write(\"Category B pathway\\tKO\\tGenes\\tDescriptions\\n\")\n for pathway, descriptions in b2ko(data).iteritems():\n options.stdout.write(\"\\t\".join([pathway, \"; \".join(descriptions[0]), \"; \".join(descriptions[1]), \"; \".join(descriptions[2])]) + \"\\n\")\n\n elif options.category == \"C\":\n options.stdout.write(\"Category C pathway\\tKO\\tGenes\\tDescriptions\\n\")\n for pathway, descriptions in c2ko(data).iteritems():\n options.stdout.write(\"\\t\".join([pathway, \"; \".join(descriptions[0]), \"; \".join(descriptions[1]), \"; \".join(descriptions[2])]) + \"\\n\")\n else:\n raise ValueError(\"must specify the category of pathway\")\n\n\n ## write footer and output benchmark information.\n E.Stop()",
"def measure(self,command_exe, command_args, measure_out):\n pass",
"def main():\n test_cases = ast.literal_eval(sys.argv[1])\n results = str(my_info()) + '\\t\\t'\n for test_case in test_cases:\n mode = test_case[0]\n id_1 = int(test_case[1])\n id_2 = int(test_case[2])\n if mode == 'jc':\n results += str(Jaccard_Coefficient(id_1, id_2)) + '\\t\\t'\n elif mode == 'cc':\n results += str(Correlation_Coefficient(id_1, id_2)) + '\\t\\t'\n else:\n exit('bad command')\n print results + '\\n'",
"def do_hostinfo(self, args):\n host = opts = None\n if args:\n args = args.split()\n host = args.pop()\n\n if not host:\n print('Usage: hostinfo [-cdmu] host_name_or_ip')\n print(' uptime and load stats returned if no options specified')\n return\n\n try:\n ip = socket.gethostbyname(host)\n except socket.gaierror:\n print('cannot resolve', host, file=sys.stderr)\n return\n\n opts = []\n while args:\n arg = args.pop(0)\n if arg.startswith('--'):\n if arg == '--cpu':\n opts.append('c')\n elif arg == '--disk':\n opts.append('d')\n elif arg == '--memory':\n opts.append('m')\n elif arg == '--uptime':\n opts.append('u')\n else:\n print('unrecognized option:', arg, file=sys.stderr)\n return\n else:\n if arg[0] == '-':\n for ch in arg[1:]:\n if ch in ('cdmu') and ch not in opts:\n opts.append(ch)\n else:\n print('unrecognized option:', ch, file=sys.stderr)\n return\n\n stats = self._qm.get_host_stats(ip)\n\n if not opts:\n # Get uptime and load averages.\n up = stats['uptime']\n load = stats['cpu_load']\n print('Up for %s days, %s hours, %s minutes, '\n 'load averages: %s, %s, %s'\n % (up['days'], up['hours'], up['minutes'], load['one'],\n load['five'], load['fifteen']))\n return\n\n all_stats = []\n for opt in opts:\n if opt == 'd':\n # Get disk usage.\n disks = stats['disk_usage']\n st = ['Disk Usage:']\n for mount, disk_info in disks.viewitems():\n st.append(' Usage for: %s' % mount)\n for k, v in disk_info.viewitems():\n st.append(' %s: %s' % (k, v))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'c':\n # Get CPU load.\n load_stats = stats['cpu_load']\n st = ['CPU Load Average:']\n st.append(' last one minute: %s' % load_stats['one'])\n st.append(' last five minutes: %s' % load_stats['five'])\n st.append(' last fifteen minutes: %s' % load_stats['fifteen'])\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'm':\n # Get Memory Usage.\n memory_usage = stats['memory_usage']\n st = ['Memory usage:']\n for k, v in memory_usage.viewitems():\n st.append(' %s: %s' % (k, v))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'u':\n # Get uptime.\n up = stats['uptime']\n st = ['Uptime:']\n st.append(' Up for %s days, %s hours and %s minutes'\n % (up['days'], up['hours'], up['minutes']))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n\n print('\\n'.join(all_stats))",
"def report_on_config( args ):\n\n from khmer.utils import print_error\n\n if args.quiet: return\n\n print_error( \"\\nPARAMETERS:\" )\n print_error( \" - kmer size = {0} \\t\\t(-k)\".format( args.ksize ) )\n print_error( \" - n hashes = {0} \\t\\t(-N)\".format( args.n_hashes ) )\n print_error(\n \" - min hashsize = {0:5.2g} \\t(-x)\".format( args.min_hashsize )\n )\n print_error( \"\" )\n print_error(\n \"Estimated memory usage is {0:.2g} bytes \"\n \"(n_hashes x min_hashsize)\".format( args.n_hashes * args.min_hashsize )\n )\n print_error( \"-\" * 8 )\n\n if DEFAULT_MIN_HASHSIZE == args.min_hashsize:\n print_error(\n \"** WARNING: hashsize is default! \" \n \"You absodefly want to increase this!\\n** \"\n \"Please read the docs!\"\n )",
"def main():\n\n parser = argparse.ArgumentParser(description=main.__doc__,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n epilog=\"Homepage: https://github.com/kbat/mc-tools\")\n parser.add_argument('com', type=str, help='plot requests file name', nargs='?', default=\"/tmp/foo.c\")\n parser.add_argument('comout', type=str, help='COMOUT file name', nargs='?', default=\"zoom\")\n\n args = parser.parse_args()\n\n cmd = {} # dictionary of commands\n\n bas = False\n plane = False\n \n with open(args.com) as f:\n for line in f.readlines():\n words = line.strip().split()\n if len(words) is 0:\n continue\n\n for i,w in enumerate(words):\n if re.search(\"^bas\", w):\n cmd['bas'] = list(map(float, words[i+1:i+7]))\n if plane is False: bas = True # basis was before plane cuts\n elif re.search(\"^or\", w):\n cmd['or'] = list(map(float, words[i+1:i+4]))\n elif re.search(\"^ex\", w):\n try: # both x and y scales are given\n cmd['ex'] = list(map(float, words[i+1:i+3]))\n continue\n except ValueError: # just 1 scale is given\n cmd['ex'] = list(map(float, words[i+1:i+2]))\n elif re.search(\"^lab\", w):\n cmd['label'] = list(map(int, map(float, words[i+1:i+3]))) #+ [words[i+3]]\n elif re.search(\"^p[xyz]\", w):\n cmd[w] = [float(words[i+1])]\n if bas is False: plane = True # plane cuts were before basis\n elif re.search(\"^legend\", w):\n cmd[w] = [words[i+1]]\n elif w == \"scale\":\n print(w)\n if int(words[i+1]): # no need to put 'scale 0'\n cmd[w] = [words[i+1]]\n elif w in (\"mesh\"):\n if int(words[i+1])==1: # no need to put 'mesh 1'\n cmd[w] = [words[i+1]]\n\n print(bas, plane)\n\n if plane: # bas was first\n keys = ('bas', 'or', 'ex', 'px', 'py', 'pz', 'label', 'mesh', 'legend', 'scale')\n elif bas:\n keys = ('or', 'ex', 'px', 'py', 'pz', 'bas', 'label', 'mesh', 'legend', 'scale')\n else:\n keys = {'or', 'ex', 'label', 'mesh', 'legend', 'scale'}\n \n with open(args.comout, 'w') as f:\n for key in keys:\n if key in cmd:\n # newline required by mcplot:\n if key in ('mesh', 'legend', 'scale', 'label'):\n f.write(\"\\n\")\n f.write(\"%s %s \" % (key,\" \".join(str(e) for e in cmd[key]),))\n f.write(\"\\n\")",
"def metrics_roce(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_roce(cmd_ctx, cpc, options))",
"def options():\n print \"\"\"Options summary:\n -h, --help\n -u, --usage\n -v, --verbose <verb_level>\n -e, --endpoint <endpoint>\n -i, --interface-type <iface_type>\n -r, --recursive\n --dbs-conf <conf_file>\n --show-prod\n --show-caf\n --only-subscribed\n --only-custodial\n \"\"\"",
"def main():\n parser = argparse.ArgumentParser(\n description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter\n )\n add_common_snmp_args(parser)\n parser.add_argument(\n \"-w\",\n \"--warning\",\n type=int,\n default=70,\n help=\"Warning memory usage percentage (0-100)\",\n )\n parser.add_argument(\n \"-c\",\n \"--critical\",\n type=int,\n default=90,\n help=\"Critical memory usage percentage (0-100)\",\n )\n parser.add_argument(\n \"-f\",\n \"--family\",\n required=True,\n help=\"Switch family. Supported families: 1910, 1920, 1920S\",\n )\n\n config = vars(parser.parse_args())\n check_snmp_args(config)\n check_thresholds(config)\n\n dataset = {}\n\n if config[\"family\"] == \"1920S\":\n cpu = ObjectType(\n ObjectIdentity(\n \"HP-SWITCHING-MIB\", \"agentSwitchCpuProcessTotalUtilization\", 0\n )\n )\n elif config[\"family\"] in [\"1910\", \"1920\"]:\n cpu = ObjectType(\n ObjectIdentity(\"HH3C-ENTITY-EXT-MIB\", \"hh3cEntityExtCpuUsage\", 8)\n )\n else:\n unknown_exit(SERVICE, f\"Switch family {config['family']} NOT known\")\n\n try:\n raw_data = get_snmp_data(config, cpu)\n except ValueError as err:\n unknown_exit(SERVICE, err)\n add_vars_to_dataset(dataset, raw_data)\n\n if config[\"family\"] == \"1920S\":\n dataset[\"cpu_usage\"] = get_hp_cpu_usage(\n dataset[\"agentSwitchCpuProcessTotalUtilization\"]\n )\n elif config[\"family\"] in [\"1910\", \"1920\"]:\n dataset[\"cpu_usage\"] = int(dataset[\"hh3cEntityExtCpuUsage\"])\n else:\n unknown_exit(SERVICE, f\"Switch family {config['family']} NOT known\")\n\n state, message = generate_output(config, dataset)\n report(state, message)",
"def help(self, args):\n print('No commands available for this consumer')",
"def help(self):\n msg = \"`%s' performs the computational aspects of genotyping-by-sequencing.\\n\" % os.path.basename(sys.argv[0])\n msg += \"\\n\"\n msg += \"Usage: %s [OPTIONS] ...\\n\" % os.path.basename(sys.argv[0])\n msg += \"\\n\"\n msg += \"Options:\\n\"\n msg += \" -h, --help\\tdisplay the help and exit\\n\"\n msg += \" -V, --version\\toutput version information and exit\\n\"\n msg += \" -v, --verbose\\tverbosity level (0/default=1/2/3)\\n\"\n msg += \" --proj1\\tname of the project used for steps 1 to 4\\n\"\n msg += \"\\t\\tmention a reference genome only if all samples belong to\\n\"\n msg += \"\\t\\t the same species, and will be mapped to the same ref genome\\n\"\n msg += \" --proj2\\tname of the project used for steps 4 to 8\\n\"\n msg += \"\\t\\tcan be the same as --proj1, or can be different\\n\"\n msg +=\"\\t\\t notably when samples come from different species\\n\"\n msg += \"\\t\\t or if one wants to align reads to different ref genomes\\n\"\n msg += \" --schdlr\\tname of the cluster scheduler (default=SGE)\\n\"\n msg += \" --queue\\tname of the cluster queue (default=normal.q)\\n\"\n msg += \" --resou\\tcluster resources (e.g. 'test' for 'qsub -l test')\\n\"\n msg += \" --rmvb\\tremove bash scripts for jobs launched in parallel\\n\"\n msg += \" --step\\tstep to perform (1/2/3/.../9)\\n\"\n msg += \"\\t\\t1: raw read quality per lane (with FastQC v >= 0.11.2)\\n\"\n msg += \"\\t\\t2: demultiplexing per lane (with demultiplex.py v >= 1.14.0)\\n\"\n msg += \"\\t\\t3: cleaning per sample (with CutAdapt v >= 1.8)\\n\"\n msg += \"\\t\\t4: alignment per sample (with BWA MEM v >= 0.7.12, Samtools v >= 1.3, Picard and R v >= 3)\\n\"\n msg += \"\\t\\t5: local realignment per sample (with GATK v >= 3.5)\\n\"\n msg += \"\\t\\t6: local realignment per genotype (with GATK v >= 3.5)\\n\"\n msg += \"\\t\\t7: variant and genotype calling per genotype (with GATK HaplotypeCaller v >= 3.5)\\n\"\n msg += \"\\t\\t8: variant and genotype calling jointly across genotypes (with GATK GenotypeGVCFs v >= 3.5)\\n\"\n msg += \"\\t\\t9: variant and genotype filtering (with GATK v >= 3.5)\\n\"\n msg += \" --samples\\tpath to the 'samples' file\\n\"\n msg += \"\\t\\tcompulsory for all steps, but can differ between steps\\n\"\n msg += \"\\t\\t e.g. if samples come from different species or are aligned\\n\"\n msg += \"\\t\\t on different ref genomes, different samples file should\\n\"\n msg += \"\\t\\t be used for steps 4-9, representing different subsets of\\n\"\n msg += \"\\t\\t the file used for steps 1-3\\n\"\n msg += \"\\t\\tthe file should be encoded in ASCII\\n\"\n msg += \"\\t\\tthe first row should be a header with column names\\n\"\n msg += \"\\t\\teach 'sample' (see details below) should have one and only one row\\n\"\n msg += \"\\t\\tany two columns should be separated with one tabulation\\n\"\n msg += \"\\t\\tcolumns can be in any order\\n\"\n msg += \"\\t\\trows starting by '#' are skipped\\n\"\n msg += \"\\t\\t12 columns are compulsory (but there can be more):\\n\"\n msg += \"\\t\\t genotype (see details below, e.g. 'Col-0', but use neither underscore '_' nor space ' ' nor dot '.', use dash '-' instead)\\n\"\n msg += \"\\t\\t ref_genome (identifier of the reference genome used for alignment, e.g. 'Atha_v2', but use neither space ' ' nor dot '.'; the full species name, e.g. 'Arabidopsis thaliana', will be present in the file given to --dict)\\n\"\n msg += \"\\t\\t library (e.g. can be the same as 'genotype')\\n\"\n msg += \"\\t\\t barcode (e.g. 
'ATGG')\\n\"\n msg += \"\\t\\t seq_center (e.g. 'Broad Institute', 'GenoToul', etc)\\n\"\n msg += \"\\t\\t seq_platform (e.g. 'ILLUMINA', see SAM format specification)\\n\"\n msg += \"\\t\\t seq_platform_model (e.g. 'HiSeq 2000')\\n\"\n msg += \"\\t\\t flowcell (e.g. 'C5YMDACXX')\\n\"\n msg += \"\\t\\t lane (e.g. '3', can be '31' if a first demultiplexing was done per index)\\n\"\n msg += \"\\t\\t date (e.g. '2015-01-15', see SAM format specification)\\n\"\n msg += \"\\t\\t fastq_file_R1 (filename, one per lane, gzip-compressed)\\n\"\n msg += \"\\t\\t fastq_file_R2 (filename, one per lane, gzip-compressed)\\n\"\n msg += \" --fcln\\tidentifier of a flowcell and lane number\\n\"\n msg += \"\\t\\tformat as <flowcell>_<lane-number>, e.g. 'C5YMDACXX_1'\\n\"\n msg += \"\\t\\tif set, only the samples from this lane will be analyzed\\n\"\n msg += \" --pird\\tpath to the input reads directory\\n\"\n msg += \"\\t\\tcompulsory for steps 1 and 2\\n\"\n msg += \"\\t\\twill be added to the columns 'fastq_file_R*' from the sample file\\n\"\n msg += \"\\t\\tif not set, input read files should be in current directory\\n\"\n msg += \" --enz\\tname of the restriction enzyme\\n\"\n msg += \"\\t\\tcompulsory for step 2\\n\"\n msg += \"\\t\\tdefault=ApeKI\\n\"\n msg += \" --dmxmet\\tmethod used to demultiplex\\n\"\n msg += \"\\t\\tcompulsory for step 2\\n\"\n msg += \"\\t\\tdefault=4c (see the help of demultiplex.py to know more)\\n\"\n msg += \" --subst\\tnumber of substitutions allowed during demultiplexing\\n\"\n msg += \"\\t\\tcompulsory for step 2\\n\"\n msg += \"\\t\\tdefault=2\\n\"\n msg += \" --ensubst\\tenforce the nb of substitutions allowed\\n\"\n msg += \"\\t\\tcompulsory for step 2\\n\"\n msg += \"\\t\\tdefault=lenient/strict\\n\"\n msg += \" --adp\\tpath to the file containing the adapters\\n\"\n msg += \"\\t\\tcompulsory for step 3\\n\"\n msg += \"\\t\\tsame format as FastQC: name<tab>sequence\\n\"\n msg += \"\\t\\tname: at least 'adpR1' (also 'adpR2' if paired-end)\\n\"\n msg += \"\\t\\tsequence: from 5' (left) to 3' (right)\\n\"\n msg += \" --errtol\\terror tolerance to find adapters\\n\"\n msg += \"\\t\\tcompulsory for step 3\\n\"\n msg += \"\\t\\tdefault=0.2\\n\"\n msg += \" --minovl\\tminimum overlap length between reads and adapters\\n\"\n msg += \"\\t\\tcompulsory for step 3\\n\"\n msg += \"\\t\\tdefault=3 (in bases)\\n\"\n msg += \" --minrl\\tminimum length to keep a read\\n\"\n msg += \"\\t\\tcompulsory for step 3\\n\"\n msg += \"\\t\\tdefault=35 (in bases)\\n\"\n msg += \" --minq\\tminimum quality to trim a read\\n\"\n msg += \"\\t\\tcompulsory for step 3\\n\"\n msg += \"\\t\\tdefault=20 (used for both reads if paired-end)\\n\"\n msg += \" --maxNp\\tmaximum percentage of N to keep a read\\n\"\n msg += \"\\t\\tcompulsory for step 3\\n\"\n msg += \"\\t\\tdefault=0.2\\n\"\n msg += \" --ref\\tpath to the prefix of files for the reference genome\\n\"\n msg += \"\\t\\tcompulsory for steps 4, 5, 6, 7, 8, 9\\n\"\n msg += \"\\t\\tshould correspond to the 'ref_genome' column in --samples\\n\"\n msg += \"\\t\\te.g. 
'/data/Atha_v2' for '/data/Atha_v2.fa', '/data/Atha_v2.bwt', etc\\n\"\n msg += \"\\t\\tthese files are produced via 'bwa index ...'\\n\"\n msg += \" --dict\\tpath to the 'dict' file (SAM header with @SQ tags)\\n\"\n msg += \"\\t\\tcompulsory for step 4\\n\"\n msg += \"\\t\\tsee 'CreateSequenceDictionary' in the Picard software\\n\"\n msg += \" --jgid\\tcohort identifier to use for joint genotyping\\n\"\n msg += \"\\t\\tcompulsory for steps 8, 9\\n\"\n msg += \"\\t\\tuseful to launch several, different cohorts in parallel\\n\"\n msg += \" --rat\\trestrict alleles to be of a particular allelicity\\n\"\n msg += \"\\t\\tused in step 9\\n\"\n msg += \"\\t\\tdefault=ALL/BIALLELIC/MULTIALLELIC\\n\"\n msg += \"\\t\\tsee '--restrictAllelesTo' in GATK's SelectVariant\\n\"\n msg += \" --mdp\\tminimum value for DP (read depth; e.g. 10)\\n\"\n msg += \"\\t\\tused in step 9\\n\"\n msg += \"\\t\\tsee GATK's VariantFiltration\\n\"\n msg += \" --mgq\\tminimum value for GQ (genotype quality; e.g. 20)\\n\"\n msg += \"\\t\\tused in step 9\\n\"\n msg += \"\\t\\tsee GATK's VariantFiltration\\n\"\n msg += \" --mnfg\\tmaximum number of filtered genotypes to keep a variant\\n\"\n msg += \"\\t\\tused in step 9\\n\"\n msg += \"\\t\\tsee '--maxFilteredGenotypes' in GATK's SelectVariants\\n\"\n msg += \" --mffg\\tmaximum fraction of filtered genotypes to keep a variant\\n\"\n msg += \"\\t\\tused in step 9\\n\"\n msg += \"\\t\\tsee '--maxFractionFilteredGenotypes' in GATK's SelectVariants\\n\"\n msg += \" --mnnc\\tmaximum number of not-called genotypes to keep a variant\\n\"\n msg += \"\\t\\tused in step 9\\n\"\n msg += \" --mfnc\\tmaximum fraction of not-called genotypes to keep a variant\\n\"\n msg += \"\\t\\tused in step 9\\n\"\n msg += \"\\t\\tsee '--maxNOCALLfraction' in GATK's SelectVariants\\n\"\n msg += \" --fam\\tpath to the file containing pedigree information\\n\"\n msg += \"\\t\\tused in step 9\\n\"\n msg += \"\\t\\tdiscard variants with Mendelian violations (see Semler et al, 2012)\\n\"\n msg += \"\\t\\tshould be in the 'fam' format specified by PLINK\\n\"\n msg += \"\\t\\tvalidation strictness (GATK '-pedValidationType') is set at 'SILENT'\\n\"\n msg += \"\\t\\t allowing some samples to be absent from the pedigree\\n\"\n msg += \" --mvq\\tminimum GQ for each trio member to accept a variant as a Mendelian violation\\n\"\n msg += \"\\t\\tused in step 9 if '--fam' is specified\\n\"\n msg += \"\\t\\tdefault=0\\n\"\n msg += \" --xlssf\\tpath to the file with genotypes to exclude\\n\"\n msg += \"\\t\\tused in step 9 (can be especially useful if '--fam' is specified)\\n\"\n msg += \" --tmpd\\tpath to a temporary directory on child nodes (default=.)\\n\"\n msg += \"\\t\\te.g. 
it can be /tmp or /scratch\\n\"\n msg += \"\\t\\tused in step 4 for 'samtools sort'\\n\"\n msg += \"\\t\\tused in step 7 for 'GATK HaplotypeCaller'\\n\"\n msg += \" --jvmXms\\tinitial memory allocated to the Java Virtual Machine\\n\"\n msg += \"\\t\\tdefault=512m (can also be specified as 1024k, 1g, etc)\\n\"\n msg += \"\\t\\tused in steps 4, 5, 6, 7 and 8 for Picard and GATK\\n\"\n msg += \" --jvmXmx\\tmaximum memory allocated to the Java Virtual Machine\\n\"\n msg += \"\\t\\tdefault=4g\\n\"\n msg += \"\\t\\tused in steps 4, 5, 6, 7 and 8 for Picard and GATK\\n\"\n msg += \" --queue2\\tname of the second cluster queue (default=bigmem.q)\\n\"\n msg += \"\\t\\tused in step 4 for Picard to collect insert sizes\\n\"\n msg += \" --knowni\\tpath to a VCF file with known indels (for local realignment)\\n\"\n msg += \" --known\\tpath to a VCF file with known variants (e.g. from dbSNP)\\n\"\n msg += \" --force\\tforce to re-run step(s)\\n\"\n msg += \"\\t\\tthis removes without warning the step directory if it exists\\n\"\n msg += \"\\n\"\n msg += \"Examples:\\n\"\n msg += \" %s --step 1 --samples samples.txt\\n\" % os.path.basename(sys.argv[0])\n msg += \"\\n\"\n msg += \"Details:\\n\"\n msg += \"This program aims at genotyping a set of 'genotypes' using data from\\n\"\n msg += \"a restriction-assisted DNA sequencing (RAD-seq) experiment, also known\\n\"\n msg += \"as a genotyping-by-sequencing (GBS) experiment.\\n\"\n msg += \"Here, by 'genotype', we mean the entity which is the focus of the\\n\"\n msg += \"study. For instance, it can be a plant variety (or a human being), or\\n\"\n msg += \"the specific clone of a given plant variety (or a specific tumor of a\\n\"\n msg += \"given human being), etc.\\n\"\n msg += \"Importantly, note that the content of the 'genotype' column will\\n\"\n msg += \"be used to set the 'SM' (sample) tag of the 'RG' (read group) header\\n\"\n msg += \"record type of the SAM format (see http://www.htslib.org/). However,\\n\"\n msg += \"internal to this program, the term 'sample' corresponds to the unique\\n\"\n msg += \"quadruplet (genotype,flowcell,lane,barcode) for steps 1 and 2, and to\\n\"\n msg += \"the unique triplet (genotype,flowcell,lane) for the others.\\n\"\n msg += \"Jobs are executed in parallel (--schdlr). Their return status is\\n\"\n msg += \"recorded in a SQLite database which is removed at the end. If a job\\n\"\n msg += \"fails, the whole script stops with an error.\\n\"\n msg += \"\\n\"\n msg += \"Dependencies:\\n\"\n msg += \"Python >= 2.7; Biopython; pyutilstimflutre >= 0.5\\n\"\n msg += \"\\n\"\n msg += \"Report bugs to <[email protected]>.\"\n print(msg); sys.stdout.flush()",
"def main(argv):\n version = \"0.1.2\"\n interval = 1\n max_run_time = 0\n finished = 0\n first_time = 1\n output_file = 0\n output_file_enabled = 0\n output_path = 0\n header_row = 1\n\n #*** Get the hostname for use in filenames etc:\n hostname = socket.gethostname()\n\n #*** Start by parsing command line parameters:\n try:\n opts, args = getopt.getopt(argv, \"hu:m:ni:w:Wb:jv\",\n [\"help\",\n \"url=\",\n \"max-run-time=\",\n \"no-keepalive\",\n \"interval=\",\n \"output-file=\",\n \"output-path=\",\n \"no-header-row\",\n \"version\"])\n except getopt.GetoptError as err:\n print \"mosp: Error with options:\", err\n print_help()\n sys.exit(2)\n for opt, arg in opts:\n if opt in (\"-h\", \"--help\"):\n print_help()\n sys.exit()\n elif opt in (\"-v\", \"--version\"):\n print 'mosp.py version', version\n sys.exit()\n elif opt in (\"-m\", \"--max-run-time\"):\n max_run_time = float(arg)\n elif opt in (\"-i\", \"--interval\"):\n interval = float(arg)\n elif opt in (\"-w\", \"--output-file\"):\n output_file = arg\n output_file_enabled = 1\n elif opt == \"-W\":\n output_file = \"mosp-\" + hostname + \"-\" + \\\n time.strftime(\"%Y%m%d-%H%M%S.csv\")\n output_file_enabled = 1\n elif opt in (\"-b\", \"--output-path\"):\n output_path = arg\n elif opt in (\"-j\", \"--no-header-row\"):\n header_row = 0\n\n print \"\\nMeasure Operating System Performance (mosp) version\", \\\n version\n\n #*** Display output filename:\n if output_file_enabled:\n if output_path:\n output_file = os.path.join(output_path, output_file)\n print \"Results filename is\", output_file\n else:\n print \"Not outputing results to file, as option not selected\"\n\n if not header_row:\n print \"Not writing a header row to CSV\"\n\n #*** Use this if max_run_time is set:\n initial_time = time.time()\n\n #*** Instantiate classes:\n cpus = CPUs()\n swap = Swap()\n nics = NICs()\n\n #*** Start the loop:\n while not finished:\n timenow = datetime.datetime.now()\n timestamp = timenow.strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]\n start_time = time.time()\n\n #*** Update CPU measurements:\n cpus.update()\n\n #*** Update swap measurements:\n swap.update()\n\n #*** Update network measurements:\n nics.update()\n\n #*** Put the stats into a nice string for printing and\n #*** writing to file:\n result_csv = str(timestamp) + \",\" \\\n + cpus.csv() \\\n + swap.csv() \\\n + nics.csv() \\\n + \"\\n\"\n result_kvp = str(timestamp) + \" \" \\\n + cpus.kvp() \\\n + swap.kvp() \\\n + nics.kvp()\n print result_kvp\n if output_file_enabled:\n #*** Header row in CSV:\n if first_time and header_row:\n #*** Write a header row to CSV:\n header_csv = \"time,\" + cpus.csv_header(hostname) + \\\n swap.csv_header(hostname) + \\\n nics.csv_header(hostname) + \\\n \"\\n\"\n first_time = 0\n with open(output_file, 'a') as the_file:\n the_file.write(header_csv)\n\n #*** Write a data row to CSV:\n with open(output_file, 'a') as the_file:\n the_file.write(result_csv)\n\n if max_run_time:\n if (start_time - initial_time) > max_run_time:\n break\n\n #*** Sleep for interval seconds:\n time.sleep(interval)",
"def _cmd_metrics(args):\n if (\n len(args.cnarrays) > 1\n and args.segments\n and len(args.segments) > 1\n and len(args.cnarrays) != len(args.segments)\n ):\n raise ValueError(\n \"Number of coverage/segment filenames given must be \"\n \"equal, if more than 1 segment file is given.\"\n )\n\n cnarrs = map(read_cna, args.cnarrays)\n if args.segments:\n args.segments = map(read_cna, args.segments)\n table = metrics.do_metrics(cnarrs, args.segments, args.drop_low_coverage)\n write_dataframe(args.output, table)",
"def define_options(self):\n\n from clinica.engine.cmdparser import PIPELINE_CATEGORIES\n\n clinica_comp = self._args.add_argument_group(PIPELINE_CATEGORIES['CLINICA_COMPULSORY'])\n clinica_comp.add_argument(\"caps_directory\",\n help='Path to the CAPS directory.')\n clinica_comp.add_argument(\"list_bvalues\", type=str,\n help='String listing all the shells (i.e. the b-values) in the corrected DWI datasets comma separated (e.g, 0,300,700,2200)')\n # Optional arguments\n clinica_opt = self._args.add_argument_group(PIPELINE_CATEGORIES['CLINICA_OPTIONAL'])\n\n clinica_opt.add_argument(\"-wd\", \"--working_directory\",\n help='Temporary directory to store pipeline intermediate results')\n clinica_opt.add_argument(\"-np\", \"--n_procs\", type=int, default=4,\n help='Number of cores used to run in parallel')\n clinica_opt.add_argument(\"-tsv\", \"--subjects_sessions_tsv\",\n help='TSV file containing a list of subjects with their sessions.')",
"def cmd_help(args):",
"def reports_cli():",
"def printOptions(opts,subject_ids,session_ids,task_list, run_list, acq, rec):\n uname = os.popen('uname -s -n -r').read()\n print \"\\n\"\n print \"* Pipeline started at \"+time.strftime(\"%c\")+\"on \"+uname\n print \"* Command line is : \\n \"+str(sys.argv)+\"\\n\"\n print \"* The source directory is : \"+opts.sourceDir\n print \"* The target directory is : \"+opts.targetDir+\"\\n\"\n print \"* Data-set Subject ID(s) is/are : \"+str(', '.join(subject_ids))+\"\\n\"\n # print \"* PET conditions : \"+ ','.join(opts.condiList)+\"\\n\"\n print \"* Sessions : \", session_ids, \"\\n\"\n print \"* Tasks : \" , task_list , \"\\n\"\n print \"* Runs : \" , run_list , \"\\n\"\n print \"* Acquisition : \" , acq , \"\\n\"\n print \"* Reconstruction : \" , rec , \"\\n\""
] | [
"0.629929",
"0.6273303",
"0.60736",
"0.5926187",
"0.5641638",
"0.55025315",
"0.5396064",
"0.53120846",
"0.52493966",
"0.5243705",
"0.523837",
"0.5222159",
"0.51953936",
"0.519269",
"0.5182496",
"0.5178438",
"0.51700777",
"0.5129585",
"0.51175195",
"0.51148605",
"0.51081395",
"0.5094479",
"0.5088534",
"0.50876766",
"0.5083076",
"0.50563556",
"0.5043129",
"0.5018782",
"0.50118124",
"0.5006939"
] | 0.649805 | 0 |
Report processor usage metrics for CPCs. In addition to the command-specific options shown in this help text, the general options (see 'zhmc help') can also be specified right after the 'zhmc' command name. | def metrics_proc(cmd_ctx, cpc, **options):
cmd_ctx.execute_cmd(lambda: cmd_metrics_proc(cmd_ctx, cpc, options)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def metrics_cpc(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_cpc(cmd_ctx, cpc, options))",
"def metrics_channel(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_channel(cmd_ctx, cpc, options))",
"def metrics_crypto(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_crypto(cmd_ctx, cpc, options))",
"def do_hostinfo(self, args):\n host = opts = None\n if args:\n args = args.split()\n host = args.pop()\n\n if not host:\n print('Usage: hostinfo [-cdmu] host_name_or_ip')\n print(' uptime and load stats returned if no options specified')\n return\n\n try:\n ip = socket.gethostbyname(host)\n except socket.gaierror:\n print('cannot resolve', host, file=sys.stderr)\n return\n\n opts = []\n while args:\n arg = args.pop(0)\n if arg.startswith('--'):\n if arg == '--cpu':\n opts.append('c')\n elif arg == '--disk':\n opts.append('d')\n elif arg == '--memory':\n opts.append('m')\n elif arg == '--uptime':\n opts.append('u')\n else:\n print('unrecognized option:', arg, file=sys.stderr)\n return\n else:\n if arg[0] == '-':\n for ch in arg[1:]:\n if ch in ('cdmu') and ch not in opts:\n opts.append(ch)\n else:\n print('unrecognized option:', ch, file=sys.stderr)\n return\n\n stats = self._qm.get_host_stats(ip)\n\n if not opts:\n # Get uptime and load averages.\n up = stats['uptime']\n load = stats['cpu_load']\n print('Up for %s days, %s hours, %s minutes, '\n 'load averages: %s, %s, %s'\n % (up['days'], up['hours'], up['minutes'], load['one'],\n load['five'], load['fifteen']))\n return\n\n all_stats = []\n for opt in opts:\n if opt == 'd':\n # Get disk usage.\n disks = stats['disk_usage']\n st = ['Disk Usage:']\n for mount, disk_info in disks.viewitems():\n st.append(' Usage for: %s' % mount)\n for k, v in disk_info.viewitems():\n st.append(' %s: %s' % (k, v))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'c':\n # Get CPU load.\n load_stats = stats['cpu_load']\n st = ['CPU Load Average:']\n st.append(' last one minute: %s' % load_stats['one'])\n st.append(' last five minutes: %s' % load_stats['five'])\n st.append(' last fifteen minutes: %s' % load_stats['fifteen'])\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'm':\n # Get Memory Usage.\n memory_usage = stats['memory_usage']\n st = ['Memory usage:']\n for k, v in memory_usage.viewitems():\n st.append(' %s: %s' % (k, v))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'u':\n # Get uptime.\n up = stats['uptime']\n st = ['Uptime:']\n st.append(' Up for %s days, %s hours and %s minutes'\n % (up['days'], up['hours'], up['minutes']))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n\n print('\\n'.join(all_stats))",
"def _cmd_metrics(args):\n if (\n len(args.cnarrays) > 1\n and args.segments\n and len(args.segments) > 1\n and len(args.cnarrays) != len(args.segments)\n ):\n raise ValueError(\n \"Number of coverage/segment filenames given must be \"\n \"equal, if more than 1 segment file is given.\"\n )\n\n cnarrs = map(read_cna, args.cnarrays)\n if args.segments:\n args.segments = map(read_cna, args.segments)\n table = metrics.do_metrics(cnarrs, args.segments, args.drop_low_coverage)\n write_dataframe(args.output, table)",
"def print_help():\n print \"\"\"\nMeasure Operating System Performance (mosp)\n-------------------------------------------\n\nUse this program to measure and report on operating system\nperformance.\n\nThis code measures operating system performance,\nincluding CPU, memory, disk and network, and\noutputs stats to screen and optionally to file\ntoo for use in performance analysis\n\nUses the psutil library\n\nInstall psutil (Ubuntu) if you don't already have it:\n sudo apt-get install python-dev\n sudo pip install psutil\n\nUsage:\n python mosp.py [options]\n\nExample usage:\n python mosp.py -W -i 2\n\nOptions:\n -h --help Display this help and exit\n -m --max-run-time Maximum time to run for before exiting\n (default is infinite)\n -i --interval Interval between requests in seconds\n (default is 1)\n -w --output-file Specify an output filename\n -W Output results to default filename\n default format is:\n mosp-HOSTNAME-YYYYMMDD-HHMMSS.csv\n -b --output-path Specify path to output file directory\n -j --no-header-row Suppress writing header row into CSV\n -v --version Output version information and exit\n\n \"\"\"\n return()",
"def metrics_env(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_env(cmd_ctx, cpc, options))",
"def print_help(self):\r\n\t\ttext = \"\\tName: ml_scikit_OPTICS\"\r\n\t\ttext += \"\\n\\t\\tThis machine learning plugin uses scikit-learn's OPTICS algorithm.\\n\"\r\n\t\ttext += \"\\n\\t\\tOptional Parameters:\"\r\n\t\ttext += \"\\n\\t\\t\\tOPTICS_skip_normalization: Do NOT perform normalization (scaling) of data, skip this step.\"\r\n\t\ttext += \"\\n\\t\\t\\OPTICS_eps: Specify eps parameter (default is 1.0).\"\r\n\t\ttext += \"\\n\\t\\t\\OPTICS_min_samples: Specify min_samples parameter (default is 5).\"\r\n#\r\n# OPTICS (with memory complexity n) is an alternative to DBSCAN (with memory complexity n^2)\r\n# which has time complexity n^2 in general with the default max_eps = np.inf. \r\n# We will set max_eps = eps to reduce the run-time.\r\n#\r\n\t\treturn text",
"def measure(self,command_exe, command_args, measure_out):\n pass",
"def do_stats(self, args):\n total_cpu = free_cpu = in_use_cpu = 0\n\n summary = self._qm.get_all_host_summary()\n for host_id, host_info in summary.viewitems():\n host_cpu = int(host_info['total cores'])\n total_cpu += host_cpu\n locked = host_info.get('locked by')\n if locked:\n # If host is locked then all CPUs are in use.\n in_use_cpu += host_cpu\n else:\n free_host_cpu = int(host_info['free cores'])\n in_use_cpu += (host_cpu - free_host_cpu)\n free_cpu += free_host_cpu\n\n print('total CPU: ', total_cpu)\n print('used/locked CPU: ', in_use_cpu)\n print('free CPU: ', free_cpu)\n capacity = float(in_use_cpu) / float(total_cpu)\n print('capacity used: %.1f%%' % (capacity * 100,))\n capacity = float(free_cpu) / float(total_cpu)\n print('capacity remaining: %.1f%%' % (capacity * 100,))",
"def metrics_roce(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_roce(cmd_ctx, cpc, options))",
"def stats(caesar, input):\n commands = {}\n users = {}\n channels = {}\n\n ignore = set(['f_note', 'startup', 'message', 'noteuri'])\n for (name, user), count in caesar.stats.items(): \n if name in ignore: continue\n if not user: continue\n\n if not user.startswith('#'): \n try: users[user] += count\n except KeyError: users[user] = count\n else: \n try: commands[name] += count\n except KeyError: commands[name] = count\n\n try: channels[user] += count\n except KeyError: channels[user] = count\n\n comrank = sorted([(b, a) for (a, b) in commands.iteritems()], reverse=True)\n userank = sorted([(b, a) for (a, b) in users.iteritems()], reverse=True)\n charank = sorted([(b, a) for (a, b) in channels.iteritems()], reverse=True)\n\n # most heavily used commands\n creply = 'most used commands: '\n for count, command in comrank[:10]: \n creply += '%s (%s), ' % (command, count)\n caesar.say(creply.rstrip(', '))\n\n # most heavy users\n reply = 'power users: '\n for count, user in userank[:10]: \n reply += '%s (%s), ' % (user, count)\n caesar.say(reply.rstrip(', '))\n\n # most heavy channels\n chreply = 'power channels: '\n for count, channel in charank[:3]: \n chreply += '%s (%s), ' % (channel, count)\n caesar.say(chreply.rstrip(', '))",
"def main():\n processor.custom_config = parse_arguments()\n processor.process()\n logger.info(processor.statistics)\n logger.info(processor.custom_config)",
"def procs_calculate_axyzc(molecules, n_cores=-1, show_progress=True, scr=None, cmd=XTB_CMD):\n results = None\n return results",
"async def stats(self, ctx):\n if ctx.invoked_subcommand is None:\n await send_cmd_help(ctx)",
"def main():\n\n args = parse_args()\n metric_sender = MetricSender(verbose=args.verbose, debug=args.debug)\n\n discovery_key_disk = 'disc.disk'\n interval = 3\n pcp_disk_dev_metrics = ['disk.dev.total', 'disk.dev.avactive']\n item_prototype_macro_disk = '#OSO_DISK'\n item_prototype_key_tps = 'disc.disk.tps'\n item_prototype_key_putil = 'disc.disk.putil'\n\n disk_metrics = pminfo.get_sampled_data(pcp_disk_dev_metrics, interval, 2)\n\n pcp_metrics_divided = {}\n for metric in pcp_disk_dev_metrics:\n pcp_metrics_divided[metric] = {k: v for k, v in disk_metrics.items() if metric in k}\n\n # do TPS checks; use disk.dev.total\n filtered_disk_totals = clean_up_metric_dict(pcp_metrics_divided[pcp_disk_dev_metrics[0]],\n pcp_disk_dev_metrics[0] + '.')\n\n # Add dynamic items\n metric_sender.add_dynamic_metric(discovery_key_disk, item_prototype_macro_disk, filtered_disk_totals.keys())\n\n # calculate the TPS and add them to the ZaggSender\n for disk, totals in filtered_disk_totals.iteritems():\n disk_tps = (totals[1] - totals[0]) / interval\n metric_sender.add_metric({'%s[%s]' % (item_prototype_key_tps, disk): disk_tps})\n\n # do % Util checks; use disk.dev.avactive\n filtered_disk_totals = clean_up_metric_dict(pcp_metrics_divided[pcp_disk_dev_metrics[1]],\n pcp_disk_dev_metrics[1] + '.')\n\n # calculate the % Util and add them to the ZaggSender\n for disk, totals in filtered_disk_totals.iteritems():\n total_active = (float)(totals[1] - totals[0]) / 1000.0\n putil = 100 * total_active / interval\n\n metric_sender.add_metric({'%s[%s]' % (item_prototype_key_putil, disk): putil})\n\n metric_sender.send_metrics()",
"def process_info(process):\n\thelp(process)",
"def metrics_flash(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_flash(cmd_ctx, cpc, options))",
"def phast_cmmd(self):\n temp = '{prog} -R {rho} -C {ecov} -E {elen} -N {chrom} -i MAF {maf} {model} > {wig}\\n'.format(**self.dict)\n return temp.format(fnum=self.fnum)",
"def help(self, args):\n print('No commands available for this consumer')",
"def treatCmdOpts(argv):\n baseName = os.path.basename(__file__)\n amc.cBaseName = colored(baseName, 'yellow')\n\n helpTxt = amc.cBaseName + ' analyses observation statistics file for selected GNSSs'\n\n # create the parser for command line arguments\n parser = argparse.ArgumentParser(description=helpTxt)\n\n parser.add_argument('--obsstat', help='observation statistics file', type=str, required=True)\n\n parser.add_argument('--freqs', help='select frequencies to use (out of {freqs:s}, default {freq:s})'.format(freqs='|'.join(gfzc.lst_freqs), freq=colored(gfzc.lst_freqs[0], 'green')), default=gfzc.lst_freqs[0], type=str, required=False, action=gco.freqtype_action, nargs='+')\n\n parser.add_argument('--cutoff', help='cutoff angle in degrees (default {mask:s})'.format(mask=colored('0', 'green')), default=0, type=int, required=False, action=gco.cutoff_action)\n\n parser.add_argument('--dbcvs', help='Add information to CVS database (default {cvsdb:s})'.format(cvsdb=colored(gco.CVSDB_OBSTLE, 'green')), required=False, type=str, default=gco.CVSDB_OBSTLE)\n\n parser.add_argument('--plot', help='displays interactive plots (default False)', action='store_true', required=False, default=False)\n\n parser.add_argument('--logging', help='specify logging level console/file (two of {choices:s}, default {choice:s})'.format(choices='|'.join(gco.lst_logging_choices), choice=colored(' '.join(gco.lst_logging_choices[3:5]), 'green')), nargs=2, required=False, default=gco.lst_logging_choices[3:5], action=gco.logging_action)\n\n # drop argv[0]\n args = parser.parse_args(argv[1:])\n\n # return arguments\n return args.obsstat, args.freqs, args.cutoff, args.dbcvs, args.plot, args.logging",
"def main(cls):\n parser = cls.make_argument_parser()\n args = parser.parse_args()\n args.device = make_hoomd_device(args)\n benchmark = cls(**vars(args))\n performance = benchmark.execute()\n\n if args.device.communicator.rank == 0:\n print(f'{numpy.mean(performance)}')",
"def help(self):\n msg = \"`%s' performs the computational aspects of genotyping-by-sequencing.\\n\" % os.path.basename(sys.argv[0])\n msg += \"\\n\"\n msg += \"Usage: %s [OPTIONS] ...\\n\" % os.path.basename(sys.argv[0])\n msg += \"\\n\"\n msg += \"Options:\\n\"\n msg += \" -h, --help\\tdisplay the help and exit\\n\"\n msg += \" -V, --version\\toutput version information and exit\\n\"\n msg += \" -v, --verbose\\tverbosity level (0/default=1/2/3)\\n\"\n msg += \" --proj1\\tname of the project used for steps 1 to 4\\n\"\n msg += \"\\t\\tmention a reference genome only if all samples belong to\\n\"\n msg += \"\\t\\t the same species, and will be mapped to the same ref genome\\n\"\n msg += \" --proj2\\tname of the project used for steps 4 to 8\\n\"\n msg += \"\\t\\tcan be the same as --proj1, or can be different\\n\"\n msg +=\"\\t\\t notably when samples come from different species\\n\"\n msg += \"\\t\\t or if one wants to align reads to different ref genomes\\n\"\n msg += \" --schdlr\\tname of the cluster scheduler (default=SGE)\\n\"\n msg += \" --queue\\tname of the cluster queue (default=normal.q)\\n\"\n msg += \" --resou\\tcluster resources (e.g. 'test' for 'qsub -l test')\\n\"\n msg += \" --rmvb\\tremove bash scripts for jobs launched in parallel\\n\"\n msg += \" --step\\tstep to perform (1/2/3/.../9)\\n\"\n msg += \"\\t\\t1: raw read quality per lane (with FastQC v >= 0.11.2)\\n\"\n msg += \"\\t\\t2: demultiplexing per lane (with demultiplex.py v >= 1.14.0)\\n\"\n msg += \"\\t\\t3: cleaning per sample (with CutAdapt v >= 1.8)\\n\"\n msg += \"\\t\\t4: alignment per sample (with BWA MEM v >= 0.7.12, Samtools v >= 1.3, Picard and R v >= 3)\\n\"\n msg += \"\\t\\t5: local realignment per sample (with GATK v >= 3.5)\\n\"\n msg += \"\\t\\t6: local realignment per genotype (with GATK v >= 3.5)\\n\"\n msg += \"\\t\\t7: variant and genotype calling per genotype (with GATK HaplotypeCaller v >= 3.5)\\n\"\n msg += \"\\t\\t8: variant and genotype calling jointly across genotypes (with GATK GenotypeGVCFs v >= 3.5)\\n\"\n msg += \"\\t\\t9: variant and genotype filtering (with GATK v >= 3.5)\\n\"\n msg += \" --samples\\tpath to the 'samples' file\\n\"\n msg += \"\\t\\tcompulsory for all steps, but can differ between steps\\n\"\n msg += \"\\t\\t e.g. if samples come from different species or are aligned\\n\"\n msg += \"\\t\\t on different ref genomes, different samples file should\\n\"\n msg += \"\\t\\t be used for steps 4-9, representing different subsets of\\n\"\n msg += \"\\t\\t the file used for steps 1-3\\n\"\n msg += \"\\t\\tthe file should be encoded in ASCII\\n\"\n msg += \"\\t\\tthe first row should be a header with column names\\n\"\n msg += \"\\t\\teach 'sample' (see details below) should have one and only one row\\n\"\n msg += \"\\t\\tany two columns should be separated with one tabulation\\n\"\n msg += \"\\t\\tcolumns can be in any order\\n\"\n msg += \"\\t\\trows starting by '#' are skipped\\n\"\n msg += \"\\t\\t12 columns are compulsory (but there can be more):\\n\"\n msg += \"\\t\\t genotype (see details below, e.g. 'Col-0', but use neither underscore '_' nor space ' ' nor dot '.', use dash '-' instead)\\n\"\n msg += \"\\t\\t ref_genome (identifier of the reference genome used for alignment, e.g. 'Atha_v2', but use neither space ' ' nor dot '.'; the full species name, e.g. 'Arabidopsis thaliana', will be present in the file given to --dict)\\n\"\n msg += \"\\t\\t library (e.g. can be the same as 'genotype')\\n\"\n msg += \"\\t\\t barcode (e.g. 
'ATGG')\\n\"\n msg += \"\\t\\t seq_center (e.g. 'Broad Institute', 'GenoToul', etc)\\n\"\n msg += \"\\t\\t seq_platform (e.g. 'ILLUMINA', see SAM format specification)\\n\"\n msg += \"\\t\\t seq_platform_model (e.g. 'HiSeq 2000')\\n\"\n msg += \"\\t\\t flowcell (e.g. 'C5YMDACXX')\\n\"\n msg += \"\\t\\t lane (e.g. '3', can be '31' if a first demultiplexing was done per index)\\n\"\n msg += \"\\t\\t date (e.g. '2015-01-15', see SAM format specification)\\n\"\n msg += \"\\t\\t fastq_file_R1 (filename, one per lane, gzip-compressed)\\n\"\n msg += \"\\t\\t fastq_file_R2 (filename, one per lane, gzip-compressed)\\n\"\n msg += \" --fcln\\tidentifier of a flowcell and lane number\\n\"\n msg += \"\\t\\tformat as <flowcell>_<lane-number>, e.g. 'C5YMDACXX_1'\\n\"\n msg += \"\\t\\tif set, only the samples from this lane will be analyzed\\n\"\n msg += \" --pird\\tpath to the input reads directory\\n\"\n msg += \"\\t\\tcompulsory for steps 1 and 2\\n\"\n msg += \"\\t\\twill be added to the columns 'fastq_file_R*' from the sample file\\n\"\n msg += \"\\t\\tif not set, input read files should be in current directory\\n\"\n msg += \" --enz\\tname of the restriction enzyme\\n\"\n msg += \"\\t\\tcompulsory for step 2\\n\"\n msg += \"\\t\\tdefault=ApeKI\\n\"\n msg += \" --dmxmet\\tmethod used to demultiplex\\n\"\n msg += \"\\t\\tcompulsory for step 2\\n\"\n msg += \"\\t\\tdefault=4c (see the help of demultiplex.py to know more)\\n\"\n msg += \" --subst\\tnumber of substitutions allowed during demultiplexing\\n\"\n msg += \"\\t\\tcompulsory for step 2\\n\"\n msg += \"\\t\\tdefault=2\\n\"\n msg += \" --ensubst\\tenforce the nb of substitutions allowed\\n\"\n msg += \"\\t\\tcompulsory for step 2\\n\"\n msg += \"\\t\\tdefault=lenient/strict\\n\"\n msg += \" --adp\\tpath to the file containing the adapters\\n\"\n msg += \"\\t\\tcompulsory for step 3\\n\"\n msg += \"\\t\\tsame format as FastQC: name<tab>sequence\\n\"\n msg += \"\\t\\tname: at least 'adpR1' (also 'adpR2' if paired-end)\\n\"\n msg += \"\\t\\tsequence: from 5' (left) to 3' (right)\\n\"\n msg += \" --errtol\\terror tolerance to find adapters\\n\"\n msg += \"\\t\\tcompulsory for step 3\\n\"\n msg += \"\\t\\tdefault=0.2\\n\"\n msg += \" --minovl\\tminimum overlap length between reads and adapters\\n\"\n msg += \"\\t\\tcompulsory for step 3\\n\"\n msg += \"\\t\\tdefault=3 (in bases)\\n\"\n msg += \" --minrl\\tminimum length to keep a read\\n\"\n msg += \"\\t\\tcompulsory for step 3\\n\"\n msg += \"\\t\\tdefault=35 (in bases)\\n\"\n msg += \" --minq\\tminimum quality to trim a read\\n\"\n msg += \"\\t\\tcompulsory for step 3\\n\"\n msg += \"\\t\\tdefault=20 (used for both reads if paired-end)\\n\"\n msg += \" --maxNp\\tmaximum percentage of N to keep a read\\n\"\n msg += \"\\t\\tcompulsory for step 3\\n\"\n msg += \"\\t\\tdefault=0.2\\n\"\n msg += \" --ref\\tpath to the prefix of files for the reference genome\\n\"\n msg += \"\\t\\tcompulsory for steps 4, 5, 6, 7, 8, 9\\n\"\n msg += \"\\t\\tshould correspond to the 'ref_genome' column in --samples\\n\"\n msg += \"\\t\\te.g. 
'/data/Atha_v2' for '/data/Atha_v2.fa', '/data/Atha_v2.bwt', etc\\n\"\n msg += \"\\t\\tthese files are produced via 'bwa index ...'\\n\"\n msg += \" --dict\\tpath to the 'dict' file (SAM header with @SQ tags)\\n\"\n msg += \"\\t\\tcompulsory for step 4\\n\"\n msg += \"\\t\\tsee 'CreateSequenceDictionary' in the Picard software\\n\"\n msg += \" --jgid\\tcohort identifier to use for joint genotyping\\n\"\n msg += \"\\t\\tcompulsory for steps 8, 9\\n\"\n msg += \"\\t\\tuseful to launch several, different cohorts in parallel\\n\"\n msg += \" --rat\\trestrict alleles to be of a particular allelicity\\n\"\n msg += \"\\t\\tused in step 9\\n\"\n msg += \"\\t\\tdefault=ALL/BIALLELIC/MULTIALLELIC\\n\"\n msg += \"\\t\\tsee '--restrictAllelesTo' in GATK's SelectVariant\\n\"\n msg += \" --mdp\\tminimum value for DP (read depth; e.g. 10)\\n\"\n msg += \"\\t\\tused in step 9\\n\"\n msg += \"\\t\\tsee GATK's VariantFiltration\\n\"\n msg += \" --mgq\\tminimum value for GQ (genotype quality; e.g. 20)\\n\"\n msg += \"\\t\\tused in step 9\\n\"\n msg += \"\\t\\tsee GATK's VariantFiltration\\n\"\n msg += \" --mnfg\\tmaximum number of filtered genotypes to keep a variant\\n\"\n msg += \"\\t\\tused in step 9\\n\"\n msg += \"\\t\\tsee '--maxFilteredGenotypes' in GATK's SelectVariants\\n\"\n msg += \" --mffg\\tmaximum fraction of filtered genotypes to keep a variant\\n\"\n msg += \"\\t\\tused in step 9\\n\"\n msg += \"\\t\\tsee '--maxFractionFilteredGenotypes' in GATK's SelectVariants\\n\"\n msg += \" --mnnc\\tmaximum number of not-called genotypes to keep a variant\\n\"\n msg += \"\\t\\tused in step 9\\n\"\n msg += \" --mfnc\\tmaximum fraction of not-called genotypes to keep a variant\\n\"\n msg += \"\\t\\tused in step 9\\n\"\n msg += \"\\t\\tsee '--maxNOCALLfraction' in GATK's SelectVariants\\n\"\n msg += \" --fam\\tpath to the file containing pedigree information\\n\"\n msg += \"\\t\\tused in step 9\\n\"\n msg += \"\\t\\tdiscard variants with Mendelian violations (see Semler et al, 2012)\\n\"\n msg += \"\\t\\tshould be in the 'fam' format specified by PLINK\\n\"\n msg += \"\\t\\tvalidation strictness (GATK '-pedValidationType') is set at 'SILENT'\\n\"\n msg += \"\\t\\t allowing some samples to be absent from the pedigree\\n\"\n msg += \" --mvq\\tminimum GQ for each trio member to accept a variant as a Mendelian violation\\n\"\n msg += \"\\t\\tused in step 9 if '--fam' is specified\\n\"\n msg += \"\\t\\tdefault=0\\n\"\n msg += \" --xlssf\\tpath to the file with genotypes to exclude\\n\"\n msg += \"\\t\\tused in step 9 (can be especially useful if '--fam' is specified)\\n\"\n msg += \" --tmpd\\tpath to a temporary directory on child nodes (default=.)\\n\"\n msg += \"\\t\\te.g. 
it can be /tmp or /scratch\\n\"\n msg += \"\\t\\tused in step 4 for 'samtools sort'\\n\"\n msg += \"\\t\\tused in step 7 for 'GATK HaplotypeCaller'\\n\"\n msg += \" --jvmXms\\tinitial memory allocated to the Java Virtual Machine\\n\"\n msg += \"\\t\\tdefault=512m (can also be specified as 1024k, 1g, etc)\\n\"\n msg += \"\\t\\tused in steps 4, 5, 6, 7 and 8 for Picard and GATK\\n\"\n msg += \" --jvmXmx\\tmaximum memory allocated to the Java Virtual Machine\\n\"\n msg += \"\\t\\tdefault=4g\\n\"\n msg += \"\\t\\tused in steps 4, 5, 6, 7 and 8 for Picard and GATK\\n\"\n msg += \" --queue2\\tname of the second cluster queue (default=bigmem.q)\\n\"\n msg += \"\\t\\tused in step 4 for Picard to collect insert sizes\\n\"\n msg += \" --knowni\\tpath to a VCF file with known indels (for local realignment)\\n\"\n msg += \" --known\\tpath to a VCF file with known variants (e.g. from dbSNP)\\n\"\n msg += \" --force\\tforce to re-run step(s)\\n\"\n msg += \"\\t\\tthis removes without warning the step directory if it exists\\n\"\n msg += \"\\n\"\n msg += \"Examples:\\n\"\n msg += \" %s --step 1 --samples samples.txt\\n\" % os.path.basename(sys.argv[0])\n msg += \"\\n\"\n msg += \"Details:\\n\"\n msg += \"This program aims at genotyping a set of 'genotypes' using data from\\n\"\n msg += \"a restriction-assisted DNA sequencing (RAD-seq) experiment, also known\\n\"\n msg += \"as a genotyping-by-sequencing (GBS) experiment.\\n\"\n msg += \"Here, by 'genotype', we mean the entity which is the focus of the\\n\"\n msg += \"study. For instance, it can be a plant variety (or a human being), or\\n\"\n msg += \"the specific clone of a given plant variety (or a specific tumor of a\\n\"\n msg += \"given human being), etc.\\n\"\n msg += \"Importantly, note that the content of the 'genotype' column will\\n\"\n msg += \"be used to set the 'SM' (sample) tag of the 'RG' (read group) header\\n\"\n msg += \"record type of the SAM format (see http://www.htslib.org/). However,\\n\"\n msg += \"internal to this program, the term 'sample' corresponds to the unique\\n\"\n msg += \"quadruplet (genotype,flowcell,lane,barcode) for steps 1 and 2, and to\\n\"\n msg += \"the unique triplet (genotype,flowcell,lane) for the others.\\n\"\n msg += \"Jobs are executed in parallel (--schdlr). Their return status is\\n\"\n msg += \"recorded in a SQLite database which is removed at the end. If a job\\n\"\n msg += \"fails, the whole script stops with an error.\\n\"\n msg += \"\\n\"\n msg += \"Dependencies:\\n\"\n msg += \"Python >= 2.7; Biopython; pyutilstimflutre >= 0.5\\n\"\n msg += \"\\n\"\n msg += \"Report bugs to <[email protected]>.\"\n print(msg); sys.stdout.flush()",
"def qc_metrics(self, files_in, qc_files):\n self.cmd(\"{samtools} index {bam_in}\"\n .format(\n samtools=self.cmds[\"samtools\"],\n bam_in=files_in[0],\n ),\n shell=True)\n self.cmd(\"{samtools} idxstats {bam_in} | tee {qc_file}\"\n .format(\n samtools=self.cmds[\"samtools\"],\n bam_in = files_in[0],\n qc_file = qc_files[0],\n ),\n shell=True,\n log_output=True)\n self.cmd(\"{samtools} flagstat {bam_in} | tee {qc_file}\"\n .format(\n samtools=self.cmds[\"samtools\"],\n bam_in = files_in[0],\n qc_file = qc_files[1],\n ),\n shell=True,\n log_output=True)\n \n self.checkpoint(qc_files[0])\n self.checkpoint(qc_files[1])\n self.checkpoint(qc_files[2])",
"def usage(progname):\n \n sys.stderr.write(\"Usage: \" +progname + \" [-cmnv] [-z score] \"\n \" <outdir>\\n\")\n sys.stderr.write(' -c class level not fold level evaluation\\n')\n sys.stderr.write(' -m read multiquery file on stdin\\n')\n sys.stderr.write(' -n negate scores (so that most -ve is best)\\n')\n sys.stderr.write(' -v verbose messages to stderr\\n')\n sys.stderr.write(' -z score : assign identifiers not present in the output a score of score\\n')\n sys.exit(1)",
"def _cmd_segmetrics(args):\n if not 0.0 < args.alpha <= 1.0:\n raise RuntimeError(\"alpha must be between 0 and 1.\")\n\n if not any((args.location_stats, args.spread_stats, args.interval_stats)):\n logging.info(\"No stats specified\")\n return\n\n # Calculate all metrics\n cnarr = read_cna(args.cnarray)\n segarr = read_cna(args.segments)\n segarr = do_segmetrics(\n cnarr,\n segarr,\n args.location_stats,\n args.spread_stats,\n args.interval_stats,\n args.alpha,\n args.bootstrap,\n args.smooth_bootstrap,\n skip_low=args.drop_low_coverage,\n )\n tabio.write(segarr, args.output or segarr.sample_id + \".segmetrics.cns\")",
"def main(argv=None):\n\n if argv is None:\n argv = sys.argv\n\n # setup command line parser\n parser = E.OptionParser(version=\"%prog version: $Id$\",\n usage=globals()[\"__doc__\"])\n\n parser.add_option(\"-t\", \"--test\", dest=\"test\", type=\"string\",\n help=\"supply help\")\n\n parser.add_option(\"--method\", dest=\"method\", type=\"choice\",\n choices=(\"metrics\", \"summary\", \"module_summary\"),\n help=\"method to summarise clustering\")\n\n parser.add_option(\"--ref-gtf-files\", dest=\"ref_gtf\", type=\"string\",\n help=\"comma separated list of reference gtf files\")\n\n # add common options (-h/--help, ...) and parse command line\n (options, args) = E.Start(parser, argv=argv)\n\n if options.method == \"metrics\":\n infile = argv[-1]\n E.info(\"loading input file: %s\" % infile)\n assert infile\n\n df = pd.read_table(infile,\n sep=\"\\t\",\n header=None,\n index_col=0)\n\n df = df.ix[:, :50]\n cluster_combs = (x for x in itertools.combinations(df.columns,\n 2))\n genes = df.index\n results_dict = {}\n all_clusts = {}\n\n E.info(\"setting up cluster containers\")\n for i in df.columns:\n clusters = set(df[i].values.tolist())\n cluster_dict = {}\n for clust in clusters:\n cluster_dict[clust] = []\n for gene in genes:\n cluster_dict[df[i][gene]].append(gene)\n\n for col in clusters:\n col_set = set()\n clust_col = cluster_dict[col]\n gene_members = itertools.combinations(clust_col,\n 2)\n col_set.update(gene_members)\n cluster_dict[col] = col_set\n all_clusts[i] = cluster_dict\n E.info(\"generating all pair-wise cluster comparisons\")\n E.info(\"calculating adjusted mutual information\")\n for k in cluster_combs:\n clusters1 = all_clusts[k[0]]\n clusters2 = all_clusts[k[1]]\n metric_dict = {}\n metric_dict['AMI'] = TS.adjustedMutualInformation(clusters1,\n clusters2)\n results_dict[k] = metric_dict\n\n res_frame = pd.DataFrame(results_dict).T\n res_frame = res_frame.reset_index()\n res_frame.drop(['level_0'], inplace=True, axis=1)\n res_frame.drop(['level_1'], inplace=True, axis=1)\n\n # flatten rand indices and add to output dataframe\n rand_arrays = TS.randIndexes(df)\n flat_adj_rand = TS.unravel_arrays(rand_arrays[0])\n flat_rand = TS.unravel_arrays(rand_arrays[1])\n res_frame['Rand_Index'] = flat_rand\n res_frame['Adjusted_Rand_Index'] = flat_adj_rand\n E.info(\"aggregating results\")\n\n res_frame.to_csv(options.stdout,\n sep=\"\\t\",\n index_label='idx')\n\n elif options.method == \"summary\":\n infiles = argv[-1]\n list_of_files = infiles.split(\",\")\n\n file_dict = {}\n for fle in list_of_files:\n fname = fle.split(\"/\")[-1]\n condition = fname.split(\"-\")[0]\n ref = fname.split(\"-\")[1]\n df_ = pd.read_table(fle,\n sep=\"\\t\",\n header=0,\n index_col=0)\n df_.columns = ['gene_id', 'cluster']\n clust_dict = {}\n for idx in df_.index:\n cluster = df_.loc[idx]['cluster']\n gene = df_.loc[idx]['gene_id']\n try:\n clust_dict[cluster] += 1\n except KeyError:\n clust_dict[cluster] = 1\n med_size = np.median(clust_dict.values())\n file_dict[fname] = {'condition': condition,\n 'reference': ref,\n 'median_cluster_size': med_size}\n\n outframe = pd.DataFrame(file_dict).T\n outframe.to_csv(options.stdout,\n sep=\"\\t\",\n index_label='idx')\n\n elif options.method == \"module_summary\":\n # get lncRNA/gene lengths from reference gtfs\n ref_gtfs = options.ref_gtf.split(\",\")\n length_dict = {}\n for ref in ref_gtfs:\n oref = IOTools.openFile(ref, \"rb\")\n git = GTF.transcript_iterator(GTF.iterator(oref))\n for gene in git:\n for trans in gene:\n length = trans.end - 
trans.start\n try:\n length_dict[trans.gene_id] += length\n except KeyError:\n length_dict[trans.gene_id] = length\n oref.close()\n\n infiles = argv[-1]\n list_of_files = infiles.split(\",\")\n\n fdfs = []\n for fle in list_of_files:\n cond = fle.split(\"/\")[-1].split(\"-\")[0]\n refer = fle.split(\"/\")[-1].split(\"-\")[1]\n _df = pd.read_table(fle, sep=\"\\t\",\n header=0, index_col=0)\n _df.columns = ['gene_id', 'cluster']\n clusters = set(_df['cluster'])\n c_dict = {}\n # summarize over each cluster\n for clust in clusters:\n lengths = []\n c_df = _df[_df['cluster'] == clust]\n for lid in c_df['gene_id']:\n lengths.append(length_dict[lid])\n c_dict[clust] = {'cluster_size': len(c_df['gene_id']),\n 'mean_length': np.mean(lengths),\n 'index': (cond, refer),\n 'module': clust}\n cdf = pd.DataFrame(c_dict).T\n # use a multindex for hierarchical indexing\n midx = pd.MultiIndex.from_tuples(cdf['index'])\n cdf.index = midx\n cdf.drop(['index'], inplace=True, axis=1)\n fdfs.append(cdf)\n\n # generate a single output df\n s_df = fdfs[0]\n fdfs.pop(0)\n for df in fdfs:\n s_df = s_df.append(df)\n\n s_df.to_csv(options.stdout,\n index_label=(\"condition\", \"reference\"),\n sep=\"\\t\")\n\n # write footer and output benchmark information.\n E.Stop()",
"def main():\n\n parser = argparse.ArgumentParser(description=main.__doc__,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n epilog=\"Homepage: https://github.com/kbat/mc-tools\")\n parser.add_argument('com', type=str, help='plot requests file name', nargs='?', default=\"/tmp/foo.c\")\n parser.add_argument('comout', type=str, help='COMOUT file name', nargs='?', default=\"zoom\")\n\n args = parser.parse_args()\n\n cmd = {} # dictionary of commands\n\n bas = False\n plane = False\n \n with open(args.com) as f:\n for line in f.readlines():\n words = line.strip().split()\n if len(words) is 0:\n continue\n\n for i,w in enumerate(words):\n if re.search(\"^bas\", w):\n cmd['bas'] = list(map(float, words[i+1:i+7]))\n if plane is False: bas = True # basis was before plane cuts\n elif re.search(\"^or\", w):\n cmd['or'] = list(map(float, words[i+1:i+4]))\n elif re.search(\"^ex\", w):\n try: # both x and y scales are given\n cmd['ex'] = list(map(float, words[i+1:i+3]))\n continue\n except ValueError: # just 1 scale is given\n cmd['ex'] = list(map(float, words[i+1:i+2]))\n elif re.search(\"^lab\", w):\n cmd['label'] = list(map(int, map(float, words[i+1:i+3]))) #+ [words[i+3]]\n elif re.search(\"^p[xyz]\", w):\n cmd[w] = [float(words[i+1])]\n if bas is False: plane = True # plane cuts were before basis\n elif re.search(\"^legend\", w):\n cmd[w] = [words[i+1]]\n elif w == \"scale\":\n print(w)\n if int(words[i+1]): # no need to put 'scale 0'\n cmd[w] = [words[i+1]]\n elif w in (\"mesh\"):\n if int(words[i+1])==1: # no need to put 'mesh 1'\n cmd[w] = [words[i+1]]\n\n print(bas, plane)\n\n if plane: # bas was first\n keys = ('bas', 'or', 'ex', 'px', 'py', 'pz', 'label', 'mesh', 'legend', 'scale')\n elif bas:\n keys = ('or', 'ex', 'px', 'py', 'pz', 'bas', 'label', 'mesh', 'legend', 'scale')\n else:\n keys = {'or', 'ex', 'label', 'mesh', 'legend', 'scale'}\n \n with open(args.comout, 'w') as f:\n for key in keys:\n if key in cmd:\n # newline required by mcplot:\n if key in ('mesh', 'legend', 'scale', 'label'):\n f.write(\"\\n\")\n f.write(\"%s %s \" % (key,\" \".join(str(e) for e in cmd[key]),))\n f.write(\"\\n\")",
"def main():\n parser = argparse.ArgumentParser(\n description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter\n )\n add_common_snmp_args(parser)\n parser.add_argument(\n \"-w\",\n \"--warning\",\n type=int,\n default=70,\n help=\"Warning memory usage percentage (0-100)\",\n )\n parser.add_argument(\n \"-c\",\n \"--critical\",\n type=int,\n default=90,\n help=\"Critical memory usage percentage (0-100)\",\n )\n parser.add_argument(\n \"-f\",\n \"--family\",\n required=True,\n help=\"Switch family. Supported families: 1910, 1920, 1920S\",\n )\n\n config = vars(parser.parse_args())\n check_snmp_args(config)\n check_thresholds(config)\n\n dataset = {}\n\n if config[\"family\"] == \"1920S\":\n cpu = ObjectType(\n ObjectIdentity(\n \"HP-SWITCHING-MIB\", \"agentSwitchCpuProcessTotalUtilization\", 0\n )\n )\n elif config[\"family\"] in [\"1910\", \"1920\"]:\n cpu = ObjectType(\n ObjectIdentity(\"HH3C-ENTITY-EXT-MIB\", \"hh3cEntityExtCpuUsage\", 8)\n )\n else:\n unknown_exit(SERVICE, f\"Switch family {config['family']} NOT known\")\n\n try:\n raw_data = get_snmp_data(config, cpu)\n except ValueError as err:\n unknown_exit(SERVICE, err)\n add_vars_to_dataset(dataset, raw_data)\n\n if config[\"family\"] == \"1920S\":\n dataset[\"cpu_usage\"] = get_hp_cpu_usage(\n dataset[\"agentSwitchCpuProcessTotalUtilization\"]\n )\n elif config[\"family\"] in [\"1910\", \"1920\"]:\n dataset[\"cpu_usage\"] = int(dataset[\"hh3cEntityExtCpuUsage\"])\n else:\n unknown_exit(SERVICE, f\"Switch family {config['family']} NOT known\")\n\n state, message = generate_output(config, dataset)\n report(state, message)",
"def cmd_help(args):"
] | [
"0.65378803",
"0.627548",
"0.6020114",
"0.5702169",
"0.56018233",
"0.55116755",
"0.55000883",
"0.5320139",
"0.5315897",
"0.5299815",
"0.52958316",
"0.5274619",
"0.52392995",
"0.52203125",
"0.52038133",
"0.5199431",
"0.5187151",
"0.5178949",
"0.5170392",
"0.5164013",
"0.5105565",
"0.5103818",
"0.50898266",
"0.5052539",
"0.5035622",
"0.502821",
"0.501706",
"0.5015949",
"0.49987954",
"0.4995435"
] | 0.66221523 | 0 |
Report usage metrics for all active Crypto Express adapters of CPCs. In addition to the command-specific options shown in this help text, the general options (see 'zhmc help') can also be specified right after the 'zhmc' command name. | def metrics_crypto(cmd_ctx, cpc, **options):
cmd_ctx.execute_cmd(lambda: cmd_metrics_crypto(cmd_ctx, cpc, options)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def metrics_channel(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_channel(cmd_ctx, cpc, options))",
"def metrics_adapter(cmd_ctx, cpc, adapter, **options):\n cmd_ctx.execute_cmd(\n lambda: cmd_metrics_adapter(cmd_ctx, cpc, adapter, options))",
"def do_hostinfo(self, args):\n host = opts = None\n if args:\n args = args.split()\n host = args.pop()\n\n if not host:\n print('Usage: hostinfo [-cdmu] host_name_or_ip')\n print(' uptime and load stats returned if no options specified')\n return\n\n try:\n ip = socket.gethostbyname(host)\n except socket.gaierror:\n print('cannot resolve', host, file=sys.stderr)\n return\n\n opts = []\n while args:\n arg = args.pop(0)\n if arg.startswith('--'):\n if arg == '--cpu':\n opts.append('c')\n elif arg == '--disk':\n opts.append('d')\n elif arg == '--memory':\n opts.append('m')\n elif arg == '--uptime':\n opts.append('u')\n else:\n print('unrecognized option:', arg, file=sys.stderr)\n return\n else:\n if arg[0] == '-':\n for ch in arg[1:]:\n if ch in ('cdmu') and ch not in opts:\n opts.append(ch)\n else:\n print('unrecognized option:', ch, file=sys.stderr)\n return\n\n stats = self._qm.get_host_stats(ip)\n\n if not opts:\n # Get uptime and load averages.\n up = stats['uptime']\n load = stats['cpu_load']\n print('Up for %s days, %s hours, %s minutes, '\n 'load averages: %s, %s, %s'\n % (up['days'], up['hours'], up['minutes'], load['one'],\n load['five'], load['fifteen']))\n return\n\n all_stats = []\n for opt in opts:\n if opt == 'd':\n # Get disk usage.\n disks = stats['disk_usage']\n st = ['Disk Usage:']\n for mount, disk_info in disks.viewitems():\n st.append(' Usage for: %s' % mount)\n for k, v in disk_info.viewitems():\n st.append(' %s: %s' % (k, v))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'c':\n # Get CPU load.\n load_stats = stats['cpu_load']\n st = ['CPU Load Average:']\n st.append(' last one minute: %s' % load_stats['one'])\n st.append(' last five minutes: %s' % load_stats['five'])\n st.append(' last fifteen minutes: %s' % load_stats['fifteen'])\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'm':\n # Get Memory Usage.\n memory_usage = stats['memory_usage']\n st = ['Memory usage:']\n for k, v in memory_usage.viewitems():\n st.append(' %s: %s' % (k, v))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'u':\n # Get uptime.\n up = stats['uptime']\n st = ['Uptime:']\n st.append(' Up for %s days, %s hours and %s minutes'\n % (up['days'], up['hours'], up['minutes']))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n\n print('\\n'.join(all_stats))",
"def metrics_proc(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_proc(cmd_ctx, cpc, options))",
"def metrics_cpc(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_cpc(cmd_ctx, cpc, options))",
"def metrics_env(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_env(cmd_ctx, cpc, options))",
"def help(self, args):\n print('No commands available for this consumer')",
"async def stats(self, ctx):\n if ctx.invoked_subcommand is None:\n await send_cmd_help(ctx)",
"def stats(caesar, input):\n commands = {}\n users = {}\n channels = {}\n\n ignore = set(['f_note', 'startup', 'message', 'noteuri'])\n for (name, user), count in caesar.stats.items(): \n if name in ignore: continue\n if not user: continue\n\n if not user.startswith('#'): \n try: users[user] += count\n except KeyError: users[user] = count\n else: \n try: commands[name] += count\n except KeyError: commands[name] = count\n\n try: channels[user] += count\n except KeyError: channels[user] = count\n\n comrank = sorted([(b, a) for (a, b) in commands.iteritems()], reverse=True)\n userank = sorted([(b, a) for (a, b) in users.iteritems()], reverse=True)\n charank = sorted([(b, a) for (a, b) in channels.iteritems()], reverse=True)\n\n # most heavily used commands\n creply = 'most used commands: '\n for count, command in comrank[:10]: \n creply += '%s (%s), ' % (command, count)\n caesar.say(creply.rstrip(', '))\n\n # most heavy users\n reply = 'power users: '\n for count, user in userank[:10]: \n reply += '%s (%s), ' % (user, count)\n caesar.say(reply.rstrip(', '))\n\n # most heavy channels\n chreply = 'power channels: '\n for count, channel in charank[:3]: \n chreply += '%s (%s), ' % (channel, count)\n caesar.say(chreply.rstrip(', '))",
"def options():\n print \"\"\"Options summary:\n -h, --help\n -u, --usage\n -v, --verbose <verb_level>\n -e, --endpoint <endpoint>\n -i, --interface-type <iface_type>\n -r, --recursive\n --dbs-conf <conf_file>\n --show-prod\n --show-caf\n --only-subscribed\n --only-custodial\n \"\"\"",
"def metrics_networkport(cmd_ctx, cpc, adapter, **options):\n cmd_ctx.execute_cmd(\n lambda: cmd_metrics_networkport(cmd_ctx, cpc, adapter, options))",
"def main():\n\n args = parse_args()\n metric_sender = MetricSender(verbose=args.verbose, debug=args.debug)\n\n discovery_key_disk = 'disc.disk'\n interval = 3\n pcp_disk_dev_metrics = ['disk.dev.total', 'disk.dev.avactive']\n item_prototype_macro_disk = '#OSO_DISK'\n item_prototype_key_tps = 'disc.disk.tps'\n item_prototype_key_putil = 'disc.disk.putil'\n\n disk_metrics = pminfo.get_sampled_data(pcp_disk_dev_metrics, interval, 2)\n\n pcp_metrics_divided = {}\n for metric in pcp_disk_dev_metrics:\n pcp_metrics_divided[metric] = {k: v for k, v in disk_metrics.items() if metric in k}\n\n # do TPS checks; use disk.dev.total\n filtered_disk_totals = clean_up_metric_dict(pcp_metrics_divided[pcp_disk_dev_metrics[0]],\n pcp_disk_dev_metrics[0] + '.')\n\n # Add dynamic items\n metric_sender.add_dynamic_metric(discovery_key_disk, item_prototype_macro_disk, filtered_disk_totals.keys())\n\n # calculate the TPS and add them to the ZaggSender\n for disk, totals in filtered_disk_totals.iteritems():\n disk_tps = (totals[1] - totals[0]) / interval\n metric_sender.add_metric({'%s[%s]' % (item_prototype_key_tps, disk): disk_tps})\n\n # do % Util checks; use disk.dev.avactive\n filtered_disk_totals = clean_up_metric_dict(pcp_metrics_divided[pcp_disk_dev_metrics[1]],\n pcp_disk_dev_metrics[1] + '.')\n\n # calculate the % Util and add them to the ZaggSender\n for disk, totals in filtered_disk_totals.iteritems():\n total_active = (float)(totals[1] - totals[0]) / 1000.0\n putil = 100 * total_active / interval\n\n metric_sender.add_metric({'%s[%s]' % (item_prototype_key_putil, disk): putil})\n\n metric_sender.send_metrics()",
"def usage(self, host):",
"def show(self):\n prev_queries = 0\n prev_cpu_sys = 0\n prev_cpu_user = 0\n \n lines = {\n \"Uptime (seconds)\": \"--\",\n \"Number of queries\": \"--\",\n \"Query per second\": \"--\",\n \"ACL drops\": \"--\",\n \"Dynamic drops\": \"--\",\n \"Rule drops\": \"--\",\n \"CPU Usage (%s)\": \"--\",\n \"Cache hitrate\": \"--\"\n }\n\n while True:\n try:\n # get stats from dnsdist\n stats = Statistics(console=self.console)\n global_stats = stats[\"global\"]\n \n qps = int(global_stats[\"queries\"]) - prev_queries\n prev_queries = int(global_stats[\"queries\"])\n cpu = (int(global_stats[\"cpu-sys-msec\"])+int(global_stats[\"cpu-user-msec\"]) - prev_cpu_sys - prev_cpu_user) / 10\n prev_cpu_sys = int(global_stats[\"cpu-sys-msec\"])\n prev_cpu_user = int(global_stats[\"cpu-user-msec\"])\n \n lines[\"Uptime (seconds)\"] = global_stats[\"uptime\"]\n lines[\"Number of queries\"] = global_stats[\"queries\"]\n lines[\"Query per second\"] = qps\n lines[\"CPU Usage (%s)\"] = cpu\n lines[\"ACL drops\"] = global_stats[\"acl-drops\"]\n lines[\"Rule drops\"] = global_stats[\"rule-drop\"]\n lines[\"Cache hitrate\"] = global_stats[\"cache-hits\"]\n lines[\"Dynamic drops\"] = global_stats[\"dyn-blocked\"]\n\n # reprint the lines \n sys.stdout.write(\"\\033[1mDashboard for dnsdist\\033[0m\\n\")\n sys.stdout.write(\"\\n\")\n sys.stdout.write(\"Global:\\n\")\n for k,v in lines.items():\n sys.stdout.write(\"\\t%s: %s\\n\" % (k,v))\n sys.stdout.write(\"Backends:\\n\")\n for s in stats[\"backends\"]:\n if not len(s[\"name\"]):\n s[\"name\"] = \"--\"\n if not len(s[\"pools\"]):\n s[\"pools\"] = \"--\"\n sys.stdout.write(\"\\t#%s / %s / %s / %s\\n\" % (s[\"#\"],s[\"address\"],s[\"name\"],s[\"pools\"]) )\n sys.stdout.write(\"\\t\\tNumber of queries: %s\\n\" % s[\"queries\"])\n sys.stdout.write(\"\\t\\tQuery per second: %s\\n\" % s[\"qps\"])\n sys.stdout.write(\"\\t\\tNumber of drops: %s\\n\" % s[\"drops\"])\n sys.stdout.write(\"\\n\")\n sys.stdout.write(\"Ctrl+C to exit\\n\")\n \n time.sleep(1)\n \n \n # move up cursor and delete whole line\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\") \n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n for k,v in lines.items():\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\") \n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n for s in stats[\"backends\"]:\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\") \n sys.stdout.write(\"\\x1b[1A\\x1b[2K\") \n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n \n del stats\n except KeyboardInterrupt:\n break",
"def do_stats(self, args):\n total_cpu = free_cpu = in_use_cpu = 0\n\n summary = self._qm.get_all_host_summary()\n for host_id, host_info in summary.viewitems():\n host_cpu = int(host_info['total cores'])\n total_cpu += host_cpu\n locked = host_info.get('locked by')\n if locked:\n # If host is locked then all CPUs are in use.\n in_use_cpu += host_cpu\n else:\n free_host_cpu = int(host_info['free cores'])\n in_use_cpu += (host_cpu - free_host_cpu)\n free_cpu += free_host_cpu\n\n print('total CPU: ', total_cpu)\n print('used/locked CPU: ', in_use_cpu)\n print('free CPU: ', free_cpu)\n capacity = float(in_use_cpu) / float(total_cpu)\n print('capacity used: %.1f%%' % (capacity * 100,))\n capacity = float(free_cpu) / float(total_cpu)\n print('capacity remaining: %.1f%%' % (capacity * 100,))",
"def usage():\n print(\"[1] Getting help from a cipher \")\n print(\" ---> ./cryptogra.py caesar -h \")\n print(\"\")",
"def usage():\n \n print '-b <bench> the bench to show.'\n print '-c <config> the config to show (GPU, 8888, 565, etc).'\n print '-d <dir> a directory containing bench_r<revision>_<scalar> files.'\n print '-e <file> file containing expected bench values/ranges.'\n print ' Will raise exception if actual bench values are out of range.'\n print ' See bench_expectations.txt for data format and examples.'\n print '-f <revision>[:<revision>] the revisions to use for fitting.'\n print ' Negative <revision> is taken as offset from most recent revision.'\n print '-i <time> the time to ignore (w, c, g, etc).'\n print ' The flag is ignored when -t is set; otherwise we plot all the'\n print ' times except the one specified here.'\n print '-l <title> title to use for the output graph'\n print '-m <representation> representation of bench value.'\n print ' See _ListAlgorithm class in bench_util.py.'\n print '-o <path> path to which to write output; writes to stdout if not specified'\n print '-r <revision>[:<revision>] the revisions to show.'\n print ' Negative <revision> is taken as offset from most recent revision.'\n print '-s <setting>[=<value>] a setting to show (alpha, scalar, etc).'\n print '-t <time> the time to show (w, c, g, etc).'\n print '-x <int> the desired width of the svg.'\n print '-y <int> the desired height of the svg.'\n print '--default-setting <setting>[=<value>] setting for those without.'",
"def _cmd_metrics(args):\n if (\n len(args.cnarrays) > 1\n and args.segments\n and len(args.segments) > 1\n and len(args.cnarrays) != len(args.segments)\n ):\n raise ValueError(\n \"Number of coverage/segment filenames given must be \"\n \"equal, if more than 1 segment file is given.\"\n )\n\n cnarrs = map(read_cna, args.cnarrays)\n if args.segments:\n args.segments = map(read_cna, args.segments)\n table = metrics.do_metrics(cnarrs, args.segments, args.drop_low_coverage)\n write_dataframe(args.output, table)",
"def print_help(self):\r\n\t\ttext = \"\\tName: ml_scikit_OPTICS\"\r\n\t\ttext += \"\\n\\t\\tThis machine learning plugin uses scikit-learn's OPTICS algorithm.\\n\"\r\n\t\ttext += \"\\n\\t\\tOptional Parameters:\"\r\n\t\ttext += \"\\n\\t\\t\\tOPTICS_skip_normalization: Do NOT perform normalization (scaling) of data, skip this step.\"\r\n\t\ttext += \"\\n\\t\\t\\OPTICS_eps: Specify eps parameter (default is 1.0).\"\r\n\t\ttext += \"\\n\\t\\t\\OPTICS_min_samples: Specify min_samples parameter (default is 5).\"\r\n#\r\n# OPTICS (with memory complexity n) is an alternative to DBSCAN (with memory complexity n^2)\r\n# which has time complexity n^2 in general with the default max_eps = np.inf. \r\n# We will set max_eps = eps to reduce the run-time.\r\n#\r\n\t\treturn text",
"def HelpCommand(self, unused_args, unused_sub_opts=None, unused_headers=None,\n unused_debug=None):\n self.OutputUsageAndExit()",
"def usage(err=''):\r\n m = '%s\\n' %err\r\n m += 'Default usage is to list Cases closed for the 30 days\\n'\r\n m += '\\n Example:\\n'\r\n m += ' closedcases -n 90 \\n' \r\n m += ' \\n'\r\n# m += ' closedcases -n 60 -s blast5 \\n'\r\n return m",
"def qc_metrics(self, files_in, qc_files):\n self.cmd(\"{samtools} index {bam_in}\"\n .format(\n samtools=self.cmds[\"samtools\"],\n bam_in=files_in[0],\n ),\n shell=True)\n self.cmd(\"{samtools} idxstats {bam_in} | tee {qc_file}\"\n .format(\n samtools=self.cmds[\"samtools\"],\n bam_in = files_in[0],\n qc_file = qc_files[0],\n ),\n shell=True,\n log_output=True)\n self.cmd(\"{samtools} flagstat {bam_in} | tee {qc_file}\"\n .format(\n samtools=self.cmds[\"samtools\"],\n bam_in = files_in[0],\n qc_file = qc_files[1],\n ),\n shell=True,\n log_output=True)\n \n self.checkpoint(qc_files[0])\n self.checkpoint(qc_files[1])\n self.checkpoint(qc_files[2])",
"def ShowAllVouchers(cmd_args=[], cmd_options={}):\n iv_hash_table = kern.globals.ivht_bucket\n num_buckets = sizeof(kern.globals.ivht_bucket) / sizeof(kern.globals.ivht_bucket[0])\n print GetIPCVoucherSummary.header\n for i in range(num_buckets):\n for v in IterateQueue(iv_hash_table[i], 'ipc_voucher_t', 'iv_hash_link'):\n print GetIPCVoucherSummary(v)",
"def help_help(self):\n print(\"List commands or print details about a command\")",
"def treatCmdOpts(argv):\n baseName = os.path.basename(__file__)\n amc.cBaseName = colored(baseName, 'yellow')\n\n helpTxt = amc.cBaseName + ' analyses observation statistics file for selected GNSSs'\n\n # create the parser for command line arguments\n parser = argparse.ArgumentParser(description=helpTxt)\n\n parser.add_argument('--obsstat', help='observation statistics file', type=str, required=True)\n\n parser.add_argument('--freqs', help='select frequencies to use (out of {freqs:s}, default {freq:s})'.format(freqs='|'.join(gfzc.lst_freqs), freq=colored(gfzc.lst_freqs[0], 'green')), default=gfzc.lst_freqs[0], type=str, required=False, action=gco.freqtype_action, nargs='+')\n\n parser.add_argument('--cutoff', help='cutoff angle in degrees (default {mask:s})'.format(mask=colored('0', 'green')), default=0, type=int, required=False, action=gco.cutoff_action)\n\n parser.add_argument('--dbcvs', help='Add information to CVS database (default {cvsdb:s})'.format(cvsdb=colored(gco.CVSDB_OBSTLE, 'green')), required=False, type=str, default=gco.CVSDB_OBSTLE)\n\n parser.add_argument('--plot', help='displays interactive plots (default False)', action='store_true', required=False, default=False)\n\n parser.add_argument('--logging', help='specify logging level console/file (two of {choices:s}, default {choice:s})'.format(choices='|'.join(gco.lst_logging_choices), choice=colored(' '.join(gco.lst_logging_choices[3:5]), 'green')), nargs=2, required=False, default=gco.lst_logging_choices[3:5], action=gco.logging_action)\n\n # drop argv[0]\n args = parser.parse_args(argv[1:])\n\n # return arguments\n return args.obsstat, args.freqs, args.cutoff, args.dbcvs, args.plot, args.logging",
"def _get_metrics_options(metrics):\n metrics_options = []\n if metrics is None:\n metrics = []\n for static_metric in metrics:\n metrics_options += [\n \"-m\",\n static_metric.metric.mp_metric_name,\n str(static_metric.value),\n ]\n return metrics_options",
"def command_help(self, *args, **kwargs):\n print(\"Commands available:\\n\")\n for name in dir(self):\n if not name.startswith(\"command_\"):\n continue\n name_clean = name[len(\"command_\"):]\n print(\"%s:\\n - %s\\n\" % (name_clean, getattr(self, name).__doc__.strip()))",
"def display_memcache_info(request):\n # pylint: disable-msg=E1101\n return utility.respond(request, 'admin/memcache_info',\n {'memcache_info': memcache.get_stats()})",
"def help_opt(self):\n print(OPTIONS)",
"def usage():\n\n # Local constants\n\n # Local variables\n\n #****** start usage() ******#\n print()\n print(\" Usage: python TCGCardTracker.py <arguement below> <optional-argument-1>\")\n print(\"\\tadd (Optional): Add a card to your collection. Requires TCGPlayer URL.\")\n print(\"\\tdelete (Optional): Delete a card from your collection. Requires TCGPlayer URL.\")\n print(\"\\tupdate (Optional): Updates pricing data for every card in your collection.\")\n print(\"\\ttop25 (Optional): Outputs the 25 most valuable cards from your collection.\")\n print(\"\\texport (Optional): Exports a list of TCGPlayer URLs to a text file.\")\n print(\"\\texport_collection (Optional): Exports your collection to a .csv including most recent price data.\")\n print(\"\\timport (Optional): Imports a text file of TCGPlayer URLs to bulk import cards into your collection. Requires text file.\")\n print(\"\\tworth (Optional): Ouputs how much your collection is worth using latest price data.\")\n print(\"\\tgraph (Optional): Outputs historical pricing data for a given card. Requires TCGPlayer URL.\")\n print(\"\\tgraph (Optional): Outputs historical pricing data for a given card. Requires TCGPlayer URL.\")\n print(\"\\tticker (Optional): Displays a ticker grid of the change in value over a given time. If run without the days back parameter it will default to 7 days.\")\n sys.exit()"
] | [
"0.61710244",
"0.5943986",
"0.5742064",
"0.5595102",
"0.55200773",
"0.54445076",
"0.5411969",
"0.53798133",
"0.5354008",
"0.535235",
"0.5349474",
"0.5332392",
"0.53124905",
"0.5226011",
"0.51669437",
"0.51390135",
"0.5104674",
"0.5103828",
"0.50836706",
"0.5078611",
"0.5078313",
"0.50721085",
"0.50588006",
"0.5033912",
"0.5027998",
"0.5023201",
"0.5021214",
"0.5019734",
"0.49973506",
"0.49908763"
] | 0.6303562 | 0 |
Report usage metrics for the ports of network adapters of CPCs in DPM mode. In addition to the command-specific options shown in this help text, the general options (see 'zhmc help') can also be specified right after the 'zhmc' command name. | def metrics_networkport(cmd_ctx, cpc, adapter, **options):
cmd_ctx.execute_cmd(
lambda: cmd_metrics_networkport(cmd_ctx, cpc, adapter, options)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def metrics_channel(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_channel(cmd_ctx, cpc, options))",
"def metrics_adapter(cmd_ctx, cpc, adapter, **options):\n cmd_ctx.execute_cmd(\n lambda: cmd_metrics_adapter(cmd_ctx, cpc, adapter, options))",
"def do_hostinfo(self, args):\n host = opts = None\n if args:\n args = args.split()\n host = args.pop()\n\n if not host:\n print('Usage: hostinfo [-cdmu] host_name_or_ip')\n print(' uptime and load stats returned if no options specified')\n return\n\n try:\n ip = socket.gethostbyname(host)\n except socket.gaierror:\n print('cannot resolve', host, file=sys.stderr)\n return\n\n opts = []\n while args:\n arg = args.pop(0)\n if arg.startswith('--'):\n if arg == '--cpu':\n opts.append('c')\n elif arg == '--disk':\n opts.append('d')\n elif arg == '--memory':\n opts.append('m')\n elif arg == '--uptime':\n opts.append('u')\n else:\n print('unrecognized option:', arg, file=sys.stderr)\n return\n else:\n if arg[0] == '-':\n for ch in arg[1:]:\n if ch in ('cdmu') and ch not in opts:\n opts.append(ch)\n else:\n print('unrecognized option:', ch, file=sys.stderr)\n return\n\n stats = self._qm.get_host_stats(ip)\n\n if not opts:\n # Get uptime and load averages.\n up = stats['uptime']\n load = stats['cpu_load']\n print('Up for %s days, %s hours, %s minutes, '\n 'load averages: %s, %s, %s'\n % (up['days'], up['hours'], up['minutes'], load['one'],\n load['five'], load['fifteen']))\n return\n\n all_stats = []\n for opt in opts:\n if opt == 'd':\n # Get disk usage.\n disks = stats['disk_usage']\n st = ['Disk Usage:']\n for mount, disk_info in disks.viewitems():\n st.append(' Usage for: %s' % mount)\n for k, v in disk_info.viewitems():\n st.append(' %s: %s' % (k, v))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'c':\n # Get CPU load.\n load_stats = stats['cpu_load']\n st = ['CPU Load Average:']\n st.append(' last one minute: %s' % load_stats['one'])\n st.append(' last five minutes: %s' % load_stats['five'])\n st.append(' last fifteen minutes: %s' % load_stats['fifteen'])\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'm':\n # Get Memory Usage.\n memory_usage = stats['memory_usage']\n st = ['Memory usage:']\n for k, v in memory_usage.viewitems():\n st.append(' %s: %s' % (k, v))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'u':\n # Get uptime.\n up = stats['uptime']\n st = ['Uptime:']\n st.append(' Up for %s days, %s hours and %s minutes'\n % (up['days'], up['hours'], up['minutes']))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n\n print('\\n'.join(all_stats))",
"def metrics_cpc(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_cpc(cmd_ctx, cpc, options))",
"def metrics_proc(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_proc(cmd_ctx, cpc, options))",
"def metrics_crypto(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_crypto(cmd_ctx, cpc, options))",
"def metrics_env(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_env(cmd_ctx, cpc, options))",
"def show(self):\n prev_queries = 0\n prev_cpu_sys = 0\n prev_cpu_user = 0\n \n lines = {\n \"Uptime (seconds)\": \"--\",\n \"Number of queries\": \"--\",\n \"Query per second\": \"--\",\n \"ACL drops\": \"--\",\n \"Dynamic drops\": \"--\",\n \"Rule drops\": \"--\",\n \"CPU Usage (%s)\": \"--\",\n \"Cache hitrate\": \"--\"\n }\n\n while True:\n try:\n # get stats from dnsdist\n stats = Statistics(console=self.console)\n global_stats = stats[\"global\"]\n \n qps = int(global_stats[\"queries\"]) - prev_queries\n prev_queries = int(global_stats[\"queries\"])\n cpu = (int(global_stats[\"cpu-sys-msec\"])+int(global_stats[\"cpu-user-msec\"]) - prev_cpu_sys - prev_cpu_user) / 10\n prev_cpu_sys = int(global_stats[\"cpu-sys-msec\"])\n prev_cpu_user = int(global_stats[\"cpu-user-msec\"])\n \n lines[\"Uptime (seconds)\"] = global_stats[\"uptime\"]\n lines[\"Number of queries\"] = global_stats[\"queries\"]\n lines[\"Query per second\"] = qps\n lines[\"CPU Usage (%s)\"] = cpu\n lines[\"ACL drops\"] = global_stats[\"acl-drops\"]\n lines[\"Rule drops\"] = global_stats[\"rule-drop\"]\n lines[\"Cache hitrate\"] = global_stats[\"cache-hits\"]\n lines[\"Dynamic drops\"] = global_stats[\"dyn-blocked\"]\n\n # reprint the lines \n sys.stdout.write(\"\\033[1mDashboard for dnsdist\\033[0m\\n\")\n sys.stdout.write(\"\\n\")\n sys.stdout.write(\"Global:\\n\")\n for k,v in lines.items():\n sys.stdout.write(\"\\t%s: %s\\n\" % (k,v))\n sys.stdout.write(\"Backends:\\n\")\n for s in stats[\"backends\"]:\n if not len(s[\"name\"]):\n s[\"name\"] = \"--\"\n if not len(s[\"pools\"]):\n s[\"pools\"] = \"--\"\n sys.stdout.write(\"\\t#%s / %s / %s / %s\\n\" % (s[\"#\"],s[\"address\"],s[\"name\"],s[\"pools\"]) )\n sys.stdout.write(\"\\t\\tNumber of queries: %s\\n\" % s[\"queries\"])\n sys.stdout.write(\"\\t\\tQuery per second: %s\\n\" % s[\"qps\"])\n sys.stdout.write(\"\\t\\tNumber of drops: %s\\n\" % s[\"drops\"])\n sys.stdout.write(\"\\n\")\n sys.stdout.write(\"Ctrl+C to exit\\n\")\n \n time.sleep(1)\n \n \n # move up cursor and delete whole line\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\") \n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n for k,v in lines.items():\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\") \n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n for s in stats[\"backends\"]:\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\") \n sys.stdout.write(\"\\x1b[1A\\x1b[2K\") \n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n \n del stats\n except KeyboardInterrupt:\n break",
"def usage(self, host):",
"def ShowPort(cmd_args=None, cmd_options={}):\n show_kmsgs = True\n if \"-K\" in cmd_options:\n show_kmsgs = False\n if not cmd_args:\n print \"Please specify the address of the port whose details you want to print\"\n print ShowPort.__doc__\n return\n port = kern.GetValueFromAddress(cmd_args[0], 'struct ipc_port *')\n print PrintPortSummary.header\n PrintPortSummary(port, show_kmsgs)",
"def main():\n\n args = parse_args()\n metric_sender = MetricSender(verbose=args.verbose, debug=args.debug)\n\n discovery_key_disk = 'disc.disk'\n interval = 3\n pcp_disk_dev_metrics = ['disk.dev.total', 'disk.dev.avactive']\n item_prototype_macro_disk = '#OSO_DISK'\n item_prototype_key_tps = 'disc.disk.tps'\n item_prototype_key_putil = 'disc.disk.putil'\n\n disk_metrics = pminfo.get_sampled_data(pcp_disk_dev_metrics, interval, 2)\n\n pcp_metrics_divided = {}\n for metric in pcp_disk_dev_metrics:\n pcp_metrics_divided[metric] = {k: v for k, v in disk_metrics.items() if metric in k}\n\n # do TPS checks; use disk.dev.total\n filtered_disk_totals = clean_up_metric_dict(pcp_metrics_divided[pcp_disk_dev_metrics[0]],\n pcp_disk_dev_metrics[0] + '.')\n\n # Add dynamic items\n metric_sender.add_dynamic_metric(discovery_key_disk, item_prototype_macro_disk, filtered_disk_totals.keys())\n\n # calculate the TPS and add them to the ZaggSender\n for disk, totals in filtered_disk_totals.iteritems():\n disk_tps = (totals[1] - totals[0]) / interval\n metric_sender.add_metric({'%s[%s]' % (item_prototype_key_tps, disk): disk_tps})\n\n # do % Util checks; use disk.dev.avactive\n filtered_disk_totals = clean_up_metric_dict(pcp_metrics_divided[pcp_disk_dev_metrics[1]],\n pcp_disk_dev_metrics[1] + '.')\n\n # calculate the % Util and add them to the ZaggSender\n for disk, totals in filtered_disk_totals.iteritems():\n total_active = (float)(totals[1] - totals[0]) / 1000.0\n putil = 100 * total_active / interval\n\n metric_sender.add_metric({'%s[%s]' % (item_prototype_key_putil, disk): putil})\n\n metric_sender.send_metrics()",
"def main(cls):\n parser = cls.make_argument_parser()\n args = parser.parse_args()\n args.device = make_hoomd_device(args)\n benchmark = cls(**vars(args))\n performance = benchmark.execute()\n\n if args.device.communicator.rank == 0:\n print(f'{numpy.mean(performance)}')",
"def usage(err=''):\r\n m = '%s\\n' %err\r\n m += 'Default usage is to list Cases closed for the 30 days\\n'\r\n m += '\\n Example:\\n'\r\n m += ' closedcases -n 90 \\n' \r\n m += ' \\n'\r\n# m += ' closedcases -n 60 -s blast5 \\n'\r\n return m",
"def usage():\n \n print '-b <bench> the bench to show.'\n print '-c <config> the config to show (GPU, 8888, 565, etc).'\n print '-d <dir> a directory containing bench_r<revision>_<scalar> files.'\n print '-e <file> file containing expected bench values/ranges.'\n print ' Will raise exception if actual bench values are out of range.'\n print ' See bench_expectations.txt for data format and examples.'\n print '-f <revision>[:<revision>] the revisions to use for fitting.'\n print ' Negative <revision> is taken as offset from most recent revision.'\n print '-i <time> the time to ignore (w, c, g, etc).'\n print ' The flag is ignored when -t is set; otherwise we plot all the'\n print ' times except the one specified here.'\n print '-l <title> title to use for the output graph'\n print '-m <representation> representation of bench value.'\n print ' See _ListAlgorithm class in bench_util.py.'\n print '-o <path> path to which to write output; writes to stdout if not specified'\n print '-r <revision>[:<revision>] the revisions to show.'\n print ' Negative <revision> is taken as offset from most recent revision.'\n print '-s <setting>[=<value>] a setting to show (alpha, scalar, etc).'\n print '-t <time> the time to show (w, c, g, etc).'\n print '-x <int> the desired width of the svg.'\n print '-y <int> the desired height of the svg.'\n print '--default-setting <setting>[=<value>] setting for those without.'",
"def options():\n print \"\"\"Options summary:\n -h, --help\n -u, --usage\n -v, --verbose <verb_level>\n -e, --endpoint <endpoint>\n -i, --interface-type <iface_type>\n -r, --recursive\n --dbs-conf <conf_file>\n --show-prod\n --show-caf\n --only-subscribed\n --only-custodial\n \"\"\"",
"def port_desc_stats_reply_handler(self, ev):\n msg = ev.msg\n dpid = msg.datapath.id\n ofproto = msg.datapath.ofproto\n\n config_dict = {ofproto.OFPPC_PORT_DOWN: \"Down\",\n ofproto.OFPPC_NO_RECV: \"No Recv\",\n ofproto.OFPPC_NO_FWD: \"No Farward\",\n ofproto.OFPPC_NO_PACKET_IN: \"No Packet-in\"}\n\n state_dict = {ofproto.OFPPS_LINK_DOWN: \"Down\",\n ofproto.OFPPS_BLOCKED: \"Blocked\",\n ofproto.OFPPS_LIVE: \"Live\"}\n\n ports = []\n for p in ev.msg.body:\n ports.append('port_no=%d hw_addr=%s name=%s config=0x%08x '\n 'state=0x%08x curr=0x%08x advertised=0x%08x '\n 'supported=0x%08x peer=0x%08x curr_speed=%d '\n 'max_speed=%d' %\n (p.port_no, p.hw_addr,\n p.name, p.config,\n p.state, p.curr, p.advertised,\n p.supported, p.peer, p.curr_speed,\n p.max_speed))\n\n if p.config in config_dict:\n config = config_dict[p.config]\n else:\n config = \"up\"\n\n if p.state in state_dict:\n state = state_dict[p.state]\n else:\n state = \"up\"\n port_feature = (config, state, p.curr_speed)\n self.port_features[dpid][p.port_no] = port_feature",
"def show_help(argv=None):\n if argv:\n if \"list_datasets\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"list_datasets\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm ls\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n elif \"search_datasets\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"search_datasets\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm search <keyword>\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n elif \"show_mounts\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"show_mounts\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm ps\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n elif \"mount\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"mount\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm mount <dataset_name> [<mount_path>]\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n elif \"mmount\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"mmount\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm mmount <dataset_name> [<dataset_name> ...]\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n elif \"unmount\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"unmount\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm unmount <mount_id> [<cleanup_flag>]\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n elif \"munmount\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"munmount\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm munmount <mount_id> [<mount_id> ...]\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n elif \"clean\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"clean\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm clean\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n else:\n sdm_util.print_message(\"Unrecognized command\")\n return 1\n else:\n sdm_util.print_message(\"command : sdm <COMMAND> [<COMMAND_SPECIFIC_ARGS> ...]\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(\"Available Commands\")\n\n tbl = PrettyTable()\n tbl.field_names = [\"COMMAND\", \"DESCRIPTION\"]\n for cmd in COMMANDS:\n command, _, desc = cmd\n command_str = \" | \".join(command)\n tbl.add_row([command_str, desc])\n\n sdm_util.print_message(tbl)\n sdm_util.print_message(\"\")\n return 0",
"def handleCmdLine(self):\n description = \"Nagios monitoring script to check for open ports\\n\"\n usage = (\"%prog <options>\\n\")\n parser = OptionParser(usage=usage, description=description)\n\n parser.add_option(\"-c\", \"--config\",\n type=\"string\",\n help=\"path to open ports configuration file\")\n parser.add_option(\"-l\", \"--list\",\n type=\"string\",\n help=\"supply list of allowed ports seperated by comma.\")\n\n (self.options, args) = parser.parse_args()",
"def CountAllPorts(cmd_args=None, cmd_options={}):\n p_set = set()\n p_intransit = set()\n p_bytask = {}\n\n find_psets = False\n if \"-P\" in cmd_options:\n find_psets = True\n\n ## optionally include port sets\n ## DO recurse on busy ports\n ## DO log progress\n IterateAllPorts(None, CountPortsCallback, (p_set, p_intransit, p_bytask), find_psets, True, True)\n sys.stderr.write(\"{:120s}\\r\".format(' '))\n\n print \"Total ports found: {:d}\".format(len(p_set))\n print \"In Transit: {:d}\".format(len(p_intransit))\n print \"By Task:\"\n for pname in sorted(p_bytask.keys()):\n count = p_bytask[pname]\n print \"\\t{: <20s}: table={: <5d}, transit={: <5d}, other={: <5d}\".format(pname, count['table'], count['transit'], count['other'])\n return",
"def _get_metrics_options(metrics):\n metrics_options = []\n if metrics is None:\n metrics = []\n for static_metric in metrics:\n metrics_options += [\n \"-m\",\n static_metric.metric.mp_metric_name,\n str(static_metric.value),\n ]\n return metrics_options",
"def test_shortopt(self):\n pp = ParlaiParser(False, False)\n pp.add_argument(\"-m\", \"--model\")\n pp.add_argument(\"-mtw\", \"--multitask-weights\")\n opt = pp.parse_args([\"-m\", \"memnn\"])\n print(opt)",
"def usage(print_code_name=True):\n\tprint(\"*********************************************************************\")\n\tprint(\"* Scanner and Flooder Tool *\")\n\tprint(\"*********************************************************************\")\n\tprint()\n\tprint(\"ex, scan usage: scanner.py -s <target_host> <start_port> <end_port>\")\n\tprint(\"-h, -help\t- print out the description of usage\")\n\tprint(\"-s\t - scan a target host and a range of ports\\n\"\n\t\t\t\" Requires three args, <host> and <port start> and <port end>\")\n\tprint(\"-l - list the sets of ports found open for all hosts scanned\")\n\tprint(\"-pf - flood a target host with an ICMP PING flood.\\n\" \n\t\t\t\" Requires three args, <host> and <port start> and <port end>\")\n\tprint(\"-syn - flood a target host with an SYN ACK flood.\\n\"\n\t\t \" Requires two arguments: <host>, <ports> in format of 'p1,p2,p3,...,pn'. Has optional third argument, <amount> \")\n\tprint(\"-udp - DDOS a target host with UPD Packets.\\n\"\n\t\t \" Requires 3 arguments: <host>, <port>, <amount> (default =1)\")\n\tprint(\"-a - save hosts and open ports to a .txt file\")\n\tprint(\"-r - read in hosts and open ports from a .txt file\")\n\tprint()\n\tprint()\n\tprint(\"Examples: \")\n\tprint(\"-l\")\n\tprint(\"-s 192.168.0.1 0 500 # host, port range (space delimited)\")\n\tprint(\"-pf 192.168.0.1 100 # host, num of pings (optional, defaults to 1)\")\n\tprint(\"-syn 192.168.0.1 80,8080 100 # host, ports (comma delimited), and amount (optional)\")\n\tprint(\"-udp 192.168.0.1 80 100 # host, port, amount (optional, defaults to 1)\")",
"def show_meraki_mx_ports(self, job_req):\n logger.info(\"Job Received : %s\", job_req)\n api_uri = f\"/v1/networks/{self.meraki_net}/appliance/ports\"\n data = get_meraki_api_data(api_uri)\n # Parse the JSON\n message = \"Here is the detail: \\n\"\n port_counter = 0\n check_icon = chr(0x2705)\n for mx_port in data:\n message += f\"* **{mx_port['number']}** | Port Mode: **{mx_port['type']}** | Vlan ID: **{mx_port['vlan']}** \\n\"\n port_counter += 1\n message += f\"{check_icon} Total: **{port_counter}** \\n\" \n return message",
"def metrics(self, account_id):\n from pureport_client.commands.accounts.metrics import Command\n return Command(self.client, account_id)",
"def print_help():\n print \"\"\"\nMeasure Operating System Performance (mosp)\n-------------------------------------------\n\nUse this program to measure and report on operating system\nperformance.\n\nThis code measures operating system performance,\nincluding CPU, memory, disk and network, and\noutputs stats to screen and optionally to file\ntoo for use in performance analysis\n\nUses the psutil library\n\nInstall psutil (Ubuntu) if you don't already have it:\n sudo apt-get install python-dev\n sudo pip install psutil\n\nUsage:\n python mosp.py [options]\n\nExample usage:\n python mosp.py -W -i 2\n\nOptions:\n -h --help Display this help and exit\n -m --max-run-time Maximum time to run for before exiting\n (default is infinite)\n -i --interval Interval between requests in seconds\n (default is 1)\n -w --output-file Specify an output filename\n -W Output results to default filename\n default format is:\n mosp-HOSTNAME-YYYYMMDD-HHMMSS.csv\n -b --output-path Specify path to output file directory\n -j --no-header-row Suppress writing header row into CSV\n -v --version Output version information and exit\n\n \"\"\"\n return()",
"def Usage(shorthelp=0, writeto_stdout=0, detailed_error=None,\n exitcode=None, show_cmd=None, show_global_flags=False):\n printer('%s: Incorrect usage; details below.' % show_cmd)\n printer('Correct usage is as follows:')\n printer('')\n for line in (' ' + cmd.__doc__.rstrip()).splitlines():\n printer(line)\n # Print out str(FLAGS) for just the UICmd-specific flags.\n tmp_flags = flags.FlagValues()\n unused_cmd = type(cmd)(show_cmd, tmp_flags)\n prefix = _UICMD_MODULE_NAME + ':\\n'\n flag_str = tmp_flags.ModuleHelp(_UICMD_MODULE_NAME)\n flag_str = flag_str.lstrip()\n if flag_str.startswith(prefix):\n flag_str = flag_str[len(prefix):]\n if flag_str:\n printer('')\n printer('flags:')\n for line in flag_str.splitlines():\n printer(line)\n if detailed_error is not None:\n printer('')\n printer('The incorrect usage is as follows:')\n printer('')\n for line in unicode(detailed_error).splitlines():\n printer(' ' + line)",
"def build_command(args, parser):\n cmd = \"ipmitool -I lanplus\"\n if not args.host:\n print \"\\nERROR: hostname is required.\\n\"\n parser.print_help()\n sys.exit(1)\n else:\n cmd += ' -H ' + args.host\n if args.port:\n cmd += ' -p ' + args.port\n if not args.user:\n print \"\\nERROR: username is required.\\n\"\n parser.print_help()\n sys.exit(1)\n else:\n cmd += ' -U ' + args.user\n if args.passwd:\n cmd += ' -P ' + args.passwd\n cmd += ' dcmi power reading'\n if args.interval:\n global INTERVAL\n INTERVAL = args.interval\n if args.nread:\n global NREAD\n NREAD = args.nread\n else:\n global INFINITY\n INFINITY = True\n if args.store:\n global STORE\n STORE = True\n return cmd",
"def printOptions(opts,subject_ids,session_ids,task_list, run_list, acq, rec):\n uname = os.popen('uname -s -n -r').read()\n print \"\\n\"\n print \"* Pipeline started at \"+time.strftime(\"%c\")+\"on \"+uname\n print \"* Command line is : \\n \"+str(sys.argv)+\"\\n\"\n print \"* The source directory is : \"+opts.sourceDir\n print \"* The target directory is : \"+opts.targetDir+\"\\n\"\n print \"* Data-set Subject ID(s) is/are : \"+str(', '.join(subject_ids))+\"\\n\"\n # print \"* PET conditions : \"+ ','.join(opts.condiList)+\"\\n\"\n print \"* Sessions : \", session_ids, \"\\n\"\n print \"* Tasks : \" , task_list , \"\\n\"\n print \"* Runs : \" , run_list , \"\\n\"\n print \"* Acquisition : \" , acq , \"\\n\"\n print \"* Reconstruction : \" , rec , \"\\n\"",
"def print_help(self):\r\n\t\ttext = \"\\tName: ml_scikit_OPTICS\"\r\n\t\ttext += \"\\n\\t\\tThis machine learning plugin uses scikit-learn's OPTICS algorithm.\\n\"\r\n\t\ttext += \"\\n\\t\\tOptional Parameters:\"\r\n\t\ttext += \"\\n\\t\\t\\tOPTICS_skip_normalization: Do NOT perform normalization (scaling) of data, skip this step.\"\r\n\t\ttext += \"\\n\\t\\t\\OPTICS_eps: Specify eps parameter (default is 1.0).\"\r\n\t\ttext += \"\\n\\t\\t\\OPTICS_min_samples: Specify min_samples parameter (default is 5).\"\r\n#\r\n# OPTICS (with memory complexity n) is an alternative to DBSCAN (with memory complexity n^2)\r\n# which has time complexity n^2 in general with the default max_eps = np.inf. \r\n# We will set max_eps = eps to reduce the run-time.\r\n#\r\n\t\treturn text",
"def display_memcache_info(request):\n # pylint: disable-msg=E1101\n return utility.respond(request, 'admin/memcache_info',\n {'memcache_info': memcache.get_stats()})"
] | [
"0.6124976",
"0.59950477",
"0.5756093",
"0.5687688",
"0.55233353",
"0.5309227",
"0.52750784",
"0.5252932",
"0.5208929",
"0.5170246",
"0.5106128",
"0.50764817",
"0.50519806",
"0.50233275",
"0.5003854",
"0.4984393",
"0.4930946",
"0.49051604",
"0.49042228",
"0.49029616",
"0.48779762",
"0.48247278",
"0.48189783",
"0.4812931",
"0.47986087",
"0.47940293",
"0.47751278",
"0.477329",
"0.4772547",
"0.47691017"
] | 0.70290184 | 0 |
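The `metrics_networkport` document in the row above, like the `metrics_nic` one in the next row, reduces to a single dispatch idiom: the command function builds a zero-argument lambda around a worker call and hands it to `cmd_ctx.execute_cmd`. The sketch below illustrates that pattern only; the `CmdContext` class, its error handling, and the worker name are assumptions for illustration, not the real zhmccli internals.

class CmdContext:
    """Hypothetical command context: holds session state and centralizes error handling."""

    def execute_cmd(self, cmd):
        try:
            # `cmd` is the deferred worker call built by the command function, e.g.
            # lambda: cmd_metrics_networkport(cmd_ctx, cpc, adapter, options)
            cmd()
        except Exception as exc:
            # One place to turn any worker failure into a clean CLI exit.
            raise SystemExit("Error: {}".format(exc))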
Report usage metrics for the NICs of partitions of CPCs in DPM mode. In addition to the command-specific options shown in this help text, the general options (see 'zhmc help') can also be specified right after the 'zhmc' command name. | def metrics_nic(cmd_ctx, cpc, partition, nic, **options):
cmd_ctx.execute_cmd(
lambda: cmd_metrics_nic(cmd_ctx, cpc, partition, nic, options)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def metrics_cpc(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_cpc(cmd_ctx, cpc, options))",
"def metrics_networkport(cmd_ctx, cpc, adapter, **options):\n cmd_ctx.execute_cmd(\n lambda: cmd_metrics_networkport(cmd_ctx, cpc, adapter, options))",
"def metrics_channel(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_channel(cmd_ctx, cpc, options))",
"def do_hostinfo(self, args):\n host = opts = None\n if args:\n args = args.split()\n host = args.pop()\n\n if not host:\n print('Usage: hostinfo [-cdmu] host_name_or_ip')\n print(' uptime and load stats returned if no options specified')\n return\n\n try:\n ip = socket.gethostbyname(host)\n except socket.gaierror:\n print('cannot resolve', host, file=sys.stderr)\n return\n\n opts = []\n while args:\n arg = args.pop(0)\n if arg.startswith('--'):\n if arg == '--cpu':\n opts.append('c')\n elif arg == '--disk':\n opts.append('d')\n elif arg == '--memory':\n opts.append('m')\n elif arg == '--uptime':\n opts.append('u')\n else:\n print('unrecognized option:', arg, file=sys.stderr)\n return\n else:\n if arg[0] == '-':\n for ch in arg[1:]:\n if ch in ('cdmu') and ch not in opts:\n opts.append(ch)\n else:\n print('unrecognized option:', ch, file=sys.stderr)\n return\n\n stats = self._qm.get_host_stats(ip)\n\n if not opts:\n # Get uptime and load averages.\n up = stats['uptime']\n load = stats['cpu_load']\n print('Up for %s days, %s hours, %s minutes, '\n 'load averages: %s, %s, %s'\n % (up['days'], up['hours'], up['minutes'], load['one'],\n load['five'], load['fifteen']))\n return\n\n all_stats = []\n for opt in opts:\n if opt == 'd':\n # Get disk usage.\n disks = stats['disk_usage']\n st = ['Disk Usage:']\n for mount, disk_info in disks.viewitems():\n st.append(' Usage for: %s' % mount)\n for k, v in disk_info.viewitems():\n st.append(' %s: %s' % (k, v))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'c':\n # Get CPU load.\n load_stats = stats['cpu_load']\n st = ['CPU Load Average:']\n st.append(' last one minute: %s' % load_stats['one'])\n st.append(' last five minutes: %s' % load_stats['five'])\n st.append(' last fifteen minutes: %s' % load_stats['fifteen'])\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'm':\n # Get Memory Usage.\n memory_usage = stats['memory_usage']\n st = ['Memory usage:']\n for k, v in memory_usage.viewitems():\n st.append(' %s: %s' % (k, v))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'u':\n # Get uptime.\n up = stats['uptime']\n st = ['Uptime:']\n st.append(' Up for %s days, %s hours and %s minutes'\n % (up['days'], up['hours'], up['minutes']))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n\n print('\\n'.join(all_stats))",
"def metrics_proc(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_proc(cmd_ctx, cpc, options))",
"def main():\n\n args = parse_args()\n metric_sender = MetricSender(verbose=args.verbose, debug=args.debug)\n\n discovery_key_disk = 'disc.disk'\n interval = 3\n pcp_disk_dev_metrics = ['disk.dev.total', 'disk.dev.avactive']\n item_prototype_macro_disk = '#OSO_DISK'\n item_prototype_key_tps = 'disc.disk.tps'\n item_prototype_key_putil = 'disc.disk.putil'\n\n disk_metrics = pminfo.get_sampled_data(pcp_disk_dev_metrics, interval, 2)\n\n pcp_metrics_divided = {}\n for metric in pcp_disk_dev_metrics:\n pcp_metrics_divided[metric] = {k: v for k, v in disk_metrics.items() if metric in k}\n\n # do TPS checks; use disk.dev.total\n filtered_disk_totals = clean_up_metric_dict(pcp_metrics_divided[pcp_disk_dev_metrics[0]],\n pcp_disk_dev_metrics[0] + '.')\n\n # Add dynamic items\n metric_sender.add_dynamic_metric(discovery_key_disk, item_prototype_macro_disk, filtered_disk_totals.keys())\n\n # calculate the TPS and add them to the ZaggSender\n for disk, totals in filtered_disk_totals.iteritems():\n disk_tps = (totals[1] - totals[0]) / interval\n metric_sender.add_metric({'%s[%s]' % (item_prototype_key_tps, disk): disk_tps})\n\n # do % Util checks; use disk.dev.avactive\n filtered_disk_totals = clean_up_metric_dict(pcp_metrics_divided[pcp_disk_dev_metrics[1]],\n pcp_disk_dev_metrics[1] + '.')\n\n # calculate the % Util and add them to the ZaggSender\n for disk, totals in filtered_disk_totals.iteritems():\n total_active = (float)(totals[1] - totals[0]) / 1000.0\n putil = 100 * total_active / interval\n\n metric_sender.add_metric({'%s[%s]' % (item_prototype_key_putil, disk): putil})\n\n metric_sender.send_metrics()",
"def metrics_partition(cmd_ctx, cpc, partition, **options):\n cmd_ctx.execute_cmd(\n lambda: cmd_metrics_partition(cmd_ctx, cpc, partition, options))",
"def metrics_adapter(cmd_ctx, cpc, adapter, **options):\n cmd_ctx.execute_cmd(\n lambda: cmd_metrics_adapter(cmd_ctx, cpc, adapter, options))",
"def metrics_crypto(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_crypto(cmd_ctx, cpc, options))",
"def print_help(self):\r\n\t\ttext = \"\\tName: ml_scikit_OPTICS\"\r\n\t\ttext += \"\\n\\t\\tThis machine learning plugin uses scikit-learn's OPTICS algorithm.\\n\"\r\n\t\ttext += \"\\n\\t\\tOptional Parameters:\"\r\n\t\ttext += \"\\n\\t\\t\\tOPTICS_skip_normalization: Do NOT perform normalization (scaling) of data, skip this step.\"\r\n\t\ttext += \"\\n\\t\\t\\OPTICS_eps: Specify eps parameter (default is 1.0).\"\r\n\t\ttext += \"\\n\\t\\t\\OPTICS_min_samples: Specify min_samples parameter (default is 5).\"\r\n#\r\n# OPTICS (with memory complexity n) is an alternative to DBSCAN (with memory complexity n^2)\r\n# which has time complexity n^2 in general with the default max_eps = np.inf. \r\n# We will set max_eps = eps to reduce the run-time.\r\n#\r\n\t\treturn text",
"def stat_cuda(msg: str) -> None:\n print(f'-- {msg:<35} allocated: %dM, max allocated: %dM, cached: %dM, max cached: %dM' % (\n torch.cuda.memory_allocated() / 1024 / 1024,\n torch.cuda.max_memory_allocated() / 1024 / 1024,\n torch.cuda.memory_cached() / 1024 / 1024,\n torch.cuda.max_memory_cached() / 1024 / 1024\n ))",
"def _cmd_metrics(args):\n if (\n len(args.cnarrays) > 1\n and args.segments\n and len(args.segments) > 1\n and len(args.cnarrays) != len(args.segments)\n ):\n raise ValueError(\n \"Number of coverage/segment filenames given must be \"\n \"equal, if more than 1 segment file is given.\"\n )\n\n cnarrs = map(read_cna, args.cnarrays)\n if args.segments:\n args.segments = map(read_cna, args.segments)\n table = metrics.do_metrics(cnarrs, args.segments, args.drop_low_coverage)\n write_dataframe(args.output, table)",
"def metrics_env(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_env(cmd_ctx, cpc, options))",
"def show_help(argv=None):\n if argv:\n if \"list_datasets\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"list_datasets\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm ls\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n elif \"search_datasets\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"search_datasets\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm search <keyword>\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n elif \"show_mounts\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"show_mounts\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm ps\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n elif \"mount\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"mount\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm mount <dataset_name> [<mount_path>]\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n elif \"mmount\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"mmount\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm mmount <dataset_name> [<dataset_name> ...]\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n elif \"unmount\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"unmount\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm unmount <mount_id> [<cleanup_flag>]\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n elif \"munmount\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"munmount\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm munmount <mount_id> [<mount_id> ...]\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n elif \"clean\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"clean\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm clean\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n else:\n sdm_util.print_message(\"Unrecognized command\")\n return 1\n else:\n sdm_util.print_message(\"command : sdm <COMMAND> [<COMMAND_SPECIFIC_ARGS> ...]\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(\"Available Commands\")\n\n tbl = PrettyTable()\n tbl.field_names = [\"COMMAND\", \"DESCRIPTION\"]\n for cmd in COMMANDS:\n command, _, desc = cmd\n command_str = \" | \".join(command)\n tbl.add_row([command_str, desc])\n\n sdm_util.print_message(tbl)\n sdm_util.print_message(\"\")\n return 0",
"def show(self):\n prev_queries = 0\n prev_cpu_sys = 0\n prev_cpu_user = 0\n \n lines = {\n \"Uptime (seconds)\": \"--\",\n \"Number of queries\": \"--\",\n \"Query per second\": \"--\",\n \"ACL drops\": \"--\",\n \"Dynamic drops\": \"--\",\n \"Rule drops\": \"--\",\n \"CPU Usage (%s)\": \"--\",\n \"Cache hitrate\": \"--\"\n }\n\n while True:\n try:\n # get stats from dnsdist\n stats = Statistics(console=self.console)\n global_stats = stats[\"global\"]\n \n qps = int(global_stats[\"queries\"]) - prev_queries\n prev_queries = int(global_stats[\"queries\"])\n cpu = (int(global_stats[\"cpu-sys-msec\"])+int(global_stats[\"cpu-user-msec\"]) - prev_cpu_sys - prev_cpu_user) / 10\n prev_cpu_sys = int(global_stats[\"cpu-sys-msec\"])\n prev_cpu_user = int(global_stats[\"cpu-user-msec\"])\n \n lines[\"Uptime (seconds)\"] = global_stats[\"uptime\"]\n lines[\"Number of queries\"] = global_stats[\"queries\"]\n lines[\"Query per second\"] = qps\n lines[\"CPU Usage (%s)\"] = cpu\n lines[\"ACL drops\"] = global_stats[\"acl-drops\"]\n lines[\"Rule drops\"] = global_stats[\"rule-drop\"]\n lines[\"Cache hitrate\"] = global_stats[\"cache-hits\"]\n lines[\"Dynamic drops\"] = global_stats[\"dyn-blocked\"]\n\n # reprint the lines \n sys.stdout.write(\"\\033[1mDashboard for dnsdist\\033[0m\\n\")\n sys.stdout.write(\"\\n\")\n sys.stdout.write(\"Global:\\n\")\n for k,v in lines.items():\n sys.stdout.write(\"\\t%s: %s\\n\" % (k,v))\n sys.stdout.write(\"Backends:\\n\")\n for s in stats[\"backends\"]:\n if not len(s[\"name\"]):\n s[\"name\"] = \"--\"\n if not len(s[\"pools\"]):\n s[\"pools\"] = \"--\"\n sys.stdout.write(\"\\t#%s / %s / %s / %s\\n\" % (s[\"#\"],s[\"address\"],s[\"name\"],s[\"pools\"]) )\n sys.stdout.write(\"\\t\\tNumber of queries: %s\\n\" % s[\"queries\"])\n sys.stdout.write(\"\\t\\tQuery per second: %s\\n\" % s[\"qps\"])\n sys.stdout.write(\"\\t\\tNumber of drops: %s\\n\" % s[\"drops\"])\n sys.stdout.write(\"\\n\")\n sys.stdout.write(\"Ctrl+C to exit\\n\")\n \n time.sleep(1)\n \n \n # move up cursor and delete whole line\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\") \n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n for k,v in lines.items():\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\") \n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n for s in stats[\"backends\"]:\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\") \n sys.stdout.write(\"\\x1b[1A\\x1b[2K\") \n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n \n del stats\n except KeyboardInterrupt:\n break",
"def print_help():\n print \"\"\"\nMeasure Operating System Performance (mosp)\n-------------------------------------------\n\nUse this program to measure and report on operating system\nperformance.\n\nThis code measures operating system performance,\nincluding CPU, memory, disk and network, and\noutputs stats to screen and optionally to file\ntoo for use in performance analysis\n\nUses the psutil library\n\nInstall psutil (Ubuntu) if you don't already have it:\n sudo apt-get install python-dev\n sudo pip install psutil\n\nUsage:\n python mosp.py [options]\n\nExample usage:\n python mosp.py -W -i 2\n\nOptions:\n -h --help Display this help and exit\n -m --max-run-time Maximum time to run for before exiting\n (default is infinite)\n -i --interval Interval between requests in seconds\n (default is 1)\n -w --output-file Specify an output filename\n -W Output results to default filename\n default format is:\n mosp-HOSTNAME-YYYYMMDD-HHMMSS.csv\n -b --output-path Specify path to output file directory\n -j --no-header-row Suppress writing header row into CSV\n -v --version Output version information and exit\n\n \"\"\"\n return()",
"def main(cls):\n parser = cls.make_argument_parser()\n args = parser.parse_args()\n args.device = make_hoomd_device(args)\n benchmark = cls(**vars(args))\n performance = benchmark.execute()\n\n if args.device.communicator.rank == 0:\n print(f'{numpy.mean(performance)}')",
"async def stats(self, ctx):\n if ctx.invoked_subcommand is None:\n await send_cmd_help(ctx)",
"def dicom_cli():",
"def _display_cuda_devices():\n\n cuda_query_output = subprocess.run(\"nvidia-smi --query-gpu=gpu_uuid,gpu_name,compute_mode --format=csv\", shell=True, capture_output=True, text=True)\n # Check if command worked\n if cuda_query_output.returncode == 0:\n # Split by line jump and comma\n cuda_devices_list = [entry for entry in cuda_query_output.stdout.splitlines()]\n logger.debug(f\"CUDA devices available: {*cuda_devices_list,}\")\n # We only support \"Default\" and not \"Exclusive_Process\" for the compute mode\n if \"Default\" not in cuda_query_output.stdout:\n logger.warning(f\"GPU in 'Exclusive_Process' mode (or Prohibited), one context is allowed per device. This may prevent some openmmtools features from working. GPU must be in 'Default' compute mode\")\n # Handel the case where the command had some error\n else:\n logger.debug(f\"nvidia-smi command failed: {cuda_query_output.stderr}, this is expected if there is no GPU available\")",
"def metrics_flash(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_flash(cmd_ctx, cpc, options))",
"def main():\n parser = argparse.ArgumentParser(\n description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter\n )\n add_common_snmp_args(parser)\n parser.add_argument(\n \"-w\",\n \"--warning\",\n type=int,\n default=70,\n help=\"Warning memory usage percentage (0-100)\",\n )\n parser.add_argument(\n \"-c\",\n \"--critical\",\n type=int,\n default=90,\n help=\"Critical memory usage percentage (0-100)\",\n )\n parser.add_argument(\n \"-f\",\n \"--family\",\n required=True,\n help=\"Switch family. Supported families: 1910, 1920, 1920S\",\n )\n\n config = vars(parser.parse_args())\n check_snmp_args(config)\n check_thresholds(config)\n\n dataset = {}\n\n if config[\"family\"] == \"1920S\":\n cpu = ObjectType(\n ObjectIdentity(\n \"HP-SWITCHING-MIB\", \"agentSwitchCpuProcessTotalUtilization\", 0\n )\n )\n elif config[\"family\"] in [\"1910\", \"1920\"]:\n cpu = ObjectType(\n ObjectIdentity(\"HH3C-ENTITY-EXT-MIB\", \"hh3cEntityExtCpuUsage\", 8)\n )\n else:\n unknown_exit(SERVICE, f\"Switch family {config['family']} NOT known\")\n\n try:\n raw_data = get_snmp_data(config, cpu)\n except ValueError as err:\n unknown_exit(SERVICE, err)\n add_vars_to_dataset(dataset, raw_data)\n\n if config[\"family\"] == \"1920S\":\n dataset[\"cpu_usage\"] = get_hp_cpu_usage(\n dataset[\"agentSwitchCpuProcessTotalUtilization\"]\n )\n elif config[\"family\"] in [\"1910\", \"1920\"]:\n dataset[\"cpu_usage\"] = int(dataset[\"hh3cEntityExtCpuUsage\"])\n else:\n unknown_exit(SERVICE, f\"Switch family {config['family']} NOT known\")\n\n state, message = generate_output(config, dataset)\n report(state, message)",
"def collect_cluster_info(output_dir, k8s_cli):\n collect_helper(output_dir, cmd=\"{} cluster-info\".format(k8s_cli),\n file_name=\"cluster_info\", resource_name=\"cluster-info\")",
"def display_memcache_info(request):\n # pylint: disable-msg=E1101\n return utility.respond(request, 'admin/memcache_info',\n {'memcache_info': memcache.get_stats()})",
"def do_stats(self, args):\n total_cpu = free_cpu = in_use_cpu = 0\n\n summary = self._qm.get_all_host_summary()\n for host_id, host_info in summary.viewitems():\n host_cpu = int(host_info['total cores'])\n total_cpu += host_cpu\n locked = host_info.get('locked by')\n if locked:\n # If host is locked then all CPUs are in use.\n in_use_cpu += host_cpu\n else:\n free_host_cpu = int(host_info['free cores'])\n in_use_cpu += (host_cpu - free_host_cpu)\n free_cpu += free_host_cpu\n\n print('total CPU: ', total_cpu)\n print('used/locked CPU: ', in_use_cpu)\n print('free CPU: ', free_cpu)\n capacity = float(in_use_cpu) / float(total_cpu)\n print('capacity used: %.1f%%' % (capacity * 100,))\n capacity = float(free_cpu) / float(total_cpu)\n print('capacity remaining: %.1f%%' % (capacity * 100,))",
"def Usage(shorthelp=0, writeto_stdout=0, detailed_error=None,\n exitcode=None, show_cmd=None, show_global_flags=False):\n printer('%s: Incorrect usage; details below.' % show_cmd)\n printer('Correct usage is as follows:')\n printer('')\n for line in (' ' + cmd.__doc__.rstrip()).splitlines():\n printer(line)\n # Print out str(FLAGS) for just the UICmd-specific flags.\n tmp_flags = flags.FlagValues()\n unused_cmd = type(cmd)(show_cmd, tmp_flags)\n prefix = _UICMD_MODULE_NAME + ':\\n'\n flag_str = tmp_flags.ModuleHelp(_UICMD_MODULE_NAME)\n flag_str = flag_str.lstrip()\n if flag_str.startswith(prefix):\n flag_str = flag_str[len(prefix):]\n if flag_str:\n printer('')\n printer('flags:')\n for line in flag_str.splitlines():\n printer(line)\n if detailed_error is not None:\n printer('')\n printer('The incorrect usage is as follows:')\n printer('')\n for line in unicode(detailed_error).splitlines():\n printer(' ' + line)",
"def printOptions(opts,subject_ids,session_ids,task_list, run_list, acq, rec):\n uname = os.popen('uname -s -n -r').read()\n print \"\\n\"\n print \"* Pipeline started at \"+time.strftime(\"%c\")+\"on \"+uname\n print \"* Command line is : \\n \"+str(sys.argv)+\"\\n\"\n print \"* The source directory is : \"+opts.sourceDir\n print \"* The target directory is : \"+opts.targetDir+\"\\n\"\n print \"* Data-set Subject ID(s) is/are : \"+str(', '.join(subject_ids))+\"\\n\"\n # print \"* PET conditions : \"+ ','.join(opts.condiList)+\"\\n\"\n print \"* Sessions : \", session_ids, \"\\n\"\n print \"* Tasks : \" , task_list , \"\\n\"\n print \"* Runs : \" , run_list , \"\\n\"\n print \"* Acquisition : \" , acq , \"\\n\"\n print \"* Reconstruction : \" , rec , \"\\n\"",
"def phast_cmmd(self):\n temp = '{prog} -R {rho} -C {ecov} -E {elen} -N {chrom} -i MAF {maf} {model} > {wig}\\n'.format(**self.dict)\n return temp.format(fnum=self.fnum)",
"def gather_info_and_display():\n # Obtain total rss displayed in memory.stat for each group,\n # container and service.\n try:\n output_mem = pipe_command(GREP_CMD, AWK_CMD, cwd=MEMPATH)\n LOG.debug(\n 'command: %s\\n%s',\n \"grep -rs total_rss '/sys/fs/cgroup/memory/' \"\n \"| awk '$2>0{print$0}' \",\n output_mem)\n except subprocess.CalledProcessError as error:\n LOG.error('Could not get total_rss memory, error=%s', error)\n return 1\n\n mem_info = get_meminfo()\n pt_groups = gather_groups_memory(output_mem)\n pt_cont = gather_containers_memory(output_mem)\n pt_serv = sys_service_memory()\n\n # Dump the tables out\n print('\\nPer groups memory usage:')\n\n # Get string to be printed and create list of elements separated by \\n\n list_of_table_lines = pt_groups.get_string().split('\\n')\n\n # Use the first line (+---+-- ...) as horizontal rule to insert later\n horizontal_line = list_of_table_lines[0]\n\n # Print the table, except last two lines ( \"Total\" row + final separator).\n print(\"\\n\".join(list_of_table_lines[:-2]))\n # Print separator, and finally the \"Total\" row.\n print(horizontal_line)\n print(\"\\n\".join(list_of_table_lines[-2:]))\n\n pt_namespc = prettytable.PrettyTable(\n ['Namespace',\n 'Resident Set Size (MiB)',\n ], caching=False)\n pt_namespc.align = 'l'\n pt_namespc.align['Resident Set Size (MiB)'] = 'r'\n\n print('\\nPer namespace memory usage:')\n for n_s in MEMORY['namespaces']:\n pt_namespc.add_row(\n [n_s,\n MEMORY['namespaces'][n_s],\n ])\n print(pt_namespc)\n\n print('\\nPer container memory usage:')\n print(pt_cont)\n\n print('\\nPer service memory usage:')\n print(pt_serv)\n\n base_mebib = 0.0\n k8s_system = 0.0\n k8s_addon = 0.0\n platform_memory_percent = 0.0\n\n # Calculate base memory usage (i.e., normal memory, exclude K8S and VMs)\n # e.g., docker, system.slice, user.slice\n for group in MEMORY['cgroups']:\n if group in BASE_GROUPS:\n base_mebib += float(MEMORY['cgroups'][group])\n\n # K8S platform system usage (essential) and addons usage (non-essential)\n for n_s in MEMORY['namespaces']:\n if n_s in K8S_NAMESPACE_SYSTEM:\n k8s_system += MEMORY['namespaces'][n_s]\n elif n_s in K8S_NAMESPACE_ADDON:\n k8s_addon += MEMORY['namespaces'][n_s]\n\n # Calculate platform memory usage\n platform_mebib = base_mebib + k8s_system\n\n anon_mebib = float(mem_to_mebibytes(\n mem_info['Active(anon)'] + mem_info['Inactive(anon)'])) * KBYTE\n avail_mebib = float(mem_to_mebibytes(\n mem_info['MemAvailable'])) * KBYTE\n total_mebib = float(anon_mebib + avail_mebib)\n\n anon_percent = py2_round(100 * anon_mebib / total_mebib, DECIMAL_DIGITS) # pylint: disable=W1619\n\n reserved_mebib = get_platform_reserved_memory()\n # Calculate platform memory in terms of percent reserved\n if reserved_mebib > 0.0:\n platform_memory_percent = py2_round(\n 100 * platform_mebib / reserved_mebib, DECIMAL_DIGITS) # pylint: disable=W1619\n\n pt_platf = prettytable.PrettyTable(\n ['Reserved',\n 'Platform',\n 'Base',\n 'K8s Platform system',\n 'k8s-addon'\n ], caching=False)\n pt_platf.align = 'l'\n\n pt_platf.add_row(\n [reserved_mebib,\n '{} ({}%)'.format(platform_mebib, platform_memory_percent),\n base_mebib,\n k8s_system,\n k8s_addon\n ])\n print('\\nPlatform memory usage in MiB:')\n print(pt_platf)\n\n pt_4k = prettytable.PrettyTable(\n ['Anon',\n 'Cgroup-rss',\n 'Available',\n 'Total'\n ], caching=False)\n pt_4k.align = 'l'\n\n pt_4k.add_row(\n ['{} ({}%)'.format(anon_mebib, anon_percent),\n MEMORY['cgroups']['total_rss'],\n avail_mebib,\n total_mebib\n ])\n\n print('\\n4K memory 
usage in MiB:')\n print(pt_4k)\n\n return 0",
"def show_mem(cmd, cnt, args):\n if cpu is None:\n log(\"Load program first\") \n return\n elif len(cpu.memory) == 0:\n log(\"Load program first\") \n return \n chunk = 0\n chunk_count = len(cpu.memory)\n while chunk < chunk_count: \n chunk_start = cpu.memory[chunk][MEMADDR]\n chunk_end = chunk_start + cpu.memory[chunk][MEMSIZE] \n log(\"{:d} {:#x}..{:#x}\".format(chunk, chunk_start, chunk_end)) \n chunk += 1\n if machine == \"ARM\":\n if len(cpu.high_memory) != 0:\n log(\"High memory\")\n for addr in sorted(cpu.high_memory):\n log(\"{:#x}\".format(addr))"
] | [
"0.6029862",
"0.59694433",
"0.593867",
"0.5872437",
"0.57943094",
"0.56738853",
"0.5640134",
"0.5486397",
"0.5422634",
"0.53095937",
"0.5250618",
"0.5230403",
"0.510633",
"0.5087089",
"0.50808835",
"0.5045866",
"0.49836123",
"0.4982057",
"0.49766484",
"0.4954867",
"0.49396986",
"0.49386987",
"0.49295557",
"0.49147475",
"0.49089444",
"0.49000442",
"0.4887093",
"0.48772013",
"0.48531625",
"0.483787"
] | 0.6058172 | 0 |
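Every row in this dump carries the same metadata objective, a triplet over (query, document, negatives), with negative_scores giving each hard negative's similarity to the query. A minimal, hypothetical way to expand one parsed row into training triplets is sketched below; the `row` field names mirror the columns shown here, and the helper itself is an assumption, not part of the dataset.

def row_to_triplets(row):
    # One (anchor, positive, negative) triplet per hard negative, following the
    # declared objective: "triplet": [["query", "document", "negatives"]].
    return [(row["query"], row["document"], neg) for neg in row["negatives"]]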
Computes the Modulation Spectrum-Based ECG Quality Index (MSQI) for one or more ECG signals defined in x, sampled with a sampling frequency fs | def msqi_ama(x, fs):
    # Ensure x is 2-D (samples, channels); promote a 1-D signal to a single channel
try:
x.shape[1]
except IndexError:
x = x[:, np.newaxis]
# Empirical values for the STFFT transformation
win_size_sec = 0.125 #seconds
win_over_sec = 0.09375 #seconds
nfft_factor_1 = 16
nfft_factor_2 = 4
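    # nfft_factor_1 and nfft_factor_2 control the FFT resolution of the first
    # (time-frequency) and second (modulation-frequency) transform stages, respectively.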
win_size_smp = int(win_size_sec * fs) #samples
win_over_smp = int(win_over_sec * fs) #samples
win_shft_smp = win_size_smp - win_over_smp
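    # Window shift = 0.125 s - 0.09375 s = 0.03125 s, i.e. 32 analysis frames per
    # second of ECG, independent of fs (window size and overlap are fixed in seconds).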
# Computes Modulation Spectrogram
modulation_spectrogram = ama.strfft_modulation_spectrogram(x, fs, win_size_smp,
win_shft_smp, nfft_factor_1, 'cosine', nfft_factor_2, 'cosine' )
# Find fundamental frequency (HR)
# f = (0, 40)Hz
ix_f_00 = (np.abs(modulation_spectrogram['freq_axis'] - 0)).argmin(0)
ix_f_40 = (np.abs(modulation_spectrogram['freq_axis'] - 40)).argmin(0) + 1
    # Mask modulation frequencies outside 0.66-3 Hz (about 40-180 bpm); the HR peak is searched only inside this band
valid_f_ix = np.logical_or(modulation_spectrogram['freq_mod_axis'] < 0.66 , modulation_spectrogram['freq_mod_axis'] > 3)
# number of epochs
n_epochs = modulation_spectrogram['power_modulation_spectrogram'].shape[2]
msqi_vals = np.zeros(n_epochs)
hr_vals = np.zeros(n_epochs)
for ix_epoch in range(n_epochs):
B = np.sqrt(modulation_spectrogram['power_modulation_spectrogram'][:, :, ix_epoch])
        # Scale to the maximum of B
B = B / np.max(B)
        # Sum B over the conventional-frequency axis from 0 to 40 Hz
tmp = np.sum(B[ix_f_00:ix_f_40, :], axis=0)
        # Restrict the peak search to 0.66-3 Hz (about 40-180 bpm)
tmp[valid_f_ix] = 0
ix_max = np.argmax(tmp)
freq_funda = modulation_spectrogram['freq_mod_axis'][ix_max]
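        # Energy decomposition used below: TME = total modulation energy of the epoch,
        # EME = energy within +/-0.3125 Hz of the HR fundamental and its 2nd-4th
        # harmonics (the beat-related part, limited to 0-40 Hz), RME = TME - EME,
        # and the quality index is the ratio EME / RME.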
# TME
tme = np.sum(B)
eme = 0
for ix_harm in range(1, 5):
ix_fm = (np.abs(modulation_spectrogram['freq_mod_axis'] - (ix_harm * freq_funda) )).argmin(0)
ix_b = int(round(.3125 / modulation_spectrogram['freq_mod_delta'] )) # 0.3125Hz, half lobe
# EME
eme = eme + np.sum(B[ 0 : ix_f_40, ix_fm - ix_b : ix_fm + ix_b + 1 ])
# RME
rme = tme - eme
# MS-QI
msqi_vals[ix_epoch] = eme / rme
# HR
hr_vals[ix_epoch] = freq_funda * 60
return (msqi_vals, hr_vals, modulation_spectrogram) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def find_optimal_Q(x, y, data, min_Q, max_Q, fs=1., added_kernel = None, plot_BIC=True):\n Qs= np.arange(min_Q, max_Q)\n BIC = np.zeros((Qs.shape[0]))\n \n for i, q in enumerate(Qs):\n sm = SpectralMixture(q, x=x.flatten(),y=y.flatten(),fs=fs)\n for k in sm.kernels:\n if isinstance(k, SpectralMixtureComponent):\n k.lengthscale.prior = tfd.Gamma(f64(8.), f64(.6)) \n k.mixture_weight.prior = tfd.Gamma(f64(2.), f64(1.))\n\n if added_kernel is not None:\n sm += added_kernel\n \n# model = models.ContinuousModel(sm, (util.ensure_tf_matrix(x),util.ensure_tf_matrix(y)))\n model = models.ContinuousModel(sm, data)\n model.train(verbose=False)\n BIC[i] = model.log_posterior_density(\"bic\").numpy()\n \n if plot_BIC:\n fig = plt.figure()\n plt.plot(Qs, BIC)\n plt.xlabel('Number of Spectral Mixture components (Q)')\n plt.show()\n\n return np.argmax(BIC) + min_Q",
"def harmonic_cqt(x_in, sr, hop_length=1024, fmin=27.5, n_bins=72,\n n_harmonics=5, bins_per_octave=36, tuning=0.0, filter_scale=1,\n aggregate=None, norm=1, sparsity=0.0, real=False):\n\n kwargs = dict(n_bins=n_bins, bins_per_octave=bins_per_octave,\n hop_length=hop_length, sr=sr, tuning=tuning,\n filter_scale=filter_scale, aggregate=aggregate, norm=norm,\n sparsity=sparsity, real=real)\n\n cqt_spectra = []\n min_tdim = np.inf\n for i in range(1, n_harmonics + 1):\n cqt_spectra += [np.array([librosa.cqt(x_c, fmin=i * fmin, **kwargs).T\n for x_c in x_in.T])[:, np.newaxis, ...]]\n min_tdim = min([cqt_spectra[-1].shape[2], min_tdim])\n cqt_spectra = [x[:, :, :min_tdim, :] for x in cqt_spectra]\n\n return np.concatenate(cqt_spectra, axis=1)",
"def getUIQM(x):\n x = x.astype(np.float32)\n ### from https://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7300447\n # c1 = 0.4680; c2 = 0.2745; c3 = 0.2576\n ### from https://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7300447\n c1 = 0.0282\n c2 = 0.2953\n c3 = 3.5753\n\n uicm = _uicm(x)\n uism = _uism(x)\n uiconm = _uiconm(x, 8)\n uiqm = (c1 * uicm) + (c2 * uism) + (c3 * uiconm)\n return uiqm",
"def test_sm_spectrum(self):\n\n spectrum_mg = np.asarray([3.41707366e-02, 1.02592426e-02, 3.20641729e-03, 9.63481603e-04,\n 2.81233386e-04, 8.12019322e-05, 2.13711295e-05, 5.30226309e-06,\n 1.14687576e-06])\n # Number of SM events generated in MG [66095., 25637., 33458., 48654., 18351., 6849., 59869., 32043., 9044.]\n\n s = 13e3**2\n logbins = np.linspace(np.log10(200),np.log10(2000),10)\n bins = 10**logbins\n nbins = len(bins)-1\n for i in range(nbins):\n center = 0.5*(bins[i]+bins[i+1])\n width = bins[i+1]-bins[i]\n spectrum = pplnu.sigma_qqlnu_int(s, bins[i], bins[i+1], 'mu', 0, par2, center**2, 0, newphys=False)*GeVtopb/width\n err = (spectrum-spectrum_mg[i])/spectrum_mg[i]\n self.assertAlmostEqual(err,0,delta=0.02,msg=f'error in bin {i}: {err}')",
"def initSMParamsFourier(Q, x, y, sn, samplingFreq, nPeaks, relMaxOrder=2):\n x = np.atleast_2d(x)\n y = np.atleast_2d(y)\n n, D = x.shape\n w = np.zeros(Q)\n m = np.zeros((D,Q))\n s = np.zeros((D,Q))\n w[:] = np.std(y) / Q\n hypinit = {\n 'cov': np.zeros(Q+2*D*Q),\n 'lik': np.atleast_1d(np.log(sn)),\n 'mean': np.array([])\n }\n\n # Assign hyperparam weights\n hypinit['cov'][0:Q] = np.log(w)\n\n # Assign hyperparam frequencies (mu's)\n signal = np.array(y.ravel()).ravel() # Make into 1D array\n n = x.shape[0]\n k = np.arange(n)\n ts = n/samplingFreq\n frqx = k/float(ts)\n frqx = frqx[range(n/2)]\n frqy = np.fft.fft(signal)/n\n frqy = abs(frqy[range(n/2)])\n # Find the peaks in the frequency spectrum\n peakIdx = np.array([])\n while not peakIdx.any() and relMaxOrder > 0:\n peakIdx = spsig.argrelmax(np.log(frqy**2), order=relMaxOrder)[0]\n relMaxOrder -= 1\n if not peakIdx.any():\n raise ValueError(\"Data doesn't have any detectable peaks in Fourier space.\"\n \" Switching to a different kernel besides the spectral \"\n \"mixture is recommended.\")\n # Find specified number (nPeaks) largest peaks\n sortedIdx = frqy[peakIdx].argsort()[::-1][:nPeaks]\n sortedPeakIdx = peakIdx[sortedIdx]\n hypinit['cov'][Q + np.arange(0,Q*D)] = np.log(frqx[sortedPeakIdx])\n\n # Assign hyperparam length scales (sigma's)\n for i in range(0,D):\n xslice = np.atleast_2d(x[:,i])\n d2 = spat.distance.cdist(xslice, xslice, 'sqeuclidean')\n if n > 1:\n d2[d2 == 0] = d2[0,1]\n else:\n d2[d2 == 0] = 1\n maxshift = np.max(np.max(np.sqrt(d2)))\n s[i,:] = 1./np.abs(maxshift*np.random.ranf((1,Q)))\n hypinit['cov'][Q + Q*D + np.arange(0,Q*D)] = np.log(s[:]).T\n \n return hypinit",
"def music(csi_corr, csi_target, Ntx, Nrx, d_tx, d_rx, t):\n\n In = 0\n s = phase_correction(csi_corr, csi_target)\n s_lin = (s[:, :, 0, t:t + 2].reshape(6, 2, order='F'))\n\n '''Compute the covariance matrix and the eigendecompositon'''\n R_hat = np.cov(s_lin)\n D, Q = ln.eig(R_hat)\n\n '''Sort the eigenvalues in D'''\n Do = np.abs(D)\n D = np.sort(Do)[::-1]\n I = np.argsort(Do)[::-1]\n Q = Q[:, I]\n\n ''' Compute the Number of signal that are significative'''\n T = np.cumsum(np.real(D))\n for i in range(1, 1, np.size(T)):\n if T(i) >= 0.99 * T(np.size(T)):\n In = i\n break\n\n ''' Get the signal eigenvectors'''\n In = 0 # take the first signal\n Qs = Q[:, :In]\n\n ''' Get the noise eigenvectors'''\n Qn = Q[:, In + 1:]\n\n ''' Angles at which MUSIC Pseudospectrum will be computed '''\n angles1 = np.arange(-90, 90, 1)\n angles2 = np.arange(-90, 90, 1)\n\n '''Compute steering vectors corresponding values in angles'''\n a1 = np.exp(-1.j * 2 * np.pi * d_rx * np.tensordot(arange(Nrx), sin(angles1 * np.pi / 180), 0))\n a2 = np.exp(-1.j * 2 * np.pi * d_tx * np.tensordot(arange(Ntx), sin(angles1 * np.pi / 180), 0))\n\n '''Compute MUSIC \"spectrum\" '''\n music_spectrum = np.zeros((np.size(angles1), np.size(angles2)), dtype=complex)\n for k in range(1, np.size(angles2)):\n for j in range(1, np.size(angles1)):\n K = np.kron(a1[:, j], a2[:, k])\n s = dot(K.T, Qn)\n music_spectrum[j, k] = 1 / dot(abs(s), abs(s).T)\n\n ''' compute the mesh and plot the surf of the pseudospectrum '''\n\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n x = angles2\n y = angles1\n X, Y = np.meshgrid(x, y)\n Z = np.abs(np.squeeze(music_spectrum))\n ax = fig.add_subplot(111, projection='3d')\n ax.set_ylabel('AoA')\n ax.set_xlabel('AoD')\n ax.set_xlim3d(-90, 90)\n ax.set_ylim3d(-90, 90)\n ax.plot_surface(X, Y, Z, rstride=2, cstride=2, cmap=cm.jet, alpha=0.7, linewidth=0.25)\n\n ''' detect the peaks corresponding to DoD and DoA '''\n detect = detect_peaks(Z)\n index_max = np.column_stack(np.where(detect))\n x_ind = index_max[:, 0]\n y_ind = index_max[:, 1]\n tab = (np.transpose(np.array((Z[x_ind, y_ind], x[x_ind], y[y_ind])))).tolist()\n tab.sort(key=lambda e: e[0], reverse=True)\n myarray = np.asarray(tab[0])\n angles = myarray[1:]\n plt.show()\n\n return angles",
"def _eta_sfr_scaling(self,x,q):\n i = self.enum[q]\n A = self.scaling_params['A'][i]\n b = self.scaling_params['b'][i]\n return A*x**b",
"def smethod(fx,L=11,nh=2**8,tstep=2**7,ng=1,df=1.0,nfbins=2**10,sigmaL=None):\r\n \t\r\n df=float(df)\r\n \r\n if type(fx) is list:\r\n fx=np.array(fx)\r\n try:\r\n fn,fm=fx.shape\r\n if fm>fn:\r\n fm,fn=fx.shape\r\n except ValueError:\r\n fn=len(fx)\r\n fm=1\r\n if fm>1:\r\n print 'computing cross spectra'\r\n #compute the analytic signal of function f and dctrend\r\n #fa=sps.hilbert(dctrend(fx[0]))\r\n #fb=sps.hilbert(dctrend(fx[1]))\r\n fa=fx[0]\r\n fb=fx[1]\r\n fa=fa.reshape(fn)\r\n fb=fb.reshape(fn)\r\n pxa,tlst,flst=stft(fa,nh=nh,tstep=tstep,ng=ng,df=df,nfbins=nfbins)\r\n pxb,tlst,flst=stft(fb,nh=nh,tstep=tstep,ng=ng,df=df,nfbins=nfbins)\r\n pxx=pxa*pxb.conj()\r\n else:\r\n #compute the analytic signal of function f and dctrend\r\n #fa=sps.hilbert(dctrend(fx))\r\n fa=fx\r\n fa=fa.reshape(fn)\r\n fb=fa\r\n pxx,tlst,flst=stft(fa,nh=nh,tstep=tstep,ng=ng,df=df,nfbins=nfbins)\r\n# pxb=pxa\r\n\r\n #make an new array to put the new tfd in\r\n tfarray=abs(pxx)**2\r\n #get shape of spectrogram\r\n nf,nt=tfarray.shape\r\n #create a list of frequency shifts\r\n Llst=np.arange(start=-L/2+1,stop=L/2+1,step=1,dtype='int')\r\n #create a frequency gaussian window\r\n if sigmaL==None:\r\n sigmaL=L/(1*np.sqrt(2*np.log(2)))\r\n p=sps.gaussian(L,sigmaL)\r\n #make a matrix of windows\r\n pm=np.zeros((L,nt))\r\n for kk in range(nt):\r\n pm[:,kk]=p\r\n \r\n #loop over frequency and calculate the s-method \r\n for ff in range(L/2,nf-L/2):\r\n tfarray[ff,:]=tfarray[ff,:]+2*np.real(np.sum(pm*pxx[ff+Llst,:]*\r\n pxx[ff-Llst,:].conj(),axis=0))\r\n tfarray=tfarray/L\r\n \r\n return tfarray,tlst,flst,pxx",
"def metric_iaf(self, x):\n data = np.asarray(x['data'])\n iaf = [10.0] * data.shape[0]\n for ch, ch_data in enumerate(data):\n pxx, freqs = mlab.psd(ch_data, Fs=128.0, NFFT=256)\n alpha_mask = np.abs(freqs - 10) <= 2.0\n alpha_pxx = 10*np.log10(pxx[alpha_mask])\n alpha_pxx = scipy.signal.detrend(alpha_pxx)\n # iaf[ch] = alpha_pxx.shape\n iaf[ch] = freqs[alpha_mask][np.argmax(alpha_pxx)]\n return iaf",
"def spectrum_processing(s):\n s = default_filters(s)\n s = add_precursor_mz(s)\n s = normalize_intensities(s)\n s = reduce_to_number_of_peaks(s, n_required=5, ratio_desired=0.5, n_max=500)\n s = select_by_mz(s, mz_from=0, mz_to=1000)\n s = add_losses(s, loss_mz_from=10.0, loss_mz_to=200.0)\n s = require_minimum_number_of_peaks(s, n_required=5)\n return s",
"def qgset(x):\n return 0.2855*x - 0.8565",
"def get_mixture_evals(self, x):\n \n q_xs_list, _ = theano.scan(lambda i: \n self.get_importance_evals(x,\n T.transpose(self.mix_means[i,:]) \n ),\n sequences = [T.arange(self.num_comps)])\n \n max_q = T.max(q_xs_list)\n \n q_xs_list = q_xs_list - max_q\n \n q_xs_list = max_q + T.log(T.sum(T.exp(q_xs_list)))\n \n q_xs_list = -T.log(self.num_comps) + q_xs_list\n \n return q_xs_list",
"def xs_retrival_FG(self, xs_ofinterest, domain_ofinterest, out_folder, out_alias, flag_FG2semiFG):\n self.iso_read = xs_ofinterest['i']\n # only isotopes of interest are going to be read. However, iso_A3 and\n # iso_read should be the same if macroscopic XS are going to be\n # calculated.\n # A list is generated. Each element is another list with the index\n # positions of the requested domain in the phase space. e.g. [[3], [1],\n # [1], [1], [1], [1], [1], [1, 2, 3, 4, 5]]. Self.order establishes the\n # link between the phase space index of a given dimension and its names\n # (keys). Any manipulation on the domain of interest must not invalidate\n # the search np.where(), otherwise empty arrays (array()) may come up.\n idx_tuple_calc = []\n for di in range(self.d):\n idx_tuple_calc.append([np.where(val == self.phase_space[self.order[di]])[0][\n 0] + 1 for val in domain_ofinterest[self.order[di]]])\n # print idx_tuple_calc\n idx_tuple_calc = self.FG2semiFG(idx_tuple_calc, flag_FG2semiFG)\n # print idx_tuple_calc;sys.exit()\n order = [self.order[i] for i in range(0, 6)]\n # I want to locate XS for index in phase space. So a USER DEFINED set of indexes is considerd\n # The parametrization is on iota, so only [0:6] is considered, but I do need to apply the rules on FG and tupleFG2tuple_semiFG for assuring consistancy of variables.\n #'''\n # generating anisotropy vector\n # This can be passed further up if in the future many files have different\n # number of groups or anisotropy levels\n anysotropy = 3\n anysotropy_vec = [str(lvl) for lvl in range(anysotropy + 1)]\n groups = 2\n groups_vec = [str(lvl) for lvl in range(1, groups + 1)]\n # generation of xs dictionary\n xs_dic = {}\n for i in xs_ofinterest['i']:\n xs_dic[i] = {}\n xs_dic[i]['R'] = {}\n for r in xs_ofinterest['r']:\n if r != 'tran':\n if xs_exists(i, r, None):\n xs_dic[i]['R'][r] = {}\n for g in xs_ofinterest['g']:\n xs_dic[i]['R'][r][g] = {}\n for tuple_i in itertools.product(*idx_tuple_calc):\n aux = tuple(self.tupleFG2tuple_semiFG(\n np.array(tuple_i), flag_FG2semiFG))\n # print aux\n xs_dic[i]['R'][r][g][aux[0:6]] = []\n else:\n \"\"\"\n tran XS are saved indexed as 'tran'+'anisotropy level'+'input group'+'output group'\n level 0 are the standard scaterring xs for a whole assembly flux. So:\n tran011=\\sigma_{1->1},tran012=\\sigma_{1->2},tran021=\\sigma_{2->1},tran022=\\sigma_{2->2}\n\n Note: scaterring xs for iso=MACR and anisotropy>1 is generated, i.e. tran2** and tran3** but then they won't be filled with anything\n \"\"\"\n for p in anysotropy_vec:\n for g1 in groups_vec:\n for g2 in groups_vec:\n # print r+p+g1+g2\n if xs_exists(i, r + p + g1 + g2, None):\n xs_dic[i]['R'][r + p + g1 + g2] = {}\n xs_dic[i]['R'][r + p + g1 + g2][g1] = {}\n for tuple_i in itertools.product(*idx_tuple_calc):\n aux = tuple(self.tupleFG2tuple_semiFG(\n np.array(tuple_i), flag_FG2semiFG))\n xs_dic[i]['R'][r + p + g1 + g2][g1][aux[0:6]] = []\n # From the list of required indices of d dimensions a list of tuples is\n # build. For the requested tuples, a point of calculation in the auxiliary\n # *.out files is found by self.conversion_table. The condition for a\n # requesting a point of calculation is a match between the tuple and the\n # available touples in the phase space. e.g [49, 50, 51, 52, 53, 54, 55,\n # 56, 57, 58]. 
If user-imposed specific 'non FG' whese consider in the\n # conversion table generation here they need to be considered as well\n\n point_calc = None\n for tuple_i in itertools.product(*idx_tuple_calc):\n # USER IMPOSED: the conversion table saves user defined relation in the\n # indexes of the nodes\n tuple_i = self.tupleFG2tuple_semiFG(np.array(tuple_i), flag_FG2semiFG)\n # print tuple_i\n # for the requested tuple_i the corresponding .out file is found\n for i in range(len(self.conversion_table)):\n if all(tuple_i == self.conversion_table[i][0]):\n # the conversion table permits to consider custom naming of .out files\n point_calc = self.conversion_table[i][1]\n break # calculation points are unique. After the first match the search for that tuple is abandoned\n if i == len(self.conversion_table):\n raise ValueError(\n 'a point not existing in the .out files has been requested. tuple=', tuple_i)\n\n # un-comment for locating specific .out files in the xs reading process\n \"\"\"\n if all(tuple_i==[ 2, 2, 1, 1, 2, 1 , 1, 1, 1, 1]) or all(tuple_i==[ 2, 2, 1, 1 , 2 , 1 , 1 ,24 ,24, 24]):\n print tuple_i, point_calc\n\n if all(tuple_i==[ 2, 2, 1, 1 , 1 , 1 , 2 ,1 ,24, 24]):\n print tuple_i, point_calc\n \"\"\"\n\n # Access auxiliary *.out files\n fout = open(out_folder + out_alias + '/' + str(point_calc) + \".out\", 'r')\n iso = None\n\n for line in fout:\n # Detect isotopes specification\n if line.find('isotope') != -1:\n iso = line.split()[1]\n tran_counter = 0\n\n # Append only xs of interest. tran is a special case and treated as group independent\n # print xs_ofinterest;sys.exit()\n if iso in xs_ofinterest[\"i\"]:\n for reac in ['abso', 'fiss', 'nufi', 'spec', 'tran', 'ener', 'difc', 'tota', 'excs']:\n # A xs may not be present, this automaticly handled by line.find(reac)!=-1\n # A xs may be present but not wanted, this is handled by: reac in xs_ofinterest[\"r\"]\n # A xs may be unphysical (nufi in MACR) this is handle by\n # xs_exists(iso,r,None)\n if line.find(reac) != -1 and reac in xs_ofinterest[\"r\"] and xs_exists(iso, reac, None):\n if reac != 'tran':\n # print iso, reac,xs_dic[iso]['R'].keys(), xs_exists(iso,reac,None)\n if '1' in str(xs_ofinterest[\"g\"]):\n xs_dic[iso]['R'][reac]['1'][\n tuple(tuple_i[0:6])].append(float(line.split()[1]))\n if '2' in str(xs_ofinterest[\"g\"]):\n xs_dic[iso]['R'][reac]['2'][\n tuple(tuple_i[0:6])].append(float(line.split()[2]))\n else:\n # this is for P3 anisotropy. 
Associating a group preservs structure\n # of dictionary.\n xs_dic[iso]['R'][\n reac + str(tran_counter) + '1' + '1']['1'][tuple(tuple_i[0:6])].append(float(line.split()[1]))\n xs_dic[iso]['R'][\n reac + str(tran_counter) + '1' + '2']['1'][tuple(tuple_i[0:6])].append(float(line.split()[3]))\n xs_dic[iso]['R'][\n reac + str(tran_counter) + '2' + '1']['2'][tuple(tuple_i[0:6])].append(float(line.split()[2]))\n xs_dic[iso]['R'][\n reac + str(tran_counter) + '2' + '2']['2'][tuple(tuple_i[0:6])].append(float(line.split()[4]))\n tran_counter += 1\n fout.close()\n self.domain_ofinterest = domain_ofinterest\n for i in xs_dic.keys():\n for r in xs_dic[i]['R'].keys():\n for g in xs_dic[i]['R'][r].keys():\n for iota in xs_dic[i]['R'][r][g].keys():\n if len(xs_dic[i]['R'][r][g][iota]) != len(domain_ofinterest['BURNUP']):\n print i, r, g, iota\n raise ValueError(\"empty entries for\")\n\n # if zero values are prefared to inexistent data (for isotopes associated\n # to CR and things like that)\n AD_HOC_ZERO = 'no'\n i0 = xs_dic.keys()[0]\n r0 = xs_dic[i0]['R'].keys()[0]\n g0 = xs_dic[i0]['R'][r0].keys()[0]\n iota0 = xs_dic[i0]['R'][r0][g0].keys()[0]\n aux = len(xs_dic[i0]['R'][r0][g0][iota0])\n\n if AD_HOC_ZERO == 'yes':\n for i in xs_dic.keys():\n for r in xs_dic[i]['R'].keys():\n for g in xs_dic[i]['R'][r].keys():\n for iota in xs_dic[i]['R'][r][g].keys():\n print iota, len(xs_dic[i]['R'][r][g][iota])\n if len(xs_dic[i]['R'][r][g][iota]) == 0:\n xs_dic[i]['R'][r][g][iota] = np.zeros(aux)\n\n return xs_dic, order",
"def submitPirQuery(self,q,base):\n x,omega = self.db.shape\n print ('OMEGA IS ',omega)\n results = np.zeros(omega,dtype=np.uint64) \n for bit_idx in range(len(q)):\n if q[bit_idx]==0:\n continue\n results = (utilities.scaleArrayGF(self.db[bit_idx],q[bit_idx],base) + results) % base\n \n return results",
"def qfunc(x):\n # Error check inputs\n if isinstance(x, np.ndarray):\n if x.dtype == np.complex128:\n raise TypeError(\"complex input not supported\")\n else:\n if isinstance(x, complex):\n raise TypeError(\"complex input not supported\")\n\n Q = 0.5 * erfc(x / np.sqrt(2.0))\n return Q",
"def generate_measurements(\n self, x: numpy.ndarray\n ) -> Tuple[List[numpy.ndarray]]:\n # Generate A matrices\n signal_power = 0\n A_list = []\n for t in range(self.T):\n if self.A_type == 1:\n # IID Gausian with unit-norm colums\n A = (\n numpy.random.randn(self.M, self.N) +\n 1j * numpy.random.randn(self.M, self.N)\n ) / numpy.sqrt(2 * self.M)\n for n in range(self.N):\n A[:, n] /= numpy.linalg.norm(A[:, n])\n else:\n raise ValueError(\"Invalid A_type: {}\".format(self.A_type))\n\n A_list.append(A)\n signal_power += numpy.linalg.norm(A.dot(x[:, t])) ** 2\n\n # Extract noise variance for desired SNR\n sig2e = signal_power / (self.M * self.T) * 10 ** (-self.desired_SNR/10)\n\n # Generate noisy measurements\n y_list = []\n for t in range(self.T):\n e = numpy.sqrt(sig2e/2) * (\n numpy.random.randn(self.M, 2).dot([1, 1j]))\n y_list.append(\n A[t].dot(x[:, t]) + e\n )\n\n return y_list, A_list, sig2e",
"def pureNi_solliq():\n # Given temperature.\n T = 800\n # Render thermodynamic database.\n db = Database(\"AlNiAnsara1997.TDB\")\n # Define the element.\n comp = \"NI\"\n # Two phases separated by the interface.\n phasenames = [\"FCC_A1\", \"LIQUID\"]\n\n # Molar volumes for elements.\n # Molar volume of Ni. \n vni = 6.718 * 10.0 ** (-6.0) + (2.936 * 10.0 ** (-5) * 10.0 ** (-6.0)) * (\n T ** 1.355\n )\n\n # Call the module for calculating solid/liquid interfacial energies in pure metals.\n sigma = SigmaPure(T, vni, db, comp, phasenames)\n\n # Print the calculated interfacial energy with xarray.Dataset type.\n print(sigma, \"\\n\")\n # Print the calculated interfacial energy with xarray.DataArray type.\n print(sigma.Interfacial_Energy, \"\\n\")\n # Print the calculated interfacial energy value.\n print(sigma.Interfacial_Energy.values)\n\n # Output\n \"\"\"\n <xarray.Dataset>\n Dimensions: ()\n Data variables:\n Component <U2 'NI'\n Temperature int64 800\n Melting_Enthalpy float64 1.748e+04\n Interfacial_Energy float64 0.3211 \n\n <xarray.DataArray 'Interfacial_Energy' ()>\n array(0.321081580921721) \n\n 0.321081580921721\n \"\"\"",
"def gtgram_xe(wave, fs, channels, f_min, f_max):\n cfs = centre_freqs(fs, channels, f_min, f_max)\n fcoefs = np.flipud(make_erb_filters(fs, cfs))\n xf = erb_filterbank(wave, fcoefs)\n xe = np.power(xf, 2)\n return xe",
"def gtgram_xe(wave, fs, channels, f_min, f_max):\n cfs = centre_freqs(fs, channels, f_min, f_max)\n fcoefs = np.flipud(gf.make_erb_filters(fs, cfs))\n xf = gf.erb_filterbank(wave, fcoefs)\n xe = np.power(xf, 2)\n return xe",
"def SIDFT(X,D):\n N=len(X)\n x=np.zeros(N,'complex')\n for n in range(0,N,1):\n for k in range(0,N,1):\n x[n]=x[n]+np.exp(-1j*2*np.pi*k*D/N)*X[k]*np.exp(1j*2*np.pi*k*n/N)\n return x/N",
"def collect_quantum_energies(quantum_outputs):\n #here we will cycle throught the outputs in order to detect SCF enery\n input_files = glob.glob(quantum_outputs)\n dict_energy = {}\n #now cycle through all the output gaussian files\n for f in input_files:\n #to be sure we take the last indexes\n phi =int( f.split(\"/\")[-2]) # to be more consistent, we know that in -2 there's phi\n psi =int( f.split(\"/\")[-1].split(\".out\")[0].split(\"structure_\")[1])\n #first fix phi and psi values:\n #plot from -180 to 180 so we can compare with Ramachandran\n if phi > 180.0:\n phi = phi - 360.0\n if psi > 180.0 :\n psi = psi - 360.0\n #open the output file\n gout = open(f,\"r\").readlines()\n #Extract energies\n scf = []\n for line in gout:\n if \"SCF Done\" in line:\n scf.append(line.split()[4])\n dict_energy[phi,psi] = float(scf[-1])*627.50\n print(\"Apparently quantum energies were correctly extracted\")\n\n return dict_energy",
"def dfluxes(wavelength, s, line1, line2, lowlow= 25, lowhigh=15, highlow=15, highhigh = 25, \n lmin=0, lmax=0, fmin=0, fmax=0,\n broad1=2.355, broad2=2.355, sus_line1=True, sus_line2=True,\n plot=True, verbose=True, plot_sus = False, fcal = True, \n fit_continuum = True, median_kernel=35, warnings = True ): # Broad is FWHM for Gaussian sigma= 1, \n # Setup wavelength limits\n if lmin == 0 :\n lmin = line1-65. # By default, +-65 A with respect to line\n if lmax == 0 :\n lmax = line2+65.\n \n # Extract subrange to fit\n w_spec = []\n f_spec = []\n w_spec.extend((wavelength[i]) for i in range(len(wavelength)) if (wavelength[i] > lmin and wavelength[i] < lmax) ) \n f_spec.extend((s[i]) for i in range(len(wavelength)) if (wavelength[i] > lmin and wavelength[i] < lmax) ) \n \n \n if np.nanmedian(f_spec) == np.nan: print(\" NO HAY DATOS.... todo son NANs!\")\n\n \n # Setup min and max flux values in subrange to fit\n if fmin == 0 :\n fmin = np.nanmin(f_spec) \n if fmax == 0 :\n fmax = np.nanmax(f_spec) \n \n\n # We have to find some \"guess numbers\" for the Gaussian\n # Now guess_centre is line\n guess_centre1 = line1\n guess_centre2 = line2 \n guess_centre = (guess_centre1+guess_centre2)/2. \n # Define continuum regions: [-lowlow, -lowhigh] and [highlow,highhigh] in Angstroms with respect to guess_centre\n \n\n w_cont=[]\n f_cont=[]\n w_cont.extend((w_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre-lowlow and w_spec[i] < guess_centre-lowhigh) or (w_spec[i] > guess_centre+highlow and w_spec[i] < guess_centre+highhigh) ) \n f_cont.extend((f_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre-lowlow and w_spec[i] < guess_centre-lowhigh) or (w_spec[i] > guess_centre+highlow and w_spec[i] < guess_centre+highhigh) ) \n\n if fit_continuum:\n # Linear Fit to continuum \n f_cont_filtered=sig.medfilt(f_cont,np.int(median_kernel))\n try: \n mm,bb = np.polyfit(w_cont, f_cont_filtered, 1)\n except Exception:\n bb = np.nanmedian(f_cont_filtered)\n mm = 0.\n if warnings: \n print(\" WARNING: Impossible to get the continuum!\")\n print(\" Scaling the continuum to the median value\") \n continuum = mm*np.array(w_spec)+bb \n c_cont = mm*np.array(w_cont)+bb \n\n else: \n # Median value in each continuum range # NEW 15 Sep 2019\n w_cont_low = []\n f_cont_low = []\n w_cont_low.extend((w_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre-lowlow and w_spec[i] < guess_centre-lowhigh) ) \n f_cont_low.extend((f_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre-lowlow and w_spec[i] < guess_centre-lowhigh) ) \n median_w_cont_low = np.nanmedian(w_cont_low)\n median_f_cont_low = np.nanmedian(f_cont_low)\n w_cont_high = []\n f_cont_high = []\n w_cont_high.extend((w_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre+highlow and w_spec[i] < guess_centre+highhigh) ) \n f_cont_high.extend((f_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre+highlow and w_spec[i] < guess_centre+highhigh) ) \n median_w_cont_high = np.nanmedian(w_cont_high)\n median_f_cont_high = np.nanmedian(f_cont_high) \n \n b = (median_f_cont_low-median_f_cont_high)/(median_w_cont_low-median_w_cont_high)\n a = median_f_cont_low- b * median_w_cont_low\n \n continuum = a + b*np.array(w_spec)\n c_cont = b*np.array(w_cont)+ a \n \n # rms continuum\n rms_cont = np.nansum([ np.abs(f_cont[i] - c_cont[i]) for i in range(len(w_cont)) ]) / len(c_cont)\n\n # Search for index here w_spec(index) closest to line\n min_w = np.abs(np.array(w_spec)-line1)\n mini = np.nanmin(min_w)\n 
guess_peak1 = f_spec[min_w.tolist().index(mini)] - continuum[min_w.tolist().index(mini)]\n min_w = np.abs(np.array(w_spec)-line2)\n mini = np.nanmin(min_w)\n guess_peak2 = f_spec[min_w.tolist().index(mini)] - continuum[min_w.tolist().index(mini)]\n\n # Search for beginning/end of emission line, choosing line +-10 \n # 28th Feb 2019: Check central value between low_limit and high_limit\n\n # LOW limit\n low_limit=0\n w_fit = []\n f_fit = []\n w_fit.extend((w_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre1-15 and w_spec[i] < guess_centre1)) \n f_fit.extend((f_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre1-15 and w_spec[i] < guess_centre1)) \n if fit_continuum: \n c_fit=mm*np.array(w_fit)+bb \n else: \n c_fit=b*np.array(w_fit)+a\n \n\n fs=[]\n ws=[]\n for ii in range(len(w_fit)-1,1,-1):\n if f_fit[ii]/c_fit[ii] < 1.05 and f_fit[ii-1]/c_fit[ii-1] < 1.05 and low_limit == 0: low_limit = w_fit[ii]\n# if f_fit[ii]/c_fit[ii] < 1.05 and low_limit == 0: low_limit = w_fit[ii]\n fs.append(f_fit[ii]/c_fit[ii])\n ws.append(w_fit[ii])\n if low_limit == 0: \n sorted_by_flux=np.argsort(fs)\n low_limit = ws[sorted_by_flux[0]]\n \n # HIGH LIMIT \n high_limit=0\n w_fit = []\n f_fit = []\n w_fit.extend((w_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre2 and w_spec[i] < guess_centre2+15)) \n f_fit.extend((f_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre2 and w_spec[i] < guess_centre2+15)) \n if fit_continuum: \n c_fit=mm*np.array(w_fit)+bb \n else: \n c_fit=b*np.array(w_fit)+a\n \n fs=[]\n ws=[]\n for ii in range(len(w_fit)-1):\n if f_fit[ii]/c_fit[ii] < 1.05 and f_fit[ii+1]/c_fit[ii+1] < 1.05 and high_limit == 0: high_limit = w_fit[ii]\n# if f_fit[ii]/c_fit[ii] < 1.05 and high_limit == 0: high_limit = w_fit[ii]\n fs.append(f_fit[ii]/c_fit[ii])\n ws.append(w_fit[ii])\n if high_limit == 0: \n sorted_by_flux=np.argsort(fs)\n high_limit = ws[sorted_by_flux[0]] \n \n # Fit a Gaussian to data - continuum \n p0 = [guess_centre1, guess_peak1, broad1/2.355, guess_centre2, guess_peak2, broad2/2.355] # broad is the Gaussian sigma, 1.0 for emission lines\n try:\n fit, pcov = curve_fit(dgauss, w_spec, f_spec-continuum, p0=p0, maxfev=10000) # If this fails, increase maxfev...\n fit_error = np.sqrt(np.diag(pcov))\n\n\n # New 28th Feb 2019: Check central value between low_limit and high_limit\n # Better: between guess_centre - broad, guess_centre + broad\n # If not, redo fit fixing central value to the peak (it does not work... 
just fix FWHM= (high_limit-low_limit)/2.5 )\n\n if verbose != False: print(\" ----------------------------------------------------------------------------------------\")\n if fit[0] < guess_centre1 - broad1 or fit[0] > guess_centre1 + broad1 or fit[3] < guess_centre2 - broad2 or fit[3] > guess_centre2 + broad2:\n if warnings: \n if fit[0] < guess_centre1 - broad1 or fit[0] > guess_centre1 + broad1: \n print(\" Fitted center wavelength\", fit[0],\"is NOT in the expected range [\",guess_centre1 - broad1,\",\",guess_centre1 + broad1,\"]\")\n else:\n print(\" Fitted center wavelength\", fit[0],\"is in the expected range [\",guess_centre1 - broad1,\",\",guess_centre1 + broad1,\"]\")\n if fit[3] < guess_centre2 - broad2 or fit[3] > guess_centre2 + broad2: \n print(\" Fitted center wavelength\", fit[3],\"is NOT in the expected range [\",guess_centre2 - broad2,\",\",guess_centre2 + broad2,\"]\")\n else:\n print(\" Fitted center wavelength\", fit[3],\"is in the expected range [\",guess_centre2 - broad2,\",\",guess_centre2 + broad2,\"]\")\n print(\" Fit failed!\")\n \n fit[0]=guess_centre1\n fit_error[0] = 0.000001\n fit[1]=guess_peak1\n fit_error[1] = 0.000001\n fit[2] = broad1/2.355\n fit_error[2] = 0.000001 \n fit[3]=guess_centre2\n fit_error[3] = 0.000001\n fit[4]=guess_peak2\n fit_error[4] = 0.000001\n fit[5] = broad2/2.355\n fit_error[5] = 0.000001\n else:\n if warnings: print(\" Fitted center wavelength\", fit[0],\"is in the expected range [\",guess_centre1 - broad1,\",\",guess_centre1 + broad1,\"]\")\n if warnings: print(\" Fitted center wavelength\", fit[3],\"is in the expected range [\",guess_centre2 - broad2,\",\",guess_centre2 + broad2,\"]\")\n \n\n if warnings: \n print(\" Fit parameters = \", fit[0], fit[1], fit[2]) \n print(\" \", fit[3], fit[4], fit[5])\n if fit[2] == broad1/2.355 and warnings == True : \n print(\" WARNING: Fit in\",fit[0],\"failed! Using given centre wavelengths (cw), peaks at (cv) & sigmas=broad/2.355 given.\") # CHECK THIS \n\n gaussian_fit = dgauss(w_spec, fit[0], fit[1], fit[2],fit[3], fit[4], fit[5])\n \n gaussian_1 = gauss(w_spec, fit[0], fit[1], fit[2])\n gaussian_2 = gauss(w_spec, fit[3], fit[4], fit[5])\n \n\n # Estimate rms of the Gaussian fit in range [low_limit, high_limit]\n residuals = f_spec-gaussian_fit-continuum\n rms_fit = np.nansum([ ((residuals[i]**2)/(len(residuals)-2))**0.5 for i in range(len(w_spec)) if (w_spec[i] >= low_limit and w_spec[i] <= high_limit) ]) \n \n # Fluxes, FWHM and Eq. 
Width calculations # CHECK THIS , not well done for dfluxes !!!\n \n gaussian_flux_1 = gauss_flux(fit[1],fit[2])\n gaussian_flux_2 = gauss_flux(fit[4],fit[5]) \n gaussian_flux = gaussian_flux_1+ gaussian_flux_2 \n if warnings: \n print(\" Gaussian flux = \", gaussian_flux_1, \" + \",gaussian_flux_2,\" = \",gaussian_flux)\n print(\" Gaussian ratio = \", gaussian_flux_1/gaussian_flux_2)\n \n error1 = np.abs(gauss_flux(fit[1]+fit_error[1],fit[2]) - gaussian_flux)\n error2 = np.abs(gauss_flux(fit[1],fit[2]+fit_error[2]) - gaussian_flux)\n gaussian_flux_error = 1 / ( 1/error1**2 + 1/error2**2 )**0.5\n \n fwhm=fit[2]*2.355\n fwhm_error = fit_error[2] *2.355\n fwhm_vel = fwhm / fit[0] * C \n fwhm_vel_error = fwhm_error / fit[0] * C \n \n gaussian_ew = gaussian_flux/np.nanmedian(f_cont)\n gaussian_ew_error = gaussian_ew * gaussian_flux_error/gaussian_flux \n \n # Integrated flux\n # IRAF: flux = sum ((I(i)-C(i)) * (w(i2) - w(i1)) / (i2 - i2) \n flux = np.nansum([ (f_spec[i]-continuum[i])*(w_spec[i+1]-w_spec[i]) for i in range(len(w_spec)) if (w_spec[i] >= low_limit and w_spec[i] <= high_limit) ]) \n flux_error = rms_cont * (high_limit - low_limit)\n wave_resolution = (wavelength[-1]-wavelength[0])/len(wavelength)\n ew = wave_resolution * np.nansum ([ (1 - f_spec[i]/continuum[i]) for i in range(len(w_spec)) if (w_spec[i] >= low_limit and w_spec[i] <= high_limit) ]) \n ew_error = np.abs(ew*flux_error/flux) \n gauss_to_integrated = gaussian_flux/flux * 100.\n \n # Plotting \n if plot :\n plt.figure(figsize=(10, 4))\n #Plot input spectrum\n plt.plot(np.array(w_spec),np.array(f_spec), \"blue\", lw=2, alpha = 0.7)\n plt.minorticks_on() \n plt.xlabel(\"Wavelength [$\\mathrm{\\AA}$]\")\n if fcal:\n plt.ylabel(\"Flux [ erg cm$^{-2}$ s$^{-1}$ $\\mathrm{\\AA}^{-1}$ ]\")\n else:\n plt.ylabel(\"Flux [ counts ]\")\n plt.xlim((line1+line2)/2-40,(line1+line2)/2+40)\n plt.ylim(fmin,fmax)\n \n # Vertical line at guess_centre\n plt.axvline(x=guess_centre1, color='r', linestyle='-', alpha=0.5)\n plt.axvline(x=guess_centre2, color='r', linestyle='-', alpha=0.5)\n\n # Horizontal line at y = 0\n plt.axhline(y=0, color='k', linestyle=':', alpha=0.5) \n # Dashed green regions for continuum, defined by [lowlow, lowhigh] and [highlow,highhigh]\n plt.axvspan(guess_centre+highlow, guess_centre+highhigh, facecolor='g', alpha=0.15,zorder=3)\n plt.axvspan(guess_centre-lowlow, guess_centre-lowhigh, facecolor='g', alpha=0.15,zorder=3)\n # Plot linear fit for continuum\n plt.plot(w_spec, continuum,\"g--\")\n # Plot Gaussian fit \n plt.plot(w_spec, gaussian_fit+continuum, 'r-', alpha=0.8) \n # Vertical line at Gaussian center\n plt.axvline(x=fit[0], color='k', linestyle='-', alpha=0.5)\n plt.axvline(x=fit[3], color='k', linestyle='-', alpha=0.5)\n # Plot Gaussians + cont\n plt.plot(w_spec, gaussian_fit+continuum, 'r-', alpha=0.5, lw=3) \n plt.plot(w_spec, gaussian_1+continuum, color=\"navy\",linestyle='--', alpha=0.8)\n plt.plot(w_spec, gaussian_2+continuum, color=\"#1f77b4\",linestyle='--', alpha=0.8)\n plt.plot(w_spec, np.array(f_spec)-(gaussian_fit), 'orange', alpha=0.4, linewidth=5) \n\n # Vertical lines to emission line\n plt.axvline(x= low_limit, color='k', linestyle=':', alpha=0.5)\n plt.axvline(x= high_limit, color='k', linestyle=':', alpha=0.5) \n plt.title('Double Gaussian Fit') # Fit: x0=%.2f y0=%.2e sigma=%.2f flux=%.2e rms=%.3e' % (fit[0], fit[1], fit[2], gaussian_flux, rms_fit))\n plt.show()\n plt.close()\n \n # Plot residuals\n# plt.figure(figsize=(10, 1))\n# plt.xlabel(\"Wavelength [$\\mathrm{\\AA}$]\")\n# 
plt.ylabel(\"RMS\")\n# plt.xlim((line1+line2)/2-40,(line1+line2)/2+40)\n# plt.axvline(x=fit[0], color='k', linestyle='-', alpha=0.5)\n# plt.axvline(x=fit[3], color='k', linestyle='-', alpha=0.5)\n# plt.plot(w_spec, residuals, 'k')\n# plt.minorticks_on()\n# plt.show()\n# plt.close()\n\n \n # Printing results\n if verbose :\n #print \"\\n> WARNING !!! CAREFUL WITH THE VALUES PROVIDED BELOW, THIS TASK NEEDS TO BE UPDATED!\\n\"\n print(\"\\n> Gauss and continuum fitting + integrated flux calculations:\\n\")\n print(\" rms continuum = %.3e erg/cm/s/A \" % (rms_cont)) \n print(\" Gaussian Fit parameters: x0 = ( %.2f +- %.2f ) A \" % (fit[0], fit_error[0]))\n print(\" y0 = ( %.3f +- %.3f ) 1E-16 erg/cm2/s/A\" % (fit[1]/1E-16, fit_error[1]/1E-16 ))\n print(\" sigma = ( %.3f +- %.3f ) A\" % (fit[2], fit_error[2])) \n print(\" rms fit = %.3e erg/cm2/s/A\" % (rms_fit))\n print(\" Gaussian Flux = ( %.2f +- %.2f ) 1E-16 erg/s/cm2 (error = %.1f per cent)\" % (gaussian_flux/1E-16, gaussian_flux_error/1E-16, gaussian_flux_error/gaussian_flux*100))\n print(\" FWHM = ( %.3f +- %.3f ) A = ( %.1f +- %.1f ) km/s \" % (fwhm, fwhm_error, fwhm_vel, fwhm_vel_error))\n print(\" Eq. Width = ( %.1f +- %.1f ) A\" % (-gaussian_ew, gaussian_ew_error)) \n print(\"\\n Integrated flux = ( %.2f +- %.2f ) 1E-16 erg/s/cm2 (error = %.1f per cent) \" % ( flux/1E-16, flux_error/1E-16, flux_error/flux *100)) \n print(\" Eq. Width = ( %.1f +- %.1f ) A\" % (ew, ew_error))\n print(\" Gauss/Integrated = %.2f per cent \" % gauss_to_integrated)\n \n \n # New 22 Jan 2019: sustract Gaussian fit\n index=0\n s_s=np.zeros_like(s)\n sustract_this = np.zeros_like(gaussian_fit)\n if sus_line1:\n sustract_this = sustract_this + gaussian_1\n if sus_line2:\n sustract_this = sustract_this + gaussian_2 \n \n \n for wave in range(len(wavelength)):\n s_s[wave]=s[wave]\n if wavelength[wave] == w_spec[0] : \n s_s[wave] = f_spec[0]-sustract_this[0]\n index=1\n if wavelength[wave] > w_spec[0] and wavelength[wave] <= w_spec[-1]:\n s_s[wave] = f_spec[index]-sustract_this[index]\n index=index+1\n if plot_sus: \n plt.figure(figsize=(10, 4))\n plt.plot(wavelength,s, \"r\")\n plt.plot(wavelength,s_s, \"c\")\n plt.minorticks_on() \n plt.xlabel(\"Wavelength [$\\mathrm{\\AA}$]\")\n if fcal:\n plt.ylabel(\"Flux [ erg cm$^{-2}$ s$^{-1}$ $\\mathrm{\\AA}^{-1}$ ]\")\n else:\n plt.ylabel(\"Flux [ counts ]\")\n plt.xlim(lmin,lmax)\n plt.ylim(fmin,fmax)\n plt.show()\n plt.close()\n \n # This gaussian_flux in 3 is gaussian 1 + gaussian 2, given in 15, 16, respectively\n # 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16\n resultado = [rms_cont, fit[0], fit_error[0], gaussian_flux, gaussian_flux_error, fwhm, fwhm_error, flux, flux_error, ew, ew_error, s_s, fit[3], fit[4],fit[5], gaussian_flux_1, gaussian_flux_2 ]\n return resultado \n except Exception:\n if verbose: print(\" Double Gaussian fit failed!\")\n resultado = [0, line1, 0, 0, 0, 0, 0, 0, 0, 0, 0, s, 0, 0, 0, 0, 0 ] # line was identified at lambda=line but Gaussian fit failed\n\n # NOTA: PUEDE DEVOLVER EL FLUJO INTEGRADO AUNQUE FALLE EL AJUSTE GAUSSIANO...\n\n # Plotting \n if plot :\n plt.figure(figsize=(10, 4))\n plt.plot(np.array(w_spec),np.array(f_spec), \"b\", lw=3, alpha = 0.5)\n plt.minorticks_on() \n plt.xlabel(\"Wavelength [$\\mathrm{\\AA}$]\")\n if fcal:\n plt.ylabel(\"Flux [ erg cm$^{-2}$ s$^{-1}$ $\\mathrm{\\AA}^{-1}$ ]\")\n else:\n plt.ylabel(\"Flux [ counts ]\") \n plt.xlim(lmin,lmax)\n plt.ylim(fmin,fmax)\n \n # Vertical line at guess_centre\n plt.axvline(x=guess_centre, color='r', linestyle='-', 
alpha=0.5)\n # Horizontal line at y = 0\n plt.axhline(y=0, color='k', linestyle=':', alpha=0.5) \n # Dashed green regions for continuum, defined by [lowlow, lowhigh] and [highlow,highhigh]\n plt.axvspan(guess_centre+highlow, guess_centre+highhigh, facecolor='g', alpha=0.15,zorder=3)\n plt.axvspan(guess_centre-lowlow, guess_centre-lowhigh, facecolor='g', alpha=0.15,zorder=3)\n # Plot linear fit for continuum\n plt.plot(w_spec, continuum,\"g--\")\n # Plot Gaussian fit \n# plt.plot(w_spec, gaussian_fit+continuum, 'r-', alpha=0.8) \n # Vertical line at Gaussian center\n# plt.axvline(x=fit[0], color='k', linestyle='-', alpha=0.5)\n # Vertical lines to emission line\n plt.axvline(x= low_limit, color='k', linestyle=':', alpha=0.5)\n plt.axvline(x= high_limit, color='k', linestyle=':', alpha=0.5) \n # Plot residuals\n# plt.plot(w_spec, residuals, 'k')\n plt.title(\"No Gaussian fit obtained...\")\n plt.show()\n\n\n return resultado",
"def test_energy_increment(self):\n sqw_ws = MuscatSofQW(SampleWorkspace=self._sample_ws,\n ResolutionWorkspace=self._resolution_ws,\n ParameterWorkspace=self._param_ws,\n OutputWorkspace='__MuscatSofQWTest_result',\n EnergyInc=0.1)\n\n self.assertEqual(sqw_ws.getNumberHistograms(), self._sample_ws.getNumberHistograms())\n self.assertEqual(sqw_ws.getAxis(0).getUnit().unitID(), 'Energy')\n self.assertEqual(sqw_ws.getAxis(1).getUnit().unitID(), 'MomentumTransfer')\n\n x_data = sqw_ws.dataX(0)\n self.assertAlmostEqual(x_data[0], -0.5)\n self.assertAlmostEqual(x_data[-1], 0.5)\n self.assertAlmostEqual(x_data[len(x_data)/2], 0.0)\n\n self.assertEquals(sqw_ws.blocksize(), 10)",
"def ssc(signal,samplerate=16000,winlen=0.025,winstep=0.01,\n nfilt=26,nfft=512,lowfreq=0,highfreq=None,preemph=0.97): \n highfreq= highfreq or samplerate/2\n signal = sigproc.preemphasis(signal,preemph)\n frames = sigproc.framesig(signal, winlen*samplerate, winstep*samplerate)\n pspec = sigproc.powspec(frames,nfft)\n pspec = pylab.where(pspec == 0,pylab.finfo(float).eps,pspec) # if things are all zeros we get problems\n \n fb = get_filterbanks(nfilt,nfft,samplerate,lowfreq,highfreq)\n feat = pylab.dot(pspec,fb.T) # compute the filterbank energies\n R = pylab.tile(pylab.linspace(1,samplerate/2,pylab.size(pspec,1)),(pylab.size(pspec,0),1))\n \n return pylab.dot(pspec*R,fb.T) / feat",
"def IvsQ(fileList):\n\n # Produce a list if it is not the case\n if not isinstance(fileList,list):\n fileList = [fileList]\n\n for file in fileList:\n sol = loadSol(file)\n histI = sol.histI[0:(sol.NIterGrad + 1)]\n histQ = sol.histQ[0:(sol.NIterGrad + 1)]\n plt.semilogx(histQ,histI,label=file)\n plt.grid(True)\n plt.legend()\n plt.xlabel('Hist Q')\n plt.ylabel('Hist I')\n plt.title('I vs. Q')\n plt.show()",
"def measureDataComplexM_multiext(filename,sigma = 1.1,scale=0.27):\n hdu=pf.open(filename)\n nn = len(hdu)\n data = []\n colnames = ['x','y','M20','M22','M31','M33']\n sigma = sigma/scale\n for hdui in hdu[1:]:\n Nobj = hdui.data.shape[0]\n M20=np.zeros(Nobj)\n M22=np.zeros(Nobj).astype(complex)\n M31=np.zeros(Nobj).astype(complex)\n M33=np.zeros(Nobj).astype(complex)\n for i in range(Nobj):\n M20[i],M22[i],M31[i],M33[i]=complexMoments(data=hdui.data[i][4:].reshape(npix,npix),sigma=sigma)\n x=hdui.header['ccdXcen']\n y=hdui.header['ccdYcen']\n data.append([x,y,np.median(M20), np.median(M22), np.median(M31), np.median(M33)])\n data=np.array(data)\n hp.mwrfits(filename[:-7]+'_complexMoments_gausswt_'+str(sigma*scale)+'.fit',data.T,colnames=colnames)\n return '---done !-----'",
"def ha(sf,sfn,mX,pX,params,verbose=[],onlySelected=False,hc=-2,div=8,L=30,fs=44100,gt=[]):\r\n \r\n M,N,H,B = params\r\n \r\n idx = candidSelection(sf,t=0.025,hw=25) \r\n idx = np.concatenate((np.zeros(1),idx,np.array([sf.shape[0]])))\r\n idx_orig = idx.copy()\r\n mask = np.ones(idx.shape)\r\n mask[0]=0\r\n mask[-1]=0\r\n errors = np.zeros(mX.shape[0])\r\n scores = np.zeros(idx.shape)\r\n freqs = []\r\n \r\n tFlag = False\r\n vFlag = False # flag to enable prints and plots\r\n \r\n rms = np.sum(mX,axis=1)\r\n rms = rms-np.mean(rms)\r\n rms = rms/np.max(rms)\r\n rms = savgol_filter(rms,3,1)\r\n \r\n rms_t = -0.1\r\n \r\n # sending every onset candidate to harmonic analysis\r\n for i in range(len(idx)-2,0,-1):\r\n \r\n if onlySelected:\r\n if idx[i] not in verbose:\r\n continue\r\n \r\n b = int((idx[i]-(10240/H)) if (idx[i]>(idx[i-1]+(10240/H))) else idx[i-1])\r\n e = int((idx[i]+(10240/H)) if (idx[i]<(idx[i+1]-(10240/H))) else idx[i+1])\r\n \r\n \r\n if np.mean(rms[int(idx[i]):int(idx[i])+50])<rms_t:\r\n continue\r\n \r\n onst = int(idx[i]-b)\r\n pmX = np.copy(mX[b:e])\r\n \r\n\r\n if idx[i] in verbose:\r\n print(\"\\nOnset candidate:\")\r\n print(\"onset frame: %d\" %idx[i])\r\n print(\"sf onset number: %d\" %i)\r\n vFlag = True\r\n y = MRStftSynth(pmX,pX[b:e],M,H,B)\r\n print(\"synthesized sound\")\r\n ipd.display(ipd.Audio(data=y, rate=fs))\r\n \r\n if vFlag:\r\n print(\"STFT around candidate\")\r\n plt.pcolormesh(np.arange(pmX.shape[0]), np.arange(pmX.shape[1]), np.transpose(pmX))\r\n plt.show()\r\n \r\n print(\"filtered spectral flux\")\r\n plt.plot(sf[b:e])\r\n plt.show()\r\n print(\"raw spectral flux\")\r\n plt.plot(sfn[b:e])\r\n plt.show()\r\n \r\n allErrors,allf0s,pmXv = f0detection(pmX,pX[b:e],sfn[b:e],-100,10,onst,vFlag,hc,div,params,fs,tFlag)\r\n\r\n aL = np.min((e-idx[i]/2,L)) \r\n segments = getSegments(allf0s,allErrors,onst,pmX,vFlag)\r\n scores[i],freq,segmentScores = harmonicScore(segments,aL,vFlag,tFlag)\r\n freqs.append(freq)\r\n \r\n if scores[i]<1: # prevent rejected candidates from creating boundary for adjacent onset\r\n idx[i] = sf.shape[0]\r\n \r\n if vFlag:\r\n print(\"Score for this onset: %d\" %scores[i])\r\n \r\n if tFlag and scores[i]<1:\r\n pred_time = np.abs(idx[i]*(H/fs))\r\n closest_gt_ind = np.argmin(pred_time-gt)[0]\r\n if np.abs(gt[closest_gt_ind]-pred_time)<0.05:\r\n if score[i]>1:\r\n tp.append[idx[i]]\r\n if score[i]<1:\r\n fn.append[idx[i]]\r\n \r\n print(\"STFT around onset\")\r\n plt.pcolormesh(np.arange(pmX.shape[0]), np.arange(pmX.shape[1]), np.transpose(pmX))\r\n plt.show()\r\n \r\n y = MRStftSynth(pmXv,pX,M,H,B)\r\n ipd.display(ipd.Audio(data=y, rate=fs))\r\n \r\n plt.pcolormesh(np.arange(pmXv.shape[0]), np.arange(pmXv.shape[1]), np.transpose(pmXv))\r\n plt.show()\r\n\r\n vFlag = False\r\n tFlag = False\r\n \r\n avg = np.mean(scores)\r\n mask[scores<1] = 0\r\n result = idx_orig[mask==1]\r\n return idx_orig[1:-1],result,freqs,scores[1:-1]",
"def spectrum_test62(f):\n format_wav = ff.FortranRecordReader(\"(10f8.2)\")\n format_flux = ff.FortranRecordReader(\"(6e12.5)\")\n\n wav = []\n flux = []\n npts = int(f.readline()) # number of frequency points\n\n while len(wav) < npts:\n wav += format_wav.read(f.readline())\n wav = np.array(wav[:npts])\n\n test = f.readline() # atmospheric parameters\n if len(test.split()) == 6:\n flux += format_flux.read(test)\n\n while len(flux) < npts:\n flux += format_flux.read(f.readline())\n flux = np.array(flux[:npts])\n\n return wav, flux",
"def irregularity(signal,fs, **kwargs):\n S = np.abs(np.fft.fft(signal))\n fv = np.fft.fftfreq(len(S), 1./fs)\n idx = fv >= 0\n S_plus = S[idx]\n fv_plus = fv[idx]\n S_k = S_plus[1:-1]\n S_left = S_plus[2:]\n S_right = S_plus[:-2]\n return np.log(20*np.sum(np.abs(np.log(S_k/(S_left*S_k*S_right)**(1./3)))))",
"def music(idx, n_music=200):\n f = freqs[idx]\n Rxx = np.dot(X[:, idx], X[:, idx].H)\n lam, V = eig_sorted(Rxx)\n En = V[:, 1:] # Noise subspace for one source\n\n theta_range = np.linspace(0, 2*np.pi, n_music)\n P_music = np.zeros(n_music)\n for i in range(n_music):\n sv = ma.steering_vector(theta_range[i], f)\n vec = np.dot(En.H, ma.steering_vector(theta_range[i], f))\n P_music[i] = 1/np.linalg.norm(vec)**2\n\n vv = V[:, 0].flatten()\n print('----------')\n print('Performing MUSIC at {:.5} Hz'.format(f))\n print('-----------------------------')\n print('Steering vector subspace check:\\n')\n print('At the correct angle of {:.3}, '.format(theta*180/np.pi) +\n 'the real parts of the eigenvalues of R_xx are:')\n print('\\n'.join(' {:.3}'.format(np.real(l)) for l in lam))\n print('\\nSteering vector / eigenvector of max eigenvalue:')\n print((ma.steering_vector(theta, f) / vv).T)\n return P_music, theta_range"
] | [
"0.5476735",
"0.5465266",
"0.5431895",
"0.5348803",
"0.5319814",
"0.5249597",
"0.5233212",
"0.52282983",
"0.51446587",
"0.51347893",
"0.51147515",
"0.5109146",
"0.50809836",
"0.50781864",
"0.5074541",
"0.50507593",
"0.5049618",
"0.5042971",
"0.50428605",
"0.5027192",
"0.5024835",
"0.5012998",
"0.5006414",
"0.5004561",
"0.4985085",
"0.49617916",
"0.49595293",
"0.4952039",
"0.4944486",
"0.4938164"
] | 0.64019674 | 0 |
Load the image on initial load of the application | def OnInit(self):
self.imageID = self.loadImage() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def OnInit( self ):\n self.imageID = self.loadImage ()",
"def load_image(self, **kwargs):\n ...",
"def load_image(self):\n # Minimal progress display while image is loaded.\n group = displayio.Group()\n group.append(centered_label('LOADING...', 40, 3))\n #self.rect = Rect(-board.DISPLAY.width, 120,\n # board.DISPLAY.width, 40, fill=0x00B000)\n #group.append(self.rect)\n board.DISPLAY.show(group)\n\n # pylint: disable=eval-used\n # (It's cool, is a 'trusted string' in the code)\n duration = eval(TIMES[self.time]) # Playback time in seconds\n # The 0.9 here is an empirical guesstimate; playback is ever-so-\n # slightly slower than benchmark speed due to button testing.\n rows = int(duration * self.rows_per_second * 0.9 + 0.5)\n # Remap brightness from 0.0-1.0 to brightness_range.\n brightness = (self.brightness_range[0] + self.brightness *\n (self.brightness_range[1] - self.brightness_range[0]))\n try:\n self.num_rows = self.bmp2led.process(self.path + '/' +\n self.images[self.image_num],\n self.tempfile,\n rows, brightness,\n self.loop,\n self.load_progress)\n except (MemoryError, BMPError):\n group = displayio.Group()\n group.append(centered_label('TOO BIG', 40, 3))\n board.DISPLAY.show(group)\n sleep(4)\n\n board.DISPLAY.show(displayio.Group()) # Clear display\n self.clear_strip() # LEDs off",
"def initImages(self):\n pass",
"def initImages(self):\n pass",
"def initImages(self):\n pass",
"def set_image(self):\r\n return loader.GFX['loadgamebox']",
"def load_image_i(self, img_tk):\n\n self.p2_label_img.configure(image=img_tk)\n self.p2_label_img.image = img_tk",
"def initial_image(path_images):\n\n path = os.getcwd()+path_images\n dirs = os.listdir(path)\n path = os.getcwd()+path_images+dirs[0]\n parent.ui.label_design_image.setPixmap(QtGui.QPixmap(path))",
"def load_image(self):\n try:\n return Image.open(self._path, 'r')\n except IOError:\n messagebox.showerror(\"Error\", \"Wrong sprite file path!\")",
"def _load_img_label(self):\n name = self._launch_file_b()\n self._img_label.configure(text=name)",
"def load_background(self, image):\n self.bg = pygame.image.load(image).convert()",
"def loaded_image(self, image):\r\n self.loaded_images.append(image)",
"def load(self, step=0):\n \n # take a step, if requested\n self.step_and_validate(step)\n \n with self.img_output:\n clear_output(wait=True)\n display(Image(self.imgs[self.i], width=850, unconfined=True))",
"def load_image(self, filename):\n return pygame.image.load(os.path.join('images', filename))",
"def _load_img(self, name):\n try:\n img_path = os.path.join(global_var.PATH, \"maps\", name + \".png\")\n env_img = pygame.image.load(img_path)\n except Exception as e:\n print(e)\n print(\"Environment\", name, \"does not exist. Make sure that a PNG image exists\",\n \"under that name in the \\\"maps\\\" folder.\")\n sys.exit()\n\n return env_img",
"def load(cls):\n\n cls.images[\"Wall\"] = pygame.image.load(\n \"ressources/images/wall.png\").convert()\n cls.images[\"MacGyver\"] = pygame.image.load(\n \"ressources/images/Mac.png\").convert()\n cls.images[\"Guardian\"] = pygame.image.load(\n \"ressources/images/Guardian.png\").convert()\n cls.images[\"Path\"] = pygame.image.load(\n \"ressources/images/path.png\").convert()\n cls.images[\"Tube\"] = pygame.image.load(\n \"ressources/images/tube.png\").convert()\n cls.images[\"Ether\"] = pygame.image.load(\n \"ressources/images/ether.png\").convert()\n cls.images[\"Needle\"] = pygame.image.load(\n \"ressources/images/needle.png\").convert()\n cls.images[\"gr\"] = pygame.image.load(\n \"ressources/images/but_du_jeu.png\").convert()",
"def __start_loading_window(self):\n\n loading_img = ImageTk.PhotoImage(PIL.Image.open(r\"Images server\\loading screen.png\"))\n self.__main_window.geometry(f\"{loading_img.width()}x{loading_img.height()-20}\")\n self.__main_window.title(\"Loading\")\n self.__main_window.iconbitmap(r'Images server\\icon.ico') # put stuff to icon\n\n loading_label = Label(self.__main_window, image=loading_img, bg=\"#192b3d\")\n loading_label.place(x=0, y=0)\n self.__main_window.after(1000, self.__load_everything)\n self.__main_window.mainloop()",
"def load_image(self, path):\n if path:\n self.original_image = cv2.imread(path, 1)\n self.prepare_images()",
"def set_image(self, path):\r\n \r\n image = self._load_image(path)\r\n self.image_raw = image\r\n self.image = ImageTk.PhotoImage(image)\r\n self.image_panel.configure(image=self.image)",
"def initImg(self):\n self.img = Image.new('RGBA',(self.width,self.height),color='#' + getConfigPart(self.theme,\"bg\"))\n self.draw = ImageDraw.Draw(self.img)",
"def importImg(self):\n logger.info(\"import image \"+ str(self))\n file,types = QtWidgets.QFileDialog.getOpenFileName(self, 'Choose Image',\n BASE_DIR,\"Image files (*.jpg *.gif *.png)\")\n logger.debug(file)\n self.imageFile = file\n self.image.setPixmap(QtGui.QPixmap(file))\n self.image.adjustSize()",
"def reload_image(self):\n img = self.img_manager.update_image()\n\n q_image = PyQt5.QtGui.QImage.fromData(img.read())\n q_pixmap = PyQt5.QtGui.QPixmap.fromImage(q_image)\n\n self.image_widget.setPixmap(q_pixmap)",
"def load_background(self, filename):\n img = pygame.image.load(filename)\n return self.fit_image(img, self.width, self.height)",
"def load(self):\n logger.debug(f\"Reading {self.path.name}\")\n self.label = int(Data.fromLabel(self.path.parent.name))\n self.image = skimg.data.imread(self.path)",
"def initialize(self):\n super(WXImageView, self).initialize()\n shell = self.shell_obj\n self.set_image(shell.image)\n self.set_scale_to_fit(shell.scale_to_fit)\n self.set_preserve_aspect_ratio(shell.preserve_aspect_ratio)\n self.set_allow_upscaling(shell.allow_upscaling)",
"def set_image(self, image_URL, bkg = None):\r\n\r\n self.image = self.image = pygame.image.load(image_URL).convert()\r\n if not bkg == None:\r\n # Set our transparent color\r\n self.image.set_colorkey(white)\r\n self.rect = self.image.get_rect()\r\n if self.drawable:\r\n self.set_drawable()",
"def load_image(path_to_image, image_name):\n print(\"Loading: \", path_to_image + image_name, \" ...\")\n return Image.open(path_to_image + image_name)",
"def load_image(self, image_name, piece_name):\n img = ImageTk.PhotoImage(Image.open(image_name))\n self.loaded_images[piece_name] = (img, image_name)\n return img",
"def _load(self):\r\n\t\t\r\n\t\tself.image.blit(self.sheet.sheet, (0,0), (self.x, self.y, self.size, self.size))"
] | [
"0.7676823",
"0.72862685",
"0.6990877",
"0.69560045",
"0.69560045",
"0.69560045",
"0.6736933",
"0.66402304",
"0.6628148",
"0.65887284",
"0.65709776",
"0.6562708",
"0.6558569",
"0.6513278",
"0.64529467",
"0.6451759",
"0.6443347",
"0.64257944",
"0.64138216",
"0.6399771",
"0.6373137",
"0.63559926",
"0.63493764",
"0.63477296",
"0.6342138",
"0.6341588",
"0.63383293",
"0.632966",
"0.6303601",
"0.62960935"
] | 0.77568126 | 0 |
Draw a cube with texture coordinates | def drawCube(self):
glBegin(GL_QUADS);
glTexCoord2f(0.0, 0.0);
glVertex3f(-1.0, -1.0, 1.0);
glTexCoord2f(1.0, 0.0);
glVertex3f(1.0, -1.0, 1.0);
glTexCoord2f(1.0, 1.0);
glVertex3f(1.0, 1.0, 1.0);
glTexCoord2f(0.0, 1.0);
glVertex3f(-1.0, 1.0, 1.0);
glTexCoord2f(1.0, 0.0);
glVertex3f(-1.0, -1.0, -1.0);
glTexCoord2f(1.0, 1.0);
glVertex3f(-1.0, 1.0, -1.0);
glTexCoord2f(0.0, 1.0);
glVertex3f(1.0, 1.0, -1.0);
glTexCoord2f(0.0, 0.0);
glVertex3f(1.0, -1.0, -1.0);
glTexCoord2f(0.0, 1.0);
glVertex3f(-1.0, 1.0, -1.0);
glTexCoord2f(0.0, 0.0);
glVertex3f(-1.0, 1.0, 1.0);
glTexCoord2f(1.0, 0.0);
glVertex3f(1.0, 1.0, 1.0);
glTexCoord2f(1.0, 1.0);
glVertex3f(1.0, 1.0, -1.0);
glTexCoord2f(1.0, 1.0);
glVertex3f(-1.0, -1.0, -1.0);
glTexCoord2f(0.0, 1.0);
glVertex3f(1.0, -1.0, -1.0);
glTexCoord2f(0.0, 0.0);
glVertex3f(1.0, -1.0, 1.0);
glTexCoord2f(1.0, 0.0);
glVertex3f(-1.0, -1.0, 1.0);
glTexCoord2f(1.0, 0.0);
glVertex3f(1.0, -1.0, -1.0);
glTexCoord2f(1.0, 1.0);
glVertex3f(1.0, 1.0, -1.0);
glTexCoord2f(0.0, 1.0);
glVertex3f(1.0, 1.0, 1.0);
glTexCoord2f(0.0, 0.0);
glVertex3f(1.0, -1.0, 1.0);
glTexCoord2f(0.0, 0.0);
glVertex3f(-1.0, -1.0, -1.0);
glTexCoord2f(1.0, 0.0);
glVertex3f(-1.0, -1.0, 1.0);
glTexCoord2f(1.0, 1.0);
glVertex3f(-1.0, 1.0, 1.0);
glTexCoord2f(0.0, 1.0);
glVertex3f(-1.0, 1.0, -1.0);
glEnd() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def drawCube( self ):\n glBegin(GL_QUADS);\n glTexCoord2f(0.0, 0.0); glVertex3f(-1.0, -1.0, 1.0);\n glTexCoord2f(1.0, 0.0); glVertex3f( 1.0, -1.0, 1.0);\n glTexCoord2f(1.0, 1.0); glVertex3f( 1.0, 1.0, 1.0);\n glTexCoord2f(0.0, 1.0); glVertex3f(-1.0, 1.0, 1.0);\n glTexCoord2f(1.0, 0.0); glVertex3f(-1.0, -1.0, -1.0);\n glTexCoord2f(1.0, 1.0); glVertex3f(-1.0, 1.0, -1.0);\n glTexCoord2f(0.0, 1.0); glVertex3f( 1.0, 1.0, -1.0);\n glTexCoord2f(0.0, 0.0); glVertex3f( 1.0, -1.0, -1.0);\n glTexCoord2f(0.0, 1.0); glVertex3f(-1.0, 1.0, -1.0);\n glTexCoord2f(0.0, 0.0); glVertex3f(-1.0, 1.0, 1.0);\n glTexCoord2f(1.0, 0.0); glVertex3f( 1.0, 1.0, 1.0);\n glTexCoord2f(1.0, 1.0); glVertex3f( 1.0, 1.0, -1.0);\n glTexCoord2f(1.0, 1.0); glVertex3f(-1.0, -1.0, -1.0);\n glTexCoord2f(0.0, 1.0); glVertex3f( 1.0, -1.0, -1.0);\n glTexCoord2f(0.0, 0.0); glVertex3f( 1.0, -1.0, 1.0);\n glTexCoord2f(1.0, 0.0); glVertex3f(-1.0, -1.0, 1.0);\n glTexCoord2f(1.0, 0.0); glVertex3f( 1.0, -1.0, -1.0);\n glTexCoord2f(1.0, 1.0); glVertex3f( 1.0, 1.0, -1.0);\n glTexCoord2f(0.0, 1.0); glVertex3f( 1.0, 1.0, 1.0);\n glTexCoord2f(0.0, 0.0); glVertex3f( 1.0, -1.0, 1.0);\n glTexCoord2f(0.0, 0.0); glVertex3f(-1.0, -1.0, -1.0);\n glTexCoord2f(1.0, 0.0); glVertex3f(-1.0, -1.0, 1.0);\n glTexCoord2f(1.0, 1.0); glVertex3f(-1.0, 1.0, 1.0);\n glTexCoord2f(0.0, 1.0); glVertex3f(-1.0, 1.0, -1.0);\n glEnd()",
"def drawCube( self ):\n glBegin(GL_QUADS);\n mTexture(0.0, 0.0); glVertex3f(-1.0, -1.0, 1.0);\n mTexture(1.0, 0.0); glVertex3f( 1.0, -1.0, 1.0);\n mTexture(1.0, 1.0); glVertex3f( 1.0, 1.0, 1.0);\n mTexture(0.0, 1.0); glVertex3f(-1.0, 1.0, 1.0);\n mTexture(1.0, 0.0); glVertex3f(-1.0, -1.0, -1.0);\n mTexture(1.0, 1.0); glVertex3f(-1.0, 1.0, -1.0);\n mTexture(0.0, 1.0); glVertex3f( 1.0, 1.0, -1.0);\n mTexture(0.0, 0.0); glVertex3f( 1.0, -1.0, -1.0);\n mTexture(0.0, 1.0); glVertex3f(-1.0, 1.0, -1.0);\n mTexture(0.0, 0.0); glVertex3f(-1.0, 1.0, 1.0);\n mTexture(1.0, 0.0); glVertex3f( 1.0, 1.0, 1.0);\n mTexture(1.0, 1.0); glVertex3f( 1.0, 1.0, -1.0);\n mTexture(1.0, 1.0); glVertex3f(-1.0, -1.0, -1.0);\n mTexture(0.0, 1.0); glVertex3f( 1.0, -1.0, -1.0);\n mTexture(0.0, 0.0); glVertex3f( 1.0, -1.0, 1.0);\n mTexture(1.0, 0.0); glVertex3f(-1.0, -1.0, 1.0);\n mTexture(1.0, 0.0); glVertex3f( 1.0, -1.0, -1.0);\n mTexture(1.0, 1.0); glVertex3f( 1.0, 1.0, -1.0);\n mTexture(0.0, 1.0); glVertex3f( 1.0, 1.0, 1.0);\n mTexture(0.0, 0.0); glVertex3f( 1.0, -1.0, 1.0);\n mTexture(0.0, 0.0); glVertex3f(-1.0, -1.0, -1.0);\n mTexture(1.0, 0.0); glVertex3f(-1.0, -1.0, 1.0);\n mTexture(1.0, 1.0); glVertex3f(-1.0, 1.0, 1.0);\n mTexture(0.0, 1.0); glVertex3f(-1.0, 1.0, -1.0);\n glEnd()",
"def __drawCube(self):\n self.cubePos = [[[(160, 160), (200, 160), (240, 160)],\n [(160, 200), (200, 200), (240, 200)],\n [(160, 240), (200, 240), (240, 240)]],\n [[(400, 160), (440, 160), (480, 160)],\n [(400, 200), (440, 200), (480, 200)],\n [(400, 240), (440, 240), (480, 240)]],\n [[(280, 160), (320, 160), (360, 160)],\n [(280, 200), (320, 200), (360, 200)],\n [(280, 240), (320, 240), (360, 240)]],\n [[(40, 160), (80, 160), (120, 160)],\n [(40, 200), (80, 200), (120, 200)],\n [(40, 240), (80, 240), (120, 240)]],\n [[(160, 40), (200, 40), (240, 40)],\n [(160, 80), (200, 80), (240, 80)],\n [(160, 120), (200, 120), (240, 120)]],\n [[(160, 280), (200, 280), (240, 280)],\n [(160, 320), (200, 320), (240, 320)],\n [(160, 360), (200, 360), (240, 360)]]]\n self.cubeColor = {1: 'green', 2: 'blue', 3: 'red', 4: 'orange',\\\n 5: 'white', 6: 'yellow'}\n for x in range(6):\n for y in range(3):\n for z in range(3):\n pos = self.cubePos[x][y][z]\n color = self.cubeColor[self.cube.cube[x][y][z]]\n self.cv.create_rectangle(pos[0], pos[1], pos[0]+40, pos[1]+40,\n fill=color, width='2')",
"def create_cube_textured(texture_list):\n a = Point3(-1.0, -1.0, -1.0)\n b = Point3(1.0, -1.0, -1.0)\n c = Point3(1.0, -1.0, 1.0)\n d = Point3(-1.0, -1.0, 1.0)\n e = Point3(-1.0, 1.0, -1.0)\n f = Point3(1.0, 1.0, -1.0)\n g = Point3(1.0, 1.0, 1.0)\n h = Point3(-1.0, 1.0, 1.0)\n t_list = [Point2(0, 0), Point2(1, 0), Point2(1, 1), Point2(0, 1)]\n\n obj = glGenLists(1)\n glNewList(obj, GL_COMPILE)\n glPushMatrix()\n for _i in range(len(texture_list)):\n glActiveTexture(GL_TEXTURE0 + _i)\n glEnable(GL_TEXTURE_2D)\n glBindTexture(GL_TEXTURE_2D, texture_list[_i])\n glBegin(GL_QUADS)\n drawVertexListCreateNormal_textured([a, b, c, d], t_list)\n drawVertexListCreateNormal_textured([b, f, g, c], t_list)\n drawVertexListCreateNormal_textured([f, e, h, g], t_list)\n drawVertexListCreateNormal_textured([e, a, d, h], t_list)\n drawVertexListCreateNormal_textured([d, c, g, h], t_list)\n drawVertexListCreateNormal_textured([a, e, f, b], t_list)\n glEnd()\n for _i in range(len(texture_list)):\n glActiveTexture(GL_TEXTURE0 + _i)\n glDisable(GL_TEXTURE_2D)\n glPopMatrix()\n glEndList()\n return obj",
"def draw_cube(self, vec):\n # TOP FACE\n gl.glBegin(gl.GL_QUADS)\n gl.glVertex3f(vec[0] + self.spacer, vec[1], vec[2] + self.spacer)\n gl.glVertex3f(vec[0], vec[1], vec[2] + self.spacer)\n gl.glVertex3f(vec[0], vec[1] + self.spacer, vec[2] + self.spacer)\n gl.glVertex3f(vec[0] + self.spacer, vec[1] + self.spacer, vec[2] + \\\n self.spacer)\n # BOTTOM FACE\n gl.glVertex3f(vec[0] + self.spacer, vec[1], vec[2])\n gl.glVertex3f(vec[0], vec[1], vec[2])\n gl.glVertex3f(vec[0], vec[1] + self.spacer, vec[2])\n gl.glVertex3f(vec[0] + self.spacer, vec[1] + self.spacer, vec[2])\n # FRONT FACE\n gl.glVertex3f(vec[0] + self.spacer, vec[1] + self.spacer, vec[2] + \\\n self.spacer)\n gl.glVertex3f(vec[0], vec[1] + self.spacer, vec[2] + self.spacer)\n gl.glVertex3f(vec[0], vec[1] + self.spacer, vec[2])\n gl.glVertex3f(vec[0] + self.spacer, vec[1] + self.spacer, vec[2])\n # BACK FACE\n gl.glVertex3f(vec[0] + self.spacer, vec[1], vec[2] + self.spacer)\n gl.glVertex3f(vec[0], vec[1], vec[2] + self.spacer)\n gl.glVertex3f(vec[0], vec[1], vec[2])\n gl.glVertex3f(vec[0] + self.spacer, vec[1], vec[2])\n # RIGHT FACE\n gl.glVertex3f(vec[0] + self.spacer, vec[1], vec[2] + self.spacer)\n gl.glVertex3f(vec[0] + self.spacer, vec[1] + self.spacer, vec[2] + \\\n self.spacer)\n gl.glVertex3f(vec[0] + self.spacer, vec[1] + self.spacer, vec[2])\n gl.glVertex3f(vec[0] + self.spacer, vec[1], vec[2])\n # LEFT FACE\n gl.glVertex3f(vec[0], vec[1] + self.spacer, vec[2] + self.spacer)\n gl.glVertex3f(vec[0], vec[1], vec[2] + self.spacer)\n gl.glVertex3f(vec[0], vec[1], vec[2])\n gl.glVertex3f(vec[0], vec[1] + self.spacer, vec[2])\n gl.glEnd()",
"def draw_cube(self, window):\n size = pygame.display.get_surface().get_size()\n width = (size[0]/4)\n\n window.fill((000,000,000))\n\n self.draw_face(\"U\", window, (0 + (width*1), 0 + (width*0)), width)\n self.draw_face(\"L\", window, (0 + (width*0), 0 + (width*1)), width)\n self.draw_face(\"F\", window, (0 + (width*1) * 1, 0 + (width*1)), width)\n self.draw_face(\"R\", window, (0 + (width*2), 0 + (width*1)), width)\n self.draw_face(\"B\", window, (0 + (width*3), 0 + (width*1)), width)\n self.draw_face(\"D\", window, (0 + (width*1), 0 + (width*2)), width)\n\n pygame.display.update()",
"def draw( self ):\r\n print \"Drawing cuboid!\"\r\n glTranslated( *self.pos3D ) # This moves the origin of drawing , so that we can use the above coordinates at each draw location\r\n if self.rotnByOGL:\r\n glRotated( self.thetaDeg , *self.rotAxis )\r\n # glTranslated( 0 , 0 , 0 ) # This moves the origin of drawing , so that we can use the above coordinates at each draw location\r\n print \"DEBUG:\" , \"Translated to\" , 0 , 0 , 0\r\n glColor3ub( *self.color ) # Get the color according to the voxel type\r\n print \"DEBUG:\" , \"Set color to\" , self.color\r\n pyglet.graphics.draw_indexed( \r\n 8 , # --------------------- Number of seqential triplet in vertex list\r\n GL_QUADS , # -------------- Draw quadrilaterals\r\n self.indices , # ---------- Indices where the coordinates are stored\r\n ( 'v3f' , self.vertX ) # vertex list , OpenGL offers an optimized vertex list object , but this is not it\r\n ) # 'v3i' # This is for integers I suppose!\r\n \r\n glColor3ub( *self.colorLine )\r\n pyglet.gl.glLineWidth( 3 )\r\n pyglet.graphics.draw_indexed( \r\n 8 , # --------------------- Number of seqential triplet in vertex list\r\n GL_LINES , # -------------- Draw quadrilaterals\r\n self.linDices , # ---------- Indices where the coordinates are stored\r\n ( 'v3f' , self.vertX ) # vertex list , OpenGL offers an optimized vertex list object , but this is not it\r\n ) # 'v3i' # This is for integers I suppose!\r\n \r\n print \"DEBUG:\" , \"Indices\"\r\n print self.indices \r\n print \"DEBUG:\" , \"Vertices\"\r\n print self.vertices \r\n \"\"\" URL: http://pyglet.readthedocs.io/en/pyglet-1.2-maintenance/programming_guide/graphics.html#vertex-lists\r\n \r\n There is a significant overhead in using pyglet.graphics.draw and pyglet.graphics.draw_indexed due to pyglet \r\n interpreting and formatting the vertex data for the video device. Usually the data drawn in each frame (of an animation) \r\n is identical or very similar to the previous frame, so this overhead is unnecessarily repeated.\r\n \r\n A VertexList is a list of vertices and their attributes, stored in an efficient manner that’s suitable for direct \r\n upload to the video card. On newer video cards (supporting OpenGL 1.5 or later) the data is actually stored in video memory.\r\n \"\"\"\r\n if self.rotnByOGL:\r\n glRotated( -self.thetaDeg , *self.rotAxis )\r\n glTranslated( *np.multiply( self.pos3D , -1 ) ) # Reset the transform coordinates\r\n print \"DEBUG:\" , \"Translated to\" , 0 , 0 , 0\r\n print \"Done drawing!\"",
"def draw_cuboid(self, x_pos, z_pos, half_width, half_depth, height):\n GL.glBegin(GL.GL_QUADS)\n GL.glNormal3f(0, -1, 0)\n GL.glVertex3f(x_pos - half_width, -6, z_pos - half_depth)\n GL.glVertex3f(x_pos + half_width, -6, z_pos - half_depth)\n GL.glVertex3f(x_pos + half_width, -6, z_pos + half_depth)\n GL.glVertex3f(x_pos - half_width, -6, z_pos + half_depth)\n GL.glNormal3f(0, 1, 0)\n GL.glVertex3f(x_pos + half_width, -6 + height, z_pos - half_depth)\n GL.glVertex3f(x_pos - half_width, -6 + height, z_pos - half_depth)\n GL.glVertex3f(x_pos - half_width, -6 + height, z_pos + half_depth)\n GL.glVertex3f(x_pos + half_width, -6 + height, z_pos + half_depth)\n GL.glNormal3f(-1, 0, 0)\n GL.glVertex3f(x_pos - half_width, -6 + height, z_pos - half_depth)\n GL.glVertex3f(x_pos - half_width, -6, z_pos - half_depth)\n GL.glVertex3f(x_pos - half_width, -6, z_pos + half_depth)\n GL.glVertex3f(x_pos - half_width, -6 + height, z_pos + half_depth)\n GL.glNormal3f(1, 0, 0)\n GL.glVertex3f(x_pos + half_width, -6, z_pos - half_depth)\n GL.glVertex3f(x_pos + half_width, -6 + height, z_pos - half_depth)\n GL.glVertex3f(x_pos + half_width, -6 + height, z_pos + half_depth)\n GL.glVertex3f(x_pos + half_width, -6, z_pos + half_depth)\n GL.glNormal3f(0, 0, -1)\n GL.glVertex3f(x_pos - half_width, -6, z_pos - half_depth)\n GL.glVertex3f(x_pos - half_width, -6 + height, z_pos - half_depth)\n GL.glVertex3f(x_pos + half_width, -6 + height, z_pos - half_depth)\n GL.glVertex3f(x_pos + half_width, -6, z_pos - half_depth)\n GL.glNormal3f(0, 0, 1)\n GL.glVertex3f(x_pos - half_width, -6 + height, z_pos + half_depth)\n GL.glVertex3f(x_pos - half_width, -6, z_pos + half_depth)\n GL.glVertex3f(x_pos + half_width, -6, z_pos + half_depth)\n GL.glVertex3f(x_pos + half_width, -6 + height, z_pos + half_depth)\n GL.glEnd()",
"def drawPlane(width, height, texture):\n glBindTexture(GL_TEXTURE_2D, texture)\n glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_DECAL) # try GL_DECAL/GL_REPLACE/GL_MODULATE\n glHint(GL_PERSPECTIVE_CORRECTION_HINT, GL_NICEST) # try GL_NICEST/GL_FASTEST\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT) # try GL_CLAMP/GL_REPEAT/GL_CLAMP_TO_EDGE\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT)\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR) # try GL_LINEAR/GL_NEAREST\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)\n\n # Enable/Disable each time or OpenGL ALWAYS expects texturing!\n glEnable(GL_TEXTURE_2D)\n\n ex = width / 2\n sx = -ex\n ey = height\n sy = 0\n glBegin(GL_QUADS)\n glNormal3f(0, 0, 1)\n glTexCoord2f(0, 0)\n glVertex3f(sx, sy, 0)\n glTexCoord2f(2, 0)\n glVertex3f(ex, sy, 0)\n glTexCoord2f(2, 2)\n glVertex3f(ex, ey, 0)\n glTexCoord2f(0, 2)\n glVertex3f(sx, ey, 0)\n glEnd()\n\n glDisable(GL_TEXTURE_2D)",
"def create_cube(color=COLOR_WHITE):\n a = Point3(-1.0, -1.0, -1.0)\n b = Point3(1.0, -1.0, -1.0)\n c = Point3(1.0, -1.0, 1.0)\n d = Point3(-1.0, -1.0, 1.0)\n e = Point3(-1.0, 1.0, -1.0)\n f = Point3(1.0, 1.0, -1.0)\n g = Point3(1.0, 1.0, 1.0)\n h = Point3(-1.0, 1.0, 1.0)\n\n obj = glGenLists(1)\n glNewList(obj, GL_COMPILE)\n glPushMatrix()\n glBegin(GL_QUADS)\n glColor4fv(color)\n drawVertexListCreateNormal([a, b, c, d])\n drawVertexListCreateNormal([b, f, g, c])\n drawVertexListCreateNormal([f, e, h, g])\n drawVertexListCreateNormal([e, a, d, h])\n drawVertexListCreateNormal([d, c, g, h])\n drawVertexListCreateNormal([a, e, f, b])\n glEnd()\n glPopMatrix()\n glEndList()\n return obj",
"def draw_cube(self, points, color=(255, 0, 0)):\n\n # draw front\n self.draw_line(points[0], points[1], color)\n self.draw_line(points[1], points[2], color)\n self.draw_line(points[3], points[2], color)\n self.draw_line(points[3], points[0], color)\n\n # draw back\n self.draw_line(points[4], points[5], color)\n self.draw_line(points[6], points[5], color)\n self.draw_line(points[6], points[7], color)\n self.draw_line(points[4], points[7], color)\n\n # draw sides\n self.draw_line(points[0], points[4], color)\n self.draw_line(points[7], points[3], color)\n self.draw_line(points[5], points[1], color)\n self.draw_line(points[2], points[6], color)\n\n # draw dots\n self.draw_dot(points[0], point_color=color, point_radius=4)\n self.draw_dot(points[1], point_color=color, point_radius=4)\n\n # draw x on the top\n self.draw_line(points[0], points[5], color)\n self.draw_line(points[1], points[4], color)",
"def render(self):\n GL.glColor(*self._color)\n\n GL.glLoadIdentity()\n GL.glTranslate(self._x, self._y, 0)\n\n GL.glBegin(GL.GL_QUADS)\n GL.glVertex3f(0, 0, 0)\n GL.glVertex3f(self._width, 0, 0)\n GL.glVertex3f(self._width, self._height, 0)\n GL.glVertex3f(0, self._height, 0)\n GL.glEnd()",
"def draw(self):\n\n glEnable(self.texture.target)\n glBindTexture(self.texture.target, self.texture.id)\n if self.mipmaps:\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR)",
"def testRendersSimpleCube(self):\n\n model_transforms = camera_utils.euler_matrices(\n [[-20.0, 0.0, 60.0], [45.0, 60.0, 0.0]])[:, :3, :3]\n\n vertices_world_space = torch.matmul(\n torch.stack([self.cube_vertices, self.cube_vertices]),\n model_transforms.transpose())\n\n normals_world_space = torch.matmul(\n torch.stack([self.cube_normals, self.cube_normals]),\n model_transforms.transpose())\n\n # camera position:\n eye = torch.tensor([[0.0, 0.0, 6.0], [0.0, 0.0, 6.0]], dtype=torch.float32)\n center = torch.tensor([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], dtype=torch.float32)\n world_up = torch.tensor([[0.0, 1.0, 0.0], [0.0, 1.0, 0.0]], dtype=torch.float32)\n image_width = 640\n image_height = 480\n light_positions = torch.tensor([[[0.0, 0.0, 6.0]], [[0.0, 0.0, 6.0]]])\n light_intensities = torch.ones([2, 1, 3], dtype=torch.float32)\n vertex_diffuse_colors = torch.ones_like(vertices_world_space, dtype=torch.float32)\n\n renders = mesh_renderer.mesh_renderer(\n vertices_world_space, self.cube_triangles, normals_world_space,\n vertex_diffuse_colors, eye, center, world_up, light_positions,\n light_intensities, image_width, image_height)\n\n for image_id in range(renders.shape[0]):\n target_image_name = \"Gray_Cube_%i.png\" % image_id\n baseline_image_path = os.path.join(self.test_data_directory,\n target_image_name)\n test_utils.expect_image_file_and_render_are_near(\n self, baseline_image_path, renders[image_id, :, :, :])",
"def cube(im_in, azimuth=30., elevation=45., filename=None,\n do_axis=True, show_label=True,\n cube_label = {'x':'x', 'y':'y', 't':'t'},\n colormap='gray', roll=-180., vmin=0., vmax=1.,\n figsize=figsize, dpi=300, **kwargs):\n im = im_in.copy()\n\n N_X, N_Y, N_frame = im.shape\n fx, fy, ft = get_grids(N_X, N_Y, N_frame)\n import numpy as np\n from vispy import app, scene, use\n try:\n AffineTransform = scene.transforms.AffineTransform\n except:\n AffineTransform = scene.transforms.MatrixTransform\n\n use(app='pyglet', gl='pyopengl2')\n from vispy.util.transforms import perspective, translate, rotate\n canvas = scene.SceneCanvas(size=figsize, bgcolor='white', dpi=dpi)\n view = canvas.central_widget.add_view()\n\n# frame = scene.visuals.Cube(size = (N_X/2, N_frame/2, N_Y/2), color=(0., 0., 0., 0.),\n# edge_color='k',\n# parent=view.scene)\n for p in ([1, 1, 1, -1, 1, 1], [1, 1, -1, -1, 1, -1], [1, -1, 1, -1, -1, 1],[1, -1, -1, -1, -1, -1],\n [1, 1, 1, 1, -1, 1], [-1, 1, 1, -1, -1, 1], [1, 1, -1, 1, -1, -1], [-1, 1, -1, -1, -1, -1],\n [1, 1, 1, 1, 1, -1], [-1, 1, 1, -1, 1, -1], [1, -1, 1, 1, -1, -1], [-1, -1, 1, -1, -1, -1]):\n# line = scene.visuals.Line(pos=np.array([[p[0]*N_Y/2, p[1]*N_X/2, p[2]*N_frame/2], [p[3]*N_Y/2, p[4]*N_X/2, p[5]*N_frame/2]]), color='black', parent=view.scene)\n line = scene.visuals.Line(pos=np.array([[p[0]*N_X/2, p[1]*N_frame/2, p[2]*N_Y/2],\n [p[3]*N_X/2, p[4]*N_frame/2, p[5]*N_Y/2]]), color='black', parent=view.scene)\n\n opts = {'parent':view.scene, 'cmap':'grays', 'clim':(0., 1.)}\n image_xy = scene.visuals.Image(np.rot90(im[:, :, 0], 3), **opts)\n tr_xy = AffineTransform()\n tr_xy.rotate(90, (1, 0, 0))\n tr_xy.translate((-N_X/2, -N_frame/2, -N_Y/2))\n image_xy.transform = tr_xy\n\n image_xt = scene.visuals.Image(np.fliplr(im[:, -1, :]), **opts)\n tr_xt = AffineTransform()\n tr_xt.rotate(90, (0, 0, 1))\n tr_xt.translate((N_X/2, -N_frame/2, N_Y/2))\n image_xt.transform = tr_xt\n\n image_yt = scene.visuals.Image(np.rot90(im[-1, :, :], 1), **opts)\n tr_yt = AffineTransform()\n tr_yt.rotate(90, (0, 1, 0))\n tr_yt.translate((+N_X/2, -N_frame/2, N_Y/2))\n image_yt.transform = tr_yt\n\n if do_axis:\n t = {}\n for text in ['x', 'y', 't']:\n t[text] = scene.visuals.Text(cube_label[text], parent=canvas.scene, face='Helvetica', color='black')\n t[text].font_size = 8\n t['x'].pos = canvas.size[0] // 3, canvas.size[1] - canvas.size[1] // 8\n t['t'].pos = canvas.size[0] - canvas.size[0] // 5, canvas.size[1] - canvas.size[1] // 6\n t['y'].pos = canvas.size[0] // 12, canvas.size[1] // 2\n\n cam = scene.TurntableCamera(elevation=35, azimuth=30)\n cam.fov = 45\n cam.scale_factor = N_X * 1.7\n if do_axis: margin = 1.3\n else: margin = 1\n cam.set_range((-N_X/2, N_X/2), (-N_Y/2*margin, N_Y/2/margin), (-N_frame/2, N_frame/2))\n view.camera = cam\n if not(filename is None):\n im = canvas.render()\n app.quit()\n import vispy.io as io\n io.write_png(filename, im)\n else:\n app.quit()\n return im",
"def enable(self):\n\t\tglEnable(GL_TEXTURE_3D)\n\t\tglTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_S, GL_REPEAT)\n\t\tglTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_T, GL_REPEAT)\n\t\tglTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_R, GL_REPEAT)\n\t\tglTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)\n\t\tglTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)",
"def drawFloor(width, height, texture):\n glBindTexture(GL_TEXTURE_2D, texture)\n glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_REPLACE) # try GL_DECAL/GL_REPLACE/GL_MODULATE\n glHint(GL_PERSPECTIVE_CORRECTION_HINT, GL_NICEST) # try GL_NICEST/GL_FASTEST\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE) # try GL_CLAMP/GL_REPEAT/GL_CLAMP_TO_EDGE\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE)\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR) # try GL_LINEAR/GL_NEAREST\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)\n\n sx = width / 2\n ex = -sx\n sz = height / 2\n ez = -sz\n\n # Enable/Disable each time or OpenGL ALWAYS expects texturing!\n glEnable(GL_TEXTURE_2D)\n\n glBegin(GL_QUADS)\n glTexCoord2f(0, 0)\n glVertex3f(sx, 0, sz)\n glTexCoord2f(0, 1)\n glVertex3f(sx, 0, ez)\n glTexCoord2f(1, 1)\n glVertex3f(ex, 0, ez)\n glTexCoord2f(1, 0)\n glVertex3f(ex, 0, sz)\n glEnd()\n\n glDisable(GL_TEXTURE_2D)",
"def cube(self):\n\n dims = self.voxels.shape\n max_dim = max(dims)\n \n x_target = (max_dim - dims[0]) / 2\n y_target = (max_dim - dims[1]) / 2\n z_target = (max_dim - dims[2]) / 2\n\n self.voxels = np.pad(self.voxels,\n ((int(np.ceil(x_target)), int(np.floor(x_target))),\n (int(np.ceil(y_target)), int(np.floor(y_target))),\n (int(np.ceil(z_target)), int(np.floor(z_target)))),\n 'constant',\n constant_values=(0))\n\n self.point_position = self.point_position + [np.ceil(z_target),\n np.ceil(y_target),\n np.ceil(x_target)]\n\n return(self)",
"def mlab_plt_cube(xmin, xmax, ymin, ymax, zmin, zmax):\n faces = cube_faces(xmin, xmax, ymin, ymax, zmin, zmax)\n for grid in faces:\n x, y, z = grid\n mlab.mesh(x, y, z, opacity=0.1, color=(0.1, 0.2, 0.3))",
"def create_cube(scale=(1.0,1.0,1.0), st=False, rgba=False, dtype='float32', type='triangles'):\n\n shape = [24, 3]\n rgba_offset = 3\n\n width, height, depth = scale\n # half the dimensions\n width /= 2.0\n height /= 2.0\n depth /= 2.0\n\n vertices = np.array([\n # front\n # top right\n ( width, height, depth,),\n # top left\n (-width, height, depth,),\n # bottom left\n (-width,-height, depth,),\n # bottom right\n ( width,-height, depth,),\n\n # right\n # top right\n ( width, height,-depth),\n # top left\n ( width, height, depth),\n # bottom left\n ( width,-height, depth),\n # bottom right\n ( width,-height,-depth),\n\n # back\n # top right\n (-width, height,-depth),\n # top left\n ( width, height,-depth),\n # bottom left\n ( width,-height,-depth),\n # bottom right\n (-width,-height,-depth),\n\n # left\n # top right\n (-width, height, depth),\n # top left\n (-width, height,-depth),\n # bottom left\n (-width,-height,-depth),\n # bottom right\n (-width,-height, depth),\n\n # top\n # top right\n ( width, height,-depth),\n # top left\n (-width, height,-depth),\n # bottom left\n (-width, height, depth),\n # bottom right\n ( width, height, depth),\n\n # bottom\n # top right\n ( width,-height, depth),\n # top left\n (-width,-height, depth),\n # bottom left\n (-width,-height,-depth),\n # bottom right\n ( width,-height,-depth),\n ], dtype=dtype)\n\n st_values = None\n rgba_values = None\n\n if st:\n # default st values\n st_values = np.tile(\n np.array([\n (1.0, 1.0,),\n (0.0, 1.0,),\n (0.0, 0.0,),\n (1.0, 0.0,),\n ], dtype=dtype),\n (6,1,)\n )\n\n if isinstance(st, bool):\n pass\n elif isinstance(st, (int, float)):\n st_values *= st\n elif isinstance(st, (list, tuple, np.ndarray)):\n st = np.array(st, dtype=dtype)\n if st.shape == (2,2,):\n # min / max\n st_values *= st[1] - st[0]\n st_values += st[0]\n elif st.shape == (4,2,):\n # per face st values specified manually\n st_values[:] = np.tile(st, (6,1,))\n elif st.shape == (6,2,):\n # st values specified manually\n st_values[:] = st\n else:\n raise ValueError('Invalid shape for st')\n else:\n raise ValueError('Invalid value for st')\n\n shape[-1] += st_values.shape[-1]\n rgba_offset += st_values.shape[-1]\n\n if rgba:\n # default rgba values\n rgba_values = np.tile(np.array([1.0, 1.0, 1.0, 1.0], dtype=dtype), (24,1,))\n\n if isinstance(rgba, bool):\n pass\n elif isinstance(rgba, (int, float)):\n # int / float expands to RGBA with all values == value\n rgba_values *= rgba \n elif isinstance(rgba, (list, tuple, np.ndarray)):\n rgba = np.array(rgba, dtype=dtype)\n\n if rgba.shape == (3,):\n rgba_values = np.tile(rgba, (24,1,))\n elif rgba.shape == (4,):\n rgba_values[:] = np.tile(rgba, (24,1,))\n elif rgba.shape == (4,3,):\n rgba_values = np.tile(rgba, (6,1,))\n elif rgba.shape == (4,4,):\n rgba_values = np.tile(rgba, (6,1,))\n elif rgba.shape == (6,3,):\n rgba_values = np.repeat(rgba, 4, axis=0)\n elif rgba.shape == (6,4,):\n rgba_values = np.repeat(rgba, 4, axis=0)\n elif rgba.shape == (24,3,):\n rgba_values = rgba\n elif rgba.shape == (24,4,):\n rgba_values = rgba\n else:\n raise ValueError('Invalid shape for rgba')\n else:\n raise ValueError('Invalid value for rgba')\n\n shape[-1] += rgba_values.shape[-1]\n\n data = np.empty(shape, dtype=dtype)\n data[:,:3] = vertices\n if st_values is not None:\n data[:,3:5] = st_values\n if rgba_values is not None:\n data[:,rgba_offset:] = rgba_values\n\n if type == 'triangles':\n # counter clockwise\n # top right -> top left -> bottom left\n # top right -> bottom left -> bottom right\n indices = 
np.tile(np.array([0, 1, 2, 0, 2, 3], dtype='int'), (6,1))\n for face in range(6):\n indices[face] += (face * 4)\n indices.shape = (-1,)\n elif type == 'triangle_strip':\n raise NotImplementedError\n elif type == 'triangle_fan':\n raise NotImplementedError\n elif type == 'quads':\n raise NotImplementedError\n elif type == 'quad_strip':\n raise NotImplementedError\n else:\n raise ValueError('Unknown type')\n\n return data, indices",
"def cube_vertices(x, y, z, n):\n #def cube_vertices(self):\n # \"\"\" Return the vertices of the cube at position x, y, z with size 2*n.\n #\n # \"\"\"\n # return [\n # x-n,y+n,z-n, x-n,y+n,z+n, x+n,y+n,z+n, x+n,y+n,z-n, # top\n # x-n,y-n,z-n, x+n,y-n,z-n, x+n,y-n,z+n, x-n,y-n,z+n, # bottom\n # x-n,y-n,z-n, x-n,y-n,z+n, x-n,y+n,z+n, x-n,y+n,z-n, # left\n # x+n,y-n,z+n, x+n,y-n,z-n, x+n,y+n,z-n, x+n,y+n,z+n, # right\n # x-n,y-n,z+n, x+n,y-n,z+n, x+n,y+n,z+n, x-n,y+n,z+n, # front\n # x+n,y-n,z-n, x-n,y-n,z-n, x-n,y+n,z-n, x+n,y+n,z-n, # back\n # ]\n return [\n x-n,y+n,z-n, x-n,y+n,z+n, x+n,y+n,z+n, x+n,y+n,z-n, # top\n x-n,y-n,z-n, x+n,y-n,z-n, x+n,y-n,z+n, x-n,y-n,z+n, # bottom\n x-n,y-n,z-n, x-n,y-n,z+n, x-n,y+n,z+n, x-n,y+n,z-n, # left\n x+n,y-n,z+n, x+n,y-n,z-n, x+n,y+n,z-n, x+n,y+n,z+n, # right\n x-n,y-n,z+n, x+n,y-n,z+n, x+n,y+n,z+n, x-n,y+n,z+n, # front\n x+n,y-n,z-n, x-n,y-n,z-n, x-n,y+n,z-n, x+n,y+n,z-n, # back\n ]",
"def generate(self, x, y=0, z=0):\n lerp, grad, fade, p = self._lerp, self._grad, self._fade, self._p\n # Find unit cuve that contains point (x,y,z).\n X = int(floor(x)) & 255\n Y = int(floor(y)) & 255\n Z = int(floor(z)) & 255\n # Find relative (x,y,z) of point in cube.\n # Compute fade curves.\n x, y, z = x-floor(x), y-floor(y), z-floor(z)\n u, v, w = fade(x), fade(y), fade(z)\n # Hash coordinates of the cube corners.\n A = Y + p[X]\n B = Y + p[X+1]\n AA, AB, BA, BB = Z+p[A], Z+p[A+1], Z+p[B], Z+p[B+1]\n # Add blended results from the cube corners.\n return lerp(w, \n lerp(v, lerp(u, grad(p[AA ], x , y , z ), \n grad(p[BA ], x-1, y , z )),\n lerp(u, grad(p[AB ], x , y-1, z ), \n grad(p[BB ], x-1, y-1, z ))),\n lerp(v, lerp(u, grad(p[AA+1], x , y , z-1), \n grad(p[BA+1], x-1, y , z-1)),\n lerp(u, grad(p[AB+1], x , y-1, z-1), \n grad(p[BB+1], x-1, y-1, z-1))))",
"def load(self):\n\t\tglTexImage3D(GL_TEXTURE_3D, 0, GL_LUMINANCE16_ALPHA16, \n\t\t\tself.width, self.width, self.width, 0, GL_LUMINANCE_ALPHA, \n\t\t\tGL_UNSIGNED_SHORT, ctypes.byref(self.data))",
"def __init__(self, camera=None, light=None, name=\"\", z=0.1):\r\n super(Canvas, self).__init__(camera, light, name, x=0.0, y=0.0, z=0.0,\r\n rx=0.0, ry=0.0, rz=0.0, sx=1.0, sy=1.0, sz=1.0,\r\n cx=0.0, cy=0.0, cz=0.0)\r\n self.ttype = GL_TRIANGLES\r\n self.verts = []\r\n self.norms = []\r\n self.texcoords = []\r\n self.inds = []\r\n self.depth = z\r\n\r\n ww = 20.0\r\n hh = 20.0\r\n\r\n self.verts = ((-ww, -hh, z), (0.0, hh, z), (ww, -hh, z))\r\n self.norms = ((0, 0, -1), (0, 0, -1), (0, 0, -1))\r\n self.texcoords = ((0.0, 0.0), (0.5, 1.0), (1.0, 0.0))\r\n\r\n self.inds = ((0, 1, 2), ) #python quirk: comma for tuple with only one val\r\n\r\n self.buf = []\r\n self.buf.append(Buffer(self, self.verts, self.texcoords, self.inds, self.norms))",
"def dessinerRectangle(p0, p1, p2, p3,texture=None, textureRepeat = True, color = (0,1,0)):\n \n \n if texture == None:\n r,v,b = color\n glDisable(GL_TEXTURE_2D)\n glColor3f(r,v,b)\n glBegin(GL_QUADS)\n glVertex3f(p0[0],p0[1],p0[2])\n glVertex3f(p1[0],p1[1],p1[2])\n glVertex3f(p2[0],p2[1],p2[2])\n glVertex3f(p3[0],p3[1],p3[2])\n glEnd()\n glEnable(GL_TEXTURE_2D)\n else:\n\n if textureRepeat:\n a = fabs(p0[0] - p1[0])\n b = fabs(p0[1] - p1[1])\n c = fabs(p0[2] - p1[2])\n\n if a >= b and a >= c:\n d = a\n elif b >= a and b >= c:\n d = b\n elif c >= a and c >= b:\n d = c\n else:\n d = a\n\n a = fabs(p1[0] - p2[0])\n b = fabs(p1[1] - p2[1])\n c = fabs(p1[2] - p2[2])\n\n if a >= b and a >= c:\n e = a\n elif b >= a and b >= c:\n e = b\n elif c >= a and c >= b:\n e = c\n else:\n e = a\n\n del a\n del b\n del c\n\n glColor4f(1,1,1,1)\n glBindTexture(GL_TEXTURE_2D,texture.id)\n glBegin(GL_QUADS)\n glTexCoord2f(0.0,0.0)\n glVertex3f(p0[0],p0[1],p0[2])\n glTexCoord2f(d,0.0)\n glVertex3f(p1[0],p1[1],p1[2])\n glTexCoord2f(d,e)\n glVertex3f(p2[0],p2[1],p2[2])\n glTexCoord2f(0,e)\n glVertex3f(p3[0],p3[1],p3[2])\n glEnd()\n else:\n glColor4f(1,1,1,1)\n glBindTexture(GL_TEXTURE_2D,texture.id)\n glBegin(GL_QUADS)\n glTexCoord2f(0.0,0.0)\n glVertex3f(p0[0],p0[1],p0[2])\n glTexCoord2f(0.0,1.0)\n glVertex3f(p1[0],p1[1],p1[2])\n glTexCoord2f(1.0,1.0)\n glVertex3f(p2[0],p2[1],p2[2])\n glTexCoord2f(1.0,0.0)\n glVertex3f(p3[0],p3[1],p3[2])\n glEnd()",
"def setupTexture( self ):\n glEnable(GL_TEXTURE_2D)\n glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)\n glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)\n glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_DECAL)\n glBindTexture(GL_TEXTURE_2D, self.imageID)",
"def GetInTextureCoord(self):\n ...",
"def redraw(self):\n self.update_spin()\n glMatrixMode( GL_MODELVIEW )\n glLoadIdentity()\n\n glClear( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT )\n self.SetCurrent()\n texture_id = self.texture.texture_id\n width = self.texture.GetWidth()\n height = self.texture.GetHeight()\n\n self.texture.load_jpeg('Sunrise.jpg')\n self.texture.enable()\n\n glTranslatef( 0.0, 0.0, -5.0 )\n glRotatef( self.angle, 0, 1.0, 0 )\n yscale = 1.75\n xscale = yscale * self.x2yAspect\n\n glScalef( xscale, yscale, 2.0 )\n\n glBegin( GL_QUADS )\n # Lower left quad corner\n glTexCoord2f( self.offset, self.offset )\n glVertex3f(-1.0, -1.0, 0.0)\n\n # Lower right quad corner\n glTexCoord2f( self.replication + self.offset, self.offset )\n glVertex3f(1.0, -1.0, 0.0)\n\n # Upper right quad corner\n glTexCoord2f( self.replication + self.offset, self.replication + self.offset )\n glVertex3f(1.0, 1.0, 0.0)\n\n # Upper left quad corner\n glTexCoord2f( self.offset, self.replication + self.offset )\n glVertex3f(-1.0, 1.0, 0.0)\n glEnd()\n\n self.texture.disable()\n glutSwapBuffers()",
"def polyCube(*args, axis: Union[List[float, float, float], bool]=None, caching: bool=True,\n constructionHistory: bool=True, createUVs: Union[int, bool]=3, depth: Union[float,\n bool]=1.0, height: Union[float, bool]=1.0, name: AnyStr=\"\", nodeState: Union[int,\n bool]=0, object: bool=True, subdivisionsDepth: Union[int, bool]=1,\n subdivisionsHeight: Union[int, bool]=1, subdivisionsWidth: Union[int, bool]=1,\n subdivisionsX: Union[int, bool]=1, subdivisionsY: Union[int, bool]=1,\n subdivisionsZ: Union[int, bool]=1, texture: Union[int, bool]=1, width: Union[float,\n bool]=1.0, q=True, query=True, e=True, edit=True, **kwargs)->Union[List[AnyStr],\n Any]:\n pass",
"def drawReference(x, y, z, l):\r\n\r\n glPushMatrix()\r\n\r\n glColor3f(1.0, 0.0, 0.0)\r\n\r\n glBegin(GL_LINES)\r\n glNormal3f(0.0, 0.0, 1.0)\r\n glVertex3f(x, y, z)\r\n glVertex3f(x + l, y, z)\r\n glEnd()\r\n\r\n glColor3f(0.0, 1.0, 0.0)\r\n\r\n glBegin(GL_LINES)\r\n glNormal3f(0.0, 0.0, 1.0)\r\n glVertex3f(x, y, z)\r\n glVertex3f(x, y + l, z)\r\n glEnd()\r\n\r\n glColor3f(0.0, 0.0, 1.0)\r\n\r\n glBegin(GL_LINES)\r\n glNormal3f(0.0, 0.0, 1.0)\r\n glVertex3f(x, y, z)\r\n glVertex3f(x, y, z + l)\r\n glEnd()\r\n\r\n glPopMatrix()"
] | [
"0.8082414",
"0.790963",
"0.73804325",
"0.7115595",
"0.7054056",
"0.70481575",
"0.6607423",
"0.6532412",
"0.64749116",
"0.6449474",
"0.6229988",
"0.6222093",
"0.6187685",
"0.61688286",
"0.6162942",
"0.6152132",
"0.6028768",
"0.59879607",
"0.5984257",
"0.59456533",
"0.5933637",
"0.5922008",
"0.588329",
"0.5874939",
"0.58282906",
"0.5820203",
"0.58158016",
"0.5789243",
"0.5743865",
"0.5733173"
] | 0.80746436 | 1 |
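The cube-drawing snippets collected in the record above mostly share one pattern: eight corner vertices plus an index list naming four corners per face, handed to an indexed draw call. As an illustrative aside (a minimal sketch, not taken from any snippet above and making no OpenGL calls), the geometry side of that pattern looks like this; the cube_geometry helper and its face ordering are assumptions for demonstration only:

# Sketch: 8 cube corners and per-face quad indices, as used by indexed drawing.
# Purely illustrative; no OpenGL/pyglet calls, so it runs anywhere.
import itertools

def cube_geometry(cx=0.0, cy=0.0, cz=0.0, n=1.0):
    # Corners ordered as all (-/+x, -/+y, -/+z) sign combinations.
    corners = [(cx + sx * n, cy + sy * n, cz + sz * n)
               for sx, sy, sz in itertools.product((-1, 1), repeat=3)]
    # Each tuple lists the 4 corner indices of one face (a quad cycle).
    faces = [
        (0, 1, 3, 2),  # -x
        (4, 6, 7, 5),  # +x
        (0, 4, 5, 1),  # -y
        (2, 3, 7, 6),  # +y
        (0, 2, 6, 4),  # -z
        (1, 5, 7, 3),  # +z
    ]
    # Flatten for APIs that expect a 'v3f'-style vertex list plus indices.
    flat_vertices = [coord for corner in corners for coord in corner]
    flat_indices = [i for face in faces for i in face]
    return flat_vertices, flat_indices

if __name__ == "__main__":
    verts, idx = cube_geometry()
    assert len(verts) == 8 * 3 and len(idx) == 6 * 4
    print(len(verts), len(idx))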
Adds a message to the chat and scrolls down. | def add_message_to_chat(self, message: str):
scroll_length = (len(message) // Client.TEXTBOX_CHARACTER_LENGTH) + 1
self.chat_text.config(state=NORMAL)
self.chat_text.insert(END, message + '\n')
self.chat_text.yview_scroll(scroll_length, "units")
self.chat_text.config(state=DISABLED) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def list_add_chat_message(self, chat_message):\n self.chat_messages.append(chat_message)\n\n #logging.info(\"adding message: %s\" % chat_message.message)\n\n if len(self.chat_messages) > ENVIRONMENT['BUFFER_SIZE']:\n self.chat_messages.pop(0)\n\n # alert our polling clients\n self.new_message_event.set()\n self.new_message_event.clear()",
"def _add_to_chat_queue(self, message):\n self.chat_message_queue.appendleft(message)",
"def add_msg(self, msg):\n self.chat_win.addch('\\n')\n self.chat_win.addstr(\"[{}] {}\".format(\n datetime.strftime(datetime.now(), \"%H:%M\"), msg)\n )\n self.refresh_all()",
"def new_message(self, message):\n self.message_counter += 1\n self.message_buffer.append(str(message))\n self.event_loop()",
"def add_message(self, msg):\n self.messages.append(msg)",
"def messageScrolled(self,message):\n from dialogs import speDialog\n if sys.platform!='win32':message='<font size=-2>%s</font>'%message\n speDialog.create(self, message, self.path)",
"def add_message(self, message):\n self.message_list.append(message)",
"def scrollUp(self, messages=1):\n self.scrollOffset -= messages\n self._recalculateCoordinates()",
"def scrollDown(self, messages=1):\n if self.scrollOffset < 1:\n self.scrollOffset += messages\n self._recalculateCoordinates()",
"def _log_append(self, msg):\n\t\tp = self._edit.get_buffer()\n\t\tstart,end = p.get_bounds()\n\t\tp.insert(end, msg)\n\t\tself._trunc_lines()\n\t\tself._edit.scroll_to_iter(p.get_end_iter(), 0.0)",
"def add_chat_message(self, message):\n try:\n data = message.to_json()\n key = ENVIRONMENT['REDIS_PREFIX'] + \"chat_messages:%s\" % self.channel_id\n \n logging.info(data)\n \n self.redis_server.rpush(key, data)\n self.redis_server.publish(ENVIRONMENT['REDIS_PREFIX'] + 'chat_messages', data)\n except Exception, e:\n logging.info(\"ERROR adding message %s: %s\" % (message, e))\n raise",
"def show_message(self, message):\n self.sense.show_message(\n message,\n scroll_speed=self.SCROLL_SPEED,\n text_colour=self.TEXT_COLOUR\n )",
"def __draw_message(self, message):\n x_offset = (curses.COLS - len(message)) // 2\n self.message_win.addstr(0, x_offset, message)",
"def send_message(self, message:str):\n self.chat.click()\n text_box = self.chat.find_element_by_xpath(\"//div[@class='_2_1wd copyable-text selectable-text' and @data-tab='6']\")\n text_box.click()\n text_box.send_keys(message)\n time.sleep(0.1)\n send_button = self.chat.find_element_by_xpath(\"//button[@class='_1E0Oz']\")\n send_button.click()",
"def display_message(self, message):\n with self.lock:\n self.messages_list.configure(state='normal')\n self.messages_list.insert(tk.END, message)\n self.messages_list.configure(state='disabled')\n self.messages_list.see(tk.END)",
"def display_message(self, message):\n with self.lock:\n self.messages_list.configure(state='normal')\n self.messages_list.insert(tk.END, message)\n self.messages_list.configure(state='disabled')\n self.messages_list.see(tk.END)",
"def __addmsg(self, msg: str) -> None:\n # region Docstring\n # endregion\n self.record += msg\n self.textbox.kill()\n self.textbox = UITextBox(\n html_text=self.record,\n relative_rect=Rect((0, 0), (self.size[0], self.size[1] - 25)),\n container=self,\n manager=self.ui_manager,\n )",
"async def chat_message(self, event):\n\t\t# Send a message down to the client\n\t\tprint(\"DocumentChatConsumer: chat_message from user #\" + str(event))\n\t\ttimestamp = calculate_timestamp(timezone.now())\n\t\tawait self.send_json(\n\t\t\t{\n\t\t\t\t\"msg_type\": MSG_TYPE_MESSAGE,\n\t\t\t\t\"annotationId\": event['annotationId'],\n\t\t\t\t\"username\": event[\"username\"],\n\t\t\t\t\"user_id\": event[\"user_id\"],\n\t\t\t\t\"xfdfString\": event[\"message\"],\n\t\t\t\t\"natural_timestamp\": timestamp,\n\t\t\t},\n\t\t)",
"async def chat_message(self, event):\n message = event['message']\n await self.send_json({\n 'message': message\n })",
"def append_message(self, message):\n if message['message_id'] in self.message_ids:\n return\n self.message_ids.append(message['message_id'])\n self.messages.append(message)",
"def scrollUp(self):\n if self.__firstShownLine > 0:\n self.__firstShownLine -= 1\n self.__refreshContent()\n else:\n curses.beep()",
"def ScrollMessage(text, color, repeat):\n text_area.text = text\n text_area.color = color\n\n # Start the message just off the side of the glasses\n x = display.width\n text_area.x = x\n\n # Determine the width of the message to scroll\n width = text_area.bounding_box[2]\n\n for _ in range(repeat):\n while x != -width:\n x = x - 1\n text_area.x = x\n\n # Update the switch and if it has been pressed abort scrolling this message\n switch.update()\n if not switch.value:\n return\n\n time.sleep(0.025) # adjust to change scrolling speed\n x = display.width",
"def updateChat(self, ):\n self.__redrawChat()",
"def send_messages(self):\r\n self.clear_screen()\r\n user_label = Label(self.root, text=\"Hello \" + self.username,\r\n font=self.title_font, bg=self.bg_color, height=2)\r\n user_label.pack(pady=10, padx=50)\r\n messages_frame = Frame(self.root)\r\n messages_frame.pack(padx=30, pady=10)\r\n scrollbar_msg = Scrollbar(messages_frame)\r\n scrollbar_msg.pack(side=RIGHT, fill=Y)\r\n write_message = Text(messages_frame, width=50, height=15, font=self.text_font,\r\n yscrollcommand=scrollbar_msg.set)\r\n write_message.pack()\r\n scrollbar_msg.config(command=write_message.yview)\r\n button_speech_rec = Button(self.root, text=\"listen\\nto speech\", font=self.text_font,\r\n height=2, width=20,\r\n command=lambda: self.create_speech_thread(write_message))\r\n button_speech_rec.pack(pady=10)\r\n button_send = Button(self.root, text=\"send\", font=self.text_font,\r\n height=2, width=20, command=lambda: self.send(write_message))\r\n button_send.pack(pady=10)\r\n button_send = Button(self.root, text=\"go back\", font=self.text_font,\r\n height=2, width=20, command=self.choose_path)\r\n button_send.pack(pady=10)",
"def add_message(self, message):\n try:\n self.send_loop(message)\n except AttributeError:\n raise UnsupportedMessageTypeError(message.__class__.__name__)",
"def append_message(self, message_object):\n self.messages.append(message_object)",
"async def new_message(self, message):\n user = self.scope['user']\n response_data = {\n 'message': message,\n 'username': user.get_full_name()\n }\n await self.create_chat_message(user, message)\n await self.channel_layer.group_send(\n self.conversation_name,\n {\n 'type': 'chat_message',\n 'response_data': json.dumps(response_data)\n }\n )",
"def text(message):\n global list_messages\n room = session.get('room')\n msg = session.get('name') + ':' + message['msg']\n list_messages.append(msg)\n addNewMsg(message,session)\n print ('size of list_messages ' + str(len(list_messages)) + ', session ' + str(session))\n emit('message', {'msg': msg}, room=room)",
"def add_message(self, msg_id, location, msg):\n\n self._messages.append((msg_id,location,msg))",
"def add_message(self, msg):\n msg_string = json.dumps(msg)\n self.redis_client.publish(self.message_channel, msg_string)\n self.redis_client.lpush(self.message_list, msg_string)\n self.redis_client.ltrim(self.message_list, 0,\n app.config[\"MAX_MESSAGES\"]-1)"
] | [
"0.6858262",
"0.68089944",
"0.6778456",
"0.67170316",
"0.66270477",
"0.65076065",
"0.6480786",
"0.6332747",
"0.6320004",
"0.6282979",
"0.6253987",
"0.6231954",
"0.6195",
"0.6192585",
"0.61682373",
"0.61682373",
"0.6153406",
"0.61134017",
"0.6108938",
"0.60916406",
"0.60653615",
"0.6035066",
"0.6014792",
"0.60144955",
"0.60139024",
"0.6002169",
"0.59412557",
"0.5919042",
"0.59148926",
"0.5905361"
] | 0.799143 | 0 |
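The record above pairs the chat-scrolling query with a Tkinter implementation that re-enables a disabled Text widget, appends the line, scrolls by a computed number of units, and locks the widget again. A minimal standalone sketch of that same pattern follows; the CHARS_PER_LINE constant, the append_and_scroll helper, and the window setup are assumptions standing in for the dataset's Client class, not part of it:

# Minimal sketch of the append-then-scroll pattern (plain Tkinter, assumed setup).
import tkinter as tk

CHARS_PER_LINE = 60  # assumed stand-in for Client.TEXTBOX_CHARACTER_LENGTH

def append_and_scroll(text_widget: tk.Text, message: str) -> None:
    # Unlock the read-only widget, append the message, scroll down, lock again.
    scroll_length = (len(message) // CHARS_PER_LINE) + 1
    text_widget.config(state=tk.NORMAL)
    text_widget.insert(tk.END, message + "\n")
    text_widget.yview_scroll(scroll_length, "units")
    text_widget.config(state=tk.DISABLED)

if __name__ == "__main__":
    root = tk.Tk()
    chat = tk.Text(root, width=CHARS_PER_LINE, height=10, state=tk.DISABLED)
    chat.pack()
    append_and_scroll(chat, "hello from the sketch")
    root.mainloop()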
Creates a data folder containing a 100-class subset of ImageNet, then creates a zipped copy of it | def zip_imagenet100c():
#First make sure the directory we are given is correct!
if not os.path.isdir(DATA_SRC_ROOT):
raise Exception("Bad filepath given")
	#create the destination directories if they don't exist
if not os.path.isdir(IMAGENET100_DIR):
os.mkdir(IMAGENET100_DIR)
#grab the subset wnids for the 100 class-subset
with open(IMAGENET100_CLASSES) as f:
subset_wnids = f.readlines()
subset_wnids = [x.strip() for x in subset_wnids] #list of the 100 WNIDs we grab
#Grab the names of all of the folders inside the root data source
#Structure is distortion/sub_distortion/level/wnids
for distortion in os.listdir(DATA_SRC_ROOT):
if distortion != "meta.bin":
print(distortion)
folder_path = os.path.join(DATA_SRC_ROOT, distortion)
if not os.path.isdir(folder_path):
continue
for sub_distortion in os.listdir(folder_path):
print(sub_distortion)
subfolder_path = os.path.join(folder_path, sub_distortion)
if not os.path.isdir(subfolder_path):
continue
for level in os.listdir(subfolder_path):
print(level)
level_path = os.path.join(subfolder_path, level)
				#grab the correct validation directories
for wnid in os.listdir(level_path):
wnid_path = os.path.join(level_path, wnid)
if not os.path.isdir(wnid_path):
continue
if wnid in subset_wnids:
dest_path = os.path.join(IMAGENET100_DIR, distortion, sub_distortion, level, wnid)
shutil.copytree(wnid_path, dest_path)
#copy the metadata bin file
meta_file = os.path.join(DATA_SRC_ROOT, 'meta.bin')
meta_dest = os.path.join(IMAGENET100_DIR, 'meta.bin')
shutil.copy(meta_file, meta_dest)
	#Archive the destination folder (tar)
shutil.make_archive(ZIP_PATH + '/ImageNet100C', 'tar', IMAGENET100_DIR) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_dataset(data_folder: str, dataset_file: str, targets_file: str = os.path.join('data', 'targets.pkl')):\n files = sorted(glob.glob(os.path.join(data_folder, '**/*.jpg'), recursive=True))\n images = []\n crop_sizes = []\n crop_centers = []\n targets = []\n for image in tqdm(files, desc='creating dataset', total=len(files)):\n img = Image.open(image)\n # quadruple dataset by vertical and horizontal flipping\n for i in range(4):\n if i == 1 or i == 3:\n img = img.transpose(Image.FLIP_LEFT_RIGHT)\n if i == 2:\n img = img.transpose(Image.FLIP_TOP_BOTTOM)\n x, y, w, h, cx, cy = get_random_image_values()\n resized = img.resize((y, x), Image.LANCZOS) # mind thee: x and y swapped\n arr = np.array(resized, dtype=np.float32)\n arr, target_array = create_cropped_data(np.copy(arr), (w, h), (cx, cy), crop_only=False)\n images.append(arr)\n crop_sizes.append((w, h))\n crop_centers.append((cx, cy))\n targets.append(target_array)\n data = {'images': images, 'crop_sizes': crop_sizes, 'crop_centers': crop_centers}\n # persist on harddrive\n with open(dataset_file, 'wb') as f:\n pickle.dump(data, f)\n with open(targets_file, 'wb') as f:\n pickle.dump(targets, f)\n print(f'created datset and saved it to {dataset_file} and targets to {targets_file}')",
"def zip_files():\n zipper = ZipFile(\"Moritz_Bunse_ML_project.zip\", \"w\")\n files_to_write = [\"poi_id.py\",\n \"my_classifier.pkl\",\n \"my_dataset.pkl\",\n \"my_feature_list.pkl\",\n \"tester.py\",\n \"Look+At+Enron+data+set.html\",\n \"Look At Enron data set.ipynb\",\n \"data_dict.pkl\",\n \"final_project_dataset.pkl\",\n \"img/Flow chart feature selection.png\"\n ]\n for filename in files_to_write:\n zipper.write(filename)\n\n zipper.close()",
"def data_directory(class_labels):\n\n dataset_folders = ['train','validation','test']\n object_class = class_labels\n os.mkdir(BASE_DIR)\n\n for folder in dataset_folders:\n for obj_cls in object_class:\n training_dir = BASE_DIR + os.sep +'{}'.format(folder)\n if not os.path.exists(BASE_DIR+os.sep +'{}'.format(folder)):\n os.mkdir(training_dir)\n class_dir = training_dir + os.sep + '{}'.format(obj_cls)\n if not os.path.exists(training_dir + os.sep + '{}'.format(obj_cls)):\n os.mkdir(class_dir)",
"def create_folders():\n if not os.path.exists(\"data/train-npy/\"):\n os.makedirs(\"data/train-npy/\")\n if not os.path.exists(\"data/test-npy/\"):\n os.makedirs(\"data/test-npy/\")\n if not os.path.exists(\"data/valid-npy/\"):\n os.makedirs(\"data/valid-npy/\")",
"def MakeDataSetFiles(dirname):\n\n\n if not os.path.exists(dirname):\n os.mkdir(dirname)\n if not os.path.exists(os.path.join(dirname, 'train')):\n os.mkdir(os.path.join(dirname, 'train'))\n if not os.path.exists(os.path.join(dirname, 'test')):\n os.mkdir(os.path.join(dirname, 'test'))\n data_train = fetch_20newsgroups(subset='train', categories=None, shuffle=True, random_state=42)\n data_test = fetch_20newsgroups(subset='test', categories=None, shuffle=True, random_state=42)\n\n if dirname[-1] == '/' or dirname[-1] == '\\\\':\n dirname = dirname[:-1]\n \n Util.WriteClassFile(data_train.target, os.path.join(dirname, 'train_classes.txt'))\n Util.WriteClassFile(data_test.target,os.path.join(dirname, 'test_classes.txt'))\n\n\n train_counter = 0;\n for doc in data_train.data:\n filename = 'train_' + str(train_counter).zfill(5);\n f = file(os.path.join(dirname, 'train', filename), 'w');\n f.write(doc.encode('ascii', 'ignore'));\n f.close();\n train_counter = train_counter + 1;\n\n test_counter = 0;\n for doc in data_test.data:\n filename = 'test_' + str(test_counter).zfill(5);\n f = file(os.path.join(dirname, 'test', filename), 'w');\n f.write(doc.encode('ascii', 'ignore'));\n f.close();\n test_counter = test_counter + 1;\n\n class_index = file(os.path.join(dirname, 'class_label_index.txt'), 'w')\n for label in data_train.target_names:\n class_index.write(label + '\\n')\n class_index.close()",
"def distributeDataset(destinationFolder, testFolder, trainFolder):\n \n # Set up directories for test and training data sets\n if not os.path.exists(testFolder):\n os.makedirs(testFolder)\n if not os.path.exists(trainFolder):\n os.makedirs(trainFolder)\n\n # Generate list of directories\n dirs = []\n for i in range(0,8):\n dirs.append(os.path.join(destinationFolder, \"NISTSpecialDatabase4GrayScaleImagesofFIGS\\\\sd04\\\\png_txt\\\\figs_\" + str(i)))\n\n # Extract Test data\n files = os.listdir(dirs[0])\n\n for filename in files:\n shutil.copy(os.path.join(dirs[0], filename), testFolder)\n shutil.rmtree(dirs[0])\n\n # Extract Train data\n for i in range(1,8):\n\n files = os.listdir(dirs[i])\n for filename in files:\n shutil.copy(os.path.join(dirs[i], filename), trainFolder)\n shutil.rmtree(dirs[i])\n shutil.rmtree(os.path.join(destinationFolder, \"NISTSpecialDatabase4GrayScaleImagesofFIGS\"))",
"def prepare_data_for_training(args):\n # Form the train/test splits and write them to disk\n dataset = data.Dataset(args)\n # get image classes and image counts in each class\n label_map = dataset.get_class_info()\n class_count = len(list(label_map.values()))\n # split the data and store it in log dir\n df_train, df_test = dataset.split_dataset()\n\n # perform dataset augmentations\n image_data = augment.Augmentation(args)\n # get the data gens for training and test images\n train_data_gen, _ = image_data.map_fn_train(df_train)\n test_data_gen, _ = image_data.map_fn_test(df_test)\n\n return train_data_gen, test_data_gen, df_train, df_test, class_count",
"def training_data_generation(DATA_DIR, img_height_size, img_width_size, label_list):\r\n \r\n img_ms_files = glob.glob(DATA_DIR + '\\\\Train_MS' + '\\\\Train_*.tif')\r\n img_pan_files = glob.glob(DATA_DIR + '\\\\Train_Pan' + '\\\\Train_*.tif')\r\n polygon_files = glob.glob(DATA_DIR + '\\\\Train_Polygons' + '\\\\Train_*.geojson')\r\n \r\n img_ms_array_list = []\r\n img_pan_array_list = []\r\n mask_array_list = []\r\n \r\n for file in range(len(img_ms_files)):\r\n with rasterio.open(img_ms_files[file]) as f:\r\n metadata = f.profile\r\n img_ms = np.transpose(f.read(tuple(np.arange(metadata['count']) + 1)), [1, 2, 0])\r\n \r\n with rasterio.open(img_pan_files[file]) as g:\r\n metadata_pan = g.profile\r\n img_pan = np.expand_dims(g.read(1), axis = 2)\r\n \r\n ms_to_pan_ratio = metadata['transform'][0] / metadata_pan['transform'][0]\r\n \r\n if (img_height_size % ms_to_pan_ratio) != 0 or (img_width_size % ms_to_pan_ratio) != 0:\r\n raise ValueError('Please make sure that both img_height_size and img_width_size can be divided by {}'.format(int(ms_to_pan_ratio)))\r\n \r\n mask = training_mask_generation(img_pan_files[file], polygon_files[file], labels = label_list)\r\n \r\n img_ms_array, img_pan_array, mask_array = image_clip_to_segment_and_convert(img_ms, img_pan, mask, ms_to_pan_ratio, \r\n img_height_size, img_width_size)\r\n \r\n img_ms_array_list.append(img_ms_array)\r\n img_pan_array_list.append(img_pan_array)\r\n mask_array_list.append(mask_array)\r\n \r\n img_ms_full_array = np.concatenate(img_ms_array_list, axis = 0)\r\n img_pan_full_array = np.concatenate(img_pan_array_list, axis = 0)\r\n mask_full_array = to_categorical(np.concatenate(mask_array_list, axis = 0), num_classes = len(label_list))\r\n \r\n return img_ms_full_array, img_pan_full_array, mask_full_array",
"def create_train_folder(df_train, target_path):\n folder_path = os.path.join(target_path, 'xray_preprocess/train')\n print(f'Create train set at: {folder_path}')\n for _, row in tqdm(df_train.iterrows(), total=df_train.shape[0]):\n if row['class']=='negative':\n destination_path = os.path.join(folder_path, 'negative')\n elif row['class']=='positive':\n destination_path = os.path.join(folder_path, 'positive')\n if not os.path.exists(destination_path):\n os.makedirs(destination_path) \n img = os.path.join(target_path, 'xray', 'train', row['filename'])\n shutil.copy(img, destination_path )",
"def prepare_data(src, dst):\n\n data_prefix = 'miniCelebA_'\n for split in ['train', 'val', 'test']:\n print('processing %s split' % split)\n if (not os.path.exists(os.path.join(dst, 'x_' + split + '.npy')) or not\n os.path.exists(os.path.join(dst, 'y_' + split + '.npy'))):\n labels = glob(os.path.join(src, split, '*'))\n no_sample = 0\n for lb in labels:\n no_sample += len(os.listdir(lb))\n\n x = np.zeros((no_sample, 224, 224, 3))\n y = np.zeros((no_sample, 20))\n count = 0\n for lb in labels:\n files = glob(os.path.join(lb, '*.png'))\n for f in files:\n print('processing file: %s, with label %s' % (f, lb.split('/')[-1]))\n y[count] = to_categorical(int(lb.split('/')[-1]), 20)\n img = misc.imresize(misc.imread(f), (224, 224), 'bicubic')\n if img.ndim == 2:\n img = np.expand_dims(img, -1)\n img = np.concatenate((img, img, img), axis=-1)\n x[count] = img\n\n count += 1\n\n assert count == no_sample, \"number of sample (%d) is different than number of read image (%d)\" % (\n no_sample, count)\n\n x = get_deep_feature(x)\n np.save(os.path.join(dst, data_prefix + 'x_' + split + '.npy'), x)\n np.save(os.path.join(dst, data_prefix + 'y_' + split + '.npy'), y)",
"def _make_dataset(input_dir, output_dir, image_size, margin, split='train'):\n input_dir = os.path.join(input_dir, split)\n\n output_root = os.path.join(output_dir, split)\n if not os.path.exists(output_root):\n os.makedirs(output_root)\n\n class_folders = glob.glob(os.path.join(input_dir, '*'))\n detector = MTCNN()\n\n for class_folder in class_folders:\n target_output_dir = os.path.join(output_root, class_folder.split('/')[-1])\n if not os.path.exists(target_output_dir):\n os.makedirs(target_output_dir)\n\n target_files = glob.glob(os.path.join(class_folder, '*'))\n logger.debug('processing %s...', class_folder)\n for file in target_files:\n img = cv2.imread(file)\n detect_result = detector.detect_faces(img)\n\n if not detect_result:\n logger.warning('WARNING: failed to detect face in file %s, skip', file)\n continue\n\n x0, y0, width, height = detect_result[0]['box']\n x1, y1 = x0 + width, y0 + height\n\n x0 = max(x0 - margin // 2, 0)\n y0 = max(y0 - margin // 2, 0)\n x1 = min(x1 + margin // 2, img.shape[1])\n y1 = min(y1 + margin // 2, img.shape[0])\n\n face_img = img[y0:y1, x0:x1, :]\n face_img = cv2.resize(face_img, dsize=(image_size, image_size),\n interpolation=cv2.INTER_LINEAR)\n\n filename = file.split('/')[-1]\n img_name = filename.split('.')[0]\n cv2.imwrite(os.path.join(target_output_dir, filename),\n face_img)\n with open(os.path.join(target_output_dir, img_name + '.txt'), 'w') as f:\n f.write('%d %d %d %d\\n' % (x0, y0, x1, y1))\n logger.debug('processing %s finished!', class_folder)",
"def make_dataset():\n\n\tnumberOfTrials = dataset_params.num_of_samples\n\tnumberOfTrials_train = int(numberOfTrials*0.8)\n\tnumberOfTrials_test = int(numberOfTrials*0.2)\n\n\tprint(\"==================================================\")\n\tprint(\"1. Generating Train images ......\")\n\tprint(\"\\nTrain image per variation\", numberOfTrials_train)\n\tmakeDataset(numberOfTrials_train, \"train\")\n\n\tprint(\"==================================================\")\n\tprint(\"2. Generating Test images ......\")\n\tprint(\"\\nTest image per variation\", numberOfTrials_test)\n\tmakeDataset(numberOfTrials_test, \"test\")\n\n\tprint(\"==================================================\")\n\tprint(\"Done!!!\")",
"def save_all_features(nb_samples, source=\"./datasets/D1/images/\", dest=\"./datasets/D1/features/\", input_size=(416, 416), batch_size=16):\n\n # check if the directory exists, and if not make it\n if not os.path.exists(dest):\n os.makedirs(dest)\n\n # define image height and width\n (img_height, img_width) = input_size\n\n # build the VGG16 network and extract features after every MaxPool layer\n model = VGG16(weights='imagenet', include_top=False)\n\n c1 = model.layers[-16].output\n c1 = GlobalAveragePooling2D()(c1)\n\n c2 = model.layers[-13].output\n c2 = GlobalAveragePooling2D()(c2)\n\n c3 = model.layers[-9].output\n c3 = GlobalAveragePooling2D()(c3)\n\n c4 = model.layers[-5].output\n c4 = GlobalAveragePooling2D()(c4)\n\n c5 = model.layers[-1].output\n c5 = GlobalAveragePooling2D()(c5)\n\n\n model = Model(inputs=model.input, outputs=(c1, c2, c3, c4, c5))\n\n # always save your weights after training or during training\n model.save_weights('first_try.h5')\n model.save('model_save')\n\n # define image generator without augmentation\n datagen = ImageDataGenerator(rescale=1. / 255.)\n\n generator = datagen.flow_from_directory(\n source,\n target_size=(img_height, img_width),\n batch_size=batch_size,\n class_mode=\"sparse\",\n shuffle=False)\n\n # generate and save features, labels and respective filenames\n steps = nb_samples / batch_size + 1\n X = model.predict_generator(generator, steps)\n Y = np.concatenate([generator.next()[1] for i in range(0, generator.samples, batch_size)])\n names = generator.filenames\n\n for n, i in enumerate(X):\n print(\"Saving \" + n + \" and \" + i)\n with open(dest + \"X-\" + str(img_height) + \"-c\" + str(n + 1) + \"-AVG.npy\", 'w') as f:\n np.save(f.name, i)\n\n if not os.path.exists(dest + \"Y.npy\"):\n with open(dest + \"Y.npy\", 'w') as f:\n np.save(f.name, Y)\n\n if not os.path.exists(dest + \"filenames.npy\"):\n with open(dest + \"filenames.npy\", 'w') as f:\n np.save(f.name, names)",
"def gen_data_dir(img_dir, id_label_dict, num_class, shuffle=True):\n img_file_path = gen_img_files(img_dir, shuffle)\n return gen_data_file(img_file_path, id_label_dict, num_class)",
"def create_noobj_folder(\n folder: PathLike, \n img_ext: str = \".jpg\",\n):\n folder = Path(folder).expanduser().resolve()\n images = glob(folder, img_ext)\n \n for image in images:\n filename = image.name\n _folder = image.parent.name\n path = folder / (image.stem + \".xml\")\n img_w, img_h = get_image_size(image)\n\n tree = ET.Element(\"annotation\")\n\n et_folder = ET.SubElement(tree, \"folder\")\n et_folder.text = _folder\n\n et_filename = ET.SubElement(tree, \"filename\")\n et_filename.text = filename\n\n et_path = ET.SubElement(tree, \"path\")\n et_path.text = str(path)\n\n et_img_size = ET.SubElement(tree, \"size\")\n ET.SubElement(et_img_size, \"width\").text = str(img_w)\n ET.SubElement(et_img_size, \"height\").text = str(img_h)\n ET.SubElement(et_img_size, \"depth\").text = \"3\"\n\n content = ET.tostring(tree, encoding=\"unicode\", pretty_print=True)\n try: \n path.write_text(content)\n except KeyboardInterrupt:\n path.write_text(content)\n exit()",
"def preprocess(data_path, dataset):\n il_data_path = os.path.join(data_path, 'il' + dataset)\n train_path = os.path.join(il_data_path, 'train')\n val_path = os.path.join(il_data_path, 'val')\n\n if os.path.isdir(il_data_path):\n return\n\n os.makedirs(train_path)\n os.makedirs(val_path)\n\n train_set = _datasets[dataset](data_path, train=True, download=True)\n val_set = _datasets[dataset](data_path, train=False, download=True)\n\n # dump pickles for each class\n for cur_set, cur_path in [[train_set, train_path], [val_set, val_path]]:\n for idx, item in enumerate(cur_set):\n label = item[1]\n if not os.path.exists(os.path.join(cur_path, str(label))):\n os.makedirs(os.path.join(cur_path, str(label)))\n with open(os.path.join(cur_path, str(label), str(idx) + '.p'), 'wb') as f:\n pickle.dump(item, f)",
"def exporting_cropped_images (fpath_tiff):\n src = rasterio.open(fpath_tiff, 'r')\n outfolder_irregular = '/train/irregular'\n outfolder_healthy = '/train/healthy'\n outfolder_concrete = '/train/concrete'\n outfolder_incomplete = '/train/incomplete'\n outfolder_other = '/train/other'\n outfolder = '/train/batch'\n #os.makedirs (outfolder, exist_ok = True)",
"def generate_nmnist_dataset(initial_size, input_dir, num_spikes, step_factor):\n image_dataset = np.rec.array(None, dtype=[('height', np.uint16),\n ('width', np.uint16),\n ('image_data', 'object'),\n ('label', np.uint32)],\n shape=initial_size)\n num_images = 0\n\n # loop through each folder within the test directories\n for i in range(0, 10):\n current_dir = input_dir + os.path.sep + str(i) + os.path.sep + '*.bin'\n print('Processing {}...'.format(current_dir))\n for filename in glob.iglob(current_dir):\n images = prepare_n_mnist(filename, True, num_spikes, step_factor)\n if num_images + len(images) >= image_dataset.size:\n image_dataset = np.resize(image_dataset,\n (num_images + len(images)) * 2)\n add_images_to_dataset(image_dataset, images, num_images, i, 28, 28)\n num_images += len(images)\n\n return image_dataset[0:num_images]",
"def compress_wrapper(args: Namespace) -> None:\n directory_path = os.path.join(DATASETS_DIR, args.directory)\n compress_datasets(directory_path, args.holdout)",
"def create_random_data(output_path: str, num_images: int = 5) -> None:\n train_path = os.path.join(output_path, \"train\")\n class1_train_path = os.path.join(train_path, \"class1\")\n class2_train_path = os.path.join(train_path, \"class2\")\n\n val_path = os.path.join(output_path, \"val\")\n class1_val_path = os.path.join(val_path, \"class1\")\n class2_val_path = os.path.join(val_path, \"class2\")\n\n test_path = os.path.join(output_path, \"test\")\n class1_test_path = os.path.join(test_path, \"class1\")\n class2_test_path = os.path.join(test_path, \"class2\")\n\n paths = [\n class1_train_path,\n class1_val_path,\n class1_test_path,\n class2_train_path,\n class2_val_path,\n class2_test_path,\n ]\n\n for path in paths:\n try:\n os.makedirs(path)\n except FileExistsError:\n pass\n\n for i in range(num_images):\n pixels = numpy.random.rand(64, 64, 3) * 255\n im = Image.fromarray(pixels.astype(\"uint8\")).convert(\"RGB\")\n im.save(os.path.join(path, f\"rand_image_{i}.jpeg\"))\n\n process_images(output_path)",
"def makeDataset(numberOfTrials, data_type):\n\n\tdata_folder = data_type + \"_images\"\n\tlabel_file = os.path.join(dataset_params.data_path, data_type + \"_lables.csv\")\n\n\tutils.create_directory(dataset_params.data_path)\n\tutils.create_directory(os.path.join(dataset_params.data_path, data_folder))\n\n\tallowedRadius = utils.defineShapePerimeter()\n\tcolorsRGB = utils.defineColorValues()\n\tshapeDict = utils.defineShapeSides()\n\tpadding = dataset_params.padding\n\n\tnum = 0\n\toutput_images = [[\"figNum\", \"shape\", \"color\", \"size\", \"background\", \"quadrant\", \"radius\"]]\n\tfor c in dataset_params.colors: # for all 7 foreground colors \n\t\tfor q in dataset_params.quadrants: # for all 4 quadratns \n\t\t\tfor s in dataset_params.shapes: # for all 5 shapes\n\t\t\t\tfor k in dataset_params.sizes: # for all 3 sizes\n\t\t\t\t\tfor b in dataset_params.backgrounds: # for all 3 background colors\n\t\t\t\t\t\tfor i in range(numberOfTrials):\n\t\t\t\t\t\t\tfileName = os.path.join(dataset_params.data_path, data_folder, str(num) + \".png\")\n\t\t\t\t\t\t\tpresentQuadrant = dataset_params.quadrants[q]\n\t\t\t\t\t\t\tradius = random.randint(allowedRadius[s][k][0],allowedRadius[s][k][1])\n\n\t\t\t\t\t\t\tif(presentQuadrant == 3):\n\t\t\t\t\t\t\t\txMin = 128 + padding\n\t\t\t\t\t\t\t\txMax = 255 - radius\n\t\t\t\t\t\t\t\tyMin = 128 + padding\n\t\t\t\t\t\t\t\tyMax = 255 - radius\n\n\t\t\t\t\t\t\telif(presentQuadrant == 2):\n\t\t\t\t\t\t\t\txMin = 0 + radius\n\t\t\t\t\t\t\t\txMax = 128 - padding\n\t\t\t\t\t\t\t\tyMin = 128 + padding\n\t\t\t\t\t\t\t\tyMax = 255 - radius\n\n\t\t\t\t\t\t\telif(presentQuadrant == 1):\n\t\t\t\t\t\t\t\txMin = 0 + radius\n\t\t\t\t\t\t\t\txMax = 128 - padding\n\t\t\t\t\t\t\t\tyMin = 0 + radius\n\t\t\t\t\t\t\t\tyMax = 128 - padding\n\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\txMin = 128 + padding\n\t\t\t\t\t\t\t\txMax = 255 - radius\n\t\t\t\t\t\t\t\tyMin = 0 + radius\n\t\t\t\t\t\t\t\tyMax = 128 - padding\n\n\t\t\t\t\t\t\txCenter = random.randint(xMin, xMax)\n\t\t\t\t\t\t\tyCenter = random.randint(yMin, yMax)\n\t\t\t\t\t\t\tcenter = [xCenter, yCenter]\n\n\t\t\t\t\t\t\tif(s == \"circle\"):\n\t\t\t\t\t\t\t\toutput_images.append([num, \"circle\", c, k, b, presentQuadrant, radius])\n\t\t\t\t\t\t\t\timg = makeCircle(c, radius, center, b, colorsRGB)\n\t\t\t\t\t\t\t\timg = img[:,:,::-1]\n\t\t\t\t\t\t\t\tcv2.imwrite(fileName, img)\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tn = shapeDict[s]\n\t\t\t\t\t\t\t\timg = makePolygon(center, n, radius, b, c, colorsRGB)\n\t\t\t\t\t\t\t\timg = img[:,:,::-1]\n\t\t\t\t\t\t\t\tcv2.imwrite(fileName, img)\n\t\t\t\t\t\t\t\toutput_images.append([num, s, c, k, b, presentQuadrant, radius])\n\t\t\t\t\t\t\tnum += 1\n\t\n\tprint(\"Number of image generated\", num)\n\n\tprint(\"Saving \" + data_type + \" data meta information to CSV ......\")\n\tdf = pd.DataFrame(output_images[1:], columns=output_images[0])\n\tdf.to_csv(label_file, index=False)\n\tprint(\"Saved \" + data_type + \" data meta information: \" + data_folder)\n\t\n\n\tprint(\"Saving \" + data_type + \" images data to npz(numpy) compressed file ......\")\n\tmake_npz_file(data_type)\n\tprint(\"Saved \" + data_type + \" images data to npz(numpy) compressed file!\")\n\t\n\treturn None",
"def creation_data_sets(quality, dataset, test_case=False):\n current_path = Path.cwd()\n if dataset == 0:\n (x_train, y_train), (x_test, y_test) = mnist.load_data()\n del y_train, y_test\n train_path = current_path.joinpath(\"Mnist_{}\".format(quality))\n test_path = current_path.joinpath(\"Mnist_{}_test\".format(quality))\n else:\n (x_train, y_train), (x_test, y_test) = cifar10.load_data()\n del y_train, y_test\n train_path = current_path.joinpath(\"Cifar-10_{}\".format(quality))\n test_path = current_path.joinpath(\"Cifar-10_{}_test\".format(quality))\n\n create_directories(train_path, test_path)\n convert(train_path, x_train, dataset, quality, test_case)\n convert(test_path, x_test, dataset, quality, test_case)",
"def make_data(config, data, label):\n if not os.path.isdir(os.path.join(os.getcwd(), config.checkpoint_dir)):\n os.makedirs(os.path.join(os.getcwd(), config.checkpoint_dir))\n\n if config.is_train:\n savepath = os.path.join(os.getcwd(), config.checkpoint_dir +'/train.h5')\n else:\n savepath = os.path.join(os.getcwd(), config.checkpoint_dir +'/test.h5')\n\n with h5py.File(savepath, 'w') as hf:\n hf.create_dataset('data', data=data)\n hf.create_dataset('label', data=label)",
"def copy_files():\n\n # Load the Knifey-Spoony dataset.\n # This is very fast as it only gathers lists of the files\n # and does not actually load the images into memory.\n dataset = load()\n\n # Copy the files to separate training- and test-dirs.\n dataset.copy_files(train_dir=train_dir, test_dir=test_dir)",
"def make_npz_file(data_type):\n\n\tdata_folder = data_type + \"_images\"\n\tlabel_file = os.path.join(dataset_params.data_path, data_type + \"_lables.csv\")\n\toutput_file = os.path.join(dataset_params.data_path, \"synthetic_\" + data_type + \"_data\")\n\tline_reader = csv.DictReader(open(label_file,\"r\"))\n\n\tdata = []\n\tlabels = []\n\tdata_points = 0\n\tfor row in line_reader:\n\t\timage_name = os.path.join(dataset_params.data_path,data_folder,row[\"figNum\"] + \".png\")\n\t\timage_data = cv2.imread(image_name, cv2.IMREAD_COLOR)\n\t\timage_data = cv2.cvtColor(image_data, cv2.COLOR_BGR2RGB)\n\t\timage_label = [int(dataset_params.shapes[row[\"shape\"]]), int(dataset_params.colors[row[\"color\"]]), int(dataset_params.sizes[row[\"size\"]]), int(row[\"quadrant\"]), int(dataset_params.backgrounds[row[\"background\"]]) ]\n\t\tdata.append(image_data)\n\t\tlabels.append(image_label)\n\t\tdata_points += 1\n\n\t# Converting list to data to np array\n\tdata = np.asarray(data)\n\tlabels = np.asarray(labels)\n\n\t# Printing log information\n\tprint(data_type, \"statistics being saved: \")\n\tprint(data_type, \"data shape\", data.shape)\n\tprint(data_type, \"label shape\", labels.shape)\n\n\t# saveing the file as npz file\n\tnp.savez_compressed(output_file, data=data, lables=labels)",
"def createDataset(sources,output,labels,sparse):\n global has_joblib\n out_path = str(output)\n # delete the output file\n if os.path.exists(os.path.abspath(out_path)):\n os.remove(os.path.abspath(out_path))\n \n # first, list the source files\n fpaths_src, fnames_src = utils.listFiles(directory=os.path.abspath(sources), ext='png')\n \n label_map={}\n \n # read the label file\n if not (labels == None):\n label_map = utils.readLabelMap(labels)\n # check that the numbers match\n print(\"Number of images in label map : %s\"%str(len(label_map.keys())-1))\n print(\"Number of images in source dir: %s\"%str(len(fpaths_src)))\n assert len(label_map.keys())-1 == len(fpaths_src)\n \n # generate KNN classifier\n if not (args.codebook == 'None' or args.codebook == None):\n args.knn = getKNNClassifier() \n else:\n args.knn = None\n \n # precompute number of images\n n_imgs = len(fpaths_src)\n \n # preallocate array\n # if augmentation, calculate (9*4+1)*n samples\n all_features_list = []\n \n # parallel implementation (default, if joblib available)\n if has_joblib:\n image_features = Parallel(n_jobs=args.njobs,verbose=5) (delayed(processImage)(fpaths_src, label_map, fnames_src, img_idx) for img_idx in range(n_imgs))\n # collect all images into a single matrix\n image_features = np.concatenate(image_features, axis=0)\n all_features_list.append(image_features)\n else:\n for img_idx in xrange(n_imgs):\n image_features = processImage(fpaths_src, label_map, fnames_src, img_idx)\n all_features_list.append(image_features)\n \n # make a 2D matrix from the list of features (stack all images vertically)\n feat_matrix = np.concatenate(all_features_list, axis=0).astype(np.float32) \n \n # do scaling of each feature dimension \n #if False:\n if not (args.scale == 0):\n print \"Scaling data...\"\n \n # preserve the labels\n label_vec = feat_matrix[:,0]\n feat_matrix = np.delete(feat_matrix,0,1)\n \n featurestats = np.zeros((2,feat_matrix.shape[1]))\n \n # use soft-normalization (zero-mean, unit var whitening)\n if (args.scale == 1):\n # if we specified featurestats from a training set, use them\n if not (args.featurestats == None):\n # load the statistics\n featurestats = loadFeatureStats()\n # featurestats contains 2 rows, first row = mean, second row = std\n # and n feature dimensions\n assert feat_matrix.shape[1]==featurestats.shape[1]\n else:\n pass\n \n \n # use hard-normalization \n elif (args.scale == 2):\n # if we specified featurestats from a training set, use them\n if not (args.featurestats == None):\n # load the statistics\n featurestats = loadFeatureStats()\n # the featurestats contains 2 rows, first row = min, second row = max \n # and n feature dimensions\n assert feat_matrix.shape[1]==featurestats.shape[1]\n else:\n pass\n \n \n # normalize each feature dimension\n for feat_idx in xrange(feat_matrix.shape[1]):\n feat_vec = feat_matrix[:,feat_idx]\n \n # soft-normalization (zero-mean, approx. 
unit variance)\n if (args.scale == 1): \n # if feature statistics are specified\n if not (args.featurestats == None):\n feat_mean = featurestats[0,feat_idx]\n feat_std = featurestats[1,feat_idx]\n else:\n # compute them from the data\n feat_mean = feat_vec.mean()\n feat_std = (feat_vec.std() + 1e-10)\n # store them \n featurestats[0,feat_idx] = feat_mean\n featurestats[1,feat_idx] = feat_std\n \n # shift to zero mean and (unit) variance\n feat_vec_scaled = (feat_vec - feat_mean) / (1.*feat_std)\n \n \n # hard-normalization (min/max = borders estimated from the (training) dataset)\n elif (args.scale == 2):\n if not (args.featurestats == None):\n feat_min = featurestats[0,feat_idx]\n feat_max = featurestats[1,feat_idx]\n else:\n # compute them freshly\n feat_min = np.min(feat_vec)\n feat_max = np.max(feat_vec)\n # store them \n featurestats[0,feat_idx] = feat_min\n featurestats[1,feat_idx] = feat_max\n \n # standardize/normalize between 0 and 1\n feat_vec_std = (feat_vec - feat_min) / (feat_max - feat_min + 1e-10) \n \n # linearly scale between -1 and 1 \n feat_vec_scaled = (1.0*feat_vec_std * (1 - -1)) - 1\n \n \n # set column back to matrix\n feat_matrix[:,feat_idx] = feat_vec_scaled\n \n # finally prepend the label_vec again\n feat_matrix = np.concatenate((np.reshape(label_vec,(feat_matrix.shape[0],1)),feat_matrix), axis=1)\n \n print \"Done.\"\n else:\n print \"Data may not be properly scaled, use the 'svm-scale' implementation of libsvm.\"\n \n if not (args.savefeaturestats == None):\n saveFeatureStats(featurestats) \n\n #Parallel(n_jobs=args.njobs, verbose=5)(delayed(function)(params) for i in range(10))\n # open the output file\n output_file = open(os.path.abspath(out_path), 'wb')\n\n # run through the feature matrix \n print \"Writing %s rows and %s cols to file...\"%(feat_matrix.shape)\n # parallel implementation (default, if joblib available)\n if has_joblib:\n lines = Parallel(n_jobs=args.njobs, verbose=5)(delayed(writeLine)(i, feat_matrix) for i in range(feat_matrix.shape[0]))\n output_file.writelines(lines) \n else:\n for i in xrange(feat_matrix.shape[0]):\n line = writeLine(i, feat_matrix)\n output_file.writelines(line)\n \n output_file.close()\n \n return 0",
"def download():\n\n trainset = torchvision.datasets.CIFAR10(root=paths.raw_cifar10_dir(), train=True, download=True)\n testset = torchvision.datasets.CIFAR10(root=paths.raw_cifar10_dir(), train=False, download=True)\n train_images = numpy.array(trainset.data)\n train_labels = numpy.array(trainset.targets)\n test_images = numpy.array(testset.data)\n test_labels = numpy.array(testset.targets)\n\n assert numpy.max(train_images) == 255\n\n train_images = train_images/255.\n test_images = test_images/255.\n\n utils.write_hdf5(paths.cifar10_train_images_file(), train_images.astype(numpy.float32))\n log('wrote %s' % paths.cifar10_train_images_file())\n utils.write_hdf5(paths.cifar10_test_images_file(), test_images.astype(numpy.float32))\n log('wrote %s' % paths.cifar10_test_images_file())\n utils.write_hdf5(paths.cifar10_train_labels_file(), train_labels.reshape(-1, 1).astype(numpy.int))\n log('wrote %s' % paths.cifar10_train_labels_file())\n utils.write_hdf5(paths.cifar10_test_labels_file(), test_labels.reshape(-1, 1).astype(numpy.int))\n log('wrote %s' % paths.cifar10_test_labels_file())",
"def prepare_data(self, *args, **kwargs):\n # get paths to train and test splits\n _split_paths = [os.path.join(self.path_to_data, split)\n for split in os.listdir(self.path_to_data)]\n\n # for each split [train, test]\n for _path in _split_paths:\n _img_classes = os.listdir(_path) # get subfolders representing each class\n self.splits[os.path.basename(_path)] = []\n\n # get the images in pairs with its corresponding class\n for _class in _img_classes:\n _data = self.get_img_text_pair(os.path.join(_path, _class))\n\n if os.path.basename(_path) == 'train':\n self.weights[self.encode_label(_class)] = len(_data)\n self.splits[os.path.basename(_path)].extend(_data)",
"def create_train_file(img_folder_path: str, train_file_path: str) -> None:\n files = []\n for ext in (\"*.gif\", \"*.png\", \"*.jpg\", \"*.bmp\"):\n img_path = glob(join(img_folder_path, ext))\n if img_path:\n files.extend(img_path)\n\n write_to_train_file(files, train_file_path)\n\n print(\"Training files are created in \" + img_folder_path)",
"def _create_layout(root_dir, subsets):\n _create_folder(os.path.join(root_dir, \"images\"))\n _create_folder(os.path.join(root_dir, \"labels\"))\n\n for subset in subsets:\n _create_folder(os.path.join(root_dir, \"images\", subset))\n _create_folder(os.path.join(root_dir, \"labels\", subset))"
] | [
"0.6529704",
"0.6481513",
"0.6435279",
"0.6418067",
"0.63397163",
"0.6227401",
"0.62035155",
"0.61983734",
"0.6165103",
"0.61225355",
"0.612228",
"0.60964",
"0.60848254",
"0.6073827",
"0.60563177",
"0.6047265",
"0.60234",
"0.6019958",
"0.60174394",
"0.6004536",
"0.5983722",
"0.5966851",
"0.59544486",
"0.59262556",
"0.5918337",
"0.591189",
"0.58968794",
"0.58944523",
"0.58807254",
"0.58793026"
] | 0.7347322 | 0 |
Show the popup and return True if accepted, False if canceled. | def popup(self):
return self.exec_() == QDialog.Accepted | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def verify_popup(self, type):",
"def _show_popup(self) -> None:\n\n top = tk.Toplevel()\n email_list_len = len(self.get_recipients())\n msg = tk.messagebox.askquestion('Confirm send emails', 'Are you sure you want to email {} client{}?'\n .format(email_list_len, \"s\" if email_list_len > 1 else \"\"),\n icon='warning')\n if msg == \"yes\":\n self._disable_buttons()\n email_process(self.get_recipients())\n top.destroy()\n else:\n top.destroy()",
"def __window_confirm(self, text):\n return True",
"def show_confirm_dialog(text):\n dialog = QDialog()\n interface = confirmGenerated.Ui_Dialog()\n interface.setupUi(dialog)\n interface.label.setText(text)\n if dialog.exec_() == 1:\n return True\n return False",
"def showOk(parent,message,title=''):\r\n return askStyled(parent,message,title,wx.OK)",
"def IsOk(self):\r\n \r\n return self.window != None",
"def acceptAlert(self):\n self.log_info(f\"Browser.acceptAlert: Accepting alert\")\n alert = self.CORE.switch_to.alert\n alert.accept()\n return",
"def alert_accept(self):\n self._alert_accept_cancel(True)",
"def is_shown(self):\n return self.page.q(css=self.MODAL_SELECTOR).present",
"def on_okButton_clicked(self):\n self.accept=True",
"def askOk(parent,message,title=''):\r\n return askStyled(parent,message,title,wx.OK|wx.CANCEL)",
"def run(self):\n # show the dialog\n self.dlg.show()\n # Run the dialog event loop\n result = self.dlg.exec_()\n\n # See if OK was pressed\n if result:\n # Do something useful here - delete the line containing pass and\n # substitute with your code.\n pass",
"def confirm(self):\n with self.handle_alert(confirm=True):\n self.q(css='button#confirm').first.click()",
"def run(self):\n # show the dialog\n self.dlg.show()\n # Run the dialog event loop\n result = self.dlg.exec_()\n # See if OK was pressed\n if result:\n # Do something useful here - delete the line containing pass and\n # substitute with your code.\n pass",
"def run(self):\n # show the dialog\n self.dlg.show()\n # Run the dialog event loop\n result = self.dlg.exec_()\n # See if OK was pressed\n if result:\n # Do something useful here - delete the line containing pass and\n # substitute with your code.\n pass",
"def run(self):\n # show the dialog\n self.dlg.show()\n # Run the dialog event loop\n result = self.dlg.exec_()\n # See if OK was pressed\n if result:\n # Do something useful here - delete the line containing pass and\n # substitute with your code.\n pass",
"def show_ok_message(self, msg, msecs=3):\n\n message.PopupMessage.success(msg, parent=self, duration=msecs, closable=True)",
"def askStyled(parent,message,title,style):\r\n dialog = wx.MessageDialog(parent,message,title,style)\r\n result = dialog.ShowModal()\r\n dialog.Destroy()\r\n return result in (wx.ID_OK,wx.ID_YES)",
"def _onOk(self):\n\n self.accepted = True\n self.close()",
"def _onOk(self):\n\n self.accepted = True\n self.close()",
"def show_popup(self, popup_type, popup_msg):\n # Setup the MessageBox\n msg = QMessageBox()\n\n # Title the window\n msg.setWindowTitle(f\"{popup_type}\")\n\n # Set text inside the window\n if popup_type == \"Error\":\n msg.setText(f\"Error: {popup_msg}\")\n elif popup_type == \"Success\":\n msg.setText(f\"Success: {popup_msg}\")\n\n # Set the icon\n if popup_type == \"Error\":\n msg.setIcon(QMessageBox.Warning)\n elif popup_type == \"Success\":\n msg.setIcon(QMessageBox.Information)\n\n # Add buttons to the bottom\n msg.setStandardButtons(QMessageBox.Cancel)\n\n x = msg.exec_()",
"def popup():\n msg = messagebox.askyesno('Warning', 'Are you sure you would like to submit?')\n if msg: # if user clicked yes\n save_txt()\n save_db()\n root.destroy()",
"def consent(s, eType, eVal):\n try:\n import maya.cmds as cmds # Is Maya active? Ask using their GUI\n answer = cmds.confirmDialog(t=eType.__name__, m=CONFIRM_MSG, b=(\"Yes\",\"No\"), db=\"Yes\", cb=\"No\", ds=\"No\")\n return \"Yes\" == answer\n except ImportError:\n return True # No means to ask? Ah well ...",
"def dialog(message, timeout=0, buttons=DIALOGBUTTON_OK):\n box = Dialogs(__name__, message, buttons)\n box.timeout = timeout\n return _retcode2bool(box.show())",
"def onAccepted():\n dialog.done(1)",
"def yes_no_cancel_popup(title=None,\n text=None):\n d = gtk.Dialog(title=title,\n parent=None,\n flags=gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,\n buttons=( gtk.STOCK_YES, gtk.RESPONSE_YES,\n gtk.STOCK_NO, gtk.RESPONSE_NO,\n gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL ))\n hb=gtk.HBox()\n hb.show()\n d.vbox.add(hb)\n\n i=gtk.Image()\n i.set_from_stock(gtk.STOCK_DIALOG_QUESTION, gtk.ICON_SIZE_DIALOG)\n i.show()\n hb.pack_start(i, expand=False)\n\n if text is not None:\n l=gtk.Label(text)\n l.show()\n hb.add(l)\n d.connect('key-press-event', dialog_keypressed_cb)\n\n d.show()\n center_on_mouse(d)\n retval=d.run()\n d.destroy()\n return retval",
"def isModal(self) -> bool:\n ...",
"def isModal(self) -> bool:\n ...",
"def isModal(self) -> bool:\n ...",
"def isModal(self) -> bool:\n ..."
] | [
"0.6642623",
"0.6464855",
"0.63802284",
"0.62683916",
"0.6240332",
"0.6218975",
"0.6172441",
"0.61592615",
"0.6059267",
"0.5996099",
"0.5937343",
"0.5933337",
"0.5915153",
"0.5883959",
"0.5883959",
"0.5883959",
"0.58716357",
"0.58673215",
"0.5851311",
"0.5851311",
"0.5847941",
"0.5827843",
"0.5816396",
"0.57947785",
"0.5788484",
"0.5782045",
"0.576139",
"0.576139",
"0.576139",
"0.576139"
] | 0.79470927 | 0 |
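The positive passage in the row above only makes sense on a QDialog subclass, since exec_() blocks until the dialog closes and returns its result code. A minimal usage sketch of that accept/cancel pattern, assuming PyQt5; MyDialog and its button box are illustrative, not taken from the record:

    import sys
    from PyQt5.QtWidgets import (QApplication, QDialog, QDialogButtonBox,
                                 QVBoxLayout)

    class MyDialog(QDialog):
        def __init__(self):
            super().__init__()
            buttons = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel)
            buttons.accepted.connect(self.accept)   # OK -> QDialog.Accepted
            buttons.rejected.connect(self.reject)   # Cancel/Esc -> QDialog.Rejected
            layout = QVBoxLayout(self)
            layout.addWidget(buttons)

        def popup(self):
            # exec_() blocks until the dialog closes, then returns the result code
            return self.exec_() == QDialog.Accepted

    if __name__ == "__main__":
        app = QApplication(sys.argv)
        print("accepted" if MyDialog().popup() else "canceled")

Mapping the modal result code onto a plain bool keeps the calling code free of Qt constants.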
Fill the heavy metal unit labels with the selected unit. | def set_hm_unit_display(self):
units = str(self.entries['units'].combobox.currentText())
self.ui.is_unitL1.setText(units)
self.ui.is_unitL2.setText(units)
self.ui.is_unitL3.setText(units)
self.ui.is_unitL4.setText(units) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def unit_label(self, unit_label):\n\n self._unit_label = unit_label",
"def unit_label(self, unit_label):\n\n self._unit_label = unit_label",
"def unit_label(self, unit_label):\n\n self._unit_label = unit_label",
"def update_units(self):\n unit_var_value = self.view.vars['unit'].get()\n if unit_var_value == 'm3ph':\n self.minran_u_label.config(text='m³/h')\n self.maxran_u_label.config(text='m³/h')\n self.points_tview.heading('vflow', text='Przepływ [m³/h]', anchor=tk.CENTER)\n elif unit_var_value == 'lps':\n self.minran_u_label.config(text='l/s')\n self.maxran_u_label.config(text='l/s')\n self.points_tview.heading('vflow', text='Przepływ [l/s]', anchor=tk.CENTER)\n self.view.vars['pump_eff_min'].convert_unit(unit_var_value)\n self.view.vars['pump_eff_max'].convert_unit(unit_var_value)\n self.view.vars['pump_characteristic'].convert_unit(unit_var_value)",
"def assign_unit(self):\n self.units = {}\n for unit in RADIAL_UNITS:\n if unit.REPR == \"2th_deg\":\n self.units[unit] = self.tth_deg\n elif unit.REPR == \"2th_rad\":\n self.units[unit] = self.tth_rad\n elif unit.REPR == \"q_nm^-1\":\n self.units[unit] = self.q_nm\n elif unit.REPR == \"q_A^-1\":\n self.units[unit] = self.q_A\n elif unit.REPR == \"r_mm\":\n self.units[unit] = self.r_mm\n else:\n logger.warning(\"Unit unknown to GUI %s\" % unit)",
"def set_unit(self,unit):\n self.unit = unit",
"def unitUpdate(self):\n newText = self.unitGroup.unitString()\n cursorPos = len(newText) - self.text().length() + self.cursorPosition()\n if cursorPos < 0: # cursor set to same distance from right end\n cursorPos = 0\n self.blockSignals(True)\n self.setText(newText)\n self.setCursorPosition(cursorPos)\n self.blockSignals(False)\n self.emit(QtCore.SIGNAL('unitChanged')) # update numEdit",
"def set_units(self, units):\n self.units = units",
"def setDataUnit(self, dataUnit):\n\t\tself.urmaswin.setDataUnit(dataUnit)",
"def update_units(self):\r\n self.units_index = self.UnitsComboBox.currentIndex()\r\n self.cmd = None\r\n if self.connected:\r\n self.cmd = self.unit_switch.get(self.units_index, None)\r\n self.I_source.write(self.cmd)\r\n self.update_header_string()",
"def set_unit(self, length='cm'):\n if length == 'cm':\n self.DUMMY = 1.0\n elif length == 'mm':\n self.DUMMY = 0.1\n elif length == 'm':\n self.DUMMY = 0.0",
"def UpdateLabel(self) -> _n_6_t_0:",
"def FillSquad(self):\n unitName = \"\"\n if isinstance(self.squad, squad.Squad):\n unitName = list(self.squad.additional_units.keys())[0]\n while self.squad.current_size < self.squad.max_size:\n self.squad.addUnit(unitName)\n self.addButton\n self.exportButton\n self.pointLabel['text'] = self.squad.point_cost\n self.sizeLabel['text'] = self.squad.current_size\n r=6\n\n if isinstance(self.squad, squad.Squad):\n for u in self.squad.units:\n Label(self.__mainWindow, text=u.name, font=__item_format__).grid(row=r, column=0)\n Label(self.__mainWindow, text=u.weapon_skill.__str__(), font=__item_format__).grid(row=r, column=1)\n Label(self.__mainWindow, text=u.ballistics_skill.__str__(), font=__item_format__).grid(row=r, column=2)\n Label(self.__mainWindow, text=u.strength.__str__(), font=__item_format__).grid(row=r, column=3)\n Label(self.__mainWindow, text=u.toughness.__str__(), font=__item_format__).grid(row=r, column=4)\n Label(self.__mainWindow, text=u.wounds.__str__(), font=__item_format__).grid(row=r, column=5)\n Label(self.__mainWindow, text=u.initiative, font=__item_format__).grid(row=r, column=6)\n Label(self.__mainWindow, text=u.melee_attacks.__str__(), font=__item_format__).grid(row=r, column=7)\n Label(self.__mainWindow, text=u.leadership.__str__(), font=__item_format__).grid(row=r, column=8)\n Label(self.__mainWindow, text=u.armor_save.__str__(), font=__item_format__).grid(row=r, column=9)\n Label(self.__mainWindow, text=u.invuln_save.__str__(), font=__item_format__).grid(row=r, column=10)\n r += 1\n\n else:\n for i in range(self.squad.current_size):\n Label(self.__mainWindow, text=self.squad.squad_name, font=__item_format__).grid(row=r, column=0)\n Label(self.__mainWindow, text=self.squad.ballistics_skill.__str__(), font=__item_format__).grid(row=r, column=1)\n Label(self.__mainWindow, text=self.squad.front_armor.__str__(), font=__item_format__).grid(row=r, column=2)\n Label(self.__mainWindow, text=self.squad.side_armor.__str__(), font=__item_format__).grid(row=r, column=3)\n Label(self.__mainWindow, text=self.squad.rear_armor.__str__(), font=__item_format__).grid(row=r, column=4)\n Label(self.__mainWindow, text=self.squad.hull_points, font=__item_format__).grid(row=r, column=5)\n r += 1\n \n self.addButton['state']='normal'\n if self.squad.current_size == self.squad.max_size:\n self.addButton['state']='disabled'\n if isinstance(self.squad, squad.Squad):\n self.wepSpin.grid(row=r, column=1, columnspan=4)\n self.weaponAdd.grid(row=r, column=5)\n r += 1",
"def AddUnit(self):\n unitName = \"\"\n if isinstance(self.squad, squad.Squad):\n unitName = list(self.squad.additional_units.keys())[0]\n self.squad.addUnit(unitName)\n self.addButton\n self.exportButton\n self.pointLabel['text'] = self.squad.point_cost\n self.sizeLabel['text'] = self.squad.current_size\n r=6\n if isinstance(self.squad, squad.Squad):\n for u in self.squad.units:\n Label(self.__mainWindow, text=u.name, font=__item_format__).grid(row=r, column=0)\n Label(self.__mainWindow, text=u.weapon_skill.__str__(), font=__item_format__).grid(row=r, column=1)\n Label(self.__mainWindow, text=u.ballistics_skill.__str__(), font=__item_format__).grid(row=r, column=2)\n Label(self.__mainWindow, text=u.strength.__str__(), font=__item_format__).grid(row=r, column=3)\n Label(self.__mainWindow, text=u.toughness.__str__(), font=__item_format__).grid(row=r, column=4)\n Label(self.__mainWindow, text=u.wounds.__str__(), font=__item_format__).grid(row=r, column=5)\n Label(self.__mainWindow, text=u.initiative, font=__item_format__).grid(row=r, column=6)\n Label(self.__mainWindow, text=u.melee_attacks.__str__(), font=__item_format__).grid(row=r, column=7)\n Label(self.__mainWindow, text=u.leadership.__str__(), font=__item_format__).grid(row=r, column=8)\n Label(self.__mainWindow, text=u.armor_save.__str__(), font=__item_format__).grid(row=r, column=9)\n Label(self.__mainWindow, text=u.invuln_save.__str__(), font=__item_format__).grid(row=r, column=10)\n r += 1\n\n else:\n for i in range(self.squad.current_size):\n Label(self.__mainWindow, text=self.squad.squad_name, font=__item_format__).grid(row=r, column=0)\n Label(self.__mainWindow, text=self.squad.ballistics_skill.__str__(), font=__item_format__).grid(row=r, column=1)\n Label(self.__mainWindow, text=self.squad.front_armor.__str__(), font=__item_format__).grid(row=r, column=2)\n Label(self.__mainWindow, text=self.squad.side_armor.__str__(), font=__item_format__).grid(row=r, column=3)\n Label(self.__mainWindow, text=self.squad.rear_armor.__str__(), font=__item_format__).grid(row=r, column=4)\n Label(self.__mainWindow, text=self.squad.hull_points, font=__item_format__).grid(row=r, column=5)\n r += 1\n \n self.addButton['state']='normal'\n if self.squad.current_size == self.squad.max_size:\n self.addButton['state']='disabled'\n if isinstance(self.squad, squad.Squad):\n self.wepSpin.grid(row=r, column=1, columnspan=4)\n self.weaponAdd.grid(row=r, column=5)\n r += 1",
"def updateCurrentUnit(self):\n self.unitGroup.updateCurrentUnit(unicode(self.text()),\n self.cursorPosition())\n self.emit(QtCore.SIGNAL('currentChanged')) # update listView",
"def load_unit(self, unit_id):",
"def unit(self,unit_str,unit_scale):\n self.units[unit_str] = unit_scale\n return self",
"def _units_chosen(self):\r\n sender = self.sender()\r\n\r\n # get current state/values\r\n mode = self._mode\r\n height = self.height_field.value()\r\n weight = self.weight_field.value()\r\n\r\n # update widgets\r\n if sender == self.imperial_button:\r\n self.height_units_label.setText('in')\r\n self.weight_units_label.setText('lb')\r\n self._mode = 'imperial'\r\n else:\r\n self.height_units_label.setText('cm')\r\n self.weight_units_label.setText('kg')\r\n self._mode = 'metric'\r\n\r\n # convert values\r\n if mode == 'metric' and self._mode == 'imperial':\r\n self._height = height / 2.54\r\n self._weight = weight / 0.454\r\n elif mode == 'imperial' and self._mode == 'metric':\r\n self._height = height * 2.54\r\n self._weight = weight * 0.454\r\n\r\n # update data widgets\r\n self.height_field.setValue(self._height)\r\n self.weight_field.setValue(self._weight)",
"def __init__(self, lunit=\"nm\"):\n super().__init__(lunit)",
"def reset_units(shared, *args):\n shared.config.remove_section('units')\n shared.config.add_section('units')\n \n return",
"def units(self, units):\n\n self._units = units",
"def units(self, units):\n\n self._units = units",
"def units(self, units):\n\n self._units = units",
"def set_labels(self):\n\n if 1 <= self.selected_data <= 2:\n self.plot_select.setLabel(\"left\", \"P (kPa)\")\n self.plot_select.setLabel(\"bottom\", \"t\", \"s\")\n self.plot_zoom.setLabel(\"left\", \"P (kPa)\")\n self.plot_zoom.setLabel(\"bottom\", \"t\", \"s\")\n\n elif self.selected_data == 3:\n self.plot_select.setLabel(\"left\", \"ext\", \"\")\n self.plot_select.setLabel(\"bottom\", \"t\", \"s\")\n self.plot_zoom.setLabel(\"left\", \"ext\", \"\")\n self.plot_zoom.setLabel(\"bottom\", \"t\", \"s\")\n\n elif self.selected_data == 4:\n self.plot_select.setLabel(\"left\", \"U\", \"V\")\n self.plot_select.setLabel(\"bottom\", \"t\", \"s\")\n self.plot_zoom.setLabel(\"left\", \"U\", \"V\")\n self.plot_zoom.setLabel(\"bottom\", \"t\", \"s\")\n\n # self.plot_simulate.setLabel(\"left\", \"ext\", \"\")\n # self.plot_simulate.setLabel(\"bottom\", \"t\", \"s\")\n\n self.plot_distribution.setLabel(\"left\", \"N ×10¹⁰ (#/m³)\")\n self.plot_distribution.setLabel(\"bottom\", \"d_p\", \"m\")\n self.plot_distribution.showGrid(y=True)\n\n self.plot_rotatometer.setLabel(\"left\", \"N ×10¹⁰ (#/m³)\")\n self.plot_rotatometer.setLabel(\"bottom\", \"laimennusvirtaus\")\n self.plot_rotatometer.showGrid(y=True)",
"def set_default_unit(unit):\n self.default_unit = unit",
"def useUnits():",
"def set_W0_unit(self, value):\n if self.lf_W0.text() != \"\":\n self.set_W0() # Update for deg if needed and call comp_output\n # Notify the machine GUI that the machine has changed\n self.saveNeeded.emit()",
"def setDistanceUnits(self, units: Unit) -> None:\n self.units = ...",
"def autolabel(rects):",
"def addLabels(t):\n if not t.label:\n t.label = \"\".join([choice(\"abcdefghijklmnopqrstuvwxyz\") for i in range(4)])\n for r,w in t.children:\n addLabels(r)"
] | [
"0.65493584",
"0.65493584",
"0.65493584",
"0.61209",
"0.5876205",
"0.5848301",
"0.5719211",
"0.56192005",
"0.56159955",
"0.5609573",
"0.55953413",
"0.55090725",
"0.5500092",
"0.54777503",
"0.54361576",
"0.5392763",
"0.5386514",
"0.53529835",
"0.5319486",
"0.53029513",
"0.5297845",
"0.5297845",
"0.5297845",
"0.52888274",
"0.52446663",
"0.52168554",
"0.5202094",
"0.5187809",
"0.51671207",
"0.5160897"
] | 0.6863121 | 0 |
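The positive passage in the row above follows a common Qt pattern: read the selected text from a combo box and mirror it into several labels. A standalone sketch of that pattern, assuming PyQt5; the widget names and placeholder unit strings are invented for illustration:

    import sys
    from PyQt5.QtWidgets import (QApplication, QComboBox, QLabel, QVBoxLayout,
                                 QWidget)

    class UnitPanel(QWidget):
        def __init__(self):
            super().__init__()
            self.units_box = QComboBox()
            self.units_box.addItems(["mg/kg", "ug/g", "ppm"])  # placeholder units
            self.unit_labels = [QLabel() for _ in range(4)]
            layout = QVBoxLayout(self)
            layout.addWidget(self.units_box)
            for label in self.unit_labels:
                layout.addWidget(label)
            # keep the labels in sync whenever the selection changes
            self.units_box.currentTextChanged.connect(self.set_unit_display)
            self.set_unit_display(self.units_box.currentText())

        def set_unit_display(self, units):
            # mirror the selected unit text into every unit label
            for label in self.unit_labels:
                label.setText(units)

    if __name__ == "__main__":
        app = QApplication(sys.argv)
        panel = UnitPanel()
        panel.show()
        sys.exit(app.exec_())

Wiring currentTextChanged to the setter avoids having to call the update method by hand after each selection change.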
Check if a task exists on the server | def exists(self, server):
try:
server.get(
'task',
replacements={
'slug': self.__challenge__.slug,
'identifier': self.identifier})
except Exception:
return False
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __check_if_task_exists(self, server_id):\n if server_id in self.__migrating_tasks.keys():\n return True\n return False",
"def exists_task(self, task):\n assert task, \"Must input a valid task name.\"\n return any(self.get_by_task(task))",
"def isTasksExists(request):\n task_status = {}\n task_result = 0\n flag = None\n for task in request.data['tasks']:\n task_obj = Tafv2Task.objects.filter(script=task)\n if task_obj:\n task_status[task] = \"Task Exists.\"\n else:\n task_result += 1\n task_status[task] = \"Task doesn't Exists.\"\n if task_result > 0:\n flag = False\n else:\n flag = True\n\n return {'taskResult': flag, 'taskStatus': task_status}",
"def is_registered(task_name):\n if tasks.find({'name': task_name}).count() > 0:\n return True\n else:\n return False",
"def exists(self, task_identifier: str, timeout: int) -> bool:\n raise NotImplementedError",
"def exists(taskname):\n with open(todofile, 'r') as todo:\n tasks = todo.readlines()\n for task in tasks:\n try:\n task = json.loads(task)\n if taskname == task['name']:\n return True\n except json.decoder.JSONDecodeError:\n return False\n return False",
"def is_task(self, task_id, tasks):\r\n for t in tasks:\r\n if t.id == task_id:\r\n return True\r\n return False",
"def exists(self, task, name=None):\n assert task, \"Must input a valid task name.\"\n if name is not None:\n return self._is_task_in_dataset(name, task)\n else:\n return self._is_task_in_any_dataset(task)",
"def status_check(task_id):\n logger.info(f\"Checking task status for {task_id}\")\n task = Task.objects.get(kf_id=task_id)\n task.status_check()",
"def check_task(request, tid):\n try:\n slogger.glob.info(\"check task #{}\".format(tid))\n response = task.check(tid)\n except Exception as e:\n slogger.glob.error(\"cannot check task #{}\".format(tid), exc_info=True)\n return HttpResponseBadRequest(str(e))\n\n return JsonResponse(response)",
"def __contains__(self, task):\n return task in self._tasks",
"def is_task_stagnant(task):",
"def _CheckIfHuntTaskWasAssigned(self, client_id, hunt_id):\n for _ in aff4.FACTORY.Stat(\n [client_id.Add(\"flows/%s:hunt\" %\n rdfvalue.RDFURN(hunt_id).Basename())],\n token=self.token):\n return True\n\n return False",
"def check_repeated_task(self, task):\n task_status = task in self.tasks_asked\n\n # append if never asked\n if task_status == False:\n self.tasks_asked.append(task)\n\n return task_status",
"def is_task_in_schedule(self, tid: str) -> bool:\n return tid in self.__tasks",
"def __contains__(self, name):\n return name in self._tasks",
"def test_update_task_exists(self):\n task_id = util.MOCK_UUID_4\n\n rv = TEST_CLIENT.patch(\n f\"/tasks/{task_id}\",\n json={\n \"name\": \"task-5\",\n },\n )\n result = rv.json()\n expected = {\n \"message\": \"a task with that name already exists\",\n \"code\": \"TaskNameExists\",\n }\n self.assertDictEqual(expected, result)\n self.assertEqual(rv.status_code, 400)",
"def check_task_id(id):\n\n\t# Open connection and execute SQL to get a task\n\ttry:\n\t\tdb, cursor = connect()\n\t\t\n\t\tcursor.execute(\"\"\"SELECT id FROM tasks \n\t\t\t\t\t\tWHERE id=%s\"\"\" % id)\n\n\t\ttask = cursor.fetchone()\n\n\t# Get error messages\n\texcept catch_error(), e:\n\t\tprint \"Error %d: %s\" % (e.args[0],e.args[1])\n\n\t# Close connection\n\tfinally:\n\t\tif db:\n\t\t\tdb.close()\n\n\treturn task",
"def need_update(self, task: Union[Task, Path]) -> bool:\n if isinstance(task, Path):\n return not task.exists()\n if task.name not in self._database:\n return True\n task_time = self._database.get(task.name)\n return task.need_rerun(task_time)",
"def is_task_taken(new_task, tasks):\n task_ids = [t.task_data.get('task_id') for t in tasks]\n new_task_id = new_task.get('task_id')\n if new_task_id is None:\n return False\n taken = new_task_id in task_ids\n if taken:\n logger.info('Task {} is already taken'.format(new_task_id))\n return taken",
"def check(request,task_name):\n try:\n todo = getattr(tasks,task_name,None)\n except KeyError:\n return JsonResponse(\n {'error':'This {} is not a known task'.format(taskname)})\n \n parameters = todo().settings.get.keys()\n \n try:\n kwargs = {par:request.GET[par] for par in parameters}\n except KeyError:\n return JsonResponse(\n {'error':'Missing parameter: please provide {}'.format(parameters)})\n\n action = todo(**kwargs)\n if not action.settings.valid:\n return JsonResponse(\n {'error':'Invalid settings: {}'.format(action.settings.errors)})\n\n \n action.set_result()\n \n a = action.result\n add_to_project(action.result.hash,project)\n\n return JsonResponse(action.description)",
"def is_task_done(self, task_name):\n logging.info(f\"Checking if '{task_name}' is done\")\n if task_name in self._container:\n res = self._container[task_name].running\n logging.info(f\"Result: {res}\")\n return res\n logging.info(f\"Could not find task: {task_name}\")\n raise TaskNotFoundException(f\"Could not find task: {task_name}\")",
"def _check_task(self, task: Task) -> bool:\n try:\n extents = list(fiemap(task.path, sync=task.frequency > 1))\n except OSError:\n self.logger.error('Error#%d %s', task.id, task.path, exc_info=True)\n return False\n\n if not extents:\n return False\n\n planner = Planner(self.planner_params, extents)\n clusters = planner.result()\n\n if not clusters:\n return False\n\n task.extents = extents\n task.clusters = clusters\n\n return True",
"def _get_task(self, task_id):\n if not task_id:\n return None\n task = objects.Transaction.get_by_uid(task_id, fail_if_not_found=False)\n if task and task.cluster_id == self.cluster.id:\n return task\n return False",
"def exists(self, task_identifier: str, timeout: int) -> bool:\n session = self.result_session()\n with self.session_cleanup(session):\n lock = session.query(Lock)\\\n .filter(Lock.task_identifier == task_identifier).first() # pylint: disable=no-member\n if not lock:\n return False\n difference = datetime.utcnow() - lock.created\n if difference < timedelta(seconds=timeout):\n return True\n\n return False",
"def is_event_service_task(jeditaskid):\n eventservice = False\n\n query = {'jeditaskid': jeditaskid}\n task = list(JediTasks.objects.filter(**query).values('eventservice'))\n if len(task) > 0 and 'eventservice' in task[0] and task[0]['eventservice'] is not None and task[0]['eventservice'] == 1:\n eventservice = True\n\n return eventservice",
"def exists(path):\n r = requests.head(path)\n # print(r.status_code)\n return r.status_code == requests.codes.ok",
"def _is_python_task(task, pidstr):\n if str(task.pid) != pidstr:\n return False\n else:\n return True",
"def task_is_failure(task):\n\n if task and task.state == 'FAILURE':\n return True\n return False",
"def _check_host_existence(self, hostname: str) -> bool:\n with self.lock:\n hosts = self.hosts.all()\n for host in hosts:\n if host['hostname'] == hostname:\n return True\n return False"
] | [
"0.77683973",
"0.7662436",
"0.74845606",
"0.73977894",
"0.7334475",
"0.68549156",
"0.68336856",
"0.6772",
"0.67269367",
"0.6711562",
"0.67005736",
"0.6666161",
"0.6534565",
"0.64875007",
"0.6391127",
"0.6377171",
"0.6374866",
"0.63482434",
"0.63294256",
"0.6309257",
"0.6295972",
"0.6288763",
"0.62789893",
"0.6259014",
"0.62515867",
"0.6174665",
"0.6157906",
"0.61175084",
"0.6103796",
"0.60824"
] | 0.8097337 | 0 |
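The positive passage in the row above probes the server with a GET and treats any exception as "task does not exist". A sketch of the same probe-and-catch idiom over plain HTTP, assuming the requests package; the URL layout and function name are made up for illustration:

    import requests

    def task_exists(base_url, slug, identifier, timeout=5):
        """Return True if a GET on the task resource succeeds, False otherwise."""
        url = f"{base_url}/task/{slug}/{identifier}"
        try:
            response = requests.get(url, timeout=timeout)
            response.raise_for_status()  # 4xx/5xx -> raises HTTPError
        except requests.RequestException:
            return False
        return True

Catching requests.RequestException (or the client library's specific error) rather than a bare Exception keeps unrelated bugs from being silently reported as a missing task.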
Find the pooled sample variance for two samples. | def pooled_sample_variance(sample1, sample2):
deg_freedom = len(sample1) + len(sample2) - 2
mean1 = statistics.mean(sample1)
squares1 = ((x - mean1) ** 2 for x in sample1)
mean2 = statistics.mean(sample2)
squares2 = ((x - mean2) ** 2 for x in sample2)
return (math.fsum(squares1) + math.fsum(squares2)) / float(deg_freedom) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def variance(self):\r\n\t\t_mean = sum(self.sample)/len(self.sample)\r\n\t\treturn sum(map(lambda x: (x - _mean)**2, self.sample))/(len(self.sample) - 1)",
"def variance( values, sample=False ):\n mean_val = mean_value( values )\n n_val = len( values ) -1 if sample else len( values )\n return sum( [ j**2 for j in [ i - mean_val for i in values ] ] ) / n_val",
"def calc_variances(ds):\n if ds.size <= 1:\n print 'Fail: not enough items for calculation %d' % ds.size\n return 0,1\n obs_var = ((ds.storage - ds.storage.sum()/ds.size)**2).sum()/(ds.size-1)\n rep_var = ds.var.sum()/ds.size\n return obs_var,rep_var",
"def variance(self, sample=True):\n distance_squared = list(map(lambda x: (x - sum(self.data)/self.size)**2, self.data))\n\n if sample == True:\n variance = sum(distance_squared)/(self.size - 1)\n if sample == False: \n variance = sum(distance_squared)/(self.size)\n return variance",
"def std_meandiff_pooledvar(self):\n # this uses ``_var`` to use ddof=0 for formula\n\n d1 = self.d1\n d2 = self.d2\n # could make var_pooled into attribute\n var_pooled = (\n (d1.sumsquares + d2.sumsquares)\n /\n # (d1.nobs - d1.ddof + d2.nobs - d2.ddof))\n (d1.nobs - 1 + d2.nobs - 1)\n )\n return np.sqrt(var_pooled * (1.0 / d1.nobs + 1.0 / d2.nobs))",
"def _variance(mean_variance, samples):\n mean = mean_variance[0] / samples\n variance = mean_variance[1]\n variance /= samples\n variance -= mean * mean\n return variance",
"def test_two_pop_unknown_var_ind(data1_: tuple, data2_: tuple):\n x_bar = cls.get_mean(data1_)\n y_bar = cls.get_mean(data2_)\n var_pool = cls.get_var_pool(data1_, data2_)\n n_x = cls.get_n(data1_)\n n_y = cls.get_n(data2_)\n return (x_bar - y_bar) / sqrt(var_pool / n_x + var_pool / n_y)",
"def compute_variance(\n self,\n parameters: NDArray,\n resids: NDArray,\n sigma2: NDArray,\n backcast: Union[float, NDArray],\n var_bounds: NDArray,\n ) -> NDArray:",
"def test_variance(self):\n self.assertEqual(variance(list1, sample=False), np.var(list1))\n self.assertEqual(variance(list1), np.var(list1, ddof=1))",
"def variance(self):\n observations_raw = input(\"Observations: \").split()\n observations = [int(elem) for elem in observations_raw]\n observations_squared = sum([num**2 for num in observations])\n aggregate_squared = sum(observations)**2\n n = len(observations)\n mean = sum(observations)/n\n variance = (observations_squared - (aggregate_squared/n))/(n-1)\n print(f\"Variance is: {variance}\")\n return variance, mean",
"def variance(self):\n return 1 / self.count() * sum((number-self.average())**2 for number in self.numbers)",
"def variance(L, is_sample=0):\n\tm = mean(L)\n\treturn sum((x-m)**2 for x in L) / (len(L) - is_sample)",
"def get_var_pool(cls, data1: tuple, data2: tuple) -> float:\n cls._data_validation(data1)\n cls._data_validation(data2)\n n1 = cls.get_n(data1)\n var1 = cls.get_var(data1)\n n2 = cls.get_n(data2)\n var2 = cls.get_var(data2)\n return ((n1 - 1) * var1 + (n2 - 1) * var2) / (n1 + n2 - 2)",
"def variance(self, mean=None):\n raise NotImplementedError",
"def test_two_pop_known_var_ind(data1_: tuple, data2_: tuple):\n x_bar = cls.get_mean(data1_)\n y_bar = cls.get_mean(data2_)\n var_x = cls.get_var(data1_, is_population=True)\n var_y = cls.get_var(data2_, is_population=True)\n n_x = cls.get_n(data1_)\n n_y = cls.get_n(data2_)\n return (x_bar - y_bar) / sqrt(var_x / n_x + var_y / n_y)",
"def sample_variance(self, x_dict={}):\n raise NotImplementedError()",
"def variance(dataset):\n avg = sum(dataset)/len(dataset)\n v = 0.0\n for data in dataset:\n v += (data - avg) * (data - avg)\n v = v / len(dataset)\n return v",
"def _variance(self, features):\n return np.mean(np.var(features.reshape((features.shape[0], -1)), axis=1))",
"def variance_scorer(x, y):\n scores = [np.var(column) for column in x.T]\n return scores, np.array([np.NaN]*len(scores))",
"def sampleVariance(numlist):\n\tsum1 = sum2 = 0.0\n\tn = 0.0\n\tfor x in numlist:\n\t\tassert isinstance(x, int) or isinstance(x, float)\n\t\tsum1 += x\n\t\tsum2 += x * x\n\t\tn += 1.0\n\tif n < 2.0:\n\t\treturn 0.0\n\tvar = (1.0/(n+1.0))*(sum2 - (1/n)*sum1*sum1)\n\tif var < 0.0: # Due to numerical problems only!\n\t\tvar = 0.0\n\treturn var",
"def variance(data):\n differences = data - np.mean(data)\n diff_sq = differences ** 2\n variance = np.mean(diff_sq)\n\n return variance",
"def variance(data):\n differences = data - np.mean(data)\n diff_sq = differences ** 2\n variance = np.mean(diff_sq)\n\n return variance",
"def variance(values, weights=None, axis=0):\n \n average = np.average(values, weights=weights, axis=axis)\n variance = np.average((values-average)**2, weights=weights, axis=axis)\n return variance",
"def variance(data, xbar=None):\n if iter(data) is data:\n data = list(data)\n data_len = len(data)\n if data_len < 2:\n raise StatisticsError('variance requires at least two data points')\n return _ss(data, xbar) / (data_len - 1)",
"def _compute_covariance(self, lc1, lc2):\n return np.cov(lc1.counts, lc2.counts)[0][1]",
"def explained_variance_score(y_true, y_pred, *, sample_weight=..., multioutput=...):\n ...",
"def variance(self):\n sum_sqdif = 0 # initialize sum of squared differences\n # Calculate sum of squared differences\n for site in self.sites:\n sqdif = (site.siteZmArea - self.meanZmArea()) ** 2\n sum_sqdif = sqdif + sum_sqdif \n # Standard Deviation\n stddev = ((1 / ( float(self.ni) - 1 )) * sum_sqdif ) ** 0.5\n # Variance\n var = stddev ** 2\n return var",
"def _variance(self,gp):\r\n return self.variance",
"def get_population_variance(self):\n\t\treturn self.variables.get('population_variance')",
"def variance(numbers, mean):\n variance = 0 # We will add to this value in a loop\n N = len(numbers)\n \n for i in numbers:\n\n # Operations follow typical BEDMAS\n variance += ((i - mean) * (i - mean))/N\n \n return variance"
] | [
"0.69535667",
"0.68496686",
"0.67732096",
"0.6738547",
"0.67180645",
"0.65582496",
"0.6516183",
"0.64949",
"0.6486372",
"0.6415831",
"0.6393349",
"0.639173",
"0.63470465",
"0.63140863",
"0.6311792",
"0.6310086",
"0.6282926",
"0.62156796",
"0.62076354",
"0.61959153",
"0.60444593",
"0.60444593",
"0.60425395",
"0.60338295",
"0.6019867",
"0.5990985",
"0.5990547",
"0.59890926",
"0.5965683",
"0.5951495"
] | 0.80888563 | 0 |
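The pooled variance in the positive passage above can be cross-checked against the closed form ((n1 - 1) * s1^2 + (n2 - 1) * s2^2) / (n1 + n2 - 2), where s1^2 and s2^2 are the ordinary sample variances. A small check with made-up numbers, reusing the function from the record:

    import math
    import statistics

    def pooled_sample_variance(sample1, sample2):
        deg_freedom = len(sample1) + len(sample2) - 2
        mean1 = statistics.mean(sample1)
        squares1 = ((x - mean1) ** 2 for x in sample1)
        mean2 = statistics.mean(sample2)
        squares2 = ((x - mean2) ** 2 for x in sample2)
        return (math.fsum(squares1) + math.fsum(squares2)) / float(deg_freedom)

    a = [9.1, 9.4, 8.9, 9.2, 9.0]
    b = [8.7, 8.8, 9.0, 8.6, 8.9]
    n1, n2 = len(a), len(b)
    closed_form = ((n1 - 1) * statistics.variance(a)
                   + (n2 - 1) * statistics.variance(b)) / (n1 + n2 - 2)
    assert math.isclose(pooled_sample_variance(a, b), closed_form)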
Calculate a ttest score for the difference between two samples. | def tscore(sample1, sample2):
if len(sample1) != len(sample2):
raise ValueError("different number of values")
error = pooled_sample_variance(sample1, sample2) / len(sample1)
diff = statistics.mean(sample1) - statistics.mean(sample2)
return diff / math.sqrt(error * 2) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def ttest(array1, array2):\n diff = np.mean(array1) - np.mean(array2)\n if diff < c.cart_p60:\n return c.low_score\n if array1.size <= 1 or array2.size <= 1:\n return min(diff, c.single_item_cart_max)\n return 1 - ttest_ind(array1, array2, equal_var=False).pvalue\n # return diff",
"def _t_test(_sample_a, _sample_b):\n res = stats.ttest_ind(_sample_a, _sample_b, axis=0, equal_var=equal_var, nan_policy='propagate')\n print('Independent t-test\\nt-statistic: {}\\np-value: {}'.format(res[0], res[1]))\n print('-' * 10)",
"def calculate_t_test(mean1, mean2, var1, var2, n1, n2, alpha):\n # Two Sample T Test (M0 == M1) (Two Tails)\n t = (mean1 - mean2) / sqrt((var1 / n1) + (var2 / n2)) # t statistic calculation for two sample\n df = n1 + n2 - 2 # degree of freedom for two sample t - set\n pval = 1 - stats.t.sf(np.abs(t), df) * 2 # two-sided pvalue = Prob(abs(t)>tt) # p - value\n cv = stats.t.ppf(1 - (alpha / 2), df)\n standart_error = cv * sqrt((var1 / n1) + (var2 / n2))\n confidence_intervals = [abs(mean1 - mean2) - standart_error, abs(mean1 - mean2) + standart_error, standart_error]\n acception = 'HO REJECTED!' if pval < (alpha / 2) else 'HO ACCEPTED!' # left tail\n acception = 'HO REJECTED!' if pval > 1 - (alpha / 2) else 'HO ACCEPTED!' # right tail\n return pval, confidence_intervals, acception",
"def eeg_twosample_ttest(array1,array2):\t\n\tfrom scipy.stats import ttest_rel\n\ts1 = array1.shape\n\tp = np.zeros(s1[1])\n\tt = np.zeros(s1[1])\n\tfor i in range(s1[1]):\n\t\ttval,pval = ttest_rel(array1[:,i],array2[:,i])\n\t\tp[i]=pval\n\t\tt[i]=tval\n\t\t\n\treturn t,p",
"def TestStatistic(self, data):\n group1, group2 = data\n test_stat = abs(group1.mean() - group2.mean())\n return test_stat",
"def ttest_ind_corrected(performance_a, performance_b, k=10, r=10):\n df = k * r - 1\n\n x = performance_a - performance_b\n m = np.mean(x)\n\n sigma_2 = np.var(x, ddof=1)\n denom = np.sqrt((1 / k * r + 1 / (k - 1)) * sigma_2)\n\n with np.errstate(divide='ignore', invalid='ignore'):\n t = np.divide(m, denom)\n\n prob = stats.t.sf(np.abs(t), df) * 2\n\n return t, prob",
"def t_test1(data1,data2):\n if not isinstance(data1,np.ndarray):\n\tdata1 = np.array(data1)\n if not isinstance(data2,np.ndarray):\n\tdata2 = np.array(data2)\n\n N1, N2 = len(data1), len(data2)\n mean1, mean2 = np.mean(data1), np.mean(data2)\n # Eq. 14.2.1\n sD = np.sqrt( (np.sum( (data1 - np.ones(N1) * mean1) ** 2.) + np.sum( (data2 - np.ones(N2) * mean2) ** 2.)) / (N1 + N2 - 2.) * (1./N1 + 1./N2))\n T = (mean1 - mean2) / sD\n return t.cdf(T, N1 + N2 - 2),T,N1 + N2 - 2",
"def t_test2(data1,data2):\n N1, N2 = len(data1), len(data2)\n mean1, mean2 = np.mean(data1), np.mean(data2)\n var1, var2= np.var(data1,ddof = 1), np.var(data2,ddof = 1)\n\n T = (mean1 - mean2) / np.sqrt(var1/N1 + var2/N2)\t# Eq. 14.2.3\n df = (var1/N1 + var2/N2)**2. / ( (var1/N1)**2./(N1 - 1) + (var2/N2)**2./(N2 - 1))\n return t.cdf(T, df), T, df",
"def t_tests(self):\n se = self.se()\n t = self._coef / se\n p = 2 * stats.distributions.t.sf(np.abs(t), self._rdf)\n return (t, p)",
"def ttest_review(sample_1, sample_2, alpha=.05):\n\n result = stats.ttest_ind(sample_1, sample_2)\n crit_val, p_val = result\n \n ## Creating interpretation based on p-value results.\n\n if p_val < .05:\n print(f'The feature is statistically significant with a p-value of {p_val}.')\n\n else:\n print(f'The feature is not statistically significant with a p-value of {p_val}.')\n \n return p_val",
"def run_welchs_ttest(stat1, stat2, alpha, faster):\n m1 = stat1[MEAN]\n m2 = stat2[MEAN]\n\n s1 = stat1[STDDEV]\n s2 = stat2[STDDEV]\n\n n1 = stat1[ROUNDS]\n n2 = stat2[ROUNDS]\n\n df1 = n1 - 1 # degree of freedom of stat1\n df2 = n2 - 1 # degree of freedom of stat2\n\n sample_v1 = s1**2 / n1 # biased estimated sample variance of stat1\n sample_v2 = s2**2 / n2 # biased estimated sample variance of stat2\n\n biased_variance = np.sqrt(sample_v1 + sample_v2)\n # degree of freedom\n df = (sample_v1 + sample_v2) ** 2 / (\n sample_v1**2 / (df1) + sample_v2**2 / (df2)\n )\n\n mean_delta = m1 - m2\n t_stat = mean_delta / biased_variance\n\n if faster:\n # Null hypothesis is stat1 >= stat2.\n # Alternative hypothesis is stat1 < stat2.\n p_value = t.cdf(t_stat, df)\n\n # Compute one sided confidence interval (-inf, x)\n upper_bound = mean_delta + t.ppf(1.0 - alpha, df) * biased_variance\n upper_bound = format(upper_bound, \".5f\")\n lower_bound = \"-inf\"\n else:\n # Null hypothesis is stat1 <= stat2.\n # Alternative hypothesis is stat1 > stat2.\n p_value = 1.0 - t.cdf(t_stat, df)\n\n # Compute one sided confidence interval (x, inf)\n upper_bound = \"inf\"\n lower_bound = mean_delta + t.ppf(alpha, df) * biased_variance\n lower_bound = format(lower_bound, \".5f\")\n\n return TTestResult(\n p_value=p_value,\n t_stat=t_stat,\n lower_bound=lower_bound,\n upper_bound=upper_bound,\n mean_delta=format(mean_delta, \".5f\"),\n )",
"def lttest_rel (a,b,printit=0,name1='Sample1',name2='Sample2',writemode='a'):\r\n if len(a)<>len(b):\r\n raise ValueError, 'Unequal length lists in ttest_rel.'\r\n x1 = mean(a)\r\n x2 = mean(b)\r\n v1 = var(a)\r\n v2 = var(b)\r\n n = len(a)\r\n cov = 0\r\n for i in range(len(a)):\r\n cov = cov + (a[i]-x1) * (b[i]-x2)\r\n df = n-1\r\n cov = cov / float(df)\r\n sd = math.sqrt((v1+v2 - 2.0*cov)/float(n))\r\n t = (x1-x2)/sd\r\n prob = betai(0.5*df,0.5,df/(df+t*t))\r\n\r\n if printit <> 0:\r\n statname = 'Related samples T-test.'\r\n outputpairedstats(printit,writemode,\r\n name1,n,x1,v1,min(a),max(a),\r\n name2,n,x2,v2,min(b),max(b),\r\n statname,t,prob)\r\n return t, prob",
"def test_model_outcome(predicted, actual, planned):\n if not isinstance(predicted, pd.DataFrame):\n predicted = pd.DataFrame(predicted, columns=[\"PREDICTED_TRIP_DURATION\"])\n if not isinstance(actual, pd.DataFrame):\n actual = pd.DataFrame(actual, columns=[\"ACTUAL_TRIP_DURATION\"])\n if not isinstance(planned, pd.DataFrame):\n planned = pd.DataFrame(planned, columns=[\"PLANNED_TRIP_DURATION\"])\n # Initialise the combined dataframe\n combined = pd.concat([predicted, actual, planned], axis=1)\n # Calculate the actual delay\n actual_delay = combined[\"PLANNED_TRIP_DURATION\"] - combined[\"ACTUAL_TRIP_DURATION\"]\n # Calculate the predicted delay\n predicted_delay = combined[\"PLANNED_TRIP_DURATION\"] - combined[\"PREDICTED_TRIP_DURATION\"]\n # Calculate the difference in delay\n delay_diff = actual_delay - predicted_delay\n # Combine the delays into a single dataframe\n combined_delay = pd.concat([pd.DataFrame(actual_delay, columns=['Actual_Delay']),\n pd.DataFrame(predicted_delay, columns=['Predicted_Delay']),\n pd.DataFrame(delay_diff, columns=['Difference_In_Delay'])], axis=1)\n # Obtain the index of the max and min values of the actual, predicted and difference delays\n actual_max_index = combined_delay[\"Actual_Delay\"].argmax()\n actual_min_index = combined_delay[\"Actual_Delay\"].argmin()\n predicted_max_index = combined_delay[\"Predicted_Delay\"].argmax()\n predicted_min_index = combined_delay[\"Predicted_Delay\"].argmin()\n delay_diff_max_index = combined_delay[\"Difference_In_Delay\"].argmax()\n delay_diff_min_index = combined_delay[\"Difference_In_Delay\"].argmin()\n # Get the Mean Absolute Error\n MAE = metrics.mean_absolute_error(combined[\"ACTUAL_TRIP_DURATION\"], combined[\"PREDICTED_TRIP_DURATION\"])\n # Get the R2 Score\n R2 = metrics.r2_score(combined[\"ACTUAL_TRIP_DURATION\"], combined[\"PREDICTED_TRIP_DURATION\"])\n # Get the Root Mean Squared Error\n RMSE = metrics.mean_squared_error(combined[\"ACTUAL_TRIP_DURATION\"], combined[\"PREDICTED_TRIP_DURATION\"],\n squared=False)\n # Get the Median Absolute Error\n MEDAE = metrics.median_absolute_error(combined[\"ACTUAL_TRIP_DURATION\"], combined[\"PREDICTED_TRIP_DURATION\"])\n # Get the Mean Squared Error Log Value\n MSLE = metrics.mean_squared_log_error(combined[\"ACTUAL_TRIP_DURATION\"], combined[\"PREDICTED_TRIP_DURATION\"])\n # Build Dictionary\n pass_val = {\"combined\": combined,\n \"combined_delay\": combined_delay,\n \"actual_max_index\": actual_max_index,\n \"actual_min_index\": actual_min_index,\n \"predicted_max_index\": predicted_max_index,\n \"predicted_min_index\": predicted_min_index,\n \"delay_diff_max_index\": delay_diff_max_index,\n \"delay_diff_min_index\": delay_diff_min_index,\n \"MAE\": MAE,\n \"R2\": R2,\n \"MEDAE\": MEDAE,\n \"RMSE\": RMSE,\n \"MSLE\": MSLE}\n # Return Dictionary\n return pass_val",
"def score(self, X_test, y_test):\n correct = []\n for one in X_test:\n correct.append(self.predict(one))\n try:\n return sum(0 if correct[i] != y_test[i] else 1 for i in range(len(X_test))) / len(\n X_test\n )\n except ZeroDivisionError:\n pass",
"def t_test(sample1, sample2, paired=False, alpha=0.05,\n alternative='two-sided', correction='auto', r=0.707,\n show_graph=True, **kwargs):\n confidence = 1 - alpha\n df_result = pg.ttest(\n sample1,\n sample2,\n paired=paired,\n confidence=confidence,\n alternative=alternative,\n correction=correction,\n r=r\n )\n if show_graph:\n if paired:\n difference = [x - y for x, y in zip(sample1, sample2)]\n Visualization.histogram(difference, **kwargs)\n else:\n Visualization.density_plot(sample1, sample2,\n fig_size=(5, 4), **kwargs)\n return HypothesisTester.define_hypothesis(df_result, 'mean',\n alternative, paired,\n alpha).T",
"def baseline_score(self,t0,t1):\n return len(set(t0) & set(t1))/len(set(t0).union(set(t1)))",
"def t_test(result, reference):\n \n # Check that result and reference are 1D and that they have the same length\n \n print('\\nChecking that result and reference are 1D and that they have the same length\\n')\n \n if (len(result.shape) == 1) and (len(reference.shape) == 1):\n \n if len(result) == len(reference):\n \n print('Performing t test\\n')\n \n t_stat, p_value = scipy.stats.ttest_ind(result, reference)\n \n print('t test completed successfully!\\n')\n \n print('t statistic: {} // p value: {}'.format(t_stat, p_value))\n \n return t_stat, p_value\n \n else:\n \n print('Result and reference vectors do not have the same length. Please input them so that they have the same length')\n \n else:\n \n print('Result or reference vectors are not 1D. Please reformat them to be 1D')",
"def score(self, X_test, y_test):\r\n counter = 0\r\n sr = self.predict(X_test)\r\n for i in range(len(y_test)):\r\n if sr[i] == y_test[i]:\r\n counter += 1\r\n return counter / len(y_test)\r\n pass",
"def TestStatistic(self, data):\n group1, group2 = data\n n1, n2 = len(group1), len(group2)\n pred1 = [i/n1 for i in range(n1, 0, -1)] \n pred2 = [i/n2 for i in range(n2, 0, -1)] \n test_stat = abs(\n roc_auc_score(group1, pred1) \n - roc_auc_score(group2, pred2)\n )\n return test_stat",
"def score(self, test_data):\n\n\t\tpass",
"def attest_rel (a,b,dimension=None,printit=0,name1='Samp1',name2='Samp2',writemode='a'):\r\n if dimension == None:\r\n a = N.ravel(a)\r\n b = N.ravel(b)\r\n dimension = 0\r\n if len(a)<>len(b):\r\n raise ValueError, 'Unequal length arrays.'\r\n x1 = amean(a,dimension)\r\n x2 = amean(b,dimension)\r\n v1 = avar(a,dimension)\r\n v2 = avar(b,dimension)\r\n n = a.shape[dimension]\r\n df = float(n-1)\r\n d = (a-b).astype('d')\r\n\r\n denom = N.sqrt((n*N.add.reduce(d*d,dimension) - N.add.reduce(d,dimension)**2) /df)\r\n zerodivproblem = N.equal(denom,0)\r\n denom = N.where(zerodivproblem,1,denom) # avoid zero-division in 1st place\r\n t = N.add.reduce(d,dimension) / denom # N-D COMPUTATION HERE!!!!!!\r\n t = N.where(zerodivproblem,1.0,t) # replace NaN/wrong t-values with 1.0\r\n probs = abetai(0.5*df,0.5,float(df)/(df+t*t))\r\n if type(t) == N.ndarray:\r\n probs = N.reshape(probs,t.shape)\r\n if probs.shape == (1,):\r\n probs = probs[0]\r\n\r\n if printit <> 0:\r\n statname = 'Related samples T-test.'\r\n outputpairedstats(printit,writemode,\r\n name1,n,x1,v1,N.minimum.reduce(N.ravel(a)),\r\n N.maximum.reduce(N.ravel(a)),\r\n name2,n,x2,v2,N.minimum.reduce(N.ravel(b)),\r\n N.maximum.reduce(N.ravel(b)),\r\n statname,t,probs)\r\n return\r\n return t, probs",
"def ttest_two_sided(arr1, arr2, alpha=0.05, verbose=False):\n res = stats.ttest_ind(arr1, arr2)\n if res[1] <= alpha:\n if verbose: print(\n f'P-value = {round(res[1], 3)}, i.e. at alpha={alpha} the samples are significantly DIFFERENT')\n is_significant = True\n else:\n if verbose: print(f'P-value = {round(res[1], 3)}, i.e. at alpha={alpha} the samples are from the SAME set')\n is_significant = False\n return res[1], is_significant",
"def elapsed_time_for_test(self, test_class, test_name, end_time):\n if test_class is None or test_name is None:\n return -2.0\n\n test_key = \"{}.{}\".format(test_class, test_name)\n if test_key not in self.start_time_by_test:\n return -1.0\n else:\n start_time = self.start_time_by_test[test_key]\n del self.start_time_by_test[test_key]\n return end_time - start_time",
"def league_ttest(df_league_one: pd.DataFrame, df_league_two: pd.DataFrame, parameter: str, alpha: float, ):\n assert isinstance(df_league_one, pd.DataFrame), 'df_league_one needs to be a pandas dataframe.'\n assert isinstance(df_league_two, pd.DataFrame), 'df_league_two needs to be a pandas dataframe.'\n assert isinstance(alpha, float), 'alpha needs to be a float.'\n\n\n df_league_one_mean = df_league_one.mean()\n n = len(df_league_one['club'])\n df = n-1\n t_critical = stats.t.ppf(1-alpha, df)\n leagues_ttest = stats.ttest_1samp(a= df_league_two[f'{parameter}'], popmean= df_league_one_mean)\n t_value = leagues_ttest[0]\n p_value = leagues_ttest[1]\n\n stats_values = {}\n\n stats_values['p_value'] = round(list(p_value)[0], 4)\n\n if stats_values['p_value'] < alpha:\n return ('Enough evidence to reject null hypothesis')\n elif stats_values['p_value'] > alpha:\n return ('Not enough evidence to reject null hypothesis')",
"def t_test(dataType):\n\n\t# read the data\n\tparser = ExperimentUtils()\n\tdata = parser.parse_data(dataType)\n\n\tN = len(data.keys()) # number participants\n\n\t# - for trial 1 and trial 2:\n\t# \tL2 norm over each timestep, then sum all the values together\n\t# - average over two trials for each participant \n\ttask_avgs = {}\n\n\t# participant ID can take values 0 - 9\n\tfor ID in data.keys():\n\t\tfor task in data[ID]:\n\t\t\t# dont include the familiarization task (task can take values 1,2,3)\n\t\t\tif task != 0:\n\t\t\t\tif task not in task_avgs:\n\t\t\t\t\ttask_avgs[task] = {}\n\t\t\t\t\ttask_avgs[task][\"A\"] = np.array([0.0]*N)\n\t\t\t\t\ttask_avgs[task][\"B\"] = np.array([0.0]*N)\n\n\t\t\t\ttrialAsum = [0.0,0.0]\n\t\t\t\ttrialBsum = [0.0,0.0]\n\t\t\t\t# trial can take values 1 or 2\n\t\t\t\tfor trial in data[ID][task]:\n\t\t\t\t\t# only compute metrics over data, not timestamps\n\t\t\t\t\tAdata = data[ID][task][trial]['A'][1:8]\n\t\t\t\t\tBdata = data[ID][task][trial]['B'][1:8]\n\t\t\t\n\t\t\t\t\t#print str(ID)+str(task)+str(trial)+\"A\"\n\t\t\t\t\t#print \"Adata: \" + str(Adata)\n\t\t\t\t\t#print str(ID)+str(task)+str(trial)+\"B\"\n\t\t\t\t\t#print \"Bdata: \" + str(Bdata)\n\n\t\t\t\t\t(h, w) = np.shape(Adata)\n\t\t\t\t\tfor i in range(w):\n\t\t\t\t\t\ttrialAsum[trial-1] += np.linalg.norm(Adata[:,i])\n\t\t\t\t\t(h, w) = np.shape(Bdata)\n\t\t\t\t\tfor i in range(w):\n\t\t\t\t\t\ttrialBsum[trial-1] += np.linalg.norm(Bdata[:,i])\n\t\t\t\tavg_methodA = (trialAsum[0]+trialAsum[1])/2.0\n\t\t\t\tavg_methodB = (trialBsum[0]+trialBsum[1])/2.0\n\n\t\t\t\ttask_avgs[task][\"A\"][ID] = avg_methodA\n\t\t\t\ttask_avgs[task][\"B\"][ID] = avg_methodB\n\n\t# comput independent two-sample t-test \n\t# NOTE: we can assume that the two sample sizes are the same, and \n\t#\t\tthat the two distributions have the same variance\n\tfor task in range(1,4):\n\t\ttaskA = task_avgs[task][\"A\"]\n\t\ttaskB = task_avgs[task][\"B\"]\n\n\t\tmeanA = np.mean(taskA)\n\t\tmeanB = np.mean(taskB)\n\t\tprint \"meanA: \" + str(meanA)\n\t\tprint \"meanB: \" + str(meanB)\n\t\tdiff = meanA - meanB\n\t\tprint \"diff: \" + str(diff)\n\n\t\t(statistic, pvalue) = stats.ttest_ind(a=taskA, b=taskB, equal_var=True)\n\n\t\tprint \"\\n\"\n\t\tprint \"task\"+str(task)+\" statistic: \" + str(statistic)\n\t\tprint \"task\"+str(task)+\" pvalue: \" + str(pvalue)",
"def calculate_td_error(self, old_state, new_state, reward):\n for state in [old_state, new_state]:\n if state not in self.expected:\n self.expected[state] = 0\n self.delta = reward + self.gamma*self.expected[new_state] - self.expected[old_state]\n return self.delta",
"def _compare_pre_post_sampling(X_train, y_train, X_new, y_new):\n train_data_info = _basic_data_info(X_train, y_train)\n new_data_info = _basic_data_info(X_new, y_new)\n\n print(\"\\nNum samples increased from {} to {} samples\\n\".format(train_data_info[\"Num_samples\"], new_data_info[\"Num_samples\"]))\n\n # Create pandas Dataframe\n df = pd.DataFrame(np.nan, index = train_data_info['classes'], columns = ['og_dist', 'og_prop', 'new_dist', 'new_prop'])\n df.iloc[:, 0] = train_data_info[\"counts\"]\n df.iloc[:, 1] = train_data_info[\"percs\"]\n df.iloc[:, 2] = new_data_info[\"counts\"]\n df.iloc[:, 3] = new_data_info[\"percs\"]\n\n df.index.name = \"classes\"\n\n # Difference in distributions\n print(\"Count comparison is as follows: \\n\", df)",
"def lttest_ind (a, b, printit=0, name1='Samp1', name2='Samp2', writemode='a'):\r\n x1 = mean(a)\r\n x2 = mean(b)\r\n v1 = stdev(a)**2\r\n v2 = stdev(b)**2\r\n n1 = len(a)\r\n n2 = len(b)\r\n df = n1+n2-2\r\n svar = ((n1-1)*v1+(n2-1)*v2)/float(df)\r\n t = (x1-x2)/math.sqrt(svar*(1.0/n1 + 1.0/n2))\r\n prob = betai(0.5*df,0.5,df/(df+t*t))\r\n\r\n if printit <> 0:\r\n statname = 'Independent samples T-test.'\r\n outputpairedstats(printit,writemode,\r\n name1,n1,x1,v1,min(a),max(a),\r\n name2,n2,x2,v2,min(b),max(b),\r\n statname,t,prob)\r\n return t,prob",
"def ttest(\n data, dataLabel=None, paired=False, decimals=4,\n textline=False, units=None\n ):\n\n # test calling values\n if data is None or not isinstance(data, dict) or len(data.keys()) != 2:\n raise ValueError('RSTATS.ttest: data must be a dictionary'\n + ' with at exactly 2 keys'\n + '\\nUse KW (anova) for more than 2 groups')\n\n k = list(data.keys())\n g = {}\n n = {}\n gmean = {}\n gstd = {}\n\n g[1] = data[k[0]]\n g[2] = data[k[1]]\n n[1] = len(g[1])\n n[2] = len(g[2])\n # (w1, p1) = Stats.shapiro(g1, a=None, reta=False)\n # (w2, p2) = Stats.shapiro(g2, a=None, reta=False)\n # Tb, pb = Stats.bartlett(g1, g2) # do bartletss for equal variance\n equalVar = False\n\n if paired:\n print (len(g[1]), len(g[2]))\n (t, p) = Stats.ttest_rel(g[1], g[2])\n else:\n (t, p) = Stats.ttest_ind(g[1], g[2], equal_var=equalVar)\n gmean[1] = np.mean(g[1])\n gstd[1] = np.std(g[1], ddof=1)\n gmean[2] = np.mean(g[2])\n gstd[2] = np.std(g[2], ddof=1)\n # df = (tstd[k]**2/tN[k] + dstd[k]**2/dN[k])**2 / (( (tstd[k]**2 /\n # tN[k])**2 / (tN[k] - 1) ) + ( (dstd[k]**2 / dN[k])**2 / (tN[k] - 1) ) )\n df = ((gstd[1]**2/n[1] + gstd[2]**2/n[2])**2\n / (((gstd[1]**2 / n[1])**2 / (n[1] - 1)\n + ((gstd[2]**2 / n[2])**2 / (n[1] - 1))))\n )\n if dataLabel is not None:\n testtype = 'Independent'\n if paired:\n testtype = 'Paired'\n n = max([len(l) for l in k])\n print ('\\n%s\\n %s T-test, Welch correction' % (dataLabel, testtype))\n # if p1 < 0.05 and p2 < 0.05:\n # print(u' Both data sets appear normally distributed: Shapiro-Wilk Group 1 p = {:6.3f}, Group2 p = {:6.3f}'.format(p1, p2))\n # else:\n # print(u' ****At least one Data set is NOT normally distributed****\\n Shapiro-Wilk Group 1 p = {:6.3f}, Group2 p = {:6.3f}'.format(p1, p2))\n # print (u' (performing test anyway, as requested)')\n # if equalVar:\n # print(u' Variances are equivalent (Bartletts test, p = {:.3f})'.format(pb))\n # else:\n # print(u' Variances are unequal (Bartletts test, p = {:.3f}); not assuming equal variances'.format(pb))\n print(u' {:s}={:8.{pc}f} (SD {:.{pc}f}, N = {:d})'.\n format(k[0].rjust(n), gmean[1], gstd[1],\n len(g[1]), pc=decimals))\n print(u' {:s}={:8.{pc}f} (SD {:.{pc}f}, N = {:d})'.\n format(k[1].rjust(n), gmean[2], gstd[2],\n len(g[2]), pc=decimals))\n print(u' t({:6.2f})={:8.4f} p={:8.6f}\\n'.\n format(df, float(t), float(p)))\n # generate one line of text suitable for pasting into a paper\n if textline:\n if units is not None:\n units = ' ' + units\n else:\n units = ''\n fmtstring = u'{:s}: {:.{pc}f} (SD {:.{pc}f}, N={:d}){:s}; '\n print(u'(', end='')\n for s in range(1, 3):\n print(fmtstring.format(\n k[s-1], gmean[s], gstd[s], len(g[s]), units, \n pc=decimals), end='')\n print(u't{:.2f}={:.3f}, p={:s})\\n'.format(df, float(t), pformat(p)))\n\n return(df, float(t), float(p))",
"def test_score_2():\n\n tpot_obj = TPOTClassifier()\n tpot_obj._pbar = tqdm(total=1, disable=True)\n known_score = 0.986318199045 # Assumes use of the TPOT balanced_accuracy function\n\n # Reify pipeline with known score\n tpot_obj._optimized_pipeline = creator.Individual.\\\n from_string('RandomForestClassifier(input_matrix)', tpot_obj._pset)\n tpot_obj._fitted_pipeline = tpot_obj._toolbox.compile(expr=tpot_obj._optimized_pipeline)\n tpot_obj._fitted_pipeline.fit(training_features, training_classes)\n\n # Get score from TPOT\n score = tpot_obj.score(testing_features, testing_classes)\n\n # http://stackoverflow.com/questions/5595425/\n def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):\n return abs(a - b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)\n\n assert isclose(known_score, score)"
] | [
"0.75622785",
"0.6732271",
"0.67147094",
"0.67101526",
"0.66661763",
"0.64419353",
"0.6416758",
"0.64082766",
"0.63968855",
"0.6324951",
"0.6282402",
"0.6253952",
"0.6186208",
"0.61227715",
"0.6105249",
"0.60951906",
"0.5997991",
"0.59836954",
"0.59664416",
"0.5946647",
"0.5934196",
"0.59319234",
"0.5923768",
"0.5921423",
"0.5902078",
"0.58966976",
"0.5885101",
"0.5866355",
"0.58485174",
"0.58318895"
] | 0.7724763 | 0 |
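For context on the statistics snippets listed above: the ttest negative applies Welch's correction by calling Stats.ttest_ind with equal_var=False and then computing the Welch–Satterthwaite degrees of freedom by hand. A minimal hedged sketch of the same call with scipy (the sample data here is made up for illustration):

import numpy as np
from scipy import stats

# hypothetical two-sample data with unequal variances
rng = np.random.default_rng(0)
a = rng.normal(loc=0.0, scale=1.0, size=30)
b = rng.normal(loc=0.5, scale=2.0, size=40)

# Welch's t-test: equal_var=False drops the pooled-variance assumption,
# matching the "Welch correction" mentioned in the snippet above
t, p = stats.ttest_ind(a, b, equal_var=False)
print(f"t={t:.3f}, p={p:.4f}")
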
return autosizable field names in idfobject | def autosize_fieldname(idfobject):
    # relies on undocumented internals: pair each field name with its IDD dict and keep the autosizable ones
return [
fname
for (fname, dct) in zip(idfobject.objls, idfobject["objidd"])
if "autosizable" in dct
] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def field_names(self):\n ...",
"def objectFields(self):\n raise NotImplementedError",
"def fields(self):",
"def _fields_names(cls) -> List:\n return list(field.name for field in dataclasses.fields(cls))",
"def fields(self):\n ...",
"def get_field_names(self):\n return {rv[0] for rv in self.iter_fields()}",
"def fields(cls):\n return cls._nameToValue",
"def get_field_attr(name):\n # de variant met een repeating group (entiteit, dataitem) levert hier nog een probleem op.\n # is dat omdat er twee entiteiten in 1 scherm staan?\n fields = []\n opts = my.rectypes[name]._meta\n for x in opts.get_fields(): # fields:\n fldname = x.name\n fldtype = x.get_internal_type()\n if fldname == 'id' or fldtype in ('ForeignKey', 'ManyToManyField'):\n # if fldname == 'id' or any((x.many2one, x.many2many, x.one2many))\n continue\n try:\n length = x.max_length\n except AttributeError:\n length = -1\n fields.append((fldname, fldtype[:-5], length))\n return fields",
"def fields(self):\r\n pass",
"def field_names(self):\n return self.base_field_names() + list(self.data.keys())",
"def get_field_names() -> Sequence[str]:\n raise NotImplementedError",
"def db_fields(self):",
"def list_fields(fc):\n return [f.name for f in arcpy.ListFields(fc)]",
"def namespaced_fields(self):\n ...",
"def fields(self):\r\n return self._by_name.iteritems()",
"def get_default_field_names(self, declared_fields, model_info):\n return (\n list(declared_fields.keys()) +\n list(model_info.fields.keys())\n )",
"def get_field_names(self, declared_fields, info):\n return self._requested_fields",
"def get_fields(cls):\n return map(lambda x: getattr(cls, x), cls.get_field_names())",
"def get_fields(self):\n field_list = []\n for field in self._meta.local_fields:\n if not field.primary_key:\n field_list.append([field.verbose_name.title(),\n self.__getattribute__(field.name),\n field.get_internal_type()])\n return field_list",
"def f(self):\r\n return self.fields()",
"def model_fields(self):\n converter = connections[self.db].introspection.identifier_converter\n model_fields = {}\n for field in self.model._meta.fields:\n name, column = field.get_attname_column()\n model_fields[converter(column)] = field\n return model_fields",
"def fields(self):\n return [f[1] for f in sorted(self.dd.fields.items())]",
"def field_names(self):\n\n entry_time_name = forms_builder.forms.models.FormEntry._meta.get_field('entry_time').verbose_name.title()\n document_title_name = Document._meta.get_field('name').verbose_name.title()\n document_url_name = Document._meta.get_field('url').verbose_name.title()\n\n form = self.form.all()[0]\n return ['user'] \\\n + [document_title_name, document_url_name] \\\n + [f.label\n for f in form.fields.all()] \\\n + [entry_time_name]",
"def __fields(self):\n return [self.__class__.__dict__[f] for f in self.__class__._fields]",
"def get_field_names(self):\n return self._keys",
"def field_names(self):\n if not self._field_names:\n self._field_names.update(self.properties.keys())\n\n self._field_names = [attr for attr in self._field_names if not attr.startswith(\"_\")]\n\n return self._field_names",
"def get_all_fields(self):\n fields = []\n for f in self._meta.fields:\n\n fname = f.name \n # resolve picklists/choices, with get_xyz_display() function\n get_choice = 'get_'+fname+'_display'\n if hasattr( self, get_choice):\n value = getattr( self, get_choice)()\n else:\n try :\n value = getattr(self, fname)\n except User.DoesNotExist:\n value = None\n\n # only display fields with values and skip some fields entirely\n if f.editable and value and f.name not in ('id', 'status', 'workshop', 'user', 'complete') :\n\n fields.append(\n {\n 'label':f.verbose_name, \n 'name':f.name, \n 'value':value,\n }\n )\n return fields",
"def modelfields(entity) -> Dict[str, Field]:\n return entity.__modelfields__",
"def get_field_names(cls):\n return cls._meta.get_field_names()",
"def getFields(self):\n return sorted(self.schema.fields, key=lambda f: f.name)"
] | [
"0.7096624",
"0.7052038",
"0.68935066",
"0.66643006",
"0.6553258",
"0.6494789",
"0.6485936",
"0.6427995",
"0.64223516",
"0.63584894",
"0.6328418",
"0.63140696",
"0.62944096",
"0.62799215",
"0.6263371",
"0.62606794",
"0.6165774",
"0.6162808",
"0.61518234",
"0.611477",
"0.6103165",
"0.60989463",
"0.60954493",
"0.6091914",
"0.606502",
"0.6063161",
"0.6052018",
"0.6041478",
"0.60404396",
"0.5988789"
] | 0.765945 | 0 |
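The autosize_fieldname record above pairs each field name in objls with its IDD metadata dict from the object's "objidd" entry and keeps the names whose dict carries an "autosizable" key. A minimal self-contained sketch of that pattern, using a hypothetical FakeIdfObject stand-in instead of a real eppy IDF object (only the two lookups used in the record, objls and ["objidd"], are assumed):

def autosize_fieldname(idfobject):
    # keep only the field names whose IDD metadata marks them as autosizable
    return [
        fname
        for (fname, dct) in zip(idfobject.objls, idfobject["objidd"])
        if "autosizable" in dct
    ]

class FakeIdfObject:
    # duck-typed stand-in: exposes `objls` and dict-style access to "objidd"
    def __init__(self, objls, objidd):
        self.objls = objls
        self._data = {"objidd": objidd}

    def __getitem__(self, key):
        return self._data[key]

obj = FakeIdfObject(
    objls=["Name", "Design_Flow_Rate", "Fan_Efficiency"],
    objidd=[{}, {"autosizable": [""]}, {}],
)
print(autosize_fieldname(obj))  # ['Design_Flow_Rate']
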
Checks whether the given ISBN10 code is valid. >>> isISBN10('9971502100') True >>> isISBN10('9971502108') False | def isISBN10(code):
# helper function for computing ISBN-10 check digit
def check_digit(code):
# compute check digit
check = sum((i + 1) * int(code[i]) for i in range(9)) % 11
# convert check digit into its string representation
return 'X' if check == 10 else str(check)
# check whether given code is a string
if not isinstance(code, str):
return False
# check whether given code contains 10 characters
if len(code) != 10:
return False
# check whether first nine characters of given code are digits
if not code[:9].isdigit():
return False
# check the check digit
return check_digit(code) == code[-1] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def isISBN(code):\n if not (\n isinstance(code, str) and # code must be a string\n len(code) == 10 and # code must contain 10 characters\n code[:9].isdigit() # first nine characters must be digits\n ):\n return False\n\n # check the check digit\n return checkdigit(code) == code[-1]",
"def is_isbn_10(isbn10):\r\n isbn10 = re.sub(r'[^0-9X]', '', isbn10.replace('x', 'X'))\r\n if len(isbn10) != 10: return False\r\n return False if isbn_10_check_digit(isbn10[:-1]) != isbn10[-1] else True",
"def isbn_10_check_structure(isbn10):\r\n return True if re.match(RE_ISBN10, isbn10) else False",
"def is_valid_isbn(isbn):\n clean = clean_isbn(isbn)\n return clean[-1] == isbn_check_digit(clean[:-1])",
"def is_valid(isbn: str) -> bool:\n no_dashes = isbn.replace('-', '')\n\n nums = []\n\n valid_num = False\n\n if no_dashes:\n for char in no_dashes:\n if char == 'X':\n nums.append(10)\n elif char != 'X' and char.isalpha() or len(no_dashes) < 10 or len(no_dashes) > 10:\n break\n elif 'X' in no_dashes and no_dashes[-1] != 'X':\n break\n else:\n nums.append(int(char))\n\n char = 0\n value = 0\n\n if nums and len(nums) == 10:\n for n in range(10, 0, -1):\n value += (n * nums[char])\n char += 1\n valid_num = (value % 11 == 0)\n\n return valid_num",
"def isISBN(code, isbn13=True):\n\n return isISBN13(code) if isbn13 else isISBN10(code)",
"def isISBN13(code):\n\n # helper function for computing ISBN-10 check digit\n def check_digit(code):\n\n # compute check digit\n check = sum((3 if i % 2 else 1) * int(code[i]) for i in range(12))\n\n # convert check digit into a single digit\n return str((10 - check) % 10)\n\n # check whether given code is a string\n if not isinstance(code, str):\n return False\n\n # check whether given code contains 10 characters\n if len(code) != 13:\n return False\n\n # check whether first nine characters of given code are digits\n if not code[:12].isdigit():\n return False\n\n # check the check digit\n return check_digit(code) == code[-1]",
"def isbn_has_valid_check_digit(self, isbn):\n if not self.ISBN_RE.match(isbn):\n raise ValueError(str(isbn) + \" is no valid 13-digit ISBN!\")\n checksum = 0\n for index, digit in enumerate(isbn):\n if index % 2 == 0:\n checksum += int(digit)\n else:\n checksum += 3 * int(digit)\n return checksum % 10 == 0",
"def is_isbn(val):\n if is_isbn10(val) or is_isbn13(val):\n if val[0:3] in [\"978\", \"979\"] or not is_ean13(val):\n return True\n return False",
"def verify(isbn):\n\n isbn = isbn.replace(\"-\", \"\")\n if not verify_format(isbn):\n return False\n\n isbn_sum = 0\n for digit, i in zip(isbn, range(10, 0, -1)):\n if digit == \"X\":\n isbn_sum += 10 * i\n else:\n isbn_sum += int(digit) * i\n\n return isbn_sum % 11 == 0",
"def verify_format(isbn):\n\n return len(isbn) == 10 and (isbn[-1] == \"X\" or isbn[-1].isdigit()) \\\n and all(digit.isdigit() for digit in isbn[:-1])",
"def isbn():\n message = 'Informe um ISBN válido'\n def _isbn(form, field):\n if not is_isbn10(field.data) and not is_isbn13(field.data):\n raise ValidationError(message)\n return _isbn",
"def areISBN(codes, isbn13=None):\n\n # initialize list of checks\n checks = []\n\n # construct list of checks\n for code in codes:\n\n if isinstance(code, str):\n\n if isbn13 is None:\n checks.append(isISBN(code, len(code) == 13))\n else:\n checks.append(isISBN(code, isbn13))\n\n else:\n\n checks.append(False)\n\n # return list of checks\n return checks",
"def isbn_13_check_structure(isbn13):\r\n return True if re.match(RE_ISBN13, isbn13) else False",
"def is_isbn_13(isbn13):\r\n isbn13 = re.sub(r'[^0-9X]', '', isbn13.replace('x', 'X'))\r\n if len(isbn13) != 13: return False\r\n if isbn13[0:3] not in ('978', '979'): return False\r\n return False if isbn_13_check_digit(isbn13[:-1]) != isbn13[-1] else True",
"def verify(s):\n\t# Remove any spurious characters\n\ts = re.sub(r'[^0-9xX]', '', s).upper().strip()\n\n\tl = len(s)\n\n\tif l==10:\n\t\tif verify_10(s):\n\t\t\treturn s\n\telif l==13:\n\t\tif verify_13(s):\n\t\t\treturn s\n\n\t# It's not the right length to be an ISBN\n\treturn False",
"def isbn_convert(isbn10):\r\n if not is_isbn_10(isbn10): return None\r\n return '978' + isbn10[:-1] + isbn_13_check_digit('978' + isbn10[:-1])",
"def valid_book(self, info):\n self.cursor.execute(\"SELECT ISBN, title, price, stock FROM book WHERE ISBN=%s\", (info['ISBN'],))\n for book in self.cursor.fetchall():\n return True, float(book[2]), book[1], book[3]\n return False, 0, 0, 0",
"def isbn_check_digit(isbn):\n return (11 - (sum(x * y for (x, y) in enumerate(reversed(isbn), start=2))\n % 11)) % 11",
"def testBadFormatISBN(self): \n val = format_isbn(\"1234567843534594123\")\n self.assertFalse(val)",
"def is_valid_issn(issn):\n try:\n return bool(validate_issn(issn))\n except (ValueError, TypeError):\n return False",
"def isbn_10_check_digit(nine_digits):\r\n if len(nine_digits) != 9: return None\r\n try: int(nine_digits)\r\n except: return None\r\n remainder = int(sum((i + 2) * int(x) for i, x in enumerate(reversed(nine_digits))) % 11)\r\n if remainder == 0: tenth_digit = 0\r\n else: tenth_digit = 11 - remainder\r\n if tenth_digit == 10: tenth_digit = 'X'\r\n return str(tenth_digit)",
"def test_book_isbn_length_must_be_ten(self):\n\n\t\twith self.client:\n\t\t\tadd_book = {\n\t\t\t\t'title': 'Hello Books',\n\t\t\t\t'isbn': '56987451'\n\t\t\t}\n\t\t\tlogin_data = self.login_test_user()\n\t\t\ttoken = login_data['auth_token']\n\t\t\tres = self.client.post(\n\t\t\t\tf'{URL_BOOKS}',\n\t\t\t\theaders=dict(Authorization=f'Bearer {token}'),\n\t\t\t\tcontent_type='application/json',\n\t\t\t\tdata=json.dumps(add_book)\n\t\t\t)\n\t\t\tres2 = json.loads(res.data.decode())\n\t\t\tself.assertIn('isbn length must be 10', str(res2))",
"def test_book_isbn_must_only_be_numbers(self):\n\n\t\twith self.client:\n\t\t\tadd_book = {\n\t\t\t\t'title': 'Hello Books',\n\t\t\t\t'isbn': '56987451Ky'\n\t\t\t}\n\t\t\tlogin_data = self.login_test_user()\n\t\t\ttoken = login_data['auth_token']\n\t\t\tres = self.client.post(\n\t\t\t\tf'{URL_BOOKS}',\n\t\t\t\theaders=dict(Authorization=f'Bearer {token}'),\n\t\t\t\tcontent_type='application/json',\n\t\t\t\tdata=json.dumps(add_book)\n\t\t\t)\n\t\t\tres2 = json.loads(res.data.decode())\n\t\t\tself.assertIn('isbn must only include numbers', str(res2))\n\t\t\tself.assertEqual(res.status_code, 400)",
"def is_issn(val):\n try:\n val = val.replace(\"-\", \"\").replace(\" \", \"\").upper()\n if len(val) != 8:\n return False\n r = sum([(8 - i) * (_convert_x_to_10(x)) for i, x in enumerate(val)])\n return not (r % 11)\n except ValueError:\n return False",
"def testBadFormatISBNAgain(self): \n val = format_isbn(\"12345678\")\n self.assertFalse(val)",
"def validate_isbn_format(isbn_code: str):\n format_valid = False\n isbn = list(isbn_code)\n msj = ''\n\n if len(isbn) == 13:\n\n isbn_numbers = []\n isbn_separator = []\n index = 0\n isbn_characters = []\n\n for character in isbn:\n\n if character in '0123456789':\n isbn_numbers.append(character)\n\n elif character not in '0123456789':\n isbn_characters.append(character)\n\n if character == '-':\n isbn_separator.append(character)\n\n if index > 0:\n if isbn[index - 1] not in '0123456789':\n msj = 'Se ingresaron dos separadores juntos'\n break\n else:\n msj = 'Se ingresó un caracter inválido'\n break\n\n index += 1\n\n if len(isbn_numbers) < 10:\n msj = 'Faltan dígitos'\n\n if len(isbn_separator) != 3:\n msj = 'No son 4 grupos de números.'\n\n if len(isbn_separator) < 3:\n diff = 3 - len(isbn_separator)\n msj += ' Faltan ' + str(diff) + ' separadores'\n else:\n diff = len(isbn_separator) - 3\n msj += ' Hay ' + str(diff) + ' separador sobrante'\n\n if msj == '':\n format_valid = True\n\n elif len(isbn) < 13:\n msj = 'Faltan caracteres'\n\n else:\n msj = 'Se excede la cantidad de carácteres'\n\n return format_valid, msj",
"def test_and_normalize_isbn(self, isbn):\n ret = {\"valid\": False, \"input_value\": str(isbn)}\n stripped_isbn = isbn.strip()\n unsplit_isbn = stripped_isbn.replace(\"-\", \"\")\n split_on_input = False\n if self.ISBN_SPLIT_RE.match(stripped_isbn):\n if len(stripped_isbn) < 17:\n ret[\"error_type\"] = 1\n return ret\n elif len(stripped_isbn) > 17:\n ret[\"error_type\"] = 2\n return ret\n else:\n split_on_input = True\n if self.ISBN_RE.match(unsplit_isbn):\n split_isbn = self.split_isbn(unsplit_isbn)[\"value\"]\n if split_on_input and split_isbn != stripped_isbn:\n ret[\"error_type\"] = 3\n return ret\n ret[\"normalised\"] = split_isbn\n ret[\"valid\"] = True\n return ret\n ret[\"error_type\"] = 0\n return ret",
"def is_id_valid(id_code: str) -> bool:\n if id_code.isdigit():\n if len(str(id_code)) == 11:\n id_code = str(id_code)\n gender_number = int(id_code[0:1])\n day = int(id_code[5:7])\n month = int(id_code[3:5])\n year = id_code[1:3]\n birth_number = id_code[7:10]\n if is_valid_gender_number(gender_number) \\\n and is_valid_year_number(int(year)) \\\n and is_valid_month_number(int(month)) \\\n and is_valid_day_number(gender_number, int(year), int(month), int(day)) \\\n and is_valid_birth_number(int(birth_number)) \\\n and is_valid_control_number(str(id_code)):\n return True\n return False\n return False\n return False",
"def valid_ric(ticker, ric):\n split_ric = ric.split('.')\n ticker_ = split_ric[0]\n exchange = split_ric[1]\n database = helper.create_db()\n exchange_list = database.retrieve_column_as_list(\"exchanges\",\n \"exchange_code\")\n return ticker == ticker_ and exchange in exchange_list"
] | [
"0.82796675",
"0.8265154",
"0.7875872",
"0.76414096",
"0.7556362",
"0.734542",
"0.73319346",
"0.72045577",
"0.7138764",
"0.7138109",
"0.704493",
"0.64504737",
"0.6407236",
"0.6328351",
"0.63012135",
"0.6227752",
"0.6151876",
"0.60880727",
"0.60821617",
"0.60790056",
"0.6012908",
"0.58996457",
"0.585556",
"0.5822463",
"0.58159876",
"0.58129185",
"0.581137",
"0.57795024",
"0.5748956",
"0.57382864"
] | 0.8820598 | 0 |
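The isISBN10 record above hinges on the ISBN-10 rule that the first nine digits, weighted 1..9 and summed modulo 11, must reproduce the check digit, with 'X' encoding a check value of 10. A standalone worked sketch of that rule (the helper name isbn10_check_digit is illustrative, not taken from the record):

def isbn10_check_digit(first_nine: str) -> str:
    # weights 1..9 applied to the first nine digits, result taken mod 11
    check = sum((i + 1) * int(d) for i, d in enumerate(first_nine)) % 11
    return "X" if check == 10 else str(check)

# '997150210' -> weighted sum 99, and 99 % 11 == 0, so the check digit is '0'
print(isbn10_check_digit("997150210"))          # '0'  -> '9971502100' is valid
print(isbn10_check_digit("997150210") == "8")   # False -> '9971502108' is invalid
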
Checks whether the given ISBN13 code is valid. >>> isISBN13('9789743159664') True >>> isISBN13('9787954527409') False >>> isISBN13('8799743159665') False | def isISBN13(code):
    # helper function for computing ISBN-13 check digit
def check_digit(code):
# compute check digit
check = sum((3 if i % 2 else 1) * int(code[i]) for i in range(12))
# convert check digit into a single digit
return str((10 - check) % 10)
# check whether given code is a string
if not isinstance(code, str):
return False
    # check whether given code contains 13 characters
if len(code) != 13:
return False
    # check whether first twelve characters of given code are digits
if not code[:12].isdigit():
return False
# check the check digit
return check_digit(code) == code[-1] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_isbn_13(isbn13):\r\n isbn13 = re.sub(r'[^0-9X]', '', isbn13.replace('x', 'X'))\r\n if len(isbn13) != 13: return False\r\n if isbn13[0:3] not in ('978', '979'): return False\r\n return False if isbn_13_check_digit(isbn13[:-1]) != isbn13[-1] else True",
"def isISBN(code, isbn13=True):\n\n return isISBN13(code) if isbn13 else isISBN10(code)",
"def isbn_13_check_structure(isbn13):\r\n return True if re.match(RE_ISBN13, isbn13) else False",
"def areISBN(codes, isbn13=None):\n\n # initialize list of checks\n checks = []\n\n # construct list of checks\n for code in codes:\n\n if isinstance(code, str):\n\n if isbn13 is None:\n checks.append(isISBN(code, len(code) == 13))\n else:\n checks.append(isISBN(code, isbn13))\n\n else:\n\n checks.append(False)\n\n # return list of checks\n return checks",
"def isISBN(code):\n if not (\n isinstance(code, str) and # code must be a string\n len(code) == 10 and # code must contain 10 characters\n code[:9].isdigit() # first nine characters must be digits\n ):\n return False\n\n # check the check digit\n return checkdigit(code) == code[-1]",
"def isbn_has_valid_check_digit(self, isbn):\n if not self.ISBN_RE.match(isbn):\n raise ValueError(str(isbn) + \" is no valid 13-digit ISBN!\")\n checksum = 0\n for index, digit in enumerate(isbn):\n if index % 2 == 0:\n checksum += int(digit)\n else:\n checksum += 3 * int(digit)\n return checksum % 10 == 0",
"def is_valid_isbn(isbn):\n clean = clean_isbn(isbn)\n return clean[-1] == isbn_check_digit(clean[:-1])",
"def isISBN10(code):\n\n # helper function for computing ISBN-10 check digit\n def check_digit(code):\n\n # compute check digit\n check = sum((i + 1) * int(code[i]) for i in range(9)) % 11\n\n # convert check digit into its string representation\n return 'X' if check == 10 else str(check)\n\n # check whether given code is a string\n if not isinstance(code, str):\n return False\n\n # check whether given code contains 10 characters\n if len(code) != 10:\n return False\n\n # check whether first nine characters of given code are digits\n if not code[:9].isdigit():\n return False\n\n # check the check digit\n return check_digit(code) == code[-1]",
"def is_valid(isbn: str) -> bool:\n no_dashes = isbn.replace('-', '')\n\n nums = []\n\n valid_num = False\n\n if no_dashes:\n for char in no_dashes:\n if char == 'X':\n nums.append(10)\n elif char != 'X' and char.isalpha() or len(no_dashes) < 10 or len(no_dashes) > 10:\n break\n elif 'X' in no_dashes and no_dashes[-1] != 'X':\n break\n else:\n nums.append(int(char))\n\n char = 0\n value = 0\n\n if nums and len(nums) == 10:\n for n in range(10, 0, -1):\n value += (n * nums[char])\n char += 1\n valid_num = (value % 11 == 0)\n\n return valid_num",
"def is_isbn(val):\n if is_isbn10(val) or is_isbn13(val):\n if val[0:3] in [\"978\", \"979\"] or not is_ean13(val):\n return True\n return False",
"def verify_format(isbn):\n\n return len(isbn) == 10 and (isbn[-1] == \"X\" or isbn[-1].isdigit()) \\\n and all(digit.isdigit() for digit in isbn[:-1])",
"def verify(s):\n\t# Remove any spurious characters\n\ts = re.sub(r'[^0-9xX]', '', s).upper().strip()\n\n\tl = len(s)\n\n\tif l==10:\n\t\tif verify_10(s):\n\t\t\treturn s\n\telif l==13:\n\t\tif verify_13(s):\n\t\t\treturn s\n\n\t# It's not the right length to be an ISBN\n\treturn False",
"def verify(isbn):\n\n isbn = isbn.replace(\"-\", \"\")\n if not verify_format(isbn):\n return False\n\n isbn_sum = 0\n for digit, i in zip(isbn, range(10, 0, -1)):\n if digit == \"X\":\n isbn_sum += 10 * i\n else:\n isbn_sum += int(digit) * i\n\n return isbn_sum % 11 == 0",
"def isbn13_convert(isbn13):\r\n if not is_isbn_13(isbn13): return None\r\n return isbn13[3:-1] + isbn_10_check_digit(isbn13[3:-1])",
"def isbn_10_check_structure(isbn10):\r\n return True if re.match(RE_ISBN10, isbn10) else False",
"def isbn():\n message = 'Informe um ISBN válido'\n def _isbn(form, field):\n if not is_isbn10(field.data) and not is_isbn13(field.data):\n raise ValidationError(message)\n return _isbn",
"def validate_isbn_format(isbn_code: str):\n format_valid = False\n isbn = list(isbn_code)\n msj = ''\n\n if len(isbn) == 13:\n\n isbn_numbers = []\n isbn_separator = []\n index = 0\n isbn_characters = []\n\n for character in isbn:\n\n if character in '0123456789':\n isbn_numbers.append(character)\n\n elif character not in '0123456789':\n isbn_characters.append(character)\n\n if character == '-':\n isbn_separator.append(character)\n\n if index > 0:\n if isbn[index - 1] not in '0123456789':\n msj = 'Se ingresaron dos separadores juntos'\n break\n else:\n msj = 'Se ingresó un caracter inválido'\n break\n\n index += 1\n\n if len(isbn_numbers) < 10:\n msj = 'Faltan dígitos'\n\n if len(isbn_separator) != 3:\n msj = 'No son 4 grupos de números.'\n\n if len(isbn_separator) < 3:\n diff = 3 - len(isbn_separator)\n msj += ' Faltan ' + str(diff) + ' separadores'\n else:\n diff = len(isbn_separator) - 3\n msj += ' Hay ' + str(diff) + ' separador sobrante'\n\n if msj == '':\n format_valid = True\n\n elif len(isbn) < 13:\n msj = 'Faltan caracteres'\n\n else:\n msj = 'Se excede la cantidad de carácteres'\n\n return format_valid, msj",
"def is_isbn_10(isbn10):\r\n isbn10 = re.sub(r'[^0-9X]', '', isbn10.replace('x', 'X'))\r\n if len(isbn10) != 10: return False\r\n return False if isbn_10_check_digit(isbn10[:-1]) != isbn10[-1] else True",
"def validate_isbn_math_relation(isbn_code: str):\n isbn_code_valid = False\n isbn_only_numbers = []\n msj = ''\n\n for character in isbn_code:\n if character in '0123456789':\n char_parse_int = int(character)\n isbn_only_numbers.append(char_parse_int)\n else:\n pass\n\n pos = 10\n addition = 0\n for num in isbn_only_numbers:\n mult = pos * num\n addition += mult\n pos -= 1\n\n final_result = addition % 11\n\n if final_result == 0:\n isbn_code_valid = True\n\n if not isbn_code_valid:\n msj = 'No se cumple la relación matemática'\n\n return isbn_code_valid, msj",
"def testBadFormatISBN(self): \n val = format_isbn(\"1234567843534594123\")\n self.assertFalse(val)",
"def is_code_valid_checksum(processed_code):\n\n if processed_code.isnumeric():\n list_of_digits = [int(digit) for digit in processed_code]\n else:\n converted_digits = convert_code_to_decimal(processed_code)\n list_of_digits = [int(digit) for digit in converted_digits]\n\n return sum(list_of_digits) > 0 and get_calculated_checksum(list_of_digits) % 11 == 0",
"def is_valid_issn(issn):\n try:\n return bool(validate_issn(issn))\n except (ValueError, TypeError):\n return False",
"def test_book_isbn_must_only_be_numbers(self):\n\n\t\twith self.client:\n\t\t\tadd_book = {\n\t\t\t\t'title': 'Hello Books',\n\t\t\t\t'isbn': '56987451Ky'\n\t\t\t}\n\t\t\tlogin_data = self.login_test_user()\n\t\t\ttoken = login_data['auth_token']\n\t\t\tres = self.client.post(\n\t\t\t\tf'{URL_BOOKS}',\n\t\t\t\theaders=dict(Authorization=f'Bearer {token}'),\n\t\t\t\tcontent_type='application/json',\n\t\t\t\tdata=json.dumps(add_book)\n\t\t\t)\n\t\t\tres2 = json.loads(res.data.decode())\n\t\t\tself.assertIn('isbn must only include numbers', str(res2))\n\t\t\tself.assertEqual(res.status_code, 400)",
"def is_ean13(val):\n if len(val) != 13:\n return False\n sequence = [1, 3]\n try:\n r = sum([int(x) * sequence[i % 2] for i, x in enumerate(val[:-1])])\n ck = (10 - r % 10) % 10\n return ck == int(val[-1])\n except ValueError:\n return False",
"def valid_book(self, info):\n self.cursor.execute(\"SELECT ISBN, title, price, stock FROM book WHERE ISBN=%s\", (info['ISBN'],))\n for book in self.cursor.fetchall():\n return True, float(book[2]), book[1], book[3]\n return False, 0, 0, 0",
"def validate_identifier(identifier: str) -> bool:\n if identifier[:2] == 'NR':\n return True\n\n if len(identifier) < 9:\n return False\n\n try:\n d = int(identifier[-7:])\n if d == 0:\n return False\n except ValueError:\n return False\n # TODO This is not correct for entity types that are not Coops\n if identifier[:-7] not in ('CP', 'XCP', 'BC'):\n return False\n\n return True",
"def is_id_valid(id_code: str) -> bool:\n if is_valid_gender_number(int(id_code[0:1])):\n if is_valid_year_number(int(id_code[1:3])):\n if is_valid_month_number(int(id_code[3:5])):\n if is_valid_day_number(int(id_code[0:1]), int(id_code[1:3]), int(id_code[3:5]), int(id_code[5:7])):\n if is_valid_birth_number(int(float(id_code[7:10]))):\n if is_valid_control_number(id_code):\n return True\n else:\n return False\n else:\n return False\n\n else:\n return False\n else:\n return False\n else:\n return False",
"def testBadFormatISBNAgain(self): \n val = format_isbn(\"12345678\")\n self.assertFalse(val)",
"def is_id_valid(id_code: str) -> bool:\n if id_code.isdigit():\n if len(str(id_code)) == 11:\n id_code = str(id_code)\n gender_number = int(id_code[0:1])\n day = int(id_code[5:7])\n month = int(id_code[3:5])\n year = id_code[1:3]\n birth_number = id_code[7:10]\n if is_valid_gender_number(gender_number) \\\n and is_valid_year_number(int(year)) \\\n and is_valid_month_number(int(month)) \\\n and is_valid_day_number(gender_number, int(year), int(month), int(day)) \\\n and is_valid_birth_number(int(birth_number)) \\\n and is_valid_control_number(str(id_code)):\n return True\n return False\n return False\n return False",
"def barcode_is_valid(s):\n return (bool(re.match(r'^[ATGC]*$',s))\n or barcode_is_10xgenomics(s))"
] | [
"0.8294502",
"0.8156061",
"0.7813687",
"0.7593365",
"0.7539352",
"0.7096975",
"0.70109516",
"0.6998053",
"0.6760535",
"0.6702344",
"0.6442007",
"0.64231825",
"0.6403078",
"0.6313602",
"0.6305555",
"0.6260889",
"0.6107894",
"0.5969835",
"0.5896946",
"0.57390374",
"0.56742215",
"0.56619984",
"0.5472034",
"0.5456971",
"0.54418916",
"0.5413317",
"0.5353483",
"0.534153",
"0.5326616",
"0.5303506"
] | 0.86895674 | 0 |
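The isISBN13 record above uses the EAN-13 style checksum: the first twelve digits are weighted alternately 1,3,1,3,..., and the check digit is whatever brings the total to a multiple of 10. A short worked sketch (the helper name is chosen here for illustration):

def isbn13_check_digit(first_twelve: str) -> str:
    # alternating weights 1,3,1,3,... over the first twelve digits
    check = sum((3 if i % 2 else 1) * int(d) for i, d in enumerate(first_twelve))
    return str((10 - check) % 10)

# '978974315966' -> weighted sum 146, (10 - 6) % 10 == 4, matching '9789743159664'
print(isbn13_check_digit("978974315966"))  # '4'
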
>>> codes = ['0012345678', '0012345679', '9971502100', '080442957X', 5, True, 'The Practice of Computing Using Python', '9789027439642', '5486948320146'] >>> areISBN(codes) [False, True, True, True, False, False, False, True, False] >>> areISBN(codes, True) [False, False, False, False, False, False, False, True, False] >>> areISBN(codes, False) [False, True, True, True, False, False, False, False, False] | def areISBN(codes, isbn13=None):
# initialize list of checks
checks = []
# construct list of checks
for code in codes:
if isinstance(code, str):
if isbn13 is None:
checks.append(isISBN(code, len(code) == 13))
else:
checks.append(isISBN(code, isbn13))
else:
checks.append(False)
# return list of checks
return checks | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_isbn(val):\n if is_isbn10(val) or is_isbn13(val):\n if val[0:3] in [\"978\", \"979\"] or not is_ean13(val):\n return True\n return False",
"def isISBN(code, isbn13=True):\n\n return isISBN13(code) if isbn13 else isISBN10(code)",
"def isISBN(code):\n if not (\n isinstance(code, str) and # code must be a string\n len(code) == 10 and # code must contain 10 characters\n code[:9].isdigit() # first nine characters must be digits\n ):\n return False\n\n # check the check digit\n return checkdigit(code) == code[-1]",
"def isbn_10_check_structure(isbn10):\r\n return True if re.match(RE_ISBN10, isbn10) else False",
"def isISBN10(code):\n\n # helper function for computing ISBN-10 check digit\n def check_digit(code):\n\n # compute check digit\n check = sum((i + 1) * int(code[i]) for i in range(9)) % 11\n\n # convert check digit into its string representation\n return 'X' if check == 10 else str(check)\n\n # check whether given code is a string\n if not isinstance(code, str):\n return False\n\n # check whether given code contains 10 characters\n if len(code) != 10:\n return False\n\n # check whether first nine characters of given code are digits\n if not code[:9].isdigit():\n return False\n\n # check the check digit\n return check_digit(code) == code[-1]",
"def isISBN13(code):\n\n # helper function for computing ISBN-10 check digit\n def check_digit(code):\n\n # compute check digit\n check = sum((3 if i % 2 else 1) * int(code[i]) for i in range(12))\n\n # convert check digit into a single digit\n return str((10 - check) % 10)\n\n # check whether given code is a string\n if not isinstance(code, str):\n return False\n\n # check whether given code contains 10 characters\n if len(code) != 13:\n return False\n\n # check whether first nine characters of given code are digits\n if not code[:12].isdigit():\n return False\n\n # check the check digit\n return check_digit(code) == code[-1]",
"def verify(isbn):\n\n isbn = isbn.replace(\"-\", \"\")\n if not verify_format(isbn):\n return False\n\n isbn_sum = 0\n for digit, i in zip(isbn, range(10, 0, -1)):\n if digit == \"X\":\n isbn_sum += 10 * i\n else:\n isbn_sum += int(digit) * i\n\n return isbn_sum % 11 == 0",
"def is_valid_isbn(isbn):\n clean = clean_isbn(isbn)\n return clean[-1] == isbn_check_digit(clean[:-1])",
"def isbn_13_check_structure(isbn13):\r\n return True if re.match(RE_ISBN13, isbn13) else False",
"def is_valid(isbn: str) -> bool:\n no_dashes = isbn.replace('-', '')\n\n nums = []\n\n valid_num = False\n\n if no_dashes:\n for char in no_dashes:\n if char == 'X':\n nums.append(10)\n elif char != 'X' and char.isalpha() or len(no_dashes) < 10 or len(no_dashes) > 10:\n break\n elif 'X' in no_dashes and no_dashes[-1] != 'X':\n break\n else:\n nums.append(int(char))\n\n char = 0\n value = 0\n\n if nums and len(nums) == 10:\n for n in range(10, 0, -1):\n value += (n * nums[char])\n char += 1\n valid_num = (value % 11 == 0)\n\n return valid_num",
"def verify_format(isbn):\n\n return len(isbn) == 10 and (isbn[-1] == \"X\" or isbn[-1].isdigit()) \\\n and all(digit.isdigit() for digit in isbn[:-1])",
"def valid_book(self, info):\n self.cursor.execute(\"SELECT ISBN, title, price, stock FROM book WHERE ISBN=%s\", (info['ISBN'],))\n for book in self.cursor.fetchall():\n return True, float(book[2]), book[1], book[3]\n return False, 0, 0, 0",
"def is_isbn_10(isbn10):\r\n isbn10 = re.sub(r'[^0-9X]', '', isbn10.replace('x', 'X'))\r\n if len(isbn10) != 10: return False\r\n return False if isbn_10_check_digit(isbn10[:-1]) != isbn10[-1] else True",
"def is_isbn_13(isbn13):\r\n isbn13 = re.sub(r'[^0-9X]', '', isbn13.replace('x', 'X'))\r\n if len(isbn13) != 13: return False\r\n if isbn13[0:3] not in ('978', '979'): return False\r\n return False if isbn_13_check_digit(isbn13[:-1]) != isbn13[-1] else True",
"def checker(product):\n for item in INSTOCK:\n if item == product:\n return True\n return False",
"def isbn_has_valid_check_digit(self, isbn):\n if not self.ISBN_RE.match(isbn):\n raise ValueError(str(isbn) + \" is no valid 13-digit ISBN!\")\n checksum = 0\n for index, digit in enumerate(isbn):\n if index % 2 == 0:\n checksum += int(digit)\n else:\n checksum += 3 * int(digit)\n return checksum % 10 == 0",
"def isbn():\n message = 'Informe um ISBN válido'\n def _isbn(form, field):\n if not is_isbn10(field.data) and not is_isbn13(field.data):\n raise ValidationError(message)\n return _isbn",
"def checker(item):\n return item in INSTOCK",
"def checkBINn(L, n):\n binaire = [0,1]\n if len(L)==n:\n for i in range(len(L)):\n if L[i] not in binaire:\n return False\n return True\n else: \n return False",
"def isbn_check_digit(isbn):\n return (11 - (sum(x * y for (x, y) in enumerate(reversed(isbn), start=2))\n % 11)) % 11",
"def find_book_dois_in_crossref(isbn_list):\n ret_value = {\n \"success\": False,\n \"dois\": []\n }\n if type(isbn_list) != type([]) or len(isbn_list) == 0:\n ret_value['error_msg'] = \"Parameter must be a non-empty list!\"\n return ret_value\n filter_list = [\"isbn:\" + isbn.strip() for isbn in isbn_list]\n filters = \",\".join(filter_list)\n api_url = \"https://api.crossref.org/works?filter=\"\n url = api_url + filters + \"&rows=500\"\n request = Request(url)\n request.add_header(\"User-Agent\", USER_AGENT)\n try:\n ret = urlopen(request)\n content = ret.read()\n data = json.loads(content)\n if data[\"message\"][\"total-results\"] == 0:\n ret_value[\"success\"] = True\n else:\n for item in data[\"message\"][\"items\"]:\n if item[\"type\"] in [\"monograph\", \"book\"] and item[\"DOI\"] not in ret_value[\"dois\"]:\n ret_value[\"dois\"].append(item[\"DOI\"])\n if len(ret_value[\"dois\"]) == 0:\n msg = \"No monograph/book DOI type found in Crossref ISBN search result ({})!\"\n raise ValueError(msg.format(url))\n else:\n ret_value[\"success\"] = True\n except HTTPError as httpe:\n ret_value['error_msg'] = \"HTTPError: {} - {}\".format(httpe.code, httpe.reason)\n except URLError as urle:\n ret_value['error_msg'] = \"URLError: {}\".format(urle.reason)\n except ValueError as ve:\n ret_value['error_msg'] = str(ve)\n return ret_value",
"def validate_isbn_math_relation(isbn_code: str):\n isbn_code_valid = False\n isbn_only_numbers = []\n msj = ''\n\n for character in isbn_code:\n if character in '0123456789':\n char_parse_int = int(character)\n isbn_only_numbers.append(char_parse_int)\n else:\n pass\n\n pos = 10\n addition = 0\n for num in isbn_only_numbers:\n mult = pos * num\n addition += mult\n pos -= 1\n\n final_result = addition % 11\n\n if final_result == 0:\n isbn_code_valid = True\n\n if not isbn_code_valid:\n msj = 'No se cumple la relación matemática'\n\n return isbn_code_valid, msj",
"def verify(s):\n\t# Remove any spurious characters\n\ts = re.sub(r'[^0-9xX]', '', s).upper().strip()\n\n\tl = len(s)\n\n\tif l==10:\n\t\tif verify_10(s):\n\t\t\treturn s\n\telif l==13:\n\t\tif verify_13(s):\n\t\t\treturn s\n\n\t# It's not the right length to be an ISBN\n\treturn False",
"def validate_isbn_format(isbn_code: str):\n format_valid = False\n isbn = list(isbn_code)\n msj = ''\n\n if len(isbn) == 13:\n\n isbn_numbers = []\n isbn_separator = []\n index = 0\n isbn_characters = []\n\n for character in isbn:\n\n if character in '0123456789':\n isbn_numbers.append(character)\n\n elif character not in '0123456789':\n isbn_characters.append(character)\n\n if character == '-':\n isbn_separator.append(character)\n\n if index > 0:\n if isbn[index - 1] not in '0123456789':\n msj = 'Se ingresaron dos separadores juntos'\n break\n else:\n msj = 'Se ingresó un caracter inválido'\n break\n\n index += 1\n\n if len(isbn_numbers) < 10:\n msj = 'Faltan dígitos'\n\n if len(isbn_separator) != 3:\n msj = 'No son 4 grupos de números.'\n\n if len(isbn_separator) < 3:\n diff = 3 - len(isbn_separator)\n msj += ' Faltan ' + str(diff) + ' separadores'\n else:\n diff = len(isbn_separator) - 3\n msj += ' Hay ' + str(diff) + ' separador sobrante'\n\n if msj == '':\n format_valid = True\n\n elif len(isbn) < 13:\n msj = 'Faltan caracteres'\n\n else:\n msj = 'Se excede la cantidad de carácteres'\n\n return format_valid, msj",
"def testBadFormatISBN(self): \n val = format_isbn(\"1234567843534594123\")\n self.assertFalse(val)",
"def check_code(item_code):\r\n # RA matches\r\n if re.match(r'^MCRNC[0-9]{4}\\.T$', item_code):\r\n return True\r\n\r\n if re.match(r'^RAN[0-9]{3,4}(\\.[0-9])?C?(\\.T)?$', item_code):\r\n return True\r\n\r\n if re.match(r'^RAS[0-9]{5}$', item_code):\r\n return True\r\n\r\n if re.match(r'^RNC[0-9]{4}\\.T$', item_code):\r\n return True\r\n\r\n if re.match(r'^RU[0-9]{5}(\\.T)?$', item_code):\r\n return True\r\n\r\n # Feature ID (RAN) matches\r\n if re.match(r'^RAN[0-9]{2,5}$', item_code):\r\n return True\r\n\r\n if re.match(r'^(?P<code>RAN[1,2](\\.[0-9]{3,4}))$', item_code):\r\n return True\r\n\r\n return False",
"def isbimol(rxn_typ):\n return rxn_typ in BIMOL_REACTIONS",
"def checker(self, product):\n for item in self.instock:\n if item == product:\n return True\n return False",
"def __contains__(self, code: str) -> bool:\n return code in self._all_codes_map",
"def decode(code):\n def h(x):\n hs = []\n for i in range(len(code)):\n if code[i] != '0' and (code[i] == '?' or code[i] == x[i]):\n hs.append(True)\n else:\n hs.append(False)\n return all(hs)\n return h"
] | [
"0.74519074",
"0.72815156",
"0.7090671",
"0.65742284",
"0.65578085",
"0.6463269",
"0.63166803",
"0.6275632",
"0.6265059",
"0.62025166",
"0.6028817",
"0.59755796",
"0.58285195",
"0.5798127",
"0.5781443",
"0.5775947",
"0.570381",
"0.563113",
"0.5504642",
"0.54982454",
"0.5482436",
"0.5482395",
"0.54187334",
"0.5376089",
"0.53284603",
"0.52972955",
"0.5294533",
"0.5265732",
"0.5236231",
"0.5225542"
] | 0.8204821 | 0 |
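The areISBN record above is just a loop over isISBN; the only logic it adds is rejecting non-strings and, when isbn13 is None, letting each code's length pick the validator. A hypothetical sketch of that dispatch decision alone (names are illustrative and no checksum is computed here):

def dispatch_isbn_check(code, isbn13=None):
    # mirrors the dispatch rule in the record above: non-strings fail outright,
    # and when isbn13 is None the code's own length decides which validator runs
    if not isinstance(code, str):
        return ("rejected", None)
    use_isbn13 = (len(code) == 13) if isbn13 is None else isbn13
    return ("isISBN13" if use_isbn13 else "isISBN10", code)

print(dispatch_isbn_check("9971502100"))     # ('isISBN10', '9971502100')
print(dispatch_isbn_check("9789027439642"))  # ('isISBN13', '9789027439642')
print(dispatch_isbn_check(True))             # ('rejected', None)
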
Calculates a two's complement integer from the given input value's bits | def twos_complement(input_value, num_bits=16):
mask = 2 ** (num_bits - 1)
return -(input_value & mask) + (input_value & ~mask) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def twos_complement(n, bits):\n if n < 0 or n >= 2**bits:\n raise ValueError\n\n return 2**bits - n",
"def twos_complement(val, bits):\n if (val & (1 << (bits - 1))) != 0: # if sign bit is set e.g., 8bit: 128-255\n val = val - (1 << bits) # compute negative value\n return val # return positive value as is",
"def twos_complement(val, bits):\n if (val & (1 << (bits - 1))) != 0: # if sign bit is set\n val = val - (2 ** bits) # compute negative value\n return val",
"def twos_complement_to_unsigned(val, bits):\n if val >= 0:\n return val\n all_one = (1 << bits)-1\n val = ((-val)^all_one)+1\n\n return val",
"def ones_complement(val):\n #mask = (1 << val.bit_length()) - 1\n #return int(hex(val ^ mask), 16)\n b = bin(val)\n b = b.replace('0', 'x')\n b = b.replace('1', '0')\n b = b.replace('x', '1')\n b = b.replace('1b', '0b')\n return int(b, 2)",
"def complement(x):\n out = 1 - x\n return out",
"def findComplement(self, num: int) -> int:\n n = num\n xor = 1\n while n > 0:\n num = num ^ xor\n xor = xor << 1\n n = n >> 1\n return num",
"def _twosComplement(x, bits=16):\n _checkInt(bits, minvalue=0, description='number of bits')\n _checkInt(x, description='input')\n upperlimit = 2 ** (bits - 1) - 1\n lowerlimit = -2 ** (bits - 1)\n if x > upperlimit or x < lowerlimit:\n raise ValueError('The input value is out of range. Given value is {0}, but allowed range is {1} to {2} when using {3} bits.' \\\n .format(x, lowerlimit, upperlimit, bits))\n\n # Calculate two'2 complement\n if x >= 0:\n return x\n return x + 2 ** bits",
"def _fromTwosComplement(x, bits=16):\n _checkInt(bits, minvalue=0, description='number of bits')\n\n _checkInt(x, description='input')\n upperlimit = 2 ** (bits) - 1\n lowerlimit = 0\n if x > upperlimit or x < lowerlimit:\n raise ValueError('The input value is out of range. Given value is {0}, but allowed range is {1} to {2} when using {3} bits.' \\\n .format(x, lowerlimit, upperlimit, bits))\n\n # Calculate inverse(?) of two'2 complement\n limit = 2 ** (bits - 1) - 1\n if x <= limit:\n return x\n return x - 2 ** bits",
"def twos_complement(value: int, width: int) -> int:\n signmask = 1 << (width - 1)\n if (value & signmask) == 0:\n # Mask off sign bit.\n return value & (signmask - 1)\n else:\n # Two's complement.\n return -bit_invert(value, width - 1) - 1",
"def ones_complement(x, bits=16):\n return x ^ ((1 << bits) - 1)",
"def create_bit_negative(self, value, bits):\n imm_code = bin(value).split('b')[1]\n imm_code = '0'*(bits - len(imm_code)) + imm_code\n if value < 0:\n imm_lst = []\n for bit in imm_code:\n imm_lst.append(bit)\n flip_bit = False\n place = bits - 1\n while place >= 0:\n if not flip_bit and imm_lst[place] == \"1\":\n flip_bit = True\n elif flip_bit:\n if imm_lst[place] == \"0\":\n imm_lst[place] = \"1\"\n else:\n imm_lst[place] = \"0\"\n place -= 1\n imm_code = \"\".join(imm_lst)\n return imm_code",
"def _bits(num):\r\n return bin(int(num))[2:]",
"def find_complement(num):\n pass",
"def twos_comp(val, bits):\n if (val & (1 << (bits - 1))) != 0: # if sign bit is set e.g., 8bit: 128-255\n val = val - (1 << bits) # compute negative value\n return val # return positive value as is",
"def twos_comp(val, bits):\n if (val & (1 << (bits - 1))) != 0: # if sign bit is set e.g., 8bit: 128-255\n val = val - (1 << bits) # compute negative value\n return val # return positive value as is",
"def twos_comp(val, bits):\n if (val & (1 << (bits - 1))) != 0: # if sign bit is set e.g., 8bit: 128-255\n val = val - (1 << bits) # compute negative value\n return val # return positive value as is",
"def twos_comp(val, bits):\r\n if (val & (1 << (bits - 1))) != 0: # if sign bit is set e.g., 8bit: 128-255\r\n val = val - (1 << bits) # compute negative value\r\n return val # return positive value as is\r",
"def twos_comp(val, bits):\r\n if (val & (1 << (bits - 1))) != 0: # if sign bit is set e.g., 8bit: 128-255\r\n val = val - (1 << bits) # compute negative value\r\n return val # return positive value as is\r",
"def _get_binary(value, bits):\n\n # http://www.daniweb.com/code/snippet216539.html\n return ''.join([str((value >> y) & 1) for y in range(bits - 1, -1, -1)])",
"def twos_comp(val, bits):\n if (val & (1 << (bits - 1))) != 0: # if sign bit is set e.g., 8bit: 128-255\n val = val - (1 << bits) # compute negative value\n return val",
"def twos_comp(val, bits):\n if (val & (1 << (bits - 1))) != 0: # if sign bit is set e.g., 8bit: 128-255\n val = val - (1 << bits) # compute negative value\n return val",
"def complement(self)->'SInt':\r\n S = SInt(self.nbBytes)\r\n S.binaire = '0' * (len(self) - 1) + '1'\r\n S += super(SInt, self).complement()\r\n return S",
"def bitmask(n: int) -> int:\n if n >= 0:\n return (1 << n) - 1\n else:\n return -1 << -n",
"def twos_comp(val, bits):\n if (val & (1 << (bits - 1))) != 0: # if sign bit is set e.g., 8bit: 128-255\n val = val - (1 << bits) # compute negative value\n return val",
"def twos_comp(val, bits):\n if( (val&(1<<(bits-1))) != 0 ):\n val = val - (1<<bits)\n return val",
"def bintogray(x: int) -> int:\n assert x >= 0\n return x ^ (x >> 1)",
"def bin2dec(number):\n\tmysum = 0\n\tnumber = str(number)[::-1]\n\tfor i,x in enumerate(number):\n\t\tif int(x) > 0:\n\t\t\tmysum += (2**i)\n\treturn mysum",
"def maskbits(x: int, n:int) -> int:\n if n >= 0:\n return x & ((1 << n) - 1)\n else:\n return x & (-1 << -n)",
"def twos_comp(val, num_bits):\n if ((val & (1 << (num_bits - 1))) != 0):\n val = val - (1 << num_bits)\n return val"
] | [
"0.773702",
"0.7683835",
"0.76752746",
"0.7462103",
"0.74167454",
"0.7336416",
"0.70802414",
"0.70657045",
"0.70034766",
"0.6954941",
"0.69006544",
"0.68687546",
"0.676691",
"0.67314565",
"0.67230797",
"0.67230797",
"0.67230797",
"0.6720881",
"0.6720881",
"0.6685405",
"0.6657924",
"0.6655872",
"0.66186553",
"0.6578435",
"0.6566544",
"0.65465236",
"0.6530952",
"0.6446756",
"0.64436054",
"0.6426137"
] | 0.79249084 | 0 |
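The twos_complement record above splits the word into the sign bit, weighted -2**(num_bits - 1), and the remaining bits, which keep their ordinary positive weights. A short worked sketch with a few spot checks (values chosen for illustration):

def twos_complement(value: int, num_bits: int = 16) -> int:
    # the sign bit contributes -2**(num_bits-1); the remaining bits add normally
    mask = 2 ** (num_bits - 1)
    return -(value & mask) + (value & ~mask)

print(twos_complement(0x0005))  # 5      (sign bit clear -> value unchanged)
print(twos_complement(0xFFFF))  # -1     (all bits set in 16-bit two's complement)
print(twos_complement(0x8000))  # -32768 (only the sign bit set)
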
Transfer models to target port | def transfer(self, target_port: Port, evaluator: Evaluator, config_uids: List[int] = None) -> None:
if target_port.name not in self.transfer_defs:
print(f"No transfer definition found for target port '{target_port.name}'")
return
# transfer definitions for specified target port
tds = self.transfer_defs[target_port.name]
output_dir = os.path.join(script_dir, os.pardir, "output")
training_type = "transfer"
print(f"TRANSFERRING MODELS TO TARGET PORT '{target_port.name}'")
if config_uids is not None:
print(f"Transferring configs -> {config_uids} <-")
window_width = 50
num_epochs = 25
train_lr = 0.01
fine_num_epochs = 20
fine_tune_lr = 1e-5
batch_size = 1024
# skip port if fully transferred
num_not_transferred = 0
for td in tds:
for config in self.transfer_configs:
if not self._is_transferred(target_port.name, td.base_port_name, config.uid):
# print(f"Not transferred: {td.base_port_name} -> {target_port.name} ({config.uid})")
num_not_transferred += 1
num_transfers = len(tds) * len(self.transfer_configs)
print(f"Transferred count {num_transfers - num_not_transferred}/{num_transfers}")
if num_not_transferred == 0:
print(f"All transfers done for target port '{target_port.name}': Skipping")
return
X_ts, y_ts = load_data(target_port, window_width)
baseline = mean_absolute_error(y_ts, np.full_like(y_ts, np.mean(y_ts)))
evaluator.set_naive_baseline(target_port, baseline)
print(f"Naive baseline: {baseline}")
# X_train_orig, X_test_orig, y_train_orig, y_test_orig = train_test_split(X_ts, y_ts, test_size=0.2,
# random_state=42, shuffle=False)
# train_optimizer = Adam(learning_rate=train_lr)
# fine_tune_optimizer = Adam(learning_rate=fine_tune_lr)
for td in tds:
print(f".:'`!`':. TRANSFERRING PORT {td.base_port_name} TO {td.target_port_name} .:'`!`':.")
print(f"- - Epochs {num_epochs} </> </> Learning rate {train_lr} - -")
print(f"- - Window width {window_width} </> Batch size {batch_size} - -")
# print(f"- - Number of model's parameters {num_total_trainable_parameters(model)} device {device} - -")
base_port = self.pm.find_port(td.base_port_name)
if base_port is None:
raise ValueError(f"Unable to associate port with port name '{td.base_port_name}'")
# model = inception_time(input_shape=(window_width, 37))
# print(model.summary())
# apply transfer config
for config in self.transfer_configs:
if config_uids is not None and config.uid not in config_uids:
continue
if self._is_transferred(target_port.name, td.base_port_name, config.uid):
print(f"Skipping config {config.uid}")
continue
print(f"\n.:'':. APPLYING CONFIG {config.uid} ::'':.")
print(f"-> -> {config.desc} <- <-")
print(f"-> -> nth_subset: {config.nth_subset} <- <-")
print(f"-> -> trainable layers: {config.train_layers} <- <-")
_, _, start_time, _, _ = decode_keras_model(os.path.split(td.base_model_path)[1])
model_file_name = encode_keras_model(td.target_port_name, start_time, td.base_port_name, config.uid)
file_path = os.path.join(output_dir, "model", td.target_port_name, model_file_name)
X_train_orig, X_test_orig, y_train_orig, y_test_orig = train_test_split(X_ts, y_ts, test_size=0.2,
random_state=42, shuffle=False)
train_optimizer = Adam(learning_rate=train_lr)
fine_tune_optimizer = Adam(learning_rate=fine_tune_lr)
checkpoint = ModelCheckpoint(file_path, monitor='val_mae', mode='min', verbose=2, save_best_only=True)
early = EarlyStopping(monitor="val_mae", mode="min", patience=10, verbose=2)
redonplat = ReduceLROnPlateau(monitor="val_mae", mode="min", patience=3, verbose=2)
callbacks_list = [checkpoint, early, redonplat]
# optimizer = Adam(learning_rate=lr)
#
# # configure model
# model.compile(optimizer=optimizer, loss="mse", metrics=["mae"])
# load base model
model = load_model(td.base_model_path)
# if config.uid == 0:
# print(model.summary())
# else:
# print(model.summary())
# del model
X_train = X_train_orig
X_test = X_test_orig
y_train = y_train_orig
y_test = y_test_orig
# apply transfer configuration
if config.nth_subset > 1:
if X_train.shape[0] < config.nth_subset:
print(f"Unable to apply nth-subset. Not enough data")
X_train = X_train_orig[0::config.nth_subset]
X_test = X_test_orig[0::config.nth_subset]
y_train = y_train_orig[0::config.nth_subset]
y_test = y_test_orig[0::config.nth_subset]
print(f"Orig shape: {X_train_orig.shape} {config.nth_subset} th-subset shape: {X_train.shape}")
print(f"Orig shape: {X_test_orig.shape} {config.nth_subset} th-subset shape: {X_test.shape}")
print(f"Orig shape: {y_train_orig.shape} {config.nth_subset} th-subset shape: {y_train.shape}")
print(f"Orig shape: {y_test_orig.shape} {config.nth_subset} th-subset shape: {y_test.shape}")
modified = False
# freeze certain layers
for layer in model.layers:
if layer.name not in config.train_layers:
modified = True
print(f"setting layer {layer.name} to False")
layer.trainable = False
else:
print(f"layer {layer.name} stays True")
if modified:
print(f"modified. compiling")
# re-compile
model.compile(optimizer=train_optimizer, loss="mse", metrics=["mae"])
# trainable_count = int(np.sum([K.count_params(p) for p in set(model.trainable_weights)]))
# non_trainable_count = int(np.sum([K.count_params(p) for p in set(model.non_trainable_weights)]))
trainable_count = count_params(model.trainable_weights)
non_trainable_count = count_params(model.non_trainable_weights)
print(f"Total params: {trainable_count + non_trainable_count}")
print(f"Trainable params: {trainable_count}")
print(f"Non trainable params: {non_trainable_count}")
# transfer model
result = model.fit(X_train, y_train, epochs=num_epochs, batch_size=batch_size, verbose=2,
validation_data=(X_test, y_test), callbacks=callbacks_list)
train_mae = result.history["mae"]
val_mae = result.history["val_mae"]
gc.collect()
tune_result = None
tune_train_mae = None
tune_val_mae = None
if config.tune:
print(f"Fine-Tuning transferred model")
# apply fine-tuning: unfreeze all but batch-normalization layers!
for layer in model.layers:
if not layer.name.startswith("batch_normalization"):
layer.trainable = True
model.compile(optimizer=fine_tune_optimizer, loss="mse", metrics=["mae"])
# print(f"model for fine tuning")
# print(model.summary())
tune_result = model.fit(X_train, y_train, epochs=fine_num_epochs, batch_size=batch_size, verbose=2,
validation_data=(X_test, y_test), callbacks=callbacks_list)
tune_train_mae = tune_result.history["mae"]
tune_val_mae = tune_result.history["val_mae"]
model.load_weights(file_path)
# set evaluation
def _compute_mae(_val_mae: List[float], _tune_val_mae: List[float]) -> float:
if _tune_val_mae is not None:
_val_mae = _val_mae + _tune_val_mae
                return min(_val_mae)
evaluator.set_mae(target_port, start_time, _compute_mae(val_mae, tune_val_mae), base_port, config.uid)
y_pred = model.predict(X_test)
grouped_mae = evaluator.group_mae(y_test, y_pred)
evaluator.set_mae(target_port, start_time, grouped_mae, base_port, config.uid)
# save history
history_file_name = encode_history_file(training_type, target_port.name, start_time, td.base_port_name,
config.uid)
history_path = os.path.join(output_dir, "data", target_port.name, history_file_name)
np.save(history_path, [result.history, tune_result.history if tune_result else None])
# plot history
plot_dir = os.path.join(output_dir, "plot")
plot_history(train_mae, val_mae, plot_dir, target_port.name, start_time, training_type,
td.base_port_name, config.uid, tune_train_mae, tune_val_mae)
# evaluator.plot_grouped_mae(target_port, training_type, start_time, config.uid)
plot_predictions(y_pred, y_test, plot_dir, target_port.name, start_time, training_type,
td.base_port_name, config.uid)
self.set_transfer(target_port.name, td.base_port_name, config.uid)
del checkpoint, early, redonplat
del X_train_orig, X_test_orig, y_train_orig, y_test_orig, model, X_train, y_train, X_test, y_test
gc.collect()
tf.keras.backend.clear_session()
gc.collect()
del X_ts, y_ts | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def deploy_to_device(self):\n if self.device_ids is not None and len(self.device_ids) > 1:\n if not isinstance(self.model, torch.nn.DataParallel):\n self.model = torch.nn.DataParallel(self.model, self.device_ids)\n\n self.model = self.model.to(self.device)\n self.criterion = self.criterion.to(self.device)",
"def to(self, device):\n self.device = device\n self.model.to(self.device)",
"def module_transfer_to_device(self) -> None:\n for name, module in self.modules.items():\n module.to(self.device)\n if self.device.type == 'cuda':\n self.modules[name] = torch.nn.DataParallel(module, self.gpu_ids)\n return",
"def change_port( self ):\n # disconnect and delete controller\n self.delete_controller()\n \n # update port\n self.update_port()",
"def __call__(self):\n topo.sim.connect(str(self.src),str(self.dest),\n self.projection_type,\n **self.parameters)",
"def to_device(model, device):\n p = next(model.parameters())\n if p.device == device:\n return\n model.to(device)",
"def install_sample(self, datapath, table_id):\n parser = datapath.ofproto_parser\n ofproto = datapath.ofproto\n # Incoming port 1.\n in_port = 1;\n for timeout in range(60, 1 ,-1):\n # Incoming Ethernet destination\n match = self.create_match(parser,\n {ofproto.OXM_OF_METADATA: timeout})\n # Output to port 2.\n output = parser.OFPActionOutput(2, 0)\n write = parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,\n [output])\n instructions = [write]\n flow_mod = self.create_flow_add(datapath, 100, timeout,\n table_id, match, instructions)\n datapath.send_msg(flow_mod)\n\n print \"sent flow_mod\"",
"def transfer_weights(src_model, dest_model):\r\n # ingore the first layer Input()\r\n # layer 1-24 to 1-24\r\n for i in range(1, 24):\r\n dest_model.layers[i].set_weights(src_model.layers[i].get_weights())\r\n print(\"Partially load weights from layer 1-24 successfully!\")\r\n\r\n # layer 25-45 to 65-85\r\n for i in range(25, 45):\r\n dest_model.layers[i+40].set_weights(src_model.layers[i].get_weights())\r\n print(\"Partially load weights from layer 25-45 successfully!\")\r\n\r\n # layer 46-65 to 126-145\r\n for i in range(46, 65):\r\n dest_model.layers[i+80].set_weights(src_model.layers[i].get_weights())\r\n print(\"Partially load weights from layer 46-65 successfully!\")\r\n\r\n # 69 to 189\r\n dest_model.layers[69+120].set_weights(src_model.layers[69].get_weights())\r\n print(\"Partially load weights from layer 69 successfully!\")",
"def forward(opt):\n my_utils.plant_seeds(randomized_seed=opt.randomize)\n os.makedirs(opt.output_dir, exist_ok=True)\n\n trainer = t.Trainer(opt)\n trainer.build_dataset_train_for_matching()\n trainer.build_dataset_test_for_matching()\n trainer.build_network()\n trainer.build_losses()\n trainer.network.eval()\n\n if opt.eval_list and os.path.isfile(opt.eval_list):\n source_target_files = np.loadtxt(opt.eval_list, dtype=str)\n source_target_files = source_target_files.tolist()\n for i, st in enumerate(source_target_files):\n source, target = st\n cat1, fname1 = source.split('/')\n fname1 = os.path.splitext(fname1)[0]\n cat2, fname2 = target.split('/')\n fname2 = os.path.splitext(fname2)[0]\n if len(opt.shapenetv1_path) > 0:\n source_target_files[i] = (os.path.join(opt.shapenetv1_path, cat1, fname1, \"model.obj\"), os.path.join(opt.shapenetv1_path, cat2, fname2, \"model.obj\"))\n elif len(opt.shapenetv2_path) > 0:\n source_target_files[i] = (os.path.join(opt.shapenetv2_path, cat1, fname1, \"models\", \"model_normalized.obj\"), os.path.join(opt.shapenetv2_path, cat2, fname2, \"models\", \"model_normalized.obj\"))\n elif (opt.eval_source != \"\" and opt.eval_source[-4:] == \".txt\") and (opt.eval_target != \"\" and opt.eval_target[-4:] == \".txt\"):\n source_target_files = [(figure_2_3.convert_path(opt.shapenetv1_path, opt.eval_source), figure_2_3.convert_path(opt.shapenetv1_path, opt.eval_target))]\n\n rot_mat = get_3D_rot_matrix(1, np.pi/2)\n rot_mat_rev = get_3D_rot_matrix(1, -np.pi/2)\n isV2 = len(opt.shapenetv2_path) > 0\n for i, source_target in enumerate(source_target_files):\n basename = get_model_id(source_target[0], isV2) + \"-\" + get_model_id(source_target[1], isV2)\n path_deformed = os.path.join(opt.output_dir, basename + \"-Sab.ply\")\n path_source = os.path.join(opt.output_dir, basename + \"-Sa.ply\")\n path_target = os.path.join(opt.output_dir, basename +\"-Sb.ply\")\n\n mesh_path = source_target[0]\n print(mesh_path)\n source_mesh_edge = get_shapenet_model.link(mesh_path)\n\n mesh_path = source_target[1]\n target_mesh_edge = get_shapenet_model.link(mesh_path)\n\n\n print(\"Deforming source in target\")\n\n source = source_mesh_edge.vertices\n target = target_mesh_edge.vertices\n\n pymesh.save_mesh_raw(path_source, source, source_mesh_edge.faces, ascii=True)\n pymesh.save_mesh_raw(path_target, target, target_mesh_edge.faces, ascii=True)\n\n if len(opt.shapenetv2_path) > 0:\n source = source.dot(rot_mat)\n target = target.dot(rot_mat)\n\n source = torch.from_numpy(source).cuda().float().unsqueeze(0)\n target = torch.from_numpy(target).cuda().float().unsqueeze(0)\n\n with torch.no_grad():\n source, _, _, _, _ = loss.forward_chamfer(trainer.network, source, target, local_fix=None,\n distChamfer=trainer.distChamfer)\n\n try:\n source = source.squeeze().cpu().detach().numpy()\n if len(opt.shapenetv2_path) > 0:\n source = source.dot(rot_mat_rev)\n P2_P1_mesh = pymesh.form_mesh(vertices=source, faces=source_mesh_edge.faces)\n pymesh.save_mesh(path_deformed, P2_P1_mesh, ascii=True)\n\n # print(\"computing signal tranfer form source to target\")\n # high_frequencies.high_frequency_propagation(path_source, path_deformed, path_target)\n except Exception as e:\n print(e)\n import pdb; pdb.set_trace()\n path_deformed = path_deformed[:-4] + \".pts\"\n save_pts(path_deformed, source.squeeze().cpu().detach().numpy())",
"def transfer(self, addr, port, object_id):\n return libplasma.transfer(self.conn, object_id, addr, port)",
"def transfer(self):\n pass",
"def hard_update_target_network(self,step):\n \n if step % self.C == 0:\n pars = self.model.get_weights()\n self.target_model.set_weights(pars)",
"def hard_update_target_network(self,step):\n \n if step % self.C == 0:\n pars = self.model.get_weights()\n self.target_model.set_weights(pars)",
"def port_setup(robot_name, num_cameras):\n\tglobal local_in_port\n\tglobal local_out_port\n\tglobal local_GPS_port\n\tglobal local_Dest_port\n\n\tglobal local_in_port_name\n\tglobal local_out_port_name\n\tglobal local_GPS_port_name\n\tglobal local_Dest_port_name\n\n\tglobal local_Radio_in_port\n\tglobal local_Radio_out_port\n\n\tglobal ors_in_port_name\n\tglobal ors_out_port_name\n\tglobal ors_GPS_port_name\n\tglobal ors_Dest_port_name\n\tglobal ors_Radio_in_port_name\n\tglobal ors_Radio_out_port_name\n\n\t# Define the names for all the ports\n\tport_prefix = \"/ors/robots/\" + robot_name + \"/\"\n\tlocal_port_prefix = \"/atrv_client/\" + robot_name + \"/\"\n\tview_prefix = \"/img/\" + robot_name + \"/\"\n\n\tors_in_port_name = port_prefix + \"in\"\n\tors_out_port_name = port_prefix + \"out\"\n\n\tors_Dest_port_name = port_prefix + \"Motion_Controller/in\"\n\tors_GPS_port_name = port_prefix + \"GPS/out\"\n\n\tors_Radio_out_port_name = port_prefix + \"Radio/out\"\n\tors_Radio_in_port_name = port_prefix + \"Radio/in\"\n\n\tlocal_in_port_name = local_port_prefix + \"in/\"\n\tlocal_out_port_name = local_port_prefix + \"out/\"\n\n\tlocal_GPS_port_name = local_port_prefix + \"GPS/in/\"\n\tlocal_Dest_port_name = local_port_prefix + \"Motion_Controller/out/\"\n\n\tlocal_Radio_in_port_name = local_port_prefix + \"Radio/in\"\n\tlocal_Radio_out_port_name = local_port_prefix + \"Radio/out\"\n\n\t# Start the yarp network connection\n\tyarp.Network.init()\n\n\t# Open the client ports\n\tlocal_in_port = yarp.BufferedPortBottle()\n\tlocal_in_port.open(local_in_port_name)\n\tlocal_out_port = yarp.BufferedPortBottle()\n\tlocal_out_port.open(local_out_port_name)\n\n\tlocal_GPS_port = yarp.BufferedPortBottle()\n\tlocal_GPS_port.open(local_GPS_port_name)\n\tlocal_Dest_port = yarp.BufferedPortBottle()\n\tlocal_Dest_port.open(local_Dest_port_name)\n\n\tlocal_Radio_out_port = yarp.BufferedPortBottle()\n\tlocal_Radio_out_port.open(local_Radio_out_port_name)\n\tlocal_Radio_in_port = yarp.BufferedPortBottle()\n\tlocal_Radio_in_port.open(local_Radio_in_port_name)\n\n\t# Connect the client ports to the simulator ports\n\tyarp.Network.connect (local_out_port_name, ors_in_port_name)\n\tyarp.Network.connect (ors_out_port_name, local_in_port_name)\n\n\tyarp.Network.connect (ors_GPS_port_name, local_GPS_port_name)\n\tyarp.Network.connect (local_Dest_port_name, ors_Dest_port_name)\n\n\tyarp.Network.connect (local_Radio_out_port_name, ors_Radio_in_port_name)\n\tyarp.Network.connect (ors_Radio_out_port_name, local_Radio_in_port_name)\n\n\n\t# Connect the cameras to yarpview windows\n\tprint (\" * Initializing yarpview windows.\")\n\tfor id in range(int(num_cameras)):\n\t\t# Build the name of the camera\n\t\tcamera_name = \"Camera{0}\".format(id+1)\n\n\t\t# Prepare the ports to be used\n\t\timg_view_port = view_prefix + camera_name\n\t\tatrv_camera_port = port_prefix + camera_name\n\n\t\tyarp.Network.connect (atrv_camera_port, img_view_port)",
"def to(self, device):\n self.detector.to(device)\n # self.recognizer.to(device)\n self.shared_conv.to(device)",
"def _generate_transfers(self) -> Dict[str, List[TransferDefinition]]:\n config = read_json(self.config_path)\n transfer_defs = {}\n ports = list(config[\"ports\"])\n permutations = list(itertools.permutations(ports, r=2))\n\n # for pair in _permute(config[\"ports\"]):\n for pair in permutations:\n base_port, target_port = self.pm.find_port(pair[0]), self.pm.find_port(pair[1])\n if target_port is None:\n raise ValueError(f\"No port found: Unable to transfer from base-port with name '{base_port.name}'\")\n if target_port is None:\n raise ValueError(f\"No port found: Unable to transfer to target-port with name '{pair[1]}'\")\n\n trainings = self.pm.load_trainings(base_port, self.output_dir, self.routes_dir, training_type=\"base\")\n # print(f\"loaded trainings. base port {base_port.name}:\\n{trainings.keys()}\")\n if len(trainings.keys()) < 1:\n print(f\"No base-training found for port '{base_port.name}'. Skipping\")\n continue\n\n training = list(trainings.values())[-1][0]\n # print(f\"training ({len(trainings.values())}): {training}\")\n # print(f\"Pair {base_port.name} ({len(trainings)} base-trains) -> {target_port.name}. \"\n # f\"Using latest at '{training.start_time}'\")\n verify_output_dir(self.output_dir, target_port.name)\n td = TransferDefinition(base_port_name=base_port.name,\n base_model_path=training.model_path,\n target_port_name=target_port.name,\n target_routes_dir=os.path.join(self.routes_dir, target_port.name),\n target_model_dir=os.path.join(self.output_dir, \"model\", target_port.name),\n target_output_data_dir=os.path.join(self.output_dir, \"data\", target_port.name),\n target_plot_dir=os.path.join(self.output_dir, \"plot\", target_port.name),\n target_log_dir=os.path.join(self.output_dir, \"log\", target_port.name))\n name = target_port.name\n if name in transfer_defs:\n transfer_defs[target_port.name].append(td)\n else:\n transfer_defs[target_port.name] = [td]\n return transfer_defs",
"def transfer(self, address, direction, repeats):\n if direction == \"in\":\n out_data = (\n \"/\"\n + str(address)\n + self.switch_valve(\"inlet\")\n + self.goto_position(stroke_volume)\n + self.switch_valve(\"outlet\")\n + self.goto_position(0)\n + self.repeat(repeats)\n + \"R\"\n + \"\\r\"\n )\n if self.is_ready(address):\n self.connection.write(out_data.encode())\n print(\"Pump \" + str(address) + \" is transferring from inlet to outlet \" + str(repeats) + \" times.\")\n self.is_ready(address)\n print(\"Done.\")\n elif direction == \"out\":\n out_data = (\n \"/\"\n + str(address)\n + self.switch_valve(\"outlet\")\n + self.goto_position(stroke_volume)\n + self.switch_valve(\"inlet\")\n + self.goto_position(0)\n + self.repeat(repeats)\n + \"R\"\n + \"\\r\"\n )\n if self.is_ready(address):\n self.connection.write(out_data.encode())\n print(\"Pump \" + str(address) + \" is transferring from inlet to outlet \" + str(repeats) + \" times.\")\n self.is_ready(address)\n print(\"Done.\")\n else:\n pass # return error",
"def copy_para(from_model, to_model):\n for i, j in zip(from_model.trainable_weights, to_model.trainable_weights):\n j.assign(i)",
"def invoke(self, msg, req):\n node = Node.create()\n node.acquire_lock()\n\n if msg.name == 'forward':\n try:\n with node.graph.as_default():\n if node.num_devices == 5:\n output, name = Model_5.forward(req['input'], req['next'], node)\n Thread(target=self.send, args=(output, name, req['tag'])).start()\n elif node.num_devices == 6:\n output, name = Model_6.forward(req['input'], req['next'], node)\n Thread(target=self.send, args=(output, name, req['tag'])).start()\n elif node.num_devices == 7:\n output, name = Model_7.forward(req['input'], req['next'], node)\n Thread(target=self.send, args=(output, name, req['tag'])).start()\n elif node.num_devices == 8:\n output, name = Model_8.forward(req['input'], req['next'], node)\n Thread(target=self.send, args=(output, name, req['tag'])).start()\n\n node.release_lock()\n return\n\n except Exception, e:\n node.log('Error', e.message)\n elif msg.name == 'update':\n \"\"\"update this node's task configuration,based on the received massage \"\"\"\n try:\n node.num_devices = req['num_devices']\n available_ip = req['available_ip']\n\n update_ip(get_file(node.num_devices), available_ip)\n load_ip(node)\n\n node.release_lock()\n return\n\n except Exception, e:\n node.log('Error', e.message)\n\n else:\n raise schema.AvroException('unexpected message:', msg.getname())",
"def sync_target_network(self):\n for t, e in zip(\n self.target_network.trainable_variables, self.online_network.trainable_variables\n ):\n t.assign(e)",
"def connectionMade(self):\n self.protocol.makeConnection(BridgeTransport(self.transport))",
"def forward(self, output, target):\n raise NotImplementedError",
"def cmd_port(args):",
"def update_target_network(self):\r\n self.send(self.server_conn, (sys._getframe().f_code.co_name, {}))",
"def run_model(project=None, model=None, raw=None, dyr=None, xls=None, path=None, server='tcp://127.0.0.1:5678'):\n ret = 0\n if (not project) or (not model):\n logging.error('RT-LAB project or model undefined.')\n sys.exit(-1)\n if (not raw) and (not xls):\n logging.error('PSS/E raw file or ePHASORsim Excel file undefined.')\n sys.exit(-1)\n if not dyr:\n logging.debug('PSS/E dyr file not specified')\n\n sim = SimControl(project, model, path)\n\n simulink = os.path.join(path,project, 'simulink')\n models = os.path.join(path,project, 'models')\n if not os.path.isdir(simulink):\n logging.error('No <{}> directory found.'.format(simulink))\n if not os.path.isdir(models):\n logging.error('No <{}> directory found.'.format(models))\n sys.exit(1)\n else:\n logging.info('Using <{}> directory'.format(models))\n modelPath = models\n else:\n logging.info('Using <{}> directory'.format(simulink))\n modelPath = simulink\n\n\n sim_data = LTBSetup(raw=raw, dyr=dyr, xls=xls, path=modelPath, model=model, simObject=sim)\n\n streaming = Streaming(name='sim', server=server, ltb_data=sim_data)\n\n sim.open()\n sim.load()\n\n sim_data.get_sysparam()\n sim_data.get_varheader_idxvgs()\n sim.set_settings(sim_data.Settings)\n # sim_data.Idxvgs['Line'].update(sim.add_branch_power_to_idxvgs())\n # sim_data.Varheader.extend(sim.add_vars_varheader(sim_data.Idxvgs['Line']))\n # sim_data.Idxvgs['Bus'].update(sim.add_bus_power_to_idxvgs())\n # sim_data.Varheader.extend(sim.add_vars_varheader(sim_data.Idxvgs['Bus']))\n streaming.send_init()\n logging.debug('Varheader, SysParam and Idxvgs sent.')\n sleep(0.5)\n\n sim.start()\n\n streaming.run()",
"def run(self):\n self.socket.connect()\n with open('src/inputs/output.file', 'rb') as f:\n self.sent_bytes = f.read()\n self.socket.send(self.sent_bytes)\n self.socket.disconnect()\n self.socket.close()",
"def trainNet():",
"def bind_transport_to_device(device, protocol_refs):\n transport = protocol_refs.result()[0]\n protocol = protocol_refs.result()[1]\n \n protocol.device = device\n device.transport = transport\n device.send_request('model?power?volume?mute?source?freq?')",
"def save_model(self):\n self.pred_net.save((self.save_path / \"iqn_pred_net\").absolute().as_posix())\n self.target_net.save((self.save_path / \"iqn_target_net\").absolute().as_posix())",
"def update_target_network(self):\n self.target_Qmodel = clone_model(self.Qmodel)\n self.target_Qmodel.set_weights(self.Qmodel.get_weights())\n\n # target network is never compiled\n self.target_Qmodel.compile(loss='mse', optimizer=Adam())"
] | [
"0.60630095",
"0.60588175",
"0.57406735",
"0.56002295",
"0.551898",
"0.55181533",
"0.5453672",
"0.5411719",
"0.5400127",
"0.53960764",
"0.538731",
"0.53615785",
"0.53615785",
"0.5356729",
"0.53435755",
"0.5335418",
"0.5325586",
"0.5305902",
"0.52706647",
"0.52651966",
"0.5254695",
"0.5242576",
"0.52215403",
"0.5216141",
"0.52030295",
"0.5190994",
"0.51713437",
"0.5158228",
"0.51548004",
"0.51527536"
] | 0.6467971 | 0 |
Generate TransferDefinitions based on transferconfig.json, containing those ports that have a base training for transferring to another port | def _generate_transfers(self) -> Dict[str, List[TransferDefinition]]:
config = read_json(self.config_path)
transfer_defs = {}
ports = list(config["ports"])
permutations = list(itertools.permutations(ports, r=2))
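        # consider every ordered (base, target) pair of the configured ports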
# for pair in _permute(config["ports"]):
for pair in permutations:
base_port, target_port = self.pm.find_port(pair[0]), self.pm.find_port(pair[1])
            if base_port is None:
                raise ValueError(f"No port found: Unable to transfer from base-port with name '{pair[0]}'")
if target_port is None:
raise ValueError(f"No port found: Unable to transfer to target-port with name '{pair[1]}'")
trainings = self.pm.load_trainings(base_port, self.output_dir, self.routes_dir, training_type="base")
# print(f"loaded trainings. base port {base_port.name}:\n{trainings.keys()}")
if len(trainings.keys()) < 1:
print(f"No base-training found for port '{base_port.name}'. Skipping")
continue
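            # use the most recent base training available for this port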
training = list(trainings.values())[-1][0]
# print(f"training ({len(trainings.values())}): {training}")
# print(f"Pair {base_port.name} ({len(trainings)} base-trains) -> {target_port.name}. "
# f"Using latest at '{training.start_time}'")
verify_output_dir(self.output_dir, target_port.name)
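            # bundle the base model path with all target-port specific directories into one definition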
td = TransferDefinition(base_port_name=base_port.name,
base_model_path=training.model_path,
target_port_name=target_port.name,
target_routes_dir=os.path.join(self.routes_dir, target_port.name),
target_model_dir=os.path.join(self.output_dir, "model", target_port.name),
target_output_data_dir=os.path.join(self.output_dir, "data", target_port.name),
target_plot_dir=os.path.join(self.output_dir, "plot", target_port.name),
target_log_dir=os.path.join(self.output_dir, "log", target_port.name))
name = target_port.name
if name in transfer_defs:
transfer_defs[target_port.name].append(td)
else:
transfer_defs[target_port.name] = [td]
return transfer_defs | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def transfer(self, target_port: Port, evaluator: Evaluator, config_uids: List[int] = None) -> None:\n if target_port.name not in self.transfer_defs:\n print(f\"No transfer definition found for target port '{target_port.name}'\")\n return\n # transfer definitions for specified target port\n tds = self.transfer_defs[target_port.name]\n output_dir = os.path.join(script_dir, os.pardir, \"output\")\n training_type = \"transfer\"\n print(f\"TRANSFERRING MODELS TO TARGET PORT '{target_port.name}'\")\n if config_uids is not None:\n print(f\"Transferring configs -> {config_uids} <-\")\n window_width = 50\n num_epochs = 25\n train_lr = 0.01\n fine_num_epochs = 20\n fine_tune_lr = 1e-5\n batch_size = 1024\n\n # skip port if fully transferred\n num_not_transferred = 0\n for td in tds:\n for config in self.transfer_configs:\n if not self._is_transferred(target_port.name, td.base_port_name, config.uid):\n # print(f\"Not transferred: {td.base_port_name} -> {target_port.name} ({config.uid})\")\n num_not_transferred += 1\n num_transfers = len(tds) * len(self.transfer_configs)\n print(f\"Transferred count {num_transfers - num_not_transferred}/{num_transfers}\")\n if num_not_transferred == 0:\n print(f\"All transfers done for target port '{target_port.name}': Skipping\")\n return\n X_ts, y_ts = load_data(target_port, window_width)\n\n baseline = mean_absolute_error(y_ts, np.full_like(y_ts, np.mean(y_ts)))\n evaluator.set_naive_baseline(target_port, baseline)\n print(f\"Naive baseline: {baseline}\")\n # X_train_orig, X_test_orig, y_train_orig, y_test_orig = train_test_split(X_ts, y_ts, test_size=0.2,\n # random_state=42, shuffle=False)\n # train_optimizer = Adam(learning_rate=train_lr)\n # fine_tune_optimizer = Adam(learning_rate=fine_tune_lr)\n\n for td in tds:\n print(f\".:'`!`':. TRANSFERRING PORT {td.base_port_name} TO {td.target_port_name} .:'`!`':.\")\n print(f\"- - Epochs {num_epochs} </> </> Learning rate {train_lr} - -\")\n print(f\"- - Window width {window_width} </> Batch size {batch_size} - -\")\n # print(f\"- - Number of model's parameters {num_total_trainable_parameters(model)} device {device} - -\")\n base_port = self.pm.find_port(td.base_port_name)\n if base_port is None:\n raise ValueError(f\"Unable to associate port with port name '{td.base_port_name}'\")\n\n # model = inception_time(input_shape=(window_width, 37))\n # print(model.summary())\n\n # apply transfer config\n for config in self.transfer_configs:\n if config_uids is not None and config.uid not in config_uids:\n continue\n if self._is_transferred(target_port.name, td.base_port_name, config.uid):\n print(f\"Skipping config {config.uid}\")\n continue\n print(f\"\\n.:'':. 
APPLYING CONFIG {config.uid} ::'':.\")\n print(f\"-> -> {config.desc} <- <-\")\n print(f\"-> -> nth_subset: {config.nth_subset} <- <-\")\n print(f\"-> -> trainable layers: {config.train_layers} <- <-\")\n _, _, start_time, _, _ = decode_keras_model(os.path.split(td.base_model_path)[1])\n model_file_name = encode_keras_model(td.target_port_name, start_time, td.base_port_name, config.uid)\n file_path = os.path.join(output_dir, \"model\", td.target_port_name, model_file_name)\n\n X_train_orig, X_test_orig, y_train_orig, y_test_orig = train_test_split(X_ts, y_ts, test_size=0.2,\n random_state=42, shuffle=False)\n train_optimizer = Adam(learning_rate=train_lr)\n fine_tune_optimizer = Adam(learning_rate=fine_tune_lr)\n\n checkpoint = ModelCheckpoint(file_path, monitor='val_mae', mode='min', verbose=2, save_best_only=True)\n early = EarlyStopping(monitor=\"val_mae\", mode=\"min\", patience=10, verbose=2)\n redonplat = ReduceLROnPlateau(monitor=\"val_mae\", mode=\"min\", patience=3, verbose=2)\n callbacks_list = [checkpoint, early, redonplat]\n\n # optimizer = Adam(learning_rate=lr)\n #\n # # configure model\n # model.compile(optimizer=optimizer, loss=\"mse\", metrics=[\"mae\"])\n\n # load base model\n model = load_model(td.base_model_path)\n # if config.uid == 0:\n # print(model.summary())\n # else:\n # print(model.summary())\n # del model\n\n X_train = X_train_orig\n X_test = X_test_orig\n y_train = y_train_orig\n y_test = y_test_orig\n\n # apply transfer configuration\n if config.nth_subset > 1:\n if X_train.shape[0] < config.nth_subset:\n print(f\"Unable to apply nth-subset. Not enough data\")\n X_train = X_train_orig[0::config.nth_subset]\n X_test = X_test_orig[0::config.nth_subset]\n y_train = y_train_orig[0::config.nth_subset]\n y_test = y_test_orig[0::config.nth_subset]\n print(f\"Orig shape: {X_train_orig.shape} {config.nth_subset} th-subset shape: {X_train.shape}\")\n print(f\"Orig shape: {X_test_orig.shape} {config.nth_subset} th-subset shape: {X_test.shape}\")\n print(f\"Orig shape: {y_train_orig.shape} {config.nth_subset} th-subset shape: {y_train.shape}\")\n print(f\"Orig shape: {y_test_orig.shape} {config.nth_subset} th-subset shape: {y_test.shape}\")\n modified = False\n # freeze certain layers\n for layer in model.layers:\n if layer.name not in config.train_layers:\n modified = True\n print(f\"setting layer {layer.name} to False\")\n layer.trainable = False\n else:\n print(f\"layer {layer.name} stays True\")\n if modified:\n print(f\"modified. 
compiling\")\n # re-compile\n model.compile(optimizer=train_optimizer, loss=\"mse\", metrics=[\"mae\"])\n # trainable_count = int(np.sum([K.count_params(p) for p in set(model.trainable_weights)]))\n # non_trainable_count = int(np.sum([K.count_params(p) for p in set(model.non_trainable_weights)]))\n trainable_count = count_params(model.trainable_weights)\n non_trainable_count = count_params(model.non_trainable_weights)\n print(f\"Total params: {trainable_count + non_trainable_count}\")\n print(f\"Trainable params: {trainable_count}\")\n print(f\"Non trainable params: {non_trainable_count}\")\n\n # transfer model\n result = model.fit(X_train, y_train, epochs=num_epochs, batch_size=batch_size, verbose=2,\n validation_data=(X_test, y_test), callbacks=callbacks_list)\n train_mae = result.history[\"mae\"]\n val_mae = result.history[\"val_mae\"]\n gc.collect()\n tune_result = None\n tune_train_mae = None\n tune_val_mae = None\n\n if config.tune:\n print(f\"Fine-Tuning transferred model\")\n # apply fine-tuning: unfreeze all but batch-normalization layers!\n for layer in model.layers:\n if not layer.name.startswith(\"batch_normalization\"):\n layer.trainable = True\n model.compile(optimizer=fine_tune_optimizer, loss=\"mse\", metrics=[\"mae\"])\n # print(f\"model for fine tuning\")\n # print(model.summary())\n tune_result = model.fit(X_train, y_train, epochs=fine_num_epochs, batch_size=batch_size, verbose=2,\n validation_data=(X_test, y_test), callbacks=callbacks_list)\n tune_train_mae = tune_result.history[\"mae\"]\n tune_val_mae = tune_result.history[\"val_mae\"]\n model.load_weights(file_path)\n\n # set evaluation\n def _compute_mae(_val_mae: List[float], _tune_val_mae: List[float]) -> float:\n if _tune_val_mae is not None:\n _val_mae = _val_mae + _tune_val_mae\n return min(val_mae)\n\n evaluator.set_mae(target_port, start_time, _compute_mae(val_mae, tune_val_mae), base_port, config.uid)\n y_pred = model.predict(X_test)\n grouped_mae = evaluator.group_mae(y_test, y_pred)\n evaluator.set_mae(target_port, start_time, grouped_mae, base_port, config.uid)\n\n # save history\n history_file_name = encode_history_file(training_type, target_port.name, start_time, td.base_port_name,\n config.uid)\n history_path = os.path.join(output_dir, \"data\", target_port.name, history_file_name)\n np.save(history_path, [result.history, tune_result.history if tune_result else None])\n\n # plot history\n plot_dir = os.path.join(output_dir, \"plot\")\n plot_history(train_mae, val_mae, plot_dir, target_port.name, start_time, training_type,\n td.base_port_name, config.uid, tune_train_mae, tune_val_mae)\n # evaluator.plot_grouped_mae(target_port, training_type, start_time, config.uid)\n plot_predictions(y_pred, y_test, plot_dir, target_port.name, start_time, training_type,\n td.base_port_name, config.uid)\n self.set_transfer(target_port.name, td.base_port_name, config.uid)\n del checkpoint, early, redonplat\n del X_train_orig, X_test_orig, y_train_orig, y_test_orig, model, X_train, y_train, X_test, y_test\n gc.collect()\n tf.keras.backend.clear_session()\n gc.collect()\n del X_ts, y_ts",
"def generate_config(context):\n\n\n properties = context.properties\n project_id = properties.get('project', context.env['project'])\n\n network = context.properties.get('networkURL', generate_network_uri(\n project_id,\n context.properties.get('network','')\n ))\n target_vpn_gateway = context.env['name'] + '-tvpng'\n esp_rule = context.env['name'] + '-esp-rule'\n udp_500_rule = context.env['name'] + '-udp-500-rule'\n udp_4500_rule = context.env['name'] + '-udp-4500-rule'\n vpn_tunnel = context.env['name'] + '-vpn'\n router_vpn_binding = context.env['name'] + '-router-vpn-binding'\n resources = []\n if 'ipAddress' in context.properties:\n ip_address = context.properties['ipAddress']\n static_ip = ''\n else:\n static_ip = context.env['name'] + '-ip'\n resources.append({\n # The reserved address resource.\n 'name': static_ip,\n # https://cloud.google.com/compute/docs/reference/rest/v1/addresses\n 'type': 'gcp-types/compute-v1:addresses',\n 'properties': {\n 'name': properties.get('name', static_ip),\n 'project': project_id,\n 'region': context.properties['region']\n }\n })\n ip_address = '$(ref.' + static_ip + '.address)'\n\n resources.extend([\n {\n # The target VPN gateway resource.\n 'name': target_vpn_gateway,\n # https://cloud.google.com/compute/docs/reference/rest/v1/targetVpnGateways\n 'type': 'gcp-types/compute-v1:targetVpnGateways',\n 'properties':\n {\n 'name': properties.get('name', target_vpn_gateway),\n 'project': project_id,\n 'network': network,\n 'region': context.properties['region'],\n }\n },\n {\n # The forwarding rule resource for the ESP traffic.\n 'name': esp_rule,\n # https://cloud.google.com/compute/docs/reference/rest/v1/forwardingRules\n 'type': 'gcp-types/compute-v1:forwardingRules',\n 'properties':\n {\n 'name': '{}-esp'.format(properties.get('name')) if 'name' in properties else esp_rule,\n 'project': project_id,\n 'IPAddress': ip_address,\n 'IPProtocol': 'ESP',\n 'region': context.properties['region'],\n 'target': '$(ref.' + target_vpn_gateway + '.selfLink)',\n }\n },\n {\n # The forwarding rule resource for the UDP traffic on port 4500.\n 'name': udp_4500_rule,\n # https://cloud.google.com/compute/docs/reference/rest/v1/forwardingRules\n 'type': 'gcp-types/compute-v1:forwardingRules',\n 'properties':\n {\n 'name': '{}-udp-4500'.format(properties.get('name')) if 'name' in properties else udp_4500_rule,\n 'project': project_id,\n 'IPAddress': ip_address,\n 'IPProtocol': 'UDP',\n 'portRange': 4500,\n 'region': context.properties['region'],\n 'target': '$(ref.' + target_vpn_gateway + '.selfLink)',\n }\n },\n {\n # The forwarding rule resource for the UDP traffic on port 500\n 'name': udp_500_rule,\n # https://cloud.google.com/compute/docs/reference/rest/v1/forwardingRules\n 'type': 'gcp-types/compute-v1:forwardingRules',\n 'properties':\n {\n 'name': '{}-udp-500'.format(properties.get('name')) if 'name' in properties else udp_500_rule,\n 'project': project_id,\n 'IPAddress': ip_address,\n 'IPProtocol': 'UDP',\n 'portRange': 500,\n 'region': context.properties['region'],\n 'target': '$(ref.' 
+ target_vpn_gateway + '.selfLink)',\n }\n },\n\n ])\n router_url_tag = 'routerURL'\n router_name_tag = 'router'\n\n if router_name_tag in context.properties:\n router_url = context.properties.get(router_url_tag, generate_router_uri(\n context.env['project'],\n context.properties['region'],\n context.properties[router_name_tag]))\n # Create dynamic routing VPN\n resources.extend([\n {\n # The VPN tunnel resource.\n 'name': vpn_tunnel,\n # https://cloud.google.com/compute/docs/reference/rest/v1/vpnTunnels\n 'type': 'gcp-types/compute-v1:vpnTunnels',\n 'properties':\n {\n 'name': properties.get('name', vpn_tunnel),\n 'project': project_id,\n 'description':\n 'A vpn tunnel',\n 'ikeVersion':\n 2,\n 'peerIp':\n context.properties['peerAddress'],\n 'region':\n context.properties['region'],\n 'router': router_url,\n 'sharedSecret':\n context.properties['sharedSecret'],\n 'targetVpnGateway':\n '$(ref.' + target_vpn_gateway + '.selfLink)'\n },\n 'metadata': {\n 'dependsOn': [esp_rule,\n udp_500_rule,\n udp_4500_rule]\n }\n }])\n else:\n # Create static routing VPN\n resources.append(\n {\n # The VPN tunnel resource.\n 'name': vpn_tunnel,\n 'type': 'gcp-types/compute-v1:vpnTunnels',\n 'properties': {\n 'name': vpn_tunnel,\n 'description':\n 'A vpn tunnel',\n 'ikeVersion':\n 2,\n 'peerIp':\n context.properties['peerAddress'],\n 'region':\n context.properties['region'],\n 'sharedSecret':\n context.properties['sharedSecret'],\n 'targetVpnGateway':\n '$(ref.' + target_vpn_gateway + '.selfLink)',\n 'localTrafficSelector':\n context.properties['localTrafficSelector'],\n 'remoteTrafficSelector':\n context.properties['remoteTrafficSelector'],\n\n },\n 'metadata': {\n 'dependsOn': [esp_rule, udp_500_rule, udp_4500_rule]\n }\n },\n )\n\n return {\n 'resources':\n resources,\n 'outputs':\n [\n {\n 'name': 'targetVpnGateway',\n 'value': target_vpn_gateway\n },\n {\n 'name': 'staticIp',\n 'value': static_ip\n },\n {\n 'name': 'espRule',\n 'value': esp_rule\n },\n {\n 'name': 'udp500Rule',\n 'value': udp_500_rule\n },\n {\n 'name': 'udp4500Rule',\n 'value': udp_4500_rule\n },\n {\n 'name': 'vpnTunnel',\n 'value': vpn_tunnel\n },\n {\n 'name': 'vpnTunnelUri',\n 'value': '$(ref.'+vpn_tunnel+'.selfLink)'\n }\n ]\n }",
"def plot_transfer_effect(self, port: Union[str, Port]) -> None:\n if isinstance(port, str):\n orig_port = port\n port = self.pm.find_port(port)\n if port is None:\n raise ValueError(f\"Unable to associate port with port name '{orig_port}'\")\n transfer_trainings = self.pm.load_trainings(port, output_dir=self.output_dir, routes_dir=self.routes_dir,\n training_type=\"transfer\")\n if len(transfer_trainings) < 1:\n print(f\"No training of type 'transfer' found for port {port.name}. Skipping plot_transfer_effect\")\n return\n\n transfer_training = transfer_trainings[-1]\n _, _, start_time, _, source_port_name = decode_model_file(os.path.split(transfer_training.model_path)[1])\n\n base_trainings = self.pm.load_trainings(source_port_name, output_dir=self.output_dir,\n routes_dir=self.routes_dir, training_type=\"base\")\n base_trainings = [t for t in base_trainings if t.start_time == start_time]\n if len(base_trainings) != 1:\n raise ValueError(f\"Unable to identify base-training for start_time '{start_time}': \"\n f\"Got {len(base_trainings)}, expected exactly 1\")\n base_training = base_trainings[0]\n base_key = self._encode_base_key(source_port_name, base_training.start_time)\n # print(f\"normal keys: {self.mae_base.keys()}\")\n # print(f\"grouped keys: {self.mae_base_groups.keys()}\")\n # print(f\"transferred normal keys: {self.mae_transfer.keys()}\")\n # print(f\"transferred grouped keys: {self.mae_transfer_groups.keys()}\")\n transfer_key = self._encode_transfer_key(source_port_name, port.name, start_time)\n base_data = self.mae_base_groups[base_key]\n transfer_data = self.mae_transfer_groups[transfer_key]\n path = os.path.join(self.output_dir, \"eval\", f\"transfer-effect_{source_port_name}-{port.name}.png\")\n plot_transfer_effect(base_data, transfer_data, source_port_name, port.name, path)",
"def generate_simple_flows(tgen_names, num_bots, bot_msg_size, bot_msg_rate, num_comps, comp_msg_size, comp_msg_rate):\n bot_tgen_ips = [TGEN_IP_PATTERN.format(TGEN_SUBNET_BASE + i) for i in range(num_bots)]\n bot_tgen_ports = [TGEN_PORT_BASE + i for i in range(num_bots)]\n\n # competitor IPs and port numbers start after those assigned to the bots\n comp_tgen_ips = [TGEN_IP_PATTERN.format(TGEN_SUBNET_BASE + i) for i in range(num_bots, num_bots+num_comps)]\n comp_tgen_ports = [TGEN_PORT_BASE + i for i in range(num_bots, num_bots+num_comps)]\n\n # build up flows for bots. Each bot talks to every other bot.\n bot_flows = {}\n for i in range(num_bots):\n bot_flows[i] = {\"flows\": [], \"tgen_name\": tgen_names[i]}\n\n # add a flow for each neighbor bot\n for j in range(len(bot_tgen_ips)):\n # don't add flows to self\n if i != j:\n # send from a unique source port based on the DESTINATION node.\n # use a destination port based on the SOURCE node number\n bot_flows[i][\"flows\"].append({\"src_port\": bot_tgen_ports[j],\n \"dst_ip\": bot_tgen_ips[j],\n \"dst_port\": bot_tgen_ports[i],\n \"msg_rate\": bot_msg_rate,\n \"msg_size\": bot_msg_size,\n })\n\n # build up flows for competitor nodes. Each competitor node talks to every other competitor node.\n comp_flows = {}\n for i in range(num_comps):\n comp_flows[i] = {\"flows\": [], \"tgen_name\": tgen_names[i+num_bots]}\n\n # add a flow for each neighbor bot\n for j in range(len(comp_tgen_ips)):\n # don't add flows to self\n if i != j:\n # send from a unique source port based on the DESTINATION node.\n # use a destination port based on the SOURCE node number\n comp_flows[i][\"flows\"].append({\"src_port\": comp_tgen_ports[j],\n \"dst_ip\": comp_tgen_ips[j],\n \"dst_port\": comp_tgen_ports[i],\n \"msg_rate\": comp_msg_rate,\n \"msg_size\": comp_msg_size,\n })\n\n return bot_flows, comp_flows",
"def gen_port_resources(self, server, ports):\n if (self.SuppressServerStatuses is False):\n print \"\\t* Adding all the port interface resources\"\n data = {}\n port_idx = \"0\"\n for idx, port in enumerate(ports):\n\n # get fixedips\n fixed_ip = port._info[\"fixed_ips\"]\n fixed_ip_address = fixed_ip[0][\"ip_address\"]\n\n # filter all_nets by subnet_id\n net_data = []\n fip = None\n for x in self.all_nets:\n for fip in fixed_ip:\n if x[0][\"id\"] in fip[\"subnet_id\"]:\n net_data.append(x)\n\n if len(net_data) > 0:\n net = net_data[0][1]\n subnet = net_data[0][2]\n\n networkID = [netw['id'] for netw in self.neutronclient.list_networks()['networks'] if netw['name'] == net][0]\n networkIsShared = self.neutronclient.show_network(networkID)['network']['shared']\n\n if networkIsShared is True:\n port_properties_ = {\n \"network_id\": networkID,\n \"fixed_ips\": [\n {\"subnet_id\": fip[\"subnet_id\"]}\n ]\n }\n else:\n port_properties_ = {\n \"network_id\": {\"get_resource\": net},\n \"fixed_ips\": [\n {\"subnet_id\": {\"get_resource\": subnet}}\n ]\n }\n if self.staticips:\n fixed_ips = []\n for address in server.addresses:\n server_ip_address = server.addresses[address][0]['addr']\n if server_ip_address == fixed_ip_address:\n fixed_ips.append({\"ip_address\": server_ip_address})\n\n port_properties_ = {\n \"network_id\": {\"get_resource\": net},\n \"fixed_ips\": fixed_ips\n }\n data = {\"type\": \"OS::Neutron::Port\",\"properties\": port_properties_}\n else:\n print \"!!Probable error grabbing port information for server %s!!\" % (server.name)\n data = {\"type\": \"OS::Neutron::Port\"}\n\n self.compute_data[\"resources\"][\"%s_port%s\" % (server.name, port_idx)] = data\n if len(ports) >= 1:\n port_idx = str(1 + idx)",
"def hh_assign_ports():\n print('******* Assigning ports')\n for device in ci_addrs.switches_hh1:\n print ('******* Connecting to ', device.get('ip'))\n net_connect = ConnectHandler(**device)\n output_vlan_cp = net_connect.send_config_set(cp_hh1_config_commands)\n print (output_vlan_cp)\n output_vlan_cloud = net_connect.send_config_set(cloud_hh1_config_commands)\n print (output_vlan_cloud)\n output_vlan_f5 = net_connect.send_config_set(f5_hh_config_commands)\n print (output_vlan_f5)\n net_connect.send_config_set('wr')\n\n for device in ci_addrs.switches_hh2:\n print ('******* Connecting to ', device.get('ip'))\n net_connect = ConnectHandler(**device)\n output_vlan_cp = net_connect.send_config_set(cp_hh2_config_commands)\n print (output_vlan_cp)\n output_vlan_cloud = net_connect.send_config_set(cloud_hh2_config_commands)\n print (output_vlan_cloud)\n output_vlan_f5 = net_connect.send_config_set(f5_hh_config_commands)\n print (output_vlan_f5)\n net_connect.send_config_set('wr')",
"def create_net(args):\n\n # Load config file for this experiment\n xinfo = yaml.load(open(args.exp)) # experiment info\n\n # copy config to run directory\n assert osp.isdir(args.cache_dir), 'Working directory not found: ' + args.cache_dir\n # output config file\n yaml.dump(xinfo, open(args.exp_config_path, 'w'),\n default_flow_style=False)\n\n # Load dataset config file\n dcfg_path = osp.join(args.data_config_path, xinfo['INPUT']['DATASET'])\n dinfo = yaml.load(open(dcfg_path)) # dataset info\n data_dir = dinfo['ROOT']\n\n layout = xinfo['INPUT']['LAYOUT']\n inps = [s.strip() for l in layout for s in l.split(',')]\n outs = [s.strip() for s in xinfo['REFINE']['TARGETS'].split(',')]\n\n supports = ['seg', 'flow', 'norm', 'rgb', 'depth']\n\n nets = {}\n for split in ['train', 'test']:\n net_inps = []\n net_outs = []\n for inp in inps:\n match = re.search('^(gt|pr)({})'.format('|'.join(supports)), inp)\n assert match is not None, 'Error in config INPUT-LAYOUT: ' + inp\n\n modality = match.group(2)\n nchannels = dinfo[modality]['n']\n path = osp.join(data_dir, dinfo[modality][match.group(1) + '-' + split])\n\n assert osp.exists(path), 'File not found: ' + path\n net_inps.append((inp, path, nchannels))\n\n for out in outs:\n # TODO: read target type: zero couplings, tight, loose couplings\n match = re.search('({})'.format('|'.join(supports)), out)\n assert match is not None, 'Error in config REFINE-TARGET: '+ out\n\n modality = match.group(1)\n nchannels = dinfo[modality]['n']\n path = osp.join(data_dir, dinfo[modality]['gt-' + split])\n\n assert osp.exists(path), 'File not found: ' + path\n net_outs.append((out, path, nchannels))\n\n loss_params = dict()\n mapping = None\n if 'mapping' in dinfo['seg']:\n idx = dinfo['seg']['mapping']\n mapping = dict(zip(idx, xrange(len(idx))))\n\n if split == 'train':\n\n # if the class weights is not in the dataset config file\n if 'gt-train-weights' not in dinfo['seg']:\n print 'Generating median frequency balancing weights.'\n (weights, mapping) = gcw.get_mfb(osp.join(dinfo['ROOT'], dinfo['seg']['gt-train']),\n dinfo['seg']['ignore_label'],\n mapping)\n # save back to dataset config\n dinfo['seg']['gt-train-weights'] = weights\n yaml.dump(dinfo, open(dcfg_path, 'w'), default_flow_style=False)\n else:\n weights = dinfo['seg']['gt-train-weights']\n # update data\n # update loss parameter\n ignore_label = dinfo['seg']['ignore_label']\n ignore_label = mapping[ignore_label] if mapping is not None else ignore_label\n loss_params['loss_param'] = {\n 'ignore_label': ignore_label,\n 'class_weighting': weights\n }\n\n # generate net prototxt\n loader = dinfo['NAME'] + '_loader'\n net_proto = arch.create_net(net_inps, net_outs, split, loader, layout, mapping, **loss_params)\n\n # output to file\n path = osp.join(args.cache_dir, getattr(args, 'exp_{}_path'.format(split)))\n open(path, 'w').write(str(net_proto))\n nets[split] = net_proto\n\n return nets",
"def all_net(configuration):\n net_dict_all = {\n \"design\" : ['H1', 'L1', 'V1' ],\n \"GW170817\" : ['H1', 'L1', 'V1' ],\n \"GW170814\" : ['H1', 'L1', 'V1' ],\n \"GW170817_without_Virgo\" : ['H1', 'L1' ],\n \"ET\" : [\"ET_L_Eu\", \"ET_L_Eu_2\"], # Triangular ET\n \"ET1\" : ['H1', 'L1', 'V1', 'ETdet1', 'ETdet2' ], # Triangular ET +LVC\n \"ET2\" : ['H1', 'L1', 'V1', 'ETdet1', 'ETdet3' ], # L-shaped at 2 places +LVC\n \"ET3\" : ['ETdet1', 'ETdet3', 'ETdet4'], # 3 L-shaped ET at three different places\n \"ET3L_EU\" : [\"ET_L_Eu\", \"ET_L_Aus_Eu\", \"ET_L_Argentina\"],\n \"3ET\" : [\"ET_L_US\", \"ET_L_Aus_US\", \"ET_L_Central_Africa\"],\n \"3CE\" : [\"CE_US\", \"CE_Aus_US\", \"CE_Central_Africa\"],\n \"1CE-ET\" : [\"CE_US\", \"ET_L_Eu\", \"ET_L_Eu_2\"],\n \"2CE-ET\" : [\"CE_US\", \"CE_Aus_US\", \"ET_L_Eu\", \"ET_L_Eu_2\"], #named 1 and 2 to distinguish from CE-ET (below) in Mills et al 2018.\n \"CE-ET\" : [\"CE_US\", \"CE_Aus_US\", \"ET_L_Eu\", \"ET_L_Eu_2\"],\n \"Voyager-ET\" : [\"LBB_H1\", \"LBB_L1\", \"LBB_I1\", \"ET_L_Eu\", \"ET_L_Eu_2\"],\n # next three networks are for calculating the impact of duty cycle on the Voyager-ET network\n \"VoyagerLI-ET\" : [\"LBB_L1\", \"LBB_I1\", \"ET_L_Eu\", \"ET_L_Eu_2\"],\n \"VoyagerHI-ET\" : [\"LBB_H1\", \"LBB_I1\", \"ET_L_Eu\", \"ET_L_Eu_2\"],\n \"VoyagerHL-ET\" : [\"LBB_H1\", \"LBB_L1\", \"ET_L_Eu\", \"ET_L_Eu_2\"],\n \n \"VoyagerETtri\" : [\"LBB_H1\", \"LBB_L1\", \"LBB_I1\", \"ET_Tri_Eu_1\", \"ET_Tri_Eu_2\", \"ET_Tri_Eu_3\"],\n \"Voyager\" : [\"LBB_H1\", \"LBB_L1\", \"LBB_I1\"],\n \"VoyagerWithAL\" : [\"LBB_H1\", \"LBB_L1\", \"LBB_I1\", \"ALV1\", \"ALK1\"],\n \"3_TriangularET\" : [\"ET_L_US\", \"ET_L_Aus_US\", \"ET_L_Central_Africa\",\"ET_L_US_2\", \"ET_L_Aus_US_2\", \"ET_L_Central_Africa_2\"],\n # for comparing to klimenko et al 2011:\n 'LHVA2' : [\"LBB_L1\",\"LBB_H1\",\"LBB_V1\",\"LBB_A-\"],\n 'LHVA' : [\"LBB_L1\",\"LBB_H1\",\"LBB_V1\",\"LBB_A\"],\n 'LHVJ' : [\"LBB_L1\",\"LBB_H1\",\"LBB_V1\",\"LBB_K1\"],\n 'LHVAJ' : [\"LBB_L1\",\"LBB_H1\",\"LBB_V1\",\"LBB_A\",\"LBB_K1\"],\n # for calculating alignment factor distributions in inclincation paper\n \"HL\" : [\"H1\", \"L1\"],\n \"HLV\" : [\"H1\", \"L1\", \"V1\" ],\n \"HLVK\" : [\"L1\",\"H1\",\"V1\",\"K1\"],\n \"HLVKI\" : [\"L1\",\"H1\",\"V1\",\"K1\", \"I1\"],\n \n\n #for optimizing the orientations of ET3L_EU w.r.t. polarization metric (see optimizing polarization notebook)\n #first optimize for the two detector network:\n \"ET2L_EU\" : [\"ET_L_Eu\", \"ET_L_Aus_Eu\"],\n \"2ET\" : [\"ET_L_US\", \"ET_L_Aus_US\"],\n #ranges\n }\n return(net_dict_all[configuration])",
"def _build_networks(self):\n self.online_convnet = self._create_network(name='Online')\n self.target_convnet = self._create_network(name='Target')\n self._net_outputs = self.online_convnet(self.state_ph, training=True)\n self._q_argmax = tf.argmax(self._net_outputs.q_values, axis=1)[0]\n self._replay_net_outputs = self.online_convnet(self._replay.states,\n training=True)\n self._replay_next_target_net_outputs = self.target_convnet(\n self._replay.next_states)",
"def rr1_assign_ports():\n print('******* Assigning ports')\n for device in ci_addrs.switches_rr1_12:\n print ('******* Connecting to ', device.get('ip'))\n net_connect = ConnectHandler(**device)\n output_vlan_cp = net_connect.send_config_set(cp_rr1_config_commands)\n print (output_vlan_cp)\n output_vlan_cloud = net_connect.send_config_set(cloud_rr1_config_commands)\n print (output_vlan_cloud)\n output_vlan_f5 = net_connect.send_config_set(f5_config_commands)\n print (output_vlan_f5)\n net_connect.send_config_set('wr')\n for device in ci_addrs.switches_rr2_12:\n print ('******* Connecting to ', device.get('ip'))\n net_connect = ConnectHandler(**device)\n output_vlan_cp = net_connect.send_config_set(cp_rr1_config_commands)\n print (output_vlan_cp)\n output_vlan_cloud = net_connect.send_config_set(cloud_rr2_config_commands)\n print (output_vlan_cloud)\n output_vlan_f5 = net_connect.send_config_set(f5_config_commands)\n print (output_vlan_f5)\n net_connect.send_config_set('wr') \n assign_ports_n5k34()",
"def task_generate_tasks():\n \n yield {\n 'basename': 'generate_tasks',\n 'name': None,\n # 'doc': 'docs for X',\n 'watch': ['trains/'],\n 'task_dep': ['create_folders'],\n }\n \n for root, dirs, files in os.walk('trains/',topdown=False):\n for f in files:\n #print(f)\n yield template_train_model(os.path.join(root,f))",
"def kk_assign_ports():\n print('******* Assigning ports')\n for device in ci_addrs.switches_kk_all:\n print ('******* Connecting to ', device.get('ip'))\n net_connect = ConnectHandler(**device)\n output_vlan_cp = net_connect.send_config_set(cp_kk_config_commands)\n print (output_vlan_cp)\n output_vlan_cloud = net_connect.send_config_set(cloud_kk_config_commands)\n print (output_vlan_cloud)\n output_vlan_f5 = net_connect.send_config_set(f5_kk_config_commands)\n print (output_vlan_f5)\n net_connect.send_config_set('wr')",
"def build_configs():",
"def generate_config(context):\n\n enable_flow_logs = context.properties.get('enableFlowLogs', False)\n\n subnetwork_resource = {\n 'name': context.properties['resourceName'],\n 'type': 'gcp-types/compute-beta:subnetworks',\n 'properties': {\n # Required properties.\n 'name':\n context.properties['name'],\n 'network':\n context.properties['network'],\n 'ipCidrRange':\n context.properties['ipCidrRange'],\n 'region':\n context.properties['region'],\n 'project':\n context.properties['projectId'],\n\n # Optional properties, with defaults.\n 'enableFlowLogs':\n enable_flow_logs,\n 'privateIpGoogleAccess':\n context.properties.get('privateIpGoogleAccess', False),\n 'secondaryIpRanges':\n context.properties.get('secondaryIpRanges', []),\n }\n }\n \n if enable_flow_logs:\n # If flow logs are enabled, we want to adjust the default config in two ways:\n # (1) Increase the sampling ratio (defaults to 0.5) so we sample all traffic.\n # (2) Reduce the aggregation interval to 30 seconds (default is 5secs) to save on\n # storage.\n subnetwork_resource['properties']['logConfig'] = {\n 'aggregationInterval': 'INTERVAL_30_SEC',\n 'enable': True,\n 'flowSampling': 1.0,\n 'metadata': 'INCLUDE_ALL_METADATA',\n }\n\n # Pass the 'dependsOn' property to the subnetwork resource if present.\n if 'dependsOn' in context.properties:\n subnetwork_resource['metadata'] = {\n 'dependsOn': context.properties['dependsOn']\n }\n\n output = [\n {\n 'name': 'name',\n 'value': subnetwork_resource['name'],\n },\n {\n 'name': 'selfLink',\n 'value': '$(ref.{}.selfLink)'.format(subnetwork_resource['name']),\n },\n ]\n\n return {'resources': [subnetwork_resource], 'outputs': output}",
"def configure(task):\n r = task.run(\n name=\"Base Configuration\",\n task=template_file,\n template=\"base.j2\",\n path=f\"templates/{task.host.nos}\",\n severity_level=0,\n )\n # r.result holds the result of rendering the template\n config = r.result\n\n r = task.run(\n name=\"Loading extra underlay data\",\n task=load_yaml,\n file=f\"extra_data/{task.host}/underlay.yaml\",\n severity_level=0,\n )\n # r.result holds the data contained in the yaml files\n # we load the data inside the host itself for further use\n task.host[\"underlay\"] = r.result\n\n r = task.run(\n name=\"Loading extra evpn data\",\n task=load_yaml,\n file=f\"extra_data/{task.host}/evpn.yaml\",\n severity_level=0,\n )\n # r.result holds the data contained in the yaml files\n # we load the data inside the host itself for further use\n task.host[\"evpn\"] = r.result\n\n r = task.run(\n name=\"Loading extra vxlan data\",\n task=load_yaml,\n file=f\"extra_data/{task.host}/vxlan.yaml\",\n severity_level=0,\n )\n # r.result holds the data contained in the yaml files\n # we load the data inside the host itself for further use\n task.host[\"vxlan\"] = r.result\n\n r = task.run(\n name=\"Interfaces Configuration\",\n task=template_file,\n template=\"interfaces.j2\",\n path=f\"templates/{task.host.nos}\",\n severity_level=0,\n )\n # we append the generated configuration\n config += r.result\n\n r = task.run(\n name=\"Routing Configuration\",\n task=template_file,\n template=\"routing.j2\",\n path=f\"templates/{task.host.nos}\",\n severity_level=0,\n )\n config += r.result\n\n r = task.run(\n name=\"EVPN Configuration\",\n task=template_file,\n template=\"evpn.j2\",\n path=f\"templates/{task.host.nos}\",\n severity_level=0,\n )\n config += r.result\n\n r = task.run(\n name=\"Role-specific Configuration\",\n task=template_file,\n template=f\"{task.host['role']}.j2\",\n path=f\"templates/{task.host.nos}\",\n severity_level=0,\n )\n # we update our hosts' config\n config += r.result\n\n task.run(\n name=\"Loading Configuration on the device\",\n task=napalm_configure,\n replace=True,\n configuration=config,\n )",
"def networkx_resource_generator (func_name, seed=0, max_cpu=40, max_mem=16000,\n max_storage=30, max_link_bw=70,\n abc_nf_types_len=10,\n supported_nf_cnt=6, max_link_delay=2,\n sap_cnt=10,\n **kwargs):\n rnd = random.Random()\n rnd.seed(seed)\n nx_graph = get_networkx_func(func_name, seed=seed, **kwargs)\n\n nf_types = list(string.ascii_uppercase)[:abc_nf_types_len]\n nffg = NFFG(id=\"net-\" + func_name + \"-seed\" + str(seed))\n gen = NameGenerator()\n\n for infra_id in nx_graph.nodes_iter():\n infra = nffg.add_infra(id=\"infra\" + str(infra_id),\n bandwidth=rnd.random() * max_link_bw * 1000,\n cpu=rnd.random() * max_cpu,\n mem=rnd.random() * max_mem,\n storage=rnd.random() * max_storage)\n infra.add_supported_type(rnd.sample(nf_types, supported_nf_cnt))\n\n for i, j in nx_graph.edges_iter():\n infra1 = nffg.network.node[\"infra\" + str(i)]\n infra2 = nffg.network.node[\"infra\" + str(j)]\n nffg.add_undirected_link(port1=infra1.add_port(id=gen.get_name(\"port\")),\n port2=infra2.add_port(id=gen.get_name(\"port\")),\n p1p2id=gen.get_name(\"link\"),\n p2p1id=gen.get_name(\"link\"),\n dynamic=False,\n delay=rnd.random() * max_link_delay,\n bandwidth=rnd.random() * max_link_bw)\n\n infra_ids = [i.id for i in nffg.infras]\n for s in xrange(0, sap_cnt):\n sap_obj = nffg.add_sap(id=gen.get_name(\"sap\"))\n sap_port = sap_obj.add_port(id=gen.get_name(\"port\"))\n infra_id = rnd.choice(infra_ids)\n infra = nffg.network.node[infra_id]\n nffg.add_undirected_link(port1=sap_port,\n port2=infra.add_port(id=gen.get_name(\"port\")),\n p1p2id=gen.get_name(\"link\"),\n p2p1id=gen.get_name(\"link\"),\n dynamic=False,\n delay=rnd.random() * max_link_delay,\n bandwidth=rnd.uniform(max_link_bw / 2.0,\n max_link_bw))\n\n return nffg",
"def get_port_fields(module, system, host):\n host_fc_initiators = find_host_initiators_data(module, system, host, initiator_type='FC')\n host_iscsi_initiators = find_host_initiators_data(module, system, host, initiator_type='ISCSI')\n\n field_dict = dict(\n ports=[],\n )\n\n connectivity_lut = {\n 0: \"DISCONNECTED\",\n 1: \"DEGRADED\",\n 2: \"DEGRADED\",\n 3: \"CONNECTED\"\n }\n\n ports = host.get_ports()\n for port in ports:\n if str(type(port)) == \"<class 'infi.dtypes.wwn.WWN'>\":\n found_initiator = False\n for initiator in host_fc_initiators:\n if initiator['address'] == str(port).replace(\":\", \"\"):\n found_initiator = True\n #print(\"initiator targets:\", initiator['targets'])\n unique_initiator_target_ids = \\\n {target['node_id'] for target in initiator['targets']}\n port_dict = {\n \"address\": str(port),\n \"address_long\": initiator['address_long'],\n \"connectivity\": connectivity_lut[len(unique_initiator_target_ids)],\n \"targets\": initiator['targets'],\n \"type\": initiator['type'],\n }\n\n if not found_initiator:\n address_str = str(port)\n address_iter = iter(address_str)\n long_address = ':'.join(a+b for a, b in zip(address_iter, address_iter))\n port_dict = {\n \"address\": str(port),\n \"address_long\": long_address,\n \"connectivity\": connectivity_lut[0],\n \"targets\": [],\n \"type\": \"FC\"\n }\n\n field_dict['ports'].append(port_dict)\n\n if str(type(port)) == \"<class 'infi.dtypes.iqn.IQN'>\":\n found_initiator = False\n for initiator in host_iscsi_initiators:\n if initiator['address'] == str(port):\n found_initiator = True\n #print(\"initiator targets:\", initiator['targets'])\n unique_initiator_target_ids = \\\n {target['node_id'] for target in initiator['targets']}\n port_dict = {\n \"address\": str(port),\n \"connectivity\": connectivity_lut[len(unique_initiator_target_ids)],\n \"targets\": initiator['targets'],\n \"type\": initiator['type'],\n }\n\n if not found_initiator:\n port_dict = {\n \"address\": str(port),\n \"connectivity\": connectivity_lut[0],\n \"targets\": [],\n \"type\": \"ISCSI\"\n }\n\n field_dict['ports'].append(port_dict)\n\n return field_dict",
"def build_network(self, inputs, targets, training=False):\n raise NotImplementedError",
"def generateTopology():\n switches = {}\n interfaces = {}\n links = {}\n return (switches,links)",
"def _build_networks(self):\n # Calling online_convnet will generate a new graph as defined in\n # self._get_network_template using whatever input is passed, but will always\n # share the same weights.\n self.online_convnet = tf.make_template('Online', self._network_template)\n self.target_convnet = tf.make_template('Target', self._network_template)\n self._net_outputs = self.online_convnet(self.state_ph)\n\n self._replay_net_outputs = self.online_convnet(self._replay.states)\n self._replay_next_target_net_outputs = self.target_convnet(\n self._replay.next_states)\n\n if self.acting_policy == 'hyperbolic':\n self._q_argmax = tf.argmax(self._net_outputs.hyp_q_value, axis=1)[0]\n elif self.acting_policy == 'largest_gamma':\n self._q_argmax = tf.argmax(self._net_outputs.q_values[-1], axis=1)[0]\n else:\n raise NotImplementedError",
"def schema_generators():\n return {\n \"trips\": trips_schema,\n \"status_changes\": status_changes_schema,\n \"events\": events_schema,\n \"vehicles\": vehicles_schema,\n \"stops\": stops_schema\n }",
"def create_packet_definition(packet_to_send):\n source_mac = \"00:00:00:00:00:01\"\n destination_mac = \"00:00:00:00:00:02\"\n source_ip = \"10.10.10.1\"\n destination_ip = \"10.10.10.2\"\n source_ip6 = 'fe80::214:f2ff:fe07:af0'\n destination_ip6 = 'ff02::1'\n sport = 1\n dport = 2\n tos = 4\n if packet_to_send[\"type\"] == \"ip\":\n packet_definition = ({\"Ether\": {\"dst\": destination_mac, \"src\": source_mac, \"type\": 0x0800}},\n {\"IP\": {\"dst\": destination_ip, \"src\": source_ip, \"tos\": tos}},\n {\"TCP\": {}})\n elif packet_to_send[\"type\"] == \"tagged_ip\":\n packet_definition = ({\"Ether\": {\"dst\": destination_mac, \"src\": source_mac, \"type\": 0x8100}},\n {\"Dot1Q\": {\"vlan\": packet_to_send[\"vlan\"],\n \"prio\": packet_to_send[\"priority\"]}},\n {\"IP\": {\"dst\": destination_ip, \"src\": source_ip, \"tos\": tos}})\n elif packet_to_send[\"type\"] == \"tcp\":\n packet_definition = ({\"Ether\": {\"dst\": destination_mac, \"src\": source_mac, \"type\": 0x0800}},\n {\"IP\": {\"dst\": destination_ip, \"src\": source_ip, \"tos\": tos}},\n {\"TCP\": {\"sport\": sport, \"dport\": dport}})\n elif packet_to_send[\"type\"] == \"udp\":\n packet_definition = ({\"Ether\": {\"dst\": destination_mac, \"src\": source_mac, \"type\": 0x0800}},\n {\"IP\": {\"dst\": destination_ip, \"src\": source_ip, \"tos\": tos}},\n {\"UDP\": {\"sport\": sport, \"dport\": dport}})\n elif packet_to_send[\"type\"] == \"double_tagged_ip\":\n packet_definition = ({\"Ether\": {\"dst\": destination_mac, \"src\": source_mac, \"type\": 0x8100}},\n {\"Dot1Q\": {\"vlan\": packet_to_send[\"outer_vlan\"], \"type\": 0x8100,\n \"prio\": packet_to_send[\"outer_priority\"]}},\n {\"Dot1Q\": {\"vlan\": packet_to_send[\"inner_vlan\"], \"type\": 0x0800,\n \"prio\": packet_to_send[\"inner_priority\"]}},\n {\"IP\": {\"dst\": destination_ip, \"src\": source_ip, \"tos\": tos}})\n elif packet_to_send[\"type\"] == \"arp\":\n packet_definition = (\n {\"Ether\": {\"src\": source_mac, \"dst\": 'FF:FF:FF:FF:FF:FF', \"type\": 0x0806}},\n {\"ARP\": {\"op\": 1, \"hwsrc\": source_mac,\n \"psrc\": source_ip, \"pdst\": destination_ip}},)\n elif packet_to_send[\"type\"] == \"arp_reply_tagged\":\n packet_definition = ({\"Ether\": {\"src\": source_mac, \"dst\": destination_mac, \"type\": 0x8100}},\n {\"Dot1Q\": {\"vlan\": 2}},\n {\"ARP\": {\"op\": 2, \"hwsrc\": source_mac, \"hwdst\": destination_mac,\n \"pdst\": destination_ip, \"psrc\": source_ip}}, )\n elif packet_to_send[\"type\"] == \"icmp\":\n packet_definition = ({\"Ether\": {\"dst\": destination_mac, \"src\": source_mac, \"type\": 0x0800}},\n {\"IP\": {\"dst\": destination_ip, \"src\": source_ip, \"proto\": 1}},\n {\"ICMP\": {\"type\": 8, \"code\": 0}})\n elif packet_to_send[\"type\"] == \"ipv6\":\n packet_definition = ({\"Ether\": {\"dst\": destination_mac, \"src\": source_mac, \"type\": 0x86dd}},\n {\"IPv6\": {\"dst\": destination_ip6, \"src\": source_ip6, \"version\": 6,\n \"hlim\": 255, \"plen\": 64, \"tc\": 225}})\n elif packet_to_send[\"type\"] == \"tcp6\":\n packet_definition = ({\"Ether\": {\"dst\": destination_mac, \"src\": source_mac, \"type\": 0x86dd}},\n {\"IPv6\": {\"dst\": destination_ip6, \"src\": source_ip6, \"version\": 6,\n \"hlim\": 255, \"tc\": 224, \"nh\": 6}},\n {\"TCP\": {\"sport\": sport, \"dport\": dport}})\n elif packet_to_send[\"type\"] == \"udp6\":\n packet_definition = ({\"Ether\": {\"dst\": destination_mac, \"src\": source_mac, \"type\": 0x86dd}},\n {\"IPv6\": {\"dst\": destination_ip6, \"src\": source_ip6, \"version\": 6,\n \"hlim\": 255, \"tc\": 224, 
\"nh\": 17}},\n {\"UDP\": {\"sport\": sport, \"dport\": dport}})\n elif packet_to_send[\"type\"] == \"icmp6\":\n packet_definition = ({\"Ether\": {\"dst\": destination_mac, \"src\": source_mac, \"type\": 0x86dd}},\n {\"IPv6\": {\"dst\": destination_ip6, \"src\": source_ip6, \"version\": 6,\n \"hlim\": 255, \"tc\": 224, \"nh\": 1}},\n {\"ICMP\": {\"type\": 8, \"code\": 0}})\n return packet_definition",
"def generateConfig(run,subrun,conditions):\n \n configname = (conditions.numcdir + \"/\" + str(run) + \"/\" + str(subrun)\n + \"/numc_config_\" + str(run) + \"_\" + str(subrun) + \".cfg\")\n \n configContents = \"\"\n \n configContents += \"[software]\\n\"\n if conditions.oldneut:\n configContents += \"neut_setup_script = /project/t/tanaka/T2K/neut/branches/5.1.4.2_nd280_ROOTv5r34p09n01/src/neutgeom/setup.sh\\n\"\n elif conditions.newoldneut:\n configContents += \"neut_setup_script = /project/t/tanaka/T2K/neut/branches/5.1.4.3_nd280/src/neutgeom/setup.sh\\n\"\n else:\n #configContents += \"neut_setup_script = /project/t/tanaka/T2K/neut/branches/5.3.1_nd280/src/neutgeom/setup.sh\\n\"\n #configContents += \"neut_setup_script = /project/t/tanaka/T2K/neut/branches/5.3.1_nd280_wBBBA05/src/neutgeom/setup.sh\\n\"\n configContents += \"neut_setup_script = /project/t/tanaka/T2K/neut/branches/5.3.2_nd280/src/neutgeom/setup.sh\\n\"\n \n configContents += \"[geometry]\\n\"\n\n configContents += \"baseline = \" + conditions.geometry +\"\\n\"\n if conditions.waterair == \"water\":\n configContents += \"p0d_water_fill = 1\\n\"\n else:\n configContents += \"p0d_water_fill = 0\\n\"\n \n configContents += \"\"\"\n \n[configuration]\nmodule_list = neutMC\n\n[filenaming]\n\"\"\"\n configContents += \"comment = \" + conditions.comment + \"\\n\"\n configContents += \"run_number = \" + str(run) +\"\\n\"\n configContents += \"subrun = \" + str(subrun) + \"\\n\"\n\n if conditions.oldneut:\n configContents += \"\"\" \n\n[neutrino]\nneut_card = /project/t/tanaka/T2K/neut/branches/5.1.4.2_nd280_ROOTv5r34p09n01/src/neutgeom/neut.card\n\"\"\"\n elif conditions.newoldneut:\n configContents += \"\"\" \n\n[neutrino]\nneut_card = /project/t/tanaka/T2K/neut/branches/5.1.4.3_nd280/src/neutgeom/neut.card\n\"\"\"\n else:\n configContents += \"\"\" \n\n[neutrino]\nneut_card = /project/t/tanaka/T2K/neut/branches/5.3.2_nd280/src/neutgeom/neut.card\n\"\"\"\n\n configContents += \"flux_file = \" + conditions.ram_disk + \"/\" + conditions.flux_base + \"\\n\"\n\n#flux_file = flux_file\n#\"\"\"\n\n# configContents += \"flux_file_path = \" + conditions.ram_disk + \"/\" + conditions.flux_base\n\n# configContents += \"\"\" \n#flux_file_start = 1\n#flux_file_stop = 300\n#\"\"\"\n\n configContents += \"maxint_file = \" + conditions.maxint_file_local + \"\\n\"\n\n# default: 5e17 but for basket MC special production higher\n configContents += \"\"\" \npot = 5.0e17\nneutrino_type = beam\n\"\"\"\n if conditions.baskmagn == \"basket\":\n configContents += \"\"\" \nflux_region = basket\nmaster_volume = Basket \nrandom_start = 1\n\"\"\"\n elif conditions.baskmagn == \"magnet\":\n configContents += \"\"\" \nflux_region = magnet\nmaster_volume = Magnet \nrandom_start = 1\n\"\"\"\n else:\n print \"Unknown basket/magnet condition\"\n \n\n configContents += \"random_seed = \" + str(getRandom()) +\"\\n\"\n configContents += \"neut_seed1 = \" + str(getRandom())+\"\\n\" \n configContents += \"neut_seed2 = \" + str(getRandom())+\"\\n\" \n configContents += \"neut_seed3 = \" + str(getRandom())+\"\\n\" \n\n configContents += \"\\n\"\n configContents += \"[nd280mc]\\n\"\n configContents += \"mc_type=Neut_RooTracker \\n\"\n\n #print configContents\n\n try:\n macFile = open(configname,\"w\")\n macFile.write(configContents)\n \n except:\n print \"can't write config file\" \n \n\n return configname",
"def defineTasks(self,partition):\n recv_slots = partition.recvSlices()\n strm_slots = partition.streamSlices()\n recvNodes = partition.recvNodesFromSlots()\n strmNodes = partition.streamNodesFromSlots()\n opt = '/'+self.manager.hostName()+'/'+partition.manager.name()+'/'+partition.name+'/'\n cl0 = '/Class0'+opt\n cl1 = '/Class1'+opt\n\n partition.setDataSources([])\n tasks = []\n pn = self.partitionName()\n print '---------------------- Partition name is:',pn\n for i in xrange(len(recv_slots)):\n slot = recv_slots[i]\n node = slot[:slot.find(':')]\n sub_farm = 'SF%02d'%(i,)\n short_name = sub_farm+'_SND' # Keep this name to ensure storageMon is working!\n task = pn+'_'+node+'_'+short_name\n tasks.append(node+'/'+task+'/'+short_name+'/RecStorageSend'+cl1+'(\"'+sub_farm+'\",'+str(i)+',)')\n partition.setRecvSenders(tasks)\n tasks = []\n for i in xrange(len(strm_slots)):\n slot = strm_slots[i]\n node = slot[:slot.find(':')]\n sub_farm = 'SF%02d'%(i,)\n short_name = sub_farm+'_HLT' # Keep this name to ensure storageMon is working!\n task = pn+'_'+node+'_'+short_name\n tasks.append(node+'/'+task+'/'+short_name+'/RecStorageRecv'+cl1+'(\"'+sub_farm+'\",'+str(i)+',)')\n partition.setStreamReceivers(tasks)\n cnt = 0\n tasks = []\n infra = []\n for j in recvNodes:\n for itm in self.rcvInfra.data:\n i,cl=itm.split('/')\n infra.append(j+'/'+pn+'_'+j+'_'+i+'/'+i+'/'+i+'/'+cl+opt+'(\"'+str(cnt)+'\",)')\n cnt = cnt + 1\n partition.setRecvInfrastructure(infra)\n partition.setRecvReceivers(tasks)\n cnt = 0\n tasks = []\n infra = []\n for j in strmNodes:\n for itm in self.strInfra.data:\n i,cl=itm.split('/')\n infra.append(j+'/'+pn+'_'+j+'_'+i+'/'+i+'/'+i+'/'+cl+opt+'(\"'+str(cnt)+'\",)')\n cnt = cnt + 1\n partition.setStreamInfrastructure(infra)\n partition.setStreamSenders(tasks)\n if partition.saveTasks():\n tasks = partition.collectTasks(tasks={},with_data_sources=0)\n return tasks\n return None",
"def _generate_config(self, type, org, node):\n args = {}\n if type == \"peer\":\n args.update({\"peer_id\": \"{}.{}\".format(node, org)})\n args.update({\"peer_address\": \"{}.{}:{}\".format(node, org, 7051)})\n args.update(\n {\"peer_gossip_externalEndpoint\": \"{}.{}:{}\".format(node, org, 7051)})\n args.update(\n {\"peer_chaincodeAddress\": \"{}.{}:{}\".format(node, org, 7052)})\n args.update({\"peer_tls_enabled\": True})\n args.update({\"peer_localMspId\": \"{}MSP\".format(org.capitalize())})\n\n a = NodeConfig(org)\n a.peer(node, **args)\n else:\n args.update({\"General_ListenPort\": 7050})\n args.update(\n {\"General_LocalMSPID\": \"{}OrdererMSP\".format(org.capitalize())})\n args.update({\"General_TLS_Enabled\": True})\n args.update({\"General_BootstrapFile\": \"genesis.block\"})\n\n a = NodeConfig(org)\n a.orderer(node, **args)",
"def build_net(graph, training=True, validation=False):\n\n with graph.as_default(): \n x = tf.placeholder(tf.float32, [None] + resize_shape, 'x')\n # TODO: use len(labels_map)\n y = tf.placeholder(tf.int32, [None, 17], 'y')\n phase_train = tf.placeholder(tf.bool, name='phase_train')\n keep_prob = tf.placeholder(tf.float32, name='keep_prob')\n keep_prob_fc1 = tf.placeholder(tf.float32, name='keep_prob_fc1')\n learning_rate = tf.placeholder(tf.float32, name='learning_rate')\n\n # Create Input Pipeline for Train, Validation and Test Sets\n if training:\n batch, batch_labels, batch_image_paths = dsutils.create_input_pipeline(\n image_paths=image_paths[:index_split_train_val],\n labels=labels_onehot_list[:index_split_train_val],\n batch_size=batch_size,\n n_epochs=n_epochs,\n shape=input_shape,\n crop_factor=resize_factor,\n training=training,\n randomize=True)\n elif validation:\n batch, batch_labels, batch_image_paths = dsutils.create_input_pipeline(\n image_paths=image_paths[index_split_train_val:],\n labels=labels_onehot_list[index_split_train_val:],\n batch_size=batch_size,\n # only one epoch for test output\n n_epochs=1,\n shape=input_shape,\n crop_factor=resize_factor,\n training=training) \n else:\n batch, batch_labels, batch_image_paths = dsutils.create_input_pipeline(\n image_paths=test_image_paths,\n labels=test_onehot_list,\n batch_size=batch_size,\n # only one epoch for test output\n n_epochs=1,\n shape=input_shape,\n crop_factor=resize_factor,\n training=training)\n\n Ws = []\n \n current_input = x\n\n for layer_i, n_output in enumerate(n_filters):\n with tf.variable_scope('layer{}'.format(layer_i)):\n # 2D Convolutional Layer with batch normalization and relu\n h, W = utils.conv2d(x=current_input,\n n_output=n_output,\n k_h=filter_sizes[layer_i],\n k_w=filter_sizes[layer_i])\n h = tf.layers.batch_normalization(h, training=phase_train)\n h = tf.nn.relu(h, 'relu' + str(layer_i))\n\n # Apply Max Pooling Every 2nd Layer\n if layer_i % 2 == 0:\n h = tf.nn.max_pool(value=h,\n ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1],\n padding='SAME')\n\n # Apply Dropout Every 2nd Layer\n if layer_i % 2 == 0:\n h = tf.nn.dropout(h, keep_prob)\n\n Ws.append(W)\n current_input = h\n\n h = utils.linear(current_input, fc_size, name='fc_t')[0]\n h = tf.layers.batch_normalization(h, training=phase_train)\n h = tf.nn.relu(h, name='fc_t/relu')\n h = tf.nn.dropout(h, keep_prob_fc1)\n\n logits = utils.linear(h, len(labels_map), name='fc_t2')[0]\n h = tf.nn.sigmoid(logits, 'fc_t2')\n\n # must be the same type as logits\n y_float = tf.cast(y, tf.float32)\n\n cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(logits=logits,\n labels=y_float)\n loss = tf.reduce_mean(cross_entropy)\n\n if training:\n # update moving_mean and moving_variance so it will be available at inference time\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n with tf.control_dependencies(update_ops):\n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss)\n else:\n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss)\n \n saver = tf.train.Saver()\n init = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())\n return batch, batch_labels, batch_image_paths, init, x, y, phase_train, keep_prob, keep_prob_fc1, learning_rate, h, loss, optimizer, saver",
"def build(self):\n\n LOG.debug('-' * 80)\n LOG.debug(\"build\")\n LOG.debug('-' * 80)\n for b in self._bridges:\n bridge = b['bridge']\n # TODO(tomohiko) Need to something when not bridge['provided']?\n if bridge['provided']:\n LOG.info('Skipped building bridge=%r', bridge)\n\n for h in self._hosts:\n host = h['host']\n if host.get('tunnel_zone'):\n tz_data = host.get('tunnel_zone')\n tzs = self._api.get_tunnel_zones()\n\n # Ensure that TZ exists\n tz = [t for t in tzs if t.get_name() == tz_data['name']]\n if tz == []:\n if is_vxlan_enabled():\n tz = self._api.add_vxlan_tunnel_zone()\n else:\n tz = self._api.add_gre_tunnel_zone()\n tz.name(tz_data['name'])\n tz.create()\n else:\n tz = tz[0]\n\n # Ensure that the host is in the TZ\n tz_hosts = tz.get_hosts()\n tz_host = filter(\n lambda x: x.get_host_id() == host['mn_host_id'],\n tz_hosts)\n if tz_host == []:\n tz_host = tz.add_tunnel_zone_host()\n tz_host.ip_address(tz_data['ip_addr'])\n tz_host.host_id(host['mn_host_id'])\n tz_host.create()\n\n\n if host['provided'] == True:\n LOG.info('Skipped building host=%r', host)\n else:\n #TODO(tomoe): when we support provisioning Midolman host with\n # this tool.\n pass\n interfaces = host['interfaces']\n\n futures = []\n for i in interfaces:\n iface = Interface(i['interface'], host)\n self._interfaces[(host['id'], i['interface']['id'])] = iface\n f = iface.create()\n futures.append(f)\n\n wait_on_futures(futures)\n\n LOG.debug('-' * 80)\n LOG.debug(\"end build\")\n LOG.debug('-' * 80)",
"def _makeConfig(self, store):\n config = PortConfiguration()\n config.parent = CommandStub(store, \"port\")\n return config",
"def definitions(self) -> Dict[str, GraphOutput]:\n # Get the right output dictionary.\n d = self._manual_outputs if len(self._manual_outputs) > 0 else self._default_outputs\n\n # Extract port definitions (Neural Types) and return an immutable dictionary,\n # so the user won't be able to modify its content by an accident!\n return frozendict({k: v.ntype for k, v in d.items()})",
"def generate_nnie_config(nnie_cfg, config, nnie_out_path='./config.json', tensor_type='float'):\n u8_start = False if tensor_type == 'float' else False\n default_config = {\n \"default_net_type_token\": \"nnie\",\n \"rand_input\": False,\n \"data_num\": 100,\n \"input_path_map\": {\n \"data\": \"./image_bins\",\n },\n \"nnie\": {\n \"max_batch\": 1,\n \"output_names\": [],\n \"mapper_version\": 11,\n \"u8_start\": u8_start,\n \"device\": \"gpu\",\n \"verbose\": False,\n \"image_path_list\": [\"./image_list.txt\"],\n \"mean\": [128, 128, 128],\n \"std\": [1, 1, 1]\n }\n }\n image_path_list = nnie_cfg['image_path_list']\n assert os.path.exists(image_path_list)\n with open(image_path_list, 'r') as f:\n image_list = [item.strip() for item in f.readlines()]\n\n mean = config.to_kestrel.get('pixel_means', [123.675, 116.28, 103.53])\n std = config.to_kestrel.get('pixel_stds', [58.395, 57.12, 57.375])\n resize_hw = config.to_kestrel.get('resize_hw', (224, 224))\n resize_hw = tuple(resize_hw)\n data_num = len(image_list)\n image_bin_path = generate_image_bins(image_list, mean, std, resize_hw)\n default_config['data_num'] = data_num\n default_config['input_path_map']['data'] = image_bin_path\n default_config['nnie']['max_batch'] = nnie_cfg.get('max_batch', 1)\n default_config['nnie']['mapper_version'] = nnie_cfg.get('mapper_version', 11)\n default_config['nnie']['image_path_list'] = [image_path_list]\n default_config['nnie']['mean'] = [128] * len(std)\n default_config['nnie']['std'] = [1] * len(std)\n with open(nnie_out_path, \"w\") as f:\n json.dump(default_config, f, indent=2)\n\n return nnie_out_path"
] | [
"0.6021851",
"0.5251869",
"0.51448715",
"0.5136205",
"0.5126871",
"0.5095791",
"0.50636524",
"0.49946162",
"0.49695787",
"0.49350056",
"0.49320048",
"0.49314785",
"0.4922587",
"0.48967493",
"0.48889312",
"0.4869051",
"0.48633268",
"0.4862456",
"0.4857697",
"0.484664",
"0.48465016",
"0.48330945",
"0.48329172",
"0.48281318",
"0.48279575",
"0.48007402",
"0.47873983",
"0.47758648",
"0.47714445",
"0.47679576"
] | 0.8338399 | 0 |
Compares each curve with the next to verify continuity. Note that this function treats curves as directed; thus, two curves that start at the same point will return `False` when compared. | def assert_continuous(*curves: CubicBezierCurve) -> bool:
if not curves:
raise ValueError("CurveChecker.assert_continuous() cannot be called on an empty list")
previous_curve = curves[0]
for curve in curves[1:]:
if previous_curve.p1 != curve.p0:
return False
previous_curve = curve
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def assert_differentiable(*curves: CubicBezierCurve) -> bool:\n if not curves:\n raise ValueError(\"CurveChecker.assert_differentiable() cannot be called on an empty list\")\n\n if not assert_continuous(*curves):\n return False\n\n for curve0, curve1 in zip(curves, curves[1:]):\n if not assert_collinear(curve0.c1, curve1.p0, curve1.c0):\n return False\n return True",
"def is_on_curve(self):\n if self.infinity:\n return True\n left = self.y * self.y\n right = self.x * self.x * self.x + self.ec.a * self.x + self.ec.b\n\n return left == right",
"def has_converged(x, y, epsilon=EPSILON):\n for a, b in itertools.izip(x, y):\n if abs(a - b) > epsilon:\n return False\n return True",
"def has_converged(x, y, epsilon=EPSILON):\n for a, b in itertools.izip(x, y):\n if abs(a - b) > epsilon:\n return False\n return True",
"def curvesSimilar(t1, y1, t2, y2, tol):\n # Make synchornized version of t2,y2 called t2sync,y2sync.\n t2sync=[]\n y2sync=[]\n for timepoint1 in t1:\n (index, timepoint2)=getNearestTime(timepoint1, t2sync)\n t2sync.append(timepoint2)\n y2sync.append(y2[index])\n\n # Get R^2 value equivalent:\n normalizedError=[(y1[x]-y2sync[x])**2/y1[x]**2 for x in range(len(y1))]/len(y1)\n\n if normalizedError > tol:\n return False\n else: \n return True",
"def edges_is_closed_curve(edges):\n e_prev = first = edges[0]\n for e in edges[1:]:\n if e_prev[1] != e[0]:\n if e_prev[1] == first[0]:\n # new loop\n first = e\n else:\n return False\n e_prev = e\n if e_prev[1] != first[0]:\n return False\n return True",
"def comparison_test():\n for pose in SE2.interesting_points():\n se2 = se2_from_SE2(pose)\n SE2a = SE2_from_se2_slow(se2)\n SE2b = SE2_from_se2(se2)\n # printm('pose', pose, 'se2', se2)\n # printm('SE2a', SE2a, 'SE2b', SE2b)\n SE2.assert_close(SE2a, pose)\n # print('SE2a = pose Their distance is %f' % d)\n SE2.assert_close(SE2b, pose)\n # print('SE2b = pose Their distance is %f' % d)\n assert_allclose(SE2a, SE2b, atol=1e-8, err_msg=\"SE2a != SE2b\")\n assert_allclose(SE2a, pose, atol=1e-8, err_msg=\"SE2a != pose\")\n assert_allclose(SE2b, pose, atol=1e-8, err_msg=\"SE2b != pose\")",
"def is_converged(self,a,b):\n return np.array_equal(a,b)",
"def _isConsecutive(self, chord1, chord2):\n for voice1, note1 in enumerate(chord2.getNotes()):\n if note1 != None:\n for voice2, note2 in enumerate(chord2.getNotes()[voice1+1:]):\n if note2 != None:\n voice2 += voice1 + 1\n if note1.distance(note2) in [6, 7, 12]:\n if (chord1.getNote(voice1).distance(chord1.getNote(voice2)) % 12) in [0, 6, 7]: # Check if parallel\n return True\n elif chord1.getNote(voice1) < note1 and chord1.getNote(voice2) < note2: # Check if consecutive upward\n return True\n elif chord1.getNote(voice1) > note1 and chord1.getNote(voice2) > note2: # Check if consecutive downward\n return True\n\n return False",
"def done_comparator(self, readback: float, setpoint: float) -> bool:\n kwargs = {}\n if self.atol is not None:\n kwargs[\"atol\"] = self.atol\n if self.rtol is not None:\n kwargs[\"rtol\"] = self.rtol\n return np.isclose(readback, setpoint, **kwargs)",
"def Checker(a,b,n,x):\n if n==0:\n if abs(a[0]-b[0])>=x: #if the changes in eta from one time step to another is more than .05mm\n return True #return true to continue the loop\n else:\n return False #stop the loop (this only happens if all of the points had a change of less than .05mm)\n elif abs(a[n]-b[n])>=x: #this checks each of the points in the channel \n return True #if any have too big a change the loop continues\n else: #if that point in the channel has small enough change\n Checker(a,b,n-1) #check the next point in the channel",
"def test_circular_vs_linear():\n vac_sys = circular_or_linear_system(False)\n P_circ = [chamber.P for chamber in vac_sys.chambers()]\n P0_circ = [0.9999876873794514, 0.0012312620548606836]\n\n vac_sys = circular_or_linear_system(True)\n P_straight = [chamber.P for chamber in vac_sys.chambers()]\n P0_straight = [0.999975405344194, 0.0024594655806102796]\n assert all(isclose(P, P0) for P, P0 in zip(P_circ, P0_circ))\n assert all(isclose(P, P0) for P, P0 in zip(P_straight, P0_straight))\n\n assert isclose(P_circ[0], P_straight[0], abs_tol=.01)\n assert isclose(P_circ[1], 2 * P_straight[1], abs_tol=.01)",
"def _is_converged(self):\n if self._last_operating_point is None:\n return False\n\n # Tolerance for comparing operating points. If all states changes\n # within this tolerance in the Euclidean norm then we've converged.\n TOLERANCE = 1e-4\n for ii in range(self._horizon):\n last_x = self._last_operating_point[0][ii]\n current_x = self._current_operating_point[0][ii]\n\n if np.linalg.norm(last_x - current_x) > TOLERANCE:\n return False\n\n return True",
"def test_compare_different_expectations(self):\n\n pd_single = norm(0, 1)\n pd = []\n for i in range(0, 3):\n pd.append(pd_single)\n meas = [-1, 0, 1]\n meanCRIGN1, singleCRIGN1 = crign.crign(pd, meas)\n\n pd2 = []\n for i in range(0, 3):\n pd2.append(norm(i, 1))\n meas2 = [-1, 1, 3]\n\n meanCRIGN2, singleCRIGN2 = crign.crign(pd2, meas2)\n\n is_good = np.isclose(singleCRIGN1, singleCRIGN2).all()\n assert_true(is_good, msg=\"Relation of individual CRIGN values should return roughly the same value.\")",
"def checarPs(self,p1,p2):\n return abs(p1-p2) < 0.00001",
"def compare_curve(geometry_x, geometry_y):\n arct = CreateGeometryFromWkt(geometry_x)\n pgis = CreateGeometryFromWkt(geometry_y)\n\n intersection_length = Geometry.Length(Geometry.Intersection(arct, pgis))\n arct_length = Geometry.Length(arct)\n pgis_length = Geometry.Length(pgis)\n # result = compare_float(intersection_length, arct_length, pgis_length,EPOCH_CURVE)\n result = compare3float_relative(pgis_length, arct_length,\n intersection_length, EPOCH_CURVE_RELATIVE)\n return result",
"def are_symmetrically_related(self, point_a, point_b, tol=0.001):\n if np.allclose(self.operate(point_a), point_b, atol=tol):\n return True\n if np.allclose(self.operate(point_b), point_a, atol=tol):\n return True\n return False",
"def is_point_on_curve(a, b, p, x, y):\n assert isinstance(a, Bn)\n assert isinstance(b, Bn)\n assert isinstance(p, Bn) and p > 0\n assert (isinstance(x, Bn) and isinstance(y, Bn)) \\\n or (x == None and y == None)\n\n if x == None and y == None:\n return True\n\n lhs = (y * y) % p\n rhs = (x*x*x + a*x + b) % p\n on_curve = (lhs == rhs)\n\n return on_curve",
"def _same(p1,p2,prec=0.0001):\n #return num.all(num.equal(p1,p2))\n t1 = num.fabs(p1[0]-p2[0]) < prec\n t2 = num.fabs(p1[1]-p2[1]) < prec\n if t1 and t2:\n #print \"same\", p1,p2\n return True",
"def is_recurrent(self):\n G = self._get_recurrence_graph()\n # C = G.strongly_connected_components()\n first_component = next(nx.strongly_connected_components(G))\n abs_numbers = {abs(x) for x in first_component}\n # return sorted(list(set([abs(x) for x in C[0]]))) == \\\n # range(1, self.num_branches()+1)\n return abs_numbers == set(range(1, self.num_branches()+1))",
"def check_changes(yield_curve, forecast):\r\n df = pd.read_csv('data/diff_df.csv').set_index('DATE')\r\n if list(df.index)[-1] != list(yield_curve.index)[-1]:\r\n forecast = pd.read_csv('data/forecast.csv')\r\n yield_curve = df",
"def _chain_almost_equal(a,b, rtol=RTOL, atol=ATOL):\n for a_part, b_part in zip(a.parts, b.parts):\n for a_seg, b_seg in zip(a_part, b_part):\n if not np.allclose(a_seg, b_seg,\n rtol=RTOL, atol=ATOL):\n return False\n return True",
"def same_edge(self, other, precision=0):\n return self.id == other.id \\\n and self.start_node == other.start_node \\\n and self.end_node == other.end_node \\\n and abs(self.cost - other.cost) <= precision \\\n and abs(self.reverse_cost - other.reverse_cost) <= precision \\\n and self.reversed == other.reversed",
"def point_on_curve(self, P):\n x, y = modp(self.p, P.x, P.y)\n lhs = y ** 2\n rhs = x ** 3 + x * self.a + self.b\n return lhs == rhs",
"def all_close(goal, actual, tolerance):\n #all_equal = True\n if type(goal) is list:\n for index in range(len(goal)):\n if abs(actual[index] - goal[index]) > tolerance:\n return False\n\n elif type(goal) is geometry_msgs.msg.PoseStamped:\n return all_close(goal.pose, actual.pose, tolerance)\n\n elif type(goal) is geometry_msgs.msg.Pose:\n return all_close(pose_to_list(goal), pose_to_list(actual), tolerance)\n\n return True",
"def get_similar_lines(self, Coe1, Coe2):\n line1_victor = [Coe1[1], -Coe1[0]]\n line2_victor = [Coe2[1], -Coe2[0]]\n victor = line1_victor[1] * line2_victor[0] - line2_victor[1] * line1_victor[0]\n if 0 <= round(victor, 2) <= 0.2:\n return True\n else:\n return False",
"def part1b_1():\n xs = exampleInput\n backward = submission.computeBackward(simpleCRF, xs)\n for i in xrange(len(xs)):\n grader.requireIsEqual( 1.0, sum( backward[i].values() ) )",
"def all_close(goal, actual, tolerance):\n all_equal = True\n if type(goal) is list:\n for index in range(len(goal)):\n if abs(actual[index] - goal[index]) > tolerance:\n return False\n\n elif type(goal) is geometry_msgs.msg.PoseStamped:\n return all_close(goal.pose, actual.pose, tolerance)\n\n elif type(goal) is geometry_msgs.msg.Pose:\n return all_close(pose_to_list(goal), pose_to_list(actual), tolerance)\n\n return True",
"def all_close(goal, actual, tolerance):\n all_equal = True\n if type(goal) is list:\n for index in range(len(goal)):\n if abs(actual[index] - goal[index]) > tolerance:\n return False\n\n elif type(goal) is geometry_msgs.msg.PoseStamped:\n return all_close(goal.pose, actual.pose, tolerance)\n\n elif type(goal) is geometry_msgs.msg.Pose:\n return all_close(pose_to_list(goal), pose_to_list(actual), tolerance)\n\n return True",
"def all_close(goal, actual, tolerance):\n all_equal = True\n if type(goal) is list:\n for index in range(len(goal)):\n if abs(actual[index] - goal[index]) > tolerance:\n return False\n\n elif type(goal) is geometry_msgs.msg.PoseStamped:\n return all_close(goal.pose, actual.pose, tolerance)\n\n elif type(goal) is geometry_msgs.msg.Pose:\n return all_close(pose_to_list(goal), pose_to_list(actual), tolerance)\n\n return True"
] | [
"0.6867624",
"0.65761954",
"0.60859615",
"0.60859615",
"0.6069512",
"0.5920149",
"0.5899444",
"0.58113146",
"0.5743151",
"0.5739253",
"0.5737969",
"0.57277334",
"0.5719145",
"0.570918",
"0.5702754",
"0.5661689",
"0.5660275",
"0.5631335",
"0.5627203",
"0.5584733",
"0.55798703",
"0.5552405",
"0.55463606",
"0.5544422",
"0.5531419",
"0.55269027",
"0.5524913",
"0.5511427",
"0.5511427",
"0.5511427"
] | 0.67138547 | 1 |
Verifies that the adjacent slopes between points are within a specified tolerance of one another. Note that assert_collinear assumes ordered points; three actually collinear points passed with the middle point as the first or last argument will return `False`. | def assert_collinear(*points: Point, tolerance: float = 1e-2) -> bool:
if len(points) < 3:
raise ValueError("CurveChecker.assert_collinear() must be called with at least three points")
thetas = [np.arctan2(p0[1] - p1[1], p0[0] - p1[0]) for p0, p1 in zip(points, points[1:])]
for t0, t1 in zip(thetas, thetas[1:]):
if abs(t0 - t1) > tolerance:
return False
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def isCollinear(a,b,c):\r\n #return slope(a, b) == slope(b, c) == slope(c, a) #DOES NOT WORK\r\n #return (b[0] - a[0]) * (c[1] - a[1]) == (c[0] - a[0]) * (b[1] - a[1]) \r\n #return distance(a,b) + distance(b,c) == distance(a,c)\r\n x1 = a[0]\r\n y1 = a[1]\r\n x2 = b[0]\r\n y2 = b[1]\r\n x3 = c[0]\r\n y3 = c[1] \r\n if (x1*(y2 - y3)) + (x2*(y3 - y1)) + (x3*(y1-y2)) == 0: \r\n return True\r\n else:\r\n return False",
"def hasCollinearPoints(listOfPoints):\r\n for points in listOfPoints:\r\n if isCollinear(points[0], points[1], points[2]): #If any of the points are collinear\r\n return True\r\n else:\r\n pass\r\n return False #If none of the points are collinear\r",
"def test_endpoint_slope(b,c,d,x_n_minus_1,x_n,expected_slope):\n\tactual_slope = b + 2*c*(x_n-x_n_minus_1) + 3*d*(x_n-x_n_minus_1)**2\n\tresult = abs(actual_slope-expected_slope)<0.001\n\treturn(result)",
"def collinear(a:tuple, b:tuple, c:tuple)->bool:\n return ((b[1] - c[1]) * (a[0] - b[0])) == ((a[1] - b[1]) * (b[0] - c[0]))",
"def collinear(cls, *vectors, e=10e-10):\n l = len(vectors)\n if l == 2:\n v1 = vectors[0]\n v2 = vectors[1]\n return abs(v1.x * v2.y - v1.y - v2.x) < e\n else:\n for i in range(l):\n for j in range(i + 1, l):\n if not cls.collinear(vectors[i], vectors[j]):\n return False\n return True",
"def test_b_coefficients(x1,x2,x3,y1,y2,y3,CCoefficients,DCoefficients,expected_slope):\n\tB = b_coefficients(x1,x2,x3,y1,y2,y3,CCoefficients,DCoefficients)\n\tresult = abs(B[0]-expected_slope)< 0.001\n\treturn(result)\n\tassert B[0]==expected_slope, \"First b coefficient (%f) does not equal initial slope (%f).\" (B[0],expected_slope)",
"def test_epipolar(dxy_0, ep_vec, dxy, tol):\n delta=np.abs(np.dot((dxy-dxy_0), [ep_vec[1], -ep_vec[0]]))\n disp_mag=np.sqrt((dxy[:,0]-dxy_0[0])**2 +(dxy[:,1]-dxy_0[1])**2)\n good=(delta < tol) | (delta < 0.02 * disp_mag )\n return good, delta",
"def assert_data_with_normal_vector_has_slope(nvect, expected_slope):\n neighborhood, pc = create_point_cloud_in_plane_and_neighborhood(nvect)\n extractor = EigenValueVectorizeFeatureExtractor()\n slope = extractor.extract(pc, neighborhood, None, None, None)[6]\n np.testing.assert_allclose(slope, expected_slope, atol=1e-6)",
"def assert_differentiable(*curves: CubicBezierCurve) -> bool:\n if not curves:\n raise ValueError(\"CurveChecker.assert_differentiable() cannot be called on an empty list\")\n\n if not assert_continuous(*curves):\n return False\n\n for curve0, curve1 in zip(curves, curves[1:]):\n if not assert_collinear(curve0.c1, curve1.p0, curve1.c0):\n return False\n return True",
"def checkStraightLine(coordinates: List[List[int]]) -> bool:\n\t# initializing our comparison slope value\n\tnum = coordinates[1][1] - coordinates[0][1]\n\tden = coordinates[1][0] - coordinates[0][0]\n\tif den == 0:\n\t\tslope = math.inf\n\telse:\n\t\tslope = num / den\n\n\t# checking the initial slope against all other slopes\n\tslope_check = 0\n\tfor i in range(2, len(coordinates)):\n\t\tnum = coordinates[i][1] - coordinates[i-1][1]\n\t\tden = coordinates[i][0] - coordinates[i-1][0]\n\t\tif den == 0:\n\t\t\tslope_check = math.inf\n\t\telse:\n\t\t\tslope_check = num/den\n\n\t\tif slope_check != slope:\n\t\t\treturn False\n\n\treturn True",
"def collinear(a1, b1, a2, b2, a3, b3):\n a = x1 * (b2 - b3) + a2 * (b3 - b1) + a3 * (b1 - b2)\n \n if (a == 0):\n print \"Yes\"\n else:\n print \"No\"",
"def test_positive_slope(self):\n slopes = []\n for i in range(100):\n neighborhood, pc = create_point_cloud_in_plane_and_neighborhood()\n slopes += list(EigenValueVectorizeFeatureExtractor().extract(pc, neighborhood, None, None, None)[6])\n np.testing.assert_array_less(np.zeros_like(slopes), slopes)",
"def linear_constraint(u, Lin_lhs, Lin_rhs, tol = 0.05):\n return Lin_lhs.dot(u) <= Lin_rhs",
"def approx_eq(x, y, tolerance=1e-15):\n return abs(x - y) < tolerance",
"def _check_convergence(current_position,\n next_position,\n current_objective,\n next_objective,\n next_gradient,\n grad_tolerance,\n f_relative_tolerance,\n x_tolerance):\n grad_converged = _check_within_tolerance(next_gradient, grad_tolerance)\n x_converged = _check_within_tolerance(next_position - current_position,\n x_tolerance)\n f_converged = _check_within_tolerance(\n next_objective - current_objective,\n f_relative_tolerance * current_objective)\n return grad_converged | x_converged | f_converged",
"def validate_points(a, b):\r\n\tdiff_y = b[0] - a[0]\r\n\tdiff_x = b[1] - a[1]\r\n\r\n\treturn (diff_y == 0 and diff_x != 0) or (diff_x == 0 and diff_y != 0) or abs(diff_x) == abs(diff_y)",
"def approx_eq(x, y, tolerance = 0.000001):\n\treturn abs(x - y) < tolerance",
"def check_correctness(S, bc_start='not-a-knot', bc_end='not-a-knot',\n tol=1e-14):\n x = S.x\n c = S.c\n dx = np.diff(x)\n dx = dx.reshape([dx.shape[0]] + [1] * (c.ndim - 2))\n dxi = dx[:-1]\n\n # Check C2 continuity.\n assert_allclose(c[3, 1:], c[0, :-1] * dxi**3 + c[1, :-1] * dxi**2 +\n c[2, :-1] * dxi + c[3, :-1], rtol=tol, atol=tol)\n assert_allclose(c[2, 1:], 3 * c[0, :-1] * dxi**2 +\n 2 * c[1, :-1] * dxi + c[2, :-1], rtol=tol, atol=tol)\n assert_allclose(c[1, 1:], 3 * c[0, :-1] * dxi + c[1, :-1],\n rtol=tol, atol=tol)\n\n # Check that we found a parabola, the third derivative is 0.\n if x.size == 3 and bc_start == 'not-a-knot' and bc_end == 'not-a-knot':\n assert_allclose(c[0], 0, rtol=tol, atol=tol)\n return\n\n # Check periodic boundary conditions.\n if bc_start == 'periodic':\n assert_allclose(S(x[0], 0), S(x[-1], 0), rtol=tol, atol=tol)\n assert_allclose(S(x[0], 1), S(x[-1], 1), rtol=tol, atol=tol)\n assert_allclose(S(x[0], 2), S(x[-1], 2), rtol=tol, atol=tol)\n return\n\n # Check other boundary conditions.\n if bc_start == 'not-a-knot':\n if x.size == 2:\n slope = (S(x[1]) - S(x[0])) / dx[0]\n assert_allclose(S(x[0], 1), slope, rtol=tol, atol=tol)\n else:\n assert_allclose(c[0, 0], c[0, 1], rtol=tol, atol=tol)\n elif bc_start == 'clamped':\n assert_allclose(S(x[0], 1), 0, rtol=tol, atol=tol)\n elif bc_start == 'natural':\n assert_allclose(S(x[0], 2), 0, rtol=tol, atol=tol)\n else:\n order, value = bc_start\n assert_allclose(S(x[0], order), value, rtol=tol, atol=tol)\n\n if bc_end == 'not-a-knot':\n if x.size == 2:\n slope = (S(x[1]) - S(x[0])) / dx[0]\n assert_allclose(S(x[1], 1), slope, rtol=tol, atol=tol)\n else:\n assert_allclose(c[0, -1], c[0, -2], rtol=tol, atol=tol)\n elif bc_end == 'clamped':\n assert_allclose(S(x[-1], 1), 0, rtol=tol, atol=tol)\n elif bc_end == 'natural':\n assert_allclose(S(x[-1], 2), 0, rtol=2*tol, atol=2*tol)\n else:\n order, value = bc_end\n assert_allclose(S(x[-1], order), value, rtol=tol, atol=tol)",
"def slope(a, b):\r\n if a[0] == b[0]: #If the x values are both 0\r\n return 0 #Technically, undefined, but doesn't matter for finding collinearity\r\n return (a[1] - b[1]) / (a[0] - b[0])",
"def is_coplanar(points, tol=0.01):\n tol2 = tol ** 2\n if len(points) == 4:\n v01 = subtract_vectors(points[1], points[0])\n v02 = subtract_vectors(points[2], points[0])\n v23 = subtract_vectors(points[3], points[0])\n res = dot_vectors(v02, cross_vectors(v01, v23))\n return res**2 < tol2\n # len(points) > 4\n # compare length of cross product vector to tolerance\n u = subtract_vectors(points[1], points[0])\n v = subtract_vectors(points[2], points[1])\n w = cross_vectors(u, v)\n for i in range(1, len(points) - 2):\n u = v\n v = subtract_vectors(points[i + 2], points[i + 1])\n wuv = cross_vectors(w, cross_vectors(u, v))\n if wuv[0]**2 > tol2 or wuv[1]**2 > tol2 or wuv[2]**2 > tol2:\n return False\n return True",
"def contains(self, points, abs_tol=ABS_TOL):\n test = self.A.dot(points) - self.b[:, np.newaxis] < abs_tol\n return np.all(test, axis=0)",
"def _check_approx_fixed_point(V_current, V_previous, tol):\n\n # Compute the sup norm between `V_current` and `V_previous`\n sup_norm = np.max(np.abs(V_current - V_previous))\n\n # Algorithm termination condition\n fp = sup_norm <= tol\n\n return fp, sup_norm",
"def _allclose(x, y, rtol=1e-7, atol=1e-14):\n for a, b in zip(x, y):\n if np.abs(a - b) > (atol + rtol * np.abs(b)):\n return False\n return True",
"def _raise_assert_on_np_is_close_all(self, np0, np1):\r\n\r\n return self.assertTrue(np.isclose(np0, np1).all())",
"def test_lfc_and_el_below_lcl():\n dewpoint = [264.5351, 261.13443, 259.0122, 252.30063, 248.58017, 242.66582] * units.kelvin\n temperature = [273.09723, 268.40173, 263.56207, 260.257, 256.63538,\n 252.91345] * units.kelvin\n pressure = [1017.16, 950, 900, 850, 800, 750] * units.hPa\n el_pressure, el_temperature = el(pressure, temperature, dewpoint)\n lfc_pressure, lfc_temperature = lfc(pressure, temperature, dewpoint)\n assert_nan(lfc_pressure, pressure.units)\n assert_nan(lfc_temperature, temperature.units)\n assert_nan(el_pressure, pressure.units)\n assert_nan(el_temperature, temperature.units)",
"def within_tolerance(x, y, tolerance): \r\n return abs(x) <= tolerance and abs(y) <= tolerance",
"def get_overland_vector(catchpoints, closest, tol = 0.1, min_slope = 0.00001):\n\n length = get_distance_vector(catchpoints, closest)\n slope = (catchpoints[:,2] - closest[:,2]) / length / 100000\n\n for l, s in zip(length, slope):\n if l < tol: l, s = tol, min_slope\n\n return length / 2., slope",
"def test_xy(self):\n x = np.array([[1,3], [2,8], [1,3]])\n y = np.array([1,1,-1])\n lro = LogisticRegressionOptimiser(x,y)\n expected = np.array([[1,3], [2,8], [-1,-3]])\n for i in 0,1,2:\n for j in 0,1:\n self.assertEqual(lro.xy[i][j], expected[i][j])",
"def positive_slope(line:tuple)->bool:\n return line[0][1] < line[1][1] == line[0][0] < line[1][0]",
"def test_linear():\n import nose.tools as nt\n A = -0.11; B = -0.13; g = 9.81; m = 50.; T = 10.; dt = 0.01;\n Cd = 1.2; rho = 1.0; A = 0.5;\n a = Cd*rho*A/(2.*m)\n def exact(t):\n return A*t+B\n\n def src(t):\n return m*g + m*a*abs(exact(t-dt/2.))*exact(t+dt/2.) + m*A\n \n v, t = solver(T, dt, B, Cd, rho, A, m, Source=src)\n ve = exact(t)\n diff = abs(ve - v)\n nt.assert_almost_equal(diff.max(), 0, delta=1e-12)"
] | [
"0.70717025",
"0.6318565",
"0.6101531",
"0.60696363",
"0.605181",
"0.59607244",
"0.5879398",
"0.5877427",
"0.5806856",
"0.57670236",
"0.5622218",
"0.5608094",
"0.5563991",
"0.55154955",
"0.5489862",
"0.54876494",
"0.54695606",
"0.5420858",
"0.5409832",
"0.5381098",
"0.53751147",
"0.53664863",
"0.53404224",
"0.5323383",
"0.53217673",
"0.5308804",
"0.5283994",
"0.528149",
"0.52712834",
"0.52668166"
] | 0.80137265 | 0 |
Verifies differentiability of curves by checking collinearity of adjacent curves' control points | def assert_differentiable(*curves: CubicBezierCurve) -> bool:
if not curves:
raise ValueError("CurveChecker.assert_differentiable() cannot be called on an empty list")
if not assert_continuous(*curves):
return False
for curve0, curve1 in zip(curves, curves[1:]):
if not assert_collinear(curve0.c1, curve1.p0, curve1.c0):
return False
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def isCollinear(a,b,c):\r\n #return slope(a, b) == slope(b, c) == slope(c, a) #DOES NOT WORK\r\n #return (b[0] - a[0]) * (c[1] - a[1]) == (c[0] - a[0]) * (b[1] - a[1]) \r\n #return distance(a,b) + distance(b,c) == distance(a,c)\r\n x1 = a[0]\r\n y1 = a[1]\r\n x2 = b[0]\r\n y2 = b[1]\r\n x3 = c[0]\r\n y3 = c[1] \r\n if (x1*(y2 - y3)) + (x2*(y3 - y1)) + (x3*(y1-y2)) == 0: \r\n return True\r\n else:\r\n return False",
"def assert_collinear(*points: Point, tolerance: float = 1e-2) -> bool:\n if len(points) < 3:\n raise ValueError(\"CurveChecker.assert_collinear() must be called with at least three points\")\n\n thetas = [np.arctan2(p0[1] - p1[1], p0[0] - p1[0]) for p0, p1 in zip(points, points[1:])]\n for t0, t1 in zip(thetas, thetas[1:]):\n if abs(t0 - t1) > tolerance:\n return False\n\n return True",
"def hasCollinearPoints(listOfPoints):\r\n for points in listOfPoints:\r\n if isCollinear(points[0], points[1], points[2]): #If any of the points are collinear\r\n return True\r\n else:\r\n pass\r\n return False #If none of the points are collinear\r",
"def collinear(a1, b1, a2, b2, a3, b3):\n a = x1 * (b2 - b3) + a2 * (b3 - b1) + a3 * (b1 - b2)\n \n if (a == 0):\n print \"Yes\"\n else:\n print \"No\"",
"def test_coherence_regularized():\r\n for method in methods:\r\n f, c = tsa.coherence_regularized(tseries, 0.05, 1000,\r\n csd_method=method)\r\n npt.assert_array_almost_equal(c[0, 1], c[1, 0])",
"def collinear(a:tuple, b:tuple, c:tuple)->bool:\n return ((b[1] - c[1]) * (a[0] - b[0])) == ((a[1] - b[1]) * (b[0] - c[0]))",
"def checkCollinearity(x):\n C_mat = x.corr()\n fig = plt.figure(figsize = (15,15))\n sb.heatmap(C_mat, vmax = .8, square = True)\n plt.show()",
"def test_coherence():\r\n\r\n for method in methods:\r\n f, c = tsa.coherence(tseries, csd_method=method)\r\n npt.assert_array_almost_equal(c[0, 1], c[1, 0])\r\n npt.assert_array_almost_equal(c[0, 0], np.ones(f.shape))",
"def test_closeness_centrality_after_element_perturbation_isolating():\n F = FaultDiagnosis(\"tests/TOY_graph_nofaultresistant.csv\")\n F.simulate_element_perturbation([\"1\"])\n\n clo_cen_after_element_perturbation = {\n '2': 0,\n '3': 0,\n '4': 0.058823529411764705,\n '5': 0.058823529411764705,\n '6': 0.18823529411764706,\n '7': 0.11764705882352941,\n '8': 0.11764705882352941,\n '9': 0.15126050420168066,\n '10': 0.12538699690402477,\n '11': 0.1660899653979239,\n '12': 0.1859114015976761,\n '13': 0.16020025031289112,\n '14': 0.1859114015976761,\n '15': 0,\n '16': 0.1711229946524064,\n '17': 0.12981744421906694,\n '18': 0.17346938775510204,\n '19': 0.22145328719723184\n }\n\n np.testing.assert_array_almost_equal(\n np.asarray(sorted(clo_cen_after_element_perturbation.values())),\n np.asarray(sorted(F.G.closeness_centrality.values())),\n err_msg=\"FINAL CLOSENESS CENTRALITY failure: perturbation of element 1\")",
"def test_coherence_linear_dependence():\r\n t = np.linspace(0, 16 * np.pi, 2 ** 14)\r\n x = np.sin(t) + np.sin(2 * t) + np.sin(3 * t) + \\\r\n 0.1 * np.random.rand(t.shape[-1])\r\n N = x.shape[-1]\r\n\r\n alpha = 10\r\n m = 3\r\n noise = 0.1 * np.random.randn(t.shape[-1])\r\n y = alpha * np.roll(x, m) + noise\r\n\r\n f_noise = fftpack.fft(noise)[0:N / 2]\r\n f_x = fftpack.fft(x)[0:N / 2]\r\n\r\n c_t = (1 / (1 + (f_noise / (f_x * (alpha ** 2)))))\r\n\r\n method = {\"this_method\": 'welch',\r\n \"NFFT\": 2048,\r\n \"Fs\": 2 * np.pi}\r\n\r\n f, c = tsa.coherence(np.vstack([x, y]), csd_method=method)\r\n c_t = np.abs(signaltools.resample(c_t, c.shape[-1]))\r\n\r\n npt.assert_array_almost_equal(c[0, 1], c_t, 2)",
"def test_cov_changebasis(self):\n cov_xp = np.array([[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15]])\n\n cov_symmetric = np.array([[0, 2, 1, 3], [8, 10, 9, 11], [4, 6, 5, 7], [12, 14, 13, 15]])\n\n assert np.all(symplectic.xxpp_to_xpxp(cov_xp) == cov_symmetric)\n assert np.all(symplectic.xpxp_to_xxpp(cov_symmetric) == cov_xp)",
"def check_correctness(S, bc_start='not-a-knot', bc_end='not-a-knot',\n tol=1e-14):\n x = S.x\n c = S.c\n dx = np.diff(x)\n dx = dx.reshape([dx.shape[0]] + [1] * (c.ndim - 2))\n dxi = dx[:-1]\n\n # Check C2 continuity.\n assert_allclose(c[3, 1:], c[0, :-1] * dxi**3 + c[1, :-1] * dxi**2 +\n c[2, :-1] * dxi + c[3, :-1], rtol=tol, atol=tol)\n assert_allclose(c[2, 1:], 3 * c[0, :-1] * dxi**2 +\n 2 * c[1, :-1] * dxi + c[2, :-1], rtol=tol, atol=tol)\n assert_allclose(c[1, 1:], 3 * c[0, :-1] * dxi + c[1, :-1],\n rtol=tol, atol=tol)\n\n # Check that we found a parabola, the third derivative is 0.\n if x.size == 3 and bc_start == 'not-a-knot' and bc_end == 'not-a-knot':\n assert_allclose(c[0], 0, rtol=tol, atol=tol)\n return\n\n # Check periodic boundary conditions.\n if bc_start == 'periodic':\n assert_allclose(S(x[0], 0), S(x[-1], 0), rtol=tol, atol=tol)\n assert_allclose(S(x[0], 1), S(x[-1], 1), rtol=tol, atol=tol)\n assert_allclose(S(x[0], 2), S(x[-1], 2), rtol=tol, atol=tol)\n return\n\n # Check other boundary conditions.\n if bc_start == 'not-a-knot':\n if x.size == 2:\n slope = (S(x[1]) - S(x[0])) / dx[0]\n assert_allclose(S(x[0], 1), slope, rtol=tol, atol=tol)\n else:\n assert_allclose(c[0, 0], c[0, 1], rtol=tol, atol=tol)\n elif bc_start == 'clamped':\n assert_allclose(S(x[0], 1), 0, rtol=tol, atol=tol)\n elif bc_start == 'natural':\n assert_allclose(S(x[0], 2), 0, rtol=tol, atol=tol)\n else:\n order, value = bc_start\n assert_allclose(S(x[0], order), value, rtol=tol, atol=tol)\n\n if bc_end == 'not-a-knot':\n if x.size == 2:\n slope = (S(x[1]) - S(x[0])) / dx[0]\n assert_allclose(S(x[1], 1), slope, rtol=tol, atol=tol)\n else:\n assert_allclose(c[0, -1], c[0, -2], rtol=tol, atol=tol)\n elif bc_end == 'clamped':\n assert_allclose(S(x[-1], 1), 0, rtol=tol, atol=tol)\n elif bc_end == 'natural':\n assert_allclose(S(x[-1], 2), 0, rtol=2*tol, atol=2*tol)\n else:\n order, value = bc_end\n assert_allclose(S(x[-1], order), value, rtol=tol, atol=tol)",
"def assert_continuous(*curves: CubicBezierCurve) -> bool:\n if not curves:\n raise ValueError(\"CurveChecker.assert_continuous() cannot be called on an empty list\")\n\n previous_curve = curves[0]\n for curve in curves[1:]:\n if previous_curve.p1 != curve.p0:\n return False\n previous_curve = curve\n return True",
"def test_closeness_centrality_after_single_area_perturbation():\n F = FaultDiagnosis(\"tests/TOY_graph.csv\")\n F.simulate_area_perturbation(['area1'])\n\n clo_cen_after_single_area_perturbation = {\n '2': 0,\n '3': 0,\n '4': 0.058823529411764705,\n '5': 0.058823529411764705,\n '6': 0.18823529411764706,\n '7': 0.11764705882352941,\n '8': 0.11764705882352941,\n '9': 0.15126050420168066,\n '10': 0.12538699690402477,\n '11': 0.1660899653979239,\n '12': 0.1859114015976761,\n '13': 0.16020025031289112,\n '14': 0.1859114015976761,\n '15': 0,\n '16': 0.1711229946524064,\n '17': 0.12981744421906694,\n '18': 0.17346938775510204,\n '19': 0.22145328719723184\n }\n\n np.testing.assert_array_almost_equal(\n np.asarray(sorted(clo_cen_after_single_area_perturbation.values())),\n np.asarray(sorted(F.G.closeness_centrality.values())),\n err_msg=\"FINAL CLOSENESS CENTRALITY failure: perturbation in area 1\")",
"def test_closeness_centrality_after_multi_area_perturbation():\n F = FaultDiagnosis(\"tests/TOY_graph.csv\")\n F.simulate_area_perturbation(['area1', 'area2', 'area3'])\n\n clo_cen_after_multi_area_perturbation = {\n '2': 0,\n '3': 0,\n '4': 0.16666666666666666,\n '5': 0.16666666666666666,\n '6': 0.5333333333333333,\n '7': 0.3333333333333333,\n '8': 0.3333333333333333\n }\n\n np.testing.assert_array_almost_equal(\n np.asarray(sorted(clo_cen_after_multi_area_perturbation.values())),\n np.asarray(sorted(F.G.closeness_centrality.values())),\n err_msg=\n \"FINAL CLOSENESS CENTRALITY failure: perturbation in areas 1, 2, 3\")",
"def main():\n df = pd.read_csv('data/ch5_q8_simulation.csv')\n\n # Part b\n plt.figure()\n plt.scatter(df['x'], df['y'])\n plt.xlabel('x')\n plt.ylabel('y')\n plt.title('Scatterplot y vs. x')\n plt.savefig('plots/8a.png')\n\n # Part c\n response_var = 'y'\n pred_vars_lin = ['x']\n pred_vars_quad = ['x', 'x2']\n pred_vars_cub = ['x', 'x2', 'x3']\n pred_vars_quar = ['x', 'x2', 'x3', 'x4']\n\n poly_terms = pd.DataFrame({'x2': np.power(df['x'], 2),\n 'x3': np.power(df['x'], 3),\n 'x4': np.power(df['x'], 4)})\n df = pd.concat([df, poly_terms], axis=1)\n\n CV_error_lin = loocv(df, response_var, pred_vars_lin)\n CV_error_quad = loocv(df, response_var, pred_vars_quad)\n CV_error_cub = loocv(df, response_var, pred_vars_cub)\n CV_error_quar = loocv(df, response_var, pred_vars_quar)\n\n print('Part c')\n print('CV error (linear) = {:.3f}'.format(CV_error_lin))\n print('CV error (quadratic) = {:.3f}'.format(CV_error_quad))\n print('CV error (cubic) = {:.3f}'.format(CV_error_cub))\n print('CV error (quartic) = {:.3f}'.format(CV_error_quar))\n\n # Part d\n np.random.seed(801)\n y = np.random.randn(100)\n x = np.random.randn(100)\n y = x - 2 * np.power(x, 2) + np.random.randn(100)\n\n df = pd.DataFrame({'x': x,\n 'x2': np.power(x, 2),\n 'x3': np.power(x, 3),\n 'x4': np.power(x, 4),\n 'y': y})\n\n CV_error_lin = loocv(df, response_var, pred_vars_lin)\n CV_error_quad = loocv(df, response_var, pred_vars_quad)\n CV_error_cub = loocv(df, response_var, pred_vars_cub)\n CV_error_quar = loocv(df, response_var, pred_vars_quar)\n\n print('Part d')\n print('CV error (linear) = {:.3f}'.format(CV_error_lin))\n print('CV error (quadratic) = {:.3f}'.format(CV_error_quad))\n print('CV error (cubic) = {:.3f}'.format(CV_error_cub))\n print('CV error (quartic) = {:.3f}'.format(CV_error_quar))\n\n # Part f\n model = sm.OLS(df.loc[:, response_var], df.loc[:, pred_vars_quar]).fit()\n print(model.summary())",
"def test_closeness_centrality_after_element_perturbation():\n F = FaultDiagnosis(\"tests/TOY_graph.csv\")\n F.simulate_element_perturbation([\"1\"])\n\n clo_cen_after_element_perturbation = {\n '2': 0,\n '3': 0,\n '4': 0.058823529411764705,\n '5': 0.058823529411764705,\n '6': 0.18823529411764706,\n '7': 0.11764705882352941,\n '8': 0.11764705882352941,\n '9': 0.15126050420168066,\n '10': 0.12538699690402477,\n '11': 0.1660899653979239,\n '12': 0.1859114015976761,\n '13': 0.16020025031289112,\n '14': 0.1859114015976761,\n '15': 0,\n '16': 0.1711229946524064,\n '17': 0.12981744421906694,\n '18': 0.17346938775510204,\n '19': 0.22145328719723184\n }\n\n np.testing.assert_array_almost_equal(\n np.asarray(sorted(clo_cen_after_element_perturbation.values())),\n np.asarray(sorted(F.G.closeness_centrality.values())),\n err_msg=\"FINAL CLOSENESS CENTRALITY failure: perturbation of element 1\")",
"def test_closeness_centrality_after_element_perturbation():\n F = FaultDiagnosis(\"tests/TOY_graph.csv\")\n F.simulate_element_perturbation([\"1\"])\n\n clo_cen_after_element_perturbation = {\n '2': 0,\n '3': 0,\n '4': 0.058823529411764705,\n '5': 0.058823529411764705,\n '6': 0.18823529411764706,\n '7': 0.11764705882352941,\n '8': 0.11764705882352941,\n '9': 0.15126050420168066,\n '10': 0.12538699690402477,\n '11': 0.1660899653979239,\n '12': 0.1859114015976761,\n '13': 0.16020025031289112,\n '14': 0.1859114015976761,\n '15': 0,\n '16': 0.1711229946524064,\n '17': 0.12981744421906694,\n '18': 0.17346938775510204,\n '19': 0.22145328719723184\n }\n\n np.testing.assert_array_almost_equal(\n np.asarray(sorted(clo_cen_after_element_perturbation.values())),\n np.asarray(sorted(F.G.closeness_centrality.values())),\n err_msg=\"FINAL CLOSENESS CENTRALITY failure: perturbation of element 1\")",
"def test_control_cs_valid():\n sdf_graph = cs.convert_to_sdf(example.control.gauss_seidel(1., 5., 1.))\n assert sdf.validate_graph(sdf_graph)",
"def test_indegree_centrality_after_multi_area_perturbation():\n F = FaultDiagnosis(\"tests/TOY_graph.csv\")\n F.simulate_area_perturbation(['area1', 'area2', 'area3'])\n\n indeg_cen_after_multi_area_perturbation = {\n '2': 0.0,\n '3': 0.0,\n '4': 0.16666666666666666,\n '5': 0.16666666666666666,\n '6': 0.5,\n '7': 0.16666666666666666,\n '8': 0.16666666666666666\n }\n\n np.testing.assert_array_almost_equal(\n np.asarray(sorted(indeg_cen_after_multi_area_perturbation.values())),\n np.asarray(sorted(F.G.indegree_centrality.values())),\n err_msg=\n \"FINAL INDEGREE CENTRALITY failure: perturbation in areas 1, 2, 3\")",
"def test_coherency_regularized():\r\n\r\n for method in methods:\r\n f, c = tsa.coherency_regularized(tseries, 0.05, 1000,\r\n csd_method=method)\r\n npt.assert_array_almost_equal(c[0, 1], c[1, 0].conjugate())",
"def test_cov_changebasis(self):\n C = so.changebasis(2)\n cov_xp = np.array(\n [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15]]\n )\n\n cov_symmetric = np.array(\n [[0, 2, 1, 3], [8, 10, 9, 11], [4, 6, 5, 7], [12, 14, 13, 15]]\n )\n\n assert np.all(C @ cov_xp @ C.T == cov_symmetric)\n assert np.all(C.T @ cov_symmetric @ C == cov_xp)",
"def test_indegree_centrality_after_single_area_perturbation():\n F = FaultDiagnosis(\"tests/TOY_graph.csv\")\n F.simulate_area_perturbation(['area1'])\n\n indeg_cen_after_single_area_perturbation = {\n '2': 0.0,\n '3': 0.0,\n '4': 0.058823529411764705,\n '5': 0.058823529411764705,\n '6': 0.17647058823529413,\n '7': 0.058823529411764705,\n '8': 0.058823529411764705,\n '9': 0.11764705882352941,\n '10': 0.058823529411764705,\n '11': 0.11764705882352941,\n '12': 0.11764705882352941,\n '13': 0.11764705882352941,\n '14': 0.11764705882352941,\n '15': 0.0,\n '16': 0.11764705882352941,\n '17': 0.058823529411764705,\n '18': 0.058823529411764705,\n '19': 0.17647058823529413\n }\n\n np.testing.assert_array_almost_equal(\n np.asarray(sorted(indeg_cen_after_single_area_perturbation.values())),\n np.asarray(sorted(F.G.indegree_centrality.values())),\n err_msg=\"FINAL INDEGREE CENTRALITY failure: perturbation in area 1\")",
"def test_subtract_curve():\n c1 = Curve(data=data_num, mnemonic='test')\n c2 = c1 - 100\n assert (c2.df.iloc[0][0] + 99) < 0.0001",
"def test_indegree_centrality_after_element_perturbation_initially_closed(self):\n F = FaultDiagnosis(\"tests/TOY_graph_initiallyopen.csv\")\n F.simulate_element_perturbation([\"1\"])\n\n indeg_cen_2closed = {\n '1': 0.0,\n '2': 0.05555555555555555,\n '3': 0.0,\n '4': 0.05555555555555555,\n '5': 0.05555555555555555,\n '6': 0.16666666666666666,\n '7': 0.05555555555555555,\n '8': 0.05555555555555555,\n '9': 0.1111111111111111,\n '10': 0.05555555555555555,\n '11': 0.1111111111111111,\n '12': 0.1111111111111111,\n '13': 0.1111111111111111,\n '14': 0.1111111111111111,\n '15': 0.0,\n '16': 0.1111111111111111,\n '17': 0.05555555555555555,\n '18': 0.05555555555555555,\n '19': 0.16666666666666666\n }\n\n indeg_cen_3closed = {\n '1': 0.0,\n '2': 0.0,\n '3': 0.05555555555555555,\n '4': 0.05555555555555555,\n '5': 0.05555555555555555,\n '6': 0.16666666666666666,\n '7': 0.05555555555555555,\n '8': 0.05555555555555555,\n '9': 0.1111111111111111,\n '10': 0.05555555555555555,\n '11': 0.1111111111111111,\n '12': 0.1111111111111111,\n '13': 0.1111111111111111,\n '14': 0.1111111111111111,\n '15': 0.0,\n '16': 0.1111111111111111,\n '17': 0.05555555555555555,\n '18': 0.05555555555555555,\n '19': 0.16666666666666666\n }\n\n if F.G.final_status == {'2': 1, '3': 0}:\n np.testing.assert_array_almost_equal(\n np.asarray(sorted(indeg_cen_2closed.values())),\n np.asarray(sorted(F.G.indegree_centrality.values())),\n err_msg=\"FINAL INDEGREE CENTRALITY failure: perturbation of element 1\")\n else:\n np.testing.assert_array_almost_equal(\n np.asarray(sorted(indeg_cen_3closed.values())),\n np.asarray(sorted(F.G.indegree_centrality.values())),\n err_msg=\"FINAL INDEGREE CENTRALITY failure: perturbation of element 1\")",
"def collinear(cls, *vectors, e=10e-10):\n l = len(vectors)\n if l == 2:\n v1 = vectors[0]\n v2 = vectors[1]\n return abs(v1.x * v2.y - v1.y - v2.x) < e\n else:\n for i in range(l):\n for j in range(i + 1, l):\n if not cls.collinear(vectors[i], vectors[j]):\n return False\n return True",
"def test_degree_centrality_after_element_perturbation_initially_closed(self):\n F = FaultDiagnosis(\"tests/TOY_graph_initiallyopen.csv\")\n F.simulate_element_perturbation([\"1\"])\n\n deg_cen_2closed = {\n '1': 0.05555555555555555,\n '2': 0.1111111111111111,\n '3': 0.05555555555555555,\n '4': 0.1111111111111111,\n '5': 0.1111111111111111,\n '6': 0.2777777777777778,\n '7': 0.1111111111111111,\n '8': 0.16666666666666666,\n '9': 0.16666666666666666,\n '10': 0.1111111111111111,\n '11': 0.16666666666666666,\n '12': 0.2222222222222222,\n '13': 0.2222222222222222,\n '14': 0.2777777777777778,\n '15': 0.05555555555555555,\n '16': 0.16666666666666666,\n '17': 0.16666666666666666,\n '18': 0.05555555555555555,\n '19': 0.2777777777777778\n }\n\n deg_cen_3closed = {\n '1': 0.05555555555555555,\n '2': 0.05555555555555555,\n '3': 0.1111111111111111,\n '4': 0.1111111111111111,\n '5': 0.1111111111111111,\n '6': 0.2777777777777778,\n '7': 0.1111111111111111,\n '8': 0.16666666666666666,\n '9': 0.16666666666666666,\n '10': 0.1111111111111111,\n '11': 0.16666666666666666,\n '12': 0.2222222222222222,\n '13': 0.2222222222222222,\n '14': 0.2777777777777778,\n '15': 0.05555555555555555,\n '16': 0.16666666666666666,\n '17': 0.16666666666666666,\n '18': 0.05555555555555555,\n '19': 0.2777777777777778\n }\n\n if F.G.final_status == {'2': 1, '3': 0}:\n np.testing.assert_array_almost_equal(\n np.asarray(sorted(deg_cen_2closed.values())),\n np.asarray(sorted(F.G.degree_centrality.values())),\n err_msg=\"FINAL DEGREE CENTRALITY failure: perturbation of element 1\")\n else:\n np.testing.assert_array_almost_equal(\n np.asarray(sorted(deg_cen_3closed.values())),\n np.asarray(sorted(F.G.degree_centrality.values())),\n err_msg=\"FINAL DEGREE CENTRALITY failure: perturbation of element 1\")",
"def test_reliability2contingency():\n import matplotlib.pyplot as plt\n import veripy.contingency as contingency\n \n fcsts = np.array([100., 90, 80, 70, 60, 50, 40, 30, 20, 10, 0])\n obs = np.array([10., 9, 8, 7, 6, 5, 4, 3, 2, 1, 0])\n obs = np.ones_like(fcsts)\n obs = np.minimum(obs, fcsts)\n\n a, b, c, d = reliability2contingency(fcsts, obs)\n pod = contingency.pod(a, b, c, d)\n pofd = contingency.pofd(a, b, c, d)\n \n fig = plt.figure(figsize=(12,12))\n ax = plt.subplot(111)\n ax.plot(pofd, pod)\n diag = np.linspace(0, 1)\n ax.plot(diag, diag, color='black', linestyle='dashed', linewidth=0.5)\n ax.set_yticks(np.arange(0, 1+1e-6, .1))\n ax.set_xticks(ax.get_yticks())\n ax.set_aspect('equal')\n ax.grid()\n ax.set_title('Area Under Curve: %.4f' % (-1 * np.trapz(pod, pofd)))\n plt.show()",
"def test_multiple_conditions(self):\n matrices = [\n np.array([[0, 0.6], [1.0, 0.0]]),\n np.array([[0, 0.0], [1.0, 0.0]]),\n np.array([[0, 0.1], [1.0, 0.0]]),\n ]\n coefficients = get_importance_coeffs(['A', 'B'], ['A'], matrices)\n assert coefficients['A'] == 1.0\n assert coefficients['B'] == 0.6",
"def test_gradient_convergence(self):\n pass"
] | [
"0.6670978",
"0.6513759",
"0.6198429",
"0.6138979",
"0.61240935",
"0.61158717",
"0.60906714",
"0.6050059",
"0.6006302",
"0.59238374",
"0.5918185",
"0.59094197",
"0.58958864",
"0.5882234",
"0.5876733",
"0.5855049",
"0.579847",
"0.579847",
"0.577797",
"0.57669806",
"0.5764593",
"0.574557",
"0.5733494",
"0.573222",
"0.5716096",
"0.5713724",
"0.56938845",
"0.56743795",
"0.5668846",
"0.5661076"
] | 0.68860763 | 0 |
Converts a path to a string representation for inclusion in an SVG file as a path element's `d` attribute. | def path_to_string(path: Path) -> str:
assert_continuous(path)
pieces = ["M {} {}".format(path[0].p0[0], path[0].p0[1])]
for curve in iter(path): # iter cast not strictly necessary
piece = "C {} {} {} {} {} {}".format(
int(round(curve.c0[0])), int(round(curve.c0[1])),
int(round(curve.c1[0])), int(round(curve.c1[1])),
int(round(curve.p1[0])), int(round(curve.p1[1]))
)
pieces.append(piece)
return " ".join(pieces) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def as_string(path: pathlib.Path) -> str:\n return path.as_posix()",
"def path_to_str(path):\n if hasattr(path, '__fspath__'):\n path = as_str_any(path.__fspath__())\n return path",
"def save_svg(string, file_name):\n file_handle = file(file_name, \"w\")\n file_handle.write(string)\n file_handle.close()",
"def _path_to_string(path):\n return '.'.join(path)",
"def getSVGpath(filePath):\n openfile = open(filePath, 'r')\n textFile = openfile.read()\n openfile.close()\n textSel = re.search('<path[^/>]*..', textFile).group()\n textPathPos = re.search('d=\"[^\"]*', textSel).group()\n tokens = re.split('[\\s,\"]', textPathPos)\n return tokens",
"def path_str(path):\n\toutput = \"PATH: \"\n\tif path:\n\t\tfor i in path:\n\t\t\toutput += str(i.data) + \" -> \"\n\telse:\n\t\toutput += \"Empty\"\n\treturn output",
"def save_as_svg(file_name, path = DEFAULT_PATH):\n plt.ioff()\n plt.savefig(path + file_name + '.svg')\n plt.close()",
"def dvi_to_svg(dvi_file: str) -> str:\n file_type = get_tex_config()[\"intermediate_filetype\"]\n result = dvi_file.replace(\".\" + file_type, \".svg\")\n if not os.path.exists(result):\n commands = [\n \"dvisvgm\",\n \"\\\"{}\\\"\".format(dvi_file),\n \"-n\",\n \"-v\",\n \"0\",\n \"-o\",\n \"\\\"{}\\\"\".format(result),\n \">\",\n os.devnull\n ]\n os.system(\" \".join(commands))\n return result",
"def construct_svg_path(path, transform=None):\n if transform is None:\n transform = IdentityTransform()\n\n steps = []\n for vert, code in path.iter_segments(simplify=False):\n vert = transform.transform(vert.reshape(-1, 2)).ravel()\n step = PATH_DICT[code]\n if step != 'Z':\n step += ' '.join(map(str, vert))\n steps.append(step)\n\n return ' '.join(steps)",
"def get_svgout(self):\n return tempfile.mktemp(dir=self.tmpdir, suffix='.svg')",
"def path_filename_representation(path):\n # Strip leading / and replace / with .\n return re.sub(r\"^/(.*)$\", r\"\\1\", path).replace(\"/\", \".\")",
"def segments_svg_path(self):\n verts = self.vertices.split(',') # leave as string\n segs = [int(v) for v in self.segments.split(',')]\n data = []\n for i in xrange(0, len(segs), 2):\n v0 = 2 * segs[i]\n v1 = 2 * segs[i + 1]\n data.append(u\"M%s,%sL%s,%s\" % (\n verts[v0], verts[v0 + 1],\n verts[v1], verts[v1 + 1],\n ))\n return u\"\".join(data)",
"def dvi_to_svg(dvi_file, regen_if_exists=False):\n result = dvi_file.replace(\".dvi\", \".svg\")\n if not os.path.exists(result):\n commands = [\n \"dvisvgm\",\n dvi_file,\n \"-n\",\n \"-v\",\n \"0\",\n \"-o\",\n result,\n \">\",\n get_null()\n ]\n os.system(\" \".join(commands))\n return result",
"def convertString(path):\n if (\"win\" in sys.platform):\n return path.replace(\"/\",\"\\\\\")\n elif (\"linux\" in sys.platform):\n return path.replace(\"\\\\\",\"/\")",
"def stringyfy(path):\n try:\n # Pathlib support\n path = path.__fspath__()\n except AttributeError:\n pass\n if hasattr(path, 'name'): # passed in a file\n path = path.name\n if isinstance(path, str):\n return path\n raise ValueError(f'Cannot convert {path} to a path')",
"def get_filename(checksum):\n return '%s.svg' % checksum",
"def processed_json_path(path):\n return path.replace(RAW_ASSETS_PATH, ASSETS_PATH).replace('.json', '.bin')",
"def processed_texture_path(path):\n return path.replace(RAW_ASSETS_PATH, ASSETS_PATH).replace('png', 'webp')",
"def format_path(path):\n if len(path) > 1:\n result = [crayons.yellow(path[0].name)]\n\n previous = path[0]\n for item in path[1:]:\n result.append(' -> ')\n result.append(crayons.yellow(item.name))\n result.append(': Line ')\n result.append(crayons.cyan(str(item.is_imported_from[previous.full_path][0])))\n previous = item\n result.append(' =>> ')\n\n result.append(crayons.magenta(path[0].name))\n return ''.join(str(x) for x in result)\n else:\n return ''",
"def convert_path(path: str, path_type: PathType = PathType.AUTO) -> str:\r\n path_template = get_template_from_path(path)\r\n path = get_path_from_template(path_template, path_type)\r\n return path",
"def dump_graph(self) -> str:\n graph_dot_file = f'{self._name}.dot'\n graph_diagram_file = f'{self._name}.svg'\n write_dot(self._graph, graph_dot_file)\n subprocess.check_output(\n shlex.split(f'dot -Tsvg {graph_dot_file} -o {graph_diagram_file}')\n )\n return graph_diagram_file",
"def ps2svg(sFile, method=\"default\"):\n\n sBack = \"\"\n oErr = ErrHandle()\n try:\n # Read the file\n sText = \"\"\n with open(sFile, \"r\") as f:\n sText = f.read()\n if method == \"default\":\n sBack = ps2svg_string(sText)\n elif method == \"simple\":\n sBack = ps2svg_simple(sText)\n except:\n msg = oErr.get_error_message()\n oErr.DoError(\"ps2svg\")\n\n # Return what we have gathered\n return sBack",
"def Sourceify(path):\n return path",
"def getSvgHtml(svgFile, width, height):\n html = '<object type=\"image/svg+xml\" data=\"%s\" width=\"%s\" height=\"%s\"/>'\n return html % (svgFile, width, height)",
"def export_as_svg(self):\n from ExportCommand import ExportCommand\n\n options = QFileDialog.Options()\n options |= QFileDialog.DontUseNativeDialog\n file_path, _ = QFileDialog.getSaveFileName(self, \"Export as svg\", os.getcwd(), \"svg file(*.svg)\",\n options=options)\n if file_path:\n cmd = ExportCommand(self.graphicsView.scene(), 'svg')\n cmd.display_message.connect(self.onAddMessage)\n if cmd.execute(file_path):\n QMessageBox.information(self, self.tr('Information'), self.tr('Successfully export to svg file'))\n else:\n QMessageBox.information(self, self.tr('Error'), self.tr('Fail to export to svg file'))",
"def resource_string(self, path):\n\t\tdata = pkg_resources.resource_string(__name__, path)\n\t\treturn data.decode(\"utf8\")",
"def openSVG(path):\n from xml.dom import minidom\n doc = minidom.parse(open(path))\n svg = doc.getElementsByTagName(\"svg\")[0]\n sizeMatch = re.match(r\"(\\d+) (\\d+) (\\d+) (\\d+)\", svg.getAttribute(\"viewBox\"))\n w, h = int(sizeMatch.group(3)), int(sizeMatch.group(4))\n return svg, w, h",
"def jsonpath_to_xpath(path):\n return '/' + path.replace('.', \"/\")",
"def format_path(path):\n if not path:\n return path\n\n path = re.sub(r'/+', '/', path)\n\n if path == '/':\n return (u\"\" if isinstance(path, unicode) else \"\")\n else:\n return '/' + path.strip('/')",
"def _path_to_str(var):\n if not isinstance(var, (Path, str)):\n raise ValueError(\"All path parameters must be either strings or \"\n \"pathlib.Path objects. Found type %s.\" % type(var))\n else:\n return str(var)"
] | [
"0.664361",
"0.6505727",
"0.6453225",
"0.6349524",
"0.631118",
"0.62037516",
"0.6048819",
"0.6006023",
"0.5888196",
"0.5883319",
"0.5875403",
"0.5838811",
"0.578169",
"0.57720757",
"0.57636064",
"0.5753614",
"0.5715025",
"0.57089794",
"0.569792",
"0.5695481",
"0.56592214",
"0.5653134",
"0.56510067",
"0.5633124",
"0.5623576",
"0.5623083",
"0.5616998",
"0.5607472",
"0.55974567",
"0.5590173"
] | 0.6743669 | 0 |
Tests the TurbiniaSetup method. | def testTurbiniaSetup(self, _mock_read_config):
_mock_read_config.return_value = {"OUTPUT_DIR": "/tmp"}
self.turbinia_processor.TurbiniaSetUp(
project="turbinia-project",
turbinia_auth=False,
turbinia_recipe=None,
turbinia_zone="us-central1f",
turbinia_api="http://localhost:8001",
incident_id="123456789",
sketch_id="12345",
)
self.assertEqual(self.turbinia_processor.project, "turbinia-project")
self.assertEqual(self.turbinia_processor.turbinia_zone, "us-central1f")
self.assertEqual(
self.turbinia_processor.turbinia_api, "http://localhost:8001")
self.assertEqual(self.turbinia_processor.incident_id, "123456789")
self.assertEqual(self.turbinia_processor.sketch_id, "12345")
self.assertEqual(self.turbinia_processor.output_path, "/tmp")
self.assertEqual(self.turbinia_processor.turbinia_recipe, None) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def setup():\n pass",
"def TurbiniaSetUp(\n self, project: str, turbinia_auth: bool,\n turbinia_recipe: Union[str, None], turbinia_zone: str, turbinia_api: str,\n incident_id: str, sketch_id: int) -> None:\n self.project = project\n self.turbinia_auth = turbinia_auth\n self.turbinia_api = turbinia_api\n self.turbinia_recipe = turbinia_recipe\n self.turbinia_zone = turbinia_zone\n self.incident_id = incident_id\n self.sketch_id = sketch_id\n self.client_config = turbinia_api_lib.Configuration(host=self.turbinia_api)\n self.client = self.InitializeTurbiniaApiClient(self.credentials)\n self.requests_api_instance = turbinia_requests_api.TurbiniaRequestsApi(\n self.client)\n # We need to get the output path from the Turbinia server.\n api_instance = turbinia_configuration_api.TurbiniaConfigurationApi(\n self.client)\n try:\n api_response = api_instance.read_config()\n self.output_path = api_response.get('OUTPUT_DIR')\n except turbinia_api_lib.ApiException as exception:\n self.ModuleError(exception.body, critical=True)",
"def setup( self ):",
"def _setup_dut(ptfhost, request):\n logger.info(\"Set up SAI tests.\")\n\n _prepare_test_cases(ptfhost, request)",
"def test_functionality(self):\n \n self.browserObject = globalVars.browserObject\n\n self.preRunSetup()\n \n self.runTestCase()\n \n self.postRunCleanup()\n \n self.logout()",
"def setup(self) -> None:",
"def test_functionality(self):\n self.browserObject = globalVars.browserObject\n\n self.preRunSetup()\n \n self.runTestCase()\n \n self.postRunCleanup()",
"def runTest(self):\n self.setUp()\n self.test_NeuroPath1()",
"def setup(self):\n pass",
"def runTest(self):\n self.setUp()\n self.test_visuThreeD1()",
"def setup(self):\n ...",
"def test_setup(self):\n assert self.transaction_behaviour.setup() is None\n self.assert_quantity_in_outbox(0)",
"def setup(self):\n pass",
"def runTest(self):\n self.setUp()\n self.test_BiplaneRegistration1()",
"def test_functionality(self):\n self.templateName = \"Test Template\"\n self.browserObject = globalVars.browserObject\n\n self.preRunSetup()\n \n self.runTestCase()\n \n self.postRunCleanup()",
"def setup(self):\n pass",
"def setup(self):\n pass",
"def setup(self):\n pass",
"def setup(self):\n pass",
"def setup(self):\n pass",
"def setup(self):\n pass",
"def setup(self):\n pass",
"def setup(self):\n pass",
"def setup(self):\n pass",
"def setup(self):\n pass",
"def setup(self):\n pass",
"def setup(self):\n pass",
"def setup(self):\n pass",
"def setup(self):\n pass",
"def SetupEnvironment(self):\n pass"
] | [
"0.6720269",
"0.65703833",
"0.6467602",
"0.6449046",
"0.6447386",
"0.64372605",
"0.64077723",
"0.6403362",
"0.63865745",
"0.635866",
"0.6345223",
"0.63323843",
"0.63103575",
"0.6282889",
"0.6277148",
"0.62679124",
"0.62679124",
"0.62679124",
"0.62679124",
"0.62679124",
"0.62679124",
"0.62679124",
"0.62679124",
"0.62679124",
"0.62679124",
"0.62679124",
"0.62679124",
"0.62636167",
"0.62636167",
"0.6255071"
] | 0.77893466 | 0 |
Tests the _isInterestingPath method. | def testIsInterestingPath(self):
# pylint: disable=protected-access
self.assertTrue(self.turbinia_processor._isInterestingPath(TEST_TASK_PATH)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _isInterestingPath(self, path: str) -> bool:\n for suffix in self.extensions:\n if path.endswith(suffix):\n return True\n return False",
"def test_find_path_bi():\n assert True",
"def is_path(self, s):\n return True",
"def _is_interesting_op(self, op):\n return op_priority(op.type) <= self._parameters.trace_level",
"def _IsTestFile(self, path):\n\n raise NotImplementedError",
"def test_expand_path_2(self):\n input_path = \"/fake/path\"\n expanded_path = basic.expand_path(input_path)\n expected_path = input_path\n self.assertEqual(expanded_path, expected_path)",
"def _is_nested(pkg: str, pkg_path: str, parent: str, parent_path: str) -> bool:\n norm_pkg_path = _path.normpath(pkg_path)\n rest = pkg.replace(parent, \"\", 1).strip(\".\").split(\".\")\n return pkg.startswith(parent) and norm_pkg_path == _path.normpath(\n Path(parent_path, *rest)\n )",
"def check_endpoint_in_paths(context, endpoint):\n data = context.response.json()\n paths = check_and_get_attribute(data, \"paths\")\n assert endpoint in paths, \"Cannot find the expected endpoint {e}\".format(\n e=endpoint)",
"def _issubpath(self, a, b):\n p1 = a.rstrip(os.sep).split(os.sep)\n p2 = b.rstrip(os.sep).split(os.sep)\n return p1[:len(p2)] == p2",
"def test_verify_path2_8(self):\n result, msg = basic.verify_path2(self.file, kind=None, expect=False)\n with self.subTest():\n self.assertTrue(result)\n with self.subTest():\n self.assertIsNone(msg)",
"def test_verify_path2_9(self):\n result, msg = basic.verify_path2(self.file, kind=None, expect=True)\n with self.subTest():\n self.assertFalse(result)\n with self.subTest():\n self.assertIsNotNone(msg)",
"def is_subpath(path: Path, other: Path):\n try:\n Path(path).relative_to(other)\n except ValueError:\n return False\n else:\n return True",
"def test(cls, pathHolder, parentCrawler):\n if not super(AsciiCrawler, cls).test(pathHolder, parentCrawler):\n return False\n\n return pathHolder.ext() in ['json']",
"def _is_bad_path(path, base):\r\n return not resolved(joinpath(base, path)).startswith(base)",
"def test_path_reactions(self):\n self.assertEqual(str(self.PathReaction2), 'CH2OH <=> methoxy')",
"def test_verify_path2_13(self):\n result, msg = basic.verify_path2(self.dir, kind=\"dir\", expect=False)\n with self.subTest():\n self.assertTrue(result)\n with self.subTest():\n self.assertIsNone(msg)",
"def test_path(self, fs_path, fs):\n assert fs.path == fs_path",
"def test_verify_path2_14(self):\n result, msg = basic.verify_path2(self.dir, kind=\"dir\", expect=True)\n with self.subTest():\n self.assertFalse(result)\n with self.subTest():\n self.assertIsNotNone(msg)",
"def test_verify_path2_7(self):\n self.file.touch()\n result, msg = basic.verify_path2(self.file, kind=None, expect=False)\n with self.subTest():\n self.assertFalse(result)\n with self.subTest():\n self.assertIsNotNone(msg)",
"def test_verify_path2_4(self):\n result, msg = basic.verify_path2(self.file, kind=\"file\", expect=True)\n with self.subTest():\n self.assertFalse(result)\n with self.subTest():\n self.assertIsNotNone(msg)",
"def test(cls, pathHolder, parentCrawler):\n if not super(Scene, cls).test(pathHolder, parentCrawler):\n return False\n\n return pathHolder.ext() in cls.extensions()",
"def _veritesting(self):\n\n p = self._input_path.copy()\n\n try:\n new_path_group = self._execute_and_merge(p)\n\n except (ClaripyError, SimError, AngrError):\n if not BYPASS_VERITESTING_EXCEPTIONS in p.state.options:\n raise\n else:\n l.warning(\"Veritesting caught an exception.\", exc_info=True)\n return False, PathGroup(self.project, stashes={'deviated', p})\n\n except VeritestingError as ex:\n l.warning(\"Exception occurred: %s\", str(ex))\n return False, PathGroup(self.project, stashes={'deviated', p})\n\n l.info('Returning a set of new paths: %s (successful: %s, deadended: %s, errored: %s, deviated: %s)',\n new_path_group,\n new_path_group.successful,\n new_path_group.deadended,\n new_path_group.errored,\n new_path_group.deviated\n )\n\n return True, new_path_group",
"def testPathToLocator(self, _mock_inside, mock_cwd):\n ws = self.workspace_dir\n mock_cwd.return_value = ws\n\n foo_path = workspace_lib.PathToLocator(os.path.join(ws, 'foo'))\n baz_path = workspace_lib.PathToLocator(os.path.join(ws, 'bar', 'foo',\n 'baz'))\n daisy_path = workspace_lib.PathToLocator(os.path.join(constants.SOURCE_ROOT,\n 'src', 'overlays',\n 'overlay-daisy'))\n some_path = workspace_lib.PathToLocator(os.path.join(constants.SOURCE_ROOT,\n 'srcs', 'bar'))\n\n self.assertEqual('//foo', foo_path)\n self.assertEqual('//bar/foo/baz', baz_path)\n self.assertEqual('board:daisy', daisy_path)\n self.assertEqual(None, some_path)\n\n def assertReversible(loc):\n path = workspace_lib.LocatorToPath(loc)\n self.assertEqual(loc, workspace_lib.PathToLocator(path))\n\n assertReversible('//foo')\n assertReversible('//foo/bar/baz')\n assertReversible('board:gizmo')",
"def test_verify_path2_6(self):\n self.file.touch()\n result, msg = basic.verify_path2(self.file, kind=None, expect=True)\n with self.subTest():\n self.assertTrue(result)\n with self.subTest():\n self.assertIsNone(msg)",
"def _is_request_in_include_path(self, request):\n if self._include_paths:\n for path in self._include_paths:\n if request.path.startswith(path):\n return True\n return False\n else:\n return True",
"def test_verify_path2_10(self):\n result, msg = basic.verify_path2(self.file, kind=\"invalid\", expect=True)\n with self.subTest():\n self.assertFalse(result)\n with self.subTest():\n self.assertIsNotNone(msg)",
"def exists(self, path: PathLike):",
"def test_n_path_reactions(self):\n self.assertEqual(self.Npath, 3)",
"def test_image_path(self):\n self.assertEqual(\n self.mineral.image_path,\n 'minerals/images/some_filename.jpg')",
"def test_infodir(self):\n self.chck_triple('infodir')"
] | [
"0.72631514",
"0.58760685",
"0.58147526",
"0.56765735",
"0.55693245",
"0.55457276",
"0.54889286",
"0.5446209",
"0.53737456",
"0.53603786",
"0.53477657",
"0.5298553",
"0.5297065",
"0.5293575",
"0.5228027",
"0.5226015",
"0.519997",
"0.51928556",
"0.5165649",
"0.51393193",
"0.51282936",
"0.5125355",
"0.51160234",
"0.5116009",
"0.508583",
"0.50801396",
"0.50783086",
"0.5071152",
"0.5055903",
"0.50488335"
] | 0.81139785 | 0 |
Tests the RefreshClientCredentials method. | def testRefreshClientCredentials(self,
mock_get_credentials, mock_initialize_client):
# Set an expired token.
self.turbinia_processor.credentials = mock.MagicMock(
expiry = FAKE_CREDENTIALS['expiry'], expired = True)
self.turbinia_processor.RefreshClientCredentials()
mock_get_credentials.assert_called_once()
mock_initialize_client.assert_called_once() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def RefreshClientCredentials(self) -> bool:\n refresh = False\n if self.credentials and self.credentials.expired:\n self.credentials = self.GetCredentials(\n self.credentials_path, self.client_secrets_path)\n self.client = self.InitializeTurbiniaApiClient(self.credentials)\n refresh = True\n return bool(refresh)",
"def test_refresh_token(self):\n self.assertEqual(CloudCredentials.objects.count(), 0)\n with HTTMock(spark_cloud_mock):\n CloudCredentials.objects.refresh_token()\n self.assertEqual(CloudCredentials.objects.count(), 1)\n self.assertEqual(CloudCredentials.objects._access_token(), ACCESS_TOKEN)\n CloudCredentials.objects.all().delete()",
"def test_renews_token(self):\n self.assertEqual(CloudCredentials.objects.count(), 0)\n old = self.factory.create(access_token='old_token', expires_at=self.expired_dt)\n with HTTMock(spark_cloud_mock):\n refresh_access_token()\n self.assertEqual(CloudCredentials.objects.count(), 2)\n self.assertEqual(CloudCredentials.objects._access_token(), ACCESS_TOKEN)\n CloudCredentials.objects.all().delete()",
"def test_authenticate_refresh(app, client, session, models):\n user = models[\"user\"][0]\n # Authenticate to receive a refresh token\n response = client.post(\n \"/authenticate/local\",\n data={\"email\": user.email, \"password\": \"hunter2\"},\n )\n refresh_token = json.loads(response.data)[\"refresh_token\"]\n\n # Check that token values are as expected\n assert len(refresh_token[\"val\"]) == 64\n assert datetime.fromtimestamp(refresh_token[\"exp\"]) > datetime.now()\n assert datetime.fromtimestamp(refresh_token[\"exp\"]) < (\n datetime.now() + app.config[\"REFRESH_TOKEN_VALIDITY\"]\n )\n\n # Check that the returned token is now stored in the database\n assert refresh_token[\"val\"] == user.refresh_tokens[0].token\n\n # Expect refreshing token to succeed\n response = client.post(\n \"/refresh\", data={\"refresh_token\": refresh_token[\"val\"]}\n )\n assert response.status_code == 200\n raw_jwt_token = json.loads(response.data)[\"jwt\"]\n\n # Expect that the new claims are equal to the user claims, except for the\n # expiry which will have refreshed\n refresh_claims = jwt.decode(\n raw_jwt_token, app.config[\"RSA_PUBLIC_KEY\"], app.config[\"ALGORITHM\"],\n )\n del refresh_claims[\"exp\"]\n assert user.claims == refresh_claims\n\n # Expect refreshing an expired token to fail\n token = user.refresh_tokens[0]\n token.expiry = datetime.now() - timedelta(seconds=1)\n response = client.post(\"/refresh\", data={\"refresh_token\": token.token})\n assert response.status_code == 401",
"def refresh_credentials():\n global auth_token\n auth_token = get_oauth_token()",
"def test_legacy_client_invalid_refresh_token_expired_access_token(self):\n self.legacy_client._client._expires_at = 1\n self.legacy_client.token['refresh_token'] = 'invalidrefreshtoken'\n response = self.legacy_client.request('GET', urljoin(BASE_URL, MOCK_ENDPOINT))\n self.assertEquals(response.content, RESPONSE)",
"def test_legacy_client_invalid_refresh_token(self):\n self.legacy_client._client.access_token = 'invalidaccesstoken'\n self.legacy_client.token['refresh_token'] = 'invalidrefreshtoken'\n response = self.legacy_client.request('GET', urljoin(BASE_URL, MOCK_ENDPOINT))\n self.assertEquals(response.content, RESPONSE)",
"def test_legacy_client_expired_access_token(self):\n self.legacy_client._client._expires_at = 1\n response = self.legacy_client.request('GET', urljoin(BASE_URL, MOCK_ENDPOINT))\n self.assertEquals(response.content, RESPONSE)",
"def test_renew_token(self):\n self.assertEqual(CloudCredentials.objects.count(), 0)\n with HTTMock(spark_cloud_mock):\n CloudCredentials.objects._renew_token(self.cloud)\n self.assertEqual(CloudCredentials.objects.count(), 1)\n self.assertEqual(CloudCredentials.objects._access_token(), ACCESS_TOKEN)\n CloudCredentials.objects.all().delete()",
"def test_patch_o_auth_client(self):\n pass",
"def refresh(self):\n self._request_token(grant_type='client_credentials')",
"async def test_request_refresh(client, monkeypatch, caplog):\n mock_refresh_token_called = 0\n\n async def mock_refresh_token():\n nonlocal mock_refresh_token_called\n mock_refresh_token_called += 1\n\n monkeypatch.setattr(\n client._auth_client, 'refresh_token', mock_refresh_token)\n\n async def mock_valid_token_set():\n pass\n\n monkeypatch.setattr(client, 'valid_token_set', mock_valid_token_set)\n\n resp_text = 'ohai'\n\n with aioresponses() as mocked:\n mocked.get(conftest.API_URL, status=401)\n mocked.get(conftest.API_URL, status=200, body=resp_text)\n resp = await client.request('get', conftest.API_URL)\n\n assert 2 == mock_refresh_token_called\n assert resp == resp_text\n assert 6 == len(caplog.records)",
"def test_reused_token_get_auth_info(self):\r\n client_ = client.HTTPClient(username=USERNAME,\r\n tenant_name=TENANT_NAME,\r\n token=TOKEN,\r\n password=PASSWORD,\r\n auth_url=AUTH_URL,\r\n region_name=REGION)\r\n expected = {'auth_token': TOKEN,\r\n 'auth_tenant_id': None,\r\n 'auth_user_id': None,\r\n 'endpoint_url': self.client.endpoint_url}\r\n self.assertEqual(client_.get_auth_info(), expected)",
"def test_patch_o_auth_client_authorization(self):\n pass",
"def test_expired_credentials():\n pass",
"def _refresh_token(self, client):\n\n url = self._url('token')\n client_data = self.clients[client]\n refresh_token = client_data['token']['refresh_token']\n data = {'grant_type': 'refresh_token',\n 'scope': 'PRODUCTION',\n 'refresh_token': refresh_token}\n consumer_key = client_data['response']['consumerKey']\n consumer_secret = client_data['response']['consumerSecret']\n auth = requests.auth.HTTPBasicAuth(consumer_key, consumer_secret)\n return self.POST(url, data=data, auth=auth)",
"def test_mdb_revoking_credential(self):\n this_id = 9898\n data = self.cred_data\n data['credential_id'] = this_id\n cred = vccs_auth.credential.from_dict(data, None)\n self.mdb.add_credential(cred)\n\n # assert no exception\n cred2 = self.mdb.get_credential(this_id)\n\n print(\"Revoking credential :\\n{}\".format(pformat(cred2)))\n\n cred2.revoke({'reason': 'unit testing'})\n self.mdb.update_credential(cred2)\n\n # assert exception when fetching revoked credential\n with self.assertRaises(vccs_auth.credential.VCCSAuthCredentialError):\n self.mdb.get_credential(this_id)\n\n # assert exception when trying to activate credential again\n with self.assertRaises(ValueError):\n cred2.status('active')",
"def test_mail_client_invalid_refresh_token_expired_access_token(self):\n self.mail_client._client._expires_at = 1\n self.mail_client.token['refresh_token'] = 'invalidrefreshtoken'\n with self.assertRaises(InvalidGrantError):\n self.mail_client.request('GET', urljoin(BASE_URL, MOCK_ENDPOINT))",
"def test_mail_client_invalid_refresh_token(self):\n self.mail_client._client.access_token = 'invalidaccesstoken'\n self.mail_client.token['refresh_token'] = 'invalidrefreshtoken'\n with self.assertRaises(InvalidGrantError):\n self.mail_client.request('GET', urljoin(BASE_URL, MOCK_ENDPOINT))",
"def test_replace_o_auth_client(self):\n pass",
"def test_replace_o_auth_client_authorization(self):\n pass",
"def test_authtoken_refresh(self):\n hagrid = models.User(username='hagrid', fullname='Rubeus Hagrid')\n auth_token = models.AuthToken(user=hagrid, algorithm='hmac-sha-1')\n existing_token = auth_token.token\n existing_secret = auth_token.secret\n auth_token.refresh()\n self.assertNotEqual(existing_token, auth_token.token)\n self.assertNotEqual(existing_secret, auth_token.secret)",
"def test_mail_client_expired_access_token(self):\n self.mail_client._client._expires_at = 1\n response = self.mail_client.request('GET', urljoin(BASE_URL, MOCK_ENDPOINT))\n self.assertEquals(response.content, RESPONSE)",
"def test_update_client(self):\n pass",
"def test_update_user_profile(self):\n\n new_credentials = {'name': 'New Name', 'password': 'NewTestpass12'}\n response = self.client.patch(URL_ME, new_credentials)\n\n # Refresh the details of the user from the database.\n self.user.refresh_from_db()\n\n # Check that the update is successful.\n self.assertEqual(self.user.name, new_credentials['name'])\n self.assertTrue(self.user.check_password(new_credentials['password']))\n self.assertEqual(response.status_code, status.HTTP_200_OK)",
"def test_good_token(self):\n self.assertEqual(CloudCredentials.objects.count(), 0)\n cred = self.factory.create(access_token='good_token', expires_at=self.current_dt)\n with HTTMock(spark_cloud_mock):\n refresh_access_token()\n self.assertEqual(CloudCredentials.objects.count(), 1)\n self.assertEqual(CloudCredentials.objects._access_token(), 'good_token')\n cred.delete()",
"def testWarnsOnReturningErrorFromAuthenticateClient(self):\n\n class ErrorTestClientStorage(TestClientStorage):\n \"\"\" A ClientStorage to test returning errors from authenticateClient. \"\"\"\n\n def __init__(self, errorToReturn):\n super(ErrorTestClientStorage, self).__init__()\n self.error = errorToReturn\n\n def authenticateClient(self, client, request, secret=None):\n return self.error\n\n validRequest = self.generateValidTokenRequest(arguments={\n 'grant_type': 'refresh_token',\n 'client_id': self._VALID_CLIENT.id,\n 'client_secret': self._VALID_CLIENT.secret,\n 'refresh_token': self._VALID_REFRESH_TOKEN\n })\n tokenResource = TokenResource(\n self._TOKEN_FACTORY, self._PERSISTENT_STORAGE,\n self._REFRESH_TOKEN_STORAGE, self._AUTH_TOKEN_STORAGE,\n ErrorTestClientStorage(errorToReturn=MalformedParameterError('client_secret')),\n passwordManager=self._PASSWORD_MANAGER)\n with warnings.catch_warnings(record=True) as caughtWarnings:\n warnings.simplefilter('always')\n result = tokenResource.render_POST(validRequest)\n self.assertEqual(\n 1, len(caughtWarnings),\n msg='Expected the token resource to generate a warning, if '\n 'authenticateClient returns an OAuth2Error instead of raising it')\n self.assertTrue(issubclass(caughtWarnings[0].category, DeprecationWarning),\n msg='Expected the token resource to generate a DeprecationWarning')\n self.assertIn(\n 'Returning an error from authenticateClient is deprecated',\n str(caughtWarnings[0].message),\n msg='Expected the token resource to generate a DeprecationWarning explaining that '\n 'returning an error from authenticateClient is deprecated.')\n self.assertFailedTokenRequest(\n validRequest, result, MalformedParameterError('client_secret'),\n msg='Expected the token resource to reject the request '\n 'if authenticateClient returns an error.')",
"def test_revoke_refresh_token(client, tokens):\n response = client.delete(\n \"/auth/refresh-token/\",\n headers={\"Authorization\": \"Bearer {}\".format(tokens[\"refresh\"])},\n )\n\n payload = response.get_json()\n assert response.status_code == HTTPStatus.OK\n assert payload[\"msg\"] == \"Refresh token successfully revoked\"",
"def refresh(self):\n self._request_token(grant_type='password', username=self._username,\n password=self._password)",
"def test_reset_passwd(self, test_client, user_test1):\n response = test_client.post('/api/auth/reset', json=dict(\n reset_password_token=create_access_token(identity=user_test1),\n password=\"Azerty!123\"\n ))\n res = json.loads(response.data)\n\n assert response.status_code == 200\n assert res['status'] == True"
] | [
"0.7480343",
"0.6987335",
"0.6519567",
"0.647827",
"0.64674634",
"0.6362675",
"0.6351274",
"0.6320688",
"0.6314245",
"0.6286668",
"0.6279366",
"0.6243212",
"0.6241278",
"0.6184748",
"0.6173102",
"0.61290675",
"0.6063998",
"0.6048179",
"0.60427195",
"0.6041711",
"0.6039306",
"0.59483266",
"0.5948239",
"0.59248537",
"0.5895134",
"0.58802384",
"0.5864514",
"0.58497673",
"0.58264863",
"0.5806661"
] | 0.791131 | 0 |
Tests the InitializeTurbiniaApiClient method. | def testInitializeTurbiniaApiClientNoCreds(self, mock_get_credentials):
self.turbinia_processor.turbinia_api = 'http://127.0.0.1:8000'
self.turbinia_processor.turbinia_auth = True
mock_credentials = mock.MagicMock(spec=Credentials, id_token = FAKE_CREDENTIALS['token'])
mock_credentials.id_token = mock.MagicMock()
mock_credentials.id_token.return_value = FAKE_CREDENTIALS['token']
self.turbinia_processor.credentials = mock_credentials
mock_get_credentials.return_value = mock_credentials
result = self.turbinia_processor.InitializeTurbiniaApiClient(None)
mock_get_credentials.assert_called_once()
self.assertIsInstance(result, turbinia_api_lib.ApiClient) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def testInitializeTurbiniaApiClient(self, mock_get_credentials):\n self.turbinia_processor.turbinia_api = 'http://127.0.0.1:8000'\n self.turbinia_processor.turbinia_auth = True\n mock_credentials = mock.MagicMock(spec=Credentials, id_token = FAKE_CREDENTIALS['token'])\n mock_credentials.id_token = mock.MagicMock()\n mock_credentials.id_token.return_value = FAKE_CREDENTIALS['token']\n self.turbinia_processor.credentials = mock_credentials\n mock_get_credentials.return_value = mock_credentials\n result = self.turbinia_processor.InitializeTurbiniaApiClient(mock_credentials)\n mock_get_credentials.assert_not_called()\n self.assertIsInstance(result, turbinia_api_lib.ApiClient)",
"def setUp(self):\n self.client = api.Client(config.get_config(), api.json_handler)",
"def setUp(self):\n self.api = api.InvenTreeAPI(\n SERVER,\n username=USERNAME, password=PASSWORD,\n timeout=30,\n )",
"def setUp(self):\n super(TestSyncServiceRisk, self).setUp()\n self.api = ExternalApiClient()",
"def setUp(self):\n super(TestSyncServiceControl, self).setUp()\n self.api = ExternalApiClient()",
"def setUp(self):\n self.client = APIClient()",
"def setUp(self):\n self.client = APIClient()",
"def setUp(self):\r\n super(CLITestAuthKeystoneWithId, self).setUp()\r\n self.client = client.HTTPClient(user_id=USER_ID,\r\n tenant_id=TENANT_ID,\r\n password=PASSWORD,\r\n auth_url=AUTH_URL,\r\n region_name=REGION)",
"def setUp(self) -> None:\n self.client = APIClient()",
"def InitializeTurbiniaApiClient(\n self, credentials: Credentials) -> turbinia_api_lib.ApiClient:\n self.client_config = turbinia_api_lib.Configuration(host=self.turbinia_api)\n if not self.client_config:\n self.ModuleError('Unable to configure Turbinia API server', critical=True)\n # Check if Turbinia requires authentication.\n if self.turbinia_auth:\n if not credentials:\n self.credentials = self.GetCredentials(\n self.credentials_path, self.client_secrets_path)\n if self.credentials and self.credentials.id_token:\n self.client_config.access_token = self.credentials.id_token\n else:\n self.ModuleError(\n 'Unable to obtain id_token from identity provider', critical=True)\n return turbinia_api_lib.ApiClient(self.client_config)",
"def setUp(self):\n\n self.client = APIClient()",
"def setUp(self):\n\n self.client = APIClient()",
"def setUp(self):\n\n self.client = APIClient()",
"def setUp(self):\n super().setUp()\n self.client = APIClient()",
"def setUp(self):\n global access_token\n global accountID\n global account_cur\n global api\n # self.maxDiff = None\n try:\n accountID, account_cur, access_token = unittestsetup.auth()\n setattr(sys.modules[\"oandapyV20.oandapyV20\"],\n \"TRADING_ENVIRONMENTS\",\n {\"practice\": {\n \"stream\": \"https://test.com\",\n \"api\": \"https://test.com\",\n }})\n api = API(environment=environment,\n access_token=access_token,\n headers={\"Content-Type\": \"application/json\"})\n api.api_url = 'https://test.com'\n except Exception as e:\n print(\"%s\" % e)\n exit(0)",
"def setUp(self):\r\n super(CLITestAuthKeystoneWithIdandName, self).setUp()\r\n self.client = client.HTTPClient(username=USERNAME,\r\n user_id=USER_ID,\r\n tenant_id=TENANT_ID,\r\n tenant_name=TENANT_NAME,\r\n password=PASSWORD,\r\n auth_url=AUTH_URL,\r\n region_name=REGION)",
"def testTurbiniaSetup(self, _mock_read_config):\n _mock_read_config.return_value = {\"OUTPUT_DIR\": \"/tmp\"}\n self.turbinia_processor.TurbiniaSetUp(\n project=\"turbinia-project\",\n turbinia_auth=False,\n turbinia_recipe=None,\n turbinia_zone=\"us-central1f\",\n turbinia_api=\"http://localhost:8001\",\n incident_id=\"123456789\",\n sketch_id=\"12345\",\n )\n self.assertEqual(self.turbinia_processor.project, \"turbinia-project\")\n self.assertEqual(self.turbinia_processor.turbinia_zone, \"us-central1f\")\n self.assertEqual(\n self.turbinia_processor.turbinia_api, \"http://localhost:8001\")\n self.assertEqual(self.turbinia_processor.incident_id, \"123456789\")\n self.assertEqual(self.turbinia_processor.sketch_id, \"12345\")\n self.assertEqual(self.turbinia_processor.output_path, \"/tmp\")\n self.assertEqual(self.turbinia_processor.turbinia_recipe, None)",
"def test_get_client(self):\n pass",
"def setUp(self):\n self.client = Client()",
"def setUp(self):\n self.client = Client()",
"def setUp(self):\n self.client = Client()",
"def setUp(self):\n self.client = Client()",
"def setUp(self):\r\n super(CLITestAuthKeystone, self).setUp()\r\n self.mox = mox.Mox()\r\n self.client = client.HTTPClient(username=USERNAME,\r\n tenant_name=TENANT_NAME,\r\n password=PASSWORD,\r\n auth_url=AUTH_URL,\r\n region_name=REGION)\r\n self.addCleanup(self.mox.VerifyAll)\r\n self.addCleanup(self.mox.UnsetStubs)",
"def testclient():\n base_url = PARAMS.get(\"url\") + \"/v2\"\n client = Client(\n base_url=base_url,\n headers={\n \"Authorization\": f\"GenieKey {PARAMS.get('token')}\",\n }\n )\n return client",
"def test_create_o_auth_client(self):\n pass",
"def setUpClass(cls):\n\n cls.client = TestClient(fastapi_app.app)\n log.info('Completed initialization for FastAPI based REST API tests')",
"def setUp(self):\n self.client = DummyClient()",
"def TurbiniaSetUp(\n self, project: str, turbinia_auth: bool,\n turbinia_recipe: Union[str, None], turbinia_zone: str, turbinia_api: str,\n incident_id: str, sketch_id: int) -> None:\n self.project = project\n self.turbinia_auth = turbinia_auth\n self.turbinia_api = turbinia_api\n self.turbinia_recipe = turbinia_recipe\n self.turbinia_zone = turbinia_zone\n self.incident_id = incident_id\n self.sketch_id = sketch_id\n self.client_config = turbinia_api_lib.Configuration(host=self.turbinia_api)\n self.client = self.InitializeTurbiniaApiClient(self.credentials)\n self.requests_api_instance = turbinia_requests_api.TurbiniaRequestsApi(\n self.client)\n # We need to get the output path from the Turbinia server.\n api_instance = turbinia_configuration_api.TurbiniaConfigurationApi(\n self.client)\n try:\n api_response = api_instance.read_config()\n self.output_path = api_response.get('OUTPUT_DIR')\n except turbinia_api_lib.ApiException as exception:\n self.ModuleError(exception.body, critical=True)",
"def test_create_client(self):\n pass",
"def setUp(self):\n rand = ''.join(\n [random\n .choice(string.ascii_letters + string.digits) for n in range(16)])\n self.secret_key = 'sk_test_16c58271c29a007970de0353d8a47868df727cd0'\n self.random_ref = util.utf8(rand)\n self.test_email = '[email protected]'\n self.test_amount = 5000\n self.plan = 'Basic'\n self.client = TransactionResource(self.secret_key, self.random_ref)\n # self.client.initialize(util.utf8(self.test_amount),\n # util.utf8(self.test_email),\n # util.utf8(self.plan))"
] | [
"0.763972",
"0.71817064",
"0.69348216",
"0.6902786",
"0.6898086",
"0.68290806",
"0.68290806",
"0.6813784",
"0.6806354",
"0.67816716",
"0.6775598",
"0.6775598",
"0.6775598",
"0.6712668",
"0.6631375",
"0.65852684",
"0.65815634",
"0.6529811",
"0.6472687",
"0.6472687",
"0.6472687",
"0.6472687",
"0.6447478",
"0.64440966",
"0.6405738",
"0.63905644",
"0.63741815",
"0.6279089",
"0.6278553",
"0.62766874"
] | 0.73480415 | 1 |
Tests the InitializeTurbiniaApiClient method. | def testInitializeTurbiniaApiClient(self, mock_get_credentials):
self.turbinia_processor.turbinia_api = 'http://127.0.0.1:8000'
self.turbinia_processor.turbinia_auth = True
mock_credentials = mock.MagicMock(spec=Credentials, id_token = FAKE_CREDENTIALS['token'])
mock_credentials.id_token = mock.MagicMock()
mock_credentials.id_token.return_value = FAKE_CREDENTIALS['token']
self.turbinia_processor.credentials = mock_credentials
mock_get_credentials.return_value = mock_credentials
result = self.turbinia_processor.InitializeTurbiniaApiClient(mock_credentials)
mock_get_credentials.assert_not_called()
self.assertIsInstance(result, turbinia_api_lib.ApiClient) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def testInitializeTurbiniaApiClientNoCreds(self, mock_get_credentials):\n self.turbinia_processor.turbinia_api = 'http://127.0.0.1:8000'\n self.turbinia_processor.turbinia_auth = True\n mock_credentials = mock.MagicMock(spec=Credentials, id_token = FAKE_CREDENTIALS['token'])\n mock_credentials.id_token = mock.MagicMock()\n mock_credentials.id_token.return_value = FAKE_CREDENTIALS['token']\n self.turbinia_processor.credentials = mock_credentials\n mock_get_credentials.return_value = mock_credentials\n result = self.turbinia_processor.InitializeTurbiniaApiClient(None)\n mock_get_credentials.assert_called_once()\n self.assertIsInstance(result, turbinia_api_lib.ApiClient)",
"def setUp(self):\n self.client = api.Client(config.get_config(), api.json_handler)",
"def setUp(self):\n self.api = api.InvenTreeAPI(\n SERVER,\n username=USERNAME, password=PASSWORD,\n timeout=30,\n )",
"def setUp(self):\n super(TestSyncServiceRisk, self).setUp()\n self.api = ExternalApiClient()",
"def setUp(self):\n super(TestSyncServiceControl, self).setUp()\n self.api = ExternalApiClient()",
"def setUp(self):\n self.client = APIClient()",
"def setUp(self):\n self.client = APIClient()",
"def setUp(self):\r\n super(CLITestAuthKeystoneWithId, self).setUp()\r\n self.client = client.HTTPClient(user_id=USER_ID,\r\n tenant_id=TENANT_ID,\r\n password=PASSWORD,\r\n auth_url=AUTH_URL,\r\n region_name=REGION)",
"def setUp(self) -> None:\n self.client = APIClient()",
"def InitializeTurbiniaApiClient(\n self, credentials: Credentials) -> turbinia_api_lib.ApiClient:\n self.client_config = turbinia_api_lib.Configuration(host=self.turbinia_api)\n if not self.client_config:\n self.ModuleError('Unable to configure Turbinia API server', critical=True)\n # Check if Turbinia requires authentication.\n if self.turbinia_auth:\n if not credentials:\n self.credentials = self.GetCredentials(\n self.credentials_path, self.client_secrets_path)\n if self.credentials and self.credentials.id_token:\n self.client_config.access_token = self.credentials.id_token\n else:\n self.ModuleError(\n 'Unable to obtain id_token from identity provider', critical=True)\n return turbinia_api_lib.ApiClient(self.client_config)",
"def setUp(self):\n\n self.client = APIClient()",
"def setUp(self):\n\n self.client = APIClient()",
"def setUp(self):\n\n self.client = APIClient()",
"def setUp(self):\n super().setUp()\n self.client = APIClient()",
"def setUp(self):\n global access_token\n global accountID\n global account_cur\n global api\n # self.maxDiff = None\n try:\n accountID, account_cur, access_token = unittestsetup.auth()\n setattr(sys.modules[\"oandapyV20.oandapyV20\"],\n \"TRADING_ENVIRONMENTS\",\n {\"practice\": {\n \"stream\": \"https://test.com\",\n \"api\": \"https://test.com\",\n }})\n api = API(environment=environment,\n access_token=access_token,\n headers={\"Content-Type\": \"application/json\"})\n api.api_url = 'https://test.com'\n except Exception as e:\n print(\"%s\" % e)\n exit(0)",
"def setUp(self):\r\n super(CLITestAuthKeystoneWithIdandName, self).setUp()\r\n self.client = client.HTTPClient(username=USERNAME,\r\n user_id=USER_ID,\r\n tenant_id=TENANT_ID,\r\n tenant_name=TENANT_NAME,\r\n password=PASSWORD,\r\n auth_url=AUTH_URL,\r\n region_name=REGION)",
"def testTurbiniaSetup(self, _mock_read_config):\n _mock_read_config.return_value = {\"OUTPUT_DIR\": \"/tmp\"}\n self.turbinia_processor.TurbiniaSetUp(\n project=\"turbinia-project\",\n turbinia_auth=False,\n turbinia_recipe=None,\n turbinia_zone=\"us-central1f\",\n turbinia_api=\"http://localhost:8001\",\n incident_id=\"123456789\",\n sketch_id=\"12345\",\n )\n self.assertEqual(self.turbinia_processor.project, \"turbinia-project\")\n self.assertEqual(self.turbinia_processor.turbinia_zone, \"us-central1f\")\n self.assertEqual(\n self.turbinia_processor.turbinia_api, \"http://localhost:8001\")\n self.assertEqual(self.turbinia_processor.incident_id, \"123456789\")\n self.assertEqual(self.turbinia_processor.sketch_id, \"12345\")\n self.assertEqual(self.turbinia_processor.output_path, \"/tmp\")\n self.assertEqual(self.turbinia_processor.turbinia_recipe, None)",
"def test_get_client(self):\n pass",
"def setUp(self):\n self.client = Client()",
"def setUp(self):\n self.client = Client()",
"def setUp(self):\n self.client = Client()",
"def setUp(self):\n self.client = Client()",
"def setUp(self):\r\n super(CLITestAuthKeystone, self).setUp()\r\n self.mox = mox.Mox()\r\n self.client = client.HTTPClient(username=USERNAME,\r\n tenant_name=TENANT_NAME,\r\n password=PASSWORD,\r\n auth_url=AUTH_URL,\r\n region_name=REGION)\r\n self.addCleanup(self.mox.VerifyAll)\r\n self.addCleanup(self.mox.UnsetStubs)",
"def testclient():\n base_url = PARAMS.get(\"url\") + \"/v2\"\n client = Client(\n base_url=base_url,\n headers={\n \"Authorization\": f\"GenieKey {PARAMS.get('token')}\",\n }\n )\n return client",
"def test_create_o_auth_client(self):\n pass",
"def setUpClass(cls):\n\n cls.client = TestClient(fastapi_app.app)\n log.info('Completed initialization for FastAPI based REST API tests')",
"def setUp(self):\n self.client = DummyClient()",
"def TurbiniaSetUp(\n self, project: str, turbinia_auth: bool,\n turbinia_recipe: Union[str, None], turbinia_zone: str, turbinia_api: str,\n incident_id: str, sketch_id: int) -> None:\n self.project = project\n self.turbinia_auth = turbinia_auth\n self.turbinia_api = turbinia_api\n self.turbinia_recipe = turbinia_recipe\n self.turbinia_zone = turbinia_zone\n self.incident_id = incident_id\n self.sketch_id = sketch_id\n self.client_config = turbinia_api_lib.Configuration(host=self.turbinia_api)\n self.client = self.InitializeTurbiniaApiClient(self.credentials)\n self.requests_api_instance = turbinia_requests_api.TurbiniaRequestsApi(\n self.client)\n # We need to get the output path from the Turbinia server.\n api_instance = turbinia_configuration_api.TurbiniaConfigurationApi(\n self.client)\n try:\n api_response = api_instance.read_config()\n self.output_path = api_response.get('OUTPUT_DIR')\n except turbinia_api_lib.ApiException as exception:\n self.ModuleError(exception.body, critical=True)",
"def test_create_client(self):\n pass",
"def setUp(self):\n rand = ''.join(\n [random\n .choice(string.ascii_letters + string.digits) for n in range(16)])\n self.secret_key = 'sk_test_16c58271c29a007970de0353d8a47868df727cd0'\n self.random_ref = util.utf8(rand)\n self.test_email = '[email protected]'\n self.test_amount = 5000\n self.plan = 'Basic'\n self.client = TransactionResource(self.secret_key, self.random_ref)\n # self.client.initialize(util.utf8(self.test_amount),\n # util.utf8(self.test_email),\n # util.utf8(self.plan))"
] | [
"0.73480415",
"0.71817064",
"0.69348216",
"0.6902786",
"0.6898086",
"0.68290806",
"0.68290806",
"0.6813784",
"0.6806354",
"0.67816716",
"0.6775598",
"0.6775598",
"0.6775598",
"0.6712668",
"0.6631375",
"0.65852684",
"0.65815634",
"0.6529811",
"0.6472687",
"0.6472687",
"0.6472687",
"0.6472687",
"0.6447478",
"0.64440966",
"0.6405738",
"0.63905644",
"0.63741815",
"0.6279089",
"0.6278553",
"0.62766874"
] | 0.763972 | 0 |
Parse OpenSSLstyle foo.0, foo.1, ... subscripted options. Returns a list of values matching the specified option name. | def multiget(self, option, section = None):
matches = []
if section is None:
section = self.default_section
if self.cfg.has_option(section, option):
matches.append((-1, self.get(option, section = section)))
for key, value in self.cfg.items(section):
s = key.rsplit(".", 1)
if len(s) == 2 and s[0] == option and s[1].isdigit():
matches.append((int(s[1]), self.get(option, section = section)))
matches.sort()
return [match[1] for match in matches] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_option(self, name):\r\n if not isinstance(name, str):\r\n name = \" \".join(name)\r\n lines = self.sendAndRecv(\"GETCONF %s\\r\\n\" % name)\r\n\r\n r = []\r\n for _,line,_ in lines:\r\n try:\r\n key, val = line.split(\"=\", 1)\r\n r.append((key,val))\r\n except ValueError:\r\n r.append((line, None))\r\n\r\n return r",
"def value_options(*args):\n\n @with_pattern(r\"|\".join(args))\n def parse_options(text):\n return text\n\n return parse_options",
"def _parse_delimited_options(ttsoptions, _engine):\n options = []\n for prop, val in [s.strip().split('=') for s in ttsoptions.split(',')]:\n prop = prop.strip()\n val = val.strip()\n val = float(val) if val.isdecimal() else val\n options[prop] = val\n\n return options",
"def _handle_short_form(element):\n if len(element) <= 1:\n raise CmdLineException(\"Invalid option: '{}'\".format(element))\n tokens = []\n for i in range(1, len(element)):\n if element[i: i + 1] == \"=\":\n if i + 1 < len(element):\n tokens.append(element[i + 1:])\n break\n tokens.append(\"-\" + element[i: i + 1])\n return tokens",
"def parse_options(self, options):\n pass",
"def parse_options(options, return_list=True):\n\n cmd_options = []\n\n for key, value in options.items():\n\n if value is not None:\n txt = f\"--{key} {value}\"\n else:\n txt = f\"--{key}\"\n\n cmd_options.append(txt)\n\n if return_list:\n return cmd_options\n\n cmd_options = \" \".join(cmd_options)\n\n return cmd_options",
"def parse_options(options):\r\n # convert single quotes inside option values to html encoded string\r\n options = re.sub(r\"([a-zA-Z])('|\\\\')([a-zA-Z])\", r\"\\1'\\3\", options)\r\n options = re.sub(r\"\\\\'\", r\"'\", options) # replace already escaped single quotes\r\n # parse the set of possible options\r\n lexer = shlex.shlex(options[1:-1].encode('utf8'))\r\n lexer.quotes = \"'\"\r\n # Allow options to be separated by whitespace as well as commas\r\n lexer.whitespace = \", \"\r\n\r\n # remove quotes\r\n # convert escaped single quotes (html encoded string) back to single quotes\r\n tokens = [x[1:-1].decode('utf8').replace(\"'\", \"'\") for x in lexer]\r\n\r\n # make list of (option_id, option_description), with description=id\r\n return [(t, t) for t in tokens]",
"def parse_args(args, optinfos):\n\n for opt_identifier, optinfo in optinfos:\n try:\n options, arguments = getopt.gnu_getopt(args, optinfo)\n return opt_identifier, options, arguments\n except getopt.GetoptError:\n # That version doesn't work, so try the next one\n continue\n \n # If we got this far, they both failed (read: syntax error)\n error(2, \"Syntax Error: Incorrect option passed. See the man page for more information.\\nA common cause is using old LPRng syntax.\\nValid options: %s\\n\" % \n (string.replace(re.sub(r'([a-zA-Z])', r'-\\1 ',\n optinfos[SYSTEM_CUPS][1]), ':', '[arg] ')))",
"def _getOptions(self):\n args = []\n for iname, value in self.options:\n args.append('-' + iname)\n if value != 'true':\n args.append(value)\n return args",
"def options_by_name(self):\n pass",
"def options(argv=[]):\r\n parser = HendrixOptionParser\r\n return vars(parser.parse_args(argv)[0])",
"def parse_options():\n\n from optparse import OptionParser\n usage = r\"\"\"%prog [options] <voxel_file>\"\"\"\n p = OptionParser(usage=usage)\n p.add_option('-o', '--output', action='store', dest='output',\n default='plot', help='Path to output SILO or VTK file.')\n p.add_option('-v', '--vtk', action='store_true', dest='vtk',\n default=False, help='Flag to convert to VTK instead of SILO.')\n parsed = p.parse_args()\n if not parsed[1]:\n p.print_help()\n return parsed\n return parsed",
"def get_options(self, field):\n base, req_option = field.split(\"-\")\n assert base == \"options\", \"get_options can only be used to fetch options.\"\n option_type = self.option_str_to_int(req_option)\n i = 0\n # First, check if the option is already present in the packet\n for option in self.layer.options:\n # Scapy may try to be helpful and return the string of the option\n next_option = self.option_str_to_int(option[0])\n if option_type == next_option:\n _name, value = self.layer.options[i]\n # Some options (timestamp, checksums, nop) store their value in a\n # tuple.\n if isinstance(value, tuple):\n # Scapy returns values in any of these types\n if value in [None, b'', ()]:\n return ''\n value = value[0]\n if value in [None, b'', ()]:\n return ''\n if req_option == \"md5header\":\n return binascii.hexlify(value).decode(\"utf-8\")\n\n return value\n i += 1\n return ''",
"def interpret_options(options):\n # template always has to be index 0\n template = options[0]\n # namespace always has to be index 1. Support 'ec2' (human friendly) and\n # 'AWS/EC2' (how CloudWatch natively calls these things)\n namespace = options[1].rsplit('/', 2)[-1].lower()\n next_idx = 2\n # region might be index 2\n region = ''\n if len(options) > 2 and re.match(r'^\\w+\\-[\\w\\-]+\\-\\d+$', options[2]):\n region = options[2]\n next_idx += 1\n else:\n next_idx = 2\n region = region or boto.config.get('Boto', 'ec2_region_name', 'us-east-1')\n\n filter_by = {}\n extras = []\n for arg in options[next_idx:]:\n if arg.startswith('-'):\n # throw these away for now\n extras.append(arg)\n elif '=' in arg:\n key, value = arg.split('=', 2)\n filter_by[key] = value\n else:\n # throw these away for now\n extras.append(arg)\n\n return template, namespace, region, filter_by, extras",
"def get_options(options, opt_path):\r\n options_in = open(opt_path, 'r')\r\n # get exceptions\r\n for line_in in options_in:\r\n line = line_in.strip()\r\n if len(line) == 0:\r\n continue\r\n if line.startswith(\"#\"):\r\n continue\r\n if line.startswith(\"[\") and \"pep8\" in line:\r\n continue\r\n option = line\r\n if not line.startswith(\"-\"):\r\n line = \"--\" + line\r\n options.append(line)\r\n\r\n options_in.close()",
"def test_parsingValues(self):\n argV = (\"--fooint 912 --foofloat -823.1 \"\n \"--eggint 32 --eggfloat 21\").split()\n self.usage.parseOptions(argV)\n self.failUnlessEqual(self.usage.opts['fooint'], 912)\n self.assert_(isinstance(self.usage.opts['fooint'], int))\n self.failUnlessEqual(self.usage.opts['foofloat'], -823.1)\n self.assert_(isinstance(self.usage.opts['foofloat'], float))\n self.failUnlessEqual(self.usage.opts['eggint'], 32)\n self.assert_(isinstance(self.usage.opts['eggint'], int))\n self.failUnlessEqual(self.usage.opts['eggfloat'], 21.)\n self.assert_(isinstance(self.usage.opts['eggfloat'], float))",
"def _parse(self, args):\r\n\r\n ordered = []\r\n opt_full = dict()\r\n opt_abbrev = dict()\r\n\r\n args = args + [''] # Avoid out of range\r\n i = 0\r\n\r\n while i < len(args) - 1:\r\n arg = args[i]\r\n arg_next = args[i+1]\r\n if arg.startswith('--'):\r\n if arg_next.startswith('-'):\r\n raise ValueError('{} lacks value'.format(arg))\r\n else:\r\n opt_full[arg[2:]] = arg_next\r\n i += 2\r\n elif arg.startswith('-'):\r\n if arg_next.startswith('-'):\r\n raise ValueError('{} lacks value'.format(arg))\r\n else:\r\n opt_abbrev[arg[1:]] = arg_next\r\n i += 2\r\n else:\r\n ordered.append(arg)\r\n i += 1\r\n \r\n return ordered, opt_full, opt_abbrev",
"def ParseOption():\n parser = optparse.OptionParser()\n parser.add_option('--input', dest='input', help='Input file path')\n parser.add_option('--output', dest='output', help='Output file path')\n parser.add_option(\n '--var_name', dest='var_name', help='Var name for the array')\n return parser.parse_args()[0]",
"def check(options, rules = rules):\n s = [\"str\", \"unicode\"]\n for key in options:\n if not key.endswith(\" comment\"):\n if key in rules:\n c = rules[key]\n else:\n raise OptionKeyError(key)\n value = options[key]\n if c[0] == \"U\": continue\n elif c[0] == \"POT\":\n if not(((value & (value - 1)) == 0) and value):\n raise OptionPOTError(key)\n elif c[0] == \"R\":\n if value not in list(range(c[1], c[2]+1)):\n raise OptionRangeError(key, c[1], c[2]+1)\n elif c[0] == \"B\":\n if value not in list(range(0, 2)):\n #print (value)\n raise OptionRangeError(key, 0, 2)\n elif c[0] == \"N1+\":\n if value < 1:\n raise OptionRangeError(key, 1, float(\"inf\"))\n elif c[0] == \"N0+\":\n if value < 0:\n raise OptionRangeError(key, 0, float(\"inf\"))\n elif c[0] == \"FN0+\":\n if value < 0:\n raise OptionRangeError(key, 0, float(\"inf\"))\n elif c[0] == \"N-1+\":\n if value < -1:\n raise OptionRangeError(key, -1, float(\"inf\"))\n elif c[0] == \"S\":\n if value.__class__.__name__ not in s:\n raise OptionTypeError(key, \"text\")\n elif c[0] == \"Name\":check_name(value,key)\n\n elif c[0] == \"L\":\n if value.__class__.__name__ != \"list\":\n raise OptionTypeError(key, \"list\")\n\n elif c[0] == \"C\":\n if len(value) != 3:\n raise OptionError()\n if sum(value) < 1:\n raise OptionError()\n else:\n raise Exception(\"%s not valid rule type from %s\" % (c[0], key))",
"def versatileOptions():\r\n return tuple(sorted(i[0] for i in list(Options.defaults().items()) if i[1].find(' #v ') > 0))",
"def lookup_option_symbols(self, underlying: str) -> List[Symbol]:\n url = \"/v1/markets/options/lookup\"\n params = {\"underlying\": underlying}\n\n data = self.get(url, params)\n res = MarketsAPIResponse(**data)\n return res.symbols",
"def getOptionsNames(self) -> List[unicode]:\n ...",
"def parseSubscripts(part):\n subs = str(part)\n subs = part.split(\"]\")[:-1]\n return [int(sub[1:]) for sub in subs]",
"def parse_opts2(tokens, optpx='--', argparam=False):\n opts0 = []\n args = []\n n = len(optpx)\n\n for token in tokens:\n if token[:2] == optpx:\n opts0.append(token[n:])\n else:\n if argparam:\n token = token.split('=')\n args.append(token)\n\n opts = parse_opts(opts0)\n\n return args, opts",
"def parse(self, section):\n # try to find alternatives if they exist\n alternatives = deepcopy(self.alternatives)\n while len(alternatives) != 0 and self.name not in section.dict:\n other_name = alternatives.pop(0)\n if other_name in section.dict:\n section.dict[self.name] = section.dict[other_name]\n del section.dict[other_name]\n break\n if not self.optional:\n assert_exists(self.name, section.dict, section.name)\n if self.name not in section.dict:\n return self.default\n else:\n if self.dtype != list:\n if self.dtype == bool:\n # this is necessary since ``bool(\"False\")`` returns ``True``.\n value = parse_bool(section, self.name)\n else:\n value = self.dtype(section.dict[self.name])\n if not self.validation_func(value):\n raise ValueError('Invalid input for option ' + self.name +\n ' in section ' + section.name)\n return value\n else:\n\n value = parse_list(section.dict[self.name], self.datatype)\n\n # value validation\n if not all_true(self.validation_func, value):\n raise ValueError('Invalid input for option ' + self.name +\n ' in section ' + section.name)\n\n shape = deepcopy(self.shape)\n\n # now we need to get the correct shape\n if shape == -1:\n # we don't care for the shape of this\n if not isinstance(value, list):\n value = [value]\n return value\n\n if isinstance(shape, str):\n # in this case we simply use the shape of the option with this name\n if shape not in section.dict:\n raise ValueError(self.name + ' in ' + section.name + ' has an invalid ' +\\\n 'shape because the options whose shape it should have ' +\\\n 'does not exist. Check your option definitions!')\n shape = get_shape(section.dict[shape])\n if isinstance(shape, int):\n shape = [shape]\n # shape is now a list, but it might still contain strings\n for i in range(len(shape)):\n if isinstance(shape[i], str):\n shape[i] = len(section.dict[shape[i]])\n\n\n\n # shape is now either a 'flat' shape, i.e. something like [2, 3, 2],\n # or an expanded shape, e.g. [2, [3, 3], [[2, 2, 2],[2, 2, 2]]]\n # if it's flat, it might contain dimensions with -1 that cannot be\n # autoexpanded. We first need to determine the shape of this dimension.\n if is_flat(shape):\n real_shape = get_shape(value)\n if isinstance(real_shape, (list, tuple)):\n # if it's just a single number we can expand it\n # Here I'm trying to find the flat shape of the value that was\n # given in the configuration file.\n flat_shape_value = try_flattening_shape(real_shape)\n # It might happen that we cannot flatten the shape, in this\n # case there are negative values remaining in flat_shape_value.\n # If there are, this means that there is a dimension\n # containing lists of different lengths.\n # In any case I will try to replace any -1 in ``shape``\n # with the value in ``flat_shape_value``.\n shape = get_positive_shape(shape, flat_shape_value)\n # Now we do a test for equality of the asserted shape and\n # the shape of the value found in the config file. Keep in\n # mind that there might be -1 values left.\n if flat_shape_value != shape[-len(flat_shape_value):]:\n raise ShapeError(self.name, section.name)\n # If there are -1's left we must ensure that the \"depth\" of\n # the given value, i.e. 
the number of dimensions, is higher\n # than the ``number of dimensions after the value preceding\n # the first -1`` + 1 .\n if any(map(lambda x: x == -1, shape)):\n depth = numdim(value)\n mindepth = len(shape) - shape.index(-1) + 1\n if depth < mindepth:\n raise ValueError('Option ' + self.name + ' in section ' +\n section.name + ' can not be expanded!')\n shape = expand_shape(shape)\n\n # Now we have an expanded shape, so only two tasks remain:\n # * auto-expansion\n # * shape validation\n value = expand_to_shape(shape, value)\n if not compare_shapes(shape, get_shape(value)):\n raise ShapeError(self.name, section.name)\n return value",
"def Options():\n p = optparse.OptionParser('split_doc.py [options] input_file out_prefix')\n # Like awk -v\n p.add_option(\n '-v', dest='default_vals', action='append', default=[],\n help=\"If the doc's own metadata doesn't define 'name', set it to this value\")\n p.add_option(\n '-s', '--strict', dest='strict', action='store_true', default=False,\n help=\"Require metadata\")\n return p",
"def get_options(self, key):\n if key in self.options.get_option_names():\n return self.options\n\n try:\n scope, scoped_key = key.split('.')\n except ValueError:\n return None\n\n if scope == 'input' and scoped_key in self.input.options.get_option_names():\n return self.input.options\n elif scope == 'output' and scoped_key in self.output.options.get_option_names():\n return self.output.options\n elif scope == 'exploit' and scoped_key in self.exploit.options.get_option_names():\n return self.exploit.options\n else:\n return None",
"def extract_opt(options, optname):\n extracted = []\n remaining = []\n for o, v in options:\n if o == optname:\n extracted.append((o, v))\n else:\n remaining.append((o, v))\n return extracted, remaining",
"def _check_prefixes(self, docstring: PetscDocStringImpl) -> None:\n for key, opts in sorted(self.items.items()):\n lopts = len(opts)\n assert lopts >= 1, f'number of options {lopts} < 1, key: {key}, items: {self.items}'\n\n if lopts == 1:\n # only 1 option, should start with '.'\n self._check_opt_starts_with(docstring, opts[0], 'Solitary', '.')\n else:\n # more than 1, should be '+', then however many '.', then last is '-'\n self._check_opt_starts_with(docstring, opts[0], 'First multi', '+')\n for opt in opts[1:-1]:\n self._check_opt_starts_with(docstring, opt, 'Multi', '.')\n self._check_opt_starts_with(docstring, opts[-1], 'Last multi', '-')\n return",
"def _getOptions(self, sectionName):\r\n\r\n if sectionName in self.sections:\r\n attri_list = self.cf.options(sectionName)\r\n return attri_list\r\n else:\r\n return None"
] | [
"0.5681802",
"0.5620391",
"0.5565046",
"0.541359",
"0.53751975",
"0.5340231",
"0.5283279",
"0.5276728",
"0.52554685",
"0.5251639",
"0.51984483",
"0.5196909",
"0.519201",
"0.5174309",
"0.5122386",
"0.51024044",
"0.51020473",
"0.5095427",
"0.5034037",
"0.5030856",
"0.50224656",
"0.50154805",
"0.4999125",
"0.4985558",
"0.49723536",
"0.49649593",
"0.49568874",
"0.49501932",
"0.49457142",
"0.49314326"
] | 0.5834238 | 0 |
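Among the option-handling negatives listed above, extract_opt is small enough to exercise directly; this sketch restates it and runs it on made-up getopt-style pairs (the flag names and values are illustrative, not from the original source):

    def extract_opt(options, optname):
        extracted, remaining = [], []
        for o, v in options:
            # keep (flag, value) pairs matching optname separate from the rest
            (extracted if o == optname else remaining).append((o, v))
        return extracted, remaining

    opts = [('-v', 'x=1'), ('-s', ''), ('-v', 'y=2')]   # made-up getopt-style pairs
    extracted, remaining = extract_opt(opts, '-v')
    assert extracted == [('-v', 'x=1'), ('-v', 'y=2')]
    assert remaining == [('-s', '')]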
Get an integer option, perhaps with a default value. | def getint(self, option, default = None, section = None):
return int(self.get(option, default, section)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getint(self, option, default=None):\n\t\treturn self._get_raw(option, 'int', default)",
"def getint(self, option):\n return getint(self.name, option)",
"def getint(self, section, option, default=None):\r\n return self.get(section, option, type=int, default=default)",
"def getint(self, option, argument=None):\n value = self.get(option, argument)\n if value: return int(value)\n else: return 0",
"def getInt(self, section, option, default=0):\n return self.get(section, option, default, int)",
"def getint(self, section, option):\n return int(self.get(section, option))",
"def option(number, default='no'):\n return answer(number).get('options', default)",
"def _ParseIntegerOption(cls, options, argument_name, default_value=None):\n argument_value = getattr(options, argument_name, None)\n if not argument_value:\n return default_value\n\n if not isinstance(argument_value, py2to3.INTEGER_TYPES):\n raise errors.BadConfigOption(\n u'Unsupported option: {0:s} integer type required.'.format(\n argument_name))\n\n return argument_value",
"def config_get_int(section, option):\n return __CONFIG.getint(section, option)",
"def safe_get_int(self, section, option, default=None):\n try:\n return int(self.safe_get(section, option, default))\n except ValueError:\n if default is None:\n raise\n else:\n #gvlogger.info(\"Can't convert value from section '%s' option '%s' in configuration file, reverting to defaults\", section, option)\n return default",
"def find_option(number):\n if not isinstance(number, int):\n raise TypeError(number)\n if not ((0 <= number) and (number <= 65535)):\n raise ValueError(number)\n return _OptionRegistry.get(number, None)",
"def option(self, spec):\n return spec.options[self.rng.integers(len(spec.options))]",
"def getInt(self, item, default=0):\n value = self.getSection(CFG_GENERAL, item)\n return default if not value else int(value)",
"def get_int(self, sect, opt):\r\n vstr = self.get_safe(sect, opt)\r\n try:\r\n return int(vstr)\r\n except ValueError:\r\n return 0",
"def test_getint_with_default(self):\n self.assertEqual(self.config.getint('advanced','p'),None)\n self.assertEqual(self.config.getint('advanced','p',11),11)",
"def getIntValue(self):\n return _libsbml.ConversionOption_getIntValue(self)",
"def _get_option(self, name, datatype, default):\n return config.get_option(self._options,\n name,\n type=datatype,\n default=default)",
"def get_attr_int(self, name, default=0):\n v = self.get_attr(name)\n if v is None:\n return default\n try:\n return int(v)\n except: # noqa\n return default",
"def get_by_name_as_int(cls, name, default=None):\n try:\n return int(Configuration.get_by_name(name))\n except:\n return default",
"def to_int(name, default=0):\n try:\n return int(get(name))\n except (TypeError, ValueError):\n return default",
"def GetInteger(self,prompt=''):\n\t\treturn self.acad.ActiveDocument.Utility.GetInteger(prompt)",
"def value(self, value: Optional[int] = None) -> Optional[int]:\n ...",
"def int(self, item, default=None):\n try:\n item = self.__getattr__(item)\n except AttributeError as err:\n if default is not None:\n return default\n raise err\n return int(item)",
"def to_int_or_none(value: Union[None, int, str]) -> Optional[int]:\n return None if value is None else int(value)",
"def getDbIntDefault(self, db, key, default):\n val = self.getDbStrNone(db, key)\n if val != None:\n return int(val)\n else:\n return default",
"def getlong(self, option, default = None, section = None):\n return long(self.get(option, default, section))",
"def setIntegerOption(self, option, value):\n result = self.__lib.voikkoSetIntegerOption(self.__handle, option, value)\n if result == 0:\n raise VoikkoException(\"Could not set integer option %s to value %s\" % (option, value))",
"def get_option(self, option):\n\t\treturn self.options[option]",
"def get(key: 'int | str', default: 'Optional[int]' = -1) -> 'Flags':\n if isinstance(key, int):\n return Flags(key)\n return Flags[key] # type: ignore[misc]",
"def get_int_attribute(element, attr, default = 0):\n result = element.getAttribute(attr)\n if result == \"\":\n return default\n return int(result)"
] | [
"0.84793097",
"0.81527907",
"0.79768705",
"0.7929183",
"0.7760084",
"0.7490237",
"0.73363376",
"0.71646047",
"0.7082002",
"0.7010749",
"0.6840518",
"0.66669184",
"0.65775895",
"0.6542784",
"0.65416443",
"0.652801",
"0.63949615",
"0.6394363",
"0.63876903",
"0.633896",
"0.63153094",
"0.63023686",
"0.62728655",
"0.622992",
"0.6211607",
"0.6196161",
"0.61919236",
"0.61895233",
"0.61851114",
"0.61552656"
] | 0.8415297 | 1 |
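A minimal, self-contained sketch of how the getint wrapper above behaves; ConfigStub is a made-up stand-in for the real config parser, storing raw option strings and exposing the same three-argument get() used by the record:

    class ConfigStub:
        def __init__(self, values):
            self.values = values  # option name -> raw string value

        def get(self, option, default=None, section=None):
            return self.values.get(option, default)

        def getint(self, option, default=None, section=None):
            # same shape as the record: delegate to get(), then coerce to int
            return int(self.get(option, default, section))

    cfg = ConfigStub({"timeout": "30"})
    assert cfg.getint("timeout") == 30      # string "30" coerced to int
    assert cfg.getint("retries", 5) == 5    # missing option falls back to the default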
Get a long integer option, perhaps with a default value. | def getlong(self, option, default = None, section = None):
return long(self.get(option, default, section)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getint(self, option, default = None, section = None):\n return int(self.get(option, default, section))",
"def getLong(self, int: int, int2: int) -> int:\n ...",
"def getLong(t, swipl):\n i = c_long()\n if swipl.PL_get_long(t, byref(i)):\n return i.value\n else:\n raise InvalidTypeError(\"long\")",
"def getLong(self, name: unicode) -> long:\n ...",
"def getint(self, option, default=None):\n\t\treturn self._get_raw(option, 'int', default)",
"def getint(self, option):\n return getint(self.name, option)",
"def getint(self, section, option, default=None):\r\n return self.get(section, option, type=int, default=default)",
"def to_long_int(val):\n return long(val) if six.PY2 else int(val)",
"def getint(self, option, argument=None):\n value = self.get(option, argument)\n if value: return int(value)\n else: return 0",
"def convertToLong(boolean: bool) -> int:\n ...",
"def setLong(self, name: unicode, value: long) -> None:\n ...",
"def getLong(self, addr: ghidra.program.model.address.Address) -> long:\n ...",
"def long_attr(attr):\n try:\n val = long(attr, 0)\n except ValueError:\n raise EzXMLError(\"%s did not parse as an integer\" % attr)\n return val",
"def field_to_long(value):\n if isinstance(value, (int, long)):\n return long(value)\n elif isinstance(value, basestring):\n return bytes_to_long(from_hex(value))\n else:\n return None",
"def getint(self, section, option):\n return int(self.get(section, option))",
"def read_long_long(data):\n s_type = \"=%s\" % get_type(\"long_long\")\n return struct.unpack(s_type, data.read(8))[0]",
"def read_long(self):\n return self._packers[\"l\"].unpack(self.read(4))[0]",
"def option(number, default='no'):\n return answer(number).get('options', default)",
"def getInt(self, section, option, default=0):\n return self.get(section, option, default, int)",
"def get_long(self, key):\n if self._handle is None:\n raise Exception(\"GRIB file %s not open\" % (self.fname,))\n\n val = ctypes.c_long()\n rc = grib_get_long(self._handle, key, ctypes.byref(val))\n if rc:\n raise Exception(\"grib_get_long() failed: %d\" % (rc,))\n return val.value",
"def get_long(self, key):\n if self._handle is None:\n raise Exception(\"GRIB file %s not open\" % (self.fname,))\n\n val = ctypes.c_long()\n rc = grib_get_long(self._handle, key, ctypes.byref(val))\n if rc:\n raise Exception(\"grib_get_long() failed: %d\" % (rc,))\n return val.value",
"def find_option(number):\n if not isinstance(number, int):\n raise TypeError(number)\n if not ((0 <= number) and (number <= 65535)):\n raise ValueError(number)\n return _OptionRegistry.get(number, None)",
"def getLong(self, address: ghidra.program.model.address.Address) -> long:\n ...",
"def test_ulong_long_int(self):\n self.failUnlessEqual(self.callFunc('encode_longlong', self.const_integer), self.const_integer_long_long_encoded, 'long long encoding FAILED...')",
"def nextLong(self) -> \"long\":\n raise NotImplementedError",
"def read_long(self):\n a, b, c, d = self.read_list(4)\n return a << 24 | b << 16 | c << 8 | d",
"def setLong(self, addr: ghidra.program.model.address.Address, value: long) -> None:\n ...",
"def getLong(self, addr: ghidra.program.model.address.Address, bigEndian: bool) -> long:\n ...",
"def config_get_int(section, option):\n return __CONFIG.getint(section, option)",
"def validateLong(sValue, lMin = 0, lMax = None, aoNilValues = tuple([long(-1), None, '']), fAllowNull = True):\n if sValue in aoNilValues:\n if fAllowNull:\n return (None if sValue is None else aoNilValues[0], None);\n return (sValue, 'Mandatory.');\n try:\n if utils.isString(sValue):\n lValue = long(sValue, 0);\n else:\n lValue = long(sValue);\n except:\n return (sValue, 'Not a long integer');\n\n if lValue in aoNilValues:\n return (aoNilValues[0], None if fAllowNull else 'Mandatory.');\n\n if lMin is not None and lValue < lMin:\n return (lValue, 'Value too small (min %d)' % (lMin,));\n elif lMax is not None and lValue > lMax:\n return (lValue, 'Value too high (max %d)' % (lMax,));\n return (lValue, None);"
] | [
"0.70192164",
"0.68363434",
"0.6702272",
"0.66861874",
"0.6630546",
"0.66111416",
"0.6431476",
"0.6385897",
"0.6310543",
"0.628531",
"0.6267423",
"0.62353796",
"0.6210837",
"0.61966425",
"0.6178884",
"0.6079542",
"0.60778195",
"0.60364276",
"0.59944767",
"0.59922874",
"0.59922874",
"0.595521",
"0.59507596",
"0.5885261",
"0.58822787",
"0.58808017",
"0.58609617",
"0.58520585",
"0.57999116",
"0.57695496"
] | 0.8568428 | 0 |
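The getlong variant above depends on Python 2's long() builtin. A hedged sketch of a version that also runs on Python 3, where plain int is already arbitrary precision (getlong_compat and the cfg argument are illustrative names, and cfg is assumed to expose the same three-argument get() as the record):

    try:
        long_type = long      # Python 2: separate arbitrary-precision type
    except NameError:
        long_type = int       # Python 3: plain int is already unbounded

    def getlong_compat(cfg, option, default=None, section=None):
        # mirrors the record's getlong, but works on either interpreter
        return long_type(cfg.get(option, default, section))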
Consolidated control for all the little global control flags scattered through the libraries. This isn't a particularly good place for this function to live, but it has to live somewhere and making it a method of the config parser from which it gets all of its data is less silly than the available alternatives. | def set_global_flags(self):
import rpki.http, rpki.x509, rpki.sql, rpki.async, rpki.log
try:
rpki.http.debug_http = self.getboolean("debug_http")
except ConfigParser.NoOptionError:
pass
try:
rpki.http.want_persistent_client = self.getboolean("want_persistent_client")
except ConfigParser.NoOptionError:
pass
try:
rpki.http.want_persistent_server = self.getboolean("want_persistent_server")
except ConfigParser.NoOptionError:
pass
try:
rpki.http.use_adns = self.getboolean("use_adns")
except ConfigParser.NoOptionError:
pass
try:
rpki.http.enable_ipv6_clients = self.getboolean("enable_ipv6_clients")
except ConfigParser.NoOptionError:
pass
try:
rpki.http.enable_ipv6_servers = self.getboolean("enable_ipv6_servers")
except ConfigParser.NoOptionError:
pass
try:
rpki.x509.CMS_object.debug_cms_certs = self.getboolean("debug_cms_certs")
except ConfigParser.NoOptionError:
pass
try:
rpki.sql.sql_persistent.sql_debug = self.getboolean("sql_debug")
except ConfigParser.NoOptionError:
pass
try:
rpki.async.timer.gc_debug = self.getboolean("gc_debug")
except ConfigParser.NoOptionError:
pass
try:
rpki.async.timer.run_debug = self.getboolean("timer_debug")
except ConfigParser.NoOptionError:
pass
try:
rpki.x509.XML_CMS_object.dump_outbound_cms = rpki.x509.DeadDrop(self.get("dump_outbound_cms"))
except ConfigParser.NoOptionError:
pass
try:
rpki.x509.XML_CMS_object.dump_inbound_cms = rpki.x509.DeadDrop(self.get("dump_inbound_cms"))
except ConfigParser.NoOptionError:
pass
try:
rpki.async.gc_summary(self.getint("gc_summary"), self.getint("gc_summary_threshold", 0))
except ConfigParser.NoOptionError:
pass
try:
rpki.log.enable_tracebacks = self.getboolean("enable_tracebacks")
except ConfigParser.NoOptionError:
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _GclStyleSettings(self):\n settings = {\n 'port': self.GetCodeReviewSetting('TRYSERVER_HTTP_PORT'),\n 'host': self.GetCodeReviewSetting('TRYSERVER_HTTP_HOST'),\n 'svn_repo': self.GetCodeReviewSetting('TRYSERVER_SVN_URL'),\n 'gerrit_url': self.GetCodeReviewSetting('TRYSERVER_GERRIT_URL'),\n 'git_repo': self.GetCodeReviewSetting('TRYSERVER_GIT_URL'),\n 'project': self.GetCodeReviewSetting('TRYSERVER_PROJECT'),\n # Primarily for revision=auto\n 'revision': self.GetCodeReviewSetting('TRYSERVER_REVISION'),\n 'root': self.GetCodeReviewSetting('TRYSERVER_ROOT'),\n 'patchlevel': self.GetCodeReviewSetting('TRYSERVER_PATCHLEVEL'),\n }\n logging.info('\\n'.join(['%s: %s' % (k, v)\n for (k, v) in settings.iteritems() if v]))\n for (k, v) in settings.iteritems():\n # Avoid overwriting options already set using command line flags.\n if v and getattr(self.options, k) is None:\n setattr(self.options, k, v)",
"def base_settings():\n return \"\"\"\n iota = True\n rho = False\n omega = True\n chi = True\n pini = False\n\n emr = 0\n constrain_omega = 1\n iota.at_specific = 0\n iota.min = 0.0001\n iota.age_cnt = 2\n iota.time_cnt = 2\n omega.at_specific = 1\n omega.min = 0.0001\n omega.age_cnt = 0\n omega.time_cnt = 0\n chi.at_specific = 0\n chi.min = 0.0001\n chi.age_cnt = 1\n chi.time_cnt = 2\n drill_start = 0\n drill_end = -1\n re.iota = all\n re.omega = all\n re.chi = all\n study.0 = False\n study.11 = True\n study.11.at_specific = 0\n study.11.age_cnt = 1\n study.11.time_cnt = 1\n study.11.covtype = rate_value\n study.11.rate = chi\n study.1604 = True\n study.1604.at_specific = 0\n study.1604.age_cnt = 1\n study.1604.time_cnt = 1\n study.1604.covtype = meas_std\n country.156 = True\n country.156.at_specific = 0\n country.156.age_cnt = 1\n country.156.time_cnt = 1\n country.156.covtype = rate_value\n country.156.rate = iota\n country.1998 = True\n country.1998.at_specific = 0\n country.1998.age_cnt = 1\n country.1998.time_cnt = 1\n country.1998.covtype = meas_std\n job_idx = 0\n \"\"\"",
"def AutomagicalSettings(self):\n # Try to find gclient or repo root first.\n if not self.options.no_search:\n self.toplevel_root = gclient_utils.FindGclientRoot(self.checkout_root)\n if self.toplevel_root:\n logging.info('Found .gclient at %s' % self.toplevel_root)\n else:\n self.toplevel_root = gclient_utils.FindFileUpwards(\n os.path.join('..', '.repo'), self.checkout_root)\n if self.toplevel_root:\n logging.info('Found .repo dir at %s'\n % os.path.dirname(self.toplevel_root))\n\n # Parse TRYSERVER_* settings from codereview.settings before falling back\n # on setting self.options.root manually further down. Otherwise\n # TRYSERVER_ROOT would never be used in codereview.settings.\n self._GclStyleSettings()\n\n if self.toplevel_root and not self.options.root:\n assert os.path.abspath(self.toplevel_root) == self.toplevel_root\n self.options.root = gclient_utils.PathDifference(self.toplevel_root,\n self.checkout_root)\n else:\n self._GclStyleSettings()",
"def RPC_DigitizationToolCommonCfg(flags, name=\"RpcDigitizationTool\", **kwargs):\n from MuonConfig.MuonCondAlgConfig import RpcCondDbAlgCfg # MT-safe conditions access\n acc = RpcCondDbAlgCfg(flags)\n if flags.Digitization.DoXingByXingPileUp:\n kwargs.setdefault(\"FirstXing\", RPC_FirstXing())\n kwargs.setdefault(\"LastXing\", RPC_LastXing())\n kwargs.setdefault(\"OutputObjectName\", \"RPC_DIGITS\")\n if flags.Digitization.PileUpPremixing:\n kwargs.setdefault(\"OutputSDOName\", flags.Overlay.BkgPrefix + \"RPC_SDO\")\n else:\n kwargs.setdefault(\"OutputSDOName\", \"RPC_SDO\")\n # config\n kwargs.setdefault(\"DeadTime\", 100)\n kwargs.setdefault(\"PatchForRpcTime\", True)\n # kwargs.setdefault(\"PatchForRpcTimeShift\", 9.6875)\n kwargs.setdefault(\"PatchForRpcTimeShift\", 12.5)\n kwargs.setdefault(\"turnON_efficiency\", True)\n kwargs.setdefault(\"turnON_clustersize\", True)\n kwargs.setdefault(\"testbeam_clustersize\", 0)\n kwargs.setdefault(\"ClusterSize1_2uncorr\", 0)\n kwargs.setdefault(\"CutProjectedTracks\", 100)\n kwargs.setdefault(\"RPCInfoFromDb\", True)\n kwargs.setdefault(\"Efficiency_fromCOOL\", True)\n kwargs.setdefault(\"EfficiencyPatchForBMShighEta\", False)\n kwargs.setdefault(\"ClusterSize_fromCOOL\", True)\n kwargs.setdefault(\"DumpFromDbFirst\", False)\n kwargs.setdefault(\"PanelId_OFF_fromlist\", False)\n kwargs.setdefault(\"PanelId_OK_fromlist\", False)\n kwargs.setdefault(\"IgnoreRunDependentConfig\", False)\n kwargs.setdefault(\"PrintCalibrationVector\",False )\n kwargs.setdefault(\"PhiAndEtaEff_A\",[0.938, 0.938, 0.938, 0.938, 0.938, 0.938, 0.938, 0.938, 0.938])\n kwargs.setdefault(\"OnlyPhiEff_A\" ,[0.022, 0.022, 0.022, 0.022, 0.022, 0.022, 0.022, 0.022, 0.022])\n kwargs.setdefault(\"OnlyEtaEff_A\" ,[0.022, 0.022, 0.022, 0.022, 0.022, 0.022, 0.022, 0.022, 0.022])\n kwargs.setdefault(\"PhiAndEtaEff_C\",[0.938, 0.938, 0.938, 0.938, 0.938, 0.938, 0.938, 0.938, 0.938])\n kwargs.setdefault(\"OnlyPhiEff_C\" ,[0.022, 0.022, 0.022, 0.022, 0.022, 0.022, 0.022, 0.022, 0.022])\n kwargs.setdefault(\"OnlyEtaEff_C\" ,[0.022, 0.022, 0.022, 0.022, 0.022, 0.022, 0.022, 0.022, 0.022])\n kwargs.setdefault(\"FracClusterSize1_A\", [0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664])\n kwargs.setdefault(\"FracClusterSize2_A\", [0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986])\n kwargs.setdefault(\"FracClusterSizeTail_A\",[0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035 ])\n kwargs.setdefault(\"MeanClusterSizeTail_A\",[0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598])\n kwargs.setdefault(\"FracClusterSize1_C\", [0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664])\n kwargs.setdefault(\"FracClusterSize2_C\", [0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986])\n kwargs.setdefault(\"FracClusterSizeTail_C\",[0.13035, 0.13035, 0.13035, 0.13035, 
0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035 ])\n kwargs.setdefault(\"MeanClusterSizeTail_C\",[0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598])\n RpcDigitizationTool = CompFactory.RpcDigitizationTool\n acc.setPrivateTools(RpcDigitizationTool(name, **kwargs))\n return acc",
"def parseFlags(self):\n # Blank return value.\n retVal = \"\"\n \n try:\n # Store flags as we parse them.\n allFlags = []\n \n # Get the accumulator flag.\n accFlag = self.__flags & self.f_accum\n trendFlag = self.__flags & self.f_trend\n modeFlag = self.__flags & self.f_mode\n \n # Complete set of readings?\n if accFlag == self.f_accum_complete:\n # Completed loading values into the accumulator.\n allFlags.append('C')\n elif accFlag == self.f_accum_accum:\n # Still accumulating.\n allFlags.append('A')\n elif accFlag == self.f_accum_unk:\n # Unknown.\n allFlags.append('?')\n else:\n # Bad value.\n allFlags.append('!')\n \n # Trend?\n if (trendFlag) == self.f_trend_stable:\n # Readings stable.\n allFlags.append('S')\n elif (trendFlag) == self.f_trend_up:\n # Still accumulating.\n allFlags.append('U')\n elif (trendFlag) == self.f_trend_dn:\n # Still accumulating.\n allFlags.append('D')\n elif (trendFlag) == self.f_trend_unk:\n # Still accumulating.\n allFlags.append('?')\n else:\n # Bad value.\n allFlags.append('!')\n \n # Mode?\n if modeFlag == self.f_mode_fast:\n # Fast\n allFlags.append('F')\n elif modeFlag == self.f_mode_slow:\n # Slow\n allFlags.append('S')\n elif modeFlag == self.f_mode_counter:\n # Stream\n allFlags.append('C')\n elif modeFlag == self.f_mode_scaler:\n # Roll\n allFlags.append('L')\n else:\n # Bad value.\n allFlags.append('!')\n \n # Build a nice string.\n retVal = ''.join(allFlags)\n \n \n except:\n raise\n \n # Return value.\n return retVal",
"def __process_flags(self, flags: int) -> Dict[str, bool]:\n return {\n 'ns': True if flags & 0x100 else False,\n 'cwr': True if flags & 0x080 else False,\n 'ece': True if flags & 0x040 else False,\n 'urg': True if flags & 0x020 else False,\n 'ack': True if flags & 0x010 else False,\n 'psh': True if flags & 0x008 else False,\n 'rst': True if flags & 0x004 else False,\n 'syn': True if flags & 0x002 else False,\n 'fin': True if flags & 0x001 else False,\n }",
"def get_flags(cls):\n return cls.get_short_flag(), cls.get_flag()",
"def _flags(self):\n done, data = self._request('GE')\n if done:\n flags = int(data[1], 16)\n else:\n raise EvseError\n return {\n 'service_level': (flags & 0x0001) + 1,\n 'diode_check': not flags & 0x0002,\n 'vent_required': not flags & 0x0004,\n 'ground_check': not flags & 0x0008,\n 'stuck_relay_check': not flags & 0x0010,\n 'auto_service_level': not flags & 0x0020,\n 'auto_start': not flags & 0x0040,\n 'serial_debug': not not flags & 0x0080,\n 'lcd_type': 'monochrome' if flags & 0x0100 else 'rgb',\n 'gfi_self_test': not flags & 0x0200\n }",
"def __options(self):\n\t\ta = 1 if self.random else 0\n\t\tb = 2 if self.topoftheday else 0\n\t\tc = 4 if self.offline else 0\n\t\treturn a+b+c",
"def versatileOptions():\r\n return tuple(sorted(i[0] for i in list(Options.defaults().items()) if i[1].find(' #v ') > 0))",
"def preprocess_settings(self, eventlist):\n\n # cache some stuff?\n self.controllerroot = self.settings.get_subvalue(mconst.DEF_SETTINGSEC_config, mconst.DEF_SETTINGNAME_controllerroot)\n # pack manager settings\n self.comp('packmanager').set_directories( self.get_root_pack_directory_list() + self.get_site_pack_directory_list() )\n self.comp('packmanager').set_packsettings( self.settings.get_value(mconst.DEF_SETTINGSEC_packs) )\n self.comp('packmanager').set_default_packsettings(mconst.DEF_SETTINGVAL_default_pack_settings)\n self.comp('packmanager').set_flag_loadsetuptoolspacks(self.settings.get_subvalue(mconst.DEF_SETTINGSEC_config, mconst.DEF_SETTINGNAME_flag_importsetuptoolspacks, mconst.DEF_SETTINGVAL_flag_importsetuptoolspacks))\n # database manager settings\n self.comp('dbmanager').set_databasesettings( self.settings.get_value(mconst.DEF_SETTINGSEC_database) )\n # isenabled flag\n self.isenabled = self.settings.get_subvalue(mconst.DEF_SETTINGSEC_config, mconst.DEF_SETTINGNAME_isenabled, self.isenabled)\n self.siteurl_relative = self.settings.get_subvalue(mconst.DEF_SETTINGSEC_config, mconst.DEF_SETTINGNAME_siteurl_relative, self.siteurl_relative)",
"def common_options(self):\n return self._common_options",
"def __get_options(self):\n for sect in self.file_parser.sections():\n if self.file_parser.has_option(sect, 'implementation'):\n selected_imp = self.file_parser.get(sect, 'implementation')\n imptype = self.file_parser.get(sect, 'optype')\n # pylint: disable = E1103\n enabled = self.file_parser.get(sect, 'enabled').lower()\n # pylint: enable = E1103\n if enabled == 'always':\n stateval = True\n permanent = True\n elif enabled == 'true':\n stateval = True\n permanent = False\n else:\n stateval = False\n permanent = False\n\n if self.file_parser.has_option(sect, 'id'):\n _id = self.file_parser.get(sect, 'id')\n self.opt_dict[sect]['id'] = _id\n\n self.opt_dict[sect]['permanent'] = permanent\n self.opt_dict[sect]['imptype'] = imptype\n if stateval == True:\n imp_unavailable = (selected_imp in self.imp2opt_dict) and (\n self.imp2opt_dict[selected_imp] != 'none' )\n if selected_imp == 'none' or imp_unavailable:\n self.opt_dict[sect]['enabled'] = False\n self.opt_dict[sect]['selected_imp'] = 'none'\n else:\n self.opt_dict[sect]['enabled'] = True\n self.set_imp(sect, selected_imp)\n# dbmsg = 'Add imp2opt_dict[{0}] = {1}'\n# print dbmsg.format(selected_imp, sect)\n else:\n self.opt_dict[sect]['enabled'] = False\n self.opt_dict[sect]['selected_imp'] = 'none'",
"def system_protection_config():\n\n\tprint_section_header(\"GENERAL SYSTEM PROTECTION\", Fore.BLUE)\n\n\t# Enable Gatekeeper\n\tif prompt_yes_no(top_line=\"-> Enable Gatekeeper?\",\n\t bottom_line=\"Defend against malware by enforcing code signing and verifying downloaded applications before letting them to run.\"):\n\t\tprint_confirmation(\"Enabling Gatekeeper...\")\n\t\tsp.run('sudo spctl --master-enable', shell=True, stdout=sp.PIPE)\n\t\tsp.run('sudo spctl --enable --label \"Developer ID\"', shell=True, stdout=sp.PIPE)\n\n\t# Disable automatic software whitelisting\n\tif prompt_yes_no(top_line=\"-> Prevent automatic software whitelisting?\",\n\t bottom_line=\"Both built-in and downloaded software will require user approval for whitelisting.\"):\n\t\tprint_confirmation(\"Preventing automatic whitelisting...\")\n\t\tsp.run(['sudo', '/usr/libexec/ApplicationFirewall/socketfilterfw', '--setallowsigned', 'off'], stdout=sp.PIPE)\n\t\tsp.run(['sudo', '/usr/libexec/ApplicationFirewall/socketfilterfw', '--setallowsignedapp', 'off'], stdout=sp.PIPE)\n\n\t# Captive Portal\n\tif prompt_yes_no(top_line=\"-> Disable Captive Portal Assistant and force login through browser on untrusted networks?\",\n\t bottom_line=\"Captive Portal could be triggered and direct you to a malicious site WITHOUT any user interaction.\"):\n\t\tprint_confirmation(\"Disabling Captive Portal Assistant...\")\n\t\tsp.run(['sudo', 'defaults', 'write', '/Library/Preferences/SystemConfiguration/com.apple.captive.control', 'Active', '-bool', 'false'], stdout=sp.PIPE)",
"def default_controls(self):\n\t\tcontrol_list = []\n\t\tconfig = ConfigParser.ConfigParser()\n\t\tconfig.read(\"./config.ini\")\n\t\tcontrols = config.options(\"default_controls\")\n\t\tfor c in controls:\n\t\t\ttry: control_list.append( config.get(\"default_controls\", c) )\n\t\t\texcept:\n\t\t\t\tprint \"ERROR: missing control settings. Check config.ini.\"\n\t\t\t\traise(SystemExit)\n\t\treturn control_list",
"def use_flags(*funcs):\n\n global GLOBAL_STATUS\n if funcs:\n GLOBAL_STATUS.discard('ERRORS')\n GLOBAL_STATUS.add('FLAGS')\n else:\n GLOBAL_STATUS.discard('ERRORS')\n GLOBAL_STATUS.discard('FLAGS')\n\n for name in _get_func_names(funcs):\n if 'error' not in name and 'flag' not in name:\n globals()[name] = globals()[name].flag",
"def _tools(self):\n # sign on\n yield \"\"\n yield \"# tools\"\n yield \"# librarian\"\n yield \"ar := ar\"\n yield \"ar.flags.create := rc\"\n yield \"ar.flags.extract := x\"\n yield \"ar.flags.remove := d\"\n yield \"ar.flags.update := ru\"\n yield \"ar.create := $(ar) $(ar.flags.create)\"\n yield \"ar.extract := $(ar) $(ar.flags.extract)\"\n yield \"ar.remove := $(ar) $(ar.flags.remove)\"\n yield \"ar.update := $(ar) $(ar.flags.update)\"\n yield \"\"\n yield \"# cwd\"\n yield \"cd := cd\"\n yield \"\"\n yield \"# file attributes\"\n yield \"chgrp := chgrp\"\n yield \"chgrp.flags.recurse := -R\"\n yield \"chgrp.recurse := $(chgrp) $(chgrp.flags.recurse)\"\n yield \"\"\n yield \"chmod := chmod\"\n yield \"chmod.flags.recurse := -R\"\n yield \"chmod.flags.write := +w\"\n yield \"chmod.recurse := $(chmod) $(chmod.flags.recurse)\"\n yield \"chmod.write := $(chmod) $(chmod.flags.write)\"\n yield \"chmod.write-recurse := $(chmod.recurse) $(chmod.flags.write)\"\n yield \"\"\n yield \"chown := chown\"\n yield \"chown.flags.recurse := -R\"\n yield \"chown.recurse := $(chown) $(chown.flags.recurse)\"\n yield \"\"\n yield \"# copy\"\n yield \"cp := cp\"\n yield \"cp.flags.force := -f\"\n yield \"cp.flags.recurse := -r\"\n yield \"cp.flags.force-recurse := -fr\"\n yield \"cp.force := $(cp) $(cp.flags.force)\"\n yield \"cp.recurse := $(cp) $(cp.flags.recurse)\"\n yield \"cp.force-recurse := $(cp) $(cp.flags.force-recurse)\"\n yield \"\"\n yield \"# date\"\n yield \"date := date\"\n yield \"date.date := $(date) '+%Y-%m-%d'\"\n yield \"date.stamp := $(date) -u\"\n yield \"date.year := $(date) '+%Y'\"\n yield \"\"\n yield \"# diff\"\n yield \"diff := diff\"\n yield \"\"\n yield \"# echo\"\n yield \"echo := echo\"\n yield \"\"\n yield \"# git\"\n yield \"git := git\"\n yield 'git.hash := $(git) log --format=format:\"%h\" -n 1'\n yield \"git.tag := $(git) describe --tags --long --always\"\n yield \"\"\n yield \"# loader\"\n yield \"ld := ld\"\n yield \"ld.flags.out := -o\"\n yield \"ld.flags.shared := -shared\"\n yield \"ld.out := $(ld) $(ld.flags.out)\"\n yield \"ld.shared := $(ld) $(ld.flags.shared)\"\n yield \"\"\n yield \"# links\"\n yield \"ln := ln\"\n yield \"ln.flags.soft := -s\"\n yield \"ln.soft := $(ln) $(ln.flags.soft)\"\n yield \"\"\n yield \"# directories\"\n yield \"mkdir := mkdir\"\n yield \"mkdir.flags.make-parents := -p\"\n yield \"mkdirp := $(mkdir) $(mkdir.flags.make-parents)\"\n yield \"\"\n yield \"# move\"\n yield \"mv := mv\"\n yield \"mv.flags.force := -f\"\n yield \"mv.force := $(mv) $(mv.flags.force)\"\n yield \"\"\n yield \"# ranlib\"\n yield \"ranlib := ranlib\"\n yield \"ranlib.flags :=\"\n yield \"\"\n yield \"# remove\"\n yield \"rm := rm\"\n yield \"rm.flags.force := -f\"\n yield \"rm.flags.recurse := -r\"\n yield \"rm.flags.force-recurse := -rf\"\n yield \"rm.force := $(rm) $(rm.flags.force)\"\n yield \"rm.recurse := $(rm) $(rm.flags.recurse)\"\n yield \"rm.force-recurse := $(rm) $(rm.flags.force-recurse)\"\n yield \"\"\n yield \"rmdir := rmdir\"\n yield \"\"\n yield \"# rsync\"\n yield \"rsync := rsync\"\n yield \"rsync.flags.recurse := -ruavz --progress --stats\"\n yield \"rsync.recurse := $(rsync) $(rsync.flags.recurse)\"\n yield \"\"\n yield \"# sed\"\n yield \"sed := sed\"\n yield \"\"\n yield \"# ssh\"\n yield \"ssh := ssh\"\n yield \"scp := scp\"\n yield \"scp.flags.recurse := -r\"\n yield \"scp.recurse := $(scp) $(scp.flags.recurse)\"\n yield \"\"\n yield \"# tags\"\n yield \"tags := true\"\n yield \"tags.flags :=\"\n yield \"tags.home :=\"\n yield \"tags.file := 
$(tags.home)/TAGS\"\n yield \"\"\n yield \"# tar\"\n yield \"tar := tar\"\n yield \"tar.flags.create := -cvj -f\"\n yield \"tar.create := $(tar) $(tar.flags.create)\"\n yield \"\"\n yield \"# TeX and associated tools\"\n yield \"tex.tex := tex\"\n yield \"tex.latex := latex\"\n yield \"tex.pdflatex := pdflatex\"\n yield \"tex.bibtex := bibtex\"\n yield \"tex.dvips := dvips\"\n yield \"tex.dvipdf := dvipdf\"\n yield \"\"\n yield \"# empty file creation and modification time updates\"\n yield \"touch := touch\"\n yield \"\"\n yield \"# yacc\"\n yield \"yacc := yacc\"\n yield \"yacc.c := y.tab.c\"\n yield \"yacc.h := y.tab.h\"\n\n # all done\n return",
"def ini_get_all():\n raise NotImplementedError()",
"def TRT_DigitizationBasicCfg(flags, **kwargs):\n acc = ComponentAccumulator()\n if \"PileUpTools\" not in kwargs:\n PileUpTools = acc.popToolsAndMerge(TRT_DigitizationToolCfg(flags))\n kwargs[\"PileUpTools\"] = PileUpTools\n acc.merge(PileUpToolsCfg(flags, **kwargs))\n return acc",
"def _CommonOptions(self, p):\n super()._CommonOptions(p, opt_v=False)",
"def __build_global_cfg(globalcfg):\n cfglst = []\n\n # Set Global configuration\n gbl_prefix = 'keylset global_config '\n __append_line(cfglst, '#Generated from Cafe')\n timestamp = time.strftime(\"%m-%d-%y %H:%M:%S\", time.localtime())\n __append_line(cfglst, '#TimeStamp: ' + timestamp)\n __append_line(cfglst, gbl_prefix + 'ChassisName {' + globalcfg['ChassisName'] + '}')\n __append_line(cfglst, gbl_prefix + 'RandomSeed ' + str(random.randint(1, 999999999)))\n __append_line(cfglst, \"\")\n __append_line(cfglst, '#LogsAndResultsInfo Global Options')\n # TODO: Remove hardcoded log directory\n __append_line(cfglst, gbl_prefix + 'LogsDir C:/Users/Testmin/VeriWave/WaveApps/Results')\n __append_line(cfglst, gbl_prefix + 'GeneratePdfReport True')\n __append_line(cfglst, \"\")\n __append_line(cfglst, '#Test Traffic Global Options')\n __append_line(cfglst, gbl_prefix + 'Source {' + globalcfg['Source'] + '}')\n __append_line(cfglst, gbl_prefix + 'Destination {' + globalcfg['Destination'] + '}')\n __append_line(cfglst, gbl_prefix + 'Ports {' + globalcfg['Ports'] + '}')\n # __append_line(cfglst, gbl_prefix + 'MappingOption 0')\n __append_line(cfglst, gbl_prefix + 'PayloadData None')\n # __append_line(cfglst, gbl_prefix + 'DestinationPort ' + globalcfg['DestinationPort'])\n # __append_line(cfglst, gbl_prefix + 'SourcePort ' + globalcfg['SourcePort'])\n #__append_line(cfglst, gbl_prefix + 'TestList {unicast_unidirectional_throughput}')\n __append_line(cfglst, gbl_prefix + 'TestList {' + globalcfg['TestList'] + '}')\n __append_line(cfglst, gbl_prefix + 'Direction {' + globalcfg['Direction'] + '}')\n # Assumption there always be a WiFi group present\n __append_line(cfglst, gbl_prefix + 'Channel {' + globalcfg['Channel'] + '}')\n __append_line(cfglst, gbl_prefix + 'WirelessGroupCount ' + str(globalcfg['WirelessGroupCount']))\n __append_line(cfglst, gbl_prefix + 'FlowType UDP')\n __append_line(cfglst, gbl_prefix + 'ArpNumRetries 5')\n __append_line(cfglst, gbl_prefix + 'ArpRate 100')\n __append_line(cfglst, gbl_prefix + 'ArpTimeout 5')\n __append_line(cfglst, gbl_prefix + 'NumTrials 1')\n if 'SettleTime' in globalcfg:\n __append_line(cfglst, gbl_prefix + 'SettleTime ' + globalcfg['SettleTime'])\n else:\n __append_line(cfglst, gbl_prefix + 'SettleTime 3')\n if 'LossTolerance' in globalcfg:\n __append_line(cfglst, gbl_prefix + 'LossTolerance ' + globalcfg['LossTolerance'])\n if 'TrialDuration' in globalcfg:\n __append_line(cfglst, gbl_prefix + 'TrialDuration ' + globalcfg['TrialDuration'])\n __append_line(cfglst, gbl_prefix + 'TestDurationSec ' + globalcfg['TrialDuration'])\n __append_line(cfglst, \"\")\n\n return cfglst",
"def read_flags():\n return flag_args",
"def _control_predefined(operation, num_ctrl_qubits):\n if operation.name == 'x' and num_ctrl_qubits in [1, 2]:\n if num_ctrl_qubits == 1:\n import qiskit.extensions.standard.cx\n cgate = qiskit.extensions.standard.cx.CnotGate()\n else:\n import qiskit.extensions.standard.ccx\n cgate = qiskit.extensions.standard.ccx.ToffoliGate()\n elif operation.name == 'y':\n import qiskit.extensions.standard.cy\n cgate = qiskit.extensions.standard.cy.CyGate()\n elif operation.name == 'z':\n import qiskit.extensions.standard.cz\n cgate = qiskit.extensions.standard.cz.CzGate()\n elif operation.name == 'h':\n import qiskit.extensions.standard.ch\n cgate = qiskit.extensions.standard.ch.CHGate()\n elif operation.name in {'rx', 'ry', 'rz'}:\n if operation.name == 'rx':\n import qiskit.extensions.standard.crx\n cgate = qiskit.extensions.standard.crx.CrxGate(*operation.params)\n elif operation.name == 'ry':\n import qiskit.extensions.standard.cry\n cgate = qiskit.extensions.standard.cry.CryGate(*operation.params)\n else: # operation.name == 'rz'\n import qiskit.extensions.standard.crz\n cgate = qiskit.extensions.standard.crz.CrzGate(*operation.params)\n if num_ctrl_qubits == 1:\n return cgate\n else:\n # only predefined for one control qubit\n return cgate.control(num_ctrl_qubits - 1)\n elif operation.name == 'swap':\n import qiskit.extensions.standard.cswap\n cgate = qiskit.extensions.standard.cswap.FredkinGate()\n elif operation.name == 'u1':\n import qiskit.extensions.standard.cu1\n cgate = qiskit.extensions.standard.cu1.Cu1Gate(*operation.params)\n elif operation.name == 'u3':\n import qiskit.extensions.standard.cu3\n cgate = qiskit.extensions.standard.cu3.Cu3Gate(*operation.params)\n elif operation.name == 'cx':\n import qiskit.extensions.standard.ccx\n cgate = qiskit.extensions.standard.ccx.ToffoliGate()\n else:\n raise QiskitError('No standard controlled gate for \"{}\"'.format(\n operation.name))\n return cgate",
"def common_options(func):\n\n def parse_preset(ctx, param, value):\n return PRESETS.get(value, (None, None))\n\n def parse_private(ctx, param, value):\n return hex_from_b64(value) if value else None\n\n func = click.option('--private', default=None, help='Private.', callback=parse_private)(func)\n\n func = click.option(\n '--preset',\n default=None, help='Preset ID defining prime and generator pair.',\n type=click.Choice(PRESETS.keys()), callback=parse_preset\n )(func)\n\n return func",
"def get_flags(self):\n return self.short_flag, self.long_flag",
"def options_set(self):\n\n global OPTIONS\n OPTIONS.append(config.ENABLE(self.threaded))\n OPTIONS.append(config.ENABLE(self.datasaver))\n OPTIONS.append(self.language)",
"def RPC_DigitizationBasicCfg(flags, **kwargs):\n acc = MuonGeoModelCfg(flags)\n if \"PileUpTools\" not in kwargs:\n PileUpTools = acc.popToolsAndMerge(RPC_DigitizationToolCfg(flags))\n kwargs[\"PileUpTools\"] = PileUpTools\n acc.merge(PileUpToolsCfg(flags, **kwargs))\n return acc",
"def process_flags(self):\n self.parse_search_terms(self.search_terms)\n \n # If randomisation is explicitly set, we enable it outright.. if not\n # it depends on whether we've provided search terms or not\n if self.force_randomise:\n self.randomise = True\n elif self.search_terms:\n self.randomise = False\n \n if self.update_index:\n self._update_index()\n \n if self.list_only:\n self.music_client = \"echo\" # FIXME: unix-only!\n self.loop_songs = False",
"def handleSpecialCERNMergeSettings(self, funcName):\n if self.getCmsswVersion().startswith(\"CMSSW_7_5\") and False:\n self.logger.info(\"Using fastCloning/lazydownload\")\n self.process.add_(cms.Service(\"SiteLocalConfigService\",\n overrideSourceCloneCacheHintDir=cms.untracked.string(\"lazy-download\")))\n elif funcName == \"merge\":\n self.logger.info(\"Using lazydownload\")\n self.process.add_(cms.Service(\"SiteLocalConfigService\",\n overrideSourceCacheHintDir=cms.untracked.string(\"lazy-download\")))\n return",
"def jail_global_option(action, option = ''):\n \n# find jail global optins section by comment in /etc/jail.conf\n jc = open(jailconf, 'r')\n jcl = []\n for i in jc:\n i = i.strip('\\n')\n jcl.append(i)\n\n# global option begin \n for i in jcl:\n if \"#@\" in i:\n jb = jcl.index(i) + 1\n break\n\n# global option end\n x = 0\n for i in jcl[jb:]:\n x = x + 1\n if \"#@\" in i:\n je = jb + x - 1\n break\n\n# create global options list\n lmen = [\"Number\", \"Jails Global Settnings\"]\n jail_global = []\n number_global = 0\n for i in jcl[jb:je]:\n jail_global.append([number_global, i[:-1]])\n number_global += 1\n\n# list jail global options in /etc/jail.conf\n if action == \"list\":\n print tabulate(jail_global, lmen)\n return False\n \n# remove jail global option from /etc/jail.conf\n if action == \"rm\":\n print tabulate(jail_global, lmen)\n \n if number_global == 0:\n return False\n \n while True:\n rmglobal = raw_input(\"global setting number or (!) :> \")\n if rmglobal == \"!\":\n print \" INFO: Interrupted by user\"\n return False\n\n try:\n int(rmglobal)\n except ValueError:\n print \" ERROR: Slecet valid number (%s - %s)!\" % (0, len(jail_global) - 1)\n continue\n\n if int(rmglobal) >= len(jail_global):\n print \" ERROR: Slecet valid number (%s - %s)!\" % (0, len(jail_global) - 1)\n continue\n \n# write new config with changed jails global options\n jc = open(jailconf, 'w')\n del jcl[jb+int(rmglobal)]\n jc.write('\\n'.join(jcl))\n jc.close()\n \n logvar = jail_global[int(rmglobal)][1]\n msg = \" INFO: '%s' Global setting was removed!\" % logvar\n print msg\n return False\n\n# add new jail global option at the end\n if action == \"add\":\n option.append(';')\n option = ''.join(option) # convert option list in string\n \n # write new config with changed jail local option\n jc = open(jailconf, 'w')\n jcl.insert(je, option)\n jc.write('\\n'.join(jcl))\n jc.close()\n \n msg = \" INFO: '%s' Global setting was added!\" % option[:-1]\n log(msg)\n return False"
] | [
"0.5948904",
"0.55440557",
"0.55275595",
"0.5320084",
"0.5296819",
"0.5249181",
"0.5204246",
"0.520022",
"0.51432735",
"0.5137308",
"0.5134827",
"0.5134588",
"0.5134572",
"0.5123408",
"0.50960565",
"0.5094958",
"0.50912803",
"0.50796866",
"0.50630355",
"0.50593835",
"0.5054699",
"0.5049256",
"0.5030954",
"0.5027189",
"0.50233203",
"0.50198644",
"0.50112945",
"0.5005883",
"0.5001551",
"0.50001395"
] | 0.66763616 | 0 |
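The body of set_global_flags above is one pattern repeated per flag: read an optional boolean and ignore NoOptionError. A sketch of that pattern factored into a loop (apply_optional_flags and flag_setters are hypothetical names, cfg stands for the same config object, and Python 2's ConfigParser module is used to match the record):

    import ConfigParser  # Python 2 module name, matching the record above

    def apply_optional_flags(cfg, flag_setters):
        # flag_setters maps an option name to a callable that stores the flag;
        # options absent from the config are silently skipped, which is exactly
        # what the repeated try/except blocks above do one flag at a time.
        for option, setter in flag_setters.items():
            try:
                setter(cfg.getboolean(option))
            except ConfigParser.NoOptionError:
                pass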
Checks for vertices' degrees >>> vertices_degree([[1, 0], [1, 1]], [[0, 1], [1, 0]]) (False, []) >>> vertices_degree([[1, 1], [0, 1]], [[1, 0], [1, 1]]) (True, [2, 1], [1, 2]) | def vertices_degree(graph1: list, graph2: list):
check1 = []
check2 = []
for row, _ in enumerate(graph1):
degree1 = 0
degree2 = 0
for column, _ in enumerate(graph1[row]):
if graph1[row][column] == 1:
degree1 += 1
if graph2[row][column] == 1:
degree2 += 1
check1.append(degree1)
check2.append(degree2)
if sorted(check1) == sorted(check2):
return True, check1, check2
return False, [] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _has_degree(\n self,\n degree: int,\n vertex: Vertex,\n ) -> bool:\n\n return vertex.get_id() in self._vertices_of_degree[degree]",
"def vertice_degree(self):\r\n if(self.is_empty()):\r\n raise ValueError(\"Graph is empty.\")\r\n else:\r\n if(self.__directed):\r\n degrees = {}\r\n l = list(self.__graph_dict.values())\r\n flatter = []\r\n for x in l:\r\n for y in x:\r\n flatter.append(y)\r\n\r\n for k in self.__graph_dict.keys():\r\n degrees[k] = len(self.__graph_dict[k])\r\n if(k in flatter):\r\n degrees[k] += flatter.count(k)\r\n return degrees\r\n\r\n else:\r\n degrees = {}\r\n for k in self.__graph_dict.keys():\r\n degrees[k] = len(self.__graph_dict[k])\r\n return degrees",
"def in_degree(self, vertices=None, labels=False):\n if vertices in self:\n return self._backend.in_degree(vertices)\n elif labels:\n return {v:d for v, d in self.in_degree_iterator(vertices, labels=labels)}\n else:\n return list(self.in_degree_iterator(vertices, labels=labels))",
"def return_indeg(self, vertex: np.int_):\n return sum(map(lambda x: x>0,self.__mat[:,vertex]))",
"def degree(self, v):\n self._validateVertex(v)\n return self._adj[v].size()",
"def degree(adj_mat, vertex):\n return np.sum(adj_mat[vertex][:])",
"def hasvertices(self):\n if len(self.vertices) > 0:\n return True\n else:\n return False",
"def is_clockwise(vertices):\n v = vertices\n area = ((v[1][0] - v[0][0]) * (v[1][1] + v[0][1]) +\n (v[2][0] - v[1][0]) * (v[2][1] + v[1][1]) +\n (v[0][0] - v[2][0]) * (v[0][1] + v[2][1])) / 2\n return (area > 0)",
"def vert_degree(input_vertices):\n\tvertex_map = {}\n\tfor element in input_vertices:\n\t\tvertex_map[element] = 0\n\t\tfor x in prob:\n\t\t\tfor vertex in x:\n\t\t\t\tif element == vertex:\n\t\t\t\t\tvertex_map[element] += 1\n\treturn vertex_map",
"def degree(self, v):\n self._validateVertex(v)\n return self._adj[v].size()",
"def compute_in_degrees(digraph):\n # initialize in-degrees dictionary with zero values for all vertices\n in_degree = {}\n for vertex in digraph:\n in_degree[vertex] = 0\n # consider each vertex\n for vertex in digraph:\n # amend in_degree[w] for each outgoing edge from v to w\n for neighbour in digraph[vertex]:\n in_degree[neighbour] += 1\n return in_degree",
"def in_degree_iterator(self, vertices=None, labels=False):\n if vertices is None:\n vertices = self.vertex_iterator()\n if labels:\n for v in vertices:\n yield (v, self.in_degree(v))\n else:\n for v in vertices:\n yield self.in_degree(v)",
"def test_graph_no_vertices(self):\n # initialize empty vertex graph\n vertices = []\n vertex_frame = self.context.frame.create(vertices, self.vertex_schema)\n graph = self.context.graph.create(vertex_frame, self.doc_edge_frame)\n\n # call sparktk to calculate deg cen result\n res = graph.degree_centrality()\n\n # ensure that all deg cen result values are 0 since there\n # are no valid vertices\n pandas_res = res.to_pandas()\n for (index, row) in pandas_res.iterrows():\n self.asertAlmostEqual(row[\"degree_centrality\"], 0)",
"def _calculate_degree_centrality(self, vertices, edges):\n # here we are calculating our own deg cen res on the fly\n # edge counts will store the number of edges associated with\n # each vertex\n edge_counts = {}\n\n # get the edge frame in pandas form and iterate\n edge_pandas = edges.to_pandas()\n for (index, row) in edge_pandas.iterrows():\n # extract src and dest node index\n src = int(row[\"src\"])\n dest = int(row[\"dst\"])\n # now we increment the count for that node\n # in edge_counts, or initialize it to one\n # if it doesn't exist\n if src not in edge_counts.keys():\n edge_counts[src] = 1\n else:\n edge_counts[src] = edge_counts[src] + 1\n if dest not in edge_counts.values():\n edge_counts[dest] = 1\n else:\n edge_counts[dest] = edge_counts[dest] + 1\n return edge_counts",
"def return_outdeg(self, vertex: np.int_):\n return len(self.__adj[vertex])",
"def print_degree(self):\n vertex = int(input('enter vertex: '))\n in_degree = self._graph.get_in_degree(vertex)\n out_degree = self._graph.get_out_degree(vertex)\n print('The in degree of ' + str(vertex) + ' is ' + str(in_degree))\n print('The out degree of ' + str(vertex) + ' is ' + str(out_degree))",
"def is_vertex(self):\n return True",
"def is_vertex(self):\n return True",
"def get_degrees_dictionary(edges):\n dd = {} # degrees dictionary for vertexes\n\n def append_vertex(vertex, edge_index):\n if vertex not in dd.keys():\n dd[vertex] = [1, edge_index]\n else:\n dd[vertex][0] += 1\n dd[vertex].append(edge_index)\n\n e = edges\n for i in range(len(e)):\n append_vertex(e[i][0], i)\n append_vertex(e[i][1], i)\n\n return dd",
"def is_vertex(self): \n return False",
"def deg_mat(adj_mat, size, vertices):\n deg_mat = np.zeros((size,size))\n for i in vertices:\n deg_mat[i][i] = degree(adj_mat, i)\n return deg_mat",
"def get_degree(self, vertex):\r\n if not self.is_vertex_in_graph(vertex):\r\n raise GraphException(f\"The vertex {vertex} does not exist in the graph.\")\r\n return len(self.__neighbours[vertex])",
"def is_connected(self, vertices_encountered = None, start_vertex=None):\n\n if vertices_encountered is None:\n vertices_encountered = set()\n gdict = self.__graph_dict\n vertices = list(gdict.keys()) # list is necessary in python 3\n # if empty list return\n if len(vertices) == 0 :\n return False\n if not start_vertex:\n # Choose a vertex vertex from graph as starting point\n start_vertex = vertices[0]\n vertices_encountered.add(start_vertex)\n if len(vertices_encountered) != len(vertices):\n for vertex in gdict[start_vertex]:\n if vertex not in vertices_encountered:\n if self.is_connected(vertices_encountered,vertex):\n return True\n else:\n return True\n return False",
"def is_vertex(self):\n return False",
"def pertenece(self,v):\n return v in self.vertices.keys()",
"def demukron_network_order_function(vertices: List[Vertice], adj_matrix: np.ndarray) -> np.ndarray:\n current_level = 0\n vertice_indices_set = set(range(len(vertices)))\n m = adj_matrix.sum(axis=0) # array of in-degrees\n degrees_array = np.zeros(len(vertices))\n\n while vertice_indices_set:\n zero_on_the_current_step = {i for i in vertice_indices_set if m[i] == 0}\n for i in zero_on_the_current_step:\n degrees_array[i] = current_level\n m = m - adj_matrix[i]\n vertice_indices_set = vertice_indices_set - zero_on_the_current_step\n current_level += 1\n return degrees_array",
"def getDegrees(self):\n l = []\n for node in self.getNodes():\n l.append((node, len(self.graph[node])))\n\n return l",
"def degrees(self):\n A = self.adjacency()\n A.data = np.ones(A.nnz)\n right = np.array(A.sum(1)).ravel()\n left = np.array(A.sum(0)).ravel()\n return right, left",
"def _compute_node_degrees(self):\n mes = []\n args = []\n for metaedge, matrix in self.adj_matrices.items():\n mes.append(metaedge)\n args.append(matrix)\n res = parallel_process(array=args, function=mt.calculate_degrees, n_jobs=self.n_jobs, front_num=0)\n for metaedge, (out_degree, in_degree) in zip(mes, res):\n self.out_degree[metaedge] = out_degree\n self.in_degree[metaedge] = in_degree",
"def degree(self, node):\r\n if not 0 <= node < self.size:\r\n raise ValueError(\"Cannot find degree for a node not in the graph\")\r\n return len(self.edges[node])"
] | [
"0.70981395",
"0.6648923",
"0.66187066",
"0.6453982",
"0.6208992",
"0.6075277",
"0.6049173",
"0.6037286",
"0.59577584",
"0.5948439",
"0.59199977",
"0.5896408",
"0.58028036",
"0.580261",
"0.57468504",
"0.57298976",
"0.5702665",
"0.5702665",
"0.56859505",
"0.56500363",
"0.56339896",
"0.56014055",
"0.55758584",
"0.5567647",
"0.5564437",
"0.5562019",
"0.5549129",
"0.55312765",
"0.55051035",
"0.5477032"
] | 0.6969743 | 1 |
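The degree comparison above can be reproduced compactly with row sums, since the adjacency entries are 0/1; this sketch re-runs the second doctest input from the record and reaches the same (True, [2, 1], [1, 2]) result:

    def degree_sequence(graph):
        # summing a 0/1 adjacency row counts that vertex's incident edges
        return [sum(row) for row in graph]

    g1, g2 = [[1, 1], [0, 1]], [[1, 0], [1, 1]]
    d1, d2 = degree_sequence(g1), degree_sequence(g2)
    assert (sorted(d1) == sorted(d2), d1, d2) == (True, [2, 1], [1, 2])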
r""" Wait for the user to type a character (and hit Enter). If the user enters one of the characters in letters, return that character. If the user hits Enter without entering a character, and default is specified, returns `default`, Otherwise, asks the user to enter a character again. | def _prompt(letters='yn', default=None):
import sys
while True:
try:
inputstr = sys.stdin.readline().strip()
except KeyboardInterrupt:
sys.exit(0)
if inputstr and inputstr in letters:
return inputstr
if default is not None and inputstr == '':
return default
print 'Come again?' | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _prompt(letters='yn', default=None):\n while True:\n try:\n input_text = sys.stdin.readline().strip()\n except KeyboardInterrupt:\n sys.exit(0)\n if input_text and input_text in letters:\n return input_text\n if default is not None and input_text == '':\n return default\n print('Come again?')",
"def input_with_default(prompt, default):\n response = raw_input(\"%s (Default %s) \"%(prompt, default))\n if not response:\n return default\n return response",
"def default_input(prompt, default_value):\r\n item = input(prompt + \"[Enter for \" + default_value + \"]: \").lower()\r\n if item == \"\":\r\n item = default_value\r\n return item",
"def get_guess():\n print('Choose a letter:')\n return input()",
"def default(prompt, default, validator=(lambda x: True), hint=None):\n user_input = input(\"{0} [{1}]\".format(prompt, default))\n while not validator(user_input):\n user_input = input(\"{0} [{1}]\".format(prompt, default))\n return user_input or default",
"def prompt_string(prompt=\"Enter a value\",\n default=None):\n _new = None\n while True:\n try:\n _new = str(input(f\"{prompt}? [{str(default)}]: \")) # nosec\n break\n except ValueError:\n print(\"Sorry, I didn't understand that.\")\n continue\n except KeyboardInterrupt:\n break\n return default if _new in [None, ''] else _new",
"def user_prompt(prompt, default=None):\n prompt = f\"\\n {prompt} [{default}] runs or type an amount: \"\n response = input(prompt)\n if not response and default:\n return default\n else:\n return response",
"def get_input(prompt, default=None, choices=None, option_value=None):\r\n if option_value is not None:\r\n return option_value\r\n \r\n choices = choices or []\r\n while 1:\r\n r = raw_input(prompt+' ').strip()\r\n if not r and default is not None:\r\n return default\r\n if choices:\r\n if r not in choices:\r\n r = None\r\n else:\r\n break\r\n else:\r\n break\r\n return r",
"def prompt(name, default):\n value = raw_input('%s [%s]: ' %(name, default))\n if not value:\n value = default\n return value",
"def prompt(msg, default=NO_DEFAULT, validate=None):\n while True:\n response = input(msg + \" \").strip()\n if not response:\n if default is NO_DEFAULT:\n continue\n return default\n if validate is None or validate(response):\n return response",
"def ask(question, options, default):\n assert default in options\n\n question += \" ({})? \".format(\"/\".join(o.upper() if o == default else o for o in options))\n selected = None\n while selected not in options:\n selected = input(question).strip().lower()\n if selected == \"\":\n selected = default\n else:\n if selected not in options:\n question = \"Please type '{}'{comma} or '{}': \".format(\n \"', '\".join(options[:-1]), options[-1],\n comma=',' if len(options) > 2 else '',\n )\n return selected",
"def get_input():\n letters = input('Enter letters, Enter to quit:\\n')\n return letters",
"def query_input(question, default=None, color=default_color):\n if default is None or default == '':\n prompt = ' '\n elif type(default) == str:\n prompt = flo(' [{default}] ')\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(color(question + prompt))\n choice = raw_input()\n if default is not None and choice == '':\n return default\n if choice != '':\n return choice",
"def ask_letter(self):\n letter = ' '\n while letter not in string.ascii_lowercase:\n letter = input('Write a letter:\\n')\n letter.lower()\n\n return letter",
"def text_input(self, prompt, default=None):\n try:\n user_input = self(prompt)\n if default is not None and user_input == \"\":\n return default\n except InputDisabled:\n if default is not None:\n return default\n raise\n\n return user_input",
"def inputChoice(self, question, options, hotkeys, default=None):\n options = options[:] # we don't want to edit the passed parameter\n for i in range(len(options)):\n option = options[i]\n hotkey = hotkeys[i]\n # try to mark a part of the option name as the hotkey\n m = re.search('[%s%s]' % (hotkey.lower(), hotkey.upper()), option)\n if hotkey == default:\n caseHotkey = hotkey.upper()\n else:\n caseHotkey = hotkey\n if m:\n pos = m.start()\n options[i] = '%s[%s]%s' % (option[:pos], caseHotkey,\n option[pos+1:])\n else:\n options[i] = '%s [%s]' % (option, caseHotkey)\n # loop until the user entered a valid choice\n while True:\n prompt = '%s (%s)' % (question, ', '.join(options))\n answer = self.input(prompt)\n if answer.lower() in hotkeys or answer.upper() in hotkeys:\n return answer\n elif default and answer=='': # empty string entered\n return default",
"def simple_response(prompt, default=None):\n if default is None:\n response = input(prompt + ': ')\n else:\n response = input(prompt + f' [{default}]' + ': ')\n if response != '':\n return response\n elif response == '' and default is not None:\n return default\n else:\n print('Please enter a valid response')\n return simple_response(prompt, default)",
"def process_default(self, character):\n pass",
"def get_input():\n return getch()",
"def get_guess():\n letter = input(\"Please input a letter to check\").lower()\n if len(letter) != 1:\n print(\"Please input a single letter\")\n get_guess()\n elif letter not in \"abcdefghijklmnopqrstuvxyz\":\n print (\"Only input letters\")\n get_guess()\n else:\n return letter",
"def prompt_with_options(prompt, default=None, options=None):\n\n msg = \"%s [%s]: \" % (prompt, default) if default is not None else \"%s: \" % prompt\n value = None\n while value is None:\n value = raw_input(msg).strip()\n if value:\n if options and value not in options:\n value = None\n elif default is not None:\n value = default\n\n return value",
"def guess_input(self):\n try:\n self.player_guess = input('Guess a letter: ').lower()\n Character(self.player_guess, self.selected_phrase)\n except ValueError:\n print(\"That was not a valid input. Please pick a number between 1 and 10\")\n if self.player_guess == \"\":\n print (\"Please enter a letter,try again.\")\n if not self.player_guess.isalpha():\n print (\"Please only enter a letter(a-z),try again.\")\n if len(self.player_guess) > 1:\n print(\"Please enter only one letter at a time.\")",
"def get_value(prompt, default=None, hidden=False):\n _prompt = '%s : ' % prompt\n if default:\n _prompt = '%s [%s]: ' % (prompt, default)\n\n if hidden:\n ans = getpass(_prompt)\n else:\n ans = raw_input(_prompt)\n\n # If user hit Enter and there is a default value\n if not ans and default:\n ans = default\n return ans",
"def _ask_prompt(question: str,\n console: io.IO,\n validate: Optional[Callable[[str], None]] = None,\n default: Optional[str] = None) -> str:\n validate = validate or (lambda x: None)\n while True:\n answer = console.ask(question)\n if default and not answer:\n answer = default\n try:\n validate(answer)\n break\n except ValueError as e:\n console.error(e)\n\n return answer",
"def prompt_selection(self,\r\n prompt_text: str,\r\n validate: Union[Callable[[str], Optional[Any]], partial],\r\n default: Any) -> Any:\r\n while True:\r\n try:\r\n if self.__use_standard_console:\r\n user_input = prompt(prompt_text)\r\n else:\r\n user_input = self.__alt_prompt(prompt_text)\r\n except KeyboardInterrupt:\r\n return default\r\n if user_input == '':\r\n return default\r\n user_input = validate(user_input)\r\n if user_input is not None:\r\n break\r\n return user_input",
"def input_timeout(prompt: str, t_timeout: [float, int] = 30, default: str = None) -> str:\n print(prompt, end=\" \")\n rlist, _, _ = select.select([sys.stdin], [], [], t_timeout)\n\n if not rlist:\n if default is None:\n raise RuntimeError(f\"No input received within {t_timeout}s!\")\n else:\n return default\n\n return sys.stdin.readline().strip()",
"def ask_yes_no(question, default=\"y\"):\n valid = {\"y\": True, \"n\": False}\n\n if default is None:\n prompt = \" [y/n] \"\n elif default == \"y\":\n prompt = \" [Y/n] \"\n elif default == \"n\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = input().lower()\n\n if default is not None and choice == '':\n return valid[default]\n\n choice_letter = choice[0]\n\n if choice_letter in valid:\n return valid[choice_letter]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\n \"(or 'y' or 'n').\\n\")",
"def _get_user_input(query, valid, default):\n\n # Wait for valid user input and return choice upon receipt\n while True:\n choice = input(query)\n if default is not None and choice == \"\":\n return default\n elif choice in valid:\n return choice\n else:\n print(\"Please respond with '\" + \\\n \"or '\".join(opt + \"' \" for opt in valid) + \"\\n\")",
"def waitenterpressed(message = \"Press ENTER to continue...\"):\n\ttry:\n\t\tinput = raw_input\n\texcept: \n\t\tpass\n\traw_input(message)\n\treturn 0",
"def choice():\n choice = input(\"press e to encode press d to decode or press q to quit:\")\n if choice == \"e\":\n return \"e\"\n\n elif choice == \"d\":\n return \"d\"\n else:\n print(\"Okay bye\")"
] | [
"0.7521456",
"0.7162696",
"0.7019684",
"0.6967578",
"0.6841188",
"0.67817444",
"0.6743779",
"0.66840625",
"0.66546315",
"0.65572464",
"0.6538709",
"0.64224786",
"0.636659",
"0.63616836",
"0.63427144",
"0.63147855",
"0.6242766",
"0.61260843",
"0.60846496",
"0.6079177",
"0.6052511",
"0.6009335",
"0.5971891",
"0.5946004",
"0.5933735",
"0.58956397",
"0.5891569",
"0.58412546",
"0.5811614",
"0.58091176"
] | 0.750709 | 1 |
Function to remove test results and confirmations older than 10 blocks | async def cleanTestResults(CURRENT_HEIGHT):
LAST_GOOD_HEIGHT = int(CURRENT_HEIGHT) - 10
for testId in list(testResults):
if int(testId) <= LAST_GOOD_HEIGHT:
del testResults[testId]
for testId in list(testConfirmations):
if int(testId) <= LAST_GOOD_HEIGHT:
del testConfirmations[testId] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def remaining_batch_tests(loaded_batch_tests):\n remaining_tests = batch_test_set - set(loaded_batch_tests)\n with open('remaining_tests.txt', mode='w') as outfile:\n for batch_test in remaining_tests:\n outfile.write(\"%s\\n\" % batch_test)",
"def clean_leftovers(tests):\n for test in tests:\n test.clean()",
"def remove_totally_failed_tests(df):\n all_runs = df.group_uuid.unique()\n removed_guuids = []\n for test_run in all_runs:\n overall_status = df[(df.group_uuid == test_run) & ~get_failed_mask(df)]\n if not len(overall_status):\n df = df[df.group_uuid != test_run]\n removed_guuids.append(test_run)\n return df, removed_guuids",
"def stopTestRun(self):",
"def trim_data_back_to(monthToKeep):\n global g_failed_tests_info_dict\n current_time = time.time() # unit in seconds\n\n oldest_time_allowed = current_time - monthToKeep*30*24*3600 # in seconds\n\n clean_up_failed_test_dict(oldest_time_allowed)\n clean_up_summary_text(oldest_time_allowed)",
"def test_clean_exit(self):\n ch = connection_helper()\n qr = list_test_artifacts(None, ch.tables)\n self.assertFalse(bool(qr), \"\"\"Run 'removefacts --conf <config> --removetestlist' or \nexecute 'tests/scripts/removetestfacts.py' to fix\"\"\")",
"def cleanse_priest_list(priests_list):",
"def worker_unscheduled(self, node, indices):\n self.sched.remove_pending_tests_from_node(node, indices)",
"def test_concurrent_add_and_delete_pending_test_case_result(self):\n result = xml_reporter._TextAndXMLTestResult(None, self.stream, None, 0,\n None)\n def add_and_delete_pending_test_case_result(test_name):\n test = MockTest(test_name)\n result.addSuccess(test)\n result.delete_pending_test_case_result(test)\n\n for i in range(50):\n add_and_delete_pending_test_case_result('add_and_delete_test%s' % i)\n self.assertEqual(result.pending_test_case_results, {})",
"def test_remove_all_pass(self):\n self.write_contents(\n 'external/wpt/variant.html.ini', \"\"\"\\\n [variant.html?foo=baz]\n [formerly failing subtest]\n expected: FAIL\n \"\"\")\n self.update({\n 'results': [{\n 'test':\n '/variant.html?foo=baz',\n 'status':\n 'OK',\n 'subtests': [{\n 'name': 'formerly failing subtest',\n 'status': 'PASS',\n 'message': None,\n 'expected': 'FAIL',\n 'known_intermittent': [],\n }],\n }],\n })\n self.assertFalse(self.exists('external/wpt/variant.html.ini'))",
"def reset(self):\n # Remove all successful action records\n to_remove = []\n for action_record, (p_valid, result_text) in self.action_records.items():\n if p_valid > .5:\n to_remove.append(action_record)\n for action_record in to_remove:\n del self.action_records[action_record]",
"def remove_test(self, file_path):\n for parser_name in self.parser_names:\n results_file_path = self.get_results_filepath(parser_name)\n results_list = []\n removed = False\n for results in self.read_results_file(results_file_path):\n if results[INPUT_FILE_PATH] == file_path:\n logger.info(\"Removed results for {} in {}\".format(file_path, results_file_path))\n removed = True\n else:\n results_list.append(results)\n\n if removed:\n self.write_results_file(results_list, results_file_path)",
"def remove_a_result(self, idblock):\n self.resultPanel.remove_item(idblock)",
"def CleanUpTestResults(self):\n name_key = lambda v: v.name\n results_by_name = sorted(self.results, key=name_key)\n\n for name, res_iter in groupby(results_by_name, key=name_key):\n results = set(res_iter)\n\n # If DejaGnu was unable to compile a test it will create following result:\n failed = DejaGnuTestResult(name, '(test for excess errors)', 'FAIL',\n False)\n\n # If a test compilation failed, remove all results that are dependent.\n if failed in results:\n dependants = set(filter(lambda r: r.result != 'FAIL', results))\n\n self.results -= dependants\n\n for res in dependants:\n logging.info('Removed {%s} dependance.', res)\n\n # Remove all UNRESOLVED results that were also marked as UNSUPPORTED.\n unresolved = [res._replace(result='UNRESOLVED')\n for res in results if res.result == 'UNSUPPORTED']\n\n for res in unresolved:\n if res in self.results:\n self.results.remove(res)\n logging.info('Removed {%s} duplicate.', res)",
"def teardown_module(module):\n cf.delete_from_table(idm.SURVEY_SUBSAMPLE_TABLE, 'RUN_ID', '=', RUN_ID)\n\n # List of tables to cleanse where [RUN_ID] = RUN_ID\n tables_to_cleanse = ['[dbo].[PROCESS_VARIABLE_PY]',\n '[dbo].[PROCESS_VARIABLE_TESTING]',\n '[dbo].[TRAFFIC_DATA]',\n '[dbo].[SHIFT_DATA]',\n '[dbo].[NON_RESPONSE_DATA]',\n '[dbo].[UNSAMPLED_OOH_DATA]',\n idm.SURVEY_SUBSAMPLE_TABLE]\n\n # Try to delete from each table in list where condition. If exception occurs,\n # assume table is already empty, and continue deleting from tables in list.\n for table in tables_to_cleanse:\n try:\n cf.delete_from_table(table, 'RUN_ID', '=', RUN_ID)\n except Exception:\n continue\n\n print(\"Duration: {}\".format(time.strftime(\"%H:%M:%S\", time.gmtime(time.time() - START_TIME))))",
"def cleanupRequests(n=10):\n\n # formula for filtering data from airtable\n formula = 'AND(DATETIME_DIFF(NOW(), {Last Modified}, \"days\") > 30, Status = \"Request Complete\")'\n\n # airtable query\n headers = {\"Authorization\": \"Bearer {}\".format(os.environ['AIRTABLE_AUTH_TOKEN'])}\n params = params = {\n 'maxRecords': 10,\n 'view': 'All Requests + Data',\n 'sortField':'Last Modified',\n 'sortDirection': 'asc',\n 'filterByFormula': formula\n\n }\n\n\n r = requests.get(os.environ['PROD_URL'], headers=headers, params=params)\n\n # if status code is good ...\n if r.status_code == 200:\n\n # instantiate twilio client\n client = Client(os.environ['ACCOUNT_SID'], os.environ['TWILIO_AUTH_TOKEN'])\n\n # iterate through records\n for record in r.json()['records']:\n\n data = {\n 'fields':\n {'Message': \"\",\n 'First Name': \"\"\n }\n }\n\n # patch the requisite fields\n r = requests.patch(\n os.environ['PROD_URL'] + record['id'] , headers=headers, json=data\n )\n\n # erase the recordings associated with the call SID\n call_sid = record['fields']['Twilio Call Sid']\n call = client.calls(call_sid).fetch()\n\n for recording_sid in call.recordings.list():\n client.recordings(recording_sid).delete()\n\n # confirm deletion\n successfully_deleted = 0\n r = requests.get(os.environ['PROD_URL'] + record['id'], headers=headers)\n call = client.calls(call_sid).fetch()\n\n if all([r.status_code == 200, \n 'Message' not in r.json().keys(), \n 'First Name' not in r.json().keys(),\n len(call.recordings.list()) == 0]):\n print('succesfully deleted')\n successfully_deleted += 1\n \n else:\n print('error')\n\n return str(successfully_deleted)",
"def test_balanced_removal(self):\n successes = 0\n failures = 0\n iterations = NUM_CALLS\n\n for _ in range(iterations):\n\n failure_callback = False\n handler = self.new_handler(balance=True)\n new_ids = [randint(0, handler.uid) for _ in range(randint(HEIGHT[0], handler.expected_height))]\n new_ids = list(set(new_ids)) # make sure there are no duplicates\n try:\n new_ids.remove(handler.golden_id) # remove golden id from removal if it was randomly selected\n except ValueError:\n pass\n\n for val in new_ids:\n handler.delNodeByID(val)\n if handler.balanced is False:\n failures += 1\n failure_callback = True\n break\n\n if failure_callback:\n break\n state = handler.get_gamestate()\n for val in new_ids:\n if 'node' + str(val) in state['node_points']:\n failures += 1\n break\n\n successes += 1\n\n self.assertEqual(failures, 0,\n msg=f'{BColors.FAIL}\\n\\t[-]\\tModification: Failed to correctly remove nodes (balancing addition) ' +\n f'{failures}/{iterations} failures! {BColors.ENDC}')\n print(f\"{BColors.OKGREEN}\\t[+]\\tModification: Validated removing nodes in balancing mode in {successes} trees.{BColors.ENDC}\")",
"def delete_runs(self):\n for run in self.get_runs():\n run.delete()",
"def main():\n dir_path = '/home/ubuntu/test_files' # path for the log files that needs to be pruned\n stat_file_name = 'file_status_info' # temp file will be created to store the stat of each files to calculate when to delete\n \n # Get the list of all the files where we want to perfrom the delete operations\n file_list = get_list_of_files_in_dir(dir_path)\n\n # Get the current system date\n current_date = get_current_date()\n\n # Iterate through all the log, error, info files in the specified directory path and check for the criteria of file older than 5 days and delete.\n for fil in file_list:\n get_file_stat(dir_path, stat_file_name, fil)\n filename, file_date = get_file_last_modification_date(stat_file_name)\n\n print(\"*********** %s file stat is written **************\" % fil)\n days = abs(current_date - file_date).days\n \n # Check if the file modification date if older than 5 days.\n if days > 5:\n remove_files(os.path.join(dir_path, fil))\n else:\n print(\"No eligible file(s) found to be deleted\")",
"def test_remove_stale_expectation(self):\n self.write_contents(\n 'external/wpt/fail.html.ini', \"\"\"\\\n [fail.html]\n expected: [OK, FAIL]\n \"\"\")\n self.update({\n 'results': [{\n 'test': '/fail.html',\n 'status': 'FAIL',\n 'expected': 'OK',\n 'known_intermittent': ['FAIL'],\n }, {\n 'test': '/fail.html',\n 'status': 'CRASH',\n 'expected': 'OK',\n 'known_intermittent': ['FAIL'],\n }],\n })\n self.assert_contents(\n 'external/wpt/fail.html.ini', \"\"\"\\\n [fail.html]\n expected: [FAIL, CRASH]\n \"\"\")",
"def test_remove_yield(self, affiliate_items_url_factory, affiliate_network_factory):\n network = affiliate_network_factory(name='Network')\n\n with mock.patch('chiton.rack.affiliates.bulk.create_affiliate') as create_affiliate:\n affiliate = ValidatingAffiliate()\n affiliate.valid_tlds = ['com', 'org']\n create_affiliate.return_value = affiliate\n\n items = affiliate_items_url_factory(['biz', 'com', 'net', 'org'])\n for index, item in enumerate(items):\n item.name = 'Item %d' % (index + 1)\n item.network = network\n item.save()\n\n assert AffiliateItem.objects.count() == 4\n\n pruned = []\n for item_name, network_name, was_pruned in prune_affiliate_items(items.order_by('name')):\n pruned.append([item_name, network_name, was_pruned])\n\n assert pruned[0] == ['Item 1', 'Network', True]\n assert pruned[1] == ['Item 2', 'Network', False]\n assert pruned[2] == ['Item 3', 'Network', True]\n assert pruned[3] == ['Item 4', 'Network', False]",
"def test_imbalanced_removal(self):\n successes = 0\n failures = 0\n iterations = NUM_CALLS\n\n for _ in range(iterations):\n\n failure_callback = False\n handler = self.new_handler()\n new_ids = [randint(0, handler.uid) for _ in range(randint(HEIGHT[0], handler.expected_height))]\n new_ids = list(set(new_ids)) # make sure there are no duplicates\n try:\n new_ids.remove(handler.golden_id) # remove golden id from removal if it was randomly selected\n except ValueError:\n pass\n\n for val in new_ids:\n handler.delNodeByID(val)\n true_bal = check_balance(handler.root)\n if handler.balanced is not true_bal:\n failures += 1\n failure_callback = True\n break\n\n if failure_callback:\n break\n state = handler.get_gamestate()\n for val in new_ids:\n if 'node' + str(val) in state['node_points']:\n failures += 1\n break\n\n successes += 1\n\n self.assertEqual(failures, 0,\n msg=f'{BColors.FAIL}\\n\\t[-]\\tModification: Failed to correctly remove nodes (non-balancing addition) ' +\n f'{failures}/{iterations} failures! {BColors.ENDC}')\n print(f\"{BColors.OKGREEN}\\t[+]\\tModification: Validated removing nodes in non-balancing mode in {successes} trees.{BColors.ENDC}\")",
"def test_successful_subscriptions_remove(self) -> None:\n self.assertGreaterEqual(len(self.streams), 2)\n streams_to_remove = self.streams[1:]\n not_subbed = [\n stream.name\n for stream in Stream.objects.filter(realm=get_realm(\"zulip\"))\n if stream.name not in self.streams\n ]\n random.shuffle(not_subbed)\n self.assertNotEqual(len(not_subbed), 0) # necessary for full test coverage\n try_to_remove = not_subbed[:3] # attempt to remove up to 3 streams not already subbed to\n streams_to_remove.extend(try_to_remove)\n self.helper_check_subs_before_and_after_remove(\n streams_to_remove,\n {\"removed\": self.streams[1:], \"not_removed\": try_to_remove},\n self.test_email,\n [self.streams[0]],\n self.test_realm,\n )",
"def poll_tests(self):\n for i, test in enumerate(self.tests):\n if test.process.poll() is not None:\n self.check_test(test)\n self.tests.pop(i)\n if self.test_numbers:\n self.start_next_test()",
"def prune_alerts():\n from scoop.messaging.models.alert import Alert\n # Supptimer les alertes\n alerts = Alert.objects.read_since(minutes=2880)\n alerts.delete()",
"def test_keep_unobserved_subtest(self):\n self.write_contents(\n 'external/wpt/variant.html.ini', \"\"\"\\\n [variant.html?foo=baz]\n [subtest that should not be removed]\n expected: CRASH\n \"\"\")\n self.update(\n {\n 'results': [{\n 'test': '/variant.html?foo=baz',\n 'status': 'CRASH',\n 'subtests': [],\n }],\n },\n overwrite_conditions='no')\n self.write_contents(\n 'external/wpt/variant.html.ini', \"\"\"\\\n [variant.html?foo=baz]\n [subtest that should not be removed]\n expected: CRASH\n \"\"\")",
"def stopTest(self, test):",
"def test_oldtestcases(self):\n\t\treturn oldtests()",
"def test_remove_expensive(self):\n test_remove_expensive = self.info_list.remove_expensive()\n self.assertTrue(test_remove_expensive)",
"def disaggregate_chunk(self, test_mains):\n raise NotImplementedError()"
] | [
"0.6036735",
"0.58126444",
"0.5707902",
"0.5680992",
"0.5655867",
"0.56548756",
"0.5543772",
"0.5505886",
"0.5502805",
"0.5481863",
"0.54809284",
"0.5464488",
"0.5420158",
"0.5416538",
"0.5402889",
"0.5371889",
"0.5359482",
"0.5342731",
"0.53407174",
"0.5334368",
"0.5320321",
"0.5319818",
"0.5316838",
"0.5309624",
"0.5308823",
"0.53065723",
"0.5304401",
"0.5288708",
"0.5285671",
"0.52796096"
] | 0.73820996 | 0 |
Instance data use_wsdl if True try to construct XML Instance from information in WSDL. | def __init__(self, wsdl, service=None, port=None, tracefile=None,
typesmodule=None, nsdict=None, soapAction=None, ns=None, op_ns=None, use_wsdl=False):
if not hasattr(wsdl, 'targetNamespace'):
wsdl = wstools.WSDLTools.WSDLReader().loadFromURL(wsdl)
# for item in wsdl.types.items():
# self._serializer.loadSchema(item)
self._service = wsdl.services[service or 0]
self.__doc__ = self._service.documentation
self._port = self._service.ports[port or 0]
self._name = self._service.name
self._wsdl = wsdl
self._tracefile = tracefile
self._typesmodule = typesmodule
self._nsdict = nsdict or {}
self._soapAction = soapAction
self._ns = ns
self._op_ns = op_ns
self._use_wsdl = use_wsdl
binding = self._port.getBinding()
portType = binding.getPortType()
for item in portType.operations:
callinfo = wstools.WSDLTools.callInfoFromWSDL(self._port, item.name)
method = MethodProxy(self, callinfo)
setattr(self, item.name, method) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _prepare_wsdl_objects(self):\r\n # This holds some optional options for the request..\r\n self.AddressValidationOptions = self.client.factory.create('AddressValidationOptions')\r\n \r\n # This is good to review if you'd like to see what the data structure\r\n # looks like.\r\n self.logger.debug(self.AddressValidationOptions)",
"def create_wsdl_object_of_type(self, type_name):\r\n return self.client.factory.create(type_name)",
"def __prepare_wsdl_objects(self):\r\n pass",
"def get_instance(self, instance_type, name, subdir=\"\"):\r\n\r\n def read_data(data_syntax, file_name, sub_dir=\"\"):\r\n alns_data = pickle.load(open(join(self.base_path, data_syntax, \"data\", sub_dir, file_name), 'rb'))\r\n return alns_data\r\n\r\n if instance_type == \"simple\":\r\n nr_vehicles = 2\r\n nr_nodes = 4\r\n nr_customers = 3\r\n load_bucket_size = 10\r\n demand = [110, 100, 150]\r\n service_times = [10, 10, 10]\r\n start_window = [0, 0, 0]\r\n end_window = [100, 100, 100]\r\n\r\n distance_matrix = [[0, 3, 2, 3], [3, 0, 1.75, 4], [2, 1.75, 0, 2.5], [3, 4, 2.5, 0]]\r\n elevation_matrix = [[0.1, 0.2, 0, 0], [0, 0, 0.3, 0], [0, 0, 0, 0], [0, 0, 0, 0]]\r\n\r\n return ALNSData(nr_veh=nr_vehicles,\r\n nr_nodes=nr_nodes,\r\n nr_customers=nr_customers,\r\n demand=demand,\r\n service_times=service_times,\r\n start_window=start_window,\r\n end_window=end_window,\r\n elevation_m=elevation_matrix,\r\n distance_m=distance_matrix,\r\n load_bucket_size=load_bucket_size)\r\n\r\n try:\r\n return read_data(self.code_lookup[instance_type], file_name=name, sub_dir=subdir)\r\n except KeyError:\r\n raise ValueError(\"instance_type is not known: Valid inputs are: pirmin, solomon, cordeau, homberger\")",
"def __init__(__self__, *,\n wsdl_endpoint_name: Optional[pulumi.Input[str]] = None,\n wsdl_service_name: Optional[pulumi.Input[str]] = None):\n if wsdl_endpoint_name is not None:\n pulumi.set(__self__, \"wsdl_endpoint_name\", wsdl_endpoint_name)\n if wsdl_service_name is not None:\n pulumi.set(__self__, \"wsdl_service_name\", wsdl_service_name)",
"def get_instance_from_words(data):\n inst = Dataset.get_instance_template()\n inst[\"words\"] = data\n return inst",
"def __init__(self, py_dict=None):\n super(RuntimeNicInfoSchema, self).__init__()\n self.set_data_type('xml')\n self.index = None\n self.label = None\n self.network = NetworkSchema()\n\n if py_dict is not None:\n self.get_object_from_py_dict(py_dict)",
"def convertInstanceData(self, builder, typeName, data):\n\t\tif typeName not in self.instanceDataTypeMap:\n\t\t\traise Exception('Instance data type \"' + typeName + '\" hasn\\'t been registered.')\n\n\t\tconvertedData = self.instanceDataTypeMap[typeName](self, data)\n\n\t\ttypeNameOffset = builder.CreateString(typeName)\n\t\tdataOffset = builder.CreateByteVector(convertedData)\n\n\t\tObjectData.Start(builder)\n\t\tObjectData.AddType(builder, typeNameOffset)\n\t\tObjectData.AddData(builder, dataOffset)\n\t\treturn ObjectData.End(builder)",
"def _prepare_wsdl_objects(self):\r\n self.DeletionControlType = self.client.factory.create('DeletionControlType')\r\n self.TrackingId = self.client.factory.create('TrackingId')\r\n self.TrackingId.TrackingIdType = self.client.factory.create('TrackingIdType')",
"def __init__(self, py_dict=None):\n super(ServiceManagerSchema, self).__init__()\n self.set_data_type('xml')\n self.name = None\n self.description = None\n self.revision = None\n self.objectTypeName = None\n self.vendorName = None\n self.vendorId = None\n self.thumbprint = None\n self.login = None\n self.password = None\n self.verifyPassword = None\n self.url = None\n self.restUrl = None\n self.status = None\n\n if py_dict is not None:\n self.get_object_from_py_dict(py_dict)",
"def test_create_instance(self):\n engine = Engine(self.config_file, self.api_token)\n\n assert isinstance(engine, Engine) is True\n assert isinstance(engine.backend, Backend) is True\n assert isinstance(engine.backend, BossBackend) is True\n assert isinstance(engine.validator, Validator) is True\n assert isinstance(engine.validator, BossValidatorV02) is True\n assert isinstance(engine.config, Configuration) is True\n\n # Schema loaded\n assert isinstance(engine.config.schema, dict) is True\n assert engine.config.schema[\"type\"] == \"object\"",
"def __init__(self, config_obj, wsdl_name, *args, **kwargs):\r\n self.logger = logging.getLogger('fedex')\r\n \"\"\"@ivar: Python logger instance with name 'fedex'.\"\"\"\r\n self.config_obj = config_obj\r\n \"\"\"@ivar: The FedexConfig object to pull auth info from.\"\"\"\r\n\r\n # If the config object is set to use the test server, point\r\n # suds at the test server WSDL directory.\r\n if config_obj.use_test_server:\r\n self.logger.info(\"Using test server.\")\r\n self.wsdl_path = os.path.join(config_obj.wsdl_path,\r\n 'test_server_wsdl', wsdl_name)\r\n else:\r\n self.logger.info(\"Using production server.\")\r\n self.wsdl_path = os.path.join(config_obj.wsdl_path, wsdl_name)\r\n\r\n self.client = Client('file:///%s' % self.wsdl_path.lstrip('/'))\r\n\r\n #print self.client\r\n\r\n self.VersionId = None\r\n \"\"\"@ivar: Holds details on the version numbers of the WSDL.\"\"\"\r\n self.WebAuthenticationDetail = None\r\n \"\"\"@ivar: WSDL object that holds authentication info.\"\"\"\r\n self.ClientDetail = None\r\n \"\"\"@ivar: WSDL object that holds client account details.\"\"\"\r\n self.response = None\r\n \"\"\"@ivar: The response from Fedex. You will want to pick what you\r\n want out here here. This object does have a __str__() method,\r\n you'll want to print or log it to see what possible values\r\n you can pull.\"\"\"\r\n self.TransactionDetail = None\r\n \"\"\"@ivar: Holds customer-specified transaction IDs.\"\"\"\r\n\r\n self.__set_web_authentication_detail()\r\n self.__set_client_detail()\r\n self.__set_version_id()\r\n self.__set_transaction_detail(*args, **kwargs)\r\n self._prepare_wsdl_objects()",
"def __init__(self, use=True):\n self.use = use",
"def _create_soap_object(self, name):\n return self.client.factory.create(name)",
"def __init__(self, enable_gateway=False, topology_name=None, topologies=None, exposed_services=None, token_cert=None, gateway_type=None, sso_type=None):\n\n self._enable_gateway = None\n self._topology_name = None\n self._topologies = None\n self._exposed_services = None\n self._token_cert = None\n self._gateway_type = None\n self._sso_type = None\n\n if enable_gateway is not None:\n self.enable_gateway = enable_gateway\n if topology_name is not None:\n self.topology_name = topology_name\n if topologies is not None:\n self.topologies = topologies\n if exposed_services is not None:\n self.exposed_services = exposed_services\n if token_cert is not None:\n self.token_cert = token_cert\n if gateway_type is not None:\n self.gateway_type = gateway_type\n if sso_type is not None:\n self.sso_type = sso_type",
"def given_an_instance() -> machine_learning.StationMachineLearning:\n # super weird bug? with pytest_bdd that hooked into isoformat on the coordinate and points fields.\n # tried forever to figure out why - and gave up in the end. removed coordinate and point from\n # feature file and just loading it in here.\n coordinate = _load_json_file(__file__, 'coordinate.json')\n points = _load_json_file(__file__, 'points.json')\n return machine_learning.StationMachineLearning(\n session=None,\n model=PredictionModel(id=1),\n grid=PredictionModelGridSubset(id=1),\n points=points,\n target_coordinate=coordinate,\n station_code=None,\n max_learn_date=datetime.now())",
"def validate_is_instance(var: Any,\n var_name: str,\n instance_type: Any,\n class_name: Optional[str] = None,\n log_metadata_validation_failures: bool = True) -> None:\n if var is None:\n return\n splits = str(instance_type).split(\"<class \")[-1].split(\"'\")\n if len(splits) > 1:\n print_type = splits[1]\n else:\n print_type = splits[0]\n if log_metadata_validation_failures:\n if class_name is None:\n logging.debug(\n \"XAI Validation :: Metadata: Variable `%s` should be of type `%s`\",\n var_name, print_type)\n else:\n logging.debug(\n \"XAI Validation :: Metadata: [%s] Variable `%s` should be of type \"\n \"`%s`\", class_name, var_name, print_type)\n if not isinstance(var, instance_type):\n raise TypeError(\"{} must be of type {}. Got {}\".format(\n var_name, str(instance_type), str(type(var))))",
"def __init__(self, data):\n self.jssp_instance_data = data",
"def load_instance(self, instance_path, input_shapes):\n metadata = load_json(os.path.join(instance_path, 'instance.meta'))\n self.log('load metadata')\n\n instance_class_name = metadata[MODEL_METADATA_KEY_INSTANCE_CLASS_NAME]\n instance_source_path = metadata[MODEL_METADATA_KEY_INSTANCE_SOURCE_PATH]\n model = import_class_from_module_path(instance_source_path, instance_class_name)\n self.log('instance source code load')\n\n self.instance = model(metadata[MODEL_METADATA_KEY_INSTANCE_PATH])\n self.instance.load_model(metadata, input_shapes)\n self.log('load instance')\n\n instance_id = metadata[MODEL_METADATA_KEY_INSTANCE_ID]\n self.log('load instance id : %s' % instance_id)",
"def _prepare_wsdl_objects(self):\r\n self.TrackPackageIdentifier = self.client.factory.create('TrackPackageIdentifier')\r\n # Default to tracking number.\r\n self.TrackPackageIdentifier.Type = 'TRACKING_NUMBER_OR_DOORTAG'",
"def __init__(self, api_use=False):\n self.api_use = api_use",
"def from_url(cls, wsdl_path):\n return cls(safe_parse_url(wsdl_path))",
"def create_instance(c_instance):\n\treturn 0",
"def __init__(self, services, tns):\r\n\r\n return super(DjangoSoapApp, self).__init__(Application(services, tns))",
"def __init__(self, py_dict=None):\n super(EdgeNATRulesSchema, self).__init__()\n self.set_data_type('xml')\n self.natRule = EdgeNATRuleSchema()\n\n if py_dict is not None:\n self.get_object_from_py_dict(py_dict)",
"def _prepare_wsdl_objects(self):\r\n\r\n\t# Default behavior is to not request transit information\r\n\tself.ReturnTransitAndCommit = False\r\n\r\n # This is the primary data structure for processShipment requests.\r\n self.RequestedShipment = self.client.factory.create('RequestedShipment')\r\n self.RequestedShipment.ShipTimestamp = datetime.now()\r\n \r\n TotalWeight = self.client.factory.create('Weight')\r\n # Start at nothing.\r\n TotalWeight.Value = 0.0\r\n # Default to pounds.\r\n TotalWeight.Units = 'LB'\r\n # This is the total weight of the entire shipment. Shipments may\r\n # contain more than one package.\r\n self.RequestedShipment.TotalWeight = TotalWeight\r\n \r\n # This is the top level data structure for Shipper information.\r\n ShipperParty = self.client.factory.create('Party')\r\n ShipperParty.Address = self.client.factory.create('Address')\r\n ShipperParty.Contact = self.client.factory.create('Contact')\r\n \r\n # Link the ShipperParty to our master data structure.\r\n self.RequestedShipment.Shipper = ShipperParty\r\n\r\n # This is the top level data structure for Recipient information.\r\n RecipientParty = self.client.factory.create('Party')\r\n RecipientParty.Contact = self.client.factory.create('Contact')\r\n RecipientParty.Address = self.client.factory.create('Address')\r\n \r\n # Link the RecipientParty object to our master data structure.\r\n self.RequestedShipment.Recipient = RecipientParty\r\n \r\n Payor = self.client.factory.create('Payor')\r\n # Grab the account number from the FedexConfig object by default.\r\n Payor.AccountNumber = self._config_obj.account_number\r\n # Assume US.\r\n Payor.CountryCode = 'US'\r\n \r\n ShippingChargesPayment = self.client.factory.create('Payment')\r\n ShippingChargesPayment.Payor = Payor\r\n\r\n self.RequestedShipment.ShippingChargesPayment = ShippingChargesPayment\r\n \r\n # ACCOUNT or LIST\r\n self.RequestedShipment.RateRequestTypes = ['ACCOUNT'] \r\n \r\n # Start with no packages, user must add them.\r\n self.RequestedShipment.PackageCount = 0\r\n self.RequestedShipment.RequestedPackageLineItems = []\r\n \r\n # This is good to review if you'd like to see what the data structure\r\n # looks like.\r\n self.logger.debug(self.RequestedShipment)",
"def test_hidden_instantiate(self):\n context = self.framework.get_bundle_context()\n\n # Prepare random values\n hidden_value = random.randint(0, 100)\n public_value = random.randint(0, 100)\n\n # Instantiate the component\n with use_ipopo(context) as ipopo:\n svc = ipopo.instantiate(self.module.FACTORY_HIDDEN_PROPS, NAME_A,\n {\"hidden.prop\": hidden_value,\n \"public.prop\": public_value})\n\n # Check default values (and accesses)\n self.assertEqual(svc.hidden, hidden_value)\n self.assertEqual(svc.public, public_value)\n\n # Check instance details\n with use_ipopo(context) as ipopo:\n details = ipopo.get_instance_details(NAME_A)\n\n self.assertNotIn(\"hidden.prop\", details[\"properties\"])",
"def make_instance(self, data, **kwargs):\n instance = self.instance or self.get_instance(data)\n if instance is not None:\n for key, value in iteritems(data):\n setattr(instance, key, value)\n return instance\n kwargs, association_attrs = self._split_model_kwargs_association(data)\n instance = self.opts.model(**kwargs)\n for attr, value in iteritems(association_attrs):\n setattr(instance, attr, value)\n return instance",
"def use_instance_table(self, name, typename):\n if typename in ['VkInstance', 'VkPhysicalDevice']:\n return True\n # vkSetDebugUtilsObjectNameEXT and vkSetDebugUtilsObjectTagEXT\n # need to be probed from GetInstanceProcAddress due to a loader issue.\n # https://github.com/KhronosGroup/Vulkan-Loader/issues/1109\n # TODO : When loader with fix for issue is widely available, remove this\n # special case.\n if name in ['vkSetDebugUtilsObjectNameEXT', 'vkSetDebugUtilsObjectTagEXT']:\n return True\n return False",
"def create_instance(c_instance):\n return OpenLabs(c_instance)"
] | [
"0.5577236",
"0.5367035",
"0.5321477",
"0.5150564",
"0.50830454",
"0.5067312",
"0.4986971",
"0.49749762",
"0.4935389",
"0.48817945",
"0.4843432",
"0.48354465",
"0.48144224",
"0.47744632",
"0.47571477",
"0.47335753",
"0.47333562",
"0.47290888",
"0.47188824",
"0.46990353",
"0.46851358",
"0.4661856",
"0.46545747",
"0.4653167",
"0.46447936",
"0.46366242",
"0.46229136",
"0.4611534",
"0.45930302",
"0.45751363"
] | 0.54205626 | 1 |
Returns typecodes representing input and output messages, if request and/or response fails to be generated return None for either or both. callinfo WSDLTools.SOAPCallInfo instance describing an operation. | def _getTypeCodes(self, callinfo):
prefix = None
self._resetPrefixDict()
if callinfo.use == 'encoded':
prefix = self._getPrefix(callinfo.namespace)
try:
requestTC = self._getTypeCode(parameters=callinfo.getInParameters(), literal=(callinfo.use=='literal'))
except EvaluateException, ex:
print "DEBUG: Request Failed to generate --", ex
requestTC = None
self._resetPrefixDict()
try:
replyTC = self._getTypeCode(parameters=callinfo.getOutParameters(), literal=(callinfo.use=='literal'))
except EvaluateException, ex:
print "DEBUG: Response Failed to generate --", ex
replyTC = None
request = response = None
if callinfo.style == 'rpc':
if requestTC: request = TC.Struct(pyclass=None, ofwhat=requestTC, pname=callinfo.methodName)
if replyTC: response = TC.Struct(pyclass=None, ofwhat=replyTC, pname='%sResponse' %callinfo.methodName)
else:
if requestTC: request = requestTC[0]
if replyTC: response = replyTC[0]
#THIS IS FOR RPC/ENCODED, DOC/ENCODED Wrapper
if request and prefix and callinfo.use == 'encoded':
request.oname = '%(prefix)s:%(name)s xmlns:%(prefix)s="%(namespaceURI)s"' \
%{'prefix':prefix, 'name':request.oname, 'namespaceURI':callinfo.namespace}
return request, response | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def GetModelOutputInfo(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def verify_call(obj):\n\tassert obj.tag == 'OMOBJ'\n\tattr = obj[0]\n\t\n\tassert attr.tag == 'OMATTR'\n\tpairs, application = attr\n\t\n\tassert application.tag == 'OMA'\n\tsymbol, args = application\n\t\n\tassert symbol.tag == 'OMS'\n\tassert symbol.get('cd') == \"scscp1\"\n\tassert symbol.get('name') == \"procedure_call\"\n\t\n\tassert args.tag == 'OMA'\n\tassert len(args) > 0\n\tname_symbol = args[0]\n\t\n\tassert name_symbol.tag == 'OMS'\n\tcd = name_symbol.get('cd')\n\tproc_name = name_symbol.get('name')\n\t\n\t#2. Now handle the extra information\n\tassert pairs.tag == 'OMATP'\n\tassert len(pairs) % 2 == 0\n\t\n\textras = {}\n\tcall_id = None\n\treturn_type = None\n\t\n\tfor i in range(0, len(pairs), 2):\n\t\tsymbol = pairs[i]\n\t\tassert symbol.tag == 'OMS'\n\t\tassert symbol.get('cd') == \"scscp1\"\n\t\tname = symbol.get('name')\n\t\textras[name] = pairs[i+1]\n\t\t\n\t\tif name == 'call_id':\n\t\t\tassert call_id is None\n\t\t\tcall_id = pairs[i+1].text\n\t\t\tprint(call_id)\n\t\telif name.startswith('option_return_'):\n\t\t\tassert return_type is None\n\t\t\treturn_type = ReturnTypes[name[14:]]\n\t\n\t#Some information is mandatory\n\tassert call_id is not None\n\tassert return_type is not None\n\t\n\treturn cd, proc_name, call_id, return_type, args[1:], extras",
"def get_method_type(request_streaming, response_streaming):\n if request_streaming and response_streaming:\n return BIDI_STREAMING\n elif request_streaming and not response_streaming:\n return CLIENT_STREAMING\n elif not request_streaming and response_streaming:\n return SERVER_STREAMING\n return UNARY",
"def get_spi_response_type(cmd_num):\n length = 8 # Default length of a response\n resp_type = 1\n if cmd_num in [8]:\n # CMD8 gets R7\n resp_type = 7\n length = 40\n if cmd_num in [5]:\n # CMD5 gets a R4 back in SPI mode\n resp_type = 4\n length = 40\n if cmd_num in [52,53]:\n resp_type = 5\n length = 16\n \n log.debug(\"Cmd %d expects response type R%s\" %(cmd_num,resp_type))\n return (resp_type, length)",
"def GetNativeInputInfo(is_optional):\r\n raise Exception(\"Abstract method\")",
"def operation_type(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"operation_type\")",
"def _on_response(self, response_type, p_api1, p_api2, double1, double2, ptr1, size1, ptr2, size2, ptr3, size3):\n if self.debug:\n print \"Response: \", ord(response_type)\n if response_type == OnConnectionStatus.value:\n self._on_connect_status(p_api2, chr(int(double1)), ptr1, size1)\n elif self._callbacks:\n for callback in self._callbacks:\n if response_type == OnRtnDepthMarketData.value:\n if self._is_market:\n callback.on_market_rtn_depth_market_data_n(p_api2, ptr1)\n elif response_type == OnRspQryInstrument.value:\n obj = cast(ptr1, POINTER(InstrumentField)).contents\n callback.on_trading_rsp_qry_instrument(p_api2, obj, bool(double1))\n elif response_type == OnRspQryTradingAccount.value:\n obj = cast(ptr1, POINTER(AccountField)).contents\n callback.on_trading_rsp_qry_trading_account(p_api2, obj, bool(double1))\n elif response_type == OnRspQryInvestorPosition.value:\n obj = cast(ptr1, POINTER(PositionField)).contents\n callback.on_trading_rsp_qry_investor_position(p_api2, obj, bool(double1))\n elif response_type == OnRspQrySettlementInfo.value:\n obj = cast(ptr1, POINTER(SettlementInfoField)).contents\n callback.on_trading_rsp_qry_settlement_info(p_api2, obj, bool(double1))\n elif response_type == OnRtnOrder.value:\n obj = cast(ptr1, POINTER(OrderField)).contents\n callback.on_trading_rtn_order(p_api2, obj)\n elif response_type == OnRtnTrade.value:\n obj = cast(ptr1, POINTER(TradeField)).contents\n callback.on_trading_rtn_trade(p_api2, obj)\n elif response_type == OnRtnQuote.value:\n obj = cast(ptr1, POINTER(QuoteField)).contents\n callback.on_trading_rtn_quote(p_api2, obj)\n elif response_type == OnRtnQuoteRequest.value:\n obj = cast(ptr1, POINTER(QuoteRequestField)).contents\n callback.on_trading_rtn_quote_request(p_api2, obj)\n elif response_type == OnRspQryHistoricalTicks.value:\n obj = cast(ptr1, POINTER(TickField)).contents\n obj2 = cast(ptr2, POINTER(HistoricalDataRequestField)).contents\n callback.on_trading_rsp_qry_historical_ticks(p_api2, obj, obj2, bool(double1))\n elif response_type == OnRspQryHistoricalBars.value:\n obj = cast(ptr1, POINTER(BarField)).contents\n obj2 = cast(ptr2, POINTER(HistoricalDataRequestField)).contents\n callback.on_trading_rsp_qry_historical_bars(p_api2, obj, obj2, bool(double1))\n elif response_type == OnRspQryInvestor.value:\n obj = cast(ptr1, POINTER(InvestorField)).contents\n callback.on_trading_rsp_qry_investor(p_api2, obj)\n elif response_type == OnFilterSubscribe.value:\n instrument = c_char_p(ptr1).value\n callback.on_trading_filter_subscribe(p_api2, ExchangeType(double1), size1, size2, size3, instrument)\n elif response_type == OnRtnError.value:\n obj = cast(ptr1, POINTER(ErrorField)).contents\n if self._is_market:\n callback.on_market_rsp_error(p_api2, obj, bool(double1))\n else:\n callback.on_trading_rsp_error(p_api2, obj, bool(double1))",
"def RequestInformation(self, request, inInfo, outInfo):\n if self.need_to_read():\n self._read_up_front()\n self._update_time_steps()\n return 1 # NOTE: ALWAYS return 1 on pipeline methods",
"def _fc_out_parameters(self) -> Tuple[str, List[str]]:\n out_pars = self.ret_type.fc_ret_type()\n if len(out_pars) == 1:\n return (out_pars[0][0], [])\n\n out_par_strl = list() # type: List[str]\n for type_name, postfix in out_pars:\n out_par_strl.append('{} {}'.format(\n type_name, self.ret_type.name + postfix))\n return ('void', out_par_strl)",
"def __validate_input(self, request_data):\n call_id = request_data.get(strings.CALL_ID_KEY)\n request_timestamp = request_data.get(strings.TIMESTAMP_KEY)\n request_start = request_data.get(strings.START_KEY)\n validation = None\n if call_id and request_timestamp and request_start is not None:\n call_detail_query = CallDetail.objects.filter(call_id=call_id)\n if call_detail_query:\n if len(call_detail_query) < CALL_DETAILS_LIMIT:\n stored_call_detail = call_detail_query[0]\n if isinstance(request_start, str):\n if request_start in strings.TRUE_VALUES:\n request_start = True\n else:\n request_start = False\n if stored_call_detail.start == request_start:\n validation = {strings.INPUT_ERROR_KEY:\n strings.START_END_ERROR}\n stored_timestamp = standardize_date(\n stored_call_detail.timestamp,\n strings.COMPLETE_DATE_PATTERN)\n request_timestamp = standardize_date(request_timestamp,\n strings.\n COMPLETE_DATE_PATTERN)\n if stored_timestamp == request_timestamp:\n validation = {strings.INPUT_ERROR_KEY:\n strings.EQUAL_TIMESTAMPS_ERROR}\n if stored_call_detail.start and not request_start:\n if stored_timestamp > request_timestamp:\n validation = {strings.INPUT_ERROR_KEY:\n strings.SOONER_END_ERROR}\n elif not stored_call_detail.start and request_start:\n if stored_timestamp < request_timestamp:\n validation = {strings.INPUT_ERROR_KEY:\n strings.SOONER_END_ERROR}\n else:\n validation = {strings.INPUT_ERROR_KEY:\n strings.CALL_LIMIT_ERROR}\n\n return validation",
"def function_response_types(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['EventSourceMappingFunctionResponseTypesItem']]]]:\n return pulumi.get(self, \"function_response_types\")",
"def construct_sp(self, info):\n if \"query\" in info.keys():\n if info[\"query\"].upper().startswith(\"CALL\"):\n self.q_str = info[\"query\"]\n self.sql_type_ind = (info[\"q_type_ind\"] if \"q_type_ind\" in info.keys() else\n sql_type.STORED_PROCEDURE_NO_RES)\n\n self.return_result = bool((self.sql_type_ind is sql_type.SELECT or\\\n self.sql_type_ind is sql_type.STORED_PROCEDURE_RES))\n\n elif \"procedure\" in info.keys():\n self.q_str = info[\"procedure\"]\n\n self.sql_type_ind = (info[\"q_type_ind\"] if \"q_type_ind\" in info.keys() else\n sql_type.STORED_PROCEDURE_NO_RES)\n\n self.return_result = bool((self.sql_type_ind is sql_type.SELECT or\\\n self.sql_type_ind is sql_type.STORED_PROCEDURE_RES))",
"def build_method_call(code, line, method_object):\n full_signature = method_object[\"methodSignature\"]\n normalised_signature = normalise_signature(full_signature)\n param_values = get_method_parameter_values(code, line, full_signature)\n string_values, cmplx_types = get_string_values(param_values, full_signature)\n\n rpc_payload_length = str(\n 4 + len(normalised_signature) + len(string_values)\n )\n # Default to stub value if method-to-service correlation failed\n strong_name = (\n method_object[\"service\"][\"strongName\"]\n if method_object[\"service\"] is not None\n else \"X\"*32\n )\n rpc_blocks = []\n rpc_blocks.extend([\n RPC_VERSION,\n RPC_FLAGS,\n rpc_payload_length,\n BASE_URL,\n strong_name,\n method_object[\"rmtSvcIntName\"],\n method_object[\"methodName\"],\n ])\n rpc_blocks.extend(normalised_signature)\n rpc_blocks.extend(string_values)\n rpc_blocks.extend([\n \"1\", \"2\", \"3\", \"4\",\n method_object[\"paramCount\"]\n ])\n rpc_blocks.extend(\n generate_parameter_map(\n rpc_blocks,\n full_signature,\n param_values\n )\n )\n return rpc_blocks, cmplx_types",
"def checkRequestType(packet):\n info = [packet[i : i + 2] for i in range(0, len(packet), 2)]\n RequestType = int.from_bytes(info[2], \"big\")\n if RequestType == 0x0001:\n return \"date\"\n elif RequestType == 0x0002:\n return \"time\"\n else:\n return -1",
"def get_operation(operation):\n if operation == 'query':\n return banking_pb2.QUERY\n if operation == 'deposit':\n return banking_pb2.DEPOSIT\n if operation == 'withdraw':\n return banking_pb2.WITHDRAW",
"def _handle_one_message(self):\n\n type, data = self.cxn.recv_message()\n\n if type.startswith(\"call\"):\n if len(data) != 3:\n message = (type, data)\n raise MessageError.invalid(message, \"incorrect number of args\")\n flags = {\n \"want_response\": type == \"call\",\n }\n call = Call(data[0], data[1], data[2], flags, self.client)\n self._handle_call(call)\n return False\n\n raise MessageError.bad_type(type)",
"def analyze_input():\n\n # Generate action_id classes for OF 1.3\n for wire_version, ordered_classes in of_g.ordered_classes.items():\n if not wire_version in [of_g.VERSION_1_3]:\n continue\n classes = versions[of_g.of_version_wire2name[wire_version]]['classes']\n for cls in ordered_classes:\n if not loxi_utils.class_is_action(cls):\n continue\n action = cls[10:]\n if action == '' or action == 'header':\n continue\n name = \"of_action_id_\" + action\n members = classes[\"of_action\"][:]\n of_g.ordered_classes[wire_version].append(name)\n if type_maps.action_id_is_extension(name, wire_version):\n # Copy the base action classes thru subtype\n members = classes[\"of_action_\" + action][:4]\n classes[name] = members\n\n # @fixme If we support extended actions in OF 1.3, need to add IDs\n # for them here\n\n for wire_version in of_g.wire_ver_map.keys():\n version_name = of_g.of_version_wire2name[wire_version]\n calculate_offsets_and_lengths(\n of_g.ordered_classes[wire_version],\n versions[version_name]['classes'],\n wire_version)",
"def get_response_type(cmd_num):\n length = 48 # Default length of a response\n if cmd_num in [0,4,15]:\n # No response expected\n rv = None\n if cmd_num in [11,13,16,17,18,19,23,55,56]:\n # Response type 1\n rv = 1\n if cmd_num in [7,12,20]:\n # Response type 1b, means it could also be a busy\n rv = 1.5\n if cmd_num in [2,9,10]:\n # Reponse type 2, CID/CSD register, not on SDIO but here for completeness\n length = 136\n rv = 2\n if cmd_num in [4,5]:\n rv = 4\n if cmd_num in [52,53]:\n rv = 5\n if cmd_num in [3]:\n rv = 6\n if cmd_num in [8]:\n rv = 7\n log.debug(\"Cmd %d expects response type R%s\" %(cmd_num,rv))\n return (rv, length)",
"def _fi_out_parameters(self) -> Tuple[str, List[Tuple[str, str]]]:\n out_pars = self.ret_type.fi_ret_type()\n if len(out_pars) == 1:\n return (out_pars[0][0], [])\n\n out_par_list = list() # type: List[Tuple[str, str]]\n for par_type, par_name in out_pars:\n out_par_list.append((par_type, 'ret_val' + par_name))\n\n return ('', out_par_list)",
"def processRequest(cls, ps, **kw):\n resource = kw['resource']\n method = resource.getOperation(ps, None) # This getOperation method is valid for ServiceSOAPBinding subclass\n rsp = method(ps, **kw)[1] # return (request, response) but we only need response\n return rsp",
"def validate_args(self, in_args, cmd_call):\n valid_1, valid_2 = None, None\n\n if len(in_args) > 0 and type(in_args) is not list:\n args = in_args.split()\n valid_1 = args[0]\n elif type(in_args) is list and len(in_args) > 0:\n args = in_args\n valid_1 = args[0]\n else:\n args = []\n\n if cmd_call in ['default']:\n # Default : Returns a valid cui type for an input cui\n # checks to see if there is more than 2 arguments\n # if so, arg[0] may be a valid code\n # arg[1] may be a valid code type\n # if not ask the user what type of code type arg[0] is\n # valid_1 = valid cui type\n # valid_2 = None\n while True:\n if len(args) >= 2 and len(args) <= 3:\n input_type = args[1].upper()\n else:\n input_type = input(\"What type of id is '{0}'? [LOCAL/RXCUI/NDC/SNOMED]\".format(args[0])).upper()\n\n # Confirm it's a valid code type\n valid_type = self.validate_id_type(input_type)\n # Valid type is a boolean of True\n if isinstance(valid_type, str) or valid_type is None:\n return None\n elif valid_type:\n break\n elif not valid_type:\n print('Invalid Option, Please Try Again')\n continue\n valid_1 = input_type\n\n elif cmd_call in self.cmd_config_default:\n # valid_1 : Valid Cui , valid_2 : Valid Cui Type\n valid_2, _ = self.validate_args(args, 'default')\n valid_1 = args[0]\n\n elif cmd_call == 'code_lookup':\n # args[0] : Initial CUI, args[1] : Initial CUI Type, args[2] : Target CUI Type\n # valid_1 : valid cui, valid_2 : list valid source and target\n _dict_opts = util.OPTIONS_CUI_TYPES.copy()\n _avail = list(set(smores.get_dict_sources()) & set(_dict_opts))\n if len(_avail) == 0 and len(args) < 2:\n print('There are no available starting cui types that can be crosswalked.\\n'\n 'Please load a file containing valid cui types: {0}'.format(_dict_opts))\n return False, None\n\n if len(args) >= 2:\n if len(args) == 3:\n # provided cui, cui source, and target\n valid_2, _ = self.validate_args(args, 'default')\n source, target = args[1].upper(), args[2].upper()\n else:\n source, target = args[0].upper(), args[1].upper()\n valid_1 = simple_input(\"Is {0} the correct starting source? 
\".format(source), ['YES', 'NO', 'exit'])\n if valid_1 == 'exit':\n return False, None\n # TODO need path for valid_2\n else:\n valid_1 = simple_input(\"Which code set do you want to start with?\", _avail)\n if valid_1 != 'exit':\n _dict_opts.remove(valid_1) # Don't lookup what we've already got\n valid_2 = simple_input(\"Which code set do you want to get results for?\", _dict_opts)\n if valid_2 == 'exit':\n return False, None\n else:\n return False, None\n\n elif cmd_call == 'errors':\n _current_err = list(self.errors.keys())\n if len(args) > 1:\n smores_error('#Cx001.7', console_p=True)\n return\n elif len(args) == 1 and args[0].lower() in _current_err:\n valid_1 = args[0]\n elif len(args) == 1:\n print('There are currently no errors logged for that command.')\n return\n else:\n valid_1 = simple_input(\"Please choose a command from the list to see errors: \", _current_err)\n\n elif cmd_call in ['csv', 'remap', 'fhir', 'json']:\n # Format: [File] [Output]\n if not self.inputs['loaded']:\n print(\"No Files Loaded!\\nYou Must load a file containing local medications first\")\n else:\n _file_opts = list(self.inputs['files'].keys()) + ['All']\n _dict_opts = list(smores.get_dict_sources(True)) #+ ['All']\n _file_or_dict = None\n\n if cmd_call in ['csv', 'json']:\n if len(args) == 0:\n _file_or_dict = simple_input(\"Do you want results for a File or a constructed Dictionary?\",\n ['File', 'Dictionary', 'exit'], True)\n elif args[0] not in _file_opts and args[0] not in _dict_opts:\n print('That option was not recognized as a valid source.')\n _file_or_dict = simple_input(\"Do you want results for a File or a constructed Dictionary?\",\n ['File', 'Dictionary', 'exit'], True)\n else:\n valid_1 = args[0]\n\n if _file_or_dict.upper() == 'FILE':\n valid_1 = 'FILE|' + simple_input(\"Please choose a loaded file\", _file_opts, True)\n\n elif _file_or_dict.upper() == 'DICTIONARY':\n valid_1 = 'DICT|' + simple_input(\"Please choose a code dictionary to output\", _dict_opts, True)\n elif _file_or_dict.upper() == 'EXIT':\n return None, None\n\n else:\n valid_1 = simple_input(\"Please choose a loaded file\", _file_opts, True)\n\n if cmd_call in ['csv', 'json', 'fhir']:\n if len(args) == 2 and len(args[1]) > 0:\n valid_2 = args[1]\n else:\n valid_2= input(\"Please provide an output file name:\").strip()\n\n if len(valid_2) > 0:\n if \".\" in valid_2:\n valid_2, ext = valid_2.split(\".\")\n else:\n valid_2 = ''\n print('Empty file name provided, using default.')\n else:\n valid_2 = args[0]\n\n elif cmd_call == 'file':\n re_use = False\n if self.inputs['loaded'] and len(in_args) == 0:\n print(\"The following file(s) have already been loaded: \\n\" + str(self.inputs['files']))\n _load_more = simple_input(\"Would you like to load an additional file?\", ['Y', 'N', 'exit'])\n if _load_more == 'Y':\n pass\n elif _load_more == 'N':\n _re_use = simple_input(\"Would you like to re-use a loaded file?\", ['Y', 'N', 'exit'])\n if _re_use == 'Y':\n re_use = True\n else:\n return False, None\n else:\n return False, None\n\n if in_args is not None and len(in_args) > 0:\n valid_1 = in_args\n else:\n valid_1 = input(\"Please enter the name of the file to load: \") if not re_use else simple_input(\n 'Select the file to be used: ', list(self.inputs['files'].keys()), index=True)\n\n while True:\n if valid_1 in self.inputs['files']:\n if not re_use:\n print(\"It looks like you've already loaded that file. 
Please try a different file.\")\n valid_1, valid_2 = input(\"Please enter the name of the file to load: \")\n else:\n break\n elif len(valid_1) == 0:\n smores_error('#Cx001.7', logger=smoresLog)\n valid_1, valid_2 = input(\"Please enter the name of the file to load: \")\n else:\n break\n\n if not resolve_target_path(valid_1):\n valid_1, valid_2 = self.validate_args('', 'file')\n\n elif '.smr' in valid_1:\n if len(self.inputs['files']) > 0:\n print(\n 'It looks like you are trying to load a session, this will replace the current session and '\n 'all previous work.')\n _save = simple_input('Do you want to save the current session first?', ['Y', 'N', 'EXIT'])\n if _save == 'Y':\n smores.save_session(self.__version__)\n elif _save == 'EXIT':\n return False, None\n valid_2 = 'session'\n else:\n valid_2 = 'file'\n\n smoresLog.debug('Args: {0}, Validated as: {1}'.format(valid_1, valid_2))\n return valid_1, valid_2",
"def GetNativeOutputInfo(\r\n is_struct=False,\r\n featurizer_name=\"\",\r\n ):\r\n raise Exception(\"Abstract method\")",
"def _get_request_parser(self, operation):\n\n wpsrequest = self\n\n def parse_get_getcapabilities(http_request):\n \"\"\"Parse GET GetCapabilities request\n \"\"\"\n\n acceptedversions = _get_get_param(http_request, 'acceptversions')\n wpsrequest.check_accepted_versions(acceptedversions)\n\n def parse_get_describeprocess(http_request):\n \"\"\"Parse GET DescribeProcess request\n \"\"\"\n version = _get_get_param(http_request, 'version')\n wpsrequest.check_and_set_version(version)\n\n language = _get_get_param(http_request, 'language')\n wpsrequest.check_and_set_language(language)\n\n wpsrequest.identifiers = _get_get_param(\n http_request, 'identifier', aslist=True)\n\n def parse_get_execute(http_request):\n \"\"\"Parse GET Execute request\n \"\"\"\n version = _get_get_param(http_request, 'version')\n wpsrequest.check_and_set_version(version)\n\n language = _get_get_param(http_request, 'language')\n wpsrequest.check_and_set_language(language)\n\n wpsrequest.identifier = _get_get_param(http_request, 'identifier')\n wpsrequest.store_execute = _get_get_param(\n http_request, 'storeExecuteResponse', 'false')\n wpsrequest.status = _get_get_param(http_request, 'status', 'false')\n wpsrequest.lineage = _get_get_param(\n http_request, 'lineage', 'false')\n wpsrequest.inputs = get_data_from_kvp(\n _get_get_param(http_request, 'DataInputs'), 'DataInputs')\n wpsrequest.outputs = {}\n\n # take responseDocument preferably\n resp_outputs = get_data_from_kvp(\n _get_get_param(http_request, 'ResponseDocument'))\n raw_outputs = get_data_from_kvp(\n _get_get_param(http_request, 'RawDataOutput'))\n wpsrequest.raw = False\n if resp_outputs:\n wpsrequest.outputs = resp_outputs\n elif raw_outputs:\n wpsrequest.outputs = raw_outputs\n wpsrequest.raw = True\n # executeResponse XML will not be stored and no updating of\n # status\n wpsrequest.store_execute = 'false'\n wpsrequest.status = 'false'\n\n if not operation:\n raise MissingParameterValue('Missing request value', 'request')\n else:\n self.operation = operation.lower()\n\n if self.operation == 'getcapabilities':\n return parse_get_getcapabilities\n elif self.operation == 'describeprocess':\n return parse_get_describeprocess\n elif self.operation == 'execute':\n return parse_get_execute\n else:\n raise OperationNotSupported(\n 'Unknown request %r' % self.operation, operation)",
"def _process_operation(operation_pb):\n match = _OPERATION_NAME_RE.match(operation_pb.name)\n if match is None:\n raise ValueError('Operation name was not in the expected '\n 'format after instance creation.',\n operation_pb.name)\n location_id = match.group('location_id')\n operation_id = int(match.group('operation_id'))\n\n request_metadata = _parse_pb_any_to_native(operation_pb.metadata)\n operation_begin = _pb_timestamp_to_datetime(\n request_metadata.request_time)\n\n return operation_id, location_id, operation_begin",
"def test_decoding_method(self):\n data = service_call.encode_call(\"foo\", [42])\n name, params = service_call.decode_call(data)\n\n self.assertEqual(name, \"foo\")\n self.assertEqual(params, [42])",
"def GetInput(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def _extract_commands(self, data):\n version = data[6]\n if version > 1:\n raise CarError('only version 1 is supported')\n if data[8] == 1:\n if self.inputastext is None:\n self.inputastext = True\n if data[9] == 1:\n if self.outputastext is None:\n self.outputastext = True\n data = data[10:]\n data = struct.unpack('<' + 'I' * (len(data) // 4), data)\n commands = tuple((data[i], data[i + 1]) for i in range(3, len(data), 2))\n for x, a in filter(lambda x: x[0] in (GOTO, IF), commands):\n if a >= len(commands):\n raise CarError('code position out of scope')\n return commands, data[:3]",
"def traffic_statuscodes_requestresponsetype(self, **kwargs):\n url_path = 'traffic/statuscodes/requestresponsetype'\n self.logger.debug(f\"Get list of request-response types\")\n body = self._make_body(kwargs)\n return self._common_get(request_path=url_path, parameters=body)",
"def function_response_types(self) -> pulumi.Output[Optional[Sequence['EventSourceMappingFunctionResponseTypesItem']]]:\n return pulumi.get(self, \"function_response_types\")",
"def pre_get_operation(\n self,\n request: operations_pb2.GetOperationRequest,\n metadata: Sequence[Tuple[str, str]],\n ) -> Tuple[operations_pb2.GetOperationRequest, Sequence[Tuple[str, str]]]:\n return request, metadata"
] | [
"0.5174658",
"0.5018817",
"0.4834404",
"0.47635424",
"0.4631196",
"0.46073565",
"0.45646647",
"0.45536888",
"0.4535534",
"0.45211482",
"0.44949257",
"0.4465704",
"0.44512537",
"0.4426451",
"0.44040138",
"0.44033703",
"0.43914264",
"0.43877032",
"0.4385859",
"0.43828747",
"0.43793657",
"0.43721932",
"0.43482167",
"0.43448585",
"0.43419588",
"0.4333053",
"0.43312353",
"0.43153423",
"0.430005",
"0.4298266"
] | 0.73895764 | 0 |
namespaces typecodes representing global elements with literal encoding. typeCode typecode representing an element. namespaceURI namespace literal True/False | def _globalElement(self, typeCode, namespaceURI, literal):
if literal:
typeCode.oname = '%(prefix)s:%(name)s xmlns:%(prefix)s="%(namespaceURI)s"' \
%{'prefix':self._getPrefix(namespaceURI), 'name':typeCode.oname, 'namespaceURI':namespaceURI} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def XmlTypeNamespace(self) -> str:",
"def is_namespace_type(self):\n raise exceptions.NotImplementedError()",
"def GetNamespaces(self):\n return list(self.type_namespaces_map.values())",
"def element_type(self) -> global___Type:",
"def patch_well_known_namespaces(etree_module):\n etree_module._namespace_map.update({\n \"http://www.w3.org/1999/02/22-rdf-syntax-ns#\": \"rdf\", \n \"http://purl.org/rss/1.0/\": \"rss\", \n \"http://purl.org/rss/1.0/modules/taxonomy/\": \"taxo\", \n \"http://purl.org/dc/elements/1.1/\": \"dc\", \n \"http://purl.org/rss/1.0/modules/syndication/\": \"syn\", \n \"http://www.w3.org/2003/01/geo/wgs84_pos#\": \"geo\"})",
"def _AppIdNamespaceKindForKey(self, key):\n last_path = key.path().element_list()[-1]\n return (datastore_types.EncodeAppIdNamespace(key.app(), key.name_space()),\n last_path.type())",
"def hasNamespaceURI(self, *args):\n return _libsbml.XMLToken_hasNamespaceURI(self, *args)",
"def GetNamespace(self, namespace_name):\n return self.type_namespaces_map.get(namespace_name, None)",
"def visit_Typedef(self, node):\n return str_node(node)",
"def test_namespaceFound(self):\n xp = XPathQuery(\"/foo[@xmlns='testns']/bar\")\n self.assertEqual(xp.matches(self.e), 1)",
"def namespaces(self) -> NamespacesType:\n return self.schema.namespaces",
"def getEnumerationTypeXmlStub (typeName): \n\tsimpleType = createSchemaElement(\"simpleType\")\n\tsimpleType.setAttribute (\"name\",typeName)\n\trestriction = createSchemaElement(\"restriction\")\n\trestriction.setAttribute (\"base\", qp(\"token\"))\n\tsimpleType.appendChild (restriction)\n\treturn simpleType",
"def _getTypeClass(self, namespaceURI, localName):\r\n bti = BaseTypeInterpreter()\r\n simpleTypeClass = bti.get_typeclass(localName, namespaceURI)\r\n return simpleTypeClass",
"def XmlTypeName(self) -> str:",
"def XmlNamespace(self) -> str:",
"def getNamespaceURI(self, *args):\n return _libsbml.XMLToken_getNamespaceURI(self, *args)",
"def translate_custom_types(self):\n\n\t\t# Preparing variables\n\t\ta_residue_names = self.a_atoms[\"residue_name\"]\t\t# Loads the names of residues\n\t\ta_atom_name = self.a_atoms[\"atom_name\"]\t\t# Loads the names of the atoms\n\t\ta_atom_symbol = self.a_atoms[\"element_symbol\"]\t\t# Loads the elements symbols\n\t\tl_s_custom_types = []\t\t# Contains the list of converted types\n\t\td_translate_custom = {\t\t# Conversion dictionary for custom types\n\t\t\t\"O\": \"OC\",\n\t\t\t\"H\": \"H\",\n\t\t\t\"N\": \"NAM\",\n\t\t\t\"C\": \"XOT\",\n\t\t\t\"CA\": \"XOT\",\n\t\t\t\"CB\": \"XOT\",\n\t\t\t\"OXT\": \"XOT\"\n\t\t}\n\n\t\t# STEP 1 : Converting the atom types ---------------- #\n\t\t# For each element to convert\n\t\tfor i_element in range(len(a_residue_names)):\n\n\t\t\t# If the residue is one of the main amino acids\n\t\t\tif a_residue_names[i_element] in elem_config.RES:\n\n\t\t\t\t# Hydrogen\n\t\t\t\tif a_atom_symbol[i_element] == \"H\":\n\t\t\t\t\ts_custom_type = \"H\"\n\n\t\t\t\t# If the atom is one of the main carbon chain\n\t\t\t\telif a_atom_name[i_element] in d_translate_custom.keys():\n\t\t\t\t\ts_custom_type = d_translate_custom[a_atom_name[i_element]]\n\n\t\t\t\t# Nitrogen in Arginine\n\t\t\t\telif a_residue_names[i_element] == \"ARG\" and a_atom_name[i_element] in elem_config.NARG[a_residue_names[i_element]]:\n\t\t\t\t\ts_custom_type = \"NBAS\"\n\n\t\t\t\t# Carbon SP2 in aromatic ring\n\t\t\t\telif a_residue_names[i_element] in elem_config.CAR.keys() and a_atom_name[i_element] in elem_config.CAR[a_residue_names[i_element]]:\n\t\t\t\t\ts_custom_type = \"CAR\"\n\n\t\t\t\t# Oxygen in hydroxyl or phenol\n\t\t\t\telif a_residue_names[i_element] in elem_config.OHY.keys() and a_atom_name[i_element] == elem_config.OHY[a_residue_names[i_element]]:\n\t\t\t\t\ts_custom_type = \"OH\"\n\n\t\t\t\t# Nitrogen in amide\n\t\t\t\telif a_residue_names[i_element] in elem_config.NAM.keys() and a_atom_name[i_element] == elem_config.NAM[a_residue_names[i_element]]:\n\t\t\t\t\ts_custom_type = \"NAM\"\n\n\t\t\t\t# Nitrogen in Histidine\n\t\t\t\telif a_residue_names[i_element] in elem_config.NHIS.keys() and a_atom_name[i_element] in elem_config.NHIS[a_residue_names[i_element]]:\n\t\t\t\t\ts_custom_type = \"NBAS\"\n\n\t\t\t\t# Central carbon from ARG, GLN, GLU, ASP, ASN\n\t\t\t\telif a_residue_names[i_element] in elem_config.CE.keys() and elem_config.CE[a_residue_names[i_element]] == a_atom_name[i_element]:\n\t\t\t\t\ts_custom_type = \"CAR\"\n\n\t\t\t\t# Oxygen in carbonyl\n\t\t\t\telif a_residue_names[i_element] in elem_config.OC.keys() and a_atom_name[i_element] == elem_config.OC[a_residue_names[i_element]]:\n\t\t\t\t\ts_custom_type = \"OC\"\n\n\t\t\t\t# Oxygen in carboxylate and oxygen in C-terminal\n\t\t\t\telif a_residue_names[i_element] in elem_config.OOX.keys() and \\\n\t\t\t\t\t\t(a_atom_name[i_element] == elem_config.OOX[a_residue_names[i_element]][0] or\n\t\t\t\t\t\t a_atom_name[i_element] == elem_config.OOX[a_residue_names[i_element]][1]):\n\t\t\t\t\ts_custom_type = \"OOX\"\n\n\t\t\t\t# Nitrogen in Lysine\n\t\t\t\telif a_residue_names[i_element] in elem_config.NLYS.keys() and a_atom_name[i_element] == elem_config.NLYS[a_residue_names[i_element]]:\n\t\t\t\t\ts_custom_type = \"NBAS\"\n\n\t\t\t\t# Unknown element within a amino acid\n\t\t\t\telse:\n\t\t\t\t\ts_custom_type = \"XOT\"\n\t\t\t# End if\n\n\t\t\t# If the element is a metallic atom\n\t\t\telif a_atom_symbol[i_element] in elem_config.METAL:\n\t\t\t\ts_custom_type = \"META\"\n\n\t\t\t# If the element is a halogen\n\t\t\telif 
a_atom_symbol[i_element] in elem_config.HALO:\n\t\t\t\ts_custom_type = \"HALO\"\n\n\t\t\t# If the element is a water molecule\n\t\t\telif a_residue_names[i_element] == \"HOH\" and a_atom_name[i_element] == \"O\":\n\t\t\t\ts_custom_type = \"OOW\"\n\n\t\t\t# If the element is not known\n\t\t\telse:\n\n\t\t\t\t# If the element can be converted\n\t\t\t\tif a_atom_symbol[i_element] in d_translate_custom.keys():\n\t\t\t\t\ts_custom_type = d_translate_custom[a_atom_symbol[i_element]]\n\n\t\t\t\t# If it cannot\n\t\t\t\telse:\n\t\t\t\t\ts_custom_type = \"HETATM\"\n\t\t\t# End if\n\n\t\t\tl_s_custom_types.append(s_custom_type)\t\t# Saves the new element type\n\t\t# End for\n\t\t# END STEP 1 ---------------------------------------- #\n\n\t\t# STEP 2 : Saving the list of custom types ---------- #\n\t\tself.a_atoms[\"custom_type\"] = l_s_custom_types\t\t# Saves the list of custom types\n\t\t# END STEP 2 ---------------------------------------- #",
"def header_hook(header, data):\n\n for e in header.enums:\n e[\"x_namespace\"] = e[\"namespace\"]",
"def namespace_for(uri: Union[URIRef, Namespace, str]) -> str:\n uri = str(uri)\n if uri not in namespaces.values():\n namespaces[AnonNS().ns] = uri\n return [k for k, v in namespaces.items() if uri == v][0]",
"def hasNamespaceNS(self, *args):\n return _libsbml.XMLToken_hasNamespaceNS(self, *args)",
"def _getElement(self, element, literal=False, local=False, namespaceURI=None):\r\n if not element.isElement():\r\n raise TypeError, 'Expecting an ElementDeclaration'\r\n\r\n tc = None\r\n elementName = element.getAttribute('name')\r\n tp = element.getTypeDefinition('type')\r\n\r\n typeObj = None\r\n if not (tp or element.content):\r\n nsuriType,localName = element.getAttribute('type')\r\n typeClass = self._getTypeClass(nsuriType,localName)\r\n \r\n typeObj = typeClass(elementName)\r\n elif not tp:\r\n tp = element.content\r\n\r\n if not typeObj:\r\n typeObj = self._getType(tp, elementName, literal, local, namespaceURI)\r\n\r\n minOccurs = int(element.getAttribute('minOccurs'))\r\n typeObj.optional = not minOccurs\r\n\r\n maxOccurs = element.getAttribute('maxOccurs')\r\n typeObj.repeatable = (maxOccurs == 'unbounded') or (int(maxOccurs) > 1)\r\n\r\n return typeObj",
"def xmlrpc_namespace():",
"def test_xml_to_dict_net_namespace(self):\n xml = \"\"\"\n <a\n xmlns=\"urn:ietf:params:xml:ns:netconf:base:1.0\"\n >\n <b xmlns=\"something\">b</b>\n <!-- Comment, ignore it -->\n </a>\n \"\"\"\n xmlns = {\n \"_\": utils.NETCONF_NAMESPACE\n }\n result = utils.generate_dict_node(etree.XML(xml), xmlns)\n # check dict\n self.assertEqual(\n {'a': {'_something@b': 'b'}},\n result\n )\n # check xmlns\n self.assertEqual(\n {\n '_': utils.NETCONF_NAMESPACE,\n '_something': 'something'\n }, xmlns\n )",
"def __init__ (self, ns_or_tagraw, pred=None, value=None) :\n self.__namespace__ = None\n self.__predicate__ = None\n self.__value__ = None\n self.__ismachinetag__ = False\n self.__isnumeric__ = False\n\n if pred :\n\n re_nspred = re.compile(r\"^([a-z](?:[a-z0-9_]+))$\", re.IGNORECASE)\n\n if re_nspred.match(ns_or_tagraw) and re_nspred.match(pred) and value :\n self.__namespace__ = ns_or_tagraw\n self.__predicate__ = pred\n self.__value__ = value\n else :\n\n re_tag = re.compile(r\"^([a-z](?:[a-z0-9_]+))\\:([a-z](?:[a-z0-9_]+))\\=(.+)$\", re.IGNORECASE)\n m = re_tag.findall(ns_or_tagraw)\n\n if m :\n self.__namespace__ = m[0][0]\n self.__predicate__ = m[0][1]\n self.__value__ = m[0][2]\n\n if self.__namespace__ and self.__predicate__ and self.__value__ :\n self.__ismachinetag__ = True\n\n valtype = type(self.__value__)\n\n if valtype == types.IntType or valtype == types.FloatType :\n self.__isnumeric__ = True\n else :\n re_num = re.compile(r\"^-?\\d+(\\.\\d+)?$\", re.IGNORECASE)\n m = re_num.findall(self.__value__)\n\n if m :\n\n self.__isnumeric__ = True\n self.__value__ = unicode(self.__value__)\n\n if m[0] :\n self.__value_numeric__ = float(self.__value__)\n else :\n self.__value_numeric__ = int(self.__value__)",
"def test_getLocalType(self):\n cases = [\n (self.test_eac + \"NE00800.xml\", \"Archival Series\"),\n (self.test_eac + \"NE00916.xml\", \"Archival Collection\"),\n (self.test_eac + \"NE01201.xml\", \"Person\"),\n (self.test_eac + \"NE01000.xml\", \"Glossary Term\"),\n ]\n for case in cases:\n source, expected = case\n doc = EacCpf.EacCpf(source,'http://www.example.com')\n self.assertNotEqual(doc, None)\n result = doc.getLocalType()\n self.assertEqual(result, expected)",
"def hasURI(self, *args):\n return _libsbml.XMLNamespaces_hasURI(self, *args)",
"def getNamespace(self):\n pass;",
"def idl_type(field, namespace):\n\n out = ''\n if field.is_map:\n out = 'map <{0},'.format(idl_type(field.map_key, namespace))\n\n if field.is_array:\n out += 'repeated '\n\n if field.data_type in (schema.Field.DataType.STRUCT,\n schema.Field.DataType.ENUM):\n out += field.metadata.full_name.replace(namespace, '').strip('.')\n else:\n out += field.data_type.value\n\n if field.is_map:\n out += '>'\n\n return out",
"def namespaces(self):\n return ()",
"def getNamespace(self, parent: ghidra.program.model.symbol.Namespace, namespaceName: unicode) -> ghidra.program.model.symbol.Namespace:\n ..."
] | [
"0.66442066",
"0.5593534",
"0.5443724",
"0.5412149",
"0.5365882",
"0.5329653",
"0.5311929",
"0.5237586",
"0.5178215",
"0.5165827",
"0.5055696",
"0.5044016",
"0.5007665",
"0.4926739",
"0.48959085",
"0.48641986",
"0.48631665",
"0.4855509",
"0.48434836",
"0.48183277",
"0.4792356",
"0.47862798",
"0.47404408",
"0.47294393",
"0.4723793",
"0.47111255",
"0.4702394",
"0.46919808",
"0.46903983",
"0.46584937"
] | 0.6797826 | 0 |
Retrieves a prefix/namespace mapping. namespaceURI: namespace | def _getPrefix(self, namespaceURI):
prefixDict = self._getPrefixDict()
if prefixDict.has_key(namespaceURI):
prefix = prefixDict[namespaceURI]
else:
prefix = 'ns1'
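            # probe successive nsN prefixes until one that is not already in use is found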
while prefix in prefixDict.values():
                prefix = 'ns%d' % (int(prefix[-1]) + 1)
prefixDict[namespaceURI] = prefix
return prefix | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def prefix_to_ns(self, prefix):\n defin = self.module.i_ctx.get_module(\n self.module.i_prefixes[prefix][0])\n return defin.search_one(\"namespace\").arg",
"def get_namespace(self, prefix):\n try:\n return self.parser.namespaces[prefix]\n except KeyError as err:\n raise self.error('FONS0004', 'No namespace found for prefix %s' % str(err))",
"def namespace_for(uri: Union[URIRef, Namespace, str]) -> str:\n uri = str(uri)\n if uri not in namespaces.values():\n namespaces[AnonNS().ns] = uri\n return [k for k, v in namespaces.items() if uri == v][0]",
"def getNamespacePrefix(self, namespace):\n return self.namespaceTable.get(namespace, None)",
"def ns_prefix_dict(g):\n return {ns: prefix.toPython() for (ns, prefix) in g.namespaces()}",
"def prefixForNamespace (self, namespace):\n pfxs = self.__inScopePrefixes.get(namespace)\n if pfxs:\n return next(iter(pfxs))\n return None",
"def getNamespacePrefixDict(xmlString):\n \n nss = {} \n defCnt = 0\n matches = re.findall(r'\\s+xmlns:?(\\w*?)\\s*=\\s*[\\'\"](.*?)[\\'\"]', xmlString)\n for match in matches:\n prefix = match[0]; ns = match[1]\n if prefix == '':\n defCnt += 1\n prefix = '_' * defCnt\n nss[prefix] = ns\n return nss",
"def getPrefix(self, *args):\n return _libsbml.XMLNamespaces_getPrefix(self, *args)",
"def getNamespacePrefix(self, *args):\n return _libsbml.XMLToken_getNamespacePrefix(self, *args)",
"def namespace_map(self, target):\n self._check_target(target)\n return target.namespace_map or self._default_namespace_map",
"def namespaces(self):\n return [self._namespace_prefix]",
"def from_ns(match):\n return ns.get(match.group(1), match.group())",
"def GetNamespace(self, namespace_name):\n return self.type_namespaces_map.get(namespace_name, None)",
"def _get_prefixes(self):\n return self._dispatch_json(\"get\", self._db_base(\"prefixes\")).get(\"@context\")",
"def namespace(self):\n return VarLookupDict(self._namespaces)",
"def get_prefixes(context: str = \"go\"):\n context = load_context(context)\n extended_prefix_map = context.as_extended_prefix_map()\n converter = Converter.from_extended_prefix_map(extended_prefix_map)\n cmaps = converter.prefix_map\n # hacky solution to: https://github.com/geneontology/go-site/issues/2000\n cmap_remapped = remap_prefixes(cmaps)\n\n return cmap_remapped",
"def get_namespace(self, namespace, lowercase=True, trim_namespace=True):\n\t\treturn self.get_namespace_view(namespace, lowercase, trim_namespace).copy()",
"def getNamespaceIndexByPrefix(self, *args):\n return _libsbml.XMLToken_getNamespaceIndexByPrefix(self, *args)",
"def xpathNsLookup(self, prefix):\n ret = libxml2mod.xmlXPathNsLookup(self._o, prefix)\n return ret",
"def namespace(self, namespace):\n return self.client.call('GET',\n self.name, params={'namespace': namespace})",
"def _getnamespaces(cls):\n return \" \".join(Kmlable._namespaces)",
"def SBMLNamespaces_getSBMLNamespaceURI(*args):\n return _libsbml.SBMLNamespaces_getSBMLNamespaceURI(*args)",
"def getNamespaces(self, *args):\n return _libsbml.SBMLNamespaces_getNamespaces(self, *args)",
"def getIndexByPrefix(self, *args):\n return _libsbml.XMLNamespaces_getIndexByPrefix(self, *args)",
"def LookupNamespace(self, prefix):\n ret = libxml2mod.xmlTextReaderLookupNamespace(self._o, prefix)\n return ret",
"def namespace(self):\n return self.__key.namespace()",
"def qname_to_prefixed(qname, namespaces):\n if not qname:\n return qname\n\n namespace = get_namespace(qname)\n for prefix, uri in sorted(filter(lambda x: x[1] == namespace, namespaces.items()), reverse=True):\n if not uri:\n return '%s:%s' % (prefix, qname) if prefix else qname\n elif prefix:\n return qname.replace('{%s}' % uri, '%s:' % prefix)\n else:\n return qname.replace('{%s}' % uri, '')\n else:\n return qname",
"def namespaces(self, psuedo=True):\n if self._namespaces == None:\n result = self.call({'action': 'query',\n 'meta': 'siteinfo',\n 'siprop': 'namespaces'})\n self._namespaces = {}\n self._psuedo_namespaces = {}\n for nsid in result['query']['namespaces']:\n if int(nsid) >= 0:\n self._namespaces[int(nsid)] = \\\n result['query']['namespaces'][nsid]['*']\n else:\n self._psuedo_namespaces[int(nsid)] = \\\n result['query']['namespaces'][nsid]['*']\n if psuedo:\n retval = {}\n retval.update(self._namespaces)\n retval.update(self._psuedo_namespaces)\n return retval\n else:\n return self._namespaces",
"def get_ns_dict(xml):\n \n nss = {} \n def_cnt = 0\n matches = re.findall(r'\\s+xmlns:?(\\w*?)\\s*=\\s*[\\'\"](.*?)[\\'\"]', xml)\n for match in matches:\n prefix = match[0]; ns = match[1]\n if prefix == '':\n def_cnt += 1\n prefix = '_' * def_cnt\n nss[prefix] = ns\n return nss",
"def prefixes(self):\n # a new OntCuries-like object that wraps NamespaceManager\n # and can leverage its trie\n self.namespace_manager\n raise NotImplementedError('yet')"
] | [
"0.74894905",
"0.723001",
"0.7178781",
"0.7092518",
"0.7036941",
"0.6799343",
"0.67157125",
"0.67054284",
"0.66477394",
"0.6579663",
"0.64730036",
"0.646861",
"0.6420464",
"0.64164484",
"0.64045894",
"0.63432497",
"0.63370234",
"0.63120365",
"0.63070714",
"0.62537974",
"0.62380385",
"0.6221364",
"0.621491",
"0.6214344",
"0.62062514",
"0.6199084",
"0.61269677",
"0.6110802",
"0.60916305",
"0.60792"
] | 0.78372264 | 0 |
Clears the prefix dictionary; this needs to be done before creating a new typecode for a message (i.e. before and after creating a new message typecode) | def _resetPrefixDict(self):
self._getPrefixDict().clear() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def reset(self):\n self.footnotes = OrderedDict()\n self.unique_prefix += 1",
"def remove_prefix(self, state_dict, prefix):\n print('remove prefix \\'{}\\''.format(prefix))\n f = lambda x: x.split(prefix, 1)[-1] if x.startswith(prefix) else x # 去除带有prefix的名字\n return {f(key): value for key, value in state_dict.items()}",
"def reset (self):\n self.__inScopeNamespaces = self.__initialScopeNamespaces\n self.__inScopePrefixes = self.__initialScopePrefixes\n self.__mutableInScopeNamespaces = False\n self.__namespacePrefixCounter = 0",
"def clear(self, prefix=PREFIX):\n for key in self.get_keys():\n # delete files in folder by not actual folder\n if key.startswith(prefix) and prefix + \"/\" != key:\n self.delete(key)",
"def clear(self) -> None:\n # Delete these so the .by_class/name values are cleared.\n self['classname'] = 'info_null'\n del self['targetname']\n self._keys.clear()\n # Clear $fixup as well.\n self._fixup = None",
"def clear_headers(self):\r\n\r\n # Remove things from the old dict as well\r\n self.reply_headers.clear()\r\n\r\n self.__reply_header_list[:] = []",
"def clear(self):\n self._pkcache = {}\n self._typecache = defaultdict(dict)\n self.init()",
"def clear(self):\n \n self.node_set.clear()\n self.prefix.clear()\n self.suffix.clear()\n self.num_node = 0\n self.edges = 0",
"def clear_address(self): #DONE\n for component_name in self.__keys:\n self.address[component_name] = Component(component_name, '')",
"def remove_prefix(self, state_dict, prefix):\n return {\n (lambda x: x.split(prefix, 1)[-1] if x.startswith(prefix) else x)(\n key\n ): value\n for key, value in state_dict.items()\n }",
"def test_ipam_prefixes_delete(self):\n pass",
"def clear(self):\n self.root = _NGramMapNode()\n self.size_freqs = dict()\n self.ele_freqs = dict()",
"def clear_keymap(self):\n self.keymap = {}",
"def clear(self):\n dict.clear(self)\n self._sequence = []",
"def clear(self):\n self._map = {}",
"def empty_prefix(self):\r\n raise NotImplementedError()",
"def clear():\r\n CURRENT_REQUEST_CONFIGURATION.data = {}",
"def reset(self):\n self._keyCode = \"\"\n self._keyCodeCount = 0\n self._keyCodeTime = 0.0",
"def empty_prefix():\n if not hasattr(CompletionElement, \"static_empty_prefix\"):\n res = CompletionElement(\"\", None)\n res.mks0 = res.mks1 = res.mks2 = 0\n res.mks0_ = res.mks1_ = res.mks2_ = 0\n CompletionElement.static_empty_prefix = res\n return res\n else:\n return CompletionElement.static_empty_prefix",
"def reset(self):\n self._maps = {}",
"def reset(self):\n self.cardinality = 0\n self.sax_character = 0\n self.wildcardbits = 0",
"def del_prefix(self, index):\n del self.bytes[:index]",
"def clear(self):\n for key in self.keys():\n del self[key]",
"def invalidate_key_group(self, prefix):\r\n self.add(prefix, 0)\r\n self.incr(prefix)",
"def clear(self):\n\n self.size = 0\n\n self.table = [[]] * 100\n\n self.keys_set = set()\n\n self.keys_ref = [[]] * 100",
"def reset(self):\n self.det_link_map = OrderedDict()\n self.id_link_map = OrderedDict()\n self.declarations_table = None\n self.annotations_table = None\n self.num_frames = 0\n self.num_frames_by_uid = {}\n self.num_frames_by_uid_pre_remove = {}",
"def clear(self) :\n self.__dict__ = {}",
"def clear_nastran(self):\n self.eid_map = {}\n self.nid_map = {}\n self.eid_to_nid_map = {}\n self.element_ids = None\n self.node_ids = None",
"def reset(self):\n for k in self.data_keys:\n setattr(self, k, [])\n self.size = 0",
"def clearMap(self):\n for key in self.componentMap.keys():\n del self.componentMap[key][:]"
] | [
"0.661214",
"0.6500577",
"0.6333235",
"0.60439557",
"0.6028406",
"0.60041296",
"0.5998321",
"0.59930265",
"0.59643847",
"0.59600914",
"0.59294903",
"0.5922057",
"0.5883665",
"0.5853302",
"0.58453923",
"0.5836675",
"0.5826535",
"0.5804847",
"0.5762894",
"0.5750483",
"0.5747566",
"0.57125735",
"0.56730354",
"0.5659652",
"0.5644342",
"0.5637284",
"0.5634939",
"0.56243587",
"0.5615651",
"0.5598903"
] | 0.8281239 | 0 |
Returns a typecode instance representing the passed in element. element: XMLSchema.ElementDeclaration instance. literal: literal encoding? local: is locally defined? namespaceURI: namespace | def _getElement(self, element, literal=False, local=False, namespaceURI=None):
if not element.isElement():
raise TypeError, 'Expecting an ElementDeclaration'
tc = None
elementName = element.getAttribute('name')
tp = element.getTypeDefinition('type')
typeObj = None
if not (tp or element.content):
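            # neither a resolved type nor inline content: fall back to the built-in typecode class for the declared XSD type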
nsuriType,localName = element.getAttribute('type')
typeClass = self._getTypeClass(nsuriType,localName)
typeObj = typeClass(elementName)
elif not tp:
tp = element.content
if not typeObj:
typeObj = self._getType(tp, elementName, literal, local, namespaceURI)
minOccurs = int(element.getAttribute('minOccurs'))
typeObj.optional = not minOccurs
maxOccurs = element.getAttribute('maxOccurs')
typeObj.repeatable = (maxOccurs == 'unbounded') or (int(maxOccurs) > 1)
return typeObj | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def element_type(self) -> global___Type:",
"def create_class_instance(element, element_id, doc_id):\n xsi_type = get_xsi_type(element)\n element_class = XSI_TYPE_CLASSES[xsi_type]\n return element_class.from_etree(element)",
"def element_type(self):\r\n result = conf.lib.clang_getElementType(self)\r\n if result.kind == TypeKind.INVALID:\r\n raise Exception('Element type not available on this type.')\r\n\r\n return result",
"def _get_element_type(self, element):\n\n if (self._client == None):\n raise ValueError('Specification is not imported yet')\n\n el_type = None\n for value in self._client.wsdl.schema.types.values():\n if (value.name == element):\n if ('Simple' in value.id):\n el_type = 'Simple'\n elif ('Complex' in value.id):\n el_type = 'Complex'\n break\n\n return el_type",
"def elementDecl(self, name, type, content):\n pass",
"def convert_to_semanticsymbol(cls, elem):\r\n if (len(elem) == 0):\r\n return None\r\n\r\n elem_content = io.StringIO(elem) # treat the string as if a file\r\n root = xml.etree.ElementTree.parse(elem_content).getroot()\r\n\r\n return SemanticSymbol.parse_from_mathml(root)",
"def convert_to_layoutsymbol(cls, elem):\r\n if (len(elem) == 0):\r\n return None\r\n\r\n elem_content = io.StringIO(elem) # treat the string as if a file\r\n root = xml.etree.ElementTree.parse(elem_content).getroot()\r\n ## print(\"parse_from_mathml tree: \" + xml.etree.ElementTree.tostring(root,encoding=\"unicode\"))\r\n return LayoutSymbol.parse_from_mathml(root)",
"def parseTerm(element):\n tag, text = element.tag, element.text\n if tag == RESULTS_NS_ET + 'literal':\n if text is None:\n text = ''\n datatype = None\n lang = None\n if element.get('datatype', None):\n datatype = URIRef(element.get('datatype'))\n elif element.get(\"{%s}lang\" % XML_NAMESPACE, None):\n lang = element.get(\"{%s}lang\" % XML_NAMESPACE)\n\n ret = Literal(text, datatype=datatype, lang=lang)\n\n return ret\n elif tag == RESULTS_NS_ET + 'uri':\n return URIRef(text)\n elif tag == RESULTS_NS_ET + 'bnode':\n return BNode(text)\n else:\n raise TypeError(\"unknown binding type %r\" % element)",
"def __call__(self, node):\n # we assume we know what this type is and raise and catch the key error\n # exception if we don't\n try:\n s = self.lut[node.tagName](node, self)\n except KeyError, e:\n raise DeclareError(e[0], node)\n\n # save this, for use with typedef's later\n self.symbols[s.getType()+s.getName()] = s\n\n return s",
"def schema_elem(self) -> ElementType:\n return self.elem",
"def createNodeElement(_session, _segment, _const):\n return createNode(_session, _segment, _const, \"element\")",
"def FromXML(cls, doc, element, default=\"absolute\"):\n return cls(element.get(\"type\", default), NumberDef(element.text))",
"def get_element_type(cls):\r\n return cls._type_name(cls.element_type)",
"def _project_elem(self, elem, mapping):\r\n\t\tif isinstance(elem, basestring):\r\n\t\t\treturn elem\r\n\t\telif isinstance(elem, xmlmodel.XmlElem):\r\n\t\t\tcls = mapping.get_class_for(elem)\r\n\t\t\tif cls is None:\r\n\t\t\t\traise TypeError, 'Could not determine object class for \\'{0}\\' element for node type {1}'.format(elem.tag, type(self))\r\n\t\t\tif not isinstance(cls, NodeClass):\r\n\t\t\t\tif callable(cls):\r\n\t\t\t\t\tcls = cls()\r\n\t\t\t\telse:\r\n\t\t\t\t\traise TypeError, 'Object class for \\'{0}\\' element for node type {1} is of type {2}, should be a NodeClass or a callable'.format(elem.tag, type(self), type(cls))\r\n\t\t\tnode = self._projection_table.get(elem, cls)\r\n\t\t\tif node is None:\r\n\t\t\t\tnode = cls(self._projection_table, elem)\r\n\t\t\t\tself._projection_table.put(elem, cls, node)\r\n\t\t\t\tnode.node_init()\r\n\t\t\treturn node\r\n\t\telse:\r\n\t\t\traise TypeError, 'elem should be a string or an XmlElem'",
"def _getTypeClass(self, namespaceURI, localName):\r\n bti = BaseTypeInterpreter()\r\n simpleTypeClass = bti.get_typeclass(localName, namespaceURI)\r\n return simpleTypeClass",
"def make_key(element_name, element_type, namespace):\n # only distinguish 'element' vs other types\n if element_type in ('complexType', 'simpleType'):\n eltype = 'complexType'\n else:\n eltype = element_type\n if eltype not in ('element', 'complexType', 'simpleType'):\n raise RuntimeError(\"Unknown element type %s = %s\" % (element_name, eltype))\n return (element_name, eltype, namespace)",
"def getElementSymbol(self):\n dataDict = self.__dict__\n result = None\n return result",
"def visit_Declaration(self, node):\n name = self.name_gen.next()\n extend_ops = self.extend_ops\n self.push_name(name)\n base_code = compile(node.base.py_ast, self.filename, mode='eval')\n extend_ops([\n # f_globals = globals()\n (LOAD_GLOBAL, 'globals'),\n (CALL_FUNCTION, 0x0000),\n (STORE_FAST, 'f_globals'),\n\n # eval_ = eval\n (LOAD_GLOBAL, 'eval'),\n (STORE_FAST, 'eval_'),\n\n # foo_cls = eval('Window', toolkit, f_globals)\n # foo = foo_cls.__enaml_call__(identifiers, toolkit)\n (LOAD_FAST, 'eval_'),\n (LOAD_CONST, base_code),\n (LOAD_FAST, 'toolkit'),\n (LOAD_FAST, 'f_globals'),\n (CALL_FUNCTION, 0x0003),\n (LOAD_ATTR, '__enaml_call__'),\n (LOAD_FAST, 'identifiers'),\n (LOAD_FAST, 'toolkit'),\n (CALL_FUNCTION, 0x0002),\n (STORE_FAST, name),\n ])\n\n if node.identifier:\n extend_ops([\n # identifiers['foo'] = foo\n (LOAD_FAST, name),\n (LOAD_FAST, 'identifiers'),\n (LOAD_CONST, node.identifier),\n (STORE_SUBSCR, None),\n ])\n \n visit = self.visit\n for item in node.body:\n visit(item)\n \n extend_ops([\n # return foo\n (LOAD_FAST, name),\n (RETURN_VALUE, None),\n ])\n\n self.pop_name()",
"def newElement(self,cls,attrib={}):\n elem = cls(**attrib)\n self.setFreeId(elem)\n if cls==Subtoken:\n self.subtokens[elem.id] = elem\n elif cls==DepToken:\n self.deptokens[elem.id] = elem\n elif cls==RelToken:\n self.reltokens[elem.id] = elem\n elif cls==DepEntity:\n self.depentities[elem.id] = elem\n elif cls==RelEntity:\n self.relentities[elem.id] = elem\n else:\n # It is caller responsibility to add elements to the graph\n pass\n \n return(elem)",
"def from_element(cls, elem):\n return cls(elem.attrib['pid'], elem.attrib['name'], elem.text, elem.attrib['tags'])",
"def CreateFromDOM (node, default_namespace=None):\r\n if default_namespace is None:\r\n default_namespace = Namespace.fallbackNamespace()\r\n return pyxb.binding.basis.element.AnyCreateFromDOM(node, default_namespace)",
"def _globalElement(self, typeCode, namespaceURI, literal):\r\n if literal:\r\n typeCode.oname = '%(prefix)s:%(name)s xmlns:%(prefix)s=\"%(namespaceURI)s\"' \\\r\n %{'prefix':self._getPrefix(namespaceURI), 'name':typeCode.oname, 'namespaceURI':namespaceURI}",
"def CreateFromDOM (node, default_namespace=None):\n if default_namespace is None:\n default_namespace = Namespace.fallbackNamespace()\n return pyxb.binding.basis.element.AnyCreateFromDOM(node, default_namespace)",
"def CreateFromDOM (node, default_namespace=None):\n if default_namespace is None:\n default_namespace = Namespace.fallbackNamespace()\n return pyxb.binding.basis.element.AnyCreateFromDOM(node, default_namespace)",
"def CreateFromDOM (node, default_namespace=None):\n if default_namespace is None:\n default_namespace = Namespace.fallbackNamespace()\n return pyxb.binding.basis.element.AnyCreateFromDOM(node, default_namespace)",
"def CreateFromDOM (node, default_namespace=None):\n if default_namespace is None:\n default_namespace = Namespace.fallbackNamespace()\n return pyxb.binding.basis.element.AnyCreateFromDOM(node, default_namespace)",
"def CreateFromDOM (node, default_namespace=None):\n if default_namespace is None:\n default_namespace = Namespace.fallbackNamespace()\n return pyxb.binding.basis.element.AnyCreateFromDOM(node, default_namespace)",
"def CreateFromDOM (node, default_namespace=None):\n if default_namespace is None:\n default_namespace = Namespace.fallbackNamespace()\n return pyxb.binding.basis.element.AnyCreateFromDOM(node, default_namespace)",
"def CreateFromDOM (node, default_namespace=None):\n if default_namespace is None:\n default_namespace = Namespace.fallbackNamespace()\n return pyxb.binding.basis.element.AnyCreateFromDOM(node, default_namespace)",
"def CreateFromDOM (node, default_namespace=None):\n if default_namespace is None:\n default_namespace = Namespace.fallbackNamespace()\n return pyxb.binding.basis.element.AnyCreateFromDOM(node, default_namespace)"
] | [
"0.6112495",
"0.5760717",
"0.54377365",
"0.5435102",
"0.54039854",
"0.5333338",
"0.5304633",
"0.5193657",
"0.51740164",
"0.51256275",
"0.51102036",
"0.50124407",
"0.5008194",
"0.4974934",
"0.49698183",
"0.49652553",
"0.49299234",
"0.4924845",
"0.49117178",
"0.48976466",
"0.48903218",
"0.48894882",
"0.48892117",
"0.48892117",
"0.48892117",
"0.48892117",
"0.48892117",
"0.48892117",
"0.48892117",
"0.48892117"
] | 0.6733242 | 0 |
Returns a typecode class representing the type we are looking for. localName: name of the type we are looking for. namespaceURI: defining XMLSchema targetNamespace. | def _getTypeClass(self, namespaceURI, localName):
bti = BaseTypeInterpreter()
simpleTypeClass = bti.get_typeclass(localName, namespaceURI)
return simpleTypeClass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _find_class(self, class_name: str) -> Type:\n return self.class_resolver.find_class(class_name)",
"def find_class(self, class_name: str) -> Type:\n pass",
"def get_class(self, class_name, output_type=\"PythonClass\"):\n uris = self.cls_converter.get_uri(class_name)\n if type(uris) == list:\n warnings.warn(\"Found more than 1 classes defined within schema using label {}\".format(class_name))\n return [SchemaClass(_item, self, output_type) for _item in uris]\n else:\n return SchemaClass(class_name, self, output_type)",
"def XmlTypeNamespace(self) -> str:",
"def type(self) -> Type[ClassType]:\n return self._type",
"def pyxb_get_type_name(obj_pyxb):\n return pyxb_get_namespace_name(obj_pyxb).split('}')[-1]",
"def astType(cls, source):\n if source == '':\n return cls.BLANK\n if source == \"OPENQASM 2.0;\":\n return cls.DECLARATION_QASM_2_0\n x = QTRegEx.COMMENT.search(source)\n if x:\n return cls.COMMENT\n x = QTRegEx.INCLUDE.search(source)\n if x:\n return cls.INCLUDE\n x = QTRegEx.CTL_2.search(source)\n if x:\n if x.group(1) == 'if':\n return cls.CTL_2\n x = QTRegEx.QREG.search(source)\n if x:\n return cls.QREG\n x = QTRegEx.CREG.search(source)\n if x:\n return cls.CREG\n x = QTRegEx.MEASURE.search(source)\n if x:\n return cls.MEASURE\n x = QTRegEx.BARRIER.search(source)\n if x:\n return cls.BARRIER\n x = QTRegEx.GATE.search(source)\n if x:\n return cls.GATE\n x = QTRegEx.OP.search(source)\n if x:\n return cls.OP\n return cls.UNKNOWN",
"def get_type(self):\n if not self.xmlnode.hasProp(\"type\"):\n self.upgrade()\n return from_utf8(self.xmlnode.prop(\"type\"))",
"def get_type(self, name):\n pkg_name = name.split('.')[0]\n type_name = name.split('.')[1]\n for t in self.types:\n if t.package.name == pkg_name and t.name == type_name:\n return t\n return None",
"def qname(type_):\n # type: (type) -> str\n\n return \"{0.__module__}.{0.__qualname__}\".format(type_)",
"def XmlTypeName(self) -> str:",
"def type(self) -> global___Type:",
"def load_cls(node):\n return node.get_attr(Type).load()",
"def get_type(self) -> str:\n # Note: this name conflicts with existing python builtins\n return self[\"Sns\"][\"Type\"]",
"def GetEntityType(self, namespace_name, typename):\n if namespace_name not in self.type_namespaces_map:\n return None\n return self.type_namespaces_map[namespace_name].GetType(typename)",
"def _declaring_class(obj):\n name = _qualname(obj)\n return name[:name.rfind('.')]",
"def get_type(node):\n # Assume there is only one type inferred\n # If there are multiple types inferred we have to\n # choose which one to pick\n try:\n if len(node.inferred()) > 0:\n ty_infer = node.inferred()[0]\n if isinstance(ty_infer, Module):\n ty = ty_infer.name\n elif isinstance(ty_infer, ClassDef):\n ty = ty_infer.name\n elif isinstance(ty_infer, type(Uninferable)):\n ty = None\n else:\n ty = ty_infer.pytype().replace(\"builtins.\", \"\").lstrip(\".\")\n else:\n ty = None\n except Exception as err:\n ty = None\n\n return ty",
"def typ(rxn_class):\n return rxn_class[0]",
"def create_class_instance(element, element_id, doc_id):\n xsi_type = get_xsi_type(element)\n element_class = XSI_TYPE_CLASSES[xsi_type]\n return element_class.from_etree(element)",
"def get_type_from_string(cls_path: str) -> Type:\n module_name, class_name = cls_path.rsplit(\".\", 1)\n return getattr(import_module(module_name), class_name)",
"def XrefTypeName(typecode):\n assert typecode in _ref_types, \"unknown reference type %d\" % typecode\n return _ref_types[typecode]",
"def GetNamespace(self, namespace_name):\n return self.type_namespaces_map.get(namespace_name, None)",
"def get_typ(self, refobj):\n enum = cmds.getAttr(\"%s.type\" % refobj)\n try:\n return JB_ReftrackNode.types[enum]\n except IndexError:\n raise ValueError(\"The type on the node %s could not be associated with an available type: %s\" %\n (refobj, JB_ReftrackNode.types))",
"def get_class(self, name: str) -> Type:\n if logger.isEnabledFor(logging.DEBUG):\n logger.debug(f'new instance of {name}')\n name = self.default_name if name is None else name\n if logger.isEnabledFor(logging.DEBUG):\n logger.debug(f'creating instance of {name}')\n class_name, params = self._class_name_params(name)\n return self._find_class(class_name)",
"def _PyType_Lookup(space, type, w_name):\n w_type = from_ref(space, rffi.cast(PyObject, type))\n assert isinstance(w_type, W_TypeObject)\n\n if not space.isinstance_w(w_name, space.w_text):\n return None\n name = space.text_w(w_name)\n w_obj = w_type.lookup(name)\n # this assumes that w_obj is not dynamically created, but will stay alive\n # until w_type is modified or dies. Assuming this, we return a borrowed ref\n return w_obj",
"def element_type(self) -> global___Type:",
"def str_to_class(referance_name):\n return getattr(sys.modules[__name__], referance_name)",
"def get_handle_class(handle_class_name: str) -> Type[\"Handle\"]:\n klass = get_type_registry().parse_type_name(handle_class_name)\n return klass",
"def get_type(self, type_name):\n return type_cache.get_type_cache().get_type(type_name, self.target)",
"def type(cls):\n return cls.__name__"
] | [
"0.6334454",
"0.63214386",
"0.621163",
"0.60691124",
"0.5908769",
"0.58564675",
"0.5796145",
"0.5774467",
"0.5729281",
"0.5662747",
"0.5652824",
"0.55890405",
"0.5530385",
"0.5529409",
"0.5522239",
"0.5496328",
"0.54915804",
"0.54823256",
"0.54342365",
"0.5419367",
"0.5410473",
"0.53962886",
"0.5385955",
"0.5384445",
"0.5382902",
"0.5379116",
"0.5373598",
"0.5356813",
"0.5352603",
"0.5352074"
] | 0.7862565 | 0 |
Extracts the features used to calculate neural style cost. gram_style_features: a list of gram matrices calculated from the style layer outputs of the style image. content_feature: the content layer output of the content image | def generate_features(self):
content_input = self.content_image * 255
style_input = self.style_image * 255
preprocessed_content = tf.keras.applications.vgg19.preprocess_input(
content_input)
preprocessed_style = tf.keras.applications.vgg19.preprocess_input(
style_input)
outputs_content = self.model(preprocessed_content)
outputs_style = self.model(preprocessed_style)
num_style_layers = tf.size(self.style_layers)
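        # the model outputs list the style layers first and the content layer(s) last, so split at num_style_layers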
style_outputs, content_outputs = (
outputs_style[:num_style_layers],
outputs_content[num_style_layers:])
style_outputs = [self.gram_matrix(
            style_output) for style_output in style_outputs]
self.gram_style_features = style_outputs
self.content_feature = content_outputs | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_style_image_features(image):\n ### START CODE HERE ###\n # preprocess the image using the given preprocessing function\n preprocessed_style_image = preprocess_image(image)\n\n # get the outputs from the inception model that you created using inception_model()\n outputs = inception(preprocessed_style_image)\n\n # Get just the style feature layers (exclude the content layer)\n style_outputs = outputs[:NUM_STYLE_LAYERS]\n\n # for each style layer, calculate the gram matrix for that layer and store these results in a list\n gram_style_features = [gram_matrix(style_layer) for style_layer in style_outputs]\n ### END CODE HERE ###\n return gram_style_features",
"def get_feature_representations(model, content_img, style_img):\n # Load our images in \n content = load_and_process_img(content_img)\n style = load_and_process_img(style_img)\n\n # batch compute content and style features\n style_outputs = model(style)\n content_outputs = model(content)\n\n # Get the style and content feature representations from our model\n style_features = [style_layer[0] for style_layer in style_outputs[:num_style_layers]]\n content_features = [content_layer[0] for content_layer in content_outputs[num_style_layers:]]\n\n return style_features, content_features",
"def _get_feature_representations(self, content_and_style_class):\n # Load our images in\n content_image = content_and_style_class.processed_content_image\n style_image = content_and_style_class.processed_style_image\n\n # batch compute content and style features\n style_outputs = self.model(style_image)\n content_outputs = self.model(content_image)\n\n # Get the style and content feature representations from our model\n style_features = [style_layer[0]\n for style_layer in style_outputs[:self.num_style_layers]]\n content_features = [content_layer[0]\n for content_layer in content_outputs[self.num_style_layers:]]\n return style_features, content_features",
"def get_content_image_features(image):\n\n ### START CODE HERE ###\n # preprocess the image\n preprocessed_content_image = preprocess_image(image)\n \n # get the outputs from the inception model\n outputs = inception(preprocessed_content_image)\n\n # get the content layer of the outputs\n content_outputs = outputs[:NUM_CONTENT_LAYERS]\n\n ### END CODE HERE ###\n return content_outputs",
"def all_feature_extractor(imgpath):\r\n\r\n image = cv2.imread(imgpath)\r\n image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\r\n\r\n # Extracting Gabor Features\r\n feature_dict = gabor_feature_extractor(image)\r\n\r\n feature_dict['Original'] = image\r\n\r\n entropy_img = entropy(image, disk(1))\r\n feature_dict['Entropy'] = entropy_img\r\n\r\n gaussian3_img = nd.gaussian_filter(image, sigma=3)\r\n feature_dict['Gaussian3'] = gaussian3_img\r\n\r\n gaussian7_img = nd.gaussian_filter(image, sigma=7)\r\n feature_dict['Gaussian7'] = gaussian7_img\r\n\r\n sobel_img = sobel(image)\r\n feature_dict['Sobel'] = sobel_img\r\n\r\n canny_edge_img = cv2.Canny(image, 100, 200)\r\n feature_dict['Canny'] = canny_edge_img\r\n\r\n robert_edge_img = roberts(image)\r\n feature_dict['Robert'] = robert_edge_img\r\n\r\n scharr_edge = scharr(image)\r\n feature_dict['Scharr'] = scharr_edge\r\n\r\n prewitt_edge = prewitt(image)\r\n feature_dict['Prewitt'] = prewitt_edge\r\n\r\n median_img = nd.median_filter(image, size=3)\r\n feature_dict['Median'] = median_img\r\n\r\n variance_img = nd.generic_filter(image, np.var, size=3)\r\n feature_dict['Variance'] = variance_img\r\n\r\n return feature_dict",
"def extract_features(self):\n self.extract_features_static()\n self.extract_features_dynamic()",
"def get_features(image, features, gparams, fg_size):\n import numpy as np\n from get_hog import get_hog\n\n if len(image.shape) == 2:\n image = image.reshape([image.shape[0], image.shape[1], 1])\n\n if len(image.shape) == 3:\n [im_height, im_width, num_im_chan] = image.shape\n num_images = 1\n else:\n [im_height, im_width, num_im_chan, num_images] = image.shape\n\n tot_feature_dim = features['fparams']['nDim']\n\n if fg_size is None or (not fg_size is True):\n if gparams['cell_size'] == -1:\n fg = get_hog(image, features['fparams'], gparams)\n fg_size = fg.shape\n else:\n fg_size = [np.floor(im_height / gparams['cell_size']), np.floor(im_width / gparams['cell_size'])]\n\n feature_image = get_hog(image, features['fparams'], gparams)\n if num_images == 1:\n feature_image = feature_image.reshape(feature_image.shape[0], feature_image.shape[1],\n feature_image.shape[2], 1)\n\n feature_pixels = np.zeros([int(fg_size[0]), int(fg_size[1]), tot_feature_dim, num_images])\n feature_pixels[:, :, 0::, :] = feature_image\n support_sz = [im_height, im_width]\n\n return feature_pixels, support_sz",
"def extractFeatures(image, feature_list):\n # for multiple features or color features\n #feat_vec = np.array([])\n \n # sift has 128D\n feat_vec = np.empty((0,128))\n n_channels = (image.shape[2] if len(image.shape)==3 else 1)\n \n #img_f32 = image.astype(np.float32)\n\n for feature in feature_list:\n if (feature.strip().lower() == 'dsift'):\n print \"computing dsift (dense rootSift) features\"\n dense = cv2.FeatureDetector_create(\"Dense\")\n sift = cv2.SIFT()\n if n_channels == 1:\n kp = dense.detect(image[:,:])\n # compute kp descriptors\n _,des = sift.compute(image[:,:],kp)\n \n # normalize the descriptors (L1)\n des /= (des.sum(axis=1, keepdims=True) + 1e-7)\n des = np.sqrt(des)\n \n feat_vec = np.vstack((feat_vec, des))\n else:\n for channel in xrange(n_channels):\n kp = dense.detect(image[:,:,channel])\n _,des = sift.compute(image[:,:,channel],kp)\n \n # normalize the descriptors (L1)\n des /= (des.sum(axis=1, keepdims=True) + 1e-7)\n des = np.sqrt(des)\n\n feat_vec = np.vstack((feat_vec, des))\n \n# if (feature.strip().lower() == 'color'):\n# print \"computing color features\"\n# # scale from 0-255 between 0 and 1\n# if args.scale == 1:\n# img_f32 /= 255.\n# \n# f_tmp = img_f32.flatten()\n# feat_vec = np.append(feat_vec, f_tmp)\n else:\n raise Exception(\"Method '%s' is not implemented!\"%(feature)) \n \n return feat_vec",
"def extract_features(self, preprocessed_inputs, init_extraction=False):\n if init_extraction:\n preprocessed_inputs.get_shape().assert_has_rank(4)\n shape_assert = tf.Assert(\n tf.logical_and(tf.greater_equal(tf.shape(preprocessed_inputs)[1], 33),\n tf.greater_equal(tf.shape(preprocessed_inputs)[2], 33)),\n ['image size must at least be 33 in both height and width.'])\n with tf.control_dependencies([shape_assert]):\n with slim.arg_scope(self._conv_hyperparams):\n with tf.variable_scope('MobilenetV1',\n reuse=self._reuse_weights) as scope:\n _, image_features = mobilenet_v1.mobilenet_v1_base(\n preprocessed_inputs,\n final_endpoint='Conv2d_13_pointwise',\n min_depth=self._min_depth,\n depth_multiplier=self._depth_multiplier,\n scope=scope)\n feature_head = image_features['Conv2d_13_pointwise']\n feature_head = slim.conv2d(\n feature_head,\n 512, [3,3],\n stride=1,\n padding='SAME',\n scope='Conv2d_Append_1x1_256'\n )\n feature_head = tf.nn.avg_pool(feature_head, strides=[1,1,1,1], ksize=[1,4,4,1],\n padding='VALID', )\n return feature_head\n else:\n preprocessed_inputs.get_shape().assert_has_rank(4)\n shape_assert = tf.Assert(\n tf.logical_and(tf.greater_equal(tf.shape(preprocessed_inputs)[1], 33),\n tf.greater_equal(tf.shape(preprocessed_inputs)[2], 33)),\n ['image size must at least be 33 in both height and width.'])\n\n\n bottomup_features_names = [ 'Conv2d_11_pointwise', 'Conv2d_13_pointwise']\n num_appended_layers = 0\n #appended_channel_num = [512, 256, 256, 256]\n appended_channel_num = [512]\n\n with tf.control_dependencies([shape_assert]):\n with slim.arg_scope(self._conv_hyperparams):\n with tf.variable_scope('MobilenetV1',\n reuse=self._reuse_weights) as scope:\n _, image_features = mobilenet_v1.mobilenet_v1_base(\n preprocessed_inputs,\n final_endpoint='Conv2d_13_pointwise',\n min_depth=self._min_depth,\n depth_multiplier=self._depth_multiplier,\n scope=scope)\n\n topdown_features = self._topdown_feature_maps(\n image_features,\n bottomup_features_names=bottomup_features_names,\n num_appended_layers = num_appended_layers,\n appended_channel_num = appended_channel_num)\n return topdown_features.values()",
"def getFeatures(c):\n\n\n feature_list = []\n lc_rc_list = []\n w1 = c.getStack(0)\n w2 = c.getStack(1)\n w3 = c.getStack(2)\n b1 = c.getBuffer(0)\n b2 = c.getBuffer(1)\n b3 = c.getBuffer(2)\n for i in [w1, w2]: #12\n lc = c.getLeftChild(i,1) # 1 st left child of the word on the stack.\n rc = c.getRightChild(i,1) # 1 st right child of the word on the stack.\n lc_rc_list.append(lc)\n lc_rc_list.append(rc)\n lc_rc_list.append(c.getLeftChild(i,2)) # 2 nd left child of the word on the stack\n lc_rc_list.append(c.getRightChild(i,2)) # 2 nd right child of the word on the stack\n lc_rc_list.append(c.getLeftChild(lc,1)) # 1 st left child of the left child of the word on the stack\n lc_rc_list.append(c.getRightChild(rc,1)) # 1 st right child of the right child of the word on the stack\n ########################### 18 Word Features ###########################\n for i in [w1,w2,w3,b1,b2,b3]:\n\n feature_list.append(getWordID(c.getWord(i))) # 6 words of the stack and buffer\n\n for i in lc_rc_list: #12 words of the tree\n feature_list.append(getWordID(c.getWord(i)))\n\n ########################### 18 Tag Features ###########################\n for i in [w1,w2,w3,b1,b2,b3]:\n\n feature_list.append(getPosID(c.getPOS(i))) # 6 tags of the owrds on the stack and the buffer\n\n for i in lc_rc_list:\n feature_list.append(getPosID(c.getPOS(i))) #12 tags of the words onthe stack and the buffer.\n ########################### 12 label Features ###########################\n for i in lc_rc_list:\n feature_list.append(getLabelID(c.getLabel(i))) #12 labels of the words on the stack and the buffer.\n\n\n return feature_list",
"def featurize(self, tokens):\n features = []\n \n nrc_hashtag_emotion_features = self.nrc_hashtag_emotion(tokens)\n nrc_affect_intensity_features = self.nrc_affect_intensity(tokens)\n nrc_hashtag_sentiment_lexicon_unigrams_features = self.nrc_hashtag_sentiment_lexicon_unigrams(tokens)\n nrc_hashtag_sentiment_lexicon_bigrams_features = self.nrc_hashtag_sentiment_lexicon_bigrams(tokens)\n sentiment140_unigrams_features = self.sentiment140_unigrams(tokens)\n sentiment140_bigrams_features = self.sentiment140_bigrams(tokens)\n senti_wordnet_features = self.senti_wordnet(tokens)\n bing_lui_sentiment_lexicons_features = self.bing_lui_sentiment_lexicons(tokens)\n nrc_expanded_lexicon_features = self.nrc_10_expanded(tokens)\n negating_word_list_features = self.negating_words_list(tokens)\n total_number_of_words_features = self.get_total_number_of_words(tokens)\n mpqa_subjectivity_lexicon_features = self.mpqa_subjectivity_lexicon(tokens)\n afinn_sentiment_features = self.afinn_sentiment_scores(tokens)\n # senti_strength_features = self.get_sentistrength(\" \".join(tokens))\n\n features.extend(nrc_hashtag_emotion_features.values()) # 10 features\n features.extend(nrc_affect_intensity_features.values()) # 10 features\n features.extend(nrc_hashtag_sentiment_lexicon_unigrams_features.values()) # 4 features\n features.extend(nrc_hashtag_sentiment_lexicon_bigrams_features.values()) # 4 features\n features.extend(sentiment140_unigrams_features.values()) # 4 features \n features.extend(sentiment140_bigrams_features.values()) # 4 features\n features.extend(senti_wordnet_features.values()) # 4 features\n features.extend(bing_lui_sentiment_lexicons_features.values()) # 2 features\n features.extend(nrc_expanded_lexicon_features.values()) # 10 features\n features.extend(negating_word_list_features.values()) # 1 feature\n features.extend(total_number_of_words_features.values()) # 1 feature\n features.extend(mpqa_subjectivity_lexicon_features.values()) # 2 features\n features.extend(afinn_sentiment_features.values()) # 2 features\n # features.extend(senti_strength_features.values()) # 2 features\n\n return features",
"def _extract_features(images,\n model_options,\n weight_decay=0.0001,\n reuse=tf.AUTO_REUSE,\n is_training=False,\n fine_tune_batch_norm=False):\n # feature extractor is a backbone factory\n DEBUG_VARS.raw_image = images\n features, end_points = feature_extractor.extract_features(\n images,\n output_stride=model_options.output_stride,\n multi_grid=model_options.multi_grid,\n model_variant=model_options.model_variant,\n weight_decay=weight_decay,\n reuse=reuse,\n is_training=is_training,\n fine_tune_batch_norm=fine_tune_batch_norm)\n\n # TODO:check\n # DEBUG_VARS.xception_feature = end_points['xception_65/entry_flow/conv1_1/Relu:0']\n DEBUG_VARS.xception_feature = features\n if not model_options.aspp_with_batch_norm:\n return features, end_points\n else:\n batch_norm_params = {\n 'is_training': is_training and fine_tune_batch_norm,\n 'decay': 0.9997,\n 'eps': 1e-5,\n 'affine': True,\n }\n regularize_func = regularizer('l2', weight_decay)\n with tf.variable_scope(tf.get_variable_scope(), reuse=reuse):\n with arg_scope([sep_conv2d], activate=tf.nn.relu, activate_middle=tf.nn.relu, batch_norm=True,\n depthwise_weight_reg=None, pointwise_weight_reg=regularize_func,\n padding='SAME', strides=[1, 1]):\n with arg_scope([conv2d], activate=tf.nn.relu, weight_reg=regularize_func,\n batch_norm=True, padding='SAME', strides=[1, 1]):\n # TODO: ASPP IS IMPLEMENTED HERE! Check Out!\n with arg_scope([batch_norm2d], **batch_norm_params):\n depth = 256\n branch_logits = []\n\n # TODO: ADD IMAGE POOLING HERE\n if model_options.add_image_level_feature:\n # this crop size has been updated to the new scaled one outside, which is the exact size\n # of this model's inputs\n pool_height = scale_dimension(model_options.crop_size[0],\n 1. / model_options.output_stride)\n pool_width = scale_dimension(model_options.crop_size[1],\n 1. 
/ model_options.output_stride)\n # global average pooling, check whether the shape here is 1?\n image_feature = avg_pool2d(\n features, [pool_height, pool_width], [pool_height, pool_width],\n padding='VALID')\n # collapse channels to depth after GAP\n image_feature = conv2d(\n inputs=image_feature, outc=depth, ksize=[1, 1], name=_IMAGE_POOLING_SCOPE)\n # TODO:check\n DEBUG_VARS.image_feature = image_feature\n # reshape it to final feature map shape\n image_feature = tf.image.resize_bilinear(\n image_feature, [pool_height, pool_width], align_corners=True)\n image_feature.set_shape([None, pool_height, pool_width, depth])\n # add image level feature to branch_logits\n branch_logits.append(image_feature)\n\n # Employ a 1x1 convolution.\n branch_logits.append(conv2d(features, outc=depth, ksize=[1, 1], name=_ASPP_SCOPE + str(0)))\n\n if model_options.atrous_rates:\n # Employ 3x3 convolutions with different atrous rates.\n DEBUG_VARS.aspp_features = []\n for i, rate in enumerate(model_options.atrous_rates, 1):\n scope = _ASPP_SCOPE + str(i)\n if model_options.aspp_with_separable_conv:\n aspp_features = sep_conv2d(\n features, outc=depth, ksize=[3, 3], ratios=[rate, rate], name=scope)\n DEBUG_VARS.aspp_features.append(aspp_features)\n else:\n aspp_features = conv2d(\n features, outc=depth, ksize=[3, 3], ratios=[rate, rate], name=scope)\n branch_logits.append(aspp_features)\n\n # Merge branch logits.\n concat_logits = tf.concat(branch_logits, 3)\n concat_logits = conv2d(inputs=concat_logits, outc=depth, ksize=[1, 1],\n name=_CONCAT_PROJECTION_SCOPE)\n DEBUG_VARS.aspp_concat_feature = concat_logits\n concat_logits = drop_out(concat_logits, kp_prob=0.9, is_training=is_training,\n name=_CONCAT_PROJECTION_SCOPE + '_dropout')\n\n return concat_logits, end_points",
"def _extract_features(self, graphs, ai2d_ann, image, layers):\n # To begin with, build the grouping graph, which is provides the layout\n # information on all diagram elements, which can be then picked out in\n # other graphs, if necessary.\n graph = graphs['grouping']\n\n # Check that a graph exists\n try:\n\n # Fetch nodes from the graph\n nodes = graph.nodes(data=True)\n\n except AttributeError:\n\n return None\n\n # Begin extracting the features by getting the diagram image shape\n h, w = image.shape[:2]\n\n # Get the number of pixels in the image\n n_pix = h * w\n\n # Set up a placeholder dictionaries to hold updated node and edge\n # features\n node_features = {}\n edge_features = {}\n\n # Loop over the nodes and their features\n for node, features in nodes:\n\n # Fetch the node type from its features under the key 'kind'\n node_type = features['kind']\n\n # Parse layout annotation\n layout_feats = self._parse_ai2d_layout(ai2d_ann, # annotation\n h, # image height\n w, # image width\n n_pix, # n of pixels\n node_type, # elem type\n node # node id\n )\n\n # Add layout features to the dictionary of updated node features\n node_features[node] = {'features': layout_feats,\n 'kind': self.node_dict['grouping'][node_type]}\n\n # Updated node attributes in the grouping graph using layout\n # features\n nx.set_node_attributes(graph, node_features)\n\n # Calculate features for grouping nodes based on their children. This\n # requires a directed tree graph.\n group_tree = nx.dfs_tree(graph, source=\"I0\")\n\n # Get a list of grouping nodes and image constants in the graph\n groups = [n for n, attr in graph.nodes(data=True) if attr['kind']\n in [self.node_dict['grouping']['imageConsts'],\n self.node_dict['grouping']['group']]]\n\n # Iterate over the nodes in the graph\n for n, attr in graph.nodes(data=True):\n\n # Check if the node type is a group\n if n in groups:\n\n # Get predecessors of the grouping node\n n_preds = nx.dfs_predecessors(group_tree, n)\n\n # Remove groups from the list of predecessor;\n # each group will be processed indepedently\n n_preds = [n for n in n_preds.keys() if n not in groups]\n\n # Create a subgraph consisting of preceding nodes\n n_subgraph = graph.subgraph(n_preds)\n\n # Get layout features for each node\n n_feats = [ad['features'] for n, ad in\n n_subgraph.nodes(data=True)]\n\n # Cast stacked features into a 2D numpy array\n stacked_feats = np.array(n_feats)\n\n # Get average centre point for group by slicing the array\n x_avg = np.average(stacked_feats[:, 0])\n y_avg = np.average(stacked_feats[:, 1])\n\n # Add up their area\n a_sum = np.sum(stacked_feats[:, 2])\n\n # Average the solidity\n s_avg = np.average(stacked_feats[:, 3])\n\n # Concatenate the features\n layout_feats = np.concatenate([[x_avg], [y_avg],\n [a_sum], [s_avg]], axis=0)\n\n # Update group feature dictionary\n upd_group_feats = {n: {'features': layout_feats,\n 'kind': attr['kind']}}\n\n # Update group features\n nx.set_node_attributes(graph, upd_group_feats)\n\n # Add edge types to the grouping layer, as these are not defined in the\n # JSON annotation. 
To do so, get the edges from the grouping graph.\n edges = graph.edges(data=True)\n\n # Loop over the edges in the graph\n for src, dst, features in edges:\n\n # Add edge type unde key 'kind' to the edge_features dictionary\n edge_features[src, dst] = {'kind': 'grouping'}\n\n # Update edge features in the grouping graph\n nx.set_edge_attributes(graph, edge_features)\n\n # Encode edge features\n self._encode_edges(graph, self.edge_dict['grouping'])\n\n # Update the grouping graph in the graphs dictionary\n graphs['grouping'] = graph\n\n # Now that the grouping layer has been created, check which other\n # annotation layers must be included in the graph-based representation.\n\n # The combination of grouping and connectivity layers is a relatively\n # simple case.\n if layers == \"grouping+connectivity\":\n\n # If a connectivity graph exists, merge it with the grouping graph\n if graphs['connectivity'] is not None:\n\n # Use nx.compose() to combine the grouping and connectivity\n # graphs\n graph = nx.compose(graphs['connectivity'], graphs['grouping'])\n\n # Encode edge type information using numerical labels\n self._encode_edges(graph, self.edge_dict['connectivity'])\n\n # Update the grouping graph\n graphs['grouping'] = graph\n\n # The connectivity layer alone is a bit more complex, as the children of\n # grouping nodes need to be copied over to the connectivity graph.\n if layers == 'connectivity' and graphs['connectivity'] is not None:\n\n # Get the grouping and connectivity graphs\n conn_graph = graphs['connectivity']\n group_graph = graphs['grouping']\n\n # Get a list of nodes in the connectivity graph\n conn_nodes = list(conn_graph.nodes(data=True))\n\n # Get a list of grouping nodes in the connectivity graph\n grouping_nodes = [n for n, attr_dict in conn_nodes\n if attr_dict['kind'] == 'group']\n\n # If grouping nodes are found, get their children and add them to\n # the graph\n if len(grouping_nodes) > 0:\n\n # Create a directed tree graph using depth-first search,\n # starting from the image constant I0.\n group_tree = nx.dfs_tree(group_graph, source=\"I0\")\n\n # Loop over each grouping node\n for gn in grouping_nodes:\n\n # Resolve grouping nodes by adding their children to the\n # connectivity graph\n self._resolve_grouping_node(gn, group_tree,\n group_graph, conn_graph)\n\n # If the connectivity graph does not include grouping nodes, simply\n # copy the node features from the grouping graph.\n n_subgraph = group_graph.subgraph(conn_graph.nodes)\n\n # Add these nodes to the connectivity graph\n conn_graph.add_nodes_from(n_subgraph.nodes(data=True))\n\n # Encode edge type information using numerical labels\n self._encode_edges(conn_graph, self.edge_dict['connectivity'])\n\n # Update the connectivity graph in the graphs dictionary\n graphs['connectivity'] = conn_graph\n\n # Start building the discourse graph by getting node features from the\n # grouping graph.\n if layers == 'discourse':\n\n # Get grouping and discourse graphs\n group_graph = graphs['grouping']\n rst_graph = graphs['discourse']\n\n # Reverse node type dictionary for the grouping layer\n rev_group_dict = {int(v.item()): k for k, v in\n self.node_dict['grouping'].items()}\n\n # Re-encode node types to ensure that node types do not clash with\n # those defined for discourse graph\n upd_node_types = {k: rev_group_dict[int(v['kind'].item())]\n for k, v in group_graph.nodes(data=True)}\n\n # Update node attributes for the grouping graph\n nx.set_node_attributes(group_graph, upd_node_types, 'kind')\n\n # Get the nodes 
participating in the discourse graph from the\n # grouping graph using the .subgraph() method.\n subgraph = group_graph.subgraph(rst_graph.nodes)\n\n # Add these nodes back to the discourse graph with their features\n # and numerical labels. These will overwrite the original nodes.\n rst_graph.add_nodes_from(subgraph.nodes(data=True))\n\n # Check if discourse graph contains groups or split nodes. Split\n # nodes are used to preserve the tree structure in case a diagram\n # element participates in multiple RST relations.\n for n, attr_dict in rst_graph.copy().nodes(data=True):\n\n # Check if the node is a group\n if 'group' in attr_dict['kind']:\n\n # Create a directed tree graph using depth-first search,\n # starting from the image constant I0.\n group_tree = nx.dfs_tree(group_graph, source=\"I0\")\n\n # Resolve grouping nodes by adding their children to the\n # discourse graph.\n self._resolve_grouping_node(n, group_tree,\n group_graph, rst_graph)\n\n # Check node for the copy_of attribute, which contains a\n # reference to the node which has been split.\n if 'copy_of' in attr_dict.keys():\n\n # Get the identifier of the node in AI2D layout annotation\n n_orig_id = attr_dict['copy_of']\n n_orig_kind = attr_dict['kind']\n\n # Fetch node data from the AI2D layout annotation\n layout_feats = self._parse_ai2d_layout(ai2d_ann,\n h,\n w,\n n_pix,\n n_orig_kind,\n n_orig_id)\n\n # Add updated features to a dictionary\n upd_node_feats = {n: {'features': layout_feats,\n 'kind': n_orig_kind}}\n\n # Update node features in the graph\n nx.set_node_attributes(rst_graph, upd_node_feats)\n\n # Check if the node is a relation\n if 'relation' in attr_dict['kind']:\n\n # Get integer label for RST relation\n rst_int_label = self.node_dict['relations'][attr_dict['rel_name']]\n\n # Get node labels and encode using label binarizer\n rst_label = self._rst_binarizer.transform(rst_int_label)\n\n # Check if label smoothing is requested:\n if self._smooth_labels:\n\n # Cast into float for label smoothing\n rst_label = np.asarray(rst_label, dtype=np.float64)\n\n # Smooth the labels by a factor of 0.1\n rst_label *= (1 - 0.1)\n rst_label += (0.1 / rst_label.shape[1])\n\n # Store encoded information into the updated features dict\n upd_node_feats = {n: {'features': rst_label.flatten()}}\n\n # Set the updated features to nodes in the discourse graph\n nx.set_node_attributes(rst_graph, upd_node_feats)\n\n # Check if a NetworkX graph should be returned\n if self._return_nx:\n\n return rst_graph\n\n # Convert node identifiers to integers. 
This needs to be performed\n # before creating a heterograph.\n rst_graph = nx.convert_node_labels_to_integers(rst_graph,\n first_label=0)\n\n # Get nodes and convert to NumPy array; get unique nodes; get node\n # type index vector\n nodes = np.asarray([attr['kind'] for n, attr in\n rst_graph.nodes(data=True)]).flatten()\n\n ntypes = np.unique(nodes)\n\n node_ixs = np.array([np.where(ntypes == n) for n in\n np.nditer(nodes)], dtype=np.int64).flatten()\n\n # Do the same for edges\n edges = np.asarray([attr['kind'] for s, t, attr in\n rst_graph.edges(data=True)]).flatten()\n\n etypes = np.unique(edges)\n\n edge_ixs = np.array([np.where(etypes == e) for e in\n np.nditer(edges)], dtype=np.int64).flatten()\n\n # Create DGL graph object from the discourse graph\n g = dgl.from_networkx(rst_graph)\n\n # Assign node and edge types\n g.ndata[dgl.NTYPE] = torch.LongTensor(node_ixs)\n g.edata[dgl.ETYPE] = torch.LongTensor(edge_ixs)\n\n # Create a DGL heterograph from the DGL graph object\n hg = dgl.to_heterogeneous(g, ntypes, etypes)\n\n # Loop over node types in the heterograph\n for ntype in hg.ntypes:\n\n # Get unique node identifiers for this node type; cast to list\n rst_node_ids = hg.nodes[ntype].data[dgl.NID].tolist()\n\n # Loop over RST node identifiers\n features = np.vstack([rst_graph.nodes[node_id]['features']\n for node_id in rst_node_ids])\n\n # Add features to DGL heterograph\n hg.nodes[ntype].data['features'] = torch.from_numpy(features)\n\n # Update the RST graph\n graphs['discourse'] = hg\n\n # Return all graphs\n return graphs",
"def _extract_features(self, all_batches, patch_size, train=True):\n # manually derive basic intensities features\n # takes 20 sec / 1048 images batch on my laptop in 4 cores //\n p = patch_size\n r = 512 // p\n labels = np.empty(0)\n feats = np.empty(0)\n for counter, tmp in enumerate(all_batches):\n # if counter == 2:\n # break\n if train:\n batch_img, batch_label = tmp\n else:\n batch_img = tmp\n batch_label = np.empty(0)\n # just for testing just use 20 batch as training set\n print('processing batch {}'.format(counter))\n t1 = time.time()\n batch_feats = np.asarray(\n parmap.map(\n self._get_features_from_batch_images,\n batch_img,\n r,\n p,\n pm_pbar=True))\n print(time.time() - t1)\n labels = np.concatenate(\n (labels, batch_label)) if labels.size else batch_label\n feats = np.concatenate(\n (feats, batch_feats)) if feats.size else batch_feats\n if train:\n return feats, labels\n else:\n return feats",
"def stylize(network, initial, content, styles, iterations,\n content_weight, style_weight, style_blend_weights, tv_weight,\n learning_rate, print_iterations=None, checkpoint_iterations=None):\n shape = (1,) + content.shape\n style_shapes = [(1,) + style.shape for style in styles]\n content_features = {}\n style_features = [{} for _ in styles]\n\n # compute content features in feedforward mode\n g = tf.Graph()\n with g.as_default(), g.device('/cpu:0'), tf.Session() as sess:\n image = tf.placeholder('float', shape=shape)\n net, mean_pixel = vgg.net(network, image)\n content_pre = np.array([vgg.preprocess(content, mean_pixel)])\n content_features[CONTENT_LAYER] = net[CONTENT_LAYER].eval(\n feed_dict={image: content_pre})\n\n # compute style features in feedforward mode\n for i in range(len(styles)):\n g = tf.Graph()\n with g.as_default(), g.device('/cpu:0'), tf.Session() as sess:\n image = tf.placeholder('float', shape=style_shapes[i])\n net, _ = vgg.net(network, image)\n style_pre = np.array([vgg.preprocess(styles[i], mean_pixel)])\n for layer in STYLE_LAYERS:\n features = net[layer].eval(feed_dict={image: style_pre})\n features = np.reshape(features, (-1, features.shape[3]))\n gram = np.matmul(features.T, features) / features.size\n style_features[i][layer] = gram\n\n # make stylized image using backpropogation\n with tf.Graph().as_default():\n if initial is None:\n noise = np.random.normal(size=shape, scale=np.std(content) * 0.1)\n initial = tf.random_normal(shape) * 0.256\n else:\n initial = np.array([vgg.preprocess(initial, mean_pixel)])\n initial = initial.astype('float32')\n image = tf.Variable(initial)\n net, _ = vgg.net(network, image)\n\n # content loss\n content_loss = content_weight * (2 * tf.nn.l2_loss(\n net[CONTENT_LAYER] - content_features[CONTENT_LAYER]) /\n content_features[CONTENT_LAYER].size)\n # style loss\n style_loss = 0\n for i in range(len(styles)):\n style_losses = []\n for style_layer in STYLE_LAYERS:\n layer = net[style_layer]\n _, height, width, number = map(lambda i: i.value, layer.get_shape())\n size = height * width * number\n feats = tf.reshape(layer, (-1, number))\n gram = tf.matmul(tf.transpose(feats), feats) / size\n style_gram = style_features[i][style_layer]\n style_losses.append(2 * tf.nn.l2_loss(gram - style_gram) / style_gram.size)\n style_loss += style_weight * style_blend_weights[i] * reduce(tf.add, style_losses)\n # total variation denoising\n tv_y_size = _tensor_size(image[:,1:,:,:])\n tv_x_size = _tensor_size(image[:,:,1:,:])\n tv_loss = tv_weight * 2 * (\n (tf.nn.l2_loss(image[:,1:,:,:] - image[:,:shape[1]-1,:,:]) /\n tv_y_size) +\n (tf.nn.l2_loss(image[:,:,1:,:] - image[:,:,:shape[2]-1,:]) /\n tv_x_size))\n # overall loss\n loss = content_loss + style_loss + tv_loss\n\n # optimizer setup\n train_step = tf.train.AdamOptimizer(learning_rate).minimize(loss)\n\n def print_progress(i, last=False):\n stderr.write('Iteration %d/%d\\n' % (i + 1, iterations))\n if last or (print_iterations and i % print_iterations == 0):\n stderr.write(' content loss: %g\\n' % content_loss.eval())\n stderr.write(' style loss: %g\\n' % style_loss.eval())\n stderr.write(' tv loss: %g\\n' % tv_loss.eval())\n stderr.write(' total loss: %g\\n' % loss.eval())\n\n # optimization\n best_loss = float('inf')\n best = None\n with tf.Session() as sess:\n sess.run(tf.initialize_all_variables())\n for i in range(iterations):\n last_step = (i == iterations - 1)\n print_progress(i, last=last_step)\n train_step.run()\n\n if (checkpoint_iterations and i % checkpoint_iterations == 0) or 
last_step:\n this_loss = loss.eval()\n if this_loss < best_loss:\n best_loss = this_loss\n best = image.eval()\n yield (\n (None if last_step else i),\n vgg.unprocess(best.reshape(shape[1:]), mean_pixel)\n )",
"def extract_features(tlc):\n text = clean_text(tlc['body'])\n fields = dict()\n # add features here #\n fields['Top_comment_word_count'] = len(text.split(' '))\n fields['Top_comment_text'] = text\n\n # Extract time-based features\n def get_day_of_week(text):\n return datetime.datetime.strptime(text, '%Y-%m-%d %H:%M:%S').weekday() + 1\n\n def get_day_of_month(text):\n return datetime.datetime.strptime(text, '%Y-%m-%d %H:%M:%S').day\n\n def get_time_of_day(text):\n return datetime.datetime.strptime(text, '%Y-%m-%d %H:%M:%S').hour\n time_local = time.localtime(tlc['created_utc'])\n time_local = time.strftime(\"%Y-%m-%d %H:%M:%S\", time_local)\n fields['Top_comment_day'] = get_day_of_month(time_local)\n fields['Top_comment_day_of_week'] = get_day_of_week(time_local)\n fields['Top_comment_hour'] = get_time_of_day(time_local)\n\n # Extract gender value\n gp = GenderPerformr()\n probs, _ = gp.predict(tlc['author'])\n # Rescale it from [0,1] to [-1,1]\n fields['Top_comment_author_gender_value'] = 2 * probs - 1\n\n # Extract percentage of mispellings\n check = SpellChecker(\"en_US\")\n tokenizer = get_tokenizer(\"en_US\")\n # Prevent the denominator from 0\n def weird_division(n, d):\n return n / d if d else 0\n\n def get_mispellings_percentage(text):\n mispelling_count = 0\n total_count = 0\n if text == 'nan':\n return total_count\n else:\n check.set_text(text)\n for err in check:\n mispelling_count = mispelling_count + 1\n for w in tokenizer(text):\n total_count = total_count + 1\n value = weird_division(mispelling_count, total_count)\n return value\n fields['Top_comment_mispellings'] = get_mispellings_percentage(text)\n\n # Get politeness, agreement, support scores, and rescale them from [1,5] to [-1,1]\n ar = Agreementr()\n pr = Politenessr()\n sr = Supportr()\n fields['Top_comment_agreement_value'] = 0.5*float(ar.predict([text]))-1.5\n fields['Top_comment_politeness_value'] = 0.5*float(pr.predict([text]))-1.5\n fields['Top_comment_support_value'] = 0.5*float(sr.predict([text]))-1.5\n\n # Get toxicity scores\n KEY = \"yourkey.txt\" # os.getenv(\"GOOGLE_API_KEY\")\n service = discovery.build('commentanalyzer', 'v1alpha1', developerKey=KEY)\n\n def get_results(request_id, response, exception):\n toxicity_scores.append((request_id, response))\n\n toxicity_scores = []\n count = 0\n batch = service.new_batch_http_request(callback=get_results)\n analyze_request = {\n 'comment': {'text': text},\n \"requestedAttributes\": {\n \"TOXICITY\": {},\n \"SEVERE_TOXICITY\": {},\n \"ATTACK_ON_COMMENTER\": {}\n }\n }\n batch.add(service.comments().analyze(body=analyze_request), request_id=str(count))\n batch.execute()\n toxic_score = toxicity_scores[0][1]['attributeScores']['TOXICITY']['summaryScore']['value']\n attack_score = toxicity_scores[0][1]['attributeScores']['ATTACK_ON_COMMENTER']['summaryScore']['value']\n if toxic_score > 0.5:\n fields['Top_comment_untuned_toxicity'] = 1\n else:\n fields['Top_comment_untuned_toxicity'] = 0\n if toxic_score > 0.8 and attack_score > 0.5:\n fields['Top_comment_tuned_toxicity'] = 1\n else:\n fields['Top_comment_tuned_toxicity'] = 0\n # end of feature extractions #\n return fields",
"def extract_features(imgs, color_space='RGB', spatial_size=(32, 32),\n hist_bins=32, orient=9,\n pix_per_cell=8, cell_per_block=2, hog_channel=0,\n spatial_feat=True, hist_feat=True, hog_feat=True):\n # Create a list to append feature vectors to\n features = []\n # Iterate through the list of images\n for file in imgs:\n file_features = []\n # Read in each image, one by one\n image = mpimg.imread(file)\n # Apply colour conversion if other than 'RGB'\n if color_space != 'RGB':\n if color_space == 'HSV':\n feature_image = cv2.cvtColor((image, cv2.COLOR_RGB2HSV))\n elif color_space == 'LUV':\n feature_image = cv2.cvtColor((image, cv2.COLOR_RGB2LUV))\n elif color_space == 'HLS':\n feature_image = cv2.cvtColor((image, cv2.COLOR_RGB2HLS))\n elif color_space == 'YUV':\n feature_image = cv2.cvtColor((image, cv2.COLOR_RGB2YUV))\n elif color_space == 'YCrCb':\n feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2YCrCb)\n # print('converting to YCrCb')\n else:\n feature_image = np.copy(image)\n\n # Compute spatial features if flag is set\n if spatial_feat == True:\n spatial_features = bin_spatial(feature_image, size=spatial_size)\n file_features.append(spatial_features)\n if hist_feat == True:\n # Apply color_hist()\n hist_features = color_hist(feature_image, nbins=hist_bins)\n file_features.append(hist_features)\n if hog_feat == True:\n # Call get_hog_features() with vis=False, feature_vec=True\n if hog_channel == 'ALL':\n hog_features = []\n for channel in range(feature_image.shape[2]):\n hog_features.append(get_hog_features(feature_image[:, :, channel],\n orient, pix_per_cell, cell_per_block,\n vis=False, feature_vec=True))\n # hog_features = np.concatenate(hog_features)\n hog_features = np.ravel(hog_features)\n else:\n hog_features = get_hog_features(feature_image[:, :, hog_channel], orient,\n pix_per_cell, cell_per_block, vis=False, feature_vec=True)\n # Append the new feature vector to the features list\n file_features.append(hog_features)\n features.append(np.concatenate(file_features))\n\n # Return list of feature vectors\n return features",
"def get_style_features(text, nlp):\n doc = nlp(text)\n \n final_data = {f'mpqa_{k}': v for k, v in doc._.total_argument_types.items()}\n final_data['tb_sentiment'] = doc.sentiment\n final_data['tb_subjectivity'] = doc._.subjectivity\n \n # Return avg for emotions\n emotion_data = doc._.emotions\n emotion_data = {k: v / len(doc) for k, v in emotion_data.items()}\n \n final_data.update(emotion_data)\n \n cur_lemmas = list(set(w.lemma_ for w in doc))\n final_data['lemmas'] = cur_lemmas\n \n return final_data",
"def training_features(orientation=8, pix_per_cell=8, cell_per_block=2,\n spatial_size=16, hist_bins=32, color_space='HLS', sample_window=64,\n channels=[0], debug=False):\n def extract(paths, augment=False): # extract and augment\n features = []\n for file in paths:\n image = utils.imread_scaled_unified(file)\n if color_space != ident_config['default_color_space']:\n image_color_converted = cv2.cvtColor(\n image,\n eval('cv2.COLOR_' + ident_config['default_color_space'] + '2' + color_space))\n else:\n image_color_converted = image\n # End of if color_space\n\n image_resized = cv2.resize(image_color_converted, (sample_window, sample_window))\n if augment:\n brightened = utils.brighten(image_resized, bright=1.2)\n flipped = cv2.flip(utils.brighten(image_resized, bright=1.1), 1) # horizontal flip\n to_process = [brightened, flipped]\n else:\n to_process = [image_resized]\n # End of if augment\n\n for x in to_process: # must use square bracket for single element in list to iterate\n # using tuple, it will iterate the single image's row dimension. \n hog_features = utils.get_hog_features_channels(\n x, orientation, pix_per_cell, cell_per_block, channels)\n spatial_features, hist_features = utils.color_features(\n x, spatial_size=spatial_size, hist_bins=hist_bins, channels=channels)\n image_features = np.hstack(\n (spatial_features, hist_features, hog_features)).reshape(1, -1)\n image_features = np.squeeze(image_features)\n # remove the redundant dimension, StandardScaler does not like it\n features.append(image_features)\n # End of for x ...\n # End of for file\n return features\n cars, noncars, cars_to_be_augmented, num_cars, num_noncars = samples_sorted()\n num_samples = 30000 # limit the number of samples to be selected from each group.\n print('num_cars: ', num_cars, ' num_noncars: ', num_noncars, ' max. samples: ', 3*num_samples)\n\n car_features = extract(cars[:min(num_samples, len(cars))], augment=False)\n car_augmented_features = extract(cars_to_be_augmented[:min(num_samples, len(cars_to_be_augmented))], augment=True)\n noncar_features = extract(noncars[:min(num_samples, len(noncars))], augment=False)\n\n # Create an array stack of feature vectors\n X = np.vstack((car_features, car_augmented_features, noncar_features)).astype(np.float64)\n # Fit a per-column scaler\n X_scaler = StandardScaler().fit(X)\n # Apply the scaler to X\n scaled_X = X_scaler.transform(X)\n del X # X, scaled_X consumes much memory, should be released ASAP.\n # Define the labels vector\n y = np.hstack((np.ones(len(car_features) + len(car_augmented_features)), np.zeros(len(noncar_features))))\n\n # Split up data into randomized training and test sets\n rand_state = np.random.randint(0, 100)\n X_train, X_test, y_train, y_test = train_test_split(\n scaled_X, y, test_size=0.1, random_state=rand_state)\n return X_train, X_test, y_train, y_test, X_scaler",
"def extract_feature(network_proto_path,\n network_model_path,\n image_list, data_mean, layer_name, image_as_grey = False):\n net = caffe.Net(network_proto_path, network_model_path, caffe.TEST)\n transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})\n transformer.set_input_scale('data', 1)\n transformer.set_transpose('data', (2, 0, 1))\n blobs = OrderedDict([(k, v.data) for k, v in net.blobs.items()])\n\n shp = blobs[layer_name].shape\n print blobs['data'].shape\n\n batch_size = blobs['data'].shape[0]\n print blobs[layer_name].shape\n\n features_shape = (len(image_list), shp[1])\n features = np.empty(features_shape, dtype='float32', order='C')\n for idx, path in zip(range(features_shape[0]), image_list):\n img = caffe.io.load_image(path, color=False)\n prob = net.forward_all(data=np.asarray([transformer.preprocess('data', img)]))\n print np.shape(prob['prob'])\n blobs = OrderedDict([(k, v.data) for k, v in net.blobs.items()])\n features[idx, :] = blobs[layer_name][0, :].copy()\n print '%d images processed' % (idx + 1)\n features = np.asarray(features, dtype='float32')\n return features",
"def extract_features(self, images: List[np.ndarray]) -> List[np.ndarray]:\n pass",
"def extract_features(img, sigmas, n_features): \n dims = img.shape # dimensions of the image\n \n features = np.zeros((dims[0], dims[1], n_features)) # each feature map has the same size as the input image\n \n # the first feature we use is the pixel intensity in the green channel itself\n img_g = img[:,:,1] #I just assume it follows the RGB convention and not GBR or BGR...\n features[:,:,0] = img_g\n features[:,:,1] = np.sum(img,axis=2) \n \n gabors = get_gabors() \n \n # >>> YOUR CODE STARTS HERE <<<\n i = 2\n# for s in sigmas:\n# gfilters = gauss_filter(s)\n# for gf in gfilters:\n# features[:,:,i] = scipy.signal.fftconvolve(img_g, gf, mode='same') ;i+=1\n for s in sigmas:\n gauss = gauss_filter(s)\n for g in gauss:\n features[:,:,i] = scipy.signal.fftconvolve(img_g, g, mode='same') ;i+=1\n \n for gabor in gabors:\n features[:,:,i] = scipy.signal.fftconvolve(img_g, gabor, mode='same') ;i+=1\n \n \n features[:,:,i] = sobel(img_g, axis=0) ;i+=1\n features[:,:,i] = sobel(img_g, axis=1) ;i+=1\n features[:,:,i] = sobel(img_g, axis=0)+sobel(img_g, axis=1) ;i+=1\n features[:,:,i] = feature.canny(img_g, sigma=0.0) ;i+=1\n features[:,:,i] = feature.canny(img_g, sigma=0, low_threshold=13, high_threshold=50);i+=1\n features[:,:,i] = feature.canny(img_g, sigma=1)\n # >>> YOUR CODE ENDS HERE <<< \n \n return features",
"def output_rule_feature_matrices():\n with open(config.data_path + config.sentiment_seed, 'rb') as input_file:\n sentiment_dict = pickle.load(input_file)\n seed_sentiments = set(sentiment_dict.keys())\n \n for i in range(len(config.file_names)):\n if i is 5:\n print('processing ', config.file_names[i])\n fname = config.file_names[i]\n feature_x, feature_y, opinion_x, opinion_y = text_to_matrix(\n fname, seed_sentiments)\n feature_x = np.transpose(feature_x)\n opinion_x = np.transpose(opinion_x)\n with open('../results/' + fname + '_rule_feature_matrix.pickle', 'wb') as f:\n pickle.dump(feature_x, f)\n with open('../results/' + fname + '_rule_opinion_matrix.pickle', 'wb') as f:\n pickle.dump(opinion_x, f)\n\n with open('../results/' + fname + '_feature_label.pickle', 'wb') as f:\n pickle.dump(feature_y.ravel(), f)\n with open('../results/' + fname + '_opinion_label.pickle', 'wb') as f:\n pickle.dump(opinion_y.ravel(), f)",
"def feature_extract(self, CT_pairs):\n instances = []\n for pair in CT_pairs:\n config = pair[0]\n label = pair[1]\n data = []\n featureset = {}\n \n # for nltk NaiveBayes feature selection stuff when doing MaxEnt decoding parser commit this\n# featureset[\"topOfBuffer\"] = self.token_dict[config.beta.top()]\n# featureset[\"topOfStack\"] = self.token_dict[config.sigma.top()]\n# featureset[\"bufferStackPair\"] = (self.token_dict[config.sigma.top()], self.token_dict[config.beta.top()])\n# featureset[\"topOfBuffer\"] = self.POS_dict[config.beta.top()]\n# featureset[\"topOfStack\"] = self.POS_dict[config.sigma.top()]\n# featureset[\"bufferStackPair\"] = tuple((self.POS_dict[config.sigma.top()], self.POS_dict[config.beta.top()]))\n \n # add the (StackTopPOS,BufferTopPOS,bufferchildren_POS) feature\n #value_set = tuple([self.POS_dict[config.sigma.top()], self.POS_dict[config.beta.top()]] + [self.POS_dict[child] for child in self.getBufferChildren(config.beta.top())])\n #featureset[\"bufferStackbufferChildrenPair\"] = value_set\n \n # for MaxEnt decoding stuff\n # token variants\n data.append((\"topOfBuffer\",self.token_dict[config.beta.top()]))\n data.append((\"topOfStack\",self.token_dict[config.sigma.top()]))\n data.append((\"bufferStackPair\",self.token_dict[config.sigma.top()],self.token_dict[config.beta.top()]))\n #POS variants\n data.append((\"topOfBuffer\",self.POS_dict[config.beta.top()]))\n data.append((\"topOfStack\",self.POS_dict[config.sigma.top()]))\n data.append((\"bufferStackPair\",self.POS_dict[config.sigma.top()],self.POS_dict[config.beta.top()]))\n ins = Instance(label=label, data=data)\n #ins = Instance(label=label, data=featureset)\n instances.append(ins)\n \n return instances",
"def extract_features(self, preprocessed_inputs):\n preprocessed_inputs.get_shape().assert_has_rank(4)\n shape_assert = tf.Assert(\n tf.logical_and(tf.greater_equal(tf.shape(preprocessed_inputs)[1], 33),\n tf.greater_equal(tf.shape(preprocessed_inputs)[2], 33)),\n ['image size must at least be 33 in both height and width.'])\n\n feature_map_layout = {\n 'from_layer': ['conv4', '', '', '', '', '', ''],\n 'layer_depth': [-1, 1024, 1024, 512, 256, 256, 256],\n }\n\n with tf.control_dependencies([shape_assert]):\n with slim.arg_scope(self._conv_hyperparams):\n with tf.variable_scope('vgg_16',\n reuse=self._reuse_weights) as scope:\n net, image_features = vgg.vgg_16_base(\n preprocessed_inputs,\n final_endpoint='pool5',\n trainable=False,\n scope=scope)\n feature_maps = feature_map_generators.multi_resolution_feature_maps(\n feature_map_layout=feature_map_layout,\n depth_multiplier=self._depth_multiplier,\n min_depth=self._min_depth,\n insert_1x1_conv=True,\n image_features=image_features)\n\n return feature_maps.values()",
"def extractFeatures(image, mask, name, binCount=8, features=\"all\"):\n def extractType(func, type_name):\n name = []\n values = []\n feat = func(image,mask, binCount=binCount)\n feat.enableAllFeatures() \n feat.execute()\n for (key,val) in six.iteritems(feat.featureValues):\n name.append(key+f'_{type_name}')\n values.append(val)\n return pd.DataFrame([values], columns=name)\n\n dim = image.GetDimension()\n\n features_array = np.array([\"FO\", f\"S{dim}D\", \"GLCM\", \"GLSZM\", \"GLRLM\", \"NGTDM\", \"GLDM\"])\n features_func = np.array([firstorder.RadiomicsFirstOrder, eval(f\"shape{'2D'*(dim == 2)}.RadiomicsShape{'2D'*(dim==2)}\"), \n glcm.RadiomicsGLCM, glszm.RadiomicsGLSZM, glrlm.RadiomicsGLRLM, ngtdm.RadiomicsNGTDM, \n gldm.RadiomicsGLDM])\n if features != \"all\":\n if features is str:\n print(\"Type wrong. Returning None.\")\n return None\n index = pd.Index(features_array).isin(features)\n features_array = features_array[index]\n features_func = features_func[index]\n\n list_feat = list(map(lambda i: extractType(features_func[i], features_array[i]), np.arange(len(features_array))))\n df = pd.concat([pd.DataFrame([name], columns=[\"Caso\"])] + list_feat, axis=1)\n return df",
"def preprocess(args, g, features):\n # g = dgl.to_homogeneous(g)\n with torch.no_grad():\n g.edata[\"weight\"] = calc_weight(g)\n g.ndata[\"feat_0\"] = features\n for hop in range(1, args['n_hops'] + 1):\n g.update_all(fn.u_mul_e(f\"feat_{hop - 1}\", \"weight\", \"msg\"),\n fn.sum(\"msg\", f\"feat_{hop}\"))\n hop_feat_list = []\n for hop in range(args['n_hops'] + 1):\n hop_feat_list.append(g.ndata.pop(f\"feat_{hop}\"))\n return hop_feat_list",
"def _precompute_image_features(img, layers, shape, save_dir):\n # type: (np.ndarray, Union[Tuple[str], List[str]], Union[Tuple[int], List[int]]) -> Dict[str, np.ndarray]\n features_dict = {}\n g = tf.Graph()\n # Choose to use cpu here because we only need to compute this once and using cpu would provide us more memory\n # than the gpu and therefore allow us to process larger style images using the extra memory. This will not have\n # an effect on the training speed later since the gram matrix size is not related to the size of the image.\n with g.as_default(), g.device('/cpu:0'), tf.Session() as sess:\n with tf.variable_scope(\"discriminator\", reuse=False):\n image = tf.placeholder(tf.uint8, shape=shape)\n image_float = tf.image.convert_image_dtype(image,dtype=tf.float32) * 2 - 1\n net = vgg.net(image_float, trainable=False)\n style_pre = np.array([img])\n style_pre = style_pre.astype(np.uint8)\n\n if '0.12.0' in tf.__version__:\n all_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)\n else:\n all_vars = tf.get_collection(tf.GraphKeys.VARIABLES)\n\n discrim_tvars = [var for var in all_vars if var.name.startswith(\"discriminator\")]\n saver = tf.train.Saver(discrim_tvars)\n\n ckpt = tf.train.get_checkpoint_state(save_dir)\n if ckpt and ckpt.model_checkpoint_path:\n saver.restore(sess, ckpt.model_checkpoint_path)\n else:\n raise AssertionError(\"Cannot load from save directory.\")\n\n var_not_saved = [item for item in all_vars if item not in discrim_tvars]\n sess.run(tf.initialize_variables(var_not_saved))\n\n\n for layer in layers:\n # Calculate and store gramian.\n features = net[layer].eval(feed_dict={image: style_pre})\n features = np.reshape(features, (-1, features.shape[3]))\n gram = np.matmul(features.T, features) / features.size\n features_dict[layer] = gram\n return features_dict",
"def _extract_features(self):\n # print(os.getpid())\n return {n:self._extract_feature(f) for (n,f) in self.features.items()}",
"def get_features(self):\n if self.strokes is False:\n print('Isolating strokes')\n self.isolate_strokes()\n # List of features to use (sm1 omitted because always nan)\n feature_names = ('zrc', 'centroid',\n 'cm0', 'cm1', 'cm2', 'cm3', 'cm4',\n 'sm0', 'sm2')\n features_list = []\n for istroke in self.strokes:\n if not self.isGoodFrame(istroke):\n continue\n ifeature_dic = self.extract_features_from_frame(istroke)\n ifeature_list = []\n for ifeature in feature_names:\n ifeature_list.append(ifeature_dic[ifeature])\n features_list.append(ifeature_list)\n return {'feature_names': feature_names,\n 'feature_table': np.array(features_list)}"
] | [
"0.78582704",
"0.7087954",
"0.6903325",
"0.6433953",
"0.64332956",
"0.64121157",
"0.6316672",
"0.62753826",
"0.61490476",
"0.6146242",
"0.61101943",
"0.60842645",
"0.6055493",
"0.60267216",
"0.60207623",
"0.6020109",
"0.6018783",
"0.6010386",
"0.5994345",
"0.5985522",
"0.5985261",
"0.5937383",
"0.59319365",
"0.59129643",
"0.5911842",
"0.58991164",
"0.589174",
"0.58887327",
"0.58822024",
"0.5846836"
] | 0.78912055 | 0 |
Downsamples spike data to include only the top 1% of frames | def downsample_spikes(S, thres=150, verbose=1):
sum_S = np.sum(S, axis=0)
if verbose > 0:
print(
'Downsampling spike data to {} frames using threshold {}'
.format(np.sum(np.greater(sum_S, thres)), thres))
return S[:, np.greater(sum_S, thres)] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _down_sample(self):\n self._subsamples = self._raw_data.samples[::self._down_sample_factor]\n # Neglects the redundant subsamples in the tails.\n if len(self._subsamples) >= self._number_of_subsamples:\n self._subsamples = self._subsamples[:self._number_of_subsamples]\n if not len(self._subsamples) == self._number_of_subsamples:\n raise WaveformError(\n 'Number of subsample is %r, while %r is expected' % (\n len(self._subsamples), self._number_of_subsamples))\n logging.debug('down-samples: %r', self._subsamples)",
"def downsample_sam(self, factor):",
"def downsample_data(dataset):\n loss = dataset.loc[dataset[TARGET] == 'loss']\n good_gain = dataset.loc[dataset[TARGET] == 'good_gain']\n \n sample_size = min([loss.shape[0], good_gain.shape[0]])\n loss = loss.sample(n=sample_size, random_state=42)\n good_gain = good_gain.sample(n=sample_size, random_state=42)\n \n frames = [loss, good_gain]\n return shuffle(pd.concat(frames), random_state=0)",
"def resample(self):\n pass",
"def subbandwidth(self):",
"def detrend_and_decimate_new(trace,f_sample, params):\n\n logging.info(\"detrending\")\n \n f_new = int(params.f_new)\n print(f_sample,f_new)\n f_sample2= (int(f_sample)//1000)*1000\n print(f_sample2,f_new)\n leng =len(trace)\n\n up = int(f_new/np.gcd(f_sample2,f_new))\n down = int(f_sample2*up/f_new)\n print(up,down)\n factor=down/up\n logging.info(f\"up = {up}, down = {down}\")\n\n # up = int(100_000//f_sample)\n # down = int(100_000//f_new)\n\n\n trace_sub = resample_poly(trace,up,down,padtype='edge')\n dt=1/f_new\n times_sub = np.linspace(0.0,leng/f_sample,len(trace_sub))\n\n ord_filt_len = 2*(int(params.ord_len_ms*f_new/1000)//2)+1\n trace_sub2_ord = order_filter(trace_sub, np.ones(ord_filt_len), ord_filt_len//10) # 10 percentile filter\n\n down_temp = int(f_new//params.f_ord_decimate) \n print(f\"down_temp = {down_temp}\")\n trace_sub2_ord = decimate(trace_sub2_ord, down_temp, ftype='fir')\n trace_sub2_ord = medfilt(trace_sub2_ord) #median filter after decimation\n trace_sub2_ord = resample_poly(trace_sub2_ord, down_temp, 1,padtype='edge')\n\n savgol_len1 = 2*(int(25*f_new/1000)//2)+1\n\n # trace_sub2_ord = savgol_filter(trace_sub2_ord, savgol_len1, 3, mode='interp')\n\n #added to fix length errors, URGH\n last_ind=min(len(trace_sub),len(trace_sub2_ord))\n \n trace_zerod = trace_sub[:last_ind]-trace_sub2_ord[:last_ind]\n \n times_sub = times_sub[:last_ind]\n\n\n MAD = stats.median_absolute_deviation(trace_zerod)\n\n\n\n if params.post_savgol: # False\n savgol_len2 = 2*(int(params.savgol_len_ms*f_new/1000)//2)+1\n trace_zerod = savgol_filter(trace_zerod, savgol_len2, 3, mode='interp') # params.savgol_len=7\n \n trace_zerod = trace_zerod - np.quantile(trace_zerod, params.subs_quantile) # params.subs_quantile=0.25\n logging.info(\"finished detrending\")\n \n # times[]\n\n return trace_zerod, times_sub, MAD , factor",
"def downsample_fluorescence(F, thres=20, verbose=1):\n diff_F = np.diff(F, axis=1)\n sum_F = np.sum(diff_F, axis=0)\n F = F[:,:-1]\n if verbose > 0:\n print(\n 'Downsampling fluorescence data to {} frames using threshold {}'\n .format(np.sum(np.greater(sum_F, thres))))\n \n return F[:, np.greater(sum_F, thres)]",
"def no_overfitting(self):\n\n # Instance with minimun length should be the maximum length\n train_len = []\n [train_len.append(st['Nevents']) for st in self.stats]\n train_len = np.array(train_len)\n max_len = train_len[train_len != 0].min()\n\n # CROPS FEATURE SAMPLES\n onpower_train = pd.DataFrame()\n offpower_train = pd.DataFrame()\n duration_train = pd.DataFrame()\n start = 0\n end = 0\n for ind in np.arange(len(self.stats)):\n if self.stats[ind]['Nevents'] != 0:\n if ind == 0:\n start = 0\n else:\n start = end\n end += self.stats[ind]['Nevents']\n\n aux = self.onpower_train[start:end]\n aux = aux[:max_len]\n onpower_train = pd.concat([onpower_train, aux])\n\n aux = self.offpower_train[start:end]\n aux = aux[:max_len]\n offpower_train = pd.concat([offpower_train, aux])\n\n aux = self.duration_train[start:end]\n aux = aux[:max_len]\n duration_train = pd.concat([duration_train, aux])\n\n # udating stats:\n self.stats[ind]['Nevents'] = max_len\n\n self.onpower_train = onpower_train\n self.offpower_train = offpower_train\n self.duration_train = duration_train\n\n # RE-TRAINS FEATURES:\n self.__retrain(self.onpower, self.onpower_train)\n self.__retrain(self.offpower, self.offpower_train)\n self.__retrain(self.duration, self.duration_train)",
"def get_next_sample(self):",
"def downsampling(inp_img):\n\n\n img = np.array(inp_img)\n f = max(1, np.rint(np.amin(img)/256))\n\n if f > 1:\n lpf = np.ones((f, f))\n f = (1/(f*f))*lpf\n img = cv2.filter2D(img, -1, kernel=f)\n out = np.hstack((img[:, :, 0], img[:, :, 1], img[:, :, 2]))\n\n return out",
"def downsample_frame(self, data_frame, rate='5min'):\n if data_frame is pd.DataFrame:\n data_frame.resample(rate, how='mean', closed='right')\n pass",
"def downsample(state):\n return state[::2, ::2, :]",
"def downsample(data, downsampling, summary=np.sum, allow_trim=False):\n data = np.asarray(data)\n if data.ndim != 2:\n raise ValueError('Data must be 2 dimensional.')\n ny, nx = data.shape\n if not allow_trim and ((nx % downsampling) or (ny % downsampling)):\n raise ValueError('Data shape {0} does not evenly divide downsampling={1} and allow_trim is False.'\n .format((ny, nx), downsampling))\n ny //= downsampling\n nx //= downsampling\n shape = (ny, nx, downsampling, downsampling)\n strides = (downsampling * data.strides[0], downsampling * data.strides[1]) + data.strides\n blocks = np.lib.stride_tricks.as_strided(\n data[:downsampling * ny, :downsampling * nx], shape=shape, strides=strides)\n return summary(blocks, axis=(2, 3))",
"def oversampling_experiment():\n model, history = train.train(BATCH_SIZE, EPOCHS, print_model_summary=True,\n oversampling=True)\n evaluate_both(model)\n plotting.plot_metrics(history)",
"def downsample_pupil(df, pup_col, time_col, bin_size, method='median'): \r\n \r\n if method not in ['mean','median']:\r\n raise Exception(\"Invalid sampling method. Please use 'mean' or 'median'.\")\r\n \r\n # convert the microsecond timestamp to datetime timestamp\r\n df[time_col] = pd.to_datetime(df[time_col], unit = 'ms')\r\n \r\n # resampling on the datetime timestamp\r\n df[pup_col+'_resamp'] = df[pup_col]\r\n resampler = df[[time_col] + [pup_col+'_resamp']].resample(bin_size, on=time_col, loffset='0ms',label='left')\r\n \r\n # decide which method to calculate results\r\n if method == 'median':\r\n resampled_samps = resampler.median()\r\n elif method == 'mean':\r\n resampled_samps = resampler.mean()\r\n \r\n # convert the datetime timestamp back to microsecond timestamp\r\n resampled_samps.index = (resampled_samps.index - pd.Timestamp(\"1970-01-01\")) // pd.Timedelta('1ms')\r\n \r\n return resampled_samps",
"def downsample(self, number):\n for num, ss in enumerate(self.samples):\n self.samples[num], self.extra_kwargs[num] = _downsample(\n ss, number, extra_kwargs=self.extra_kwargs[num]\n )",
"def down_sampling(record, down_sampling_factor=16):\n\n if len(record.shape) == 1:\n return record[slice(0, record.shape[0], down_sampling_factor)]\n else:\n row_idx = np.arange(record.shape[0])\n col_idx = np.arange(0, record.shape[1], down_sampling_factor)\n\n return record[np.ix_(row_idx, col_idx)]",
"def _compute_quantized_subsamples(self):\n self._down_sample()\n self._quantize()",
"def subsample():\n\n nwav = 872\n nrow = 1600\n ncol = 1560\n\n fpath = os.path.join(HYSS_ENVIRON['HYSS_WRITE'],'raw_binned/nrow1600')\n fnames = ['full_frame_20ms_faster_VNIR_1600.raw',\n 'full_frame_20ms_faster_VNIR_1600_flat.raw']\n\n for fname in fnames:\n print(\"SUBSAMPLE: reading data from {0}\".format(fpath))\n print(\"SUBSAMPLE: {0}\".format(fname))\n data = np.fromfile(os.path.join(fpath,fname)).reshape(nwav,nrow,ncol)\n\n for fac in [2,4,8]:\n trow = '{0:04}'.format(1600/fac)\n opath = os.path.join(HYSS_ENVIRON['HYSS_WRITE'],'raw_subsample',\n 'nrow'+trow)\n oname = fname.replace('1600',trow)\n\n print(\"SUBSAMPLE: writing subsampled data to {0}\".format(opath))\n print(\"SUBSAMPLE: {0}\".format(oname))\n data[:,::fac,::fac].tofile(open(os.path.join(opath,oname),'wb'))\n\n return",
"def downsample(self, number):\n self.samples, self.extra_kwargs = _downsample(\n self.samples, number, extra_kwargs=self.extra_kwargs\n )",
"def _subsample_frames(self, video_clip_frames):\n subsampled_frames = []\n current_ix = 0\n step_size = len(video_clip_frames) / float(config.RGB_N_FRAMES)\n for _ in range(config.RGB_N_FRAMES):\n frame = video_clip_frames[int(current_ix)]\n subsampled_frames.append(frame)\n current_ix += step_size\n\n return np.array(subsampled_frames)",
"def downsample(time_series,res = '0.2S'):\n\n Nvalues = len(time_series.index)\n samplerate = 1/ ((time_series.timestamp[Nvalues-1] - time_series.timestamp[0]) / Nvalues)\n timestart = dt.datetime(1970, 1, 1, 0, 0, 0, 0) #dt.datetime.now()\n start = pd.Timestamp(timestart)\n end = pd.Timestamp(timestart + dt.timedelta(seconds=Nvalues/samplerate))\n t = np.linspace(start.value, end.value, Nvalues)\n t = pd.to_datetime(t)\n time_series['time'] = t\n time_series = time_series.resample(res,on='time').mean() # downsample to 0.2 second intervals\n time_series.index.name = 'time'\n time_series.reset_index(inplace=True)\n return time_series",
"def test_downsample_raises_error_greater_output_fps():\n with pytest.raises(\n ValueError,\n match=r'Output FPS can\\'t be greater than input FPS'):\n downsample(np.arange(10), 1, 5)",
"def samples_keep(self,index):\n\n\t\tif isinstance(index, (int, long)): index = range(self.samples)[-index:]\n\n\t\tself.sampled_topics = np.take(self.sampled_topics,index,axis=0)\n\t\tself.tt = np.take(self.tt,index,axis=2)\n\t\tself.dt = np.take(self.dt,index,axis=2)\n\n\t\tself.samples = len(index)",
"def subsample(y, limit=256, factor=2):\n if len(y) > limit:\n return y[::factor].reset_index(drop=True)\n return y",
"def upsample(x):\n return F.interpolate(x, scale_factor=2, mode=\"nearest\")",
"def _fract_whole_data(self) :\n if self._fract_data == -1 :\n pass\n else :\n rows = self._df.shape[0]\n fract_rows = int(rows*self._fract_data)\n self._df = self._df.sample(fract_rows).copy()",
"def subsample(df, freq=2):\n df = df.iloc[::freq, :]\n\n return df",
"def downsampling(x_train, y_train, random_state=42):\n sampling = pd.concat([x_train, y_train], axis=1)\n big = sampling[y_train == y_train.value_counts().index[0]]\n small = sampling[y_train == y_train.value_counts().index[1]]\n\n downsampled = resample(big,\n replace=False,\n n_samples=len(small),\n random_state=random_state)\n downsampled = pd.concat([downsampled, small])\n x_train_bal = downsampled[downsampled.columns.values[:-1]]\n y_train_bal = downsampled[downsampled.columns.values[-1]]\n\n del sampling, big, small, downsampled\n return x_train_bal, y_train_bal",
"def sample_generator(self, data, index):\r\n out = []\r\n frames = data[\"video\"]\r\n for speed_idx, speed in enumerate(self.speed_set):\r\n # generate all the samples according to the speed set\r\n num_input_frames, h, w, c = frames.shape\r\n frame_idx = random.randint(0, num_input_frames-1)\r\n selected_frame = frames[frame_idx] # H, W, C\r\n\r\n # standardize the frame size\r\n if self.cfg.PRETRAIN.FRAME_SIZE_STANDARDIZE_ENABLE: \r\n selected_frame = self.frame_size_standardize(selected_frame)\r\n \r\n # generate the sample index \r\n h, w, c = selected_frame.shape\r\n speed_x, speed_y = speed\r\n start_x, end_x = self.get_crop_params(speed_x/(self.num_speeds//2), w)\r\n start_y, end_y = self.get_crop_params(speed_y/(self.num_speeds//2), h)\r\n intermediate_x = (torch.linspace(start_x, end_x, self.num_frames).long()).clamp_(0, w-self.crop_size)\r\n intermediate_y = (torch.linspace(start_y, end_y, self.num_frames).long()).clamp_(0, h-self.crop_size)\r\n \r\n frames_out = torch.empty(\r\n self.num_frames, self.crop_size, self.crop_size, c, device=frames.device, dtype=frames.dtype\r\n )\r\n\r\n for t in range(self.num_frames):\r\n frames_out[t] = selected_frame[\r\n intermediate_y[t]:intermediate_y[t]+self.crop_size, intermediate_x[t]:intermediate_x[t]+self.crop_size, :\r\n ]\r\n\r\n # performs augmentation on the generated image sequence\r\n if self.transform is not None:\r\n frames_out = self.transform(frames_out)\r\n \r\n # applies static mask\r\n if self.static_mask_enable:\r\n frames_out = self.static_mask(frames_out)\r\n out.append(frames_out)\r\n out = torch.stack(out)\r\n data[\"video\"] = out\r\n return data"
] | [
"0.6455042",
"0.61035407",
"0.59346664",
"0.58270806",
"0.58128667",
"0.57843024",
"0.5715292",
"0.57149154",
"0.57001853",
"0.5676207",
"0.5668947",
"0.559317",
"0.5588073",
"0.5547957",
"0.5523909",
"0.5510277",
"0.5501954",
"0.54938704",
"0.54915655",
"0.5463476",
"0.543445",
"0.5414946",
"0.5383452",
"0.537052",
"0.53490114",
"0.5330503",
"0.5323642",
"0.5315958",
"0.53073895",
"0.530439"
] | 0.66121924 | 0 |
Downsamples fluorescence data to include approximately the top 1% of frames based on total increase in activity. Currently the threshold is set for 1000 neurons. Original code from | def downsample_fluorescence(F, thres=20, verbose=1):
diff_F = np.diff(F, axis=1)
sum_F = np.sum(diff_F, axis=0)
F = F[:,:-1]
if verbose > 0:
print(
'Downsampling fluorescence data to {} frames using threshold {}'
            .format(np.sum(np.greater(sum_F, thres)), thres))
return F[:, np.greater(sum_F, thres)] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def no_overfitting(self):\n\n # Instance with minimun length should be the maximum length\n train_len = []\n [train_len.append(st['Nevents']) for st in self.stats]\n train_len = np.array(train_len)\n max_len = train_len[train_len != 0].min()\n\n # CROPS FEATURE SAMPLES\n onpower_train = pd.DataFrame()\n offpower_train = pd.DataFrame()\n duration_train = pd.DataFrame()\n start = 0\n end = 0\n for ind in np.arange(len(self.stats)):\n if self.stats[ind]['Nevents'] != 0:\n if ind == 0:\n start = 0\n else:\n start = end\n end += self.stats[ind]['Nevents']\n\n aux = self.onpower_train[start:end]\n aux = aux[:max_len]\n onpower_train = pd.concat([onpower_train, aux])\n\n aux = self.offpower_train[start:end]\n aux = aux[:max_len]\n offpower_train = pd.concat([offpower_train, aux])\n\n aux = self.duration_train[start:end]\n aux = aux[:max_len]\n duration_train = pd.concat([duration_train, aux])\n\n # udating stats:\n self.stats[ind]['Nevents'] = max_len\n\n self.onpower_train = onpower_train\n self.offpower_train = offpower_train\n self.duration_train = duration_train\n\n # RE-TRAINS FEATURES:\n self.__retrain(self.onpower, self.onpower_train)\n self.__retrain(self.offpower, self.offpower_train)\n self.__retrain(self.duration, self.duration_train)",
"def determine_silence_threshold(self):\n loudest_sound_cohort_size = 0.2 # Top 20% are counted in the loudest sound group.\n silence_threshold_multiplier = 1.6 # Sounds must be at least 1.6x as loud as the loudest silence\n\n rospy.loginfo(\"Getting intensity values from mic.\")\n self.open_stream()\n tss = self.total_silence_samples\n values = [math.sqrt(abs(audioop.avg(self.stream.read(self.chunk_size), self.audio_format_width)))\n for _ in range(tss)]\n values = sorted(values, reverse=True)\n sum_of_loudest_sounds = sum(values[:int(tss * loudest_sound_cohort_size)])\n total_samples_in_cohort = int(tss * loudest_sound_cohort_size)\n average_of_loudest_sounds = sum_of_loudest_sounds / total_samples_in_cohort\n rospy.loginfo(\"Average audio intensity is %d\" % average_of_loudest_sounds)\n self.silence_threshold = average_of_loudest_sounds * silence_threshold_multiplier\n rospy.loginfo(\"Silence threshold set to %d \" % self.silence_threshold)\n self.close_stream()",
"def _down_sample(self):\n self._subsamples = self._raw_data.samples[::self._down_sample_factor]\n # Neglects the redundant subsamples in the tails.\n if len(self._subsamples) >= self._number_of_subsamples:\n self._subsamples = self._subsamples[:self._number_of_subsamples]\n if not len(self._subsamples) == self._number_of_subsamples:\n raise WaveformError(\n 'Number of subsample is %r, while %r is expected' % (\n len(self._subsamples), self._number_of_subsamples))\n logging.debug('down-samples: %r', self._subsamples)",
"def main(threshold=100, normed_length=200):\n base_loc = DATA_DIR + '/raw/human_activity/RawData'\n labels_file_data = read_file_data(base_loc + '/labels.txt', int)\n\n X = []\n y = []\n\n last_experiment_number = None\n last_user_number = None\n for experiment_number, user_number, activity_number, start, end in labels_file_data:\n # There are 12 classes:\n # 1 Walking\n # 2 Walking upstairs\n # 3 Walking downstairs\n # 4 Sitting\n # 5 Standing\n # 6 Lieing down\n # 7 Standing to siting\n # 8 Sitting to standing\n # 9 Siting to lieing down\n # 10 Lieing down to sitting\n # 11 Standing to lieing down\n # 12 Lieing down to standing\n # But some have very few samples, and without them it's basically a balanced classification problem.\n if activity_number > 6:\n continue\n\n end += 1\n if experiment_number != last_experiment_number or user_number != last_user_number:\n acc_filename = 'acc_exp{:02}_user{:02}.txt'.format(experiment_number, user_number)\n gyro_filename = 'gyro_exp{:02}_user{:02}.txt'.format(experiment_number, user_number)\n acc_file_data = torch.tensor(read_file_data(base_loc + '/' + acc_filename, float))\n gyro_file_data = torch.tensor(read_file_data(base_loc + '/' + gyro_filename, float))\n # Is a tensor of shape (length, channels=6)\n both_data = torch.cat([acc_file_data, gyro_file_data], dim=1)\n last_experiment_number = experiment_number\n last_user_number = user_number\n\n # minimum length is 74\n # maximum length is 2032\n # I think what they did in the original dataset was split it up into pieces roughly 74 steps long. It's not\n # obvious that it's going to be that easy to learn from short series so here we split it up into pieces\n # 'normed_length' steps long, and apply fill-forward padding to the end if it's still at least of length\n # 'threshold'' and discard it if it's shorter. This doesn't affect much of our dataset.\n for start_ in range(start, end, normed_length):\n start_plus = start_ + normed_length\n if start_plus > end:\n too_short = True\n if start_plus - end < threshold:\n continue # skip data\n end_ = min(start_plus, end)\n else:\n too_short = False\n end_ = start_plus\n Xi = both_data[start_:end_]\n if too_short:\n Xi = torch.cat([Xi, Xi[-1].repeat(start_plus - end, 1)], dim=0)\n X.append(Xi)\n y.append(activity_number - 1)\n X = torch.stack(X, dim=0)\n y = torch.tensor(y)\n return X, y",
"def downsample_data(dataset):\n loss = dataset.loc[dataset[TARGET] == 'loss']\n good_gain = dataset.loc[dataset[TARGET] == 'good_gain']\n \n sample_size = min([loss.shape[0], good_gain.shape[0]])\n loss = loss.sample(n=sample_size, random_state=42)\n good_gain = good_gain.sample(n=sample_size, random_state=42)\n \n frames = [loss, good_gain]\n return shuffle(pd.concat(frames), random_state=0)",
"def detrend_and_decimate_new(trace,f_sample, params):\n\n logging.info(\"detrending\")\n \n f_new = int(params.f_new)\n print(f_sample,f_new)\n f_sample2= (int(f_sample)//1000)*1000\n print(f_sample2,f_new)\n leng =len(trace)\n\n up = int(f_new/np.gcd(f_sample2,f_new))\n down = int(f_sample2*up/f_new)\n print(up,down)\n factor=down/up\n logging.info(f\"up = {up}, down = {down}\")\n\n # up = int(100_000//f_sample)\n # down = int(100_000//f_new)\n\n\n trace_sub = resample_poly(trace,up,down,padtype='edge')\n dt=1/f_new\n times_sub = np.linspace(0.0,leng/f_sample,len(trace_sub))\n\n ord_filt_len = 2*(int(params.ord_len_ms*f_new/1000)//2)+1\n trace_sub2_ord = order_filter(trace_sub, np.ones(ord_filt_len), ord_filt_len//10) # 10 percentile filter\n\n down_temp = int(f_new//params.f_ord_decimate) \n print(f\"down_temp = {down_temp}\")\n trace_sub2_ord = decimate(trace_sub2_ord, down_temp, ftype='fir')\n trace_sub2_ord = medfilt(trace_sub2_ord) #median filter after decimation\n trace_sub2_ord = resample_poly(trace_sub2_ord, down_temp, 1,padtype='edge')\n\n savgol_len1 = 2*(int(25*f_new/1000)//2)+1\n\n # trace_sub2_ord = savgol_filter(trace_sub2_ord, savgol_len1, 3, mode='interp')\n\n #added to fix length errors, URGH\n last_ind=min(len(trace_sub),len(trace_sub2_ord))\n \n trace_zerod = trace_sub[:last_ind]-trace_sub2_ord[:last_ind]\n \n times_sub = times_sub[:last_ind]\n\n\n MAD = stats.median_absolute_deviation(trace_zerod)\n\n\n\n if params.post_savgol: # False\n savgol_len2 = 2*(int(params.savgol_len_ms*f_new/1000)//2)+1\n trace_zerod = savgol_filter(trace_zerod, savgol_len2, 3, mode='interp') # params.savgol_len=7\n \n trace_zerod = trace_zerod - np.quantile(trace_zerod, params.subs_quantile) # params.subs_quantile=0.25\n logging.info(\"finished detrending\")\n \n # times[]\n\n return trace_zerod, times_sub, MAD , factor",
"def get_silence_threshold(sound, lower_quantile):\n soundint = sound.to_intensity()\n max_intensity = call(soundint, 'Get quantile', 0.0, 0.0, 1)\n sil_intensity = call(soundint, 'Get quantile', 0.0, 0.0, lower_quantile)\n return sil_intensity - max_intensity",
"def remove_silence(y, threshold=-50, nb_sample=4096): \r\n from scipy.ndimage.filters import maximum_filter1d \r\n \r\n if np.max(y) != 1.0:\r\n raise ValueError(\"Input signal is expected to be normalised to 1\")\r\n \r\n # Ignore log(0) warnings\r\n np.seterr(divide = 'ignore') \r\n y_db = 20 * np.log10(np.abs(y))\r\n np.seterr(divide = 'warn') \r\n \r\n y_envelope = maximum_filter1d(y_db, nb_sample) \r\n mask = y_envelope >= threshold\r\n y_out = y[mask]\r\n \r\n return(y_out)",
"def gen_sensor_reward(self,MAX_UNCERTAINTY,window_size,window_lag):\n\n for i in range(0, len(self.tracker_object.tracks)):\n unormalized_uncertainty = np.sum(self.tracker_object.tracks[i].p_k_k.diagonal())\n self.uncertainty[i].append((1.0 / MAX_UNCERTAINTY) * unormalized_uncertainty)\n\n\n this_uncertainty = []\n [this_uncertainty.append(self.uncertainty[x][-1]) for x in range(0, len(self.tracker_object.tracks))]\n\n self.avg_uncertainty.append(np.mean(this_uncertainty))\n\n if len(self.avg_uncertainty) < window_size + window_lag:\n self.reward.append(0)\n else:\n current_avg = np.mean(self.avg_uncertainty[-window_size:])\n prev_avg = np.mean(self.avg_uncertainty[-(window_size + window_lag):-window_lag])\n if current_avg < prev_avg or self.avg_uncertainty[-1] < .1:\n # if current_avg < prev_avg:\n self.reward.append(1)\n else:\n self.reward.append(0)",
"def discard_samples(chain_length):\n return min(chain_length / 10, MAX_GEN_DISCARD)",
"def test_flmb(self):\n self.create_sample_data_set_dir(\"node10p1.dat\", TELEM_DIR, \"node59p1.dat\")\n self.assert_initialize()\n result = self.data_subscribers.get_samples(DataParticleType.METADATA_TELEMETERED,1,30)\n result = self.data_subscribers.get_samples(DataParticleType.SAMPLE_TELEMETERED,5,30)",
"def downsample_spikes(S, thres=150, verbose=1):\n sum_S = np.sum(S, axis=0)\n if verbose > 0:\n print(\n 'Downsampling spike data to {} frames using threshold {}'\n .format(np.sum(np.greater(sum_S, thres)), thres))\n \n return S[:, np.greater(sum_S, thres)]",
"def filter_samples(df, normal_samples, damaged_samples, assembly_samples, missing_samples, damaged_thread_samples,\n loosening_samples, move_samples):\n # Count the sample types\n count_df = df.groupby(['sample_nr'])['label'].median()\n unique, counts = np.unique(count_df, return_counts=True)\n labels_count_dict = {A: B for A, B in zip(unique, counts)}\n\n # Take only the amount of samples that's needed to fill the requirement\n sampled_list = []\n for label in labels_count_dict:\n subindex = list(np.unique(df.loc[df['label'] == label].index.get_level_values(0)))\n\n if label == 0:\n to_take = normal_samples * labels_count_dict[0]\n elif label == 1:\n to_take = damaged_samples * labels_count_dict[1]\n elif label == 2:\n to_take = assembly_samples * labels_count_dict[2]\n elif label == 3:\n to_take = missing_samples * labels_count_dict[3]\n elif label == 4:\n to_take = damaged_thread_samples * labels_count_dict[4]\n elif label == 5:\n to_take = loosening_samples * labels_count_dict[5]\n elif label == 6:\n to_take = move_samples * labels_count_dict[6]\n\n sample_ids = np.random.choice(subindex, int(to_take), replace=False)\n sampled_df = df[df.index.get_level_values(0).isin(sample_ids)]\n sampled_list.append(sampled_df)\n\n taken_data = pd.concat(sampled_list, ignore_index=False).sort_values(['sample_nr', 'event'])\n\n # Reset the sample numbers\n taken_data = taken_data.reset_index()\n taken_data['sample_nr'] = (taken_data['sample_nr'] != taken_data['sample_nr'].shift(1)).astype(int).cumsum()\n taken_data['event'] = taken_data.index\n taken_data = taken_data.set_index(['sample_nr', 'event'])\n taken_data = taken_data.reset_index('event', drop=True)\n taken_data = taken_data.set_index(taken_data.groupby(level=0).cumcount().rename('event'), append=True)\n taken_data = taken_data.sort_index()\n\n return taken_data",
"def filter_by_freq(self, low=0.5, high=40):\n self.epochs.load_data()\n self.epochs.filter(l_freq=low, h_freq=high, picks = 'all')\n return self.epochs",
"def main():\n\n\n ## Groups showing similar noise profile\n #grp1 = [ 1, 4, 5, 8, 9 ]\n #grp2 = [ 18, 19, 22, 23, 30, 31 ]\n grp1 = [ 0, 1, 6, 7, 4, 5 ]\n grp2 = [ 12, 13, 16, 17, 18, 19 ]\n #grp3 = [ 18, 19, 22, 23, 26, 27 ]\n with tb.open_file(sys.argv[1], 'r') as dataF:\n\n npm = len(dataF.root.Sensors.DataPMT)#len(dataF.root.RD.pmtrwf[0])\n nevt = len(dataF.root.RD.pmtrwf)\n\n ## Filter definition\n fSample = 40E6\n freqLPF = 100E3\n freqLPFd = 2*freqLPF / fSample\n b, a = signal.butter(1, freqLPFd, 'low', analog=False)\n ##\n fig, axes = plt.subplots(nrows=3, ncols=2, figsize=(20,6))\n #fig.tight_layout()\n fig.show()\n wf_len = len(dataF.root.RD.pmtrwf[0][0])\n if len(sys.argv) > 3:\n wf_len = wf_len/2+1 \n elif len(sys.argv) == 3:\n g1_first = np.zeros(wf_len, np.float64)\n g2_first = np.zeros(wf_len, np.float64)\n g3_first = np.zeros(wf_len, np.float64)\n mean_first = np.zeros(wf_len, np.float64)\n ##\n for ievt in range(nevt):\n ## clear the axies\n for ax in axes.flatten():\n ax.cla()\n plt_frq = np.zeros(wf_len, np.float64)\n fwf_mean = np.zeros(wf_len, np.float64)\n wf_mean = np.zeros(wf_len, np.float64) # No filter\n g1_mean = np.zeros(wf_len, np.float64)\n g2_mean = np.zeros(wf_len, np.float64)\n g3_mean = np.zeros(wf_len, np.float64)\n for ipm in range(npm):\n\n sg = getWF(dataF, ipm, ievt)\n sg = sg - np.mean(sg)\n\n sgf = signal.lfilter(b, a, sg)\n ## remove mean again just in case\n sgf = sgf - np.mean(sgf)\n #sgf = sg\n\n pmID = getPMid(dataF, ipm)\n\n if len(sys.argv) == 3:\n axes[0][0].plot(sgf, label='pmt '+str(pmID))\n fwf_mean += sgf/npm\n wf_mean += sg/npm\n if pmID in grp1:\n g1_mean += sgf/len(grp1)\n elif pmID in grp2:\n g2_mean += sgf/len(grp2)\n elif pmID in grp3:\n g3_mean += sgf/len(grp3)\n else:\n ft = np.fft.rfft(sgf)\n freq = np.fft.rfftfreq(len(sgf), d=25E-9)\n if ipm == 0:\n plt_frq = freq\n if sys.argv[2] == 'mag':\n ft_mag = np.absolute(ft)\n axes[0][0].plot(freq, ft_mag, label='pmt '+str(pmID))\n fwf_mean += ft_mag/npm\n if pmID in grp1:\n g1_mean += ft_mag/len(grp1)\n elif pmID in grp2:\n g2_mean += ft_mag/len(grp2)\n elif pmID in grp3:\n g3_mean += ft_mag/len(grp3)\n elif sys.argv[2] == 'phase':\n ft_pha = np.angle(ft)\n axes[0][0].plot(freq, ft_pha, label='pmt '+str(pmID))\n fwf_mean += ft_pha/npm\n if pmID in grp1:\n g1_mean += ft_pha/len(grp1)\n elif pmID in grp2:\n g2_mean += ft_pha/len(grp2)\n elif pmID in grp3:\n g3_mean += ft_pha/len(grp3)\n \n \n ## The axes not set\n if len(sys.argv) == 3:\n axes[0][1].plot(g1_mean)\n axes[0][1].set_title('Group 1 mean waveform')\n axes[1][0].plot(g2_mean)\n axes[1][0].set_title('Group 2 mean waveform')\n axes[1][1].plot(g3_mean)\n axes[1][1].set_title('Group 3 mean waveform')\n axes[2][0].plot(fwf_mean)\n axes[2][0].set_title('Mean waveform')\n if ievt == 0:\n g1_first = g1_mean\n g2_first = g2_mean\n g3_first = g3_mean\n mean_first = fwf_mean\n else:\n axes[0][1].plot(g1_first)\n axes[1][0].plot(g2_first)\n axes[1][1].plot(g3_first)\n axes[2][0].plot(mean_first)\n axes[2][1].plot(wf_mean)\n axes[2][1].set_title('Mean waveform and corrected')\n axes[2][1].plot(wf_mean-fwf_mean)\n axes[2][1].set_xlim(0, 1000)\n else:\n axes[0][0].set_xlim(0,50000)\n axes[0][1].plot(plt_frq, g1_mean)\n axes[0][1].set_title('Group 1 mean '+sys.argv[2])\n axes[0][1].set_xlim(0,50000)\n axes[1][0].plot(plt_frq, g2_mean)\n axes[1][0].set_title('Group 2 mean '+sys.argv[2])\n axes[1][0].set_xlim(0,50000)\n axes[1][1].plot(plt_frq, g3_mean)\n axes[1][1].set_title('Group 3 mean '+sys.argv[2])\n axes[1][1].set_xlim(0,50000)\n 
axes[2][0].plot(plt_frq, fwf_mean)\n axes[2][0].set_title('Mean '+sys.argv[2])\n axes[2][0].set_xlim(0,50000)\n plt.draw()\n #fig.legend(loc=0)\n catcher = input(\"next plot?\")\n if catcher == 'q':\n exit()\n plt.cla()",
"def estimate_max_dn(exposure, gain=1):\n return np.random.randint(100*exposure, 500*exposure)",
"def audioEpochFeats(cur,uid,timestamp):\n\tuidA = uid +'audio'\n\n\tvar_stats = []\n\tstd_stats = []\n\tnoise = []\n\tvoiceToSilenceRatio = []\n\n\tfor i in range(1,24):\n\t\ths_timestamp = timestamp-86400+(i-1)*hour\n\t\the_timestamp = timestamp-86400+i*hour\n\t\t# Determining if start/end time of given hour is in the night\n\t\t# If yes, proceed with feature calculation, if not skip\n\t\ts_epoch = epochCalc(hs_timestamp)\n\t\te_epoch = epochCalc(he_timestamp)\n\n\t\tif s_epoch[0][0]=='night' or e_epoch[0][0]=='night':\n\t\t\tcur.execute('SELECT audio FROM {0} WHERE time_stamp >= {1} AND time_stamp<= {2}'\n\t\t\t\t.format(uidA,timestamp-86400+(i-1)*hour,timestamp-86400+i*hour))\n\t\t\trecords = cur.fetchall()\n\n\t\t\tvar_stats.append(np.var(records))\n\t\t\tstd_stats.append(np.std(records))\n\n\t\t\t# Calculating number of silence and voice/noise occurences\n\t\t\tsilence = len([item for item in records if item==0])\n\t\t\tvoice = len([item for item in records if item==1 or item==2])\n\t\t\tnoise.append(len([item for item in records if item==3]))\n\t\t\tif silence>0:\n\t\t\t\tvoiceToSilenceRatio.append(float(voice) / silence)\n\t\t\telse:\n\t\t\t\tvoiceToSilenceRatio.append(0)\n\treturn(np.nan_to_num(np.hstack((voiceToSilenceRatio,var_stats,std_stats,noise))))\n\t\"\"\"\ndef main():\n\tcon = psycopg2.connect(database='dataset', user='tabrianos')\n\tcur = con.cursor()\n\t#warnings.simplefilter(\"error\")\n\t#centers = np.load('visualizations/clustercenters.npy')\n\n# ------------TEST CASE-----------------------------\n\tfor loso in uids1:\n\t\tytest=[]\n\t\taccuracies =[]\n\t\tacc=0\n\t\tmaxminAcc =[]\n\t\tXbig = np.zeros([1,132])\t\n\t\tYbig = np.zeros([1])\n\t\tlabels=[]\n\t\tlabels.append(19)\n\t\t# loso means leave one student out: forest is trained on other users data\n\t\t# then tests are run on 'loso' student \n\t\tuids2.remove(loso)\n\t\tuids2.append(loso)\n\t\tprint('LOSO: {0}'.format(loso))\n\t\tfor testUser in uids2:\n\t\t\tprint(testUser)\n\t\t\t# lists that temporary store features before concatenation\n\t\t\t\n\t\t\tcolocationList =[]\n\t\t\tconversationList =[]\n\t\t\tactivityList=[]\n\t\t\taudioList = []\n\n\t\t\t# loading stress labels from database (currently on 0-5 scale)\n\t\t\trecords = loadSleepLabels(cur,testUser) \n\t\t\n\n\t\t\t\n\t\t\t#X,Y store initially the dataset and the labels accordingly\n\t\t\tY = np.zeros(len(records))\n\t\t\tX = np.array(records)\n\n\t\n\n\n\t\t\tfor i in range(0,len(records)):\n\t\t\t\tcolocationList.append( colocationEpochFeats(cur,testUser,X[i][0]))\n\t\t\t\tconversationList.append( convEpochFeats(cur,testUser,X[i][0]))\n\t\t\t\tactivityList.append(activityEpochFeats(cur,testUser,X[i][0]))\n\t\t\t#\tScreenList.append( screenStatFeatures(cur,testUser,X[i][0],day) )\n\t\t\t\taudioList.append(audioEpochFeats(cur,testUser,X[i][0]))\n\t\t\n\t\t\t\tif testUser==loso:\n\t\t\t\t\tytest.append(X[i][1])\n\t\t\t\t#labels list holds user ids to be used in LeaveOneOut pipeline\n\t\t\t\tlabels.append(testUser[-2:])\n\t\t\t\tY[i] = X[i][2]\n\n\t\t\t\n\t\t\t#concatenating features in one array \n\n\t\t\tXtt = np.concatenate((np.array(activityList),np.array(conversationList),np.array(colocationList),np.array(audioList)),axis=1)\n\t\t\tprint(Xtt.shape)\n\n\t\t\t#initiating and training forest, n_jobs indicates threads, -1 means all available\n\t\t\t# while the test student is not reached, training data are merged into one big matrix\n\t\t\tXbig = np.concatenate((Xbig,Xtt),axis=0)\n\t\t\tYbig = np.concatenate((Ybig,Y),axis=0)\n\n\t\t\tdel 
colocationList[:]\n\t\t\tdel conversationList[:]\n\t\t\tdel activityList[:]\n\t\t\tdel audioList[:]\n\n\n\n\t\t\tif testUser!=loso:\n\t\t\t\tXbig = Xbig.astype(np.float64)\n\t\t\t\tprint(Xbig.dtype)\n\t\t\t\t\n\n\t\t\t# when loso, tests are run\n\t\t\telif testUser==loso:\n\t\t\t\t#Xbig = preprocessing.scale(Xbig)\n\t\t\t\tnp.save('numdata/withgps/sleephourlyX.npy',Xbig)\n\t\t\t\tnp.save('numdata/withgps/sleephourlyY.npy',Ybig)\n\t\t\t\tnp.save('numdata/withgps/sleephourlyLOO.npy',np.array(labels))\n\t\t\t\tprint(Xbig.shape[0],Ybig.shape[0],len(labels))\n\t\t\t\tprint('train matrix saved')\n\t\t\t\ta = raw_input()\n\t\t\t\tforest = RandomForestClassifier(n_estimators=100, n_jobs = -1)\n\t\t\t\tforest.fit(Xbig,Ybig)\n\t\t\t\tef = forest.score(Xtt,ytest)\n\t\t\t\tprint(ef*100)\n\n\t\t\t\toutput = np.array(forest.predict(Xtt))\n\t\t\t\tscored = output - np.array(ytest)\n\n\t\t\t\t# Counting as correct predictions the ones which fall in +/-1, not only exact\n\t\t\t\t# I call it the 'Tolerance technique'\n\t\t\t\tcorrect=0\n\t\t\t\tc = Counter(scored)\n\t\t\t\tfor k in c.keys():\n\t\t\t\t\tif k<2 and k>-2:\n\t\t\t\t\t\tcorrect += c[k]\n\t\t\t\t\n\t\t\t\tscore = float(correct)/len(scored)\n\t\t\t\tprint(score*100)\n\n\n\n\t\tprint(Xbig.shape)\n\t\n\t\t\n\n\n\nif __name__ == '__main__':\n\tmain()\n\n\n\n\t\"\"\"",
"def n_remaining_samples(self):\n return -1",
"def n_remaining_samples(self):\n return -1",
"def n_remaining_samples(self):\n return -1",
"def subbandwidth(self):",
"def oversampling_experiment():\n model, history = train.train(BATCH_SIZE, EPOCHS, print_model_summary=True,\n oversampling=True)\n evaluate_both(model)\n plotting.plot_metrics(history)",
"def __thresholdInput(self,samples):\n absSamples = np.abs(samples) # 1 ms\n thresh = self.peakThresholdScale*np.mean(absSamples) # 0.2 ms\n i = np.where(absSamples>thresh)[0] # 1e-5 s\n samples[i] = thresh * (samples[i]/absSamples[i]) # 8e-5 s\n # Do it again in case the spikes were really loud\n absSamples[i] = np.abs(samples[i])\n thresh = self.peakThresholdScale*np.mean(absSamples)\n i = np.where(absSamples>thresh)[0]\n self.clippedPeakIPure = i # All peaks that are clipped at first round are clipped again. Requires that the peaks in first round are not set to 0\n samples[i] = thresh * (samples[i]/absSamples[i])\n # Mark peaks close to each other\n if len(self.clippedPeakIPure)>0:\n # t = time.time()\n # Mark peaks close to each other as continuous\n diffPeaks = np.diff(self.clippedPeakIPure)\n gapsAll = np.where(diffPeaks>1)[0]\n self.peakMinGap = 100\n gaps = np.where(diffPeaks[gapsAll] < self.peakMinGap)[0] # find gaps smaller than 100\n gapsLen = diffPeaks[gapsAll[gaps]] # length of the gaps\n gapsIdx = gapsAll[gaps] # Index of all gaps\n\n\n # fill the gaps smaller than self.peakMinGap\n pp = np.zeros(self.Nfft,dtype=np.int8)\n pp[self.clippedPeakIPure] = 1\n for i in range(len(gapsLen)):\n pp[self.clippedPeakIPure[gapsIdx[i]]:self.clippedPeakIPure[gapsIdx[i]]+gapsLen[i]] = 1\n\n self.clippedPeakI = np.where(pp==1)[0]\n else:\n self.clippedPeakI = self.clippedPeakIPure.copy()\n if log.level == logging.DEBUG:\n log.debug('clipped peaks ' + str(len(self.clippedPeakIPure)))",
"def sample_low_rank(self, n_samples, mu, logvar, F):\n #F = torch.unsqueeze(F, dim=1).repeat(1, n_samples, 1, 1) # [self.batch_size, n_samples, self.Y_dim, self.rank]\n F = F.repeat(n_samples, 1, 1) # [self.batch_size*n_samples, self.Y_dim, self.rank]\n mu = mu.repeat(n_samples, 1) # [self.batch_size*n_samples, self.Y_dim]\n logvar = logvar.repeat(n_samples, 1) # [self.batch_size*n_samples, self.Y_dim]\n eps_low_rank = torch.randn(self.batch_size*n_samples, self.rank, 1)\n eps_diag = torch.randn(self.batch_size*n_samples, self.Y_dim)\n half_var = torch.exp(0.5*logvar) # [self.batch_size*n_samples, self.Y_dim]\n samples = torch.bmm(F, eps_low_rank).squeeze() + mu + half_var*eps_diag\n samples = samples.reshape(n_samples, self.batch_size, self.Y_dim)\n samples = samples.transpose(0, 1)\n samples = self.unwhiten_back(samples)\n samples = samples.data.cpu().numpy()\n return samples",
"def get_threshold(ckt_path, threshold_nums, percentage):\n aug_classes = 5\n num_classes = 8\n torch.set_printoptions(precision=2, threshold=100000, linewidth=10000)\n\n # get dataloader\n mean_std_path = './data/mean_std.json'\n data_root = './data/'\n # loader dict:'train','valid', 'test'\n loader = get_dataloader(mean_std_path, data_root)\n\n copy_resnet18 = deepcopy(resnet18(pretrained=False))\n # model = Net1FC(copy_resnet18, all_classes).cuda()\n model = Net8FC(copy_resnet18, num_classes, aug_classes).cuda()\n\n # ckt_path = './backup/models/resnet180.09625'\n ckt = torch.load(ckt_path)\n model.load_state_dict(ckt['model'])\n\n model.eval()\n\n loss_list = [[] for i in range(8)]\n # _pred_list = []\n # _label_list = []\n with torch.no_grad():\n for index, (data, label) in tqdm(enumerate(loader['threshold'])):\n # _label_list.append(int(label))\n labels = torch.tensor([label for i in range(4)]).cuda()\n data = data.squeeze(1).cuda()\n data = torch.stack([data.clone(),\n data.clone().rot90(1, [1, 2]),\n data.clone().rot90(2, [1, 2]),\n data.clone().rot90(3, [1, 2])])\n\n output = model(data, labels, \"valid\")\n\n targets = torch.tensor([0, 1, 2, 3]).cuda()\n loss_list[label].append(cross_entropy(output[label], targets).item() / 4.0)\n\n # pred_label = np.argmin(val_loss)\n # _pred_list.append(int(pred_label))\n\n # val_conf_mat = conf_matrix(_pred_list, _label_list, 8, True, [i for i in range(8)])\n # cal_recall_precision(val_conf_mat, True, [i for i in range(8)])\n\n print(loss_list)\n\n threshold = []\n if threshold_nums == 'multi':\n # 若各分类器求一个阈值\n for i in range(8):\n length = len(loss_list[i])\n threshold.append(np.mean(loss_list[i].sort()[:length * percentage]))\n\n elif threshold_nums == 1:\n # 若所有分类器求一个阈值\n loss_list_in_one = []\n for loss in loss_list:\n loss_list_in_one.extend(loss)\n length = len(loss_list_in_one)\n threshold = np.mean(loss_list_in_one.sort()[:length * percentage])\n\n print(\"The threshold is:\", threshold)\n\n return threshold",
"def test_large_import_recovered(self):\n self.create_sample_data_set_dir(\"DOS15908.DAT\", RECOV_DIR)\n self.assert_initialize()\n result = self.data_subscribers.get_samples(DataParticleType.METADATA_RECOVERED,1,60)\n result = self.data_subscribers.get_samples(DataParticleType.SAMPLE_RECOVERED,96,400)",
"def experiment1_outliers():\n\tdata_folder = \"ckan_subset/prepared_learnset/\"\n\ttest_folder = 'ckan_subset/testset/xml_csv/'\n\tgm = Graph_Maker()\n\tgm.store()\n\trounds = 5\n\tx = [\"Fingerprint\", \"Syntax Feature Model\", \"Word2Vec Matcher\"]\n\t\n\tnumber_of_classes = 15\n\texamples_per_class = 0\n\taccuracies = []\n\tprecisions = []\n\trecalls = []\n\tfmeasures = []\n\tsf_main = Storage_Files(data_folder, classes)\n\ttmp_acc = []\n\ttmp_prec = []\n\ttmp_rec = []\n\ttmp_fmeasure = []\n\ttotal_actual = []\n\ttotal_predicted = []\n\n\tfor i in range(0, rounds):\n\t\tprint(\"Fingerprint\")\n\t\t# --- Fingerprint\n\t\tccc = Column_Classification_Config()\n\t\tccc.add_feature('feature_main', 'Fingerprint', [sf_main, number_of_classes, examples_per_class, False, False])\n\n\t\tccc.add_matcher('matcher', 'Fingerprint_Matcher', {'feature_main': 'fingerprint'}) # main classifier\n\t\tsm = Schema_Matcher(ccc)\n\t\tactual, predicted = execute_test_ckan(sm, test_folder, False)\n\t\ttotal_actual += actual\n\t\ttotal_predicted += predicted\n\t\taccuracy = accuracy_score(actual, predicted)\n\t\ttmp_acc.append(accuracy)\n\t\ttmp_prec.append(precision(actual, predicted))\n\t\ttmp_rec.append(recall(actual, predicted))\n\t\ttmp_fmeasure.append(f_measure(actual, predicted))\n\n\taccuracies.append( round(sum(tmp_acc) / float(rounds), 2) )\n\tprecisions.append( round(sum(tmp_prec) / float(rounds), 2) )\n\trecalls.append( round(sum(tmp_rec) / float(rounds), 2) )\n\tfmeasures.append(round(sum(tmp_fmeasure) / float(rounds), 2))\n\tclassnames = list(set(get_class_names(total_actual) + get_class_names(total_predicted)))\n\tcm = confusion_matrix(total_actual, total_predicted, labels=classnames)\n\t#gm.plot_confusion_matrix(cm, classnames, normalize=True)\n\t\n\ttmp_acc = []\n\ttmp_prec = []\n\ttmp_rec = []\n\ttmp_fmeasure = []\n\ttotal_actual = []\n\ttotal_predicted = []\n\tfor i in range(0, rounds):\n\t\tprint(\"SFM\")\n\t\t# --- Syntax Feature Model\n\t\tccc = Column_Classification_Config()\n\t\tccc.add_feature('feature_main', 'Syntax_Feature_Model', [sf_main, 1, 0, False, False])\n\n\t\tccc.add_matcher('matcher', 'Syntax_Matcher', {'feature_main': 'syntax'}) # main classifier\n\t\tsm = Schema_Matcher(ccc)\n\t\tactual, predicted = execute_test_ckan(sm, test_folder, False)\n\t\ttotal_actual += actual\n\t\ttotal_predicted += predicted\n\t\taccuracy = accuracy_score(actual, predicted)\n\t\ttmp_acc.append(accuracy)\n\t\ttmp_prec.append(precision(actual, predicted))\n\t\ttmp_rec.append(recall(actual, predicted))\n\t\ttmp_fmeasure.append(f_measure(actual, predicted))\n\n\taccuracies.append( round(sum(tmp_acc) / float(rounds), 2) )\n\tprecisions.append( round(sum(tmp_prec) / float(rounds), 2) )\n\trecalls.append( round(sum(tmp_rec) / float(rounds), 2) )\n\tfmeasures.append(round(sum(tmp_fmeasure) / float(rounds), 2))\n\tclassnames = list(set(get_class_names(total_actual) + get_class_names(total_predicted)))\n\tcm = confusion_matrix(total_actual, total_predicted, labels=classnames)\n\t#gm.plot_confusion_matrix(cm, classnames, normalize=True)\n\n\ttmp_acc = []\n\ttmp_prec = []\n\ttmp_rec = []\n\ttmp_fmeasure = []\n\ttotal_actual = []\n\ttotal_predicted = []\n\tfor i in range(0, rounds):\n\t\tprint(\"W2V\")\n\t\t# --- Word2Vec Matcher\n\t\tccc = Column_Classification_Config()\n\t\tccc.add_feature('feature_main', 'Corpus', [sf_main, number_of_classes, examples_per_class, False, False])\n\n\t\tccc.add_matcher('matcher', 'Word2Vec_Matcher', {'feature_main': 'corpus'}) # main classifier\n\t\tsm = 
Schema_Matcher(ccc)\n\t\tactual, predicted = execute_test_ckan(sm, test_folder, False)\n\t\ttotal_actual += actual\n\t\ttotal_predicted += predicted\n\t\taccuracy = accuracy_score(actual, predicted)\n\t\ttmp_acc.append(accuracy)\n\t\ttmp_prec.append(precision(actual, predicted))\n\t\ttmp_rec.append(recall(actual, predicted))\n\t\ttmp_fmeasure.append(f_measure(actual, predicted))\n\n\taccuracies.append( round(sum(tmp_acc) / float(rounds), 2) )\n\tprecisions.append( round(sum(tmp_prec) / float(rounds), 2) )\n\trecalls.append( round(sum(tmp_rec) / float(rounds), 2) )\n\tfmeasures.append(round(sum(tmp_fmeasure) / float(rounds), 2))\n\tclassnames = list(set(get_class_names(total_actual) + get_class_names(total_predicted)))\n\tcm = confusion_matrix(total_actual, total_predicted, labels=classnames)\n\t#gm.plot_confusion_matrix(cm, classnames, normalize=True)\n\n\tgm.add_x(x)\n\t# accuracies = [0.4, 0.4, 0.4]\n\t# precisions = [0.5, 0.5, 0.5]\n\t# recalls = [0.62, 0.62, 0.62]\n\t# fmeasures = [0.23, 0.23, 0.28]\n\tgm.append_y(accuracies)\n\tgm.append_y(precisions)\n\tgm.append_y(recalls)\n\tgm.append_y(fmeasures)\n\tgm.store()\n\tsubtitle = \"Scores were averaged over \" + str(rounds) + \" tests with \" + str(len(classes)) + \" classes. \" + \\\n\t\"Number of simulated columns per class: \" + str(number_of_classes)\n\tlabels = [\"Accuracy\", \"Precision\", \"Recall\", \"F-Measure\"]\n\tgm.plot_bar_n(\"Matcher Type\", \"Score\", \"Accuracy of Matchers\", labels, subtitle=subtitle)",
"def remove_low_info(X, max_frequency=0.99):\n selector = UniqueThreshold(max_frequency=max_frequency)\n return selector.fit_transform(X)",
"def testPeakLikelihoodFlux(self):\n # make mp: a flux measurer\n measControl = measAlg.PeakLikelihoodFluxControl()\n schema = afwTable.SourceTable.makeMinimalSchema()\n mp = measAlg.MeasureSourcesBuilder().addAlgorithm(measControl).build(schema)\n \n # make and measure a series of exposures containing just one star, approximately centered\n bbox = afwGeom.Box2I(afwGeom.Point2I(0, 0), afwGeom.Extent2I(100, 101))\n kernelWidth = 35\n var = 100\n fwhm = 3.0\n sigma = fwhm/FwhmPerSigma\n convolutionControl = afwMath.ConvolutionControl()\n psf = measAlg.SingleGaussianPsf(kernelWidth, kernelWidth, sigma)\n psfKernel = psf.getLocalKernel()\n psfImage = psf.computeKernelImage()\n sumPsfSq = numpy.sum(psfImage.getArray()**2)\n psfSqArr = psfImage.getArray()**2\n for flux in (1000, 10000):\n ctrInd = afwGeom.Point2I(50, 51)\n ctrPos = afwGeom.Point2D(ctrInd)\n\n kernelBBox = psfImage.getBBox(afwImage.PARENT)\n kernelBBox.shift(afwGeom.Extent2I(ctrInd))\n\n # compute predicted flux error\n unshMImage = makeFakeImage(bbox, [ctrPos], [flux], fwhm, var)\n\n # filter image by PSF\n unshFiltMImage = afwImage.MaskedImageF(unshMImage.getBBox(afwImage.PARENT))\n afwMath.convolve(unshFiltMImage, unshMImage, psfKernel, convolutionControl)\n \n # compute predicted flux = value of image at peak / sum(PSF^2)\n # this is a sanity check of the algorithm, as much as anything\n predFlux = unshFiltMImage.getImage().get(ctrInd[0], ctrInd[1]) / sumPsfSq\n self.assertLess(abs(flux - predFlux), flux * 0.01)\n \n # compute predicted flux error based on filtered pixels\n # = sqrt(value of filtered variance at peak / sum(PSF^2)^2)\n predFluxErr = math.sqrt(unshFiltMImage.getVariance().get(ctrInd[0], ctrInd[1])) / sumPsfSq\n\n # compute predicted flux error based on unfiltered pixels\n # = sqrt(sum(unfiltered variance * PSF^2)) / sum(PSF^2)\n # and compare to that derived from filtered pixels;\n # again, this is a test of the algorithm\n varView = afwImage.ImageF(unshMImage.getVariance(), kernelBBox)\n varArr = varView.getArray()\n unfiltPredFluxErr = math.sqrt(numpy.sum(varArr*psfSqArr)) / sumPsfSq\n self.assertLess(abs(unfiltPredFluxErr - predFluxErr), predFluxErr * 0.01)\n \n for fracOffset in (afwGeom.Extent2D(0, 0), afwGeom.Extent2D(0.2, -0.3)):\n adjCenter = ctrPos + fracOffset\n if fracOffset == (0, 0):\n maskedImage = unshMImage\n filteredImage = unshFiltMImage\n else:\n maskedImage = makeFakeImage(bbox, [adjCenter], [flux], fwhm, var)\n # filter image by PSF\n filteredImage = afwImage.MaskedImageF(maskedImage.getBBox(afwImage.PARENT))\n afwMath.convolve(filteredImage, maskedImage, psfKernel, convolutionControl)\n\n exposure = afwImage.makeExposure(filteredImage)\n exposure.setPsf(psf)\n \n table = afwTable.SourceTable.make(schema)\n source = table.makeRecord()\n mp.apply(source, exposure, afwGeom.Point2D(*adjCenter))\n measFlux = source.get(measControl.name)\n measFluxErr = source.get(measControl.name + \".err\")\n self.assertFalse(source.get(measControl.name + \".flags\"))\n self.assertLess(abs(measFlux - flux), flux * 0.003)\n \n self.assertLess(abs(measFluxErr - predFluxErr), predFluxErr * 0.2)\n\n # try nearby points and verify that the flux is smaller;\n # this checks that the sub-pixel shift is performed in the correct direction\n for dx in (-0.2, 0, 0.2):\n for dy in (-0.2, 0, 0.2):\n if dx == dy == 0:\n continue\n offsetCtr = afwGeom.Point2D(adjCenter[0] + dx, adjCenter[1] + dy)\n table = afwTable.SourceTable.make(schema)\n source = table.makeRecord()\n mp.apply(source, exposure, offsetCtr)\n offsetFlux = 
source.get(measControl.name)\n self.assertLess(offsetFlux, measFlux)\n \n # source so near edge of image that PSF does not overlap exposure should result in failure\n \n for edgePos in (\n (1, 50),\n (50, 1),\n (50, bbox.getHeight() - 1),\n (bbox.getWidth() - 1, 50),\n ):\n table = afwTable.SourceTable.make(schema)\n source = table.makeRecord()\n mp.apply(source, exposure, afwGeom.Point2D(*edgePos))\n self.assertTrue(source.get(measControl.name + \".flags\"))\n \n # no PSF should result in failure: flags set\n noPsfExposure = afwImage.ExposureF(filteredImage)\n table = afwTable.SourceTable.make(schema)\n source = table.makeRecord()\n mp.apply(source, noPsfExposure, afwGeom.Point2D(*adjCenter))\n self.assertTrue(source.get(measControl.name + \".flags\"))",
"def make_downsample_filt_tensor(SR=16000, ENV_SR=200, WINDOW_SIZE=1001, pycoch_downsamp=False):\n DOWNSAMPLE = SR/ENV_SR\n if not pycoch_downsamp: \n downsample_filter_times = np.arange(-WINDOW_SIZE/2,int(WINDOW_SIZE/2))\n downsample_filter_response_orig = np.sinc(downsample_filter_times/DOWNSAMPLE)/DOWNSAMPLE\n downsample_filter_window = signal.kaiser(WINDOW_SIZE, 5)\n downsample_filter_response = downsample_filter_window * downsample_filter_response_orig\n else: \n max_rate = DOWNSAMPLE\n f_c = 1. / max_rate # cutoff of FIR filter (rel. to Nyquist)\n half_len = 10 * max_rate # reasonable cutoff for our sinc-like function\n if max_rate!=1: \n downsample_filter_response = signal.firwin(2 * half_len + 1, f_c, window=('kaiser', 5.0))\n else: # just in case we aren't downsampling -- I think this should work? \n downsample_filter_response = zeros(2 * half_len + 1)\n downsample_filter_response[half_len + 1] = 1\n \n # Zero-pad our filter to put the output samples at the center\n # n_pre_pad = int((DOWNSAMPLE - half_len % DOWNSAMPLE))\n # n_post_pad = 0\n # n_pre_remove = (half_len + n_pre_pad) // DOWNSAMPLE\n # We should rarely need to do this given our filter lengths...\n # while _output_len(len(h) + n_pre_pad + n_post_pad, x.shape[axis],\n # up, down) < n_out + n_pre_remove:\n # n_post_pad += 1\n # downsample_filter_response = np.concatenate((np.zeros(n_pre_pad), downsample_filter_response, np.zeros(n_post_pad)))\n \n downsample_filt_tensor = tf.constant(downsample_filter_response, tf.float32)\n downsample_filt_tensor = tf.expand_dims(downsample_filt_tensor, 0)\n downsample_filt_tensor = tf.expand_dims(downsample_filt_tensor, 2)\n downsample_filt_tensor = tf.expand_dims(downsample_filt_tensor, 3)\n\n return downsample_filt_tensor"
] | [
"0.61294377",
"0.58991325",
"0.5723101",
"0.56286037",
"0.557679",
"0.5432801",
"0.5261728",
"0.52388954",
"0.5201363",
"0.5196606",
"0.51883674",
"0.5182593",
"0.51802176",
"0.51758677",
"0.5150192",
"0.5148113",
"0.5143918",
"0.51152796",
"0.51152796",
"0.51152796",
"0.5087558",
"0.5086504",
"0.50844115",
"0.50748616",
"0.5074183",
"0.50678855",
"0.50520724",
"0.5051874",
"0.5050721",
"0.5039316"
] | 0.66418946 | 0 |
Generates a balanced set of training examples from one or more datasets. | def generate_dataset(
datasets, networks, parents, mode='train', mean=None,
verbose=1, **params):
# Parameters
classes = params.setdefault('classes', [-1,0,1])
data_type = params.setdefault('data_type', 'spikes')
thres = params.setdefault('thres', 150.0)
target = params.setdefault('target', int(1.2e6))
valid_split = params.setdefault('valid_split', 0.1)
slice_len = params.setdefault('slice_len', 330)
assert len(datasets) == len(networks) == len(parents)
examples = np.zeros((target, 5, slice_len, 1))
labels = np.zeros((target, len(classes)))
ex_per_netw = target//len(datasets)
params['target'] = ex_per_netw
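    # Build the example pool network by network so each dataset contributes an equal share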
for i in range(len(datasets)):
if verbose > 0:
print('Network {} of {}'.format(i+1, len(datasets)))
data = datasets[i]
network = networks[i]
parents_ = parents[i]
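        # Downsample the raw recording according to the requested data type before slicing examples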
if data_type == 'spikes':
ds_data = downsample_spikes(data, thres=thres, verbose=verbose)
elif data_type == 'fluorescence':
ds_data = downsample_fluorescence(
data, thres=thres, verbose=verbose)
else:
raise ValueError('Invalid data type')
start = i*ex_per_netw
end = (i+1)*ex_per_netw
examples[start:end], labels[start:end] = get_examples(
ds_data, network, parents_, verbose=verbose, **params)
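    # Shuffle examples and labels with one shared permutation so the per-network ordering is removed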
shuffle_idx = np.random.permutation(np.arange(examples.shape[0]))
examples = examples[shuffle_idx]
labels = labels[shuffle_idx]
if mode == 'train':
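        # Hold out a validation split and centre both splits on the training-set mean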
idx = int(examples.shape[0]*valid_split)
ex_valid, ex_train = np.split(examples, [idx], axis=0)
lbl_valid, lbl_train = np.split(labels, [idx], axis=0)
mean = np.mean(ex_train, axis=0)
ex_train -= mean
ex_valid -= mean
return ex_train, ex_valid, lbl_train, lbl_valid, mean
elif mode == 'test':
        assert mean is not None
examples -= mean
return examples, labels
else:
raise ValueError('Invalid mode') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _generate_datasets(self):\n\n degrade_test = False\n if self._opts['degrade_step'] == 'test':\n degrade_test = True\n\n use_trainset_for_tests = UseTrainForTest.IDENTICAL # can be different in few shot workflow\n\n train_dataset, test_dataset = self._gen_datasets_with_options(self._opts['train_classes'],\n self._opts['test_classes'],\n is_superclass=self._opts['superclass'],\n class_proportion=self._opts['class_proportion'],\n degrade_test=degrade_test,\n degrade_type=self._opts['degrade_type'], # only relevant if degrade_test = True\n degrade_val=self._opts['min_val'], # only relevant if degrade_test = True\n recurse_train=self._is_train_recursive(),\n recurse_test=self._is_inference_recursive(),\n num_batch_repeats=self._opts['num_repeats'],\n recurse_iterations=self._opts['recurse_iterations'],\n evaluate_step=self._opts['evaluate'],\n use_trainset_for_tests=use_trainset_for_tests,\n invert_images=self._opts['invert_images'],\n min_val=self._opts['min_val'])\n return train_dataset, test_dataset",
"def get_training_and_testing_sets(data, Y):\r\n data = pd.concat([data, Y], axis=1)\r\n x,y=data.shape\r\n train_X_sub1=data[0:x//6]\r\n dev_X_sub1 = data[x//6:x//6 + x//12]\r\n test_X_sub1 = data[x//6 + x//12:x//3]\r\n\r\n train_X_sub2 = data[x//3:x//3+x//6]\r\n dev_X_sub2 = data[x//6 + x//3:x//3 + x//6 + x//12]\r\n test_X_sub2 = data[x//3 + x//6 + x//12:2*x//3]\r\n\r\n train_X_sub3 = data[2*x//3:(2*x//3) +x//6]\r\n dev_X_sub3 = data[x//6 + 2*x//3: (2*x//3) + x//6 + x//12]\r\n test_X_sub3 = data[2*x//3 + x//6 + x//12:x]\r\n\r\n train_X=train_X_sub1.append(train_X_sub2,ignore_index = True)\r\n train_X =train_X.append(train_X_sub3,ignore_index = True)\r\n dev_X= dev_X_sub1.append(dev_X_sub2,ignore_index = True)\r\n dev_X = dev_X.append(dev_X_sub3,ignore_index = True)\r\n test_X = test_X_sub1.append(test_X_sub2,ignore_index = True)\r\n test_X = test_X.append(test_X_sub3,ignore_index = True)\r\n\r\n\r\n train_X = util.shuffle(train_X)\r\n train_X = train_X.reset_index(drop=True)\r\n\r\n dev_X = util.shuffle(dev_X)\r\n dev_X = dev_X.reset_index(drop=True)\r\n\r\n test_X = util.shuffle(test_X)\r\n test_X = test_X.reset_index(drop=True)\r\n\r\n train_X_final=train_X\r\n dev_X_final = dev_X\r\n test_X_final = test_X\r\n x, y = train_X_final.shape\r\n train_X = train_X_final.iloc[:, 0:y - 1]\r\n train_Y = train_X_final.iloc[:, y - 1]\r\n\r\n x, y = test_X_final.shape\r\n test_X = test_X_final.iloc[:, 0:y - 1]\r\n test_Y = test_X_final.iloc[:, y - 1]\r\n\r\n x, y = dev_X_final.shape\r\n dev_X = dev_X_final.iloc[:, 0:y - 1]\r\n dev_Y = dev_X_final.iloc[:, y - 1]\r\n\r\n return train_X, train_Y, dev_X,dev_Y,test_X, test_Y",
"def make_dataset():\n\n\tnumberOfTrials = dataset_params.num_of_samples\n\tnumberOfTrials_train = int(numberOfTrials*0.8)\n\tnumberOfTrials_test = int(numberOfTrials*0.2)\n\n\tprint(\"==================================================\")\n\tprint(\"1. Generating Train images ......\")\n\tprint(\"\\nTrain image per variation\", numberOfTrials_train)\n\tmakeDataset(numberOfTrials_train, \"train\")\n\n\tprint(\"==================================================\")\n\tprint(\"2. Generating Test images ......\")\n\tprint(\"\\nTest image per variation\", numberOfTrials_test)\n\tmakeDataset(numberOfTrials_test, \"test\")\n\n\tprint(\"==================================================\")\n\tprint(\"Done!!!\")",
"def create_data_sets(reviews, labels, write_to_pickle=True, problem=\"\"):\n def sanity_check(labels):\n print str(len(labels)) + \" total labels. \" + str(sum(labels)) + \" positive labels. \" \\\n + str(len(labels) - sum(labels)) + \" negative labels. \"\n\n train_reviews = []\n train_labels = []\n dev_reviews = []\n dev_labels = []\n test_reviews = []\n test_labels = []\n\n total_train = int(len(reviews) * 0.5 / 2) # divided by 2 because of 2 classes\n total_dev = int(len(reviews) * 0.25 / 2)\n\n current_pos_training = 0\n current_neg_train = 0\n current_pos_dev = 0\n current_neg_dev = 0\n\n for (review, vote) in zip(reviews, labels):\n if vote == 1:\n if current_pos_training < total_train:\n train_reviews.append(review)\n train_labels.append(vote)\n current_pos_training += 1\n elif current_pos_dev < total_dev:\n dev_reviews.append(review)\n dev_labels.append(vote)\n current_pos_dev += 1\n else:\n test_reviews.append(review)\n test_labels.append(vote)\n\n # Negative review\n else:\n if current_neg_train < total_train:\n train_reviews.append(review)\n train_labels.append(vote)\n current_neg_train += 1\n elif current_neg_dev < total_dev:\n dev_reviews.append(review)\n dev_labels.append(vote)\n current_neg_dev += 1\n else:\n test_reviews.append(review)\n test_labels.append(vote)\n\n # Shuffle data for every dataset\n combined_lists = zip(train_reviews, train_labels)\n np.random.shuffle(combined_lists)\n train_reviews, train_labels = zip(*combined_lists)\n\n combined_lists = zip(dev_reviews, dev_labels)\n np.random.shuffle(combined_lists)\n dev_reviews, dev_labels = zip(*combined_lists)\n\n combined_lists = zip(test_reviews, test_labels)\n np.random.shuffle(combined_lists)\n test_reviews, test_labels = zip(*combined_lists)\n\n # Sanity checks\n print \"Total reviews: \" + str(len(reviews))\n print \"Original distribution: \"\n sanity_check(labels)\n print \"========================\"\n print \"Train labels\"\n sanity_check(train_labels)\n print \"========================\"\n print \"Dev labels\"\n sanity_check(dev_labels)\n print \"========================\"\n print \"Train labels\"\n sanity_check(test_labels)\n\n # Write to pickles\n N = len(reviews)\n if write_to_pickle:\n print \"Writing to pickle...\"\n pickle.dump([train_reviews, train_labels],\n open(\"TrainSet_\" + problem + '_' + str(N), \"wb\"), pickle.HIGHEST_PROTOCOL)\n\n pickle.dump([dev_reviews, dev_labels],\n open(\"DevSet_\" + problem + '_' + str(N), \"wb\"), pickle.HIGHEST_PROTOCOL)\n\n pickle.dump([test_reviews, test_labels],\n open(\"TestSet_\" + problem + '_' + str(N), \"wb\"), pickle.HIGHEST_PROTOCOL)\n print \"Done.\"\n\n return train_reviews, train_labels, dev_reviews, dev_labels, test_reviews, test_labels",
"def prepare_dataset():\n with open('gold-posts.txt', encoding='utf-8') as f:\n posts = f.readlines()\n with open('gold-labels.txt', encoding='utf-8') as f:\n labels = f.readlines()\n\n def to_cat(x: str) -> int:\n if x == 'p':\n return 1\n elif x == 'n':\n return 2\n else:\n return 0\n X = np.array([x.strip() for x in posts])\n y = np.array([to_cat(x.strip()) for x in labels])\n\n # DOES NOT WORK - too imbalanced\n #skf = StratifiedKFold(n_splits=5, random_state=None, shuffle=False)\n #for train_index, test_index in skf.split(X, y):\n # X_train, X_test = X[train_index], X[test_index]\n # y_train, y_test = y[train_index], y[test_index]\n # break\n\n # WORKS better\n trI, teI = balanced_split(y)\n\n train_texts = X[trI].tolist()\n train_labels = y[trI].tolist()\n valid_texts = X[teI].tolist()\n valid_labels = y[teI].tolist()\n return train_texts, train_labels, valid_texts, valid_labels",
"def build_all_datasets(\n cfg, tokenizer, train_valid_test_num_samples,\n):\n train_dataset = RetroQAFineTuneDataset(\n cfg.train_ds.get('file_name'),\n tokenizer,\n cfg.train_ds.get('answer_only_loss'),\n tokenizer.pad_id,\n cfg.train_ds.get('seq_length'),\n cfg.train_ds.get('add_bos'),\n cfg.train_ds.get('add_eos'),\n train_valid_test_num_samples[0],\n cfg.train_ds.get('seed'),\n cfg.train_ds.get('neighbors'),\n )\n val_dataset = RetroQAFineTuneDataset(\n cfg.val_ds.get('file_name'),\n tokenizer,\n cfg.val_ds.get('answer_only_loss'),\n tokenizer.pad_id,\n cfg.val_ds.get('seq_length'),\n cfg.val_ds.get('add_bos'),\n cfg.val_ds.get('add_eos'),\n train_valid_test_num_samples[1],\n cfg.val_ds.get('seed'),\n cfg.val_ds.get('neighbors'),\n )\n test_dataset = RetroQAFineTuneDataset(\n cfg.test_ds.get('file_name'),\n tokenizer,\n cfg.test_ds.get('answer_only_loss'),\n tokenizer.pad_id,\n cfg.test_ds.get('seq_length'),\n cfg.test_ds.get('add_bos'),\n cfg.test_ds.get('add_eos'),\n train_valid_test_num_samples[2],\n cfg.test_ds.get('seed'),\n cfg.test_ds.get('neighbors'),\n )\n\n return train_dataset, val_dataset, test_dataset",
"def inputs_balanced(batch_size, fake_data=False, one_hot=False, dtype=tf.float32, eval_data=False):\n class DataSets(object):\n pass\n data_sets = DataSets()\n if fake_data:\n def fake():\n return DataSetBalanced([], [], batch_size, fake_data=True, one_hot=one_hot, dtype=dtype, eval_data=eval_data)\n data_sets.train = fake()\n data_sets.validation = fake()\n data_sets.test = fake()\n return data_sets\n\n #testing = dict()\n validation = dict()\n training = dict()\n validation_labels = dict()\n #testing_labels = dict()\n training_labels = dict()\n if USE_MULTIPLE_FILES:\n validation, validation_labels = create_data_set(VALIDATION_FILE_LOCATION, eval_data)\n if not eval_data:\n training, training_labels = create_data_set(FILE_LOCATION, eval_data)\n #### HACK: I needed to do this so there would be some strange eosinophil in the validation data ####\n validation['strange_eosinophils'] = training['strange_eosinophils'][0:10]\n validation_labels['strange_eosinophils'] = training_labels['strange_eosinophils'][0:10]\n training['strange_eosinophils'] = training['strange_eosinophils'][10:]\n training_labels['strange_eosinophils'] = training_labels['strange_eosinophils'][10:]\n else:\n VALIDATION_SIZE = 20\n #TESTING_SIZE = 1\n data_examples = np.load(os.path.join(DATA_LOCATION, FILE_LOCATION))\n for name in cell_names:\n print(\"data_examples\")\n print(name+\":\"+str(data_examples[name].shape[0]))\n for i, name in enumerate(cell_names):\n if not eval_data:\n # make the random data consistent across runs\n np.random.seed(1)\n # Shuffle the data\n perm = np.arange(data_examples[name].shape[0])\n np.random.shuffle(perm)\n randomized_data = data_examples[name][perm]\n else:\n randomized_data = data_examples[name]\n validation[name] = randomized_data[:VALIDATION_SIZE]\n #testing[name] = randomized_data[VALIDATION_SIZE:VALIDATION_SIZE+TESTING_SIZE]\n if not eval_data:\n training[name] = randomized_data[VALIDATION_SIZE:]\n #training[name] = randomized_data[VALIDATION_SIZE+TESTING_SIZE:]\n training_labels[name] = to_categorical(np.full((training[name].shape[0], 1), i, dtype=int), NUM_CLASSES)\n validation_labels[name] = to_categorical(np.full((validation[name].shape[0], 1), i, dtype=int), NUM_CLASSES)\n #testing_labels[name] = to_categorical(np.full((testing[name].shape[0], 1), i, dtype=int), NUM_CLASSES)\n\n data_sets.validation = DataSetBalanced(validation, validation_labels, batch_size, fake_data=False, one_hot=True,\n dtype=tf.uint8, eval_data=eval_data)\n #data_sets.testing = DataSetBalanced(testing, testing_labels, batch_size, fake_data=False, one_hot=True, dtype=tf.uint8, eval_data=eval_data)\n if not eval_data:\n data_sets.train = DataSetBalanced(training, training_labels, batch_size, fake_data=False, one_hot=True,\n dtype=tf.uint8, eval_data=eval_data)\n\n return data_sets",
"def prepare_dataset(self, xs: List[str], ys: List[str], batch_size: int = None):\n\n if batch_size is None:\n batch_size = self.cM.batch_size\n\n examples = [data.Example.fromlist([x, y], self.data_fields) for x, y in zip(xs, ys)]\n\n dataset = data.Dataset(examples, fields=self.data_fields)\n\n iterator = data.BucketIterator(dataset, batch_size=batch_size, shuffle=False)\n\n return iterator",
"def generate_datasets(self, positive_data_directory: str = 'positive', negative_data_directory: str = 'negative'\n ) -> (tf.data.Dataset, tf.data.Dataset):\n positive_example_paths = list(self.data_directory.joinpath(positive_data_directory).glob('*.feather'))\n print(f'{len(positive_example_paths)} positive examples.')\n negative_example_paths = list(self.data_directory.joinpath(negative_data_directory).glob('*.feather'))\n print(f'{len(negative_example_paths)} negative examples.')\n positive_datasets = self.get_training_and_validation_datasets_for_file_paths(positive_example_paths)\n positive_training_dataset, positive_validation_dataset = positive_datasets\n negative_datasets = self.get_training_and_validation_datasets_for_file_paths(negative_example_paths)\n negative_training_dataset, negative_validation_dataset = negative_datasets\n training_dataset = self.get_ratio_enforced_dataset(positive_training_dataset, negative_training_dataset,\n positive_to_negative_data_ratio=1)\n validation_dataset = positive_validation_dataset.concatenate(negative_validation_dataset)\n if self.trial_directory is not None:\n self.log_dataset_file_names(training_dataset, dataset_name='training')\n self.log_dataset_file_names(validation_dataset, dataset_name='validation')\n load_and_preprocess_function = lambda file_path: tuple(\n tf.py_function(self.load_and_preprocess_example_file, [file_path], [tf.float32, tf.int32]))\n training_dataset = training_dataset.shuffle(buffer_size=len(list(training_dataset)))\n training_dataset = training_dataset.map(load_and_preprocess_function, num_parallel_calls=16)\n training_dataset = training_dataset.map(self.set_shape_function, num_parallel_calls=16)\n training_dataset = training_dataset.batch(self.batch_size).prefetch(buffer_size=tf.data.experimental.AUTOTUNE)\n validation_dataset = validation_dataset.map(load_and_preprocess_function, num_parallel_calls=16)\n validation_dataset = validation_dataset.map(self.set_shape_function, num_parallel_calls=16)\n validation_dataset = validation_dataset.batch(self.batch_size).prefetch(\n buffer_size=tf.data.experimental.AUTOTUNE)\n return training_dataset, validation_dataset",
"def data_set_maker():\n\n # crate a folder in your code directory and name it: \"files\". put the .npy files iside that folder\n\n x_all = np.load(path + '/files/tinyX.npy', 'r') # reads the input file\n y_all = np.load(path + '/files/tinyY.npy', 'r') # reads the input file\n\n # split the data into 10% validation-set and 90% training set\n raw_train, raw_valid, y_train, y_valid = train_test_split(x_all, y_all, test_size=0.2, random_state=43)\n return raw_train, raw_valid, y_train, y_valid",
"def create_train_test_sets(conform_shape=True, indi_proportion=0.50, incl_group_imgs=True):\r\n X_train_indi, y_train_indi = build_dataframe('Individual_Training_Images',\r\n img_input_shape, conform_shape=conform_shape)\r\n X_test_indi, y_test_indi = build_dataframe('Individual_Test_Images',\r\n img_input_shape, conform_shape=conform_shape)\r\n \r\n X_train_group, y_train_group = build_dataframe('Group_Training_Images',\r\n img_input_shape, conform_shape=conform_shape)\r\n X_test_group, y_test_group = build_dataframe('Group_Test_Images',\r\n img_input_shape, conform_shape=conform_shape)\r\n \r\n X_train_indi, y_train_indi = subsample_dataframe(X_train_indi, y_train_indi,indi_proportion)\r\n \r\n if incl_group_imgs:\r\n X_train = np.concatenate([X_train_indi,X_train_group])\r\n y_train = np.concatenate([y_train_indi,y_train_group])\r\n else: \r\n X_train = X_train_indi.copy()\r\n y_train = y_train_indi.copy()\r\n\r\n return X_train, y_train, X_test_indi, y_test_indi, X_test_group, y_test_group",
"def _load_training_and_test_sets(normalize):\n class_labels = []\n test_labels = []\n norm = None\n if normalize == True:\n norm = loading.get_normalize_vector()\n\n for i in range(0, 10):\n [training, test] = loading.load_number_set(i, 0.7, norm_vector=norm)\n labels = [str(i)] * training.shape[0]\n tlabels = [str(i)] * test.shape[0]\n if i == 0:\n train_points = training\n test_points = test\n else:\n train_points = np.concatenate((train_points, training), axis = 0)\n test_points = np.concatenate((test_points, test), axis = 0)\n class_labels.extend(labels)\n test_labels.extend(tlabels)\n\n return train_points, test_points, class_labels, test_labels",
"def generate_datasets(self, rand=None, *args, **kwargs):\n raise NotImplementedError()",
"def get_examples(ds_data, network, parents, verbose=1, **params):\n # Parameters\n classes = params.setdefault('classes', [-1,0,1])\n target = params.setdefault('target', int(1.2e6))\n slice_len = params.setdefault('slice_len', 330)\n \n assert not target % len(classes)\n \n G = np.mean(ds_data, axis=0) \n examples = np.zeros((target, 5, slice_len, 1))\n labels = np.zeros((target, len(classes)))\n count = 0\n \n if verbose > 0:\n print('Generating {} training examples'.format(target))\n bar = pb.ProgressBar(max_value=target,\n widgets=[pb.Percentage(), ' - ',\n pb.Bar(), ' - ',\n pb.ETA()])\n \n for c in classes:\n \n pairs = np.argwhere(network == c)\n reps = int(target/len(classes)/pairs.shape[0]) + 1\n pair_idx = np.repeat(np.arange(pairs.shape[0]), reps)\n pair_idx = np.random.permutation(pair_idx)[:target//len(classes)]\n start_idx = np.random.randint(\n 0, ds_data.shape[1]-slice_len, size=target//len(classes))\n \n for i in range(pair_idx.size):\n \n n1 = pairs[pair_idx[i]][0]\n n2 = pairs[pair_idx[i]][1]\n assert(network[n1,n2] == c)\n \n start = start_idx[i]\n end = start + slice_len\n \n p1 = np.mean(ds_data[parents[n1], start:end], axis=0)\n p2 = np.mean(ds_data[parents[n2], start:end], axis=0)\n \n examples[count,:,:,0] = np.vstack((\n p1, \n ds_data[n1][start:end], \n G[start:end], \n ds_data[n2][start:end], \n p2\n ))\n \n labels[count,:] = np.equal(classes, c, dtype=np.int32)\n \n if verbose > 0:\n bar.update(count)\n count +=1\n \n if verbose > 0:\n bar.finish()\n print(\n 'Generated examples of shape:', examples.shape,\n '\\nGenerated labels of shape:', labels.shape,\n '\\nThere are {} classes: {}'.format(len(classes), classes)\n )\n \n assert not np.isnan(examples).any()\n return examples, labels",
"def init_data(dataset_config: dict):\n # train and dev will be in random order, test may be ordered according to labels\n if dataset_config[\"name\"] == \"CoLA\":\n train, dev, test, num_classes = load_cola(dataset_config)\n elif dataset_config[\"name\"] == \"AGNews\":\n train, dev, test, num_classes = load_ag_news(dataset_config)\n elif dataset_config[\"name\"] == \"DBPedia\":\n train, dev, test, num_classes = load_dbpedia(dataset_config)\n elif dataset_config[\"name\"] == \"YRF\":\n train, dev, test, num_classes = load_yrf(dataset_config)\n else:\n raise NameError(f\"Dataset {dataset_config['name']} not implemented.\")\n # etc.\n\n # shrink size if debugging\n if dataset_config[\"debug\"]:\n # choose a random subset using huggingface select function\n train = train.select(random.sample(range(len(train)), k=200))\n dev = dev.select(random.sample(range(len(dev)), k=40))\n test = test.select(random.sample(range(len(test)), k=200))\n\n # create class imbalance\n random.seed(dataset_config[\"seed\"])\n if dataset_config[\"pool_balance\"] == \"balanced\":\n pass\n elif dataset_config[\"pool_balance\"] == \"imbalanced\":\n train = train.filter(lambda example: create_imbalanced_dataset(example, dataset_config[\"imbalance_prop\"], dataset_config['imbalance_cls']))\n else:\n NameError(f\"pool_balance = {dataset_config['pool_balance']} not allowed\")\n\n if dataset_config[\"dev_balance\"] == \"balanced\":\n pass\n elif dataset_config[\"dev_balance\"] == \"imbalanced\":\n dev = dev.filter(lambda example: create_imbalanced_dataset(example, dataset_config[\"imbalance_prop\"], dataset_config['imbalance_cls']))\n else:\n NameError(f\"dev_balance = {dataset_config['dev_balance']} not allowed\")\n\n # get seed labelled pool indices (using the same seed data every time)\n random.seed(dataset_config[\"seed\"])\n if dataset_config[\"seed_balance\"] == \"balanced\":\n # this is random (will have some variance vs pool)\n indices = list(range(len(train)))\n unlabelled_pool_idx, labelled_pool_idx = split(\n indices,\n random_state=dataset_config[\"seed\"],\n test_size=dataset_config[\"seed_size\"]\n )\n elif dataset_config[\"seed_balance\"] == \"stratified\":\n # this is the same as the underlying train set (which may be unbalanced)\n indices = list(range(len(train)))\n unlabelled_pool_idx, labelled_pool_idx = split(\n indices,\n random_state=dataset_config[\"seed\"],\n test_size=dataset_config[\"seed_size\"],\n stratify=train['label']\n )\n elif dataset_config[\"seed_balance\"] == \"imbalanced\":\n # artificially sample an imbalanced seed set from the pool\n unlabelled_pool_idx, labelled_pool_idx = create_imbalanced_seed(\n train,\n num_classes,\n dataset_config[\"seed_size\"],\n dataset_config['imbalance_prop'],\n dataset_config['imbalance_cls']\n )\n else:\n raise NameError(f\"seed_balance = {dataset_config['seed_balance']} not allowed\")\n\n return train, dev, test, num_classes, labelled_pool_idx, unlabelled_pool_idx",
"def data_set_maker():\n\n # crate a folder in your code directory and name it: \"files\". put the .npy files iside that folder\n path = os.getcwd() # reads the current path\n x_train = np.load(path + '/files/tinyX.npy', 'r') # reads the input file\n y_train = np.load(path + '/files/tinyY.npy', 'r') # reads the input file\n x_test = np.load(path + '/files/tinyX_test.npy', 'r') # reads the input file\n x_train, y_train = shuffle(x_train, y_train)\n\n return x_train, y_train, x_test",
"def generate_samples(self, data_dir, tmp_dir, dataset_split):\n files = self.source_data_files(data_dir, tmp_dir, dataset_split)\n vocab = _extract_vocab_data(files)\n\n # Determine the number of instances to generate\n if dataset_split == problem.DatasetSplit.TRAIN:\n num_instances = self.num_train_instances\n else:\n num_instances = self.num_eval_instances\n\n for _ in range(num_instances):\n instance_size = random.randint(self.min_size, self.max_size)\n tokens = random.choices(vocab, k=instance_size)\n instance = ''.join(tokens)\n yield {'inputs': instance, 'targets': instance}",
"def build_datasets(self, data_dir: str = None, val_ratio: float = 0.2, num_train_examples: int = None,\n seed: int = 42, download: bool = True, **kwargs):\n if data_dir is None:\n data_dir = os.path.join(os.environ['DATA_DIR'], self.dataset_name)\n\n train_data = self.raw_dataset(data_dir, download=download, train=True, transform=self.train_transforms)\n val_data = self.raw_dataset(data_dir, download=download, train=True, transform=self.train_transforms)\n test_data = self.raw_dataset(data_dir, download=download, train=False, transform=self.test_transforms)\n\n # split train and validation\n train_indices, val_indices = get_split_indices(len(train_data), val_ratio, seed)\n if num_train_examples is not None:\n train_indices = np.random.choice(train_indices, num_train_examples, replace=False)\n train_data = Subset(train_data, train_indices)\n val_data = Subset(val_data, val_indices)\n\n # general way of returning extra information\n info = None\n\n # post-process datasets\n train_data, val_data, test_data, info = self.post_process_datasets(train_data, val_data, test_data, info=info)\n\n # name datasets and save statistics\n for dataset in [train_data, val_data, test_data]:\n dataset.dataset_name = self.dataset_name\n dataset.statistics = (self.means, self.stds)\n\n return train_data, val_data, test_data, info",
"def generateCrossValidationSets(dataSets, shuffleSeed=42):\n\n\tembeddedCrossvalidationSets = []\n\tfor dataSet in dataSets:\n\n\t\tallFiles = getAllFiles([dataSet])\n\t\tallAroused = list(filter(lambda x: isAroused(x), allFiles))\n\t\tallNonAroused = list(filter(lambda x: not isAroused(x), allFiles))\n\n\t\trandom.seed(shuffleSeed)\n\t\trandom.shuffle(allAroused)\n\t\trandom.shuffle(allNonAroused)\n\n\t\tfor outerIndex in range(0, 5):\n\t\t\tif len(embeddedCrossvalidationSets) <= outerIndex:\n\t\t\t\tembeddedCrossvalidationSets += [{\"outerValidate\": [], \"crossValidate\": []}]\n\n\t\t\touterSet = embeddedCrossvalidationSets[outerIndex]\n\n\t\t\touterAroused = allAroused[outerIndex::5]\n\t\t\touterNonAroused = allNonAroused[outerIndex::5]\n\n\t\t\touterAroused = outerAroused[:len(outerNonAroused)]\n\t\t\touterNonAroused = outerNonAroused[:len(outerAroused)]\n\n\t\t\touterValidateSet = outerAroused + outerNonAroused\n\t\t\trestAroused = list(filter(lambda x: x not in outerValidateSet, allAroused))\n\t\t\trestNonAroused = list(filter(lambda x: x not in outerValidateSet, allNonAroused))\n\n\t\t\tassert(len(list(filter(isAroused, outerValidateSet))) == len(outerValidateSet) / 2)\n\t\t\touterSet[\"outerValidate\"] += outerValidateSet\n\n\t\t\tfor innerIndex in range(0, 5):\n\t\t\t\tif len(outerSet[\"crossValidate\"]) <= innerIndex:\n\t\t\t\t\touterSet[\"crossValidate\"] += [{\"validate\": [], \"train\": []}]\n\n\t\t\t\tcrossValidationSet = outerSet[\"crossValidate\"][innerIndex]\n\n\t\t\t\tvalidatingAroused = restAroused[innerIndex::5]\n\t\t\t\tvalidatingNonAroused = restNonAroused[innerIndex::5]\n\n\t\t\t\tvalidatingAroused = validatingAroused[:len(validatingNonAroused)]\n\t\t\t\tvalidatingNonAroused = validatingNonAroused[:len(validatingAroused)]\n\n\t\t\t\tvalidatingSet = validatingAroused + validatingNonAroused\n\t\t\t\ttrainingSet = list(filter(lambda x: x not in validatingSet, restAroused)) + \\\n\t\t\t\t list(filter(lambda x: x not in validatingSet, restNonAroused))\n\n\t\t\t\tassert(len(list(filter(isAroused, validatingSet))) == len(validatingSet) / 2)\n\t\t\t\t#assert no validate files or testing files are train files\n\t\t\t\tassert(set(trainingSet) - set(validatingSet) == set(trainingSet))\n\t\t\t\tassert(set(trainingSet) - set(outerValidateSet) == set(trainingSet))\n\n\t\t\t\tcrossValidationSet[\"validate\"] += validatingSet\n\t\t\t\tcrossValidationSet[\"train\"] += trainingSet\n\n\treturn embeddedCrossvalidationSets",
"def make_datasets(class_names, dataset_dict, path_source, path_dest, seed):\n \n create_directory_structure(path_dest)\n\n path_alldata = [path_source.joinpath(f'label_{class_}')\n for class_ in class_names]\n\n path_imagefiles = [class_path.glob('*.bin')\n for class_path in path_alldata]\n\n size = sum([v for k, v in dataset_dict.items()])\n rng = default_rng(seed)\n\n datasets_by_class = np.array([rng.choice([image_file.name\n for image_file in image_filelist],\n size=size, replace=False)\n for image_filelist in path_imagefiles])\n\n dataset_labels = np.array([np.full(size, class_)\n for class_ in class_names])\n\n if not path_dest.exists():\n path_dest.mkdir(parents=True)\n\n start=0\n for set_name, n_examples in dataset_dict.items():\n stop = start + n_examples\n\n filename = f'{set_name}_set.csv'\n path_file = path_dest.joinpath(filename)\n \n images = datasets_by_class[:,start:stop].flatten()\n labels = dataset_labels[:,start:stop].flatten()\n rows = np.transpose(np.vstack((images, labels))).tolist()\n\n with path_file.open(mode='w', newline='') as f:\n csv_writer = writer(f)\n csv_writer.writerows(rows)\n\n start = n_examples",
"def createTrainTestSets():\n tweets = open(noDuplicatesFilename, 'r').read().splitlines()\n name_mapping = loadNameMapping()\n holdoutLocations = [u'Frederiksberg, Danmark', u'T\\xe5rnby, Danmark', u'Kolding, Danmark', u'T\\xe4by, Sverige', u'Kungsbacka, Sverige', u'Kristianstad, Sverige', u'Bod\\xf8, Norge', u'Kvinnherad, Norge', u'Ullensaker, Norge']\n testSetLocation = []\n rest = []\n for tweet in tweets:\n if stringToTweet(tweet).getFullName() in holdoutLocations:\n testSetLocation.append(tweet)\n else:\n rest.append(tweet)\n tweets = rest\n testIndex = int(round(len(tweets) * (1 - test_set_ratio)))\n random.seed(1)\n random.shuffle(tweets)\n trainSet = tweets[:testIndex]\n testSet = tweets[testIndex:]\n open(trainSetFilename, 'w').write('\\n'.join(trainSet))\n open(testSetNormalFilename, 'w').write('\\n'.join(testSet))\n open(testSetLocationFilename, 'w').write('\\n'.join(testSetLocation))\n print \"Wrote %d tweets to train set\" % len(trainSet)\n print \"Wrote %d tweets to normal test set\" % len(testSet)\n print \"Wrote %d tweets to location test set\" % len(testSetLocation)",
"def generateTrainAndValidateset(trainSets, validateSets, validatePercentage=20):\n\tvalidateFiles = []\n\ttrainFiles = []\n\n\tfor validateSet in validateSets:\n\t\tif \".\" in validateSet:\n\t\t\tvalidateSet, percentage = validateSet.split(\".\")\n\n\t\t\tif percentage == \"all\":\n\t\t\t\t#overwrite any further checks and security measures, just append all files:\n\t\t\t\tvalidateFiles += getAllFiles([validateSet])\n\t\t\t\tcontinue\n\n\t\t\tpercentage = int(percentage)\n\t\telse:\n\t\t\tpercentage = validatePercentage\n\n\t\tif validateSet not in _dataSets:\n\t\t\traise ValueError(\"Not a valid validate set: \" + validateSet)\n\n\t\tallFiles = sorted(filter(lambda x: x.endswith(\".txt\"), os.listdir(_dataSets[validateSet])))\n\t\tallFiles = list(map(lambda x: _dataSets[validateSet] + x, allFiles))\n\t\trandom.seed(42) #make sure all lists are randomized equally each time\n\t\trandom.shuffle(allFiles)\n\n\t\tallAroused = list(filter(lambda x: isAroused(x), allFiles))\n\t\tallNonAroused = list(filter(lambda x: not isAroused(x), allFiles))\n\n\t\tvalidateFiles += allAroused[len(allAroused) - int(percentage * len(allFiles) / 100 / 2):]\n\t\tvalidateFiles += allNonAroused[len(allNonAroused) - int(percentage * len(allFiles) / 100 / 2):]\n\n\n\tfor trainSet in trainSets:\n\t\tif \".\" in trainSet:\n\t\t\ttrainSet, percentage = trainSet.split(\".\", 1)\n\n\t\t\tif percentage == \"all\":\n\t\t\t\t#overwrite any further checks and security measures, just append all files:\n\t\t\t\ttrainFiles += getAllFiles([trainSet])\n\t\t\t\tcontinue\n\n\t\t\tpercentage = int(percentage)\n\t\telse:\n\t\t\tpercentage = 100 - validatePercentage\n\t\t\tvalidatePercentage = validatePercentage\n\n\t\tif trainSet not in _dataSets:\n\t\t\traise ValueError(\"Not a valid train set: \" + trainSet)\n\n\t\tallFiles = sorted(filter(lambda x: x.endswith(\".txt\"), os.listdir(_dataSets[trainSet])))\n\t\tallFiles = list(map(lambda x: _dataSets[trainSet] + x, allFiles))\n\t\trandom.seed(42) #make sure all lists are randomized equally each time\n\t\trandom.shuffle(allFiles)\n\n\t\tallAroused = list(filter(lambda x: isAroused(x), allFiles))\n\t\tallNonAroused = list(filter(lambda x: not isAroused(x), allFiles))\n\n\t\ttrainFiles += filter(lambda x: x not in validateFiles, allAroused[:int(percentage * len(allFiles) / 100 / 2)])\n\t\ttrainFiles += filter(lambda x: x not in validateFiles, allNonAroused[:int(percentage * len(allFiles) / 100 / 2)])\n\n\tif not any(map(lambda x: x.endswith(\".all\"), list(trainSets) + list(validateSets))):\n\t\t#assert no validatefiles are also trainfiles\n\t\tassert(set(trainFiles) - set(validateFiles) == set(trainFiles))\n\t\t#assert an equal amount of aroused and non-aroused validatefiles\n\t\tassert(len(list(filter(isAroused, validateFiles))) == len(validateFiles) / 2)\n\n\treturn trainFiles, validateFiles",
"def split_train_and_test_with_py_datasets(data_set, batch_size=cfg['batch_size'], test_size=0.2, num_works=4,\n pin_memory=True):\n num_dataset = len(data_set)\n indices = list(range(num_dataset))\n split = int(np.floor(test_size * num_dataset))\n\n train_idx, test_idx = indices[split:], indices[:split]\n train_sampler = SubsetRandomSampler(train_idx)\n test_sampler = SubsetRandomSampler(test_idx)\n\n train_loader = torch.utils.data.DataLoader(\n dataset=data_set, batch_size=batch_size, sampler=train_sampler, num_workers=num_works,\n pin_memory=pin_memory\n )\n\n test_loader = torch.utils.data.DataLoader(\n dataset=data_set, batch_size=batch_size, sampler=test_sampler, num_workers=num_works,\n pin_memory=pin_memory\n )\n\n return train_loader, test_loader",
"def cross_validation_datasets(self, fold):\n if fold > len(self): fold = len(self) / 2\n stratified = self.stratified_bunches(fold)\n datasets = []\n for index in range(len(stratified)):\n gold = GoldInstances(training_as_gold(stratified[index]))\n rest = flatten(stratified[:index]) + flatten(stratified[index + 1:])\n training = TrainingInstances(rest)\n datasets.append((training, gold))\n return datasets",
"def generate_dataset():\n num_list = 10\n return [generate_list() for _ in range(num_list)]",
"def generate_datasets(self) -> (tf.data.Dataset, tf.data.Dataset):\n self.obtain_meta_data_frame_for_available_lightcurves()\n positive_example_paths = self.meta_data_frame[self.meta_data_frame['disposition'] == 'PC']['lightcurve_path']\n print(f'{len(positive_example_paths)} positive examples.')\n negative_example_paths = self.meta_data_frame[self.meta_data_frame['disposition'] != 'PC']['lightcurve_path']\n print(f'{len(negative_example_paths)} negative examples.')\n positive_datasets = self.get_training_and_validation_datasets_for_file_paths(positive_example_paths)\n positive_training_dataset, positive_validation_dataset = positive_datasets\n negative_datasets = self.get_training_and_validation_datasets_for_file_paths(negative_example_paths)\n negative_training_dataset, negative_validation_dataset = negative_datasets\n training_dataset = self.get_ratio_enforced_dataset(positive_training_dataset, negative_training_dataset,\n positive_to_negative_data_ratio=1)\n validation_dataset = positive_validation_dataset.concatenate(negative_validation_dataset)\n if self.trial_directory is not None:\n self.log_dataset_file_names(training_dataset, dataset_name='training')\n self.log_dataset_file_names(validation_dataset, dataset_name='validation')\n training_dataset = training_dataset.shuffle(buffer_size=len(list(training_dataset)))\n training_preprocessor = lambda file_path: tuple(tf.py_function(self.training_preprocessing,\n [file_path], [tf.float32, tf.float32]))\n training_dataset = training_dataset.map(training_preprocessor, num_parallel_calls=16)\n training_dataset = training_dataset.padded_batch(self.batch_size, padded_shapes=([None, 2], [None])).prefetch(\n buffer_size=tf.data.experimental.AUTOTUNE)\n validation_preprocessor = lambda file_path: tuple(tf.py_function(self.evaluation_preprocessing,\n [file_path], [tf.float32, tf.float32]))\n validation_dataset = validation_dataset.map(validation_preprocessor, num_parallel_calls=4)\n validation_dataset = validation_dataset.padded_batch(1, padded_shapes=([None, 2], [None])).prefetch(\n buffer_size=tf.data.experimental.AUTOTUNE)\n return training_dataset, validation_dataset",
"def load_datasets():\n from .dataset import num_classes, image_size\n\n train_filename = maybe_download('notMNIST_large.tar.gz', 247336696)\n test_filename = maybe_download('notMNIST_small.tar.gz', 8458043)\n\n train_folders = maybe_extract(train_filename)\n test_folders = maybe_extract(test_filename)\n if not (len(train_folders) == len(test_folders) == num_classes):\n raise Exception('Expected %d folders, one per class. Found %d and %d instead.' % (\n num_classes, len(train_folders), len(test_folders)))\n print(\"Dataset folders: %s, %s\" % (train_folders, test_folders))\n\n # load datasets\n train_datasets = maybe_pickle(train_folders, 45000, image_size)\n test_datasets = maybe_pickle(test_folders, 1800, image_size)\n\n return train_datasets, test_datasets",
"def MakeDataSetFiles(dirname):\n\n\n if not os.path.exists(dirname):\n os.mkdir(dirname)\n if not os.path.exists(os.path.join(dirname, 'train')):\n os.mkdir(os.path.join(dirname, 'train'))\n if not os.path.exists(os.path.join(dirname, 'test')):\n os.mkdir(os.path.join(dirname, 'test'))\n data_train = fetch_20newsgroups(subset='train', categories=None, shuffle=True, random_state=42)\n data_test = fetch_20newsgroups(subset='test', categories=None, shuffle=True, random_state=42)\n\n if dirname[-1] == '/' or dirname[-1] == '\\\\':\n dirname = dirname[:-1]\n \n Util.WriteClassFile(data_train.target, os.path.join(dirname, 'train_classes.txt'))\n Util.WriteClassFile(data_test.target,os.path.join(dirname, 'test_classes.txt'))\n\n\n train_counter = 0;\n for doc in data_train.data:\n filename = 'train_' + str(train_counter).zfill(5);\n f = file(os.path.join(dirname, 'train', filename), 'w');\n f.write(doc.encode('ascii', 'ignore'));\n f.close();\n train_counter = train_counter + 1;\n\n test_counter = 0;\n for doc in data_test.data:\n filename = 'test_' + str(test_counter).zfill(5);\n f = file(os.path.join(dirname, 'test', filename), 'w');\n f.write(doc.encode('ascii', 'ignore'));\n f.close();\n test_counter = test_counter + 1;\n\n class_index = file(os.path.join(dirname, 'class_label_index.txt'), 'w')\n for label in data_train.target_names:\n class_index.write(label + '\\n')\n class_index.close()",
"def dataset_difficulty():\n results = []\n datasets = [ data_2007, data_2012, data_indoor, data_easy ] \n \n for data in datasets:\n \n #Let the user know where we are\n print data\n X,Y = load_csv(data)\n \n # Training/testing split + LDA fit\n X_train, X_test, Y_train, Y_test = cross_validation.train_test_split(X, Y)\n lda = LDA()\n lda.fit(X_train, Y_train)\n \n # Use linear SVC\n clf = svm.SVC(kernel=\"linear\")\n clf.fit(lda.transform(X_train), Y_train)\n \n # Predictions\n train_predict = clf.predict(lda.transform(X_train))\n test_predict = clf.predict(lda.transform(X_test))\n \n #Compute accuracy\n train_acc = 1.*sum(train_predict == Y_train)/len(train_predict)\n test_acc = 1.*sum(test_predict == Y_test)/len(test_predict)\n \n # Append results for that dataset\n results += [ [ data, train_acc, test_acc, clf, lda ] ]\n \n return results",
"def get_train_batches(data_dir='/home/yunhan/batchified'):\n # todo: read in data that is preoprocessed\n # Use batch 1 - 52 as train (60%), 53 - 71 as validation (20%), 72 - 89 as test (20%)\n n = 53\n idx = np.random.permutation(n)\n idx = idx + 1\n for i in range(n):\n X = np.load(\"%s/X%d.npy\" % (data_dir, idx[i]))/255.\n Y = np.load(\"%s/y%d.npy\" % (data_dir, idx[i])).reshape(-1)\n yield X, Y"
] | [
"0.6996355",
"0.66383696",
"0.6572915",
"0.6517153",
"0.64793134",
"0.64199245",
"0.64149666",
"0.63592505",
"0.63368434",
"0.63012886",
"0.6289509",
"0.62852",
"0.6267794",
"0.62432927",
"0.622932",
"0.62188154",
"0.62153995",
"0.62117213",
"0.6202799",
"0.6186463",
"0.6181616",
"0.61775786",
"0.616541",
"0.6165191",
"0.6150936",
"0.6144637",
"0.61238927",
"0.61025333",
"0.603672",
"0.60283566"
] | 0.68757796 | 1 |
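The retrieved negatives in the row above all implement some flavour of train/validation splitting (percentage slices, seeded shuffles, subset samplers). A minimal standalone sketch of the shared idea — deterministic shuffle, then carve off a validation slice — is given below; the file names and the 20% default are illustrative only and are not taken from any one snippet.

```python
import random

def split_files(files, validate_percentage=20, seed=42):
    """Deterministic shuffle, then carve off a validation slice (sketch only)."""
    files = sorted(files)                       # stable starting order
    random.Random(seed).shuffle(files)          # same permutation on every run
    n_validate = int(len(files) * validate_percentage / 100)
    return files[n_validate:], files[:n_validate]   # (train, validate)

train, validate = split_files([f"doc_{i:03d}.txt" for i in range(50)])
assert not set(train) & set(validate)           # no leakage between the splits
print(len(train), len(validate))                # 40 10
```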
Submit a metric as a rate; additional tags provided will be added to the ones from the label provided via the metrics object. | def _submit_rate(self, metric_name, val, metric, custom_tags=None, hostname=None):
_tags = self._metric_tags(metric_name, val, metric, custom_tags, hostname)
self.check.rate('{}.{}'.format(self.NAMESPACE, metric_name), val, _tags, hostname=hostname) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def submit_metric(self, metric_suffix, metric, scraper_config, gauge=True, monotonic_count=True):\n metric_name = scraper_config['namespace'] + metric_suffix\n for sample in metric.samples:\n # Explicit shallow copy of the instance tags\n _tags = list(scraper_config['custom_tags'])\n\n for label_name, label_value in iteritems(sample[self.SAMPLE_LABELS]):\n _tags.append('{}:{}'.format(label_name, label_value))\n if gauge:\n # submit raw metric\n self.gauge(metric_name, sample[self.SAMPLE_VALUE], _tags)\n if monotonic_count:\n # submit rate metric\n self.monotonic_count(metric_name + '.count', sample[self.SAMPLE_VALUE], _tags)",
"def _submit_gauge(self, metric_name, val, metric, custom_tags=None, hostname=None):\n _tags = self._metric_tags(metric_name, val, metric, custom_tags, hostname)\n self.check.gauge('{}.{}'.format(self.NAMESPACE, metric_name), val, _tags, hostname=hostname)",
"def rate(self, dataset, targets):\n raise NotImplementedError",
"def post_save_metrics(sender, **kwargs):\r\n action = 'created' if kwargs.pop('created', False) else 'updated'\r\n\r\n tags = _database_tags(action, sender, kwargs)\r\n dog_stats_api.increment('edxapp.db.model', tags=tags)",
"def rate(self, newrate):\n command = 'rate ' + str(newrate)\n self.run_command(command)",
"def test_tag_rates_on_duplicate_metric_per_cost_type(self):\n tag_values_kwargs = [{\"value\": 0.2}]\n cost_model = {\n \"name\": \"Test Cost Model\",\n \"description\": \"Test\",\n \"source_type\": Provider.PROVIDER_OCP,\n \"providers\": [{\"uuid\": self.provider.uuid, \"name\": self.provider.name}],\n \"markup\": {\"value\": 10, \"unit\": \"percent\"},\n \"rates\": [\n {\"metric\": {\"name\": metric_constants.OCP_METRIC_CPU_CORE_USAGE_HOUR}},\n {\"metric\": {\"name\": metric_constants.OCP_METRIC_CPU_CORE_USAGE_HOUR}},\n ],\n \"currency\": \"USD\",\n }\n cost_model[\"rates\"][0][\"tag_rates\"] = format_tag_rate(tag_key=\"k1\", tag_values=tag_values_kwargs)\n cost_model[\"rates\"][1][\"tag_rates\"] = format_tag_rate(tag_key=\"k2\", tag_values=tag_values_kwargs)\n with tenant_context(self.tenant):\n serializer = CostModelSerializer(data=cost_model, context=self.request_context)\n self.assertTrue(serializer.is_valid(raise_exception=True))\n serializer.save()\n serializer.data",
"def update_metrics(self, metrics, predictions, labels):\n return",
"def tag_metric(request, tag_id, metric_id, error='', message=''):\n try:\n tag = Tag.objects.get(id=tag_id)\n except:\n error += 'Couldn\\'t retrieve tag ' + tag_id + '.'\n try:\n metric = Metric.objects.get(id=metric_id)\n except:\n error += 'Couldn\\'t retrieve metric ' + metric_id + '.'\n\n if tag in metric.tags.all():\n error += 'This metric has already been tagged.'\n\n if not error:\n try:\n metric.tags.add(tag)\n message += 'Tagged metric ' + str(metric.id) + ' with ' + tag.name + '.'\n except:\n error += 'Couldn\\'t tag metric.'\n return index(request=request, error=error, message=message, metric_id=metric_id, tag_id=tag_id)",
"def add_metric(self, metric):\n self.metrics.append(metric)\n self.estimate()",
"def add_metrics(self, metrics):\n for i, metric in enumerate(self.config.metrics):\n tf.summary.scalar(metric, metrics[i])",
"def post(self):\r\n json_data = request.get_json(force=True)\r\n if not json_data:\r\n abort(400, message='No input data provided')\r\n # make sure the metric_id (temporary) and metric_type (model) are filled\r\n json_data[\"metric_id\"] = \"TBD\"\r\n json_data[\"metric_type\"] = \"model\"\r\n\r\n # validate and deserialize input\r\n new_metric = self.load(json_data, session=db.session)\r\n\r\n # get the next metric id and update metric object\r\n try:\r\n db.session.add(new_metric)\r\n db.session.commit()\r\n except SQLAlchemyError as e:\r\n abort(400, message=f'Database error. Reason: {e}')\r\n\r\n # dump to json and return result\r\n result = self.schema.dump(new_metric)\r\n return success(result, code=201)",
"def record_gauge(self, name, value, tags=None):\n identity = self.create_identity(name, tags)\n with self._lock:\n self._batch[identity] = value\n self._timestamps[identity] = int(time.time() * 1000.0)",
"def inc_count(self, metric, value, tags):\n self.increment(metric, value, tags=tags)\n self.increment('%s.count' % metric, tags=tags)",
"def add_metric(self, metric, *, name=None, **kwargs):\n if name is None:\n name = metric.__name__\n\n self.metrics.append((metric, name, kwargs))",
"def add_metric(self, metric, *, name=None, **kwargs):\n if name is None:\n name = metric.__name__\n\n self.metrics.append((metric, name, kwargs))",
"def incr(\n self,\n stat: str,\n count: int = 1,\n rate: float = 1,\n tags: Attributes = None,\n ):\n if _skip_due_to_rate(rate):\n return\n if count < 0:\n raise ValueError(\"count must be a positive value.\")\n\n if self.metrics_validator.test(stat) and name_is_otel_safe(self.prefix, stat):\n counter = self.metrics_map.get_counter(full_name(prefix=self.prefix, name=stat), attributes=tags)\n counter.add(count, attributes=tags)\n return counter",
"def metrics(self, metrics):\n\n self._metrics = metrics",
"def submit_metric():\n\n gson = json.loads(request.get_json())\n\n new_point = DataPoint(\n computer_name=gson[\"computer_name\"],\n cpu_percentage=gson[\"cpu_percentage\"],\n memory_percentage=gson[\"memory_percentage\"],\n timestamp=gson[\"timestamp\"]\n )\n\n with lock:\n if not instances.get(new_point.computer_name):\n instances[new_point.computer_name] = Timeline(\n maxsize=int(os.environ.get(\"COLLECTOR_BUFFER_SIZE\"))\n )\n instances[new_point.computer_name].append(new_point)\n\n return Response(status=200)",
"def add_metric(self, metric_name: str, metric_val: typing.Any):\n self.add_metrics({metric_name: metric_val})",
"def gauge(self, gauge, value):\n try:\n self._thread_pool_executor.submit(self._delegate.gauge, gauge, value)\n except:\n self._logger.exception('Exception caught submitting gauge metric')",
"def test_add_tag_to_derived_metric(self):\n pass",
"def add_metric(self, metric_class, namespace, name, value=1.0, tags=None, interval=None):\n # type: (Type[Metric], str, str, float, MetricTagType, Optional[float]) -> None\n metric_id = Metric.get_id(name, namespace, tags, metric_class.metric_type)\n if metric_class is DistributionMetric:\n metrics_type_payload = TELEMETRY_TYPE_DISTRIBUTION\n else:\n metrics_type_payload = TELEMETRY_TYPE_GENERATE_METRICS\n\n with self._lock:\n existing_metric = self._metrics_data[metrics_type_payload][namespace].get(metric_id)\n if existing_metric:\n existing_metric.add_point(value)\n else:\n new_metric = metric_class(namespace, name, tags=tags, common=True, interval=interval)\n new_metric.add_point(value)\n self._metrics_data[metrics_type_payload][namespace][metric_id] = new_metric",
"def _process_rating(self, metadata: MetadataTransformModel | None):\n self.add_rating(self._transform_value(metadata))",
"def rate(self, rate):\n\n self._rate = rate",
"def rate(self, rate):\n\n self._rate = rate",
"def rate(self, rating, series, is_gs=False, counts=False):\n k = self.calculate_k(rating, counts)*1.1 if is_gs else self.calculate_k(rating, counts)\n rating.value = float(rating.value) + k * self.adjust(rating, series)\n rating.times += 1\n return rating",
"def rate(self, rating, series, is_gs=False, counts=False):\n k = self.calculate_k(rating,counts)*1.1 if is_gs else self.calculate_k(rating,counts)\n rating.value = float(rating.value) + k * self.adjust(rating, series)\n rating.times += 1\n return rating",
"def add_metric(self, metric: str):\n if metric not in self.metrics:\n self.metrics[metric] = self.creator.create_metric(metric)",
"def _record(self, metric_point: MetricPoint,\n measurement_map: MeasurementMap):\n metric_name = metric_point.metric_name\n tags = metric_point.tags\n\n metric = self._registry.get(metric_name)\n # Metrics should be always registered dynamically.\n assert metric\n\n tag_map = tag_map_module.TagMap()\n for key, value in tags.items():\n tag_key = tag_key_module.TagKey(key)\n tag_value = tag_value_module.TagValue(value)\n tag_map.insert(tag_key, tag_value)\n\n metric_value = metric_point.value\n measurement_map.measure_float_put(metric.measure, metric_value)\n # NOTE: When we record this metric, timestamp will be renewed.\n measurement_map.record(tag_map)",
"def endpoint_metrics_set(self, endpoint_name=None, metrics=None):\n if metrics is None:\n raise Exception(\"Metrics required!\")\n if endpoint_name is None:\n self.request('/v1.1/endpoint/metrics', 'POST', body=metrics)\n else:\n self.request('/v1.1/endpoints/%s/metrics' % endpoint_name, 'POST', body=metrics)"
] | [
"0.65285105",
"0.58314323",
"0.57720864",
"0.55562365",
"0.5346391",
"0.5288677",
"0.5270663",
"0.5265564",
"0.51896477",
"0.51511014",
"0.5126361",
"0.51259017",
"0.51075906",
"0.5091107",
"0.5091107",
"0.5021949",
"0.5016198",
"0.4961651",
"0.49544987",
"0.49520537",
"0.49461487",
"0.49023053",
"0.48914933",
"0.4881529",
"0.4881529",
"0.4844227",
"0.48373455",
"0.48332232",
"0.48079944",
"0.48069996"
] | 0.7829274 | 0 |
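The positive document in the row above follows a Datadog-style base-check pattern: merge caller-supplied tags with label-derived tags via `_metric_tags`, then submit under a namespaced name with `self.check.rate`. The sketch below only imitates that shape — `DummyCheck`, the `kubedns` namespace, and the `labels` attribute on the metric object are assumptions, since neither `_metric_tags` nor the real check API is shown in the row.

```python
class DummyCheck:
    """Stand-in for the real check/aggregator object (assumption)."""
    def rate(self, name, value, tags=None, hostname=None):
        print(f"rate {name}={value} tags={sorted(tags or [])} hostname={hostname}")

class RateSubmitter:
    NAMESPACE = "kubedns"  # hypothetical namespace

    def __init__(self, check):
        self.check = check

    def _metric_tags(self, metric_name, val, metric, custom_tags=None, hostname=None):
        # Merge caller-supplied tags with tags derived from the metric's labels.
        tags = list(custom_tags or [])
        tags += [f"{k}:{v}" for k, v in getattr(metric, "labels", {}).items()]
        return tags

    def _submit_rate(self, metric_name, val, metric, custom_tags=None, hostname=None):
        _tags = self._metric_tags(metric_name, val, metric, custom_tags, hostname)
        self.check.rate(f"{self.NAMESPACE}.{metric_name}", val, _tags, hostname=hostname)

class FakeMetric:
    """Hypothetical parsed metric carrying Prometheus-style labels."""
    labels = {"system": "auth", "type": "A"}

RateSubmitter(DummyCheck())._submit_rate("response_time", 0.42, FakeMetric(), ["env:dev"])
# rate kubedns.response_time=0.42 tags=['env:dev', 'system:auth', 'type:A'] hostname=None
```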
Submit a metric as a gauge; additional tags provided will be added to the ones from the label provided via the metrics object. | def _submit_gauge(self, metric_name, val, metric, custom_tags=None, hostname=None):
_tags = self._metric_tags(metric_name, val, metric, custom_tags, hostname)
self.check.gauge('{}.{}'.format(self.NAMESPACE, metric_name), val, _tags, hostname=hostname) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def gauge(self, gauge, value):\n try:\n self._thread_pool_executor.submit(self._delegate.gauge, gauge, value)\n except:\n self._logger.exception('Exception caught submitting gauge metric')",
"def add_gauge(self, data, metric_id=None):\n self._post_data(prefix_id='gauges', data=data, metric_id=metric_id)",
"def gauge(self, gauge, value):\n pass",
"def gauge(self, gauge, value):\n if self.ignore_metrics:\n return\n\n with self._gauge_rlock:\n self._gauge_metrics[gauge] = value\n self._gauge_call_count += 1\n\n old_call_time = self._gauge_last_call_time\n self._gauge_last_call_time = arrow.utcnow().timestamp\n if (self._gauge_call_count == self._max_call_count > 0) or \\\n self._gauge_last_call_time - old_call_time > self._max_time_between_calls > 0:\n self._gauge_call_count = 0\n self.update_gauge()",
"def submit_metric(self, metric_suffix, metric, scraper_config, gauge=True, monotonic_count=True):\n metric_name = scraper_config['namespace'] + metric_suffix\n for sample in metric.samples:\n # Explicit shallow copy of the instance tags\n _tags = list(scraper_config['custom_tags'])\n\n for label_name, label_value in iteritems(sample[self.SAMPLE_LABELS]):\n _tags.append('{}:{}'.format(label_name, label_value))\n if gauge:\n # submit raw metric\n self.gauge(metric_name, sample[self.SAMPLE_VALUE], _tags)\n if monotonic_count:\n # submit rate metric\n self.monotonic_count(metric_name + '.count', sample[self.SAMPLE_VALUE], _tags)",
"def put_gauge(self, *_, **__): # pylint: disable=arguments-differ\n pass",
"def gauge_v5(self, metric, value, tags=None, hostname=None, device_name=None, timestamp=None):\n # Make sure we get the original arguments back\n assert metric == METRIC_NAME\n assert value == METRIC_VALUE\n assert tags == METRIC_TAGS\n assert hostname is None\n assert device_name is None\n assert timestamp == METRIC_TIMESTAMP",
"def metrics_gauge(self, gauge_data):\n url = _METRICS_URL_TEMPLATE.format(base_url=self._events_api_url_base, endpoint='gauge')\n return self._post(url, gauge_data)",
"def register(self, gauge):\r\n raise NotImplementedError",
"def record_gauge(self, name, value, tags=None):\n identity = self.create_identity(name, tags)\n with self._lock:\n self._batch[identity] = value\n self._timestamps[identity] = int(time.time() * 1000.0)",
"def gauge(name, value):\n metric = _get_metric(name) or metrics.new_gauge(name)\n metric.notify(value)",
"def tag_metric(request, tag_id, metric_id, error='', message=''):\n try:\n tag = Tag.objects.get(id=tag_id)\n except:\n error += 'Couldn\\'t retrieve tag ' + tag_id + '.'\n try:\n metric = Metric.objects.get(id=metric_id)\n except:\n error += 'Couldn\\'t retrieve metric ' + metric_id + '.'\n\n if tag in metric.tags.all():\n error += 'This metric has already been tagged.'\n\n if not error:\n try:\n metric.tags.add(tag)\n message += 'Tagged metric ' + str(metric.id) + ' with ' + tag.name + '.'\n except:\n error += 'Couldn\\'t tag metric.'\n return index(request=request, error=error, message=message, metric_id=metric_id, tag_id=tag_id)",
"def update_gauge(self):\n gauge_metrics = self._fetch_gauge_metrics_and_clear()\n self._logger.info('update_gauge. gauge_metrics = %s',\n build_metrics_gauge_data(gauge_metrics))",
"async def send_add_metric(self, title: str, metric_type: str) -> None:\n msg, sending_dialogue = self.dialogues.create(\n counterparty=self.prometheus_address,\n performative=PrometheusMessage.Performative.ADD_METRIC,\n title=title,\n type=metric_type,\n description=\"a gauge\",\n labels={},\n )\n assert sending_dialogue is not None\n\n envelope = Envelope(\n to=msg.to,\n sender=msg.sender,\n message=msg,\n )\n await self.prometheus_con.send(envelope)",
"def _submit_rate(self, metric_name, val, metric, custom_tags=None, hostname=None):\n _tags = self._metric_tags(metric_name, val, metric, custom_tags, hostname)\n self.check.rate('{}.{}'.format(self.NAMESPACE, metric_name), val, _tags, hostname=hostname)",
"def gauge(self, slug, current_value):\n k = self._gauge_key(slug)\n self.r.sadd(self._gauge_slugs_key, slug) # keep track of all Gauges\n self.r.set(k, current_value)",
"def batch_gauge(self, metric_dict, prefix='stalker.'):\n if not self.enabled:\n return\n payload = []\n for k in metric_dict:\n payload.append('%s%s:%d|g' % (prefix, k, metric_dict[k]))\n self._send_events(payload)",
"def update_gauge(self):\n try:\n self._thread_pool_executor.submit(self._update_gauge_fn)\n except:\n self._logger.exception('Exception caught submitting gauge metrics update task.')",
"def gauge_v6(self, name, value, tags=None, hostname=None, device_name=None):\n # Make sure we get the original arguments back and timestamp is not being received\n assert name == METRIC_NAME\n assert value == METRIC_VALUE\n assert tags == METRIC_TAGS\n assert hostname is None\n assert device_name is None",
"def update_gauge(self):\n pass # Do nothing",
"def add_favorite_gauge(params, match):\n gauge_no = match.group(1)\n gauge_name = match.group(2)\n table = get_gauges_table()\n table.put_item(Item={\n 'USGSSiteNumber': gauge_no,\n 'GuageName': gauge_name\n })\n return lambda_response(None, \"added gauge %s %s\" % (gauge_no, gauge_name))",
"def _create_gauge(self, name: str, attributes: Attributes = None):\n otel_safe_name = _get_otel_safe_name(name)\n key = _generate_key_name(name, attributes)\n\n gauge = self.meter.create_observable_gauge(\n name=otel_safe_name,\n callbacks=[partial(self.read_gauge, _generate_key_name(name, attributes))],\n )\n self.map[key] = Observation(DEFAULT_GAUGE_VALUE, attributes)\n\n return gauge",
"def gauge(\n self,\n stat: str,\n value: int | float,\n rate: float = 1,\n delta: bool = False,\n *,\n tags: Attributes = None,\n back_compat_name: str = \"\",\n ) -> None:\n if _skip_due_to_rate(rate):\n return\n\n if back_compat_name and self.metrics_validator.test(back_compat_name):\n self.metrics_map.set_gauge_value(\n full_name(prefix=self.prefix, name=back_compat_name), value, delta, tags\n )\n\n if self.metrics_validator.test(stat):\n self.metrics_map.set_gauge_value(full_name(prefix=self.prefix, name=stat), value, delta, tags)",
"def add_metric(self, metric: str):\n if metric not in self.metrics:\n self.metrics[metric] = self.creator.create_metric(metric)",
"def set_gauge_value(self, name: str, value: float | None, delta: bool, tags: Attributes):\n key: str = _generate_key_name(name, tags)\n new_value = value or DEFAULT_GAUGE_VALUE\n old_value = self.poke_gauge(name, tags)\n if delta:\n new_value += old_value\n # If delta is true, add the new value to the last reading otherwise overwrite it.\n self.map[key] = Observation(new_value, tags)",
"def add(self, value, source=None, **params):\n\t\treturn self.connection.send_gauge_value(self.name, value, source, **params)",
"def add_metric(self, metric_class, namespace, name, value=1.0, tags=None, interval=None):\n # type: (Type[Metric], str, str, float, MetricTagType, Optional[float]) -> None\n metric_id = Metric.get_id(name, namespace, tags, metric_class.metric_type)\n if metric_class is DistributionMetric:\n metrics_type_payload = TELEMETRY_TYPE_DISTRIBUTION\n else:\n metrics_type_payload = TELEMETRY_TYPE_GENERATE_METRICS\n\n with self._lock:\n existing_metric = self._metrics_data[metrics_type_payload][namespace].get(metric_id)\n if existing_metric:\n existing_metric.add_point(value)\n else:\n new_metric = metric_class(namespace, name, tags=tags, common=True, interval=interval)\n new_metric.add_point(value)\n self._metrics_data[metrics_type_payload][namespace][metric_id] = new_metric",
"def add_metric(self, metric, *, name=None, **kwargs):\n if name is None:\n name = metric.__name__\n\n self.metrics.append((metric, name, kwargs))",
"def add_metric(self, metric, *, name=None, **kwargs):\n if name is None:\n name = metric.__name__\n\n self.metrics.append((metric, name, kwargs))",
"def _register_if_needed(self, metric_point: MetricPoint):\n metric_name = metric_point.metric_name\n metric_description = metric_point.description\n metric_units = metric_point.units\n if self._registry[metric_name] is None:\n tags = metric_point.tags\n metric_tags = []\n for tag_key in tags:\n metric_tags.append(tag_key_module.TagKey(tag_key))\n\n metric = Gauge(metric_name, metric_description, metric_units,\n metric_tags)\n self._registry[metric_name] = metric\n self.view_manager.register_view(metric.view)\n\n # If there are missing description & unit information,\n # we should notify cpp processes that we need them.\n if not metric_description or not metric_units:\n self._missing_information = True\n\n if metric_description and metric_units:\n self._registry[metric_name].view._description = metric_description\n self._registry[\n metric_name].view.measure._description = metric_description\n self._registry[metric_name].view.measure._unit = metric_units\n self._missing_information = False"
] | [
"0.7285011",
"0.6867734",
"0.68111044",
"0.6699969",
"0.6642851",
"0.6484831",
"0.64751744",
"0.6383549",
"0.6362057",
"0.6242089",
"0.61857253",
"0.60445803",
"0.5981221",
"0.5947248",
"0.58967316",
"0.5812371",
"0.57682973",
"0.5750602",
"0.5690605",
"0.5674989",
"0.5456014",
"0.5424948",
"0.54027647",
"0.5344845",
"0.5336723",
"0.53249854",
"0.5322995",
"0.53185004",
"0.53185004",
"0.52390313"
] | 0.7893012 | 0 |
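The top-ranked negative in the gauge row (`submit_metric`) shows a common fan-out: each scraped Prometheus sample is tagged with its label pairs and emitted both as a raw gauge and as a `.count` monotonic count. A compact, print-based imitation of that fan-out is sketched below; the `coredns` names and the sample values are made up for illustration, and the `print` calls stand in for a real aggregator API.

```python
def submit_samples(namespace, suffix, samples, custom_tags=()):
    """Fan one scraped metric out to a gauge plus a monotonic count (sketch)."""
    name = f"{namespace}.{suffix}"
    for labels, value in samples:
        tags = list(custom_tags) + [f"{k}:{v}" for k, v in labels.items()]
        print(f"gauge           {name}        value={value} tags={tags}")
        print(f"monotonic_count {name}.count  value={value} tags={tags}")

submit_samples(
    "coredns", "dns_request_count",                       # hypothetical names
    samples=[({"zone": "cluster.local", "proto": "udp"}, 1534.0)],
    custom_tags=["cluster:dev"],
)
```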
Submit a metric as a monotonic count; additional tags provided will be added to the ones from the label provided via the metrics object. | def _submit_monotonic_count(self, metric_name, val, metric, custom_tags=None, hostname=None):
_tags = self._metric_tags(metric_name, val, metric, custom_tags, hostname)
self.check.monotonic_count('{}.{}'.format(self.NAMESPACE, metric_name), val, _tags, hostname=hostname) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def inc_count(self, metric, value, tags):\n self.increment(metric, value, tags=tags)\n self.increment('%s.count' % metric, tags=tags)",
"def post_save_metrics(sender, **kwargs):\r\n action = 'created' if kwargs.pop('created', False) else 'updated'\r\n\r\n tags = _database_tags(action, sender, kwargs)\r\n dog_stats_api.increment('edxapp.db.model', tags=tags)",
"def count(self, counter, delta):\n try:\n self._thread_pool_executor.submit(self._delegate.count, counter, delta)\n except:\n self._logger.exception('Exception caught submitting count metric')",
"def inc(self, labels: dict[str, str]):\n\n val = self.get(labels)\n\n if val is None:\n val = 0\n\n val += 1\n\n self.set(labels, val)",
"def submit_metric(self, metric_suffix, metric, scraper_config, gauge=True, monotonic_count=True):\n metric_name = scraper_config['namespace'] + metric_suffix\n for sample in metric.samples:\n # Explicit shallow copy of the instance tags\n _tags = list(scraper_config['custom_tags'])\n\n for label_name, label_value in iteritems(sample[self.SAMPLE_LABELS]):\n _tags.append('{}:{}'.format(label_name, label_value))\n if gauge:\n # submit raw metric\n self.gauge(metric_name, sample[self.SAMPLE_VALUE], _tags)\n if monotonic_count:\n # submit rate metric\n self.monotonic_count(metric_name + '.count', sample[self.SAMPLE_VALUE], _tags)",
"def _increment_counter(metric: str):\n if metric not in db:\n db[metric] = 0\n db[metric] += 1",
"def record_count(self, name, value, tags=None):\n identity = self.create_identity(name, tags, \"count\")\n with self._lock:\n self._batch[identity] = self._batch.get(identity, 0) + value",
"def sendUpStatCountTagCounts(node, tag):\n def pushUp(node):\n t = 0\n ta = 0\n for child in node.children:\n tc, tac = pushUp(child)\n ta += tac\n t += tc\n node.tagTranscriptAnnotations += ta\n node.tagTranscripts += t\n return node.tagTranscripts, node.tagTranscriptAnnotations\n if ':' in tag:\n tag = tag.split(':')[-1]\n pushUp(node)",
"def post_init_metrics(sender, **kwargs):\r\n tags = _database_tags('initialized', sender, kwargs)\r\n\r\n dog_stats_api.increment('edxapp.db.model', tags=tags)",
"def hit(self, label=None):\n self.labels[label] += 1",
"def count(self, key):\n self._metrics[key] += 1",
"def add_word_tag(self, token, label):\n # Add total count for label\n self.label_counts[label] += 1\n # Add count for word given label\n if token not in self.words_labels_counts[label]:\n self.words_labels_counts[label][token] = 1\n else:\n self.words_labels_counts[label][token] += 1",
"def add_counter(self, data, metric_id=None):\n self._post_data(prefix_id='counters', data=data, metric_id=metric_id)",
"def update_count(self):\n count_metrics = self._fetch_count_metrics_and_clear()\n self._logger.info('update_count. count_metrics = %s',\n build_metrics_counter_data(count_metrics))",
"def count(self, counter, delta):\n if self.ignore_metrics:\n return\n\n with self._count_rlock:\n self._count_metrics[counter] += delta\n self._count_call_count += 1\n\n old_call_time = self._count_last_call_time\n self._count_last_call_time = arrow.utcnow().timestamp\n if (self._count_call_count == self._max_call_count > 0) or \\\n self._count_last_call_time - old_call_time > self._max_time_between_calls > 0:\n self._count_call_count = 0\n self.update_count()",
"def _AddMetric(self, metric):\n machine = metric.machine_id\n time = metric.timestamp\n payload = DotDict(json.loads(metric.payload)).flatten()\n\n self.machines.add(machine)\n self.timestamps.add(time)\n for k in payload:\n if k not in self.counter_data:\n continue\n val = payload.get(k, None)\n if val is not None:\n self.counter_data[k].AddSample(machine, time, val)",
"def emit_counter(self, category, name, pid, timestamp, counter, value):\n event = self._create_event('C', category, name, pid, 0, timestamp)\n event['args'] = {counter: value}\n self._events.append(event)",
"def _submit_gauge(self, metric_name, val, metric, custom_tags=None, hostname=None):\n _tags = self._metric_tags(metric_name, val, metric, custom_tags, hostname)\n self.check.gauge('{}.{}'.format(self.NAMESPACE, metric_name), val, _tags, hostname=hostname)",
"def counter(self, metric_name, value=1):\n if self._send_sampled_event():\n counter = \"%s%s:%d|c|@%s\" % (self.metric_name_prepend, metric_name,\n value, self.statsd_sample_rate)\n self._send_events([counter])",
"def m2m_changed_metrics(sender, **kwargs):\r\n if 'action' not in kwargs:\r\n return\r\n\r\n action = {\r\n 'post_add': 'm2m.added',\r\n 'post_remove': 'm2m.removed',\r\n 'post_clear': 'm2m.cleared',\r\n }.get(kwargs['action'])\r\n\r\n if not action:\r\n return\r\n\r\n tags = _database_tags(action, sender, kwargs)\r\n\r\n if 'model' in kwargs:\r\n tags.append('target_class:{}'.format(kwargs['model'].__name__))\r\n\r\n pk_set = kwargs.get('pk_set', []) or []\r\n\r\n dog_stats_api.increment(\r\n 'edxapp.db.model',\r\n value=len(pk_set),\r\n tags=tags\r\n )",
"def do_counter(parser, token):\n try:\n tag_name, reset = token.contents.split(None, 1)\n except ValueError:\n reset = False\n else:\n if reset == 'reset':\n reset = True\n return CounterNode(reset)",
"def incr(\n self,\n stat: str,\n count: int = 1,\n rate: float = 1,\n tags: Attributes = None,\n ):\n if _skip_due_to_rate(rate):\n return\n if count < 0:\n raise ValueError(\"count must be a positive value.\")\n\n if self.metrics_validator.test(stat) and name_is_otel_safe(self.prefix, stat):\n counter = self.metrics_map.get_counter(full_name(prefix=self.prefix, name=stat), attributes=tags)\n counter.add(count, attributes=tags)\n return counter",
"def post_delete_metrics(sender, **kwargs):\r\n tags = _database_tags('deleted', sender, kwargs)\r\n\r\n dog_stats_api.increment('edxapp.db.model', tags=tags)",
"def tag_metric(request, tag_id, metric_id, error='', message=''):\n try:\n tag = Tag.objects.get(id=tag_id)\n except:\n error += 'Couldn\\'t retrieve tag ' + tag_id + '.'\n try:\n metric = Metric.objects.get(id=metric_id)\n except:\n error += 'Couldn\\'t retrieve metric ' + metric_id + '.'\n\n if tag in metric.tags.all():\n error += 'This metric has already been tagged.'\n\n if not error:\n try:\n metric.tags.add(tag)\n message += 'Tagged metric ' + str(metric.id) + ' with ' + tag.name + '.'\n except:\n error += 'Couldn\\'t tag metric.'\n return index(request=request, error=error, message=message, metric_id=metric_id, tag_id=tag_id)",
"def metric(self, slug, num=1, category=None, expire=None, date=None):\n # Add the slug to the set of metric slugs\n self.r.sadd(self._metric_slugs_key, slug)\n\n if category:\n self._categorize(slug, category)\n\n # Increment keys. NOTE: current redis-py (2.7.2) doesn't include an\n # incrby method; .incr accepts a second ``amount`` parameter.\n keys = self._build_keys(slug, date=date)\n\n # Use a pipeline to speed up incrementing multiple keys\n pipe = self.r.pipeline()\n for key in keys:\n pipe.incr(key, num)\n if expire:\n pipe.expire(key, expire)\n pipe.execute()",
"def increment_metric_counter(metric_name, redis_db):\n if TEST_MODE:\n print 'Simulate redis incremet, key is %s' % metric_name\n return\n if redis_db:\n try:\n redis_db.incr(metric_name)\n except Exception as e:\n logger.warning(\"Failed to increment redis metric '%s' \"\n \"with exception '%s'\", metric_name, e)",
"def mymetrics(): \n _update_metric_counters()\n logging.debug(prom_objects_seen.collect())\n return flask.Response(generate_latest(), mimetype='text/plain')",
"async def test_counter(client, counter_entities) -> None:\n body = await generate_latest_metrics(client)\n\n assert (\n 'counter_value{domain=\"counter\",'\n 'entity=\"counter.counter\",'\n 'friendly_name=\"None\"} 2.0' in body\n )",
"def test_counter(self):\n # Create a metrics with no metric instances\n mf = pmp.utils.create_metric_family(\n self.counter_metric_name,\n self.counter_metric_help,\n self.counter_metric_type,\n [],\n )\n self.assertIsInstance(mf, pmp.MetricFamily)\n self.assertEqual(len(mf.metric), 0)\n\n # Create it with metrics\n mf = pmp.utils.create_metric_family(\n self.counter_metric_name,\n self.counter_metric_help,\n self.counter_metric_type,\n self.counter_metric_data,\n )\n self.assertIsInstance(mf, pmp.MetricFamily)\n self.assertEqual(mf.name, self.counter_metric_name)\n self.assertEqual(mf.help, self.counter_metric_help)\n self.assertEqual(mf.type, self.counter_metric_type)\n\n # Create another and check equal\n mf_ = pmp.utils.create_metric_family(\n self.counter_metric_name,\n self.counter_metric_help,\n self.counter_metric_type,\n self.counter_metric_data,\n )\n self.assertIsInstance(mf_, pmp.MetricFamily)\n\n self.assertEqual(mf, mf_)\n\n for m in mf_.metric:\n self.assertEqual(m.timestamp_ms, 0)\n\n # Create another with timestamp\n mf_ = pmp.utils.create_metric_family(\n self.counter_metric_name,\n self.counter_metric_help,\n self.counter_metric_type,\n self.counter_metric_data,\n timestamp=True,\n )\n self.assertIsInstance(mf_, pmp.MetricFamily)\n\n for m in mf_.metric:\n self.assertNotEqual(m.timestamp_ms, 0)\n\n self.assertNotEqual(mf, mf_)\n\n # Create Counter with const_labels\n mf_ = pmp.utils.create_metric_family(\n self.counter_metric_name,\n self.counter_metric_help,\n self.counter_metric_type,\n self.counter_metric_data,\n const_labels=self.const_labels,\n )\n self.assertIsInstance(mf_, pmp.MetricFamily)\n\n # Check that const_label is present in the LabelPair associated\n # with each metric instance.\n for m in mf_.metric:\n labels = [lp.name for lp in m.label]\n self.assertIn(\"app\", labels)\n\n self.assertNotEqual(mf, mf_)\n\n # Check Counter can be round-tripped through encode and decode\n payload = pmp.encode(mf)\n self.assertIsInstance(payload, bytes)\n _mf = pmp.decode(payload)[0]\n self.assertEqual(mf, _mf)",
"def process_counter_event(\n self,\n name: str,\n categories: Union[List[str], Tuple[str, ...]],\n timestamp: Timestamp,\n wall_clock_time_ns: int,\n values: Dict[str, Union[int, float]],\n ) -> None:\n del name, categories, timestamp, wall_clock_time_ns, values # unused"
] | [
"0.7103015",
"0.62737775",
"0.60384905",
"0.60204303",
"0.59881437",
"0.58371973",
"0.5602605",
"0.55809146",
"0.5569244",
"0.5481773",
"0.5438321",
"0.5368393",
"0.53387356",
"0.53331786",
"0.5259395",
"0.5257974",
"0.52503383",
"0.52373564",
"0.52249354",
"0.5209548",
"0.5200397",
"0.51959217",
"0.51663995",
"0.5146076",
"0.51287234",
"0.50925756",
"0.5084451",
"0.50807995",
"0.5048736",
"0.5044877"
] | 0.75249547 | 0 |
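A monotonic count takes an ever-increasing raw counter and reports only how much it grew since the previous reading. The sketch below is not the agent's implementation — it just illustrates that bookkeeping, using the (assumed) convention that a decrease in the raw value is treated as a counter reset and skipped for that reading.

```python
class MonotonicCounter:
    """Track last raw readings per (name, tags) and report only the increase."""

    def __init__(self):
        self._last = {}

    def submit(self, name, raw_value, tags=()):
        key = (name, tuple(sorted(tags)))
        previous = self._last.get(key)
        self._last[key] = raw_value
        if previous is None or raw_value < previous:
            return None          # first point, or an apparent counter reset
        delta = raw_value - previous
        print(f"{name} +{delta} tags={list(tags)}")
        return delta

mc = MonotonicCounter()
mc.submit("kubedns.error_count", 10.0, ["pod:dns-0"])   # primes the counter
mc.submit("kubedns.error_count", 13.0, ["pod:dns-0"])   # reports +3.0
```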
Visit assignment node whose targets are all simple. | def visit_simple_assign(self, node):
temp = gensym()
temp_target = to_name(temp, ast.Store())
stmts = [ ast.Assign([temp_target], node.value) ]
stmts += [ ast.Assign([target], to_name(temp))
for target in node.targets ]
return stmts | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def visit_Assign(self, node):\n self.generic_visit(node)\n is_multiple = len(node.targets) > 1\n is_compound = any(map(is_sequence_node, node.targets))\n is_simple = not is_compound\n if is_simple and is_multiple:\n return self.visit_simple_assign(node)\n elif is_compound and (is_multiple or is_sequence_node(node.value)):\n return self.visit_compound_assign(node)\n return node",
"def single_assignment_handler(target, value, assign_stmts, node, id_str):\n #print(\"single_assignment_handler\")\n\n temp_stmts = core_language.create_Assign(target, value)\n if hasattr(node, 'lineno'):\n temp_stmts.lineno = node.lineno\n temp_stmts.col_offset = node.col_offset\n\n assign_stmts.append(temp_stmts)\n return False",
"def visit_Assign(self, node):\n assign_stmts = []\n value = node.value\n reversed_targets = node.targets\n reversed_targets.reverse()\n assign_stmts.append(stypy_functions.create_blank_line())\n if len(reversed_targets) > 1:\n assign_stmts.append(\n stypy_functions.create_src_comment(\n \"Multiple assignment of {0} elements.\".format(len(reversed_targets))))\n else:\n if hasattr(node, 'lineno'):\n assign_stmts.append(stypy_functions.create_src_comment(\n \"Assigning a {1} to a {0} (line {2}):\".format(type(reversed_targets[0]).__name__,\n type(value).__name__, node.lineno)))\n else:\n assign_stmts.append(stypy_functions.create_src_comment(\n \"Assigning a {1} to a {0}:\".format(type(reversed_targets[0]).__name__,\n type(value).__name__)))\n for assign_num in xrange(len(reversed_targets)):\n target = reversed_targets[assign_num]\n # Function guard is true? execute handler\n for handler_func_guard_tuple in self.__assignment_handlers:\n if handler_func_guard_tuple[0](target, value):\n id_str, handler_func = handler_func_guard_tuple[1]\n self.performed_transformations |= handler_func(target, value, assign_stmts, node, id_str)\n assign_stmts = stypy_functions.flatten_lists(assign_stmts)\n value = target\n break\n\n if len(assign_stmts) > 0:\n return assign_stmts\n return node",
"def visit_assign(self: Parser, node: doc.Assign) -> None:\n if len(node.targets) != 1:\n self.report_error(node, \"Consequential assignments like 'a = b = c' are not supported.\")\n lhs = node.targets[0]\n\n if isinstance(node.value, doc.Subscript):\n check_slices = []\n if isinstance(node.value.slice, doc.Slice):\n check_slices = [node.value.slice]\n elif isinstance(node.value.slice, doc.Tuple):\n for p in node.value.slice.elts:\n if isinstance(p, doc.Slice):\n check_slices.append(p)\n for s in check_slices:\n if not s.step and s.upper and s.lower:\n s.step = doc.Constant(\n 1,\n None,\n 1,\n 1,\n s.upper.lineno,\n s.upper.end_col_offset + 1,\n s.upper.lineno,\n s.upper.end_col_offset + 2,\n )\n\n rhs = self.eval_expr(node.value)\n if isinstance(lhs, doc.Subscript):\n if isinstance(lhs.slice, doc.Tuple):\n indices = []\n for index in lhs.slice.elts:\n indices.append(self.eval_expr(index))\n else:\n indices = self.eval_expr(lhs.slice)\n T.buffer_store(self.eval_expr(lhs.value), rhs, indices)\n else:\n self.eval_assign(target=lhs, source=rhs, bind_value=bind_assign_value)",
"def visit_any_assign(self, node: types.AnyAssign) -> None:\n self._check_slots(node)\n self.generic_visit(node)",
"def visit_Assign(self, node):\n self.generic_visit(node)\n target = get_single_target(node)\n if isinstance(target, ast.Subscript):\n fun = to_attribute(self.operator, 'setitem')\n args = [target.value, self.index_to_expr(target.slice), node.value]\n return ast.Expr(to_call(fun, args))\n return node",
"def visit_Assign(self, node):\n self.generic_visit(node)\n target = get_single_target(node)\n if isinstance(target, ast.Attribute):\n args = [ target.value, ast.Str(target.attr), node.value ]\n return ast.Expr(to_call(to_name('setattr'), args))\n return node",
"def iterassign(node:_AssingT) -> Iterator[Optional[List[str]]]:\n for target in node.targets if isinstance(node, ast.Assign) else [node.target]:\n dottedname = node2dottedname(target) \n yield dottedname",
"def visit_Assign(self, node: ast.Assign) -> None:\n # skip multiple assignments\n if len(node.targets) != 1:\n return\n\n # skip complex assignments\n if not isinstance(node.targets[0], ast.Name):\n return\n\n name = node.targets[0].id\n\n # skip private attributes\n if name.startswith(\"_\"):\n return\n\n self.attribute_nodes.append(node)",
"def visit_compound_assign(self, node):\n # Determine number of values (arity) of compound assignment.\n nvalues = { len(target.elts) for target in node.targets \n if is_sequence_node(target) }\n if len(nvalues) > 1:\n # A multiple, compound assignment with different arities, e.g.,\n # `x,y = a,b,c = ...` is not a syntax error in Python, though it\n # probably should be because it's guaranteed to cause a runtime\n # error. Raise the error here, since we cannot proceed.\n raise SyntaxError(\"Multiple assignment with different arities\")\n nvalues = nvalues.pop()\n\n # Assign temporary variables.\n temps = [ gensym() for i in range(nvalues) ]\n stmts = []\n if is_sequence_node(node.value) and len(node.value.elts) == nvalues:\n # Special case: RHS is sequence literal of correct length.\n for i in range(nvalues):\n temp_target = to_name(temps[i], ast.Store())\n stmts.append(ast.Assign([temp_target], node.value.elts[i]))\n else:\n # General case.\n temp_target = to_tuple(\n (to_name(temp, ast.Store()) for temp in temps), ast.Store())\n stmts.append(ast.Assign([temp_target], node.value))\n\n # Rewrite assignments as sequence of assignments.\n for target in reversed(node.targets):\n if is_sequence_node(target):\n stmts.extend(ast.Assign([target.elts[i]], to_name(temps[i]))\n for i in range(nvalues))\n else:\n temp_tuple = to_tuple(to_name(temp) for temp in temps)\n stmts.append(ast.Assign([target], temp_tuple))\n \n return stmts",
"def visit_Assign(self, node):\n self.generic_visit(node)\n\n if node.col_offset == 0:\n mnode = ast.parse(\"\")\n mnode.body = [node]\n mnode = ast.fix_missing_locations(mnode)\n code = compile(mnode, \"<ast>\", \"exec\")\n try:\n exec(code, self.globals_)\n except Exception:\n pass\n self.globals_.pop(\"__builtins__\", None)\n self.globals_.pop(\"builtins\", None)",
"def _Assign(self, t):\n if len(t.targets) > 1:\n self.RaiseError(t, \"Assignment to multiple targets not supported\")\n if not isinstance(t.targets[0], ast.Name):\n self.RaiseError(t, \"Assignment to complex expressions not supported\")\n self.fill()\n # check if target exists in locals\n if t.targets[0].id not in self._locals :\n self.write(\"auto \")\n self._locals.append(t.targets[0].id)\n self.dispatch(t.targets[0])\n self.write(\" = \")\n self.dispatch(t.value)\n self.write(\";\")",
"def _analyse_stmt_AnnAssign(\n self, statement: ast.AnnAssign, *, next: CFNode\n ) -> CFNode:\n return self._ast_node(statement, next=next, error=self._raise)",
"def _analyse_stmt_Assign(self, statement: ast.Assign, *, next: CFNode) -> CFNode:\n return self._ast_node(statement, next=next, error=self._raise)",
"def visit_ann_assign(self: Parser, node: doc.AnnAssign) -> None:\n lhs = node.target\n rhs = self.eval_expr(node.value)\n ann_var = self.visit_tvm_annotation(node.annotation)\n if not isinstance(ann_var, Var):\n self.report_error(node.annotation, \"Annotation should be Var\")\n self.eval_assign(target=lhs, source=ann_var, bind_value=bind_assign_value)\n frame = T.LetStmt(rhs, var=ann_var)\n frame.add_callback(partial(frame.__exit__, None, None, None))\n frame.__enter__()",
"def _is_assignment(node: cst.CSTNode, assignment_node: cst.CSTNode) -> bool:\n if node is assignment_node:\n return True\n if isinstance(assignment_node, (cst.Import, cst.ImportFrom)):\n aliases = assignment_node.names\n if isinstance(aliases, cst.ImportStar):\n return False\n for alias in aliases:\n if alias.name is node:\n return True\n asname = alias.asname\n if asname is not None:\n if asname.name is node:\n return True\n return False",
"def _process_assign(self, node: ast.Assign) -> None:\n if isinstance(node.value, ast.Call) and self._is_export_call(\n node.value.func\n ):\n # id = tf_export(...)(...)\n if len(node.targets) != 1:\n raise BadExportError(\n f'{self._current_file}:{node.lineno} export must be'\n f' assigned to a single value: {ast.dump(node)}'\n )\n symbol = self._name(node.targets[0])\n if not symbol:\n raise BadExportError(\n f'{self._current_file}:{node.lineno} export must be'\n f' assigned to a single value: {ast.dump(node)}'\n )\n self._add_exported_symbol(node.value.func, symbol)\n else:\n self.visit(node)",
"def _scan_declarative_assignment_stmt(\n cls: ClassDef,\n api: SemanticAnalyzerPluginInterface,\n stmt: AssignmentStmt,\n attributes: List[util.SQLAlchemyAttribute],\n) -> None:\n lvalue = stmt.lvalues[0]\n if not isinstance(lvalue, NameExpr):\n return\n\n sym = cls.info.names.get(lvalue.name)\n\n # this establishes that semantic analysis has taken place, which\n # means the nodes are populated and we are called from an appropriate\n # hook.\n assert sym is not None\n node = sym.node\n\n if isinstance(node, PlaceholderNode):\n return\n\n assert node is lvalue.node\n assert isinstance(node, Var)\n\n if node.name == \"__abstract__\":\n if api.parse_bool(stmt.rvalue) is True:\n util.set_is_base(cls.info)\n return\n elif node.name == \"__tablename__\":\n util.set_has_table(cls.info)\n elif node.name.startswith(\"__\"):\n return\n elif node.name == \"_mypy_mapped_attrs\":\n if not isinstance(stmt.rvalue, ListExpr):\n util.fail(api, \"_mypy_mapped_attrs is expected to be a list\", stmt)\n else:\n for item in stmt.rvalue.items:\n if isinstance(item, (NameExpr, StrExpr)):\n apply.apply_mypy_mapped_attr(cls, api, item, attributes)\n\n left_hand_mapped_type: Optional[Type] = None\n left_hand_explicit_type: Optional[ProperType] = None\n\n if node.is_inferred or node.type is None:\n if isinstance(stmt.type, UnboundType):\n # look for an explicit Mapped[] type annotation on the left\n # side with nothing on the right\n\n # print(stmt.type)\n # Mapped?[Optional?[A?]]\n\n left_hand_explicit_type = stmt.type\n\n if stmt.type.name == \"Mapped\":\n mapped_sym = api.lookup_qualified(\"Mapped\", cls)\n if (\n mapped_sym is not None\n and mapped_sym.node is not None\n and names.type_id_for_named_node(mapped_sym.node)\n is names.MAPPED\n ):\n left_hand_explicit_type = get_proper_type(\n stmt.type.args[0]\n )\n left_hand_mapped_type = stmt.type\n\n # TODO: do we need to convert from unbound for this case?\n # left_hand_explicit_type = util._unbound_to_instance(\n # api, left_hand_explicit_type\n # )\n else:\n node_type = get_proper_type(node.type)\n if (\n isinstance(node_type, Instance)\n and names.type_id_for_named_node(node_type.type) is names.MAPPED\n ):\n # print(node.type)\n # sqlalchemy.orm.attributes.Mapped[<python type>]\n left_hand_explicit_type = get_proper_type(node_type.args[0])\n left_hand_mapped_type = node_type\n else:\n # print(node.type)\n # <python type>\n left_hand_explicit_type = node_type\n left_hand_mapped_type = None\n\n if isinstance(stmt.rvalue, TempNode) and left_hand_mapped_type is not None:\n # annotation without assignment and Mapped is present\n # as type annotation\n # equivalent to using _infer_type_from_left_hand_type_only.\n\n python_type_for_type = left_hand_explicit_type\n elif isinstance(stmt.rvalue, CallExpr) and isinstance(\n stmt.rvalue.callee, RefExpr\n ):\n python_type_for_type = infer.infer_type_from_right_hand_nameexpr(\n api, stmt, node, left_hand_explicit_type, stmt.rvalue.callee\n )\n\n if python_type_for_type is None:\n return\n\n else:\n return\n\n assert python_type_for_type is not None\n\n attributes.append(\n util.SQLAlchemyAttribute(\n name=node.name,\n line=stmt.line,\n column=stmt.column,\n typ=python_type_for_type,\n info=cls.info,\n )\n )\n\n apply.apply_type_to_mapped_statement(\n api,\n stmt,\n lvalue,\n left_hand_explicit_type,\n python_type_for_type,\n )",
"def convert_assign(g, op, block):\n\n out = g.get_node(op.input(\"X\")[0])\n g.add_node(op.output(\"Out\")[0], out)",
"def _(self, node: Assignment):\n\n # This check allows us to ignore the initialization nodes\n # in the CAST 'i.e. x0 = -1'\n if node.source_refs == None:\n if type(node.left) == Var:\n if type(node.right) == Number and node.right.number == -1:\n return \"\"\n\n left = self.visit(node.left)\n right = self.visit(node.right)\n\n to_ret = f\"( assign {left} {right} )\"\n return to_ret",
"def process_assignment_ast(stmt_ast: ast.Assign, stmt_ast_parent_block):\n logger.log.info(\"Generating SymbolicState instance from assignment ast\")\n # first, add a reference from stmt_ast to its parent block\n stmt_ast.parent_block = stmt_ast_parent_block\n logger.log.info(\"Instantiating symbolic state for AST instance stmt_ast = %s\" % stmt_ast)\n # determine the program variables assigned on the left-hand-side\n targets: list = stmt_ast.targets\n # extract names - for now just care about normal program variables, not attributes or functions\n logger.log.info(\"Extracting list of assignment target names\")\n target_names: list = []\n for target in targets:\n target_names += extract_symbol_names_from_target(target)\n logger.log.info(\"List of all program variables changed is %s\" % target_names)\n # extract function names\n assigned_value = stmt_ast.value\n function_names: list = extract_function_names(assigned_value)\n logger.log.info(\"List of all program functions called is %s\" % function_names)\n # merge the two lists of symbols\n logger.log.info(\"Merging lists of assignment target names and function names\")\n all_symbols: list = target_names + function_names\n logger.log.info(\"List of all symbols to mark as changed in the symbolic state is %s\" % all_symbols)\n # set up a SymbolicState instance\n logger.log.info(\"Instantiating new StatementSymbolicState instance with all_symbols = %s\" % all_symbols)\n symbolic_state: SymbolicState = StatementSymbolicState(all_symbols, stmt_ast)\n return symbolic_state",
"def is_assign_to_name(statement):\n return isinstance(statement, ast.Assign) and \\\n len(statement.targets) == 1 and \\\n isinstance(statement.targets[0], ast.Name)",
"def multiple_value_call_assignment_handler(target, value, assign_stmts, node, id_str):\n #print(\"multiple_value_call_assignment_handler\")\n target_stmts, value_var = stypy_functions.create_temp_Assign(value, node.lineno, node.col_offset,\n \"{0}_assignment\".format(id_str))\n assign_stmts.append(target_stmts)\n\n #value_var_to_load = copy.deepcopy(value_var)\n value_var_to_load = ast.Name()\n value_var_to_load.col_offset = value_var.col_offset\n value_var_to_load.lineno = value_var.lineno\n value_var_to_load.id = value_var.id\n value_var_to_load.ctx = ast.Load()\n\n for i in xrange(len(target.elts)):\n # Assign values to each element.\n # getitem_att = core_language.create_attribute(value_var_to_load, '__getitem__', context=ast.Load(),\n # line=node.lineno,\n # column=node.col_offset)\n # item_call = functions.create_call(getitem_att, [core_language.create_num(i, node.lineno, node.col_offset)])\n # temp_stmts, temp_value = stypy_functions.create_temp_Assign(item_call, node.lineno, node.col_offset,\n # \"{0}_assignment\".format(id_str))\n stypy_interface = core_language.create_Name('stypy_interface')\n get_tuple_call = core_language.create_attribute(stypy_interface, 'stypy_get_value_from_tuple', context=ast.Load(),\n line=node.lineno,\n column=node.col_offset)\n\n item_call = functions.create_call(get_tuple_call, [value_var_to_load,\n core_language.create_num(len(target.elts), node.lineno, node.col_offset),\n core_language.create_num(i, node.lineno, node.col_offset)])\n temp_stmts, temp_value = stypy_functions.create_temp_Assign(item_call, node.lineno, node.col_offset,\n \"{0}_assignment\".format(id_str))\n if hasattr(node, 'lineno'):\n temp_stmts.lineno = node.lineno\n temp_stmts.col_offset = node.col_offset\n\n assign_stmts.append(temp_stmts)\n\n temp_stmts = core_language.create_Assign(target.elts[i], temp_value)\n if hasattr(node, 'lineno'):\n temp_stmts.lineno = node.lineno\n temp_stmts.col_offset = node.col_offset\n\n assign_stmts.append(temp_stmts)\n\n return True",
"def visit_AugAssign(self, node):\n self.generic_visit(node)\n stmts = []\n target = node.target\n if not isinstance(target, ast.Subscript):\n return node\n\n # AST node for target value, gensym-ed if necessary.\n if self.can_reevaluate(target.value):\n target_node = target.value\n else:\n target_node = to_name(gensym())\n stmts.append(ast.Assign(\n [set_ctx(target_node, ast.Store())], target.value))\n \n # AST node for index, gensym-ed if necessary.\n index_expr = self.index_to_expr(target.slice)\n if self.can_reevaluate(index_expr):\n index_node = index_expr\n else:\n index_node = to_name(gensym())\n stmts.append(ast.Assign(\n [set_ctx(index_node, ast.Store())], index_expr))\n \n # Main AST node for the indexed augemented assignment.\n stmts.append(ast.Expr(\n to_call(to_attribute(self.operator, 'setitem'), [\n target_node,\n index_node,\n to_call(self.op_to_function(node.op), [\n to_call(to_attribute(self.operator, 'getitem'), [\n target_node,\n index_node,\n ]),\n node.value\n ])\n ])\n ))\n\n return stmts",
"def visit_Node(self, node):\n pass",
"def visit(self, node):",
"def visit(self, node):",
"def do_assign(parser, token):\n bits = token.contents.split()\n if len(bits) != 3:\n raise template.TemplateSyntaxError(\"'%s' tag takes two arguments\" % bits[0])\n value = parser.compile_filter(bits[2])\n return AssignNode(bits[1], value)",
"def do_assign(parser, token):\n bits = token.contents.split()\n if len(bits) != 3:\n raise template.TemplateSyntaxError(\"'%s' tag takes two arguments\" % bits[0])\n value = parser.compile_filter(bits[2])\n return AssignNode(bits[1], value)",
"def test_list_assignments_for_tree(self):\n # Enable OS-INHERIT extension\n\n test_plan = {\n # Create a domain with a project hierarchy 3 levels deep:\n #\n # project 0\n # ____________|____________\n # | |\n # project 1 project 4\n # ______|_____ ______|_____\n # | | | |\n # project 2 project 3 project 5 project 6\n #\n # Also, create 1 user and 4 roles.\n 'entities': {\n 'domains': {\n 'projects': {'project': [{'project': 2},\n {'project': 2}]},\n 'users': 1},\n 'roles': 4},\n 'assignments': [\n # Direct assignment to projects 1 and 2\n {'user': 0, 'role': 0, 'project': 1},\n {'user': 0, 'role': 1, 'project': 2},\n # Also an inherited assignment on project 1\n {'user': 0, 'role': 2, 'project': 1,\n 'inherited_to_projects': True},\n # ...and two spoiler assignments, one to the root and one\n # to project 4\n {'user': 0, 'role': 0, 'project': 0},\n {'user': 0, 'role': 3, 'project': 4}],\n 'tests': [\n # List all assignments for project 1 and its subtree.\n {'params': {'project': 1, 'include_subtree': True},\n 'results': [\n # Only the actual assignments should be returned, no\n # expansion of inherited assignments\n {'user': 0, 'role': 0, 'project': 1},\n {'user': 0, 'role': 1, 'project': 2},\n {'user': 0, 'role': 2, 'project': 1,\n 'inherited_to_projects': 'projects'}]}\n ]\n }\n\n self.execute_assignment_plan(test_plan)"
] | [
"0.69212425",
"0.64886594",
"0.6375846",
"0.6128011",
"0.5978111",
"0.5925981",
"0.59075874",
"0.58308166",
"0.57873815",
"0.572006",
"0.5688921",
"0.5654621",
"0.5651131",
"0.5596672",
"0.5524254",
"0.5473678",
"0.5473099",
"0.53852975",
"0.53435594",
"0.529892",
"0.52969265",
"0.5280814",
"0.5269391",
"0.5228468",
"0.51454127",
"0.51370144",
"0.51370144",
"0.5118967",
"0.5118967",
"0.51078135"
] | 0.70178634 | 0 |
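The `visit_simple_assign` document in the row above rewrites `a = b = expr` so the right-hand side is evaluated once into a temporary and then copied to each target. The self-contained transformer below reproduces that rewrite with simplified stand-ins for the record's `gensym`/`to_name` helpers; it needs Python 3.9+ for `ast.unparse`.

```python
import ast
import itertools

_counter = itertools.count()

def gensym() -> str:
    """Fresh temporary name; simplified stand-in for the record's gensym()."""
    return f"__tmp_{next(_counter)}"

class SimpleAssignExpander(ast.NodeTransformer):
    """Desugar `a = b = expr` into one temp assignment plus per-target copies."""

    def visit_Assign(self, node: ast.Assign):
        self.generic_visit(node)
        # Only the "simple targets, multiple of them" case is handled here.
        if len(node.targets) <= 1 or any(
            isinstance(t, (ast.Tuple, ast.List)) for t in node.targets
        ):
            return node
        temp = gensym()
        stmts = [ast.Assign(targets=[ast.Name(id=temp, ctx=ast.Store())],
                            value=node.value)]
        stmts += [ast.Assign(targets=[target],
                             value=ast.Name(id=temp, ctx=ast.Load()))
                  for target in node.targets]
        return stmts   # NodeTransformer splices a returned list into the body

tree = ast.parse("a = b = make_value()")
tree = ast.fix_missing_locations(SimpleAssignExpander().visit(tree))
print(ast.unparse(tree))
# __tmp_0 = make_value()
# a = __tmp_0
# b = __tmp_0
```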
Visit assignment node with at least one compound target. | def visit_compound_assign(self, node):
# Determine number of values (arity) of compound assignment.
nvalues = { len(target.elts) for target in node.targets
if is_sequence_node(target) }
if len(nvalues) > 1:
# A multiple, compound assignment with different arities, e.g.,
# `x,y = a,b,c = ...` is not a syntax error in Python, though it
# probably should be because it's guaranteed to cause a runtime
# error. Raise the error here, since we cannot proceed.
raise SyntaxError("Multiple assignment with different arities")
nvalues = nvalues.pop()
# Assign temporary variables.
temps = [ gensym() for i in range(nvalues) ]
stmts = []
if is_sequence_node(node.value) and len(node.value.elts) == nvalues:
# Special case: RHS is sequence literal of correct length.
for i in range(nvalues):
temp_target = to_name(temps[i], ast.Store())
stmts.append(ast.Assign([temp_target], node.value.elts[i]))
else:
# General case.
temp_target = to_tuple(
(to_name(temp, ast.Store()) for temp in temps), ast.Store())
stmts.append(ast.Assign([temp_target], node.value))
# Rewrite assignments as sequence of assignments.
for target in reversed(node.targets):
if is_sequence_node(target):
stmts.extend(ast.Assign([target.elts[i]], to_name(temps[i]))
for i in range(nvalues))
else:
temp_tuple = to_tuple(to_name(temp) for temp in temps)
stmts.append(ast.Assign([target], temp_tuple))
return stmts | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def visit_Assign(self, node):\n self.generic_visit(node)\n is_multiple = len(node.targets) > 1\n is_compound = any(map(is_sequence_node, node.targets))\n is_simple = not is_compound\n if is_simple and is_multiple:\n return self.visit_simple_assign(node)\n elif is_compound and (is_multiple or is_sequence_node(node.value)):\n return self.visit_compound_assign(node)\n return node",
"def single_assignment_handler(target, value, assign_stmts, node, id_str):\n #print(\"single_assignment_handler\")\n\n temp_stmts = core_language.create_Assign(target, value)\n if hasattr(node, 'lineno'):\n temp_stmts.lineno = node.lineno\n temp_stmts.col_offset = node.col_offset\n\n assign_stmts.append(temp_stmts)\n return False",
"def visit_Assign(self, node):\n assign_stmts = []\n value = node.value\n reversed_targets = node.targets\n reversed_targets.reverse()\n assign_stmts.append(stypy_functions.create_blank_line())\n if len(reversed_targets) > 1:\n assign_stmts.append(\n stypy_functions.create_src_comment(\n \"Multiple assignment of {0} elements.\".format(len(reversed_targets))))\n else:\n if hasattr(node, 'lineno'):\n assign_stmts.append(stypy_functions.create_src_comment(\n \"Assigning a {1} to a {0} (line {2}):\".format(type(reversed_targets[0]).__name__,\n type(value).__name__, node.lineno)))\n else:\n assign_stmts.append(stypy_functions.create_src_comment(\n \"Assigning a {1} to a {0}:\".format(type(reversed_targets[0]).__name__,\n type(value).__name__)))\n for assign_num in xrange(len(reversed_targets)):\n target = reversed_targets[assign_num]\n # Function guard is true? execute handler\n for handler_func_guard_tuple in self.__assignment_handlers:\n if handler_func_guard_tuple[0](target, value):\n id_str, handler_func = handler_func_guard_tuple[1]\n self.performed_transformations |= handler_func(target, value, assign_stmts, node, id_str)\n assign_stmts = stypy_functions.flatten_lists(assign_stmts)\n value = target\n break\n\n if len(assign_stmts) > 0:\n return assign_stmts\n return node",
"def visit_assign(self: Parser, node: doc.Assign) -> None:\n if len(node.targets) != 1:\n self.report_error(node, \"Consequential assignments like 'a = b = c' are not supported.\")\n lhs = node.targets[0]\n\n if isinstance(node.value, doc.Subscript):\n check_slices = []\n if isinstance(node.value.slice, doc.Slice):\n check_slices = [node.value.slice]\n elif isinstance(node.value.slice, doc.Tuple):\n for p in node.value.slice.elts:\n if isinstance(p, doc.Slice):\n check_slices.append(p)\n for s in check_slices:\n if not s.step and s.upper and s.lower:\n s.step = doc.Constant(\n 1,\n None,\n 1,\n 1,\n s.upper.lineno,\n s.upper.end_col_offset + 1,\n s.upper.lineno,\n s.upper.end_col_offset + 2,\n )\n\n rhs = self.eval_expr(node.value)\n if isinstance(lhs, doc.Subscript):\n if isinstance(lhs.slice, doc.Tuple):\n indices = []\n for index in lhs.slice.elts:\n indices.append(self.eval_expr(index))\n else:\n indices = self.eval_expr(lhs.slice)\n T.buffer_store(self.eval_expr(lhs.value), rhs, indices)\n else:\n self.eval_assign(target=lhs, source=rhs, bind_value=bind_assign_value)",
"def visit_any_assign(self, node: types.AnyAssign) -> None:\n self._check_slots(node)\n self.generic_visit(node)",
"def visit_Assign(self, node):\n self.generic_visit(node)\n target = get_single_target(node)\n if isinstance(target, ast.Subscript):\n fun = to_attribute(self.operator, 'setitem')\n args = [target.value, self.index_to_expr(target.slice), node.value]\n return ast.Expr(to_call(fun, args))\n return node",
"def _Assign(self, t):\n if len(t.targets) > 1:\n self.RaiseError(t, \"Assignment to multiple targets not supported\")\n if not isinstance(t.targets[0], ast.Name):\n self.RaiseError(t, \"Assignment to complex expressions not supported\")\n self.fill()\n # check if target exists in locals\n if t.targets[0].id not in self._locals :\n self.write(\"auto \")\n self._locals.append(t.targets[0].id)\n self.dispatch(t.targets[0])\n self.write(\" = \")\n self.dispatch(t.value)\n self.write(\";\")",
"def visit_simple_assign(self, node):\n temp = gensym()\n temp_target = to_name(temp, ast.Store())\n stmts = [ ast.Assign([temp_target], node.value) ]\n stmts += [ ast.Assign([target], to_name(temp))\n for target in node.targets ]\n return stmts",
"def _analyse_stmt_Assign(self, statement: ast.Assign, *, next: CFNode) -> CFNode:\n return self._ast_node(statement, next=next, error=self._raise)",
"def multiple_value_call_assignment_handler(target, value, assign_stmts, node, id_str):\n #print(\"multiple_value_call_assignment_handler\")\n target_stmts, value_var = stypy_functions.create_temp_Assign(value, node.lineno, node.col_offset,\n \"{0}_assignment\".format(id_str))\n assign_stmts.append(target_stmts)\n\n #value_var_to_load = copy.deepcopy(value_var)\n value_var_to_load = ast.Name()\n value_var_to_load.col_offset = value_var.col_offset\n value_var_to_load.lineno = value_var.lineno\n value_var_to_load.id = value_var.id\n value_var_to_load.ctx = ast.Load()\n\n for i in xrange(len(target.elts)):\n # Assign values to each element.\n # getitem_att = core_language.create_attribute(value_var_to_load, '__getitem__', context=ast.Load(),\n # line=node.lineno,\n # column=node.col_offset)\n # item_call = functions.create_call(getitem_att, [core_language.create_num(i, node.lineno, node.col_offset)])\n # temp_stmts, temp_value = stypy_functions.create_temp_Assign(item_call, node.lineno, node.col_offset,\n # \"{0}_assignment\".format(id_str))\n stypy_interface = core_language.create_Name('stypy_interface')\n get_tuple_call = core_language.create_attribute(stypy_interface, 'stypy_get_value_from_tuple', context=ast.Load(),\n line=node.lineno,\n column=node.col_offset)\n\n item_call = functions.create_call(get_tuple_call, [value_var_to_load,\n core_language.create_num(len(target.elts), node.lineno, node.col_offset),\n core_language.create_num(i, node.lineno, node.col_offset)])\n temp_stmts, temp_value = stypy_functions.create_temp_Assign(item_call, node.lineno, node.col_offset,\n \"{0}_assignment\".format(id_str))\n if hasattr(node, 'lineno'):\n temp_stmts.lineno = node.lineno\n temp_stmts.col_offset = node.col_offset\n\n assign_stmts.append(temp_stmts)\n\n temp_stmts = core_language.create_Assign(target.elts[i], temp_value)\n if hasattr(node, 'lineno'):\n temp_stmts.lineno = node.lineno\n temp_stmts.col_offset = node.col_offset\n\n assign_stmts.append(temp_stmts)\n\n return True",
"def _analyse_stmt_AnnAssign(\n self, statement: ast.AnnAssign, *, next: CFNode\n ) -> CFNode:\n return self._ast_node(statement, next=next, error=self._raise)",
"def visit_Assign(self, node):\n self.generic_visit(node)\n target = get_single_target(node)\n if isinstance(target, ast.Attribute):\n args = [ target.value, ast.Str(target.attr), node.value ]\n return ast.Expr(to_call(to_name('setattr'), args))\n return node",
"def visit_Assign(self, node: ast.Assign) -> None:\n # skip multiple assignments\n if len(node.targets) != 1:\n return\n\n # skip complex assignments\n if not isinstance(node.targets[0], ast.Name):\n return\n\n name = node.targets[0].id\n\n # skip private attributes\n if name.startswith(\"_\"):\n return\n\n self.attribute_nodes.append(node)",
"def _AugAssign(self, t):\n if not isinstance(t.target, ast.Name):\n self.RaiseError(t, \"Augmented assignment to complex expressions not supported\")\n # check if target exists in locals\n if t.target.id not in self._locals :\n self.RaiseError(t, \"Augmented assignment not permitted on variables not already assigned previously\")\n self.fill()\n self.dispatch(t.target)\n self.write(\" \"+self.binop[t.op.__class__.__name__]+\"= \")\n self.dispatch(t.value)\n self.write(\";\")",
"def visit_AugAssign(self, node):\n target = node.target\n\n rhs_target = copy.deepcopy(target)\n rhs_target.ctx = ast.Load()\n ast.fix_missing_locations(rhs_target)\n\n bin_op = ast.BinOp(rhs_target, node.op, node.value)\n assignment = ast.Assign([target], bin_op)\n assignment.inplace_op = node.op\n return self.visit(assignment)",
"def _process_assign(self, node: ast.Assign) -> None:\n if isinstance(node.value, ast.Call) and self._is_export_call(\n node.value.func\n ):\n # id = tf_export(...)(...)\n if len(node.targets) != 1:\n raise BadExportError(\n f'{self._current_file}:{node.lineno} export must be'\n f' assigned to a single value: {ast.dump(node)}'\n )\n symbol = self._name(node.targets[0])\n if not symbol:\n raise BadExportError(\n f'{self._current_file}:{node.lineno} export must be'\n f' assigned to a single value: {ast.dump(node)}'\n )\n self._add_exported_symbol(node.value.func, symbol)\n else:\n self.visit(node)",
"def visit_Assign(self, node):\n self.generic_visit(node)\n\n if node.col_offset == 0:\n mnode = ast.parse(\"\")\n mnode.body = [node]\n mnode = ast.fix_missing_locations(mnode)\n code = compile(mnode, \"<ast>\", \"exec\")\n try:\n exec(code, self.globals_)\n except Exception:\n pass\n self.globals_.pop(\"__builtins__\", None)\n self.globals_.pop(\"builtins\", None)",
"def _is_assignment(node: cst.CSTNode, assignment_node: cst.CSTNode) -> bool:\n if node is assignment_node:\n return True\n if isinstance(assignment_node, (cst.Import, cst.ImportFrom)):\n aliases = assignment_node.names\n if isinstance(aliases, cst.ImportStar):\n return False\n for alias in aliases:\n if alias.name is node:\n return True\n asname = alias.asname\n if asname is not None:\n if asname.name is node:\n return True\n return False",
"def _ok(self, assignment_graph, source, value, target):\n target_values = assignment_graph[target]\n return len(target_values - set([value])) > 0",
"def visit_AugAssign(self, node):\n self.generic_visit(node)\n stmts = []\n target = node.target\n if not isinstance(target, ast.Subscript):\n return node\n\n # AST node for target value, gensym-ed if necessary.\n if self.can_reevaluate(target.value):\n target_node = target.value\n else:\n target_node = to_name(gensym())\n stmts.append(ast.Assign(\n [set_ctx(target_node, ast.Store())], target.value))\n \n # AST node for index, gensym-ed if necessary.\n index_expr = self.index_to_expr(target.slice)\n if self.can_reevaluate(index_expr):\n index_node = index_expr\n else:\n index_node = to_name(gensym())\n stmts.append(ast.Assign(\n [set_ctx(index_node, ast.Store())], index_expr))\n \n # Main AST node for the indexed augemented assignment.\n stmts.append(ast.Expr(\n to_call(to_attribute(self.operator, 'setitem'), [\n target_node,\n index_node,\n to_call(self.op_to_function(node.op), [\n to_call(to_attribute(self.operator, 'getitem'), [\n target_node,\n index_node,\n ]),\n node.value\n ])\n ])\n ))\n\n return stmts",
"def iterassign(node:_AssingT) -> Iterator[Optional[List[str]]]:\n for target in node.targets if isinstance(node, ast.Assign) else [node.target]:\n dottedname = node2dottedname(target) \n yield dottedname",
"def convert_assign(g, op, block):\n\n out = g.get_node(op.input(\"X\")[0])\n g.add_node(op.output(\"Out\")[0], out)",
"def _analyse_stmt_AugAssign(\n self, statement: ast.AugAssign, *, next: CFNode\n ) -> CFNode:\n return self._ast_node(statement, next=next, error=self._raise)",
"def is_assign_to_name(statement):\n return isinstance(statement, ast.Assign) and \\\n len(statement.targets) == 1 and \\\n isinstance(statement.targets[0], ast.Name)",
"def process_assignment_ast(stmt_ast: ast.Assign, stmt_ast_parent_block):\n logger.log.info(\"Generating SymbolicState instance from assignment ast\")\n # first, add a reference from stmt_ast to its parent block\n stmt_ast.parent_block = stmt_ast_parent_block\n logger.log.info(\"Instantiating symbolic state for AST instance stmt_ast = %s\" % stmt_ast)\n # determine the program variables assigned on the left-hand-side\n targets: list = stmt_ast.targets\n # extract names - for now just care about normal program variables, not attributes or functions\n logger.log.info(\"Extracting list of assignment target names\")\n target_names: list = []\n for target in targets:\n target_names += extract_symbol_names_from_target(target)\n logger.log.info(\"List of all program variables changed is %s\" % target_names)\n # extract function names\n assigned_value = stmt_ast.value\n function_names: list = extract_function_names(assigned_value)\n logger.log.info(\"List of all program functions called is %s\" % function_names)\n # merge the two lists of symbols\n logger.log.info(\"Merging lists of assignment target names and function names\")\n all_symbols: list = target_names + function_names\n logger.log.info(\"List of all symbols to mark as changed in the symbolic state is %s\" % all_symbols)\n # set up a SymbolicState instance\n logger.log.info(\"Instantiating new StatementSymbolicState instance with all_symbols = %s\" % all_symbols)\n symbolic_state: SymbolicState = StatementSymbolicState(all_symbols, stmt_ast)\n return symbolic_state",
"def visit_ann_assign(self: Parser, node: doc.AnnAssign) -> None:\n lhs = node.target\n rhs = self.eval_expr(node.value)\n ann_var = self.visit_tvm_annotation(node.annotation)\n if not isinstance(ann_var, Var):\n self.report_error(node.annotation, \"Annotation should be Var\")\n self.eval_assign(target=lhs, source=ann_var, bind_value=bind_assign_value)\n frame = T.LetStmt(rhs, var=ann_var)\n frame.add_callback(partial(frame.__exit__, None, None, None))\n frame.__enter__()",
"def _scan_declarative_assignment_stmt(\n cls: ClassDef,\n api: SemanticAnalyzerPluginInterface,\n stmt: AssignmentStmt,\n attributes: List[util.SQLAlchemyAttribute],\n) -> None:\n lvalue = stmt.lvalues[0]\n if not isinstance(lvalue, NameExpr):\n return\n\n sym = cls.info.names.get(lvalue.name)\n\n # this establishes that semantic analysis has taken place, which\n # means the nodes are populated and we are called from an appropriate\n # hook.\n assert sym is not None\n node = sym.node\n\n if isinstance(node, PlaceholderNode):\n return\n\n assert node is lvalue.node\n assert isinstance(node, Var)\n\n if node.name == \"__abstract__\":\n if api.parse_bool(stmt.rvalue) is True:\n util.set_is_base(cls.info)\n return\n elif node.name == \"__tablename__\":\n util.set_has_table(cls.info)\n elif node.name.startswith(\"__\"):\n return\n elif node.name == \"_mypy_mapped_attrs\":\n if not isinstance(stmt.rvalue, ListExpr):\n util.fail(api, \"_mypy_mapped_attrs is expected to be a list\", stmt)\n else:\n for item in stmt.rvalue.items:\n if isinstance(item, (NameExpr, StrExpr)):\n apply.apply_mypy_mapped_attr(cls, api, item, attributes)\n\n left_hand_mapped_type: Optional[Type] = None\n left_hand_explicit_type: Optional[ProperType] = None\n\n if node.is_inferred or node.type is None:\n if isinstance(stmt.type, UnboundType):\n # look for an explicit Mapped[] type annotation on the left\n # side with nothing on the right\n\n # print(stmt.type)\n # Mapped?[Optional?[A?]]\n\n left_hand_explicit_type = stmt.type\n\n if stmt.type.name == \"Mapped\":\n mapped_sym = api.lookup_qualified(\"Mapped\", cls)\n if (\n mapped_sym is not None\n and mapped_sym.node is not None\n and names.type_id_for_named_node(mapped_sym.node)\n is names.MAPPED\n ):\n left_hand_explicit_type = get_proper_type(\n stmt.type.args[0]\n )\n left_hand_mapped_type = stmt.type\n\n # TODO: do we need to convert from unbound for this case?\n # left_hand_explicit_type = util._unbound_to_instance(\n # api, left_hand_explicit_type\n # )\n else:\n node_type = get_proper_type(node.type)\n if (\n isinstance(node_type, Instance)\n and names.type_id_for_named_node(node_type.type) is names.MAPPED\n ):\n # print(node.type)\n # sqlalchemy.orm.attributes.Mapped[<python type>]\n left_hand_explicit_type = get_proper_type(node_type.args[0])\n left_hand_mapped_type = node_type\n else:\n # print(node.type)\n # <python type>\n left_hand_explicit_type = node_type\n left_hand_mapped_type = None\n\n if isinstance(stmt.rvalue, TempNode) and left_hand_mapped_type is not None:\n # annotation without assignment and Mapped is present\n # as type annotation\n # equivalent to using _infer_type_from_left_hand_type_only.\n\n python_type_for_type = left_hand_explicit_type\n elif isinstance(stmt.rvalue, CallExpr) and isinstance(\n stmt.rvalue.callee, RefExpr\n ):\n python_type_for_type = infer.infer_type_from_right_hand_nameexpr(\n api, stmt, node, left_hand_explicit_type, stmt.rvalue.callee\n )\n\n if python_type_for_type is None:\n return\n\n else:\n return\n\n assert python_type_for_type is not None\n\n attributes.append(\n util.SQLAlchemyAttribute(\n name=node.name,\n line=stmt.line,\n column=stmt.column,\n typ=python_type_for_type,\n info=cls.info,\n )\n )\n\n apply.apply_type_to_mapped_statement(\n api,\n stmt,\n lvalue,\n left_hand_explicit_type,\n python_type_for_type,\n )",
"def test_obj_action_for_assignments():\n grammar = r\"\"\"\n S: a=\"foo\" b?=\"bar\" c=C+;\n C: val=\"baz\";\n \"\"\"\n\n g = Grammar.from_string(grammar)\n p = Parser(g)\n\n result = p.parse(\"foo bar baz baz baz\")\n\n assert isinstance(result, g.classes['S'])\n assert isinstance(result.c[0], g.classes['C'])\n\n assert result.a == \"foo\"\n assert result.b is True\n assert len(result.c) == 3\n assert all((c.val == \"baz\" for c in result.c))",
"def visit_aug_assign(self: Parser, node: doc.AugAssign) -> None:\n lhs_pos = (\n node.target.lineno,\n node.target.col_offset,\n node.target.end_lineno,\n node.target.end_col_offset,\n )\n rhs_pos = (\n node.value.lineno,\n node.value.col_offset,\n node.value.end_lineno,\n node.value.end_col_offset,\n )\n node.target.ctx = doc.Load(*lhs_pos)\n with self.var_table.with_frame():\n lhs_name = \"__tvm_tmp_value_aug_assign_lhs\"\n rhs_name = \"__tvm_tmp_value_aug_assign_rhs\"\n lhs_expr = self.eval_expr(node.target)\n rhs_expr = self.eval_expr(node.value)\n self.var_table.add(lhs_name, lhs_expr)\n self.var_table.add(rhs_name, rhs_expr)\n op = doc.BinOp(\n doc.Name(lhs_name, doc.Load(*lhs_pos), *lhs_pos),\n node.op,\n doc.Name(rhs_name, doc.Load(*rhs_pos), *rhs_pos),\n *lhs_pos,\n )\n rhs = self.eval_expr(op)\n lhs = node.target\n lhs.ctx = doc.Store(*lhs_pos)\n if isinstance(lhs, doc.Subscript):\n if isinstance(lhs.slice, doc.Tuple):\n indices = []\n for index in lhs.slice.elts:\n indices.append(self.eval_expr(index))\n else:\n indices = [self.eval_expr(lhs.slice)]\n T.buffer_store(self.eval_expr(lhs.value), rhs, indices)\n else:\n self.eval_assign(target=lhs, source=rhs, bind_value=bind_assign_value)",
"def Assignment(self):\n id = self.primary()\n if self.currtok[1].name == \"DECLERATION\":\n self.currtok = next(self.tg)\n if self.functions.get(self.currtok[0]) is not None:\n\n express = self.FunctionCall()\n return assignmentStmt(id, express)\n else:\n express = self.Expression()\n\n if self.currtok[1].name == \"SEMI\":\n self.currtok = next(self.tg)\n return assignmentStmt(id, express)\n raise SLUCSyntaxError(\"ERROR: Missing Semicolon on line {0}\".format(str(self.currtok[2] - 1)))\n raise SLUCSyntaxError(\"ERROR: Missing assignment on line {0}\".format(str(self.currtok[2] - 1)))"
] | [
"0.7261902",
"0.6757224",
"0.64843816",
"0.63659924",
"0.624103",
"0.62127984",
"0.6177989",
"0.61282015",
"0.60786444",
"0.604624",
"0.60448",
"0.5942896",
"0.59183824",
"0.5750465",
"0.57303625",
"0.5648947",
"0.55960566",
"0.5571266",
"0.5543112",
"0.5511169",
"0.5493238",
"0.54690856",
"0.54233736",
"0.5397378",
"0.537382",
"0.53426856",
"0.5236439",
"0.5229051",
"0.52111745",
"0.51824206"
] | 0.7176904 | 1 |
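Note: the positive document in the record above flattens compound/multiple assignment into single assignments through temporaries, using project helpers (`gensym`, `to_name`, `to_tuple`) that are not defined in this dump. Below is a rough, self-contained sketch of the same general-case idea using only the standard `ast` module; the `TupleAssignExpander` class and the `__tmpN` naming are illustrative assumptions, not the project's API, and the record's special case (a literal sequence of matching arity on the right-hand side) is deliberately omitted.

```python
import ast

class TupleAssignExpander(ast.NodeTransformer):
    """Illustrative sketch: split `a, b = <rhs>` into temp-based single assignments."""

    def __init__(self):
        self._counter = 0

    def _fresh(self):
        # Stand-in for the gensym() helper used in the record above (name is made up).
        self._counter += 1
        return f"__tmp{self._counter}"

    def visit_Assign(self, node):
        self.generic_visit(node)
        # Only the simple shape `<tuple target> = <rhs>` is handled here.
        if len(node.targets) != 1 or not isinstance(node.targets[0], ast.Tuple):
            return node
        target = node.targets[0]
        temps = [self._fresh() for _ in target.elts]
        temp_stores = ast.Tuple(
            elts=[ast.Name(id=t, ctx=ast.Store()) for t in temps], ctx=ast.Store())
        # __tmp1, __tmp2 = <rhs>   followed by   a = __tmp1; b = __tmp2
        stmts = [ast.Assign(targets=[temp_stores], value=node.value)]
        stmts += [ast.Assign(targets=[elt], value=ast.Name(id=t, ctx=ast.Load()))
                  for elt, t in zip(target.elts, temps)]
        return stmts

tree = ast.fix_missing_locations(TupleAssignExpander().visit(ast.parse("a, b = b, a")))
ns = {"a": 1, "b": 2}
exec(compile(tree, "<rewritten>", "exec"), ns)
assert (ns["a"], ns["b"]) == (2, 1)   # the flattened form still performs the swap
```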
Convert assignment to attributes to `setattr` call. | def visit_Assign(self, node):
self.generic_visit(node)
target = get_single_target(node)
if isinstance(target, ast.Attribute):
args = [ target.value, ast.Str(target.attr), node.value ]
return ast.Expr(to_call(to_name('setattr'), args))
return node | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __setattr__(*args):",
"def __setattr__(*args):",
"def __setattr__(*args):",
"def __setattr__(*args):",
"def __setattr__(*args):",
"def __setattr__(*args):",
"def __setattr__(*args):",
"def __setattr__(*args):",
"def __setattr__(*args):",
"def __setattr__(*args):",
"def __setattr__(*args):",
"def __setattr__(*args):",
"def __setattr__(*args):",
"def __setattr__(*args):",
"def __setattr__(*args):",
"def __setattr__(*args):",
"def __setattr__(*args):",
"def __setattr__(*args):",
"def __setattr__(*args):",
"def __setattr__(self, attr, value):",
"def set_attributes(object, attributes):\n for name, attribute in attributes.items():\n setattr(object, name, attribute)",
"def __setattr__ (self, attr, value):\n self.set_value (attr, value)",
"def __setattr__(*args, **kwargs):\n \n pass",
"def __setattr__(*args, **kwargs):\n \n pass",
"def __setattr__(*args, **kwargs): # real signature unknown\n pass",
"def __setattr__(*args, **kwargs): # real signature unknown\n pass",
"def __setattr__(*args, **kwargs): # real signature unknown\n pass",
"def __setattr__(*args, **kwargs): # real signature unknown\n pass",
"def __setattr__(*args, **kwargs): # real signature unknown\n pass",
"def __setattr__(*args, **kwargs): # real signature unknown\n pass"
] | [
"0.65005827",
"0.65005827",
"0.65005827",
"0.65005827",
"0.65005827",
"0.65005827",
"0.65005827",
"0.65005827",
"0.65005827",
"0.65005827",
"0.65005827",
"0.65005827",
"0.65005827",
"0.65005827",
"0.65005827",
"0.65005827",
"0.65005827",
"0.65005827",
"0.65005827",
"0.64025",
"0.6343958",
"0.6256444",
"0.62069863",
"0.62069863",
"0.62035817",
"0.62035817",
"0.62035817",
"0.62035817",
"0.62035817",
"0.62035817"
] | 0.708953 | 0 |
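Note: a minimal, self-contained version of the `setattr` rewrite shown in the record above, assuming only the standard library. It uses `ast.Constant` where the record uses the older `ast.Str`, and the `AttrAssignToSetattr` and `Point` names are made up for the demonstration.

```python
import ast

class AttrAssignToSetattr(ast.NodeTransformer):
    """Illustrative sketch: turn `obj.attr = value` into `setattr(obj, 'attr', value)`."""

    def visit_Assign(self, node):
        self.generic_visit(node)
        if len(node.targets) == 1 and isinstance(node.targets[0], ast.Attribute):
            target = node.targets[0]
            call = ast.Call(
                func=ast.Name(id="setattr", ctx=ast.Load()),
                # ast.Constant replaces the deprecated ast.Str used in the record above.
                args=[target.value, ast.Constant(value=target.attr), node.value],
                keywords=[])
            return ast.Expr(value=call)
        return node

class Point:
    pass

tree = ast.fix_missing_locations(AttrAssignToSetattr().visit(ast.parse("p.x = 3")))
ns = {"p": Point()}
exec(compile(tree, "<rewritten>", "exec"), ns)
assert ns["p"].x == 3   # the attribute assignment now goes through setattr()
```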
Convert index (slice) to functional expression. | def index_to_expr(self, index):
if isinstance(index, ast.Index):
return index.value
elif isinstance(index, ast.Slice):
if index.lower is None and index.step is None:
args = [ index.upper ]
elif index.step is None:
args = [ index.lower, index.upper ]
else:
args = [ index.lower, index.upper, index.step ]
args = [ to_name_constant(None) if arg is None else arg
for arg in args ]
return to_call(to_name('slice'), args)
elif isinstance(index, ast.ExtSlice):
indexes = list(map(self.index_to_expr, index.dims))
return ast.Tuple(elts=indexes, ctx=ast.Load())
elif isinstance(index, ast.Tuple):
elts = list(map(self.index_to_expr, index.elts))
return ast.Tuple(elts=elts, ctx=ast.Load())
else:
return index | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __getitem__(self, index: Any) -> ColumnOperators:\n return self.operate(getitem, index)",
"def special_slice(self, form):\n obj = self.reallyCompile(form[1])\n rest = form[2:]\n if len(rest) == 1:\n return ast.Subscript(obj, 'OP_APPLY', [self.reallyCompile(rest[0])])\n elif len(rest) == 2:\n return ast.Slice(obj, 'OP_APPLY', *self.compileForms(rest))\n elif len(rest) == 3:\n return ast.Subscript(obj, 'OP_APPLY', [ast.Sliceobj(self.compileForms(rest))])\n else:\n raise SyntaxError(\"Too many thingies to slice! %r\" % rest)",
"def to_slice(self):\n return np.index_exp[self.start[2]:self.end[2], #\n self.start[1]:self.end[1], #\n self.start[0]:self.end[0]]",
"def __getitem__(self, index):\n if isinstance(index, slice):\n return Vetor(self.elem[index])\n else:\n return self.elem[index]",
"def convert_index_select(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n index = g.get_node(op.input(\"Index\")[0])\n axis = op.attr(\"dim\")\n out = _op.transform.take(x, index, axis, mode=\"wrap\")\n g.add_node(op.output(\"Out\")[0], out)",
"def apply_slice(*, value : Any, slice : slice) -> Any:\n return value[slice]",
"def __getitem__(self, index):\n # type: (int) -> Any\n items = list.__getitem__(self, index)\n return type(self)(self._values(items)) if isinstance(index, slice) else self.value(items)",
"def __getitem__(self, idx):\n if not isinstance(idx, (slice, numbers.Integral)):\n raise ValueError('Index indices must be integers')\n if isinstance(idx, slice):\n if idx.step not in (None, 1):\n raise IndexError('Index does not support variable stepping')\n s, e = None, None\n if idx.start is not None:\n s = idx.start\n if s < 0:\n s += len(self)\n s = self.lookup(s)\n if idx.stop is not None:\n e = idx.stop\n if e >= len(self):\n e = None\n else:\n e = self.lookup(e)\n idx = slice(s, e)\n else:\n idx = self.lookup(idx)\n return self.src[idx]",
"def map(self, index):\n\n\t\tif type(index) is int:\n\t\t\tif index < 0:\n\t\t\t\tindex = self.end - (-index - 1)\n\n\t\t\tif self & index != index:\n\t\t\t\traise IndexError(f\"{index!r}: out of bounds\")\n\n\t\t\treturn index - self.start\n\t\telif type(index) is span:\n\t\t\tif self & index != index:\n\t\t\t\traise IndexError(f\"{index!r}: out of bounds\")\n\n\t\t\tstop = self.map(index.end) + 1\n\t\t\treturn slice(self.map(index.start), None if stop == len(self) else stop)\n\t\telif type(index) is slice:\n\t\t\tstop = self.map(index.stop if index.stop is not None else self.end) + 1\n\t\t\treturn slice(self.map(index.start if index.start is not None else self.start), None if stop == len(self) else stop)\n\t\telse:\n\t\t\ttry:\n\t\t\t\treturn self.map(index.__index__())\n\t\t\texcept:\n\t\t\t\traise ValueError(f\"{index!r}: bad index\")",
"def _read_index_slice(self, *args, **kwargs): # real signature unknown\n pass",
"def test_indexed_stencil(self, expr, result):\n j, l = dimify('j l')\n a = symbol(name='a', dimensions=(j, l), value=0., mode='indexed').base\n fa = a.function\n b = symbol(name='b', dimensions=(j, l), value=2., mode='indexed').base\n fb = b.function\n\n eqn = eval(expr)\n Operator(eqn)(fa, fb)\n assert np.allclose(fa.data[1:-1, 1:-1], result[1:-1, 1:-1], rtol=1e-12)",
"def map(self, index):\n\n\t\tif type(index) is int:\n\t\t\tif index < 0:\n\t\t\t\tindex = self.start - (-index - 1)\n\n\t\t\tif self & index != index:\n\t\t\t\traise IndexError(f\"{index!r}: out of bounds\")\n\n\t\t\treturn self.start - index\n\t\telif type(index) is rspan:\n\t\t\tif self & index != index:\n\t\t\t\traise IndexError(f\"{index!r}: out of bounds\")\n\n\t\t\tstop = self.map(index.end) + 1\n\t\t\treturn slice(self.map(index.start), None if stop == len(self) else stop)\n\t\telif type(index) is slice:\n\t\t\tstop = self.map(index.stop if index.stop is not None else self.end) + 1\n\t\t\treturn slice(self.map(index.start if index.start is not None else self.start), None if stop == len(self) else stop)\n\t\telse:\n\t\t\ttry:\n\t\t\t\treturn self.map(index.__index__())\n\t\t\texcept:\n\t\t\t\traise ValueError(f\"{index!r}: bad index\")\n\n\t\traise ValueError(f\"{index!r}: bad index\")",
"def create(self, index):\n return self._operator_generator(index, -1.0)",
"def __getitem__(sliceOrIdentifier):",
"def unstacked_index(size, index):\n return index % size, index // size",
"def indexer(expression, stream):\n def throw(node, item):\n raise TypeError(\n 'cannot index {} with {}'.format(\n node.__class__.__name__,\n item.__class__.__name__,\n )\n )\n\n def mkint(expression):\n if expression.data == 'integer':\n return int(expression.children[0])\n elif expression.data == 'float':\n idx = float(expression.children[0])\n if not idx.is_integer():\n idx = int(idx) + 1\n return idx\n else:\n assert False, 'bad number expression {}'.format(\n expression\n )\n\n def mkslice(expression):\n s, e = None, None\n for idx in expression.children:\n if idx.data == 'start':\n s = mkint(idx.children[0])\n elif idx.data == 'end':\n e = mkint(idx.children[0])\n yield slice(s, e)\n\n def mkindex(expression):\n if expression.data == 'expression':\n return evaluate(expression, stream)\n elif expression.data == 'slice':\n return mkslice(expression)\n elif expression.data == 'cname':\n return expression.children\n elif expression.data == 'string':\n return [expression.children[0][1:-1]]\n elif expression.data in ('integer', 'float'):\n return [mkint(expression)]\n else:\n assert False, 'bad index expression {}'.format(expression)\n\n for item in mkindex(expression.children[0]):\n for node in stream:\n if isinstance(node, Object):\n if isinstance(item, Primitive):\n item = str(item)[1:-1]\n if isinstance(item, basestring):\n yield node.get(item, null)\n continue\n\n if isinstance(node, List):\n if isinstance(item, Primitive):\n item = int(str(item))\n if isinstance(item, (int, slice)):\n try:\n yield node[item]\n except IndexError:\n yield null\n continue\n\n if not optional(expression):\n throw(node, item)",
"def reconstruct_input(self, ix):",
"def _fix_slice(self, inputs, new_attr):\n begin = new_attr.get('begin')\n end = new_attr.get('end')\n axes = new_attr.get('axis', tuple(range(len(begin))))\n slice_op = mx.sym.slice_axis(inputs[0], axis=axes[0], begin=begin[0], end=end[0])\n if len(axes) > 1:\n for i, axis in enumerate(axes):\n slice_op = mx.sym.slice_axis(slice_op, axis=axis, begin=begin[i], end=end[i])\n return slice_op",
"def __getslice__(self, i, j):\n return self.__getitem__(slice(i,j))",
"def __getslice__(self,i,j):\n return self.x[i:j]",
"def __getslice__(self, i, j):\n return self.__getitem__(slice(i, j))",
"def select(index, *decorators):\n def wrapped(*args, **kwargs):\n return decorators[int(index)](*args, **kwargs)\n return wrapped",
"def expr(self):\n\n args = []\n for i in self.indices:\n args.extend((i.j, i.m))\n return Wigner3j(*args)",
"def get_slice(x, indices):\n return x[indices]",
"def py__simple_getitem__(self, index):\n if isinstance(index, slice):\n return ValueSet([self])\n else:\n with reraise_getitem_errors(TypeError, KeyError, IndexError):\n node = self.get_tree_entries()[index]\n return self._defining_context.infer_node(node)",
"def _get_slice(index, axis, num_axes):\n idx = [slice(None)] * num_axes\n idx[axis] = index\n return tuple(idx)",
"def __getitem__(self, index):\n if isinstance(index, slice):\n return TokenList(self.token_list[index.start:index.stop:index.step])\n if index < 0: # Handle negative indices.\n index += len(self)\n return self.token_list[index]",
"def __getitem__(self, idx):\n if not isinstance(idx, slice):\n return self._fetch()[idx]\n return self._fetch()[idx.start:idx.stop]",
"def _slice(tensor, size, i):\n return tensor[:, i * size : (i + 1) * size]",
"def _processUnhashableIndex(self, idx):\n from pyomo.core.expr import current as EXPR\n #\n # Iterate through the index and look for slices and constant\n # components\n #\n fixed = {}\n sliced = {}\n ellipsis = None\n _found_numeric = False\n #\n # Setup the slice template (in fixed)\n #\n if normalize_index.flatten:\n idx = normalize_index(idx)\n if idx.__class__ is not tuple:\n idx = (idx,)\n\n for i,val in enumerate(idx):\n if type(val) is slice:\n if val.start is not None or val.stop is not None:\n raise IndexError(\n \"Indexed components can only be indexed with simple \"\n \"slices: start and stop values are not allowed.\")\n if val.step is not None:\n logger.warning(\n \"DEPRECATION WARNING: The special wildcard slice \"\n \"(::0) is deprecated. Please use an ellipsis (...) \"\n \"to indicate '0 or more' indices\")\n val = Ellipsis\n else:\n if ellipsis is None:\n sliced[i] = val\n else:\n sliced[i-len(idx)] = val\n continue\n\n if val is Ellipsis:\n if ellipsis is not None:\n raise IndexError(\n \"Indexed components can only be indexed with simple \"\n \"slices: the Pyomo wildcard slice (Ellipsis; \"\n \"e.g., '...') can only appear once\")\n ellipsis = i\n continue\n\n if hasattr(val, 'is_expression_type'):\n _num_val = val\n # Attempt to retrieve the numeric value .. if this\n # is a template expression generation, then it\n # should raise a TemplateExpressionError\n try:\n val = EXPR.evaluate_expression(val, constant=True)\n _found_numeric = True\n\n except TemplateExpressionError:\n #\n # The index is a template expression, so return the\n # templatized expression.\n #\n from pyomo.core.expr import current as EXPR\n return EXPR.GetItemExpression(tuple(idx), self)\n\n except EXPR.NonConstantExpressionError:\n #\n # The expression contains an unfixed variable\n #\n raise RuntimeError(\n\"\"\"Error retrieving the value of an indexed item %s:\nindex %s is not a constant value. This is likely not what you meant to\ndo, as if you later change the fixed value of the object this lookup\nwill not change. If you understand the implications of using\nnon-constant values, you can get the current value of the object using\nthe value() function.\"\"\" % ( self.name, i ))\n\n except EXPR.FixedExpressionError:\n #\n # The expression contains a fixed variable\n #\n raise RuntimeError(\n\"\"\"Error retrieving the value of an indexed item %s:\nindex %s is a fixed but not constant value. This is likely not what you\nmeant to do, as if you later change the fixed value of the object this\nlookup will not change. If you understand the implications of using\nfixed but not constant values, you can get the current value using the\nvalue() function.\"\"\" % ( self.name, i ))\n #\n # There are other ways we could get an exception such as\n # evaluating a Param / Var that is not initialized.\n # These exceptions will continue up the call stack.\n #\n\n # verify that the value is hashable\n hash(val)\n if ellipsis is None:\n fixed[i] = val\n else:\n fixed[i - len(idx)] = val\n\n if sliced or ellipsis is not None:\n return _IndexedComponent_slice(self, fixed, sliced, ellipsis)\n elif _found_numeric:\n if len(idx) == 1:\n return fixed[0]\n else:\n return tuple( fixed[i] for i in range(len(idx)) )\n else:\n raise DeveloperError(\n \"Unknown problem encountered when trying to retrieve \"\n \"index for component %s\" % (self.name,) )"
] | [
"0.6217184",
"0.5981041",
"0.5913932",
"0.5875691",
"0.57255584",
"0.56947947",
"0.55419147",
"0.5474115",
"0.5463078",
"0.5445567",
"0.5418668",
"0.53971356",
"0.5378227",
"0.53756636",
"0.5320697",
"0.53108877",
"0.5268785",
"0.52610666",
"0.52600414",
"0.5241331",
"0.5241322",
"0.52279806",
"0.52236474",
"0.52119666",
"0.5209811",
"0.5186041",
"0.51789296",
"0.51757604",
"0.5171783",
"0.5171351"
] | 0.7314768 | 0 |
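Note: the `index_to_expr` helper in the record above turns subscript syntax into ordinary expressions (a `slice(...)` call, or a tuple of them for extended indexing). The runtime equivalence it relies on can be checked directly with the standard `operator` module; the `Echo` class below is a throwaway illustration.

```python
import operator

data = list(range(10))

# x[i] is equivalent to operator.getitem(x, i) ...
assert data[3] == operator.getitem(data, 3)

# ... and x[lo:hi:step] is equivalent to indexing with an explicit slice object,
# which is the expression the rewrite above builds via a call to slice(...).
assert data[1:8:2] == operator.getitem(data, slice(1, 8, 2))

# Extended indexing (e.g. NumPy-style x[:2, 1:]) passes a tuple of slice objects.
class Echo:
    def __getitem__(self, key):
        return key

assert Echo()[:2, 1:] == (slice(None, 2), slice(1, None))
```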
Convert indexed `del` operation to `delitem` call. | def visit_Delete(self, node):
self.generic_visit(node)
target = get_single_target(node)
if isinstance(target, ast.Subscript):
fun = to_attribute(self.operator, 'delitem')
args = [ target.value, self.index_to_expr(target.slice) ]
return ast.Expr(to_call(fun, args))
return node | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __delitem__(self, key):\n if self.__pepth__ != 0:\n return plist.__getattr__(self, '__delitem__')(key)\n try:\n if (isinstance(key, list)\n and plist(key).all(isinstance, int)):\n for k in sorted(key, reverse=True):\n operator.__delitem__(self, k)\n else:\n # Handles slices and ints. Other key types will fail.\n list.__delitem__(self, key)\n except Exception as first_exception:\n try:\n if isinstance(key, list):\n for i, k in enumerate(key):\n operator.__delitem__(self[i], k)\n elif isinstance(key, tuple):\n try:\n for x in self:\n operator.__delitem__(x, key)\n except Exception:\n for x in self:\n for k in key:\n operator.__delitem__(x, k)\n else:\n for x in self:\n operator.__delitem__(x, key)\n except Exception as second_exception:\n raise TypeError('Failed to apply index to self or elements.\\nself exception: %s\\nelements exception: %s' % (str(first_exception), str(second_exception)))\n\n # Allow chaining of set ops when using apply('__delitem__', k) and apply(operators.__delitem__, k)\n return self",
"def __delitem__(self, index: int) -> None:\n error = self._coreIndex.removeDescriptor(index)\n assertError(error)",
"def __delitem__(self, i):\n key = self._main._sequence[i]\n if isinstance(i, types.SliceType):\n for k in key:\n # FIXME: efficiency?\n del self._main[k]\n else:\n del self._main[key]",
"def __delitem__(self, index: Any) -> None:\n del self.contents[index]\n return",
"def __delitem__(self, idx):\n # note that this may result in an empty HSP object, which should be\n # invalid\n del self._items[idx]",
"def __delitem__(self, idx):\n self.pop(idx)",
"def __delitem__(self, index):\n # delete the column\n del self._data[index]\n\n # adjust the number of columns\n self._nrows -= 1",
"def __delitem__(self, key, *args, **kwargs):\n self._del(key, *args, **kwargs)",
"def __delitem__(self, index: int) -> None:\n del self._rows[index]",
"def __delitem__(self, t: Tuple[int, ...]) -> None:\n ...",
"def __delitem__(self, i: int) -> None:\n ...",
"def __delitem__(self, index):\n # If input is a slice then delete all elements as determined\n # by the slice attributes, using an offset to account for the\n # changing size of the list.\n if isinstance(index, slice):\n offset = 0\n for i in xrange(*index.indices(len(self))):\n if i > -(len(self) + 1) or i < len(self):\n del self[i - offset]\n offset += 1\n return\n\n self.__verify_index(index)\n\n if index < 0:\n index += self.length\n\n index, prev_node, cur_node = self.__find_node_index(index)\n del cur_node.data_list[index]\n self.length -= 1\n\n self.__balance_node(prev_node, cur_node)",
"def __delitem__(self, key: tuple):\n s, a = key\n del self.store[s][a]",
"def delete_at_index(self, index: int) -> T:\n pass",
"def delete_at_index(self, index: int) -> T:\n pass",
"def __delitem__(self, key):\n pass",
"def __delitem__(self, key):\n pass",
"def _bucket_delitem(self, j, k):\n pass",
"def _del(self, *args):\n return _ida_hexrays.ctree_items_t__del(self, *args)",
"def _del(self, *args):\n return _ida_hexrays.qvector_carg_t__del(self, *args)",
"def cfDel(self, key, item):\n params = [key, item]\n\n return self.execute_command(self.CF_DEL, *params)",
"def __delitem__(self, key: tuple):\n s, a = key\n if not isinstance(s, self.observation_space) or not isinstance(a, self.action_space):\n raise KeyError\n del self.store[s][a]",
"def delete_item(list_to_parse, item_index):\n del(list_to_parse[item_index]) # Remove the item\n return list_to_parse",
"def __delitem__(self, index):\n del self.chromosome_list[index]",
"def delete(self,\r\n index,\r\n notundoing=True,\r\n update_table=True):\r\n\r\n if self.read_only:\r\n display.noteprint((alerts.ATTENTION,'CANNOT EXECUTE: READ ONLY'))\r\n return {'keys': set(),\r\n 'text': '',\r\n 'meta': {}}\r\n self.indexchanged, self.indexchanged_key, self.indexchanged_tag = True, True, True\r\n self.indexchanges += 1\r\n\r\n\r\n if str(index) in self.indexes():\r\n self.display_buffer.append(index_reduce(str(index))+alerts.WAS_DELETED)\r\n self.delete_search_words(index,\r\n self.get_text_from_note(index))\r\n self.delete_keys_tags(index,\r\n self.get_keys_from_note(index))\r\n\r\n deletedmeta = self.get_metadata_from_note(index)\r\n deletedtext = self.get_text_from_note(index)\r\n deletedkeys = self.get_keys_from_note(index)\r\n\r\n if notundoing:\r\n self.done.add(('del',\r\n index,\r\n deletedkeys,\r\n deletedtext))\r\n\r\n self.delete_note(index)\r\n\r\n if update_table:\r\n self.default_dict['indextable'].delete(index)\r\n self.default_dict['indexlist'].delete(index)\r\n self.default_dict['indexlist_indexes'].delete(Index(index))\r\n self.changed = True\r\n if len(str(index)) == self.maxdepth_found:\r\n self.deepest(is_string=True,abridged=False)\r\n if len(index_reduce(str(index))) == self.abr_maxdepth_found:\r\n self.deepest(is_string=True,abridged=True)\r\n if self.project:\r\n for p_temp in self.project:\r\n self.default_dict['projects'].delete_index(index,\r\n project=p_temp)\r\n\r\n return {'keys': deletedkeys,\r\n 'text': deletedtext,\r\n 'meta': deletedmeta}",
"def _del(self, *args):\n return _ida_hexrays.qvector_ccase_t__del(self, *args)",
"def create_delete_item(doc, source_index):\n\n action = { 'delete' : { '_index' : source_index, '_type' : doc['_type'], '_id' : doc['_id'] } }\n return action",
"def __delitem__(self, key):\r\n self.client.delete(id=key, ignore=[404], **self.kwargs)",
"def __delitem__(name):",
"def __delitem__(self, key):\n self.f_remove(key)"
] | [
"0.66921794",
"0.6634915",
"0.6633796",
"0.6570889",
"0.6552683",
"0.65220124",
"0.6386673",
"0.637424",
"0.633695",
"0.6330479",
"0.63189936",
"0.629664",
"0.62494636",
"0.6222551",
"0.6222551",
"0.6154767",
"0.6154767",
"0.6136742",
"0.6106075",
"0.6077464",
"0.60384274",
"0.6037455",
"0.60155076",
"0.598926",
"0.5984369",
"0.592938",
"0.59137845",
"0.59051883",
"0.59021115",
"0.5886797"
] | 0.67726755 | 0 |
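Note: a quick runtime check of the equivalence that the `delitem` rewrite above depends on, using only the standard `operator` module (the variable names are arbitrary).

```python
import operator

inventory = {"apples": 3, "pears": 1}
del inventory["apples"]                      # statement form

restocked = {"apples": 3, "pears": 1}
operator.delitem(restocked, "apples")        # functional form produced by the rewrite

assert inventory == restocked == {"pears": 1}

# Deleting a slice follows the same pattern: the slice becomes an explicit object.
letters = list("abcdef")
operator.delitem(letters, slice(0, 2))       # same effect as `del letters[0:2]`
assert letters == list("cdef")
```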
Whether the AST node can be safely evaluated twice. | def can_reevaluate(self, node):
return isinstance(node, (ast.Name, ast.Num, ast.Str)) or \
(six.PY3 and isinstance(node, ast.Bytes)) or \
(ast_has_name_constant and isinstance(node, ast.NameConstant)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def evil_hack(self, other):\n if isinstance(other, FExpr):\n return other == self\n return isinstance(other, self.__class__) and self.id == other.id",
"def can_rewrite(self, lhs):\n return len(self[lhs]) > 0",
"def is_used_as_expression(item):\n # note: this is not accurate because of the last statement of a program\n # but intended\n return not is_used_as_statement(item)",
"def has_right(self):\n return self.right != None",
"def has_right(self):\n return self.__right != None",
"def has_duplicated_literal(head: Atom, body: Body) -> bool:\n return len(body) != len(set(body.get_literals()))",
"def internal(self):\n if self._leftchild or self._rightchild:\n return True\n return False",
"def hasTwoSons(self):\n \n return self._leftSon is not None and self._rightSon is not None",
"def check_for_right(self) -> bool:\n\t\tboolean_expression_has_right = False\n\t\texpression_has_right = False\n\t\tif self.boolean_expression:\n\t\t\tboolean_expression_has_right = self.boolean_expression.check_for_right()\n\t\tif self.expression:\n\t\t\texpression_has_right = self.expression.check_for_right()\n\t\treturn boolean_expression_has_right or expression_has_right",
"def only_once(self) -> bool:\n return self.times == 1",
"def is_true(node):\n return is_scalar_cst(node, True) or is_vector_uniform_cst(node, True)",
"def has_expression(self):\n return self._expression is not None",
"def has_right(self):\n return self.r is not None",
"def _seen(node):\n\t\tcheck = linked_list\n\t\twhile check != node:\n\t\t\tif check.value == node.value:\n\t\t\t\treturn True\n\t\t\tcheck = check.next\n\t\treturn False",
"def _has_right(self, index):\r\n return self._right(index) < len(self)",
"def _isImmediatelyConcurrentWithHelper(self, other):\n self._mergeKeys(other)\n self._binaryOperationCheck(other)\n offsetsOfPlusOne = 0\n offsetsOfMinusOne = 0\n equalities = 0\n for id in self.clock.keys():\n if (self.clock[id] + 1) == other.clock[id]:\n offsetsOfPlusOne += 1\n if (self.clock[id] - 1) == other.clock[id]:\n offsetsOfMinusOne += 1\n elif self.clock[id] == other.clock[id]:\n equalities += 1\n if offsetsOfPlusOne == 1 and offsetsOfMinusOne == 1 and equalities == len(self.clock.keys()) - 2:\n return True\n else:\n return False",
"def is_cyclically_reduced(self):\n if not self:\n return True\n return self[0] != self[-1]**-1",
"def __call__(self, first: Node, second: Node) -> bool:\n if not (is_next(first, second) and self._compare_attributes(first, second)):\n self.accumulated_axes = set()\n return False\n\n fst_axes = set([a for a in Interpolate.get_axes(first)])\n snd_axes = set([a for a in Interpolate.get_axes(second)])\n\n self.accumulated_axes = self.accumulated_axes | fst_axes\n\n # If the set of accumulated axes and the set of axes of 'second' do not intersect then nodes can be fused,\n # because interpolations with respect to various axes do not affect each other.\n if not(self.accumulated_axes & snd_axes):\n return True\n\n # Otherwise, nodes cannot be fused.\n self.accumulated_axes = set()\n return False",
"def has_side_effect(self):\n # XXX Need to handle OpExtInst correctly (it is conservative now)\n if self.result_id is None:\n return True\n return self.op_name in spirv.HAS_SIDE_EFFECT",
"def done(self):\n return self.left + 1 == self.right",
"def semileaf(self):\n if self._leftchild and not self._rightchild:\n return True\n if self._rightchild and not self._leftchild:\n return True\n return False",
"def _aresame(a, b):\n from .numbers import Number\n from .function import AppliedUndef, UndefinedFunction as UndefFunc\n if isinstance(a, Number) and isinstance(b, Number):\n return a == b and a.__class__ == b.__class__\n for i, j in zip_longest(_preorder_traversal(a), _preorder_traversal(b)):\n if i != j or type(i) != type(j):\n if ((isinstance(i, UndefFunc) and isinstance(j, UndefFunc)) or\n (isinstance(i, AppliedUndef) and isinstance(j, AppliedUndef))):\n if i.class_key() != j.class_key():\n return False\n else:\n return False\n return True",
"def has_next(self):\n return self._mu is not None or self._source.has_next()",
"def __bool__(self):\n return len(self.atoms) >= 1",
"def is_semileaf(self):\n if self._leftchild and self._rightchild:\n return False\n if not self._leftchild and not self._rightchild:\n return False\n return True",
"def nodes_are_equal(node1, node2):\n\n try:\n return dump_ast(node1).strip() == dump_ast(node2).strip() and \\\n node1.lineno == node2.lineno and \\\n node1.col_offset == node2.col_offset\n except:\n return False",
"def consistent(self):\n if self.var1.get_value() is None or self.var2.get_value() is None:\n return True\n\n return self.var1.value != self.var2.value",
"def _ast_node_is_in_docstring_position(ast_node):\n if not isinstance(ast_node, (ast.Str, Bytes)):\n raise TypeError\n expr_node = ast_node.context.parent\n if not isinstance(expr_node, ast.Expr):\n return False\n assert ast_node.context.field == 'value'\n assert ast_node.context.index is None\n expr_ctx = expr_node.context\n if expr_ctx.field != 'body':\n return False\n parent_node = expr_ctx.parent\n if not isinstance(parent_node, (ast.FunctionDef, ast.ClassDef, ast.Module, AsyncFunctionDef)):\n return False\n if expr_ctx.index == 0:\n return True\n prev_sibling_node = parent_node.body[expr_ctx.index-1]\n if isinstance(prev_sibling_node, ast.Assign):\n return True\n return False",
"def even(self):\n return self._ % 2 == 0",
"def has_logical_equivalent(self, node):\n return node.name in logical_equivalents"
] | [
"0.5983335",
"0.5781899",
"0.5668821",
"0.5606019",
"0.5595086",
"0.556334",
"0.5537646",
"0.55246955",
"0.542583",
"0.5423907",
"0.5420764",
"0.54188806",
"0.5378163",
"0.5366631",
"0.53475344",
"0.53304714",
"0.5311328",
"0.5301695",
"0.5297375",
"0.52919585",
"0.5287243",
"0.52673966",
"0.52544343",
"0.5246898",
"0.5238788",
"0.52095157",
"0.51992935",
"0.5183977",
"0.5178811",
"0.514904"
] | 0.6013396 | 0 |
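Note: on Python 3.8+ the literal node classes checked in the record above (`Num`, `Str`, `Bytes`, `NameConstant`) all parse as `ast.Constant`, so the same conservative purity check can be sketched as below. This is a simplification for modern Python, not the record's exact Python 2/3 compatibility logic.

```python
import ast

def can_reevaluate(node):
    """Conservative sketch: only bare names and literal constants are side-effect free."""
    return isinstance(node, (ast.Name, ast.Constant))

assert can_reevaluate(ast.parse("x", mode="eval").body)
assert can_reevaluate(ast.parse("42", mode="eval").body)
assert can_reevaluate(ast.parse("None", mode="eval").body)

# A call must not be duplicated: f(x) may have side effects, so callers of this check
# (e.g. the AugAssign rewrite earlier in this dump) fall back to gensym'd temporaries.
assert not can_reevaluate(ast.parse("f(x)", mode="eval").body)
```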
Convert AST operator to function in operator module. | def op_to_function(self, op):
name = op.__class__.__name__.lower()
return to_attribute(self.operator, inplace_operator_table[name]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def op_to_function(self, op):\n name = op.__class__.__name__.lower()\n name = operator_table.get(name, name)\n return to_attribute(self.operator, name)",
"def _convert_operator(\n self, op_name, node_name, inputs, attrs, identity_list=None, convert_map=None\n ):\n identity_list = identity_list if identity_list else _identity_list\n convert_map = convert_map if convert_map else _convert_map\n if op_name in identity_list:\n sym = get_relay_op(op_name)(*inputs, **attrs)\n elif op_name in convert_map:\n if _need_prelude_for_shape_inference(op_name):\n sym = convert_map[op_name](inputs, attrs, self._params, self._prelude)\n else:\n sym = convert_map[op_name](inputs, attrs, self._params, self._mod)\n elif op_name in [\"PartitionedCall\", \"StatefulPartitionedCall\"]:\n sym = self._partition_call_operator(inputs, attrs)\n else:\n raise NotImplementedError(f\"Operator {op_name} not implemented.\")\n\n sym = set_span(sym, node_name)\n\n return sym",
"def _process_operator(self, expr, operator, func, *args, **kwargs):\n for elt in self.model.xml_element_children(expr):\n self._process_operator(elt, operator, func, *args, **kwargs)\n if isinstance(expr, mathml_apply) and expr.operator().localName == operator:\n func(expr, *args, **kwargs)",
"def _convert_operator(op_name, attrs, identity_list=None, convert_map=None):\n identity_list = identity_list if identity_list else _identity_list\n convert_map = convert_map if convert_map else _convert_map\n if op_name in identity_list:\n pass\n elif op_name in convert_map:\n op_name, attrs = convert_map[op_name](attrs)\n else:\n raise NotImplementedError(\"Operator {} not implemented.\".format(op_name))\n op = getattr(mx.sym, op_name, None)\n if not op:\n raise RuntimeError(\"Unable to map op_name {} to sym\".format(op_name))\n return op, attrs",
"def operator_constructor(loader, node):\n global workspace\n obj = loader.construct_mapping(node, deep=True)\n obj = resolve_pointer( workspace, obj )\n operation, arg = yaml_to_args( obj )[0]\n return getattr( operator, operation )( *arg )",
"def fetch_operators_function(self, operator):\n operators_function = self.operators_dict[operator]['function']\n return operators_function",
"def to_operator(operator):\n if isinstance(operator, str):\n return ValueConstraintOperators.STRING_OPERATOR_MAP[operator]\n else:\n return operator",
"def convert(self, operator: OperatorBase) -> OperatorBase:\n # pylint: disable=cyclic-import,import-outside-toplevel\n from ..evolutions.evolved_op import EvolvedOp\n\n if isinstance(operator, ListOp):\n if isinstance(operator, SummedOp) and all([isinstance(op, PauliOp)\n for op in operator.oplist]):\n # For now, we only support graphs over Paulis.\n return self.group_subops(operator)\n elif self._traverse:\n return operator.traverse(self.convert)\n else:\n return operator\n elif isinstance(operator, OperatorStateFn) and self._traverse:\n return OperatorStateFn(self.convert(operator.primitive),\n is_measurement=operator.is_measurement,\n coeff=operator.coeff)\n elif isinstance(operator, EvolvedOp) and self._traverse:\n return EvolvedOp(self.convert(operator.primitive), coeff=operator.coeff)\n else:\n return operator",
"def to_operator(self) -> Operator:\n return Operator(self.to_instruction())",
"def to_op(self):\n raise NotImplementedError",
"def run_operator(scope_node, node, name, op, code, f_globals):\n operators = __get_operators()\n if op not in operators:\n raise TypeError(\"failed to load operator '%s'\" % op)\n scope_key = scope_node.scope_key\n pair = operators[op](code, scope_key, f_globals)\n if isinstance(name, tuple):\n # The template inst binding with a single name will take this\n # path by using a length-1 name tuple. See bug #78.\n bind_extended_member(node, name, pair, scope_key)\n else:\n item = getattr(node.klass, name, None)\n if isinstance(item, Alias):\n bind_aliased_member(node, name, item, pair, scope_key)\n else:\n # This is the path for a standard binding on a child def.\n # It does not need the closure scope key. See bug #78.\n bind_member(node, name, pair)",
"def _to_ops(from_op):\n\n for to_op in OPERATORS:\n if to_op and isinstance(from_op, ast.Not):\n # 'not' can only be removed but not replaced with\n # '+', '-' or '~' b/c that may lead to strange results\n pass\n elif isinstance(from_op, ast.UAdd) and (to_op is None):\n # '+1' => '1' yields equivalent mutations\n pass\n else:\n yield to_op",
"def rhs_as_python_func(self, namespace=None):\n namespace = namespace or {}\n\n return eval(\"lambda %s: %s\" % (','.join(self.rhs_names), self.rhs),\n str_to_npfunc_map, namespace)\n # math_namespace.namespace, namespace)",
"def visit_BinaryOperator(self, node: BinaryOperator) -> Instruction:\n\n left = self.visit(node.left)\n right = self.visit(node.right)\n\n if isinstance(left, VarSymbol):\n left_symbol = self.GLOBAL_MEMORY[left.name]\n else:\n left_symbol = left\n\n if isinstance(right, VarSymbol):\n right_symbol = self.GLOBAL_MEMORY[right.name]\n else:\n right_symbol = right\n\n if node.operator.type == TokenType.PLUS:\n return self.builder.fadd(left_symbol, right_symbol, \"addtmp\")\n elif node.operator.type == TokenType.MINUS:\n return self.builder.fsub(left_symbol, right_symbol, \"subtmp\")\n elif node.operator.type == TokenType.MUL:\n return self.builder.fmul(left_symbol, right_symbol, \"multmp\")\n elif node.operator.type == TokenType.INTEGER_DIV:\n return self.builder.fdiv(left_symbol, right_symbol, \"udivtmp\")\n elif node.operator.type == TokenType.FLOAT_DIV:\n return self.builder.fdiv(left_symbol, right_symbol, \"fdivtmp\")",
"def __compile_operator(self, op, caller):\r\n if op == \"+\":\r\n self.__vmwriter.write_arithmetic(\"add\")\r\n elif op == \"-\" and caller == \"expression\":\r\n self.__vmwriter.write_arithmetic(\"sub\")\r\n elif op == \"*\":\r\n self.__vmwriter.write_call(\"Math.multiply\", 2)\r\n elif op == \"/\":\r\n self.__vmwriter.write_call(\"Math.divide\", 2)\r\n elif op == \"&\":\r\n self.__vmwriter.write_arithmetic(\"and\")\r\n elif op == \"|\":\r\n self.__vmwriter.write_arithmetic(\"or\")\r\n elif op == \"<\":\r\n self.__vmwriter.write_arithmetic(\"lt\")\r\n elif op == \">\":\r\n self.__vmwriter.write_arithmetic(\"gt\")\r\n elif op == \"=\":\r\n self.__vmwriter.write_arithmetic(\"eq\")\r\n elif op == \"-\":\r\n self.__vmwriter.write_arithmetic(\"neg\")\r\n elif op == \"~\":\r\n self.__vmwriter.write_arithmetic(\"not\")",
"def get_fermion_operator(operator):\n fermion_operator = FermionOperator()\n\n if isinstance(operator, PolynomialTensor):\n for term in operator:\n fermion_operator += FermionOperator(term, operator[term])\n return fermion_operator\n\n raise TypeError(\"Unsupported type of oeprator {}\".format(operator))",
"def mutate_bySingleOperator(self, root, operator):\n self.operator = operator\n\n ast.fix_missing_locations(root)\n # traverse the target ast tree and mutate interesting node\n mutated_ast = self.visit(root)\n ast.fix_missing_locations(root)\n\n return mutated_ast",
"def _onnx_node_to_singa_op(cls,\n onnx_node,\n inputs,\n opset_version=_known_opset_version):\n if onnx_node.op_type in cls._special_operators:\n translator = getattr(cls, cls._special_operators[onnx_node.op_type])\n else:\n translator = cls._common_onnx_node_to_singa_op\n return translator(onnx_node, inputs, opset_version)",
"def translate(expr):\n return from_python(ast.parse(expr))",
"def all_math(operator):\n a = int(request.args.get(\"a\"))\n b = int(request.args.get(\"b\"))\n return str(functions[operator](a,b))",
"def convert_exp(node, **kwargs):\n return create_basic_op_node('Exp', node, kwargs)",
"def apply(expr, fun_annotate_subexpr = None):\n assert isinstance(expr, Expression)\n t = type(expr)\n if t is Op:\n try:\n pre, suff = ExprTranslator.OPS_TO_SMTLIB[expr.id]\n return ExprTranslator.subexpr_to_smtlib(expr, pre, suff, fun_annotate_subexpr)\n except KeyError:\n raise Exception(str(expr.id) + ': operation not supported!')\n\n elif t is Var:\n return expr.get_text()\n elif t is ConstInt or t is ConstBool or t is ConstReal:\n return str(expr.get_text())\n elif t is ExprHole:\n return expr.hole_decl.get_function_call()\n else:\n raise Exception(str(t)+': expression type not supported!')",
"def applyOperator(self, operand1, operand2, operator):\n\n if operator == \"*\":\n return operand1 * operand2\n elif operator == \"/\":\n return operand1 / operand2\n elif operator == \"+\":\n return operand1 + operand2\n else:\n return operand1 - operand2",
"def vector_to_operator(op):\n if not op.isoperket:\n raise TypeError(\"only defined for operator-kets\")\n if op.superrep != \"super\":\n raise TypeError(\"only defined for operator-kets in super format\")\n dims = op.dims[0]\n return Qobj(unstack_columns(op.data, (np.prod(dims[0]), np.prod(dims[1]))),\n dims=dims,\n copy=False)",
"def funcOpExchange(expstr):\n funcOpDict = expr.getFuncOpDict() \n for funcstr in funcOpDict:\n idx = expstr.find(funcstr)\n if idx >= 0:\n #if we find a function string at idx\n if (idx == 0 or not expstr[idx-1].isalpha()) and expstr[idx+len(funcstr)] == '(':\n fstart = idx\n fstop = 0\n rest = expstr[idx:]\n pdepth = 0\n for i,c in enumerate(rest):\n if c == '(':\n pdepth += 1\n if c == ')':\n pdepth -= 1\n if pdepth == 0:\n fstop = idx+i+1\n break\n start = expstr[:fstart]\n middle = expstr[fstart:fstop]\n end = expstr[fstop:]\n args = ['('+funcOpExchange(exp)+')' for exp in funcargs(middle)]\n if len(args) == 1:\n args.append('0')\n expstr = start+funcOpDict[funcstr].join(args)+funcOpExchange(end)\n return expstr",
"def gen_binop(self, expr: expressions.BinaryOperator):\n if expr.op in [\"*\", \"/\", \"%\", \"^\", \"|\", \"&\", \">>\", \"<<\"]:\n lhs = self.gen_expr(expr.a, rvalue=True)\n rhs = self.gen_expr(expr.b, rvalue=True)\n op = expr.op\n\n ir_typ = self.get_ir_type(expr.typ)\n value = self.builder.emit_binop(lhs, op, rhs, ir_typ)\n elif expr.op == \",\":\n # Handle the comma operator by returning the second result\n self.gen_expr(expr.a, rvalue=True)\n rhs = self.gen_expr(expr.b, rvalue=True)\n value = rhs\n elif expr.op == \"+\":\n # Pay attention to pointer arithmetics!\n lhs = self.gen_expr(expr.a, rvalue=True)\n rhs = self.gen_expr(expr.b, rvalue=True)\n\n # left and right are swapped in semantics if right is pointer.\n if expr.a.typ.is_pointer:\n assert expr.b.typ.is_integer\n esize = self.sizeof(expr.a.typ.element_type)\n assert esize > 0\n if esize != 1:\n esize = self.emit(ir.Const(esize, \"esize\", rhs.ty))\n rhs = self.builder.emit_mul(rhs, esize, rhs.ty)\n rhs = self.builder.emit_cast(rhs, ir.ptr)\n\n ir_typ = self.get_ir_type(expr.typ)\n value = self.builder.emit_binop(lhs, \"+\", rhs, ir_typ)\n elif expr.op == \"-\":\n # Pay attention to pointer arithmetics!\n lhs = self.gen_expr(expr.a, rvalue=True)\n rhs = self.gen_expr(expr.b, rvalue=True)\n ir_typ = self.get_ir_type(expr.typ)\n if expr.a.typ.is_pointer:\n esize = self.sizeof(expr.a.typ.element_type)\n assert esize > 0\n if expr.b.typ.is_pointer:\n # pointer - pointer\n value = self.builder.emit_binop(lhs, \"-\", rhs, ir.ptr)\n value = self.emit(ir.Cast(value, \"typecast\", ir_typ))\n if esize != 1:\n esize = self.emit(ir.Const(esize, \"esize\", ir_typ))\n value = self.emit(\n ir.Binop(value, \"/\", esize, \"rhs\", ir_typ)\n )\n else:\n # pointer - numeric\n if esize != 1:\n esize = self.emit(ir.Const(esize, \"esize\", rhs.ty))\n rhs = self.builder.emit_mul(rhs, esize, rhs.ty)\n rhs = self.builder.emit_cast(rhs, ir.ptr)\n value = self.builder.emit_binop(lhs, \"-\", rhs, ir_typ)\n else:\n # numeric - numeric\n value = self.builder.emit_binop(lhs, \"-\", rhs, ir_typ)\n\n elif expr.op in [\"<\", \">\", \"==\", \"!=\", \"<=\", \">=\", \"||\", \"&&\"]:\n value = self.gen_condition_to_integer(expr)\n elif expr.op in [\n \"=\",\n \"+=\",\n \"-=\",\n \"*=\",\n \"%=\",\n \"/=\",\n \">>=\",\n \"<<=\",\n \"&=\",\n \"|=\",\n \"~=\",\n \"^=\",\n ]:\n # Handle struct assignment special case:\n if expr.op == \"=\" and expr.a.typ.is_struct:\n lhs = self.gen_expr(expr.a, rvalue=False)\n rhs = self.gen_expr(expr.b, rvalue=False)\n amount = self.sizeof(expr.a.typ)\n self.gen_copy_struct(lhs, rhs, amount)\n value = None\n else:\n lhs = self.gen_expr(expr.a, rvalue=False)\n rhs = self.gen_expr(expr.b, rvalue=True)\n\n if expr.op == \"=\":\n value = rhs\n else:\n # Handle '+=' and friends:\n op = expr.op[:-1]\n ir_typ = self.get_ir_type(expr.typ)\n loaded = self._load_value(lhs, expr.typ)\n\n # pointer arithmatic:\n if op in [\"+\", \"-\"] and expr.a.typ.is_pointer:\n esize = self.sizeof(expr.a.typ.element_type)\n assert esize > 0\n if esize != 1:\n esize = self.emit(ir.Const(esize, \"esize\", rhs.ty))\n rhs = self.builder.emit_mul(rhs, esize, rhs.ty)\n\n value = self.builder.emit_binop(loaded, op, rhs, ir_typ)\n self._store_value(value, lhs)\n else: # pragma: no cover\n raise NotImplementedError(str(expr.op))\n return value",
"def onnx_extract_operator(node, model, nodes_dict):\n op_type = node.op_type\n input_tensors = []\n output_tensors = []\n\n \"\"\" input_tensors\n each input_tensor has its own soure op, but same dest op\n so both have single string\n \"\"\"\n input_names = []\n # name list\n input_tensor_names = node.input\n for input_tensor_name in input_tensor_names:\n origin_tensor_name, input_tensor_name = util.names_from_input(input_tensor_name)\n try:\n pre_node = nodes_dict[nodes_dict[origin_tensor_name]].node\n except BaseException:\n pre_node = nodes_dict[origin_tensor_name].node\n \n data = None\n if pre_node in model.initializer():\n data = to_array(pre_node)\n else:\n if (pre_node not in model.graph().input) and (pre_node.op_type == 'Constant'):\n data = to_array(pre_node.attribute[0].t)\n if isinstance(data, np.ndarray):\n dtype = util.get_data_dtype(data)\n shape = list(data.shape) if data.shape != () else [1]\n input_tensor = Tensor(name=input_tensor_name,\n source_op=[],\n dest_op=[node.name],\n shape=shape,\n data=data,\n dtype=dtype\n )\n input_tensors.append(input_tensor)\n\n else:\n input_tensor = Tensor(name=input_tensor_name,\n source_op=[pre_node.name],\n dest_op=[node.name],\n shape=None,\n data=None,\n dtype=None\n )\n input_tensors.append(input_tensor)\n input_names.append(node.name)\n\n \"\"\" output_tensors\n in onnx, NodeProto has the output attribute\n \"\"\"\n output_tensor_names = node.output\n for output_tensor_name in output_tensor_names:\n output_tensor_name = util.names_from_input(output_tensor_name)[1]\n output_tensor = Tensor(name=output_tensor_name,\n source_op=[node.name],\n dest_op=nodes_dict[node.name].outputs,\n shape=None,\n data=None,\n dtype=None\n )\n\n output_tensors.append(output_tensor)\n\n return op_type, input_tensors, output_tensors",
"def convert(ast):\n\n if ast and ast.type == \"Function\":\n # Activity function conversion\n if (\n ast.name != \"molecularActivity\"\n and ast.name in belspec[\"namespaces\"][\"Activity\"][\"list\"]\n ):\n print(\"name\", ast.name, \"type\", ast.type)\n ast = convert_activity(ast)\n return ast # Otherwise - this will trigger on the BEL2 molecularActivity\n\n # translocation conversion\n elif ast.name in [\"tloc\", \"translocation\"]:\n ast = convert_tloc(ast)\n\n fus_flag = False\n for idx, arg in enumerate(ast.args):\n if arg.__class__.__name__ == \"Function\":\n\n # Fix substitution -> variation()\n if arg.name in [\"sub\", \"substitution\"]:\n ast.args[idx] = convert_sub(arg)\n\n elif arg.name in [\"trunc\", \"truncation\"]:\n ast.args[idx] = convert_trunc(arg)\n\n elif arg.name in [\"pmod\", \"proteinModification\"]:\n ast.args[idx] = convert_pmod(arg)\n\n elif arg.name in [\"fus\", \"fusion\"]:\n fus_flag = True\n\n # Recursively process Functions\n ast.args[idx] = convert(ast.args[idx])\n\n if fus_flag:\n ast = convert_fus(ast)\n\n return ast",
"def opsplit(expstr):\n\n #ops are the one char operators (sorted on precidence)\n ops = expr.getOps()\n #Remove outer parentesis if we have them\n if expstr[0] == '(' and expstr[-1] == ')' and balanced(expstr[1:-1]):\n expstr = expstr[1:-1]\n #Add a '0' to the beginning of the string if we start with an operator\n if expstr[0] in ops:\n expstr = '0'+expstr\n for op in ops:\n pc = 0\n cc = len(expstr)-1\n revexpstr = list(expstr)\n revexpstr.reverse()\n #Search for the operator backwards (to preserve operator presidence)\n for c in revexpstr:\n if c == '(':\n pc += 1\n elif c == ')':\n pc -= 1\n if c == op and pc == 0:\n #Build the tree recursively\n return [op,opsplit(expstr[:cc]),opsplit(expstr[cc+1:])]\n cc -=1\n #if we find something that looks like a function, parse it separately \n if funcpattern(expstr):\n fnamestr = funcname(expstr)\n fargs = funcargs(expstr)\n farglist = [opsplit(arg) for arg in fargs]\n return [fnamestr]+farglist\n return expstr",
"def __init__(self):\n super(OperatorCodegen, self).__init__()"
] | [
"0.7403406",
"0.68868965",
"0.670449",
"0.6681713",
"0.65929717",
"0.65538996",
"0.6334976",
"0.6330674",
"0.63050497",
"0.62751913",
"0.59945136",
"0.59220994",
"0.58847594",
"0.582918",
"0.58211267",
"0.5793635",
"0.579274",
"0.57560617",
"0.56892043",
"0.5672466",
"0.56405747",
"0.5610882",
"0.56029516",
"0.559694",
"0.5591294",
"0.558444",
"0.5562065",
"0.5551155",
"0.5549541",
"0.55332077"
] | 0.7269648 | 1 |
Convert augmented assignment to assignment plus function call. | def visit_AugAssign(self, node):
# FIXME: Gensym the LHS to avoid two evaluations.
self.generic_visit(node)
rhs = to_call(self.op_to_function(node.op),
[set_ctx(node.target), node.value])
return ast.Assign([node.target], rhs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _AugAssign(self, t):\n if not isinstance(t.target, ast.Name):\n self.RaiseError(t, \"Augmented assignment to complex expressions not supported\")\n # check if target exists in locals\n if t.target.id not in self._locals :\n self.RaiseError(t, \"Augmented assignment not permitted on variables not already assigned previously\")\n self.fill()\n self.dispatch(t.target)\n self.write(\" \"+self.binop[t.op.__class__.__name__]+\"= \")\n self.dispatch(t.value)\n self.write(\";\")",
"def visit_Assign(self, node):\n self.generic_visit(node)\n target = get_single_target(node)\n if isinstance(target, ast.Subscript):\n fun = to_attribute(self.operator, 'setitem')\n args = [target.value, self.index_to_expr(target.slice), node.value]\n return ast.Expr(to_call(fun, args))\n return node",
"def convert_assign(g, op, block):\n\n out = g.get_node(op.input(\"X\")[0])\n g.add_node(op.output(\"Out\")[0], out)",
"def _Assign(self, t):\n if len(t.targets) > 1:\n self.RaiseError(t, \"Assignment to multiple targets not supported\")\n if not isinstance(t.targets[0], ast.Name):\n self.RaiseError(t, \"Assignment to complex expressions not supported\")\n self.fill()\n # check if target exists in locals\n if t.targets[0].id not in self._locals :\n self.write(\"auto \")\n self._locals.append(t.targets[0].id)\n self.dispatch(t.targets[0])\n self.write(\" = \")\n self.dispatch(t.value)\n self.write(\";\")",
"def irgen_assign(stmt, builder, table):\n lvalue = irgen_lvalue(stmt.exprs[0], builder, table)\n expr = irgen_expr(stmt.exprs[1], builder, table)\n builder.store(expr, lvalue)",
"def expand_callable(self, call_expr):\n call_expr.func = ast.Attribute(value=call_expr.func, attr='__call__')",
"def eval_assignment(exp, env):\n set_variable_value(assignment_variable(exp), m_eval(assignment_value(exp), env), env)\n return quote(\"ok\")",
"def _assign_op(dest, op, arg, val, path, scope):\n if op == '[':\n dest[arg] = val\n elif op == '.':\n setattr(dest, arg, val)\n elif op == 'P':\n _assign = scope[TargetRegistry].get_handler('assign', dest)\n try:\n _assign(dest, arg, val)\n except Exception as e:\n raise PathAssignError(e, path, arg)\n else: # pragma: no cover\n raise ValueError('unsupported T operation for assignment')",
"def mk_assign(var_map, s, assigns):\n assign_args = []\n for k, v in assigns.items():\n k2 = convert_term(var_map, s, k)\n assert k2.fun == s, \"mk_assign: key is not an identifer.\"\n assign_args.append(k2.arg)\n assign_args.append(convert_term(var_map, s, v))\n\n return function.mk_fun_upd(s, *assign_args)",
"def visit_Assign(self, node):\n self.generic_visit(node)\n\n if node.col_offset == 0:\n mnode = ast.parse(\"\")\n mnode.body = [node]\n mnode = ast.fix_missing_locations(mnode)\n code = compile(mnode, \"<ast>\", \"exec\")\n try:\n exec(code, self.globals_)\n except Exception:\n pass\n self.globals_.pop(\"__builtins__\", None)\n self.globals_.pop(\"builtins\", None)",
"def visit_aug_assign(self: Parser, node: doc.AugAssign) -> None:\n lhs_pos = (\n node.target.lineno,\n node.target.col_offset,\n node.target.end_lineno,\n node.target.end_col_offset,\n )\n rhs_pos = (\n node.value.lineno,\n node.value.col_offset,\n node.value.end_lineno,\n node.value.end_col_offset,\n )\n node.target.ctx = doc.Load(*lhs_pos)\n with self.var_table.with_frame():\n lhs_name = \"__tvm_tmp_value_aug_assign_lhs\"\n rhs_name = \"__tvm_tmp_value_aug_assign_rhs\"\n lhs_expr = self.eval_expr(node.target)\n rhs_expr = self.eval_expr(node.value)\n self.var_table.add(lhs_name, lhs_expr)\n self.var_table.add(rhs_name, rhs_expr)\n op = doc.BinOp(\n doc.Name(lhs_name, doc.Load(*lhs_pos), *lhs_pos),\n node.op,\n doc.Name(rhs_name, doc.Load(*rhs_pos), *rhs_pos),\n *lhs_pos,\n )\n rhs = self.eval_expr(op)\n lhs = node.target\n lhs.ctx = doc.Store(*lhs_pos)\n if isinstance(lhs, doc.Subscript):\n if isinstance(lhs.slice, doc.Tuple):\n indices = []\n for index in lhs.slice.elts:\n indices.append(self.eval_expr(index))\n else:\n indices = [self.eval_expr(lhs.slice)]\n T.buffer_store(self.eval_expr(lhs.value), rhs, indices)\n else:\n self.eval_assign(target=lhs, source=rhs, bind_value=bind_assign_value)",
"def eval_assignment(assignment, motif_node_dict):\n if type(assignment.rvalue).__name__ == 'FuncCall':\n motif_node, tree_node = eval_function_call(assignment.rvalue, motif_node_dict)\n # consider \"var = XXX;\" and \"*var = XXX\" and \"&var = XXX\" situations\n if (type(assignment.lvalue).__name__ == 'ID' and assignment.lvalue.name in motif_node_dict) or (type(assignment.lvalue).__name__ == 'UnaryOp' and assignment.lvalue.expr.name in motif_node_dict):\n if not motif_node:\n print('\\33[101m' + '[error][eval_assignment]: ' + assignment.lvalue.name + ' is in the dictionary. MotifNode should not be None.\\033[0m')\n exit(1)\n else:\n motif_node_dict[assignment.lvalue.name].append(motif_node)\n return tree_node\n # In a case where a provenance node was declared but then assigned or reassigned. For example:\n # struct provenance *tprov;\n # ...\n # tprov = t->provenance;\n # tprov must then be in the motif_node_dict.\n elif type(assignment.lvalue).__name__ == 'ID' and assignment.lvalue.name in motif_node_dict:\n # we can only infer its type from the name of the variable\n motif_node = provenance.create_motif_node(assignment.lvalue.name)\n motif_node_dict[assignment.lvalue.name].append(motif_node)\n return None\n elif type(assignment.lvalue).__name__ == 'UnaryOp' and type(assignment.lvalue.expr).__name__ == 'ID' and assignment.lvalue.expr.name in motif_node_dict:\n # similar case as the previous one, except that we have: *tprov = ...\n # we can only infer its type from the name of the variable\n motif_node = provenance.create_motif_node(assignment.lvalue.expr.name)\n motif_node_dict[assignment.lvalue.expr.name].append(motif_node)\n return None\n else:\n #######################################################\n # We will consider other conditions if we ever see them\n # POSSIBLE CODE HERE.\n #######################################################\n return None",
"def _analyse_stmt_AugAssign(\n self, statement: ast.AugAssign, *, next: CFNode\n ) -> CFNode:\n return self._ast_node(statement, next=next, error=self._raise)",
"def multiple_value_call_assignment_handler(target, value, assign_stmts, node, id_str):\n #print(\"multiple_value_call_assignment_handler\")\n target_stmts, value_var = stypy_functions.create_temp_Assign(value, node.lineno, node.col_offset,\n \"{0}_assignment\".format(id_str))\n assign_stmts.append(target_stmts)\n\n #value_var_to_load = copy.deepcopy(value_var)\n value_var_to_load = ast.Name()\n value_var_to_load.col_offset = value_var.col_offset\n value_var_to_load.lineno = value_var.lineno\n value_var_to_load.id = value_var.id\n value_var_to_load.ctx = ast.Load()\n\n for i in xrange(len(target.elts)):\n # Assign values to each element.\n # getitem_att = core_language.create_attribute(value_var_to_load, '__getitem__', context=ast.Load(),\n # line=node.lineno,\n # column=node.col_offset)\n # item_call = functions.create_call(getitem_att, [core_language.create_num(i, node.lineno, node.col_offset)])\n # temp_stmts, temp_value = stypy_functions.create_temp_Assign(item_call, node.lineno, node.col_offset,\n # \"{0}_assignment\".format(id_str))\n stypy_interface = core_language.create_Name('stypy_interface')\n get_tuple_call = core_language.create_attribute(stypy_interface, 'stypy_get_value_from_tuple', context=ast.Load(),\n line=node.lineno,\n column=node.col_offset)\n\n item_call = functions.create_call(get_tuple_call, [value_var_to_load,\n core_language.create_num(len(target.elts), node.lineno, node.col_offset),\n core_language.create_num(i, node.lineno, node.col_offset)])\n temp_stmts, temp_value = stypy_functions.create_temp_Assign(item_call, node.lineno, node.col_offset,\n \"{0}_assignment\".format(id_str))\n if hasattr(node, 'lineno'):\n temp_stmts.lineno = node.lineno\n temp_stmts.col_offset = node.col_offset\n\n assign_stmts.append(temp_stmts)\n\n temp_stmts = core_language.create_Assign(target.elts[i], temp_value)\n if hasattr(node, 'lineno'):\n temp_stmts.lineno = node.lineno\n temp_stmts.col_offset = node.col_offset\n\n assign_stmts.append(temp_stmts)\n\n return True",
"def visit_assign(self: Parser, node: doc.Assign) -> None:\n if len(node.targets) != 1:\n self.report_error(node, \"Consequential assignments like 'a = b = c' are not supported.\")\n lhs = node.targets[0]\n\n if isinstance(node.value, doc.Subscript):\n check_slices = []\n if isinstance(node.value.slice, doc.Slice):\n check_slices = [node.value.slice]\n elif isinstance(node.value.slice, doc.Tuple):\n for p in node.value.slice.elts:\n if isinstance(p, doc.Slice):\n check_slices.append(p)\n for s in check_slices:\n if not s.step and s.upper and s.lower:\n s.step = doc.Constant(\n 1,\n None,\n 1,\n 1,\n s.upper.lineno,\n s.upper.end_col_offset + 1,\n s.upper.lineno,\n s.upper.end_col_offset + 2,\n )\n\n rhs = self.eval_expr(node.value)\n if isinstance(lhs, doc.Subscript):\n if isinstance(lhs.slice, doc.Tuple):\n indices = []\n for index in lhs.slice.elts:\n indices.append(self.eval_expr(index))\n else:\n indices = self.eval_expr(lhs.slice)\n T.buffer_store(self.eval_expr(lhs.value), rhs, indices)\n else:\n self.eval_assign(target=lhs, source=rhs, bind_value=bind_assign_value)",
"def visit_AugAssign(self, node):\n target = node.target\n\n rhs_target = copy.deepcopy(target)\n rhs_target.ctx = ast.Load()\n ast.fix_missing_locations(rhs_target)\n\n bin_op = ast.BinOp(rhs_target, node.op, node.value)\n assignment = ast.Assign([target], bin_op)\n assignment.inplace_op = node.op\n return self.visit(assignment)",
"def assign(self, *args):\n return _ida_hexrays.cinsn_t_assign(self, *args)",
"def assign(self, *args):\n return _ida_hexrays.cexpr_t_assign(self, *args)",
"def visit_simple_assign(self, node):\n temp = gensym()\n temp_target = to_name(temp, ast.Store())\n stmts = [ ast.Assign([temp_target], node.value) ]\n stmts += [ ast.Assign([target], to_name(temp))\n for target in node.targets ]\n return stmts",
"def _process_assign(self, node: ast.Assign) -> None:\n if isinstance(node.value, ast.Call) and self._is_export_call(\n node.value.func\n ):\n # id = tf_export(...)(...)\n if len(node.targets) != 1:\n raise BadExportError(\n f'{self._current_file}:{node.lineno} export must be'\n f' assigned to a single value: {ast.dump(node)}'\n )\n symbol = self._name(node.targets[0])\n if not symbol:\n raise BadExportError(\n f'{self._current_file}:{node.lineno} export must be'\n f' assigned to a single value: {ast.dump(node)}'\n )\n self._add_exported_symbol(node.value.func, symbol)\n else:\n self.visit(node)",
"def copy_stmt(self, env, dst_marking_var, src_marking_var):\n field = self.field\n return pyast.E(\"{} = {}\".format(field.access_from(dst_marking_var), field.access_from(src_marking_var)))",
"def _analyse_stmt_Assign(self, statement: ast.Assign, *, next: CFNode) -> CFNode:\n return self._ast_node(statement, next=next, error=self._raise)",
"def __rrshift__(self, other):\n if isinstance(other, Callable):\n return self @ other\n else:\n return self(other) # Function application",
"def func_call(self, t):\n func, params = t\n func_name = func.value\n func.value = \"({}({}))\".format(func_name, params)\n return func",
"def assign_operator(cls, quad):\n\t\tvalue = cls.get_address_value(quad.left_operand)\n\t\tif quad.right_operand :\n\t\t\tcls.set_arr_value(quad.result, quad.right_operand, value)\n\t\telse:\n\t\t\tcls.set_address_value(quad.result, value)",
"def visit_Assign(self, node):\n self.generic_visit(node)\n target = get_single_target(node)\n if isinstance(target, ast.Attribute):\n args = [ target.value, ast.Str(target.attr), node.value ]\n return ast.Expr(to_call(to_name('setattr'), args))\n return node",
"def visit_Assign(self, node):\n assign_stmts = []\n value = node.value\n reversed_targets = node.targets\n reversed_targets.reverse()\n assign_stmts.append(stypy_functions.create_blank_line())\n if len(reversed_targets) > 1:\n assign_stmts.append(\n stypy_functions.create_src_comment(\n \"Multiple assignment of {0} elements.\".format(len(reversed_targets))))\n else:\n if hasattr(node, 'lineno'):\n assign_stmts.append(stypy_functions.create_src_comment(\n \"Assigning a {1} to a {0} (line {2}):\".format(type(reversed_targets[0]).__name__,\n type(value).__name__, node.lineno)))\n else:\n assign_stmts.append(stypy_functions.create_src_comment(\n \"Assigning a {1} to a {0}:\".format(type(reversed_targets[0]).__name__,\n type(value).__name__)))\n for assign_num in xrange(len(reversed_targets)):\n target = reversed_targets[assign_num]\n # Function guard is true? execute handler\n for handler_func_guard_tuple in self.__assignment_handlers:\n if handler_func_guard_tuple[0](target, value):\n id_str, handler_func = handler_func_guard_tuple[1]\n self.performed_transformations |= handler_func(target, value, assign_stmts, node, id_str)\n assign_stmts = stypy_functions.flatten_lists(assign_stmts)\n value = target\n break\n\n if len(assign_stmts) > 0:\n return assign_stmts\n return node",
"def expand_as(predicate_string): \n def callback(frame, name, func, old_locals):\n from peak.rules.predicates import _expand_as\n kind, module, locals_, globals_ = core.frameinfo(frame)\n return _expand_as(\n func, predicate_string, locals_, globals_, __builtins__\n )\n return core.decorate_assignment(callback)",
"def assign(self, *args):\n return _libsbml.string_assign(self, *args)",
"def _(self, node: Assignment):\n\n # This check allows us to ignore the initialization nodes\n # in the CAST 'i.e. x0 = -1'\n if node.source_refs == None:\n if type(node.left) == Var:\n if type(node.right) == Number and node.right.number == -1:\n return \"\"\n\n left = self.visit(node.left)\n right = self.visit(node.right)\n\n to_ret = f\"( assign {left} {right} )\"\n return to_ret"
] | [
"0.6730379",
"0.60008764",
"0.59139097",
"0.5711847",
"0.56486744",
"0.56235904",
"0.558098",
"0.5525872",
"0.5452114",
"0.5416577",
"0.5416467",
"0.53805333",
"0.53741395",
"0.53737843",
"0.53596747",
"0.5325352",
"0.5319974",
"0.5307721",
"0.52776676",
"0.5269577",
"0.52540934",
"0.5252053",
"0.5249622",
"0.52414185",
"0.5238813",
"0.5232454",
"0.517946",
"0.5175198",
"0.51656896",
"0.5159953"
] | 0.61607265 | 1 |
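The visit_AugAssign record above rewrites an augmented assignment such as x += y into a plain assignment whose right-hand side is a call produced by op_to_function. Its helpers (to_call, set_ctx, to_attribute) are not shown here, so the following is a minimal self-contained sketch of the same idea using only the standard ast and operator modules; the InplaceToCall name and the INPLACE_FUNCS entries are illustrative assumptions, and only simple Name targets are handled.

import ast

# Assumed mapping from AST operator class names to operator-module in-place functions.
INPLACE_FUNCS = {"Add": "iadd", "Sub": "isub", "Mult": "imul", "Div": "itruediv"}

class InplaceToCall(ast.NodeTransformer):
    def visit_AugAssign(self, node):
        self.generic_visit(node)
        func = ast.Attribute(
            value=ast.Name(id="operator", ctx=ast.Load()),
            attr=INPLACE_FUNCS[type(node.op).__name__],
            ctx=ast.Load(),
        )
        # Only simple Name targets are handled here; complex targets (a.b, a[i])
        # would need the gensym treatment the original FIXME comment mentions.
        loaded = ast.Name(id=node.target.id, ctx=ast.Load())
        call = ast.Call(func=func, args=[loaded, node.value], keywords=[])
        return ast.Assign(targets=[node.target], value=call)

tree = ast.fix_missing_locations(InplaceToCall().visit(ast.parse("x += y")))
print(ast.unparse(tree))  # x = operator.iadd(x, y)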
Convert AST operator to function in operator module. | def op_to_function(self, op):
name = op.__class__.__name__.lower()
name = operator_table.get(name, name)
return to_attribute(self.operator, name) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def op_to_function(self, op):\n name = op.__class__.__name__.lower()\n return to_attribute(self.operator, inplace_operator_table[name])",
"def _convert_operator(\n self, op_name, node_name, inputs, attrs, identity_list=None, convert_map=None\n ):\n identity_list = identity_list if identity_list else _identity_list\n convert_map = convert_map if convert_map else _convert_map\n if op_name in identity_list:\n sym = get_relay_op(op_name)(*inputs, **attrs)\n elif op_name in convert_map:\n if _need_prelude_for_shape_inference(op_name):\n sym = convert_map[op_name](inputs, attrs, self._params, self._prelude)\n else:\n sym = convert_map[op_name](inputs, attrs, self._params, self._mod)\n elif op_name in [\"PartitionedCall\", \"StatefulPartitionedCall\"]:\n sym = self._partition_call_operator(inputs, attrs)\n else:\n raise NotImplementedError(f\"Operator {op_name} not implemented.\")\n\n sym = set_span(sym, node_name)\n\n return sym",
"def _process_operator(self, expr, operator, func, *args, **kwargs):\n for elt in self.model.xml_element_children(expr):\n self._process_operator(elt, operator, func, *args, **kwargs)\n if isinstance(expr, mathml_apply) and expr.operator().localName == operator:\n func(expr, *args, **kwargs)",
"def _convert_operator(op_name, attrs, identity_list=None, convert_map=None):\n identity_list = identity_list if identity_list else _identity_list\n convert_map = convert_map if convert_map else _convert_map\n if op_name in identity_list:\n pass\n elif op_name in convert_map:\n op_name, attrs = convert_map[op_name](attrs)\n else:\n raise NotImplementedError(\"Operator {} not implemented.\".format(op_name))\n op = getattr(mx.sym, op_name, None)\n if not op:\n raise RuntimeError(\"Unable to map op_name {} to sym\".format(op_name))\n return op, attrs",
"def operator_constructor(loader, node):\n global workspace\n obj = loader.construct_mapping(node, deep=True)\n obj = resolve_pointer( workspace, obj )\n operation, arg = yaml_to_args( obj )[0]\n return getattr( operator, operation )( *arg )",
"def fetch_operators_function(self, operator):\n operators_function = self.operators_dict[operator]['function']\n return operators_function",
"def to_operator(operator):\n if isinstance(operator, str):\n return ValueConstraintOperators.STRING_OPERATOR_MAP[operator]\n else:\n return operator",
"def convert(self, operator: OperatorBase) -> OperatorBase:\n # pylint: disable=cyclic-import,import-outside-toplevel\n from ..evolutions.evolved_op import EvolvedOp\n\n if isinstance(operator, ListOp):\n if isinstance(operator, SummedOp) and all([isinstance(op, PauliOp)\n for op in operator.oplist]):\n # For now, we only support graphs over Paulis.\n return self.group_subops(operator)\n elif self._traverse:\n return operator.traverse(self.convert)\n else:\n return operator\n elif isinstance(operator, OperatorStateFn) and self._traverse:\n return OperatorStateFn(self.convert(operator.primitive),\n is_measurement=operator.is_measurement,\n coeff=operator.coeff)\n elif isinstance(operator, EvolvedOp) and self._traverse:\n return EvolvedOp(self.convert(operator.primitive), coeff=operator.coeff)\n else:\n return operator",
"def to_operator(self) -> Operator:\n return Operator(self.to_instruction())",
"def to_op(self):\n raise NotImplementedError",
"def run_operator(scope_node, node, name, op, code, f_globals):\n operators = __get_operators()\n if op not in operators:\n raise TypeError(\"failed to load operator '%s'\" % op)\n scope_key = scope_node.scope_key\n pair = operators[op](code, scope_key, f_globals)\n if isinstance(name, tuple):\n # The template inst binding with a single name will take this\n # path by using a length-1 name tuple. See bug #78.\n bind_extended_member(node, name, pair, scope_key)\n else:\n item = getattr(node.klass, name, None)\n if isinstance(item, Alias):\n bind_aliased_member(node, name, item, pair, scope_key)\n else:\n # This is the path for a standard binding on a child def.\n # It does not need the closure scope key. See bug #78.\n bind_member(node, name, pair)",
"def _to_ops(from_op):\n\n for to_op in OPERATORS:\n if to_op and isinstance(from_op, ast.Not):\n # 'not' can only be removed but not replaced with\n # '+', '-' or '~' b/c that may lead to strange results\n pass\n elif isinstance(from_op, ast.UAdd) and (to_op is None):\n # '+1' => '1' yields equivalent mutations\n pass\n else:\n yield to_op",
"def rhs_as_python_func(self, namespace=None):\n namespace = namespace or {}\n\n return eval(\"lambda %s: %s\" % (','.join(self.rhs_names), self.rhs),\n str_to_npfunc_map, namespace)\n # math_namespace.namespace, namespace)",
"def visit_BinaryOperator(self, node: BinaryOperator) -> Instruction:\n\n left = self.visit(node.left)\n right = self.visit(node.right)\n\n if isinstance(left, VarSymbol):\n left_symbol = self.GLOBAL_MEMORY[left.name]\n else:\n left_symbol = left\n\n if isinstance(right, VarSymbol):\n right_symbol = self.GLOBAL_MEMORY[right.name]\n else:\n right_symbol = right\n\n if node.operator.type == TokenType.PLUS:\n return self.builder.fadd(left_symbol, right_symbol, \"addtmp\")\n elif node.operator.type == TokenType.MINUS:\n return self.builder.fsub(left_symbol, right_symbol, \"subtmp\")\n elif node.operator.type == TokenType.MUL:\n return self.builder.fmul(left_symbol, right_symbol, \"multmp\")\n elif node.operator.type == TokenType.INTEGER_DIV:\n return self.builder.fdiv(left_symbol, right_symbol, \"udivtmp\")\n elif node.operator.type == TokenType.FLOAT_DIV:\n return self.builder.fdiv(left_symbol, right_symbol, \"fdivtmp\")",
"def __compile_operator(self, op, caller):\r\n if op == \"+\":\r\n self.__vmwriter.write_arithmetic(\"add\")\r\n elif op == \"-\" and caller == \"expression\":\r\n self.__vmwriter.write_arithmetic(\"sub\")\r\n elif op == \"*\":\r\n self.__vmwriter.write_call(\"Math.multiply\", 2)\r\n elif op == \"/\":\r\n self.__vmwriter.write_call(\"Math.divide\", 2)\r\n elif op == \"&\":\r\n self.__vmwriter.write_arithmetic(\"and\")\r\n elif op == \"|\":\r\n self.__vmwriter.write_arithmetic(\"or\")\r\n elif op == \"<\":\r\n self.__vmwriter.write_arithmetic(\"lt\")\r\n elif op == \">\":\r\n self.__vmwriter.write_arithmetic(\"gt\")\r\n elif op == \"=\":\r\n self.__vmwriter.write_arithmetic(\"eq\")\r\n elif op == \"-\":\r\n self.__vmwriter.write_arithmetic(\"neg\")\r\n elif op == \"~\":\r\n self.__vmwriter.write_arithmetic(\"not\")",
"def mutate_bySingleOperator(self, root, operator):\n self.operator = operator\n\n ast.fix_missing_locations(root)\n # traverse the target ast tree and mutate interesting node\n mutated_ast = self.visit(root)\n ast.fix_missing_locations(root)\n\n return mutated_ast",
"def get_fermion_operator(operator):\n fermion_operator = FermionOperator()\n\n if isinstance(operator, PolynomialTensor):\n for term in operator:\n fermion_operator += FermionOperator(term, operator[term])\n return fermion_operator\n\n raise TypeError(\"Unsupported type of oeprator {}\".format(operator))",
"def _onnx_node_to_singa_op(cls,\n onnx_node,\n inputs,\n opset_version=_known_opset_version):\n if onnx_node.op_type in cls._special_operators:\n translator = getattr(cls, cls._special_operators[onnx_node.op_type])\n else:\n translator = cls._common_onnx_node_to_singa_op\n return translator(onnx_node, inputs, opset_version)",
"def translate(expr):\n return from_python(ast.parse(expr))",
"def all_math(operator):\n a = int(request.args.get(\"a\"))\n b = int(request.args.get(\"b\"))\n return str(functions[operator](a,b))",
"def convert_exp(node, **kwargs):\n return create_basic_op_node('Exp', node, kwargs)",
"def apply(expr, fun_annotate_subexpr = None):\n assert isinstance(expr, Expression)\n t = type(expr)\n if t is Op:\n try:\n pre, suff = ExprTranslator.OPS_TO_SMTLIB[expr.id]\n return ExprTranslator.subexpr_to_smtlib(expr, pre, suff, fun_annotate_subexpr)\n except KeyError:\n raise Exception(str(expr.id) + ': operation not supported!')\n\n elif t is Var:\n return expr.get_text()\n elif t is ConstInt or t is ConstBool or t is ConstReal:\n return str(expr.get_text())\n elif t is ExprHole:\n return expr.hole_decl.get_function_call()\n else:\n raise Exception(str(t)+': expression type not supported!')",
"def applyOperator(self, operand1, operand2, operator):\n\n if operator == \"*\":\n return operand1 * operand2\n elif operator == \"/\":\n return operand1 / operand2\n elif operator == \"+\":\n return operand1 + operand2\n else:\n return operand1 - operand2",
"def vector_to_operator(op):\n if not op.isoperket:\n raise TypeError(\"only defined for operator-kets\")\n if op.superrep != \"super\":\n raise TypeError(\"only defined for operator-kets in super format\")\n dims = op.dims[0]\n return Qobj(unstack_columns(op.data, (np.prod(dims[0]), np.prod(dims[1]))),\n dims=dims,\n copy=False)",
"def funcOpExchange(expstr):\n funcOpDict = expr.getFuncOpDict() \n for funcstr in funcOpDict:\n idx = expstr.find(funcstr)\n if idx >= 0:\n #if we find a function string at idx\n if (idx == 0 or not expstr[idx-1].isalpha()) and expstr[idx+len(funcstr)] == '(':\n fstart = idx\n fstop = 0\n rest = expstr[idx:]\n pdepth = 0\n for i,c in enumerate(rest):\n if c == '(':\n pdepth += 1\n if c == ')':\n pdepth -= 1\n if pdepth == 0:\n fstop = idx+i+1\n break\n start = expstr[:fstart]\n middle = expstr[fstart:fstop]\n end = expstr[fstop:]\n args = ['('+funcOpExchange(exp)+')' for exp in funcargs(middle)]\n if len(args) == 1:\n args.append('0')\n expstr = start+funcOpDict[funcstr].join(args)+funcOpExchange(end)\n return expstr",
"def gen_binop(self, expr: expressions.BinaryOperator):\n if expr.op in [\"*\", \"/\", \"%\", \"^\", \"|\", \"&\", \">>\", \"<<\"]:\n lhs = self.gen_expr(expr.a, rvalue=True)\n rhs = self.gen_expr(expr.b, rvalue=True)\n op = expr.op\n\n ir_typ = self.get_ir_type(expr.typ)\n value = self.builder.emit_binop(lhs, op, rhs, ir_typ)\n elif expr.op == \",\":\n # Handle the comma operator by returning the second result\n self.gen_expr(expr.a, rvalue=True)\n rhs = self.gen_expr(expr.b, rvalue=True)\n value = rhs\n elif expr.op == \"+\":\n # Pay attention to pointer arithmetics!\n lhs = self.gen_expr(expr.a, rvalue=True)\n rhs = self.gen_expr(expr.b, rvalue=True)\n\n # left and right are swapped in semantics if right is pointer.\n if expr.a.typ.is_pointer:\n assert expr.b.typ.is_integer\n esize = self.sizeof(expr.a.typ.element_type)\n assert esize > 0\n if esize != 1:\n esize = self.emit(ir.Const(esize, \"esize\", rhs.ty))\n rhs = self.builder.emit_mul(rhs, esize, rhs.ty)\n rhs = self.builder.emit_cast(rhs, ir.ptr)\n\n ir_typ = self.get_ir_type(expr.typ)\n value = self.builder.emit_binop(lhs, \"+\", rhs, ir_typ)\n elif expr.op == \"-\":\n # Pay attention to pointer arithmetics!\n lhs = self.gen_expr(expr.a, rvalue=True)\n rhs = self.gen_expr(expr.b, rvalue=True)\n ir_typ = self.get_ir_type(expr.typ)\n if expr.a.typ.is_pointer:\n esize = self.sizeof(expr.a.typ.element_type)\n assert esize > 0\n if expr.b.typ.is_pointer:\n # pointer - pointer\n value = self.builder.emit_binop(lhs, \"-\", rhs, ir.ptr)\n value = self.emit(ir.Cast(value, \"typecast\", ir_typ))\n if esize != 1:\n esize = self.emit(ir.Const(esize, \"esize\", ir_typ))\n value = self.emit(\n ir.Binop(value, \"/\", esize, \"rhs\", ir_typ)\n )\n else:\n # pointer - numeric\n if esize != 1:\n esize = self.emit(ir.Const(esize, \"esize\", rhs.ty))\n rhs = self.builder.emit_mul(rhs, esize, rhs.ty)\n rhs = self.builder.emit_cast(rhs, ir.ptr)\n value = self.builder.emit_binop(lhs, \"-\", rhs, ir_typ)\n else:\n # numeric - numeric\n value = self.builder.emit_binop(lhs, \"-\", rhs, ir_typ)\n\n elif expr.op in [\"<\", \">\", \"==\", \"!=\", \"<=\", \">=\", \"||\", \"&&\"]:\n value = self.gen_condition_to_integer(expr)\n elif expr.op in [\n \"=\",\n \"+=\",\n \"-=\",\n \"*=\",\n \"%=\",\n \"/=\",\n \">>=\",\n \"<<=\",\n \"&=\",\n \"|=\",\n \"~=\",\n \"^=\",\n ]:\n # Handle struct assignment special case:\n if expr.op == \"=\" and expr.a.typ.is_struct:\n lhs = self.gen_expr(expr.a, rvalue=False)\n rhs = self.gen_expr(expr.b, rvalue=False)\n amount = self.sizeof(expr.a.typ)\n self.gen_copy_struct(lhs, rhs, amount)\n value = None\n else:\n lhs = self.gen_expr(expr.a, rvalue=False)\n rhs = self.gen_expr(expr.b, rvalue=True)\n\n if expr.op == \"=\":\n value = rhs\n else:\n # Handle '+=' and friends:\n op = expr.op[:-1]\n ir_typ = self.get_ir_type(expr.typ)\n loaded = self._load_value(lhs, expr.typ)\n\n # pointer arithmatic:\n if op in [\"+\", \"-\"] and expr.a.typ.is_pointer:\n esize = self.sizeof(expr.a.typ.element_type)\n assert esize > 0\n if esize != 1:\n esize = self.emit(ir.Const(esize, \"esize\", rhs.ty))\n rhs = self.builder.emit_mul(rhs, esize, rhs.ty)\n\n value = self.builder.emit_binop(loaded, op, rhs, ir_typ)\n self._store_value(value, lhs)\n else: # pragma: no cover\n raise NotImplementedError(str(expr.op))\n return value",
"def onnx_extract_operator(node, model, nodes_dict):\n op_type = node.op_type\n input_tensors = []\n output_tensors = []\n\n \"\"\" input_tensors\n each input_tensor has its own soure op, but same dest op\n so both have single string\n \"\"\"\n input_names = []\n # name list\n input_tensor_names = node.input\n for input_tensor_name in input_tensor_names:\n origin_tensor_name, input_tensor_name = util.names_from_input(input_tensor_name)\n try:\n pre_node = nodes_dict[nodes_dict[origin_tensor_name]].node\n except BaseException:\n pre_node = nodes_dict[origin_tensor_name].node\n \n data = None\n if pre_node in model.initializer():\n data = to_array(pre_node)\n else:\n if (pre_node not in model.graph().input) and (pre_node.op_type == 'Constant'):\n data = to_array(pre_node.attribute[0].t)\n if isinstance(data, np.ndarray):\n dtype = util.get_data_dtype(data)\n shape = list(data.shape) if data.shape != () else [1]\n input_tensor = Tensor(name=input_tensor_name,\n source_op=[],\n dest_op=[node.name],\n shape=shape,\n data=data,\n dtype=dtype\n )\n input_tensors.append(input_tensor)\n\n else:\n input_tensor = Tensor(name=input_tensor_name,\n source_op=[pre_node.name],\n dest_op=[node.name],\n shape=None,\n data=None,\n dtype=None\n )\n input_tensors.append(input_tensor)\n input_names.append(node.name)\n\n \"\"\" output_tensors\n in onnx, NodeProto has the output attribute\n \"\"\"\n output_tensor_names = node.output\n for output_tensor_name in output_tensor_names:\n output_tensor_name = util.names_from_input(output_tensor_name)[1]\n output_tensor = Tensor(name=output_tensor_name,\n source_op=[node.name],\n dest_op=nodes_dict[node.name].outputs,\n shape=None,\n data=None,\n dtype=None\n )\n\n output_tensors.append(output_tensor)\n\n return op_type, input_tensors, output_tensors",
"def convert(ast):\n\n if ast and ast.type == \"Function\":\n # Activity function conversion\n if (\n ast.name != \"molecularActivity\"\n and ast.name in belspec[\"namespaces\"][\"Activity\"][\"list\"]\n ):\n print(\"name\", ast.name, \"type\", ast.type)\n ast = convert_activity(ast)\n return ast # Otherwise - this will trigger on the BEL2 molecularActivity\n\n # translocation conversion\n elif ast.name in [\"tloc\", \"translocation\"]:\n ast = convert_tloc(ast)\n\n fus_flag = False\n for idx, arg in enumerate(ast.args):\n if arg.__class__.__name__ == \"Function\":\n\n # Fix substitution -> variation()\n if arg.name in [\"sub\", \"substitution\"]:\n ast.args[idx] = convert_sub(arg)\n\n elif arg.name in [\"trunc\", \"truncation\"]:\n ast.args[idx] = convert_trunc(arg)\n\n elif arg.name in [\"pmod\", \"proteinModification\"]:\n ast.args[idx] = convert_pmod(arg)\n\n elif arg.name in [\"fus\", \"fusion\"]:\n fus_flag = True\n\n # Recursively process Functions\n ast.args[idx] = convert(ast.args[idx])\n\n if fus_flag:\n ast = convert_fus(ast)\n\n return ast",
"def opsplit(expstr):\n\n #ops are the one char operators (sorted on precidence)\n ops = expr.getOps()\n #Remove outer parentesis if we have them\n if expstr[0] == '(' and expstr[-1] == ')' and balanced(expstr[1:-1]):\n expstr = expstr[1:-1]\n #Add a '0' to the beginning of the string if we start with an operator\n if expstr[0] in ops:\n expstr = '0'+expstr\n for op in ops:\n pc = 0\n cc = len(expstr)-1\n revexpstr = list(expstr)\n revexpstr.reverse()\n #Search for the operator backwards (to preserve operator presidence)\n for c in revexpstr:\n if c == '(':\n pc += 1\n elif c == ')':\n pc -= 1\n if c == op and pc == 0:\n #Build the tree recursively\n return [op,opsplit(expstr[:cc]),opsplit(expstr[cc+1:])]\n cc -=1\n #if we find something that looks like a function, parse it separately \n if funcpattern(expstr):\n fnamestr = funcname(expstr)\n fargs = funcargs(expstr)\n farglist = [opsplit(arg) for arg in fargs]\n return [fnamestr]+farglist\n return expstr",
"def __init__(self):\n super(OperatorCodegen, self).__init__()"
] | [
"0.7269387",
"0.68856734",
"0.67033535",
"0.6680751",
"0.6593878",
"0.65538615",
"0.6333541",
"0.6330422",
"0.63046694",
"0.6274489",
"0.5995828",
"0.5922164",
"0.5883966",
"0.5829021",
"0.5820589",
"0.5794242",
"0.5792682",
"0.57558066",
"0.5689504",
"0.5672094",
"0.56409925",
"0.5610246",
"0.56021297",
"0.5596652",
"0.55908126",
"0.558397",
"0.5561989",
"0.55503595",
"0.5549611",
"0.5534024"
] | 0.740287 | 0 |
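Both op_to_function variants above turn an AST operator node into an attribute lookup by lower-casing the node's class name and passing it through a translation table; the operator_table and inplace_operator_table they consult are not part of this excerpt. A hedged reconstruction of the lookup, resolving directly against the standard operator module, could look like this (the table entries shown are assumptions covering only a few operators):

import ast
import operator

# ast class name (lower-cased) -> operator-module function name, where the two differ.
OPERATOR_TABLE = {"mult": "mul", "div": "truediv", "matmult": "matmul"}

def op_to_callable(op_node):
    # e.g. ast.Mult -> "mult" -> operator.mul
    name = type(op_node).__name__.lower()
    return getattr(operator, OPERATOR_TABLE.get(name, name))

binop = ast.parse("a * b", mode="eval").body   # ast.BinOp whose .op is ast.Mult
print(op_to_callable(binop.op)(6, 7))          # 42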
Convert list literal to function call. | def visit_List(self, node):
self.generic_visit(node)
if isinstance(node.ctx, ast.Load):
return to_call(to_attribute(self.operator, '__list__'), node.elts)
return node | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _maplist_vm(vm, f, xs):\n def f_(*args):\n return vm.call(f, args)\n return list(map(f_, xs))",
"def listify(arg):\n if isinstance(arg, list):\n return arg\n else:\n return [arg]",
"def eval_f(f, xs):\n l = []\n for x in xs:\n l.append(f(x))\n return l",
"def list_sugar(self):\n return 'list(', ')'",
"def preprocess_literal(op: str, literal: Any) -> Expression:\n if isinstance(literal, (list, tuple)):\n if op not in [\"IN\", \"NOT IN\"]:\n raise ParsingException(\n (\n f\"Invalid operator {op} for literal {literal}. Literal is a sequence. \"\n \"Operator must be IN/NOT IN\"\n ),\n report=False,\n )\n literals = tuple([Literal(None, lit) for lit in literal])\n return FunctionCall(None, \"tuple\", literals)\n else:\n if op in [\"IN\", \"NOT IN\"]:\n raise ParsingException(\n (\n f\"Invalid operator {op} for literal {literal}. Literal is not a sequence. \"\n \"Operator cannot be IN/NOT IN\"\n ),\n report=False,\n )\n return Literal(None, literal)",
"def escape_list(mylist, escape_func):\n def escape(obj, escape_func=escape_func):\n try:\n e = obj.escape\n except AttributeError:\n return obj\n else:\n return e(escape_func)\n return list(map(escape, mylist))",
"def as_list(arg):\n if _is_list(arg):\n return arg\n return [arg]",
"def test_listlist_op_1():\n\n @ops.listlist_op\n def f(x):\n return [4, 5, 6]\n\n result = f([1, 2, 3]) # Passing in a list, as expected\n\n assert(isinstance(result, list)), f\"{result}\"\n assert(result == [4, 5, 6])",
"def convert_list(f, parameters):\n variables = f[0].arguments()\n varpar = list(parameters) + list(variables)\n F = symbolic_expression([i(*variables) for i in f]).function(*varpar)\n lis = flatten([fast_callable(i,vars=varpar).op_list() for i in F], max_level=1)\n deflist = []\n stack = []\n const =[]\n stackcomp=[]\n detail=[]\n for i in lis:\n if i[0] == 'load_arg':\n stack.append(varpar[i[1]])\n elif i[0] == 'ipow':\n if i[1] in NN:\n basis = stack[-1]\n for j in range(i[1]-1):\n\t a=stack.pop(-1)\n\t detail.append(('mul', a, basis))\n\t stack.append(a*basis)\n\t stackcomp.append(stack[-1])\n else:\n detail.append(('pow',stack[-1],i[1]))\n stack[-1]=stack[-1]**i[1]\n stackcomp.append(stack[-1])\n\n elif i[0] == 'load_const':\n const.append(i[1])\n stack.append(i[1])\n elif i == 'mul':\n a=stack.pop(-1)\n b=stack.pop(-1)\n detail.append(('mul', a, b))\n stack.append(a*b)\n stackcomp.append(stack[-1])\n\n elif i == 'div':\n a=stack.pop(-1)\n b=stack.pop(-1)\n detail.append(('div', a, b))\n stack.append(b/a)\n stackcomp.append(stack[-1])\n\n elif i == 'add':\n a=stack.pop(-1)\n b=stack.pop(-1)\n detail.append(('add',a,b))\n stack.append(a+b)\n stackcomp.append(stack[-1])\n\n elif i == 'pow':\n a=stack.pop(-1)\n b=stack.pop(-1)\n detail.append(('pow', b, a))\n stack.append(b**a)\n stackcomp.append(stack[-1])\n\n elif i[0] == 'py_call' and str(i[1])=='log':\n a=stack.pop(-1)\n detail.append(('log', a))\n stack.append(log(a))\n stackcomp.append(stack[-1])\n\n elif i[0] == 'py_call' and str(i[1])=='exp':\n a=stack.pop(-1)\n detail.append(('exp', a))\n stack.append(exp(a))\n stackcomp.append(stack[-1])\n\n elif i[0] == 'py_call' and str(i[1])=='sin':\n a=stack.pop(-1)\n detail.append(('sin', a))\n detail.append(('cos', a))\n stackcomp.append(sin(a))\n stackcomp.append(cos(a))\n stack.append(sin(a))\n\n elif i[0] == 'py_call' and str(i[1])=='cos':\n a=stack.pop(-1)\n detail.append(('sin', a))\n detail.append(('cos', a))\n stackcomp.append(sin(a))\n stackcomp.append(cos(a))\n stack.append(cos(a))\n\n elif i == 'neg':\n a = stack.pop(-1)\n detail.append(('mul', -1, a))\n stack.append(-a)\n stackcomp.append(-a)\n\n return stackcomp,detail",
"def list(self, arg: SeField[Any]) -> str:\n if is_bare_list(arg.type):\n return arg.varname\n else:\n earg = arg[0]\n earg.name = \"v\"\n return f\"[{self.render(earg)} for v in {arg.varname}]\"",
"def process_list_arg(arg):\n if isinstance(arg, list):\n return arg\n elif isinstance(arg, basestring):\n args = []\n for part in arg.split(\",\"):\n args.append(part.strip())\n return args",
"def target_list_option(s):\n return _convert(s, (list, tuple))",
"def from_list(l):\n if isinstance(l, str):\n for special_char in (' ', '\\n', '\\t', '(', ')', '\\\"'):\n if special_char in l:\n return '\\\"' + l + '\\\"'\n return l\n return '(' + ' '.join(from_list(e) for e in l) + ')'",
"def list_option(s):\n return _convert(s, (list, tuple))",
"def lmap(f: Callable, *xs) -> list:\n return list(map(f, *xs))",
"def _builtin_split_call(term, parts, database=None, location=None, **kwdargs):\n functor = '=..'\n # modes:\n # <v> =.. list => list has to be fixed length and non-empty\n # IF its length > 1 then first element should be an atom\n # <n> =.. <list or var>\n #\n mode = check_mode((term, parts), ['vL', 'nv', 'nl'], functor=functor, **kwdargs)\n if mode == 0:\n elements, tail = list_elements(parts)\n if len(elements) == 0:\n raise CallModeError(functor, (term, parts),\n message='non-empty list for arg #2 if arg #1 is a variable',\n location=database.lineno(location))\n elif len(elements) > 1 and not _is_atom(elements[0]):\n raise CallModeError(functor, (term, parts),\n message='atom as first element in list if arg #1 is a variable',\n location=database.lineno(location))\n elif len(elements) == 1:\n # Special case => term == parts[0]\n return [(elements[0], parts)]\n else:\n term_part = elements[0](*elements[1:])\n return [(term_part, parts)]\n else:\n part_list = (term.with_args(),) + term.args\n current = Term('[]')\n for t in reversed(part_list):\n current = Term('.', t, current)\n try:\n local_values = {}\n list_part = unify_value(current, parts, local_values)\n elements, tail = list_elements(list_part)\n term_new = elements[0](*elements[1:])\n term_part = unify_value(term, term_new, local_values)\n return [(term_part, list_part)]\n except UnifyError:\n return []",
"def handle_list(list_name, list, args):\n if not args:\n return list\n else:\n len(args) == 1 or syntax_error(\"Wrong number of args for list expression.\")\n try:\n return list[int(args[0])]\n except ValueError:\n syntax_error(\"Invald index value: '%s'\" % args[0])\n except IndexError:\n syntax_error(\"Index out of range in '%s': %d\" % (list_name, index))",
"def Listor(fun):\n @functools.wraps(fun)\n def inside(*args, **kwargs):\n return list(fun(*args, **kwargs))\n return inside",
"def maplist(f, xs):\n return list(map(f, xs))",
"def decorator(arg):\n return lambda: list(arg)",
"def my_evalf(expr, chop=False):\r\n if type(expr) == list:\r\n try:\r\n return [x.evalf(chop=chop) for x in expr]\r\n except:\r\n return expr\r\n try:\r\n return expr.evalf(chop=chop)\r\n except:\r\n return expr",
"def __call__(self, X, Y=None, eval_gradient=False):\n return [f(X, Y=Y, eval_gradient=eval_gradient) for f in self.list_func]",
"def map_(func, some_list):\n \n result = []\n \n for arg in some_list:\n result.append(func(arg))\n \n return result",
"def func_deserialize(self, args): # pragma: no cover\n if len(args) == 0:\n return []\n x = eval(args.decode(\"utf-8\"))\n return x",
"def eval_f(f, xs):\n res_list = []\n for num in xs:\n #int_num = int(num)\n fun_num = f(num)\n res_list.append(fun_num)\n\n return res_list",
"def mk_sql_list(ls):\n res = \"(\" + ' '.join([str(elem) for elem in intersperse(\",\", ls)]) + \")\"\n return res",
"def f(*args):\n alist = [a() for a in args]\n print(alist)",
"def test_expr_list_array_constructor():\n fcode = \"ACOS(-1.0), SIN(1.0), 1.0+3.0\"\n ast = Fortran2003.Ac_Spec(fcode)\n assert isinstance(ast, Fortran2003.Ac_Value_List)",
"def evlis(targetlist, a_list, d_list):\n if targetlist.null():\n return SExp(\"NIL\")\n return SExp(eval_lisp(targetlist.car(), a_list, d_list),\n evlis(targetlist.cdr(), a_list, d_list))",
"def cast_to_list(position):\n\n\[email protected]\n\tdef wrapper(function, instance, args, kwargs):\n\t\tif not isinstance(args[position], list):\n\t\t\targs = list(args)\n\t\t\targs[position] = [args[position]]\n\t\t\targs = tuple(args)\n\n\t\treturn function(*args, **kwargs)\n\n\treturn wrapper"
] | [
"0.6245148",
"0.61611956",
"0.60648584",
"0.6038298",
"0.59792304",
"0.59553707",
"0.59013915",
"0.5873387",
"0.5854381",
"0.5801732",
"0.57718545",
"0.5754236",
"0.5722947",
"0.5686056",
"0.5682627",
"0.56783223",
"0.5655683",
"0.5624097",
"0.5621373",
"0.5564674",
"0.55535793",
"0.55439407",
"0.55419135",
"0.55281377",
"0.55103797",
"0.5499892",
"0.5498203",
"0.5474634",
"0.54696244",
"0.54537505"
] | 0.6453626 | 0 |
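The visit_List record above, and the visit_Tuple and visit_Set records that follow, all apply one pattern: a literal in Load context is replaced by a call whose arguments are the literal's elements, while store-context literals (unpacking targets) are left untouched. The sketch below shows the list case with a hypothetical make_list callee; the __list__/__tuple__/__set__ attributes used in the records live on an operator object that is not included here.

import ast

class LiteralToCall(ast.NodeTransformer):
    # Replace list literals in Load context with calls to a hypothetical make_list().
    def visit_List(self, node):
        self.generic_visit(node)
        if isinstance(node.ctx, ast.Load):
            func = ast.Name(id="make_list", ctx=ast.Load())
            return ast.Call(func=func, args=node.elts, keywords=[])
        return node  # e.g. "[a, b] = pair" keeps its List target

tree = ast.fix_missing_locations(LiteralToCall().visit(ast.parse("xs = [a, b, c]")))
print(ast.unparse(tree))  # xs = make_list(a, b, c)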
Convert tuple literal to function call. | def visit_Tuple(self, node):
self.generic_visit(node)
if isinstance(node.ctx, ast.Load):
return to_call(to_attribute(self.operator, '__tuple__'), node.elts)
return node | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def eval_func_tuple(f_args):\n return f_args[0](*f_args[1:])",
"def tuple(x):\n pass",
"def parse_tuple(value):\n match = re.match(r'(\\w+)=(\\w+)\\((.*?)\\)', value)\n assert match, \"could not parse '%s'\" % value\n return match.group(1), eval(match.group(2))(match.group(3))",
"def func_call(self, t):\n func, params = t\n func_name = func.value\n func.value = \"({}({}))\".format(func_name, params)\n return func",
"def from_literal(tup):\n\n def expand(vals):\n return [from_literal(x) for x in vals]\n\n def union(vals):\n if not isinstance(vals, tuple):\n vals = (vals,)\n v = expand(vals)\n return frozenset(v)\n\n if not isinstance(tup, tuple):\n return ('prim', tup)\n elif isinstance(tup[0], str):\n tag, *vals = tup\n if tag == 'prim':\n return tup\n elif tag == 'tuple':\n params = tuple(expand(vals))\n return (tag, params)\n elif tag == 'map':\n k, v = vals\n return (tag, (union(k), union(v)))\n else:\n vals, = vals # pylint: disable=self-assigning-variable\n return (tag, union(vals))\n else:\n return tuple(expand(tup))",
"def map_generate_tuple(*args):\n key, func, arg = args[0][0], args[0][1], args[0][2]\n return (key, func(*arg))",
"def tuple2func(func1, func2):\n return lambda e: (func1(e), func2(e))",
"def dec_tupl(fnc,*data):\n def wrapper(*args, **kwargs):\n return (fnc(*args,**kwargs),) + data\n return wrapper",
"def get_call_value(call_node):\n s = get_name_value(call_node.func)\n if isinstance(call_node.func.ctx, ast.Load):\n # convert ast args to literals\n args = [convert_arg(a) for a in call_node.args]\n # suround literal strings with a set of quotes for easy placing into\n # a string\n args = ['\"' + a + '\"' if isinstance(a, str) else a for a in args]\n # join all the args into a set of parens\n s += \"(\" + \",\".join(args) + \")\"\n return s",
"def tuple_from_sequence(*args):\n return tuple(args)",
"def convert_to_tuple(self, tuple_str):\n return ast.literal_eval(tuple_str)",
"def _(self, node: Call):\n\n args = []\n for n in node.arguments:\n args.append(self.visit(n))\n\n func_args = \" \".join(args)\n\n return f\"( call {node.func.name} {func_args} )\"",
"def tuple(self, arg: SeField[Any]) -> str:\n if is_bare_tuple(arg.type):\n return arg.varname\n elif is_variable_tuple(arg.type):\n earg = arg[0]\n earg.name = \"v\"\n return f\"tuple({self.render(earg)} for v in {arg.varname})\"\n else:\n rvalues = []\n for i, _ in enumerate(type_args(arg.type)):\n r = arg[i]\n r.name = f\"{arg.varname}[{i}]\"\n rvalues.append(self.render(r))\n return f\"({', '.join(rvalues)},)\" # trailing , is required for single element tuples",
"def process_tuple(self, raw_tuple, sbj, rel, obj):\n pass",
"def main():\n sampleTuple = (100, 200, 300)\n print(tupleStrFormat(sampleTuple))",
"def test_star_args_with_tuple():\n arg_tuple = ('blue', 'red', 'yellow', 'orange')\n assert arguments.fun_star_params(*arg_tuple) == ('blue', 'red', 'yellow',\n 'orange')",
"def call_statement(env, node):\n fun = env['f'][node.name]\n func_env = Environment(env).create(env['f'])\n args = fun['args'].interpret(env)\n call_args_interpretuated = node.args.interpret(env)\n args_counter = 0\n for arg in args:\n func_env['v'][arg] = call_args_interpretuated[args_counter].interpret(env)\n args_counter += 1\n fun['body'].interpret(func_env)\n return func_env['r']",
"def tuple_map(x):\n return x * 2",
"def _eval_str_tuple(value):\n if not (value.startswith('(') and value.endswith(')')):\n raise ValueError(value)\n\n orig_value = value\n value = value[1:-1]\n\n result = []\n while value:\n m = _strs.match(value)\n if m is None:\n raise ValueError(orig_value)\n\n result.append(m.group(1))\n value = value[len(m.group(0)):]\n\n return tuple(result)",
"def preprocess_literal(op: str, literal: Any) -> Expression:\n if isinstance(literal, (list, tuple)):\n if op not in [\"IN\", \"NOT IN\"]:\n raise ParsingException(\n (\n f\"Invalid operator {op} for literal {literal}. Literal is a sequence. \"\n \"Operator must be IN/NOT IN\"\n ),\n report=False,\n )\n literals = tuple([Literal(None, lit) for lit in literal])\n return FunctionCall(None, \"tuple\", literals)\n else:\n if op in [\"IN\", \"NOT IN\"]:\n raise ParsingException(\n (\n f\"Invalid operator {op} for literal {literal}. Literal is not a sequence. \"\n \"Operator cannot be IN/NOT IN\"\n ),\n report=False,\n )\n return Literal(None, literal)",
"def etuple(*args, **kwargs):\n return ExpressionTuple(args, **kwargs)",
"def make_tuple(tuple_like):\n tuple_like = (\n tuple_like\n if isinstance(tuple_like, (list, tuple))\n else (tuple_like, tuple_like)\n )\n return tuple_like",
"def generate_from_tuple(t):\n\n data_generator(*t)",
"def extract_tuple_function(programs, replacement_marker, validity_function):\n\n flat_programs=[]\n for p in programs:\n flat_programs+=p\n\n # Create a dictionary that will be used to translate lists of tuples to list of integers, and an inverse dictionary to translate in the reverse way.\n d=tuple_int_translator(flat_programs)\n reverse_d={v: k for k, v in d.items()}\n reverse_d[-1]=replacement_marker\n\n int_programs=[[d[t] for t in p] for p in programs]\n\n # Define a new validity function that works on lists of integers rather than lists of tuples, using the provided validity function.\n def int_validity_function(program):\n tuple_program=[reverse_d[i] for i in program]\n return validity_function(tuple_program)\n\n function, new_int_programs=extract_int_function(int_programs, -1, int_validity_function)\n\n tuple_function=[reverse_d[i] for i in function]\n\n new_programs=[[reverse_d[i] for i in p] for p in new_int_programs]\n\n return new_programs, tuple_function",
"def strtuple(iterable): \n string = ''\n function = type(strtuple)\n for i in iterable:\n if isinstance(i , function):\n string += i.__name__ + ', '\n else:\n string += str(i) + ', '\n string = string.rstrip(', ')\n string = '(' + string + ')'\n return string",
"def _unpack_tuple(x):\n if len(x) == 1:\n return x[0]\n else:\n return x",
"def _unpack_tuple(x):\n if len(x) == 1:\n return x[0]\n else:\n return x",
"def _convert_to_tuple(r):\n if not r:\n return r\n else:\n return (r[\"token\"], r[\"value\"], r[\"code\"], r[\"address\"],)",
"def parse_tuple(tuple_string):\n return tuple_string.strip().strip(\"\\\"[]\")",
"def process_let_binding(binding):\n if isinstance(binding[0], str):\n return tuple(binding)\n elif isinstance(binding[0], list):\n name = binding[0][0]\n params = binding[0][1:]\n body = binding[1]\n return tuple([name, ['lambda', params, body]])\n else:\n raise SnekEvaluationError('let binding pair cannot process type {}'.format(type(binding)))"
] | [
"0.7458055",
"0.63818896",
"0.6287098",
"0.62121403",
"0.6174",
"0.61714363",
"0.61672425",
"0.61028904",
"0.6084416",
"0.606182",
"0.6054684",
"0.6038045",
"0.5894287",
"0.58655924",
"0.5855375",
"0.5848195",
"0.58276147",
"0.58071357",
"0.57829064",
"0.57389504",
"0.56750906",
"0.56040084",
"0.5529752",
"0.54964846",
"0.5477384",
"0.5474606",
"0.5474606",
"0.5468391",
"0.5445261",
"0.54175663"
] | 0.65799105 | 1 |
Convert set literal to function call. | def visit_Set(self, node):
self.generic_visit(node)
return to_call(to_attribute(self.operator, '__set__'), node.elts) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set(self, arg: SeField[Any]) -> str:\n if is_bare_set(arg.type):\n return f\"list({arg.varname}) if convert_sets else {arg.varname}\"\n else:\n earg = arg[0]\n earg.name = \"v\"\n return (\n f\"[{self.render(earg)} for v in {arg.varname}] \"\n f\"if convert_sets else set({self.render(earg)} for v in {arg.varname})\"\n )",
"def _apply_to_sets(self, func, operation, keys, *args):\n keys = self._list_or_args(keys, args)\n if not keys:\n raise TypeError(\"{} takes at least two arguments\".format(operation.lower()))\n left = self._get_set(keys[0], operation) or set()\n for key in keys[1:]:\n right = self._get_set(key, operation) or set()\n left = func(left, right)\n return left",
"def set(x):\n pass",
"def parse_set_cmd(self, line):\n _, set_type, var_name, _, set_name = line.split()\n if set_type not in SET_TYPES:\n self.print_error(f\"Currently can't set system '{set_type}'.\"\n + \" Please choose from:\\n\\t* \"\n + \"\\n\\t* \".join(SET_TYPES)\n )\n else:\n set_fnc = f\"parse_set_{set_type}\"\n if set_fnc not in dir(self):\n self.print_error(\"BUG IN CODE! Tell Matt that he needs to \"\n + f\"implement the function '{set_fnc}'\")\n\n getattr(self, set_fnc)(line)",
"def imageset(*args):\n from .fancysets import ImageSet\n from .setexpr import set_function\n\n if len(args) < 2:\n raise ValueError('imageset expects at least 2 args, got: %s' % len(args))\n\n if isinstance(args[0], (Symbol, tuple)) and len(args) > 2:\n f = Lambda(args[0], args[1])\n set_list = args[2:]\n else:\n f = args[0]\n set_list = args[1:]\n\n if isinstance(f, Lambda):\n pass\n elif callable(f):\n nargs = getattr(f, 'nargs', {})\n if nargs:\n if len(nargs) != 1:\n raise NotImplementedError(filldedent('''\n This function can take more than 1 arg\n but the potentially complicated set input\n has not been analyzed at this point to\n know its dimensions. TODO\n '''))\n N = nargs.args[0]\n if N == 1:\n s = 'x'\n else:\n s = [Symbol('x%i' % i) for i in range(1, N + 1)]\n else:\n s = inspect.signature(f).parameters\n\n dexpr = _sympify(f(*[Dummy() for i in s]))\n var = tuple(uniquely_named_symbol(\n Symbol(i), dexpr) for i in s)\n f = Lambda(var, f(*var))\n else:\n raise TypeError(filldedent('''\n expecting lambda, Lambda, or FunctionClass,\n not \\'%s\\'.''' % func_name(f)))\n\n if any(not isinstance(s, Set) for s in set_list):\n name = [func_name(s) for s in set_list]\n raise ValueError(\n 'arguments after mapping should be sets, not %s' % name)\n\n if len(set_list) == 1:\n set = set_list[0]\n try:\n # TypeError if arg count != set dimensions\n r = set_function(f, set)\n if r is None:\n raise TypeError\n if not r:\n return r\n except TypeError:\n r = ImageSet(f, set)\n if isinstance(r, ImageSet):\n f, set = r.args\n\n if f.variables[0] == f.expr:\n return set\n\n if isinstance(set, ImageSet):\n # XXX: Maybe this should just be:\n # f2 = set.lambda\n # fun = Lambda(f2.signature, f(*f2.expr))\n # return imageset(fun, *set.base_sets)\n if len(set.lamda.variables) == 1 and len(f.variables) == 1:\n x = set.lamda.variables[0]\n y = f.variables[0]\n return imageset(\n Lambda(x, f.expr.subs(y, set.lamda.expr)), *set.base_sets)\n\n if r is not None:\n return r\n\n return ImageSet(f, *set_list)",
"def set_or_callable(value) -> frozenset[str] | Callable:\n if value is None:\n return frozenset()\n if callable(value):\n return value\n if isinstance(value, (frozenset, set, list)):\n return frozenset(value)\n return frozenset([str(value)])",
"def set():\n pass",
"def test_calls_in_set_with_name_and_value_arguments_only(self):\n some_calls = set([Call(\"one\", 1), Call(\"one\", 1.1), Call(\"two\", 2, 2.2), Call(\"two\", 2, \"two\")])\n assert_that(Call(\"one\", 1) in some_calls, equal_to(True))\n assert_that(Call(\"one\", 2) in some_calls, equal_to(False))",
"def func_call(self, t):\n func, params = t\n func_name = func.value\n func.value = \"({}({}))\".format(func_name, params)\n return func",
"def set():",
"def _call_set(vecObj, val):\n res = vecObj.set(val)\n return res",
"def parseSet(cmds):\n if len(cmds) != 0:\n first = str.strip(cmds[0])\n if first[0] == 'w':\n pass\n elif first[0] == 'r':\n pass\n else:\n parseExpr(first)\n parseSet(cmds[1:])",
"def __call__(value):",
"def callable_time_set(self, callable_time_set):\n \n self._callable_time_set = callable_time_set",
"def _function_set(self, data_length=self.data_length, number_of_lines=self.number_of_lines, character_font=self.character_font):\n function_set_mask = 32\n data = funtion_set_mask | (data_length << 4) | (number_of_lines << 3) | (character_font << 2)\n\n self.instruction(data)",
"def setter(self, func):\n self.fset = func\n self.set_setattr_mode(SetAttr.CallObject_ObjectValue, func)\n return self",
"def _set_function(self):\n value = 0x20 | self.power | self.addressing | self.instr\n self.command([value])",
"def SetOperator(self, A):\n return _hypre.HypreLOBPCG_SetOperator(self, A)",
"def generate_setLike_operations_properties(interface, set_like):\n setlike_ops = []\n \"\"\"\n Need to create a typedef for a function callback e.g.,\n a setlike will need a callback that has the proper args in FontFaceSet that is\n three arguments, etc.\n\n typedef void FontFaceSetForEachCallback(\n FontFace fontFace, FontFace fontFaceAgain, FontFaceSet set);\n\n void forEach(FontFaceSetForEachCallback callback, [Object thisArg]);\n \"\"\"\n callback_name = '%sForEachCallback' % interface.id\n set_op = generate_operation(interface.id, 'void', 'forEach',\n [[IDLType(None, callback_name), 'callback'],\n [IDLType(None, 'any'), 'thisArg', True]])\n setlike_ops.append(set_op)\n\n set_op = generate_operation(\n interface.id, 'boolean', 'has',\n [[IDLType(None, set_like.value_type.base_type), 'arg']])\n setlike_ops.append(set_op)\n\n if not set_like.is_read_only:\n # Issue #45676: `add` can return null on Firefox, so this should be\n # typed nullable.\n add_result_nullable = True\n set_op = generate_operation(\n interface.id, interface.id, 'add',\n [[IDLType(None, set_like.value_type.base_type), 'arg']],\n add_result_nullable)\n setlike_ops.append(set_op)\n set_op = generate_operation(\n interface.id, 'boolean', 'delete',\n [[IDLType(None, set_like.value_type.base_type), 'arg']])\n setlike_ops.append(set_op)\n set_op = generate_operation(interface.id, 'void', 'clear', [])\n setlike_ops.append(set_op)\n\n return setlike_ops",
"def _setter_decor(self, fset):\n\n def fdec(obj):\n def _decor(fun):\n fset(obj, fun)\n return fun\n\n return _decor\n\n return self._init_inherit(fset=fset, fdec=fdec)",
"def get(self, opset: OpsetVersion) -> Optional[Set[Callable]]:\n return self._functions.get(opset)",
"def SetOperator(self, op):\n return _hypre.HypreGMRES_SetOperator(self, op)",
"def do_set(self, arg):\n try:\n statement, param_name, val = arg.parsed.raw.split(None, 2)\n val = val.strip()\n param_name = param_name.strip().lower()\n if param_name not in self.settable:\n hits = [p for p in self.settable if p.startswith(param_name)]\n if len(hits) == 1:\n param_name = hits[0]\n else:\n return self.do_show(param_name)\n current_val = getattr(self, param_name)\n if (val[0] == val[-1]) and val[0] in (\"'\", '\"'):\n val = val[1:-1]\n else:\n val = cast(current_val, val)\n setattr(self, param_name, val)\n self.poutput('%s - was: %s\\nnow: %s\\n' % (param_name, current_val, val))\n if current_val != val:\n try:\n onchange_hook = getattr(self, '_onchange_%s' % param_name)\n onchange_hook(old=current_val, new=val)\n except AttributeError:\n pass\n except (ValueError, AttributeError):\n self.do_show(arg)",
"def SetOperator(self, op):\n return _hypre.HypreADS_SetOperator(self, op)",
"def __set_operation_function(self):\n if self.operation_function is not None:\n return self.operation_function\n else:\n self.operation_function = symm_eval",
"def __init__(self, setfunc, column, role, convertfunc):\n super(SetDataArgs, self).__init__()\n self.setfunc = setfunc\n self.column = column\n self.role = role\n self.convertfunc = convertfunc",
"def do_set(self, arg):\n try:\n statement, paramName, val = arg.parsed.raw.split(None, 2)\n val = val.strip()\n paramName = paramName.strip().lower()\n if paramName not in self.settable:\n hits = [p for p in self.settable if p.startswith(paramName)]\n if len(hits) == 1:\n paramName = hits[0]\n else:\n return self.do_show(paramName)\n currentVal = getattr(self, paramName)\n if (val[0] == val[-1]) and val[0] in (\"'\", '\"'):\n val = val[1:-1]\n else:\n val = cast(currentVal, val)\n setattr(self, paramName, val)\n self.stdout.write('%s - was: %s\\nnow: %s\\n' % (paramName, currentVal, val))\n if currentVal != val:\n try:\n onchange_hook = getattr(self, '_onchange_%s' % paramName)\n onchange_hook(old=currentVal, new=val)\n except AttributeError:\n pass\n except (ValueError, AttributeError, NotSettableError):\n self.do_show(arg)",
"def _functionset(self):\n\t\n\t\t#Instruciton is set based on __init__ () arguments\n\t\tinstruction = 0b00100000\n\t\tinstruction = instruction | self.bit_mode\n\t\tinstruction = instruction | self.line_num\n\t\tinstruction = instruction | self.char_height\n\t\t\n\t\tself._send(instruction, RS_INSTRUCTION)",
"def _build_set_command(self, cmd, param, val):\n try:\n str_val = self._param_dict.format(param, val)\n set_cmd = '%s=%s' % (param, str_val)\n set_cmd = set_cmd + SBE37_NEWLINE\n \n except KeyError:\n raise InstrumentParameterException('Unknown driver parameter %s' % param)\n \n return set_cmd",
"def SetOperator(self, A):\n return _hypre.HypreAME_SetOperator(self, A)"
] | [
"0.6105887",
"0.60869235",
"0.6042422",
"0.594492",
"0.58734345",
"0.5806317",
"0.5636314",
"0.5612864",
"0.5483707",
"0.546873",
"0.54369307",
"0.5421345",
"0.5389135",
"0.5195353",
"0.5110589",
"0.5105132",
"0.5087218",
"0.5083517",
"0.5051965",
"0.50517464",
"0.5040997",
"0.50280774",
"0.50219023",
"0.50212556",
"0.501696",
"0.501289",
"0.50051874",
"0.4995381",
"0.49790686",
"0.4975022"
] | 0.6132109 | 0 |
This takes a string as an input parameter and treats it as a zip code, looks up the weather for that zipcode, and returns the current temperature at that zipcode in Fahrenheit. | def weather(zipcode):
URL = 'http://api.openweathermap.org/data/2.5/weather?zip=' + zipcode + ',us&appid=' + '7d7a3cf9902ef14f54f49f160fc8a550' + '&units=imperial'
webpage = urllib.request.urlopen(URL)
contents = webpage.read()
contents = contents.decode('ascii')
weather = eval(contents) #this line turns it from a string into dictionaries and lists
temperature = weather['main']['temp']
return temperature | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get(self, zipcode):\n response = hereService.getWeatherByZipcode(zipcode)\n return response",
"def get_current_temperature(zipcode, country):\n owm = pyowm.OWM(os.environ.get('OWM_KEY'))\n observation = owm.weather_at_zip_code(zipcode, country)\n weather = observation.get_weather()\n return {\n 'temperature': weather.get_temperature('fahrenheit')['temp'],\n 'wind_speed': weather.get_wind('miles_hour')['speed'],\n 'wind_direction': weather.get_wind('miles_hour')['deg'],\n 'humidity': weather.get_humidity(),\n 'status': weather.get_status(),\n }",
"def get_external_temp():\n baseurl = \"http://api.openweathermap.org/data/2.5/weather\"\n query = \"?q=salhouse&mode=xml\"\n url = baseurl + query\n r = requests.get(url)\n root = ET.fromstring(r.text)\n kelvin = float(root[1].attrib.get('value'))\n celcius = kelvin - 272.15\n return celcius",
"def get_city(zip_code):\r\n\r\n # API key, retrieved from configure.py\r\n api_key = configure.ZIP_KEY\r\n\r\n # API endpoint\r\n url = f'https://www.zipcodeapi.com/rest/{api_key}/info.json/{zip_code}/degrees'\r\n\r\n # API call\r\n response = requests.get(url)\r\n\r\n # Collect response in json format\r\n data = response.json()\r\n\r\n if 'error_code' in data or 'error_msg' in data:\r\n return {\r\n 'success': False,\r\n 'query': zip_code\r\n }\r\n\r\n else:\r\n return {\r\n 'success': True,\r\n 'query': data['zip_code'],\r\n 'city': data['city'],\r\n 'state': data['state'],\r\n 'lat': data['lat'],\r\n 'lon': data['lng']\r\n }",
"def temperature_f(self, tuple_data, status):\r\n fahr_search = Temperature.fahr.search(status)\r\n temperature = None\r\n try:\r\n if fahr_search != None:\r\n temperature = fahr_search.group(2).replace(\",\", \".\")\r\n temperature = float(temperature)\r\n else:\r\n celcius_search = Temperature.celcius.search(status)\r\n if celcius_search != None:\r\n temperature = celcius_search.group(2).replace(\",\", \".\")\r\n temperature = float(temperature)\r\n temperature = ((9.0/5) * temperature) + 32\r\n except ValueError:\r\n print \"Encoding error on '%s'\" % (status)\r\n return temperature",
"def get_temperature(data):\n celcius = 0\n celcius = [i for i in data if re.search(r'\\d+[/]', i)]\n \n if celcius == []:\n return \"N/A\"\n celcius = celcius[0].split('/')[0]\n celcius = celcius.replace('M', '-')\n \n try:\n celcius = int(celcius)\n except ValueError:\n return \"N/A\"\n\n farenheit = round((celcius * 9/5) + 32) # formula to get farenheit from celcius\n temperature = \"{0} C ({1} F)\".format(celcius, farenheit)\n return temperature",
"def compute_zip_code(zip_code_text):\n zip_code = None\n if zip_code_text and len(zip_code_text) >= 5 and zip_code_text.isdigit():\n zip_code = zip_code_text[:5]\n return zip_code",
"def get_weather(self):\n\n city = self.user_data[\"weatherSettings\"][\"weatherCity\"]\n country = self.user_data[\"weatherSettings\"][\"weatherCountry\"]\n\n host = \"weather.mios.com\"\n temp_scale = \"C\"\n url = \"http://%s/?tempFormat=%s&cityWeather=%s&countryWeather=%s\" % \\\n (host, temp_scale, Vera.urlencode(city), Vera.urlencode(country))\n\n weather = self.proxy_get(url)\n\n return (float(weather[\"temp\"]), weather[\"text\"])",
"def current_weather(city_name, API):\r\n global new_city\r\n try:\r\n if city_name.isnumeric(): # if input is zip\r\n url = f'http://api.openweathermap.org/data/2.5/weather?zip={city_name},&appid={API}'\r\n elif ',' in city_name: # if input has a city,state or city,country\r\n new_city = city_name.split(',')\r\n new_city_name = new_city[0].replace(' ', '%20') # so the url correctly handles spaces in cities\r\n if len(new_city[1]) > 2: # if the state/country code is invalid\r\n return \"Not valid state code/country code\"\r\n url = f'https://api.openweathermap.org/data/2.5/weather?q={new_city_name},{new_city[1]},us&appid={API}'\r\n elif ',' not in city_name: # if searched by only city and not state or country code, works for big cities\r\n url = f'http://api.openweathermap.org/data/2.5/weather?q={city_name}&appid={API}'\r\n response = requests.get(url).json() # getting the proper json data based on the input of the city_name\r\n city_latitude = str(response['coord']['lat'])\r\n city_longitude = str(response['coord']['lon'])\r\n if (new_city[1].upper() in states) and (\r\n response['sys']['country'] != 'US'): # to catch foreign cities with US state codes\r\n return \"Not valid city\"\r\n elif (new_city[1].upper() not in states) and (\r\n new_city[1].upper() != response['sys']['country'] and new_city != 'XXX'):\r\n # to catch US cities with foreign country codes\r\n return 'Not a valid city'\r\n elif states[new_city[1].upper()] != coordinates(city_latitude,\r\n city_longitude):\r\n # Check to see if city is located in provided state\r\n return 'City is not located in that state'\r\n current_temp = response['main']['temp']\r\n max_temp = response['main']['temp_max']\r\n min_temp = response['main']['temp_min']\r\n feels_like_temp = response['main']['feels_like']\r\n curr_temp_fheit = round((current_temp * 1.8) - 459.67) # converting to imperial\r\n max_temp_fheit = round((max_temp * 1.8) - 459.67)\r\n min_temp_fheit = round((min_temp * 1.8) - 459.67)\r\n feels_like_temp_fheit = round((feels_like_temp * 1.8) - 459.67)\r\n description = response['weather'][0]['description']\r\n wind = round(response['wind']['speed'] * 2.23694)\r\n\r\n format_weather = (\"Current weather for \" + str(city_name) + \", \" + response['sys']['country'] +\r\n \"\\nCurrent temp: \" + str(curr_temp_fheit) + '\\nMax Temp: ' + str(\r\n max_temp_fheit) + '\\nMin Temp: ' + str(\r\n min_temp_fheit) + '\\nFeels like: ' + str(\r\n feels_like_temp_fheit) + '\\nOutlook: ' + description + '\\nWind: ' + str(\r\n wind) + ' mph')\r\n # print weather in cleaner format\r\n return format_weather\r\n\r\n except KeyError: # If a city that doesn't exist is entered\r\n return 'Not valid city'",
"def f_weather(phenny, input):\n icao_code = input.group(2)\n\n if not icao_code:\n return phenny.say(\"Try .weather London, for example?\")\n\n icao_code = code(phenny, icao_code)\n\n if not icao_code:\n phenny.say(\"No ICAO code found, sorry\")\n return\n\n uri = 'http://tgftp.nws.noaa.gov/data/observations/metar/stations/%s.TXT'\n\n try:\n bytes = web.get(uri % icao_code)\n except web.HTTPError:\n phenny.say(\"No NOAA data available for that location.\")\n return\n\n if 'Not Found' in bytes:\n phenny.say(icao_code + \": no such ICAO code, or no NOAA data\")\n return\n\n phenny.say(str(metar.parse(bytes)))",
"def GetWeather(query, api_key):\n try:\n owm = pyowm.OWM(api_key)\n observation = owm.weather_at_place(str(query))\n location = observation.get_location()\n weather = observation.get_weather()\n temp = weather.get_temperature('fahrenheit')\n status = CleanupWeatherStatus(weather.get_detailed_status())\n return 'It is %sF degrees with %s in %s right now.' % (int(temp['temp']),\n status,\n location.get_name())\n except:\n return 'I couldn\\'t find any weather for %s. I am sorry.' % (query)",
"def lookup_usaf_station_by_zipcode(zipcode):\n\n usaf = zipcode_usaf.get(zipcode, None)\n return usaf",
"def trans_weather(string):\r\n\treturn cn2en.WEATHER[string]",
"def trans_temperature(string):\r\n\treturn int(string[:2])",
"def get_weather(phenny, input):\n import wunderground\n \n report_type = 'conditions'\n\n unicode_input = unicode(input)\n if unicode_input[1:8] == 'weather':\n location_str = unicode_input[9:]\n elif unicode_input[1:3] == 'w ':\n location_str = unicode_input[3:]\n try:\n json_data = wunderground.format_json(location_str, input.weather_API, report_type)\n output_results(phenny, json_data)\n except Exception, e:\n print e\n phenny.say('Could not find results for \"%s\", please reword the search and try again.' % location_str)",
"def get_temperature(self):\n summary = \" \".join(self.get_summary().split())\n pattern = '\\$.... .. .*? .*? (.*?) .*? .*? . .*? .*? . . . .*?'\n temperature = float(re.findall(pattern,summary).pop())\n return temperature",
"def temperature(self):\r\n try:\r\n return str(self.connect()['main']['temp'])\r\n except:\r\n return '@weather_temperature'",
"def get_temperature(elevation, sea_level):\n if elevation <= sea_level:\n return 0.8\n else:\n return (-1.0 / (1.0 - sea_level)) * (elevation - sea_level) + 1.0",
"def get_zip_code(string):\n zip_code = \"\"\n\n #for each character in string\n for ch in string:\n #if the character is a number, add it to the \"zip_code\" string\n if ch.isdigit():\n zip_code += ch\n\n return zip_code",
"def convert_zip_code(zipcode):\n zipcode = tf.strings.regex_replace(zipcode, r\"X{0,5}\", \"0\")\n zipcode = tf.strings.to_number(zipcode, out_type=tf.float32)\n return zipcode",
"def GetWeatherByLocation():\n Location = GetLocation()\n WeatherUrl =\"http://api.openweathermap.org/data/2.5/weather?\"+ Location +\"&appid=b4bacbe2dc824431289800439f1ec3df&units=metric\"\n WeatherRequest = requests.get(WeatherUrl)\n WeatherInfo = WeatherRequest.json()\n pprint(WeatherInfo)\n WindSpeed = WeatherInfo['wind']['speed']\n pprint(WindSpeed)\n Temp = WeatherInfo['main']['temp']\n Humidity = WeatherInfo['main']['humidity']\n Description = WeatherInfo['weather'][0]['description']\n print(type(Humidity))\n return(Temp, Humidity, Description)",
"def weather():\n latlong = request.form.get(\"latlong\")\n latlong = latlong.split(\",\")\n data = lookup_weather(latlong[0],latlong[1])\n return render_template(\"weather.html\", data = data)",
"def get_temp(html) -> None:\n\tif page_type_dict['general']:\n\t\tt_text = html.find('div', {'class': '_1HBR'}).text\n\t\tt_digit = ''.join([i for i in t_text if i.isdigit()])\n\t\tweather_dict['temperature'] = t_digit\n\telse:\n\t\tre_temp_class = re.compile('.*_2ezK.*') # regex template: str w/ '_2ezK'\n\t\ttemp_class = html.find('div', {'class': re_temp_class}) \n\t\t# we've got smth like: 'Ночью14°Утром19°Днём24°Вечером22°\n\t\tweather_lst = temp_class.text.split('°') # ['Ночью14','Утром19',...]\n\t\tint_weather_lst = [int(number.group()) for number in ( # for all the elems \n\t\t\tre.search(r'\\d+', word) for word in weather_lst) if number] # keep integers\n\t\t# result: [14, 19, 24, 22]\n\t\tweather_dict['temperature'] = int_weather_lst",
"def temps(lieu):\r\n\r\n key = '5a72ceae1feda40543d5844b2e04a205'\r\n localisation = \"http://api.openweathermap.org/data/2.5/weather?q={0},fr&appid={1}\"\r\n localisation = localisation.format(lieu, key)\r\n request_html = requests.get(localisation)\r\n data = request_html.json()\r\n\r\n weather = data['weather'][0]['main']\r\n\r\n if weather == \"Clear\":\r\n weather = \"Beau\"\r\n\r\n elif weather == \"Clouds\":\r\n weather = \"Nuageux\"\r\n return weather",
"def find_weather(city):\n\n\ttry:\n\t\thttp = urllib3.PoolManager()\n\t\tresponse = http.request('GET', \n\t\t\t'http://api.openweathermap.org/data/2.5/weather', \n\t\t\tfields ={\n\t\t\t'q':city, \n\t\t\t'units':'metric', \n\t\t\t\"appid\": \"2bc3e79bb974a007818864813f53fd35\"\n\t\t\t}) \n\t\tparsed_data = json.loads(response.data.decode('utf-8'))\n\t\t\n\t\t\n\t\treturn (\"\\t{}\\t{}\\t{}\").format((parsed_data['name']).ljust(10),(str(parsed_data[\"main\"][\"temp\"])).ljust(10), parsed_data[\"weather\"][0][\"description\"])\n\n\texcept Exception as e:\n\t\tprint (e)",
"def weather_helper():\n\n weather = get_weather('Chicago')\n conditions = weather['weather'][0]['description']\n temperature = weather['main']['temp']\n location = weather['name']\n\n curr_weather = 'It is currently %s degrees with %s in %s' % (temperature, conditions, location)\n return curr_weather",
"def GetWeatherByCity(City):\n WeatherUrl = \"http://api.openweathermap.org/data/2.5/weather?q=\"+ City + \"&appid=b4bacbe2dc824431289800439f1ec3df&units=metric\" \n WeatherRequest = requests.get(WeatherUrl)\n WeatherInfo = WeatherRequest.json()\n if ('main' in WeatherInfo):\n pass\n else:\n print(\"Invalid City Name\")\n exit() \n Temp = WeatherInfo['main']['temp']\n Humidity = WeatherInfo['main']['humidity']\n Description = WeatherInfo['weather'][0]['description']\n return(Temp, Humidity, Description)",
"def get_zipsearch(zipcode=u''):\n from x84.bbs import getterminal, LineEditor, echo\n term = getterminal()\n echo(u''.join((u'\\r\\n\\r\\n',\n term.bold_yellow(u' -'),\n term.reverse_yellow(u':'),\n u' ')))\n return LineEditor(width=min(30, term.width - 5), content=zipcode).read()",
"def temperature() -> float:",
"def get_temperature(self, monannul, start, end, iso) -> str:\n #This is converting a username into a UUID which is how Minecraft differentiates between players\n url = 'http://climatedataapi.worldbank.org/climateweb/rest/v1/country/' + monannul + '/tas/' + start + end + iso + '.json'\n try:\n\n response = urllib.request.urlopen(url)\n json_results = response.read()\n try:\n r_obj = json.loads(json_results)\n uuid = r_obj['id']\n return uuid\n except JSONDecodeError:\n return \"none\"\n \n except urllib.error.HTTPError as e:\n print('Failed to download contents of URL')\n print('Status code: {}'.format(e.code))\n \n except urllib.error.URLError as e:\n print('Failed to download contents of URL')\n print('Status code: {}'.format(e))\n print(\"Perhaps you have no internet connection?\")"
] | [
"0.6631745",
"0.6549503",
"0.6489142",
"0.6372373",
"0.63502383",
"0.62462974",
"0.6226757",
"0.6073102",
"0.6010607",
"0.5896707",
"0.58758765",
"0.58282274",
"0.57842165",
"0.57408905",
"0.57213604",
"0.57094175",
"0.56968915",
"0.56946063",
"0.5681781",
"0.5656904",
"0.5612414",
"0.56036067",
"0.55586815",
"0.55378324",
"0.5504577",
"0.5503476",
"0.5486682",
"0.5482628",
"0.5469357",
"0.54655796"
] | 0.787071 | 0 |
Finds the token where the value is stored. | def _value_token_index(self):
# TODO: memoize this value
for i, token in enumerate(self.tokens):
if not token.type.is_metadata:
return i
raise RuntimeError('could not find a value token') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def match_value(self, token_type, token_value):\n if isinstance(self.cursor(), token_type) and self.cursor().token == token_value:\n token = self.cursor()\n self.pos += 1\n else:\n raise ParseError(\"Expected {!s}.\".format(token_value))\n return token",
"def whereis_token(self, tid, silent=False):\n tk = self.get_token(tid)\n if tk:\n rs = tk.position()\n else:\n rs = None\n if not silent:\n msg = \"Token %s position is %s\" % (tid, rs)\n self.parser.status(msg)\n return rs",
"def get_token(self, symbol):\r\n for token in self:\r\n if token[\"symbol\"].lower() == symbol.lower():\r\n return token\r\n return None",
"def GetCurrentToken(tokens, pos):\n i = 0\n while i < len(tokens):\n if pos > tokens[i].start and pos < tokens[i].end:\n return tokens[i]\n if pos < tokens[i].start:\n return tokens[i-1] if i > 0 else None\n i += 1\n\n return tokens[len(tokens)-1] if tokens else None",
"def find_token(self, start_token, tok_type, tok_str=None, reverse=False):\n # type: (Token, int, Optional[str], bool) -> Token\n t = start_token\n advance = self.prev_token if reverse else self.next_token\n while not match_token(t, tok_type, tok_str) and not token.ISEOF(t.type):\n t = advance(t, include_extra=True)\n return t",
"def find_value(code, value):\n value_pattern = re.compile(rf\"{re.escape(value)} ?= ?([^=][a-zA-Z0-9\\.'/_)(]*)\")\n\n target = None\n for line in code:\n if value_pattern.search(line):\n target = re.findall(value_pattern, line)\n break\n\n return target[0] if target is not None else value",
"def token(self):\n return self[\"token\"]",
"def get_token(self):\n return self.__token",
"def get_token(self):\n return self.__token",
"def search_token(self, message):\n\n # First search for variable name enclosed in single quotes\n m = re.search(\"'.*'\", message)\n\n # If there's no variable name search for nil-check message\n if m is None:\n m = re.search(r'nil(?=-check)', message)\n\n # If there's no nil-check search for method name that comes after a `#`\n if m is None:\n m = re.search(r'(?<=#)\\S+', message)\n\n return m.group(0) if m else None",
"def find(self, value):\n for position in range(self.get_size()):\n if self.table[position] == value:\n return position",
"def search(self, val):\n current = self.head\n # import pdb; pdb.set_trace()\n while current is not None:\n if current.data == val:\n return current\n current = current.next_node\n return None",
"def find_node(self, value):\n cur = self.first\n while cur:\n if cur.value == value:\n return cur\n cur = cur.next\n return None",
"def token(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"token\")",
"def token(self):\r\n return self._token",
"def find_offset(self,value):\n return self.header.find_offset(value)",
"def current(self) -> Token:\n return self.tokens[self.pos]",
"def n_value(self, token):",
"def token(self):\n return self._token",
"def token(self):\n return self._token",
"def token(self):\n return self._token",
"def _get_token(self):\n # Skip initial whitespace.\n pos = self._skip_whitespace()\n\n # Find the token here, if there's one.\n token = None\n\n for (token_type, regex) in TOKEN_REGEXEN:\n re_match = regex.match(self.body, pos)\n if re_match:\n token_content = next(g for g in re_match.groups() if g is not None)\n token = Token(token_type, token_content, re_match.end())\n break\n\n return token",
"def _parse_token(self, body):\n\n token_match = re.search('var\\s*token\\s*=[\\s\\']*(\\d+)', body)\n return int(token_match.group(1))",
"def _find(self, val, cur_node):\n if val == cur_node.data:\n return cur_node\n elif val > cur_node.data:\n if not cur_node.right:\n return None\n return self._find(val, cur_node.right)\n elif val < cur_node.data:\n if not cur_node.left:\n return None\n return self._find(val, cur_node.left)",
"def key_word(self):\n return self.current_token",
"def search(self, val):\n current = self.head\n found = False\n while current and not found:\n if current.val == val:\n found = True\n return current\n current = current.next\n return None",
"def token(self) -> Token:\n return getattr(self, \"tok\", None)",
"def location(self):\r\n return conf.lib.clang_getTokenLocation(self._tu, self)",
"def LookupToken(self, dmtoken):\n self.ReadClientStateFile()\n return self._registered_tokens.get(dmtoken, None)",
"def search_tree(token, root):\n matched_pos = []\n for node in PreOrderIter(root):\n # If a node is not defined in our searchable list, skip it\n if getattr(node, 'id') not in node_types.KEY_PROPERTY:\n continue\n else:\n for field in node_types.KEY_PROPERTY[getattr(node, 'id')]:\n if not hasattr(node, field):\n continue\n value = getattr(node, field)\n if token in str(value):\n matched_pos.append(node)\n\n if len(matched_pos) == 0:\n return None\n else:\n return matched_pos"
] | [
"0.6786406",
"0.6442385",
"0.631681",
"0.61292017",
"0.60891455",
"0.60803986",
"0.5987252",
"0.5974573",
"0.5974573",
"0.59730595",
"0.5952903",
"0.5946502",
"0.59407806",
"0.5928285",
"0.5908328",
"0.5895346",
"0.5840744",
"0.5825658",
"0.58217704",
"0.58217704",
"0.58217704",
"0.5820745",
"0.5816943",
"0.5813473",
"0.57983875",
"0.57854855",
"0.57698554",
"0.57656956",
"0.57608443",
"0.5752086"
] | 0.6961675 | 0 |
>>> import shutil >>> import os.path >>> import core.docprocessor >>> basepath = 'core/test_output' >>> f = open('core/test/cv_1.doc', 'r') >>> cv1 = core.docprocessor.Processor(f, 'cv_1.doc', basepath) >>> cv1.result True >>> os.path.isfile(os.path.join(cv1.markdown_path, ... cv1.name.md)) True >>> cv1.deleteconvert() >>> os.path.isfile(os.path.join(cv1.markdown_path, ... cv1.name.md)) False >>> f.close() >>> shutil.rmtree(basepath) | def deleteconvert(self):
filename = os.path.join(self.docx_path, self.name.docx)
if os.path.isfile(filename):
os.remove(filename)
filename = os.path.join(self.html_path, self.name.html)
if os.path.isfile(filename):
os.remove(filename)
filename = os.path.join(self.docbook_path, self.name.xml)
if os.path.isfile(filename):
os.remove(filename)
filename = os.path.join(self.markdown_path, self.name.md)
if os.path.isfile(filename):
os.remove(filename) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def cleanup(asciitest_out_dir, doc_file):\n # path join uses backslash win32 which is not cmake compatible\n filename = os.path.join(asciitest_out_dir, save_cmake_filename(doc_file)).replace(\"\\\\\",\"/\")\n \n #print(\"cleanup %s %s\" % (doc_file, filename))\n try:\n os.remove(filename)\n except:\n pass",
"def clean_docs(c):\n c.run(f\"rm -fr {DOCS_BUILD_DIR}\")",
"def clean_pdf():\n xnt.build.tex.clean(path=\"./\", remove_pdf=True)",
"def clean(self):\n if os.path.exists(self.paths['build_dir']):\n shutil.rmtree(self.paths['build_dir'])\n if os.path.exists(os.path.join(self.base_dir, 'docs')):\n shutil.rmtree(os.path.join(self.base_dir, 'docs'))",
"def delete_file(self):\n os.remove(self.id+\"-input.txt\")\n if(self.lang == \"PYTHON\"):\n os.remove(self.id+\".py\")\n elif(self.lang == \"C\"):\n os.remove(self.id+\".c\")\n if(self.status == 1):\n os.remove(self.id+\"_c\")\n elif(self.lang == 'CPP'):\n os.remove(self.id+\".cpp\")\n if(self.status == 1):\n os.remove(self.id+\"_cpp\")\n elif(self.lang == 'JAVA'):\n os.remove(self.id+\".java\")\n if(self.status == 1):\n os.remove(self.id+\"_java\") \n elif(self.lang == \"JS\"):\n os.remove(self.id+\".js\")\n # if(self.status == 1):\n # os.remove(self.id+\"_js\")s",
"def _cleanup(self):\n os.system(\"rm -r %s/*\" %(self._snippet_index_dir))\n os.system(\"rm %s/*\" %(self._para_dir))\n os.system(\"rm %s/*\" %(self._temp_dir))\n os.system(\"rm %s/*\" %(self._snippet_result_dir))",
"def clean():\n possible_outputs = (\n '{}.html'.format(CONFIG['FULL_PROJECT_NAME']),\n '{}.epub'.format(CONFIG['FULL_PROJECT_NAME']),\n '{}.pdf'.format(CONFIG['FULL_PROJECT_NAME']),\n '{}.docx'.format(CONFIG['FULL_PROJECT_NAME']),\n '{}.odt'.format(CONFIG['FULL_PROJECT_NAME']),\n )\n\n for filename in possible_outputs:\n if os.path.exists(filename):\n os.remove(filename)\n print(\"Removed {}\".format(filename))",
"def clean(self):\n actual_output_file = path.splitext(self.source_name)[0] + \".actual\"\n if path.exists(self.binary_name):\n os.unlink(self.binary_name)\n if path.exists(actual_output_file):\n os.unlink(actual_output_file)",
"def process_file_markdown(src_pathname):\n dest_pathname = path_src_to_dest(src_pathname, '.html')\n\n logging.info(\"Processing Markdown file: %s -> %s\" %\n (str(src_pathname), str(dest_pathname)))\n\n ensure_dest_dir(dest_pathname)\n\n with open(dest_pathname, 'w', encoding='UTF-8') as f:\n outstr = docgen.generate.generate_doc(str(src_pathname),\n verbose=config['verbose'],\n inlinecss=True,\n inlinewave=True,\n asdiv=False)\n f.write(outstr)\n\n return dest_pathname",
"def clean():\n shutil.rmtree(BUILD_PATH, ignore_errors=True)\n shutil.rmtree(os.path.join(SOURCE_PATH, \"reference\", \"api\"), ignore_errors=True)",
"def __del__(self):\n shutil.rmtree(self.epub_dir)",
"def clean():\n shutil.rmtree(BUILD_PATH, ignore_errors=True)\n shutil.rmtree(\n os.path.join(SOURCE_PATH, \"reference\", \"api\"), ignore_errors=True\n )",
"def createPDFDoc(self, filepath):\n print(\"Starting pdf creation\")\n strMD=\"\"\n for fileMD,data in self.graph.nodes(data=True):\n if not os.path.isfile(fileMD):\n sys.exit(\"Error: \" + fileMD + \" does not exist\")\n if not fileMD.endswith(\"md\" or \"markdown\"):\n sys.exit(fileMD + \" is not a markdown file\");\n print(\"Found file: \" + fileMD)\n strMD = strMD + \" \" + fileMD\n cmd = \"pandoc --latex-engine=xelatex -s -o \" + filepath + strMD\t\n print(\"Starting file conversion.\")\n if subprocess.call(cmd) != 0:\n print(\"Conversion failed\")\n else:\n print(\"Saving pdf file to: \" + filepath)\n print(\"Conversion successfull\")",
"def setUp(self):\n self.outdir = \"tests/out/pdftotext\"\n if not os.path.exists(self.outdir):\n os.makedirs(self.outdir)\n else:\n files = glob.glob(self.outdir)\n for f in files:\n if os.path.isfile(f):\n os.remove(f)",
"def main(base_path):\n current = os.getcwd()\n try:\n if not(os.path.exists(base_path)):\n ans = 'y'\n if p_out:\n print(\"Do you want to create \" + base_path + \"?(y/n)\")\n ans = sys.stdin.read(1)\n print(\"\")\n if ans in ('y', 'Y'):\n pass\n elif ans in ('n', 'N'):\n raise NoneOutput\n else:\n raise InputError\n else:\n m_path = os.path.join(base_path, 'nzmath/manual')\n if os.path.exists(m_path):\n ans = 'y'\n if p_out:\n print(\"Do you want to remove \" + m_path + \"?(y/n)\")\n ans = sys.stdin.read(1)\n print(\"\")\n if ans in ('y', 'Y'):\n for root, dirs, files in os.walk(m_path, topdown=False):\n for name in files:\n os.remove(os.path.join(root, name))\n for name in dirs:\n os.rmdir(os.path.join(root, name))\n elif ans in ('n', 'N'):\n raise NoneOutput\n else:\n raise InputError\n dirname = os.path.join(base_path, 'nzmath/manual/modules')\n if not(os.path.exists(dirname)):\n os.makedirs(dirname)\n os.chdir(os.path.join(base_path, 'nzmath/manual/'))\n csspage = convertHPURL('manual/default.css')\n if p_out:\n print(\"get css from \" + csspage)\n retryConnection(urllib.request.urlretrieve, csspage, 'default.css')\n while ad_list:\n files = ad_list.pop()\n MyWikiParser(files).feeds()\n if p_out:\n print(\"\\n\" + \"All process is done!\" + \"\\n\")\n print(\"Ok, now created nzmath-current manual located to\")\n print(os.path.join(base_path, \"nzmath\"))\n print(\"if you check difference between nzmath-cvs manual, with GNU diff,\")\n print(\"$ diff -ubBr /tmp/nzmath/manual {your-nzmathcvs-repo}/manual\")\n print(\"or you check only new version files,\")\n print(\"$ diff -r --brief /tmp/nzmath/manual {your-nzmathcvs-repo}/manual .\")\n except NoneOutput:\n if p_out:\n print('end.')\n except InputError:\n print(\"Error: Invalid input!\")\n except LookupError:\n print(\"Error: Maybe, Japanese encodings(ex.euc_jp) is not supported.\")\n except:\n if p_out:\n print(\"Check \" + base_path + \" (dir? truly path? and so on.)\")\n print(\"Delete \" + base_path + \" and try again.\")\n print(\"(Maybe, caused by problem of network connection)\\n\")\n print(sys.exc_info()[0])\n os.chdir(current)",
"def clean(self):\n print(\"Cleaning outputs in %s\" % self.args.output)\n files = glob.glob(self.args.output + \"*.pkl\")\n for f in files:\n if os.path.exists(f):\n os.remove(f)",
"def clean_folder(self):\n # Remove the 1st output\n # Remove the 2nd output\n # Remove the calibrated output\n try:\n os.remove(\"output1.csv\")\n except:\n pass\n try: \n os.remove(\"output2.csv\")\n except:\n pass\n try:\n os.remove(self.__add_output_file_location(self._output_filename))\n except:\n pass\n \n list = os.listdir(\"edited\")\n for file in list:\n file = os.path.join(\"edited\", file)\n try:\n os.remove(file)\n except:\n pass\n \n list = os.listdir(\"extracted\")\n for file in list:\n file = os.path.join(\"extracted\", file)\n try:\n os.remove(file)\n except:\n pass",
"def cli(ctx):\n ctx.invoke(clean)\n ctx.invoke(build_convert_upload)",
"def _finalize_iteration(self, verbose: bool):\n super().delete_remote_files()\n self.comm.storyteller.document_task(task=\"adam_documentation\")",
"def test_no_deletion(self):\n analyze_text(self.filename)\n self.assertTrue(os.path.exists(self.filename))",
"def test_no_deletion(self):\n analyze_text(self.filename)\n self.assertTrue(os.path.exists(self.filename))",
"def clean_documents():\n start = datetime.now()\n for i, raw_filename in enumerate(os.listdir(RAW_DIR)):\n fullpath = os.path.join(RAW_DIR, raw_filename)\n if os.path.isfile(fullpath):\n print(\"Cleaning {0} {1}\".format(i, fullpath), file=stderr)\n try:\n with open(fullpath, \"r\") as f:\n text = f.read()\n text = clean(text)\n soup = BeautifulSoup(text, \"html.parser\")\n cleaned = visible_text(soup)\n score = germanwings_score(cleaned)\n if not score:\n print(\"not germanwings: {0}\".format(raw_filename))\n else:\n clean_filename = os.path.join(CLEAN_DIR, raw_filename)\n with open(clean_filename, \"w\") as f:\n f.write(cleaned.encode(\"ascii\", \"ignore\"))\n except Exception as exc:\n print(\"{0}: {1}\".format(fullpath, exc), file=stderr)\n end = datetime.now()\n print(\"Elapsed time to clean: {0}\".format(end - start), file=stderr)",
"def clean():\n clean_files()",
"def clean_pdf_dir():\n # Create the pdf directory if it does not exist\n if not os.path.isdir(pdf_dir):\n os.makedirs(pdf_dir)\n return\n\n # Get the pdf files list and remove them\n pdf_files = [f for f in os.listdir(pdf_dir) if f.lower().endswith('pdf')]\n for pdf_name in pdf_files:\n os.remove(os.path.join(pdf_dir, pdf_name))",
"def clean(raw_file,clean_path,results_path):\n clean_file = clean_path + 'clean.pkl'\n stats_file = results_path + 'corpus_stats.pkl' \n raw_text = load_raw_text(raw_file) \n clean_docs = list()\n nlp = spacy.load('en')\n i = 0\n print('Cleaning documents...')\n for text in raw_text: \n words = nlp(text)\n raw_count = len(words)\n words = [w for w in words if not w.is_stop]\n words = [w for w in words if w.ent_type_ != 'PERSON' and w.pos_ != 'PROPN']\n words = [w for w in words if w.is_alpha and not w.is_digit]\n words = [w.lemma_ for w in words if w.text != '-PRON-']\n word_string = ' '.join(words)\n word_string = word_string.lower()\n doc = Document(word_string)\n doc.clean_count = len(words)\n doc.raw_count = raw_count\n clean_docs.append(doc)\n if i%10 == 0:\n print('Document: ' + str(i))\n i += 1\n clean_corpus = Corpus(clean_docs)\n clean_corpus.save_corpus_text(clean_file)\n clean_corpus.save_corpus_stats(stats_file)\n return clean_corpus",
"def clean_android_target_pdf_dir():\n if os.path.exists(android_target_pdf_dir):\n shutil.rmtree(android_target_pdf_dir)",
"def test_check_if_output_file_exists():\n input_file = os.path.join(os.getcwd(), 'tests', 'input_test_file.docx')\n output_file = os.path.join(os.getcwd(), 'tests', 'output_test_file.txt')\n\n questions_parser = QuestionsParser()\n questions_parser.main(argv=['-i', input_file, '-o', output_file])\n assert os.path.exists(output_file)\n os.unlink(output_file)",
"def clean(ctx):\n header(clean.__doc__)\n with ctx.cd(ROOT):\n for pattern in CLEAN_PATTERNS:\n info(\"Removing {0}\", pattern)\n ctx.run(\"rm -rf {0}\".format(pattern))",
"def test_no_delete(self):\n analyze_text(self.filename)\n self.assertTrue(os.path.exists(self.filename))",
"def clean(ctx, so=False, cache=False):\n for name in ctx.shell.files('.', '.coverage*', recursive=False):\n ctx.shell.rm(name)\n for name in ctx.shell.files('bench', '.out.*', recursive=False):\n ctx.shell.rm(name)\n ctx.shell.rm_rf(\n 'docs/coverage',\n 'docs/gcov',\n 'build',\n 'dist',\n 'wheel/dist',\n ctx.doc.userdoc,\n 'docs/_userdoc/_build',\n ctx.doc.website.source,\n ctx.doc.website.target,\n )\n if cache:\n cacheclean(ctx)\n if so:\n soclean(ctx)"
] | [
"0.65258646",
"0.63766783",
"0.62430525",
"0.6167361",
"0.5848033",
"0.57737476",
"0.5623585",
"0.5583894",
"0.5582149",
"0.5558321",
"0.5547012",
"0.55358094",
"0.553127",
"0.5530632",
"0.5524219",
"0.5511022",
"0.54836005",
"0.5472407",
"0.5460167",
"0.53966826",
"0.53966826",
"0.53895587",
"0.5382919",
"0.53824633",
"0.53743327",
"0.53707415",
"0.53637016",
"0.53601897",
"0.5357064",
"0.5347869"
] | 0.73595536 | 0 |
Read ascii file to get weather info | def read_weather(self):
print "Reading weather data from file",self.datafile
tab = ascii.read(self.datafile)
# Fix 'T' values in precipitation column, which represent tiny
# amounts of rain (not measurable)
TINY_VALUE = '.005' # 0.005 is half the smallest measurable value
rain = tab['PrecipitationIn']
wbad = (rain == 'T')
rain[wbad] = TINY_VALUE
rain = numpy.array(rain).astype("float")
# Replace string version of precip with float version
tab['PrecipIn'] = rain
tab.remove_column('PrecipitationIn')
self.table = tab | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _read_raw_temperature():\n with open(device_file, 'r') as f:\n content = f.readlines()\n return content",
"def read_ascii(file):\n wvlen, band, mag, emag, fmag, unit, beam, odate, ref = [],[],[],[],[],[],[],[],[]\n with open(file, 'r') as f_in:\n for line in f_in:\n try:\n # ensure line contains data:\n a = float(line[0])\n except ValueError:\n a = 'dummy'\n try:\n # ensure mag or flux entry is not '--'\n m = float(line.split(' ')[2])\n except ValueError:\n m = 'dummy'\n \n if isinstance(a, float) and isinstance(m, float):\n wvlen.append(float(line.strip().split(' ')[0])) # in metres\n band.append(line.strip().split(' ')[1])\n mag.append(float(line.strip().split(' ')[2]))\n emag.append(line.strip().split(' ')[3])\n fmag.append(line.strip().split(' ')[4])\n unit.append(line.strip().split(' ')[5])\n beam.append(line.strip().split(' ')[6])\n odate.append(line.strip().split(' ')[7])\n ref.append(line.strip().split(' ')[8])\n \n return wvlen, band, mag, emag, fmag, unit, beam, odate, ref",
"def read_imp_ASCII(filename):\n\n # create a temporary directory\n tmpDir = tempfile.mkdtemp()\n\n # unzip filename to tmpDir\n with zipfile.ZipFile(filename, 'r') as inZip:\n inZip.extractall(tmpDir)\n\n # set filenames\n dt_file = os.path.join(tmpDir, 'DateTime.txt')\n location_file = os.path.join(tmpDir, 'LatLon.txt')\n bx_file = os.path.join(tmpDir, 'BX.txt')\n by_file = os.path.join(tmpDir, 'BY.txt')\n bz_file = os.path.join(tmpDir, 'BZ.txt')\n obx_file = os.path.join(tmpDir, 'obsBX.txt')\n oby_file = os.path.join(tmpDir, 'obsBY.txt')\n obz_file = os.path.join(tmpDir, 'obsBZ.txt')\n station_file = os.path.join(tmpDir, 'Stations.txt')\n\n DT = _read_antti_datetime(dt_file)\n\n Lat, Lon, Rad, Label = _read_antti_location(location_file)\n\n BX = _read_antti_component(bx_file)\n BY = _read_antti_component(by_file)\n BZ = _read_antti_component(bz_file)\n\n obsX = _read_antti_component(obx_file)\n obsY = _read_antti_component(oby_file)\n obsZ = _read_antti_component(obz_file)\n\n obsLat, obsLon, obsRad, obsInc, obsID = _read_antti_stations(station_file)\n\n shutil.rmtree(tmpDir)\n\n return (DT, (Lat, Lon, Rad), BX, BY, BZ, Label,\n (obsLat, obsLon, obsRad), obsX, obsY, obsZ, obsInc, obsID)",
"def load_info():\n data = np.loadtxt(\"u_sol_meta.txt\", dtype=int)\n return data",
"def read_data(self):\n print 'Reading Data ...'\n fname = self.wpath + 'Data/' + self.city[2] + '-' + self.application + '.csv.bz2'\n self.dataset = loadtxt(fname, skiprows=1,\n dtype=[('lat', 'f8'), ('lng', 'f8'), ('time', 'i4'), ('user', 'S20')],\n usecols=(0, 1, 2, 3), delimiter=';', comments='#')",
"def read():\n # TODO",
"def read_temperature():\n temp = 0.0\n with open(\"daily_temp.txt\", \"r\") as f:\n temp = float(f.readline())\n\n return temp",
"def _read_arf(file):\n with fits.open(file) as hdul:\n data = hdul[1].data\n\n return data['energ_lo'], data['energ_hi'], data['specresp']",
"def read_data_nmt():\n data_dir = download_extract('fra-eng')\n with open(os.path.join(data_dir, 'fra.txt'), 'r') as f:\n return f.read()",
"def read_weather_data():\n # Check if UTC to gmt+1 conversion is being handled correctly\n weather = pd.read_csv('//datc//opschaler//weather_data//knmi_10_min_raw_data//output//df_combined_uncleaned.csv',\n delimiter='\\t', comment='#',\n parse_dates=['datetime'])\n weather = weather.set_index(['datetime'])\n return weather",
"def read_from_file(self, filename: str) -> None:",
"def _load(self):\n\n # number of non-data header details at top of data file\n header = 1\n\n # open file\n weatherData = []\n with open(self.wfile) as myfile:\n if (self.lines > 0):\n weatherData = [next(myfile) for x in xrange(self.lines + header)]\n else:\n weatherData = myfile.readlines()\n\n # get data stream from first line\n streamHeader = weatherData.pop(0).rstrip()\n if (streamHeader == 'FULL'):\n self.dataStream = 0\n elif (streamHeader == 'ADVANCED'):\n self.dataStream = 1\n elif (streamHeader == 'BASIC'):\n self.dataStream = 2\n else:\n print \"Error: unecognised data stream from file %s\" % (self.wfile)\n return -1\n\n # read data\n inputData = []\n for line in weatherData:\n entries = line.split()\n inputData.append(entries)\n\n # copy all into np array\n self.data = np.array(inputData)\n\n return 0",
"def read_weather(self, path='../datasets/McClear/s7_clr_data_17-19.csv'):\n s_clr = pd.read_csv(path)\n times = pd.date_range('03-16-2018 16:00', freq='15min', periods=96*2, tz=\"UTC\") #\n weather = pd.DataFrame(columns=['ghi', 'dni', 'dhi'], index=times)\n weather['dni'] = np.array(s_clr['BNI'])\n weather['ghi'] = np.array(s_clr['GHI'])\n weather['dhi'] = np.array(s_clr['DHI'])\n return weather",
"def readData(self):\n f = open(self.filename)\n self.time = []\n self.data = []\n for line in f:\n if line.find('BAD FLAG') > 0:\n self.badValue = float(line.split(':')[1].strip())\n if line.find('LONGITUDE') > 0:\n self.lon = line.split(':')[1].strip()\n if line.find('LATITUDE') > 0:\n self.lat = line.split(':')[1].strip()\n if len(line) > 6 and line[2] == '-' and line[6] == '-':\n parts = line.rsplit(None, 1)\n # data line\n timeStamp = datetime.datetime.strptime(parts[0], '%d-%b-%Y %H')\n t = timeArray.datetimeToEpochTime(timeStamp)\n self.time.append(t)\n val = float(parts[1])\n self.data.append(val)\n\n self.time = np.array(self.time)\n self.data = np.array(self.data)\n # remove bad values\n if self.badValue:\n goodIx = self.data != self.badValue\n self.time = self.time[goodIx]\n self.data = self.data[goodIx]\n self.fileIsRead = True",
"def read_data(self, loc):\n pass",
"def read(self, filename):\n pass",
"def read(self, filename):\n pass",
"def wac_to_dict(file_path: str) -> dict:\n\n weather_dict = {'longitude': '',\n 'latitude': '',\n 'altitude': '',\n 'time': [],\n 'temperature': [],\n 'relative_humidity': [],\n 'horizontal_global_solar_radiation': [],\n 'diffuse_horizontal_solar_radiation': [],\n 'air_pressure': [],\n 'vertical_rain': [],\n 'wind_direction': [],\n 'wind_speed': [],\n 'cloud_index': [],\n 'atmospheric_counter_horizontal_long_wave_radiation': [],\n 'atmospheric_horizontal_long_wave_radiation': [],\n 'ground_temperature': [],\n 'ground_reflectance': []\n }\n\n file_obj = open(file_path, 'r')\n file_lines = file_obj.readlines()\n file_obj.close()\n\n weather_dict['longitude'] = float(file_lines[4].split('\\t')[0].strip())\n weather_dict['latitude'] = float(file_lines[5].split('\\t')[0].strip())\n weather_dict['altitude'] = float(file_lines[6].split('\\t')[0].strip())\n\n for line in file_lines[12:]:\n splitted_line = line.split('\\t')\n weather_dict['time'].append(datetime.datetime.strptime(splitted_line[0].strip(), '%Y-%m-%d %H:%M'))\n weather_dict['temperature'].append(float(splitted_line[1].strip()))\n weather_dict['relative_humidity'].append(float(splitted_line[2].strip()))\n weather_dict['horizontal_global_solar_radiation'].append(float(splitted_line[3].strip()))\n weather_dict['diffuse_horizontal_solar_radiation'].append(float(splitted_line[4].strip()))\n weather_dict['air_pressure'].append(float(splitted_line[5].strip()))\n weather_dict['vertical_rain'].append(float(splitted_line[6].strip()))\n weather_dict['wind_direction'].append(float(splitted_line[7].strip()))\n weather_dict['wind_speed'].append(float(splitted_line[8].strip()))\n weather_dict['cloud_index'].append(float(splitted_line[9].strip()))\n weather_dict['atmospheric_counter_horizontal_long_wave_radiation'].append(float(splitted_line[10].strip()))\n weather_dict['atmospheric_horizontal_long_wave_radiation'].append(float(splitted_line[11].strip()))\n weather_dict['ground_temperature'].append(float(splitted_line[12].strip()))\n weather_dict['ground_reflectance'].append(float(splitted_line[13].strip()))\n\n return weather_dict",
"def read_file(log_file):\n\t\tfile = open(log_file, 'r')\n\t\tresult = []\n\t\twhile 1:\n\t\t\tcontent = file.readline()\n\t\t\tif not content:\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tdata = content.split(\"\\003\")\n\t\t\t\tif len(data) == 13:\n\t\t\t\t\ttrack = {\n\t\t\t\t\t\t\t 'device_no' : long(data[0][3:]), 'sim' : data[1], 'type':int(data[2]), 'gps_time' : MongoTrack.time_trans(data[3]),\n\t\t\t\t\t\t\t 'valid' : data[4], 'loc':{'long' : float(data[5]), 'lat' : float(data[6]) }, 'altitude' : float(data[7]),\n\t\t\t\t\t\t\t 'speed' : float(data[8]), 'course' : float(data[9]), 'km' : float(data[10]), 'para' : float(data[11]),\n\t\t\t\t\t\t\t 'rtime' : MongoTrack.time_trans(data[12].strip())\n\t\t\t\t\t\t\t}\n\t\t\t\t\tresult.append(track)\n\t\tfile.close()\n\t\treturn result",
"def read_file(path_to_file):\n 8",
"def get_weather_data(lat, lon):\n\n # Get weather\n filedata = pvtoolslib.get_s3_filename_df()\n filedata_closest = nsrdbtools.find_closest_datafiles(float(lat), float(lon),\n filedata)\n\n filename = filedata_closest['filename'].iloc[0]\n\n if filename == '124250_37.93_-122.3.npz':\n weather, info = nsrdbtools.get_local_weather_data(filename)\n else:\n weather, info = pvtoolslib.get_s3_weather_data(filename)\n\n return weather, info",
"def read(path):",
"def __read():\n f = file(constellation_data_path)\n constellations = []\n for line in f:\n tokens = line.split()\n if not tokens: continue\n hip_numbers = [int(t) for t in tokens[2:]]\n element = tokens[0], zip(hip_numbers[::2], hip_numbers[1::2])\n constellations.append(element)\n f.close()\n return constellations",
"def readtxt(obslog):\n\n logger = log.getLogger('obslog.readtxt')\n\n if not os.path.exists(obslog):\n logger.error('Cannot access %s', obslog)\n raise SystemExit\n\n logger.info('Reading %s', obslog)\n\n with open(obslog) as f: # Since we will have to go through the data twice, read the whole file at once.\n data = f.readlines()\n\n header = ['Observation ID', 'Data Labels', 'File Numbers', 'Dataset UT', 'Target Name', 'Filters', 'Slit',\n 'Grating/Wavelength', 'Camera/Prism', 'ExpTime/LNR/Coadds', 'ACQ']\n\n pattern = dict() # Enforce formatting rules to avoid parsing comments as data:\n pattern['Observation ID'] = re.compile(r'^G[NS]-[0-9]{4}[AB]-([CQ]|DD|FT|LP|SV)-[0-9]{0,3}-[0-9]+$')\n pattern['Data Labels'] = re.compile(r'[0-9]+-*[0-9]*') # 1, 2-3, 45-67, 890-1234\n pattern['File Numbers'] = re.compile(r'[0-9]+-*[0-9]*') # 1, 2-3, 45-67, 890-1234\n pattern['Dataset UT'] = re.compile(r'^[0-9]{2}:[0-9]{2}:[0-9]{2}$') # 09:58:15\n pattern['Target Name'] = re.compile(r'[a-zA-Z0-9_-]+') # Match any string\n pattern['Filters'] = re.compile(r'[A-Z0-9\\-]+') # H, XD, H2, X, J, H\n pattern['Slit'] = re.compile(r'[a-zA-Z0-9]+') # 0.675, ACQ, LgPin\n pattern['Grating/Wavelength'] = re.compile(r'[0-9]{2,3}/[0-9]\\.[0-9]{2}') # 32/1.65, 111/1.68\n pattern['Camera/Prism'] = re.compile(r'[A-Z]{2}/[A-Z]{3}') # LB/MIR, SB/SXD\n pattern['ExpTime/LNR/Coadds'] = re.compile(r'[0-9]+\\.[0-9]/[0-9]+/[0-9]+') # 0.2/1/25, 300.0/32/1\n pattern['ACQ'] = re.compile(r'^Y*$') # Y or ''\n\n indx = {}\n for line in data:\n if 'Electronic Observing Log' in line:\n date = line.split()[-1][7:]\n logger.debug('Log date: %s', date)\n if line[0:14] == 'Observation ID': # This defines the start of the header row\n for h in header:\n indx[h] = line.find(h) # Find where each column starts\n break # No need to go farther\n\n width = {} # Find the width of each row\n for i in range(len(header) - 1): # This requires that 'header' be an ordered array (not a dictionary)\n width[header[i]] = indx[header[i + 1]] - indx[header[i]]\n width[header[i+1]] = 1 # The ACQ field is either 'Y' or blank\n\n val = {}\n match = {}\n info = {}\n for line in data:\n logger.debug('\\n%s', line)\n files = []\n for h in header:\n val[h] = line[indx[h]: indx[h] + width[h]].strip()\n match[h] = re.match(pattern[h], val[h])\n logger.debug('%s: \"%s\" %s' % (h, val[h], match[h]))\n\n # Maybe throw a warning if only match 1 fails; indicating a likely bad pattern specification?\n\n if None in match.values():\n logger.debug('Failed to match all patterns -> This is a comment')\n continue\n\n if '-' in val['File Numbers']:\n start, stop = val['File Numbers'].split('-')\n for i in range(int(start), int(stop)+1):\n files.append(i)\n else:\n files.append(int(val['File Numbers']))\n\n for filenum in files:\n f = 'N%sS%04d.fits' % (date, filenum)\n logger.debug('File: %s', f)\n info[f] = {}\n for h in [header[0]] + header[3:]: # Skip 'Data Labels' and \"File Numbers'\n info[f][h] = val[h]\n\n logger.debug('info: %s', info)\n return info",
"def readfile(self, path, filename):\n # The DataStudio software uses ISO-8859-1 encoding (especially for the degree sign in temperature files)\n file = open(path + filename, encoding=\"iso-8859-1\")\n rowlist = file.readlines()\n\n title = rowlist[0].strip(\"\\n\")\n labels = rowlist[1].strip(\"\\n\").split(sep=\"\\t\")\n\n data = np.zeros((len(rowlist)-2, 2))\n\n for i in range(2, len(rowlist)):\n columns = rowlist[i].split(sep=\"\\t\")\n data[i-2, 0] = float(columns[0].replace(\",\", \".\"))\n data[i-2, 1] = float(columns[1].replace(\",\", \".\"))\n\n return data, title, labels",
"def readTempSensor(sensorName) :\n f = open(sensorName, 'r')\n lines = f.readlines()\n f.close()\n return lines",
"def open_and_read_file():\n file_path = sys.argv[1]\n #print file_path\n file_data = open(file_path, 'r')\n gettysburg = file_data.read()\n\n return gettysburg",
"def read_raw_data(self):\n dat_file = os.path.join(DATA_DIR, self.patient_number + '.txt')\n if not os.path.exists(dat_file):\n raise AssertionError(\"{} doesn't exist.\".format(dat_file))\n time = []\n voltage1 = []\n voltage2 = []\n with open(dat_file, 'r') as fd:\n for line in fd:\n line = line.split()\n time.append(line[0])\n voltage1.append(float(line[1]))\n voltage2.append(float(line[2]))\n\n tags_file = os.path.join(DATA_DIR, self.patient_number + '_tag.txt')\n if not os.path.exists(dat_file):\n raise AssertionError(\"{} doesn't exist.\".format(tags_file))\n tags_time = []\n tags = []\n r_peaks_indexes = []\n with open(tags_file, 'r') as fd:\n for line in fd:\n line = line.split()\n tags_time.append(line[0])\n tags.append(line[2])\n r_peaks_indexes.append(int(line[1]))\n return time, voltage1, voltage2, tags_time, tags, r_peaks_indexes",
"def read_from_ascii(self, filename):\n self.ascii_filename = filename\n # read file content into a string\n f=open(filename,'r')\n file_str=f.read()\n f.close()\n # make dictionary with file content\n reg_exp_data_groups=re.compile(r'^#>>(\\w+):.*\\n',re.M)\n file_dict=self.make_data_dict_from_str(reg_exp_data_groups,file_str)\n # read arrays ------------------------------\n self.x=np.loadtxt(StringIO.StringIO(file_dict['x']))\n self.p=np.loadtxt(StringIO.StringIO(file_dict['p']))\n self.fmci_XP=np.loadtxt(StringIO.StringIO(file_dict['XP']))\n # regular expression for extracting parameter=value\n reg_exp_param_val=re.compile(r'\\n*(\\w+)=',re.M)\n # read params_physics -----------------------\n params_physics_dict=self.make_data_dict_from_str(reg_exp_param_val,file_dict['params_physics'])\n self.name=self.__get_particle_name(params_physics_dict['particle'])\n self.time=float(params_physics_dict['time'])\n # read params_TDC ---------------------------\n params_TDC_dict=self.make_data_dict_from_str(reg_exp_param_val,file_dict['params_TDC'])\n self.calc_id=params_TDC_dict['calc_id']\n self.i_ts=int(params_TDC_dict['i_ts'])",
"def _read_antti_stations(station_file):\n if station_file.split('.')[-1] == 'gz':\n ff = gzip.open(station_file, 'r')\n else:\n ff = open(station_file, 'r')\n\n sIO = io.BytesIO(ff.read().encode())\n ff.close()\n\n # extract and convert single line with observatory IDs\n obsList = []\n llList = []\n incList = []\n nObs = 0\n nLL = 0\n nInc = 0\n for line in sIO:\n if re.search(b\"^%\", line):\n # skip comments\n continue\n\n if re.search(br\"^\\s*$\", line):\n # skip blank lines\n continue\n\n # first line of consequence should be a list of quoted strings holding\n # observatory IDs for observatories considered in this solution; convert\n # to a list of strings\n if len(obsList) == 0:\n obsList = re.sub(b'\\'', b'', line).split()\n nObs = len(obsList)\n continue\n\n # assume next nobs lines read are observatory locations\n if nLL < nObs:\n llList.append([float(elem) for elem in line.decode().split()])\n nLL = nLL+1\n continue\n\n # assume next nobs lines read are observatory inclusion (boolean) lists\n if nInc < nObs:\n #incList.append(line.strip())\n incList.append([int(elem) for elem in line.decode().strip()])\n nInc = nInc+1\n continue\n\n # close sIO\n sIO.close()\n\n if len(llList) > 2:\n obsLat, obsLon, obsRad = list(zip(*llList))\n elif len(llList) == 2:\n obsLat, obsLon = list(zip(*llList))\n obsRad = np.ones(obsLat.shape)\n else:\n raise Exception('Requires (at least) latitude and longitude')\n\n obsInc = list(zip(*incList))\n\n return (np.array(obsLat), np.array(obsLon), np.array(obsRad),\n np.array(obsInc), np.array(obsList))"
] | [
"0.6585251",
"0.65576595",
"0.6403347",
"0.60498667",
"0.59925395",
"0.59534574",
"0.59316444",
"0.58384484",
"0.5829025",
"0.5764885",
"0.57411146",
"0.571867",
"0.57075197",
"0.56858313",
"0.5685079",
"0.56707263",
"0.56707263",
"0.56704044",
"0.5669247",
"0.5663918",
"0.56601894",
"0.56368417",
"0.56338936",
"0.56320095",
"0.5629638",
"0.56266713",
"0.56266093",
"0.5626478",
"0.56122434",
"0.5606969"
] | 0.67925274 | 0 |
Get features (for regression) based on this bikedata's weather data | def get_weather_features(self):
if self.weather_features is None:
raise Exception("Weather features not made yet.")
### self.make_weather_features()
else:
return self.weather_features | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def feature_extraction(self) -> None:\n # Add the hour, minute, and x column to the data\n self.df_poly[\"hour\"] = self.df_poly[\"time\"].apply(lambda y: y.hour)\n self.df_poly[\"minute\"] = self.df_poly[\"time\"].apply(lambda y: y.minute)\n self.df_poly[\"x\"] = self.df_poly[\"hour\"] * 60 + self.df_poly[\"minute\"]\n\n # Empty list to hold the feature names\n poly_feature_names = []\n\n # Add the poly columns to the df_poly\n for degree in [0, 1, 2, 3, 4, 5]:\n self.df_poly = poly(self.df_poly, degree)\n poly_feature_names.append(\"poly_\" + str(degree))\n\n # filterout + - inf, nan\n self.df_poly = self.df_poly[\n ~self.df_poly.isin([np.nan, np.inf, -np.inf]).any(1)\n ]\n\n # Save the poly feature name\n self.poly_feature_names = poly_feature_names\n feature_names = []\n\n #########################################################################################\n train_index_poly = self.df_poly[\n ~self.df_poly.isin([np.nan, np.inf, -np.inf]).any(1)\n ].index\n X_train_poly, y_train_poly = (\n self.df_poly[self.poly_feature_names].loc[train_index_poly],\n self.df_poly[\"y\"].loc[train_index_poly],\n )\n\n # Build the Polynomial Regression Model\n lin_reg = LinearRegression()\n lin_reg.fit(X_train_poly, y_train_poly)\n self.poly_model = lin_reg\n y_train_season = lin_reg.predict(X_train_poly)\n self.y_train_season_obj = y_train_season\n #########################################################################################\n\n for n in [10, 15, 20, 25, 30]:\n self.df = MOM(self.df, n)\n feature_names.append(\"MOM_\" + str(n))\n for n in [10, 15, 20, 25, 30]:\n self.df = ROC(self.df, n)\n feature_names.append(\"ROC_\" + str(n))\n for n in [1, 2, 3, 4, 5]:\n self.df = LAG(self.df, n)\n feature_names.append(\"LAG_\" + str(n))\n for n in [10, 20, 30]:\n self.df = MA(self.df, n)\n feature_names.append(\"MA_\" + str(n))\n\n self.df = self.df[\n ~self.df.isin([np.nan, np.inf, -np.inf]).any(1)\n ] # filterout + - inf, nan\n self.feature_names = feature_names",
"def extractFeatures(self, datum):\n abstract",
"def generate_features(self):\n bars = self.portfolio.data_handler.bars.ix[:, -15:, :]\n prices = bars[\"adj_price_close\"]\n weights = np.array([1.0, -1.])\n feats = pd.DataFrame(index=bars.minor_axis)\n ts = prices.dot(weights)\n feats[\"z-score\"] = (ts.ix[-1] - ts.mean()) / ts.std()\n return feats",
"def get_all_station_feature(city):\n poi_frequency = np.load(exp_data_path + os.sep + 'poi_frequency' + os.sep + 'poi_frequency_{}.npy'.format(city),\n allow_pickle=True) # .tolist()\n poi_num = np.load(exp_data_path + os.sep + 'poi' + os.sep + 'poi_{}.npy'.format(city), allow_pickle=True)\n poi_entropy = np.load(exp_data_path + os.sep + 'poi_entropy' + os.sep + 'poi_entropy_{}.npy'.format(city),\n allow_pickle=True)\n road = np.load(exp_data_path + os.sep + 'roadnet' + os.sep + 'roadnet_{}.npy'.format(city), allow_pickle=True)\n transportation = np.load(exp_data_path + os.sep + 'transportation' + os.sep + 'transportation_{}.npy'.format(city),\n allow_pickle=True)\n commerce = np.load(exp_data_path + os.sep + 'commerce' + os.sep + 'commerce_{}.npy'.format(city), allow_pickle=True)\n\n file_name = exp_data_path + os.sep + 'station' + os.sep + 'all_demand_{}.npy'.format(city)\n demand_data = np.load(file_name, allow_pickle=True)\n num = demand_data[:, 0, -2, np.newaxis] # todo check meaning here, get quick and slow feature\n\n raw_data = np.concatenate((num, poi_frequency, poi_num, poi_entropy, road, transportation, commerce), axis=1)\n csv_data = pd.DataFrame(raw_data, columns=GENERAL_HEADER)\n\n file_path = exp_data_path + os.sep + 'static' + os.sep + 'static_feature_{}.csv'.format(city)\n if os.path.exists(file_path):\n os.remove(file_path)\n csv_data.to_csv(file_path)\n pass",
"def load_data():\n data = pd.read_csv('datasets/housing.csv')\n prices = data['MEDV']\n features = data.drop(['MEDV'], axis=1) # remove it from data as we need to predict it\n print(data.head()) # prints top columns 5 for ex\n return [features, prices]",
"def _extract_features(self, row):\n ncep_data = self.ncep_data\n ncep_sfc_data = self.ncep_sfc_data\n date = row['date']\n features = dict(row)\n #reduce the dimensions of ncep_data(xarray dataset) by fixing coordinates(lon,lat)\n #and then convert it to dataframe\n ncep_data = ncep_data[date.year] \\\n .sel(lon=row['longitude'], lat=row['latitude'], method='nearest') \\\n .to_dask_dataframe() \\\n .compute() \\\n .set_index(['level','time'])\n #reduce the dimensions of ncep_sfc_data(xarray dataset) by fixing coordinates(lon,lat)\n #and then convert it to dataframe\n ncep_sfc_data = ncep_sfc_data[date.year] \\\n .sel(lon=row['longitude'], lat=row['latitude'], method='nearest') \\\n .to_dask_dataframe() \\\n .compute() \\\n .set_index(['time'])\n\n for level in self.levels:\n #features at different pressure level\n point = ncep_data.loc[level]\n p1w = point.rolling(7).mean() # 1 Week mean\n p2w = point.rolling(14).mean() # 2 Week mean\n p3w = point.rolling(21).mean() # 3 Week mean\n # \n v0w = point.loc[date]\n v1w = p1w.loc[date]\n v2w = p2w.loc[date]\n v3w = p3w.loc[date]\n #\n for data_var in self.ncep_data_vars:\n features[\"{0}_0w_lvl_{1}\".format(data_var,level)] = v0w[data_var]\n features[\"{0}_1w_lvl_{1}\".format(data_var,level)] = v1w[data_var]\n features[\"{0}_2w_lvl_{1}\".format(data_var,level)] = v2w[data_var]\n features[\"{0}_3w_lvl_{1}\".format(data_var,level)] = v3w[data_var]\n #features at surface level\n point = ncep_sfc_data\n p1w = point.rolling(7).mean() # 1 Week mean\n p2w = point.rolling(14).mean() # 2 Week mean\n p3w = point.rolling(21).mean() # 3 Week mean\n # \n v0w = point.loc[date]\n v1w = p1w.loc[date]\n v2w = p2w.loc[date]\n v3w = p3w.loc[date]\n #\n for data_var in self.ncep_sfc_data_vars:\n features[\"{0}_0w\".format(data_var)] = v0w[data_var]\n features[\"{0}_1w\".format(data_var)] = v1w[data_var]\n features[\"{0}_2w\".format(data_var)] = v2w[data_var]\n features[\"{0}_3w\".format(data_var)] = v3w[data_var] \n\n return features",
"def get_features(self):\n return []",
"def get_all_features(self) :\n raise NotImplementedError",
"def forecast_weather(self):\n pass",
"def get_weather_data(lat='40.761440',lng='-73.981806'):\r\n key ='********************************'\r\n x = pd.DataFrame()\r\n unix_now = int((dt.datetime.now()- dt.datetime(1970,1,1)).total_seconds())\r\n for time in range(unix_now-86400, unix_now+604800, 86400):\r\n rsp = rq.get('https://api.darksky.net/forecast/{}/{},{},{}'.format(key, lat, lng, time))\r\n rsp_json = json.loads(rsp.text)\r\n row = json_normalize(rsp_json[\"daily\"]['data'])\r\n x = x.append(row)\r\n \r\n x = x[['icon','apparentTemperatureHigh','apparentTemperatureLow','cloudCover','humidity','precipProbability',\r\n 'pressure','visibility','windBearing','windGust','windSpeed']].reset_index(drop=True)\r\n return x",
"def make_weather_features(self, timeline_dt_list):\n\n print \"Making weather features...\"\n\n N_FEATURES = 2\n n_examples = len(timeline_dt_list)\n XX = numpy.zeros((n_examples, N_FEATURES))\n indices = numpy.zeros(n_examples,dtype='int')\n ind_weatherday = 0\n\n # Loop over all times in the timeline\n for ii, time in enumerate(timeline_dt_list):\n # Find where this time in the timeline matches the date\n # of some weather data.\n jj = ind_weatherday\n while time.date() != self.datetimes[jj].date():\n # Make sure jj does not get too large to be an index to\n # the list.\n # Note this is probably a bad idea to do it this way.\n if jj == len(self.datetimes)-1:\n break\n jj += 1\n## print jj\n\n ind_weatherday = jj\n indices[ii] = ind_weatherday\n\n# XX[ii, 0] = self.table['PrecipIn'][ind_weatherday]\n# XX[ii, 1] = self.table['Mean TemperatureF'][ind_weatherday]\n## XX[ii, 2] = self.table['MeanDew PointF'][ind_weatherday]\n\n XX[:,0] = self.table['PrecipIn'][indices]\n XX[:,1] = self.table['Mean TemperatureF'][indices]\n self.weather_features = XX\n return XX",
"def create_features(energy_data, label=None):\n energy_data['date'] = energy_data.index\n energy_data['hour'] = energy_data['Datetime'].dt.hour\n energy_data['dayofweek'] = energy_data['Datetime'].dt.dayofweek\n energy_data['month'] = energy_data['Datetime'].dt.month\n energy_data['quarter'] = energy_data['Datetime'].dt.quarter\n energy_data['year'] = energy_data['Datetime'].dt.year\n energy_data['dayofyear'] = energy_data['Datetime'].dt.dayofyear\n energy_data['dayofmonth'] = energy_data['Datetime'].dt.day\n energy_data['weekofyear'] = energy_data['Datetime'].dt.weekofyear\n energy_data['pjme_2_hrs_lag'] = energy_data['PJME_MW'].shift(2)\n energy_data['pjme_4_hrs_lag'] = energy_data['PJME_MW'].shift(4)\n energy_data['pjme_8_hrs_lag'] = energy_data['PJME_MW'].shift(8)\n energy_data['pjme_12_hrs_lag'] = energy_data['PJME_MW'].shift(12)\n energy_data['pjme_24_hrs_lag'] = energy_data['PJME_MW'].shift(24)\n energy_data['pjme_4_hrs_mean'] = energy_data['PJME_MW'].rolling(window=4).mean()\n energy_data['pjme_8_hrs_mean'] = energy_data['PJME_MW'].rolling(window=8).mean()\n energy_data['pjme_12_hrs_mean'] = energy_data['PJME_MW'].rolling(window=12).mean()\n energy_data['pjme_24_hrs_mean'] = energy_data['PJME_MW'].rolling(window=24).mean()\n energy_data['pjme_4_hrs_std'] = energy_data['PJME_MW'].rolling(window=4).std()\n energy_data['pjme_8_hrs_std'] = energy_data['PJME_MW'].rolling(window=8).std()\n energy_data['pjme_12_hrs_std'] = energy_data['PJME_MW'].rolling(window=12).std()\n energy_data['pjme_24_hrs_std'] = energy_data['PJME_MW'].rolling(window=24).std()\n energy_data['pjme_4_hrs_max'] = energy_data['PJME_MW'].rolling(window=4).max()\n energy_data['pjme_8_hrs_max'] = energy_data['PJME_MW'].rolling(window=8).max()\n energy_data['pjme_12_hrs_max'] = energy_data['PJME_MW'].rolling(window=12).max()\n energy_data['pjme_24_hrs_max'] = energy_data['PJME_MW'].rolling(window=24).max()\n energy_data['pjme_4_hrs_min'] = energy_data['PJME_MW'].rolling(window=4).min()\n energy_data['pjme_8_hrs_min'] = energy_data['PJME_MW'].rolling(window=8).min()\n energy_data['pjme_12_hrs_min'] = energy_data['PJME_MW'].rolling(window=12).min()\n energy_data['pjme_24_hrs_min'] = energy_data['PJME_MW'].rolling(window=24).min()\n\n features = energy_data[['hour', 'dayofweek', 'quarter', 'month', 'year',\n 'dayofyear', 'dayofmonth', 'weekofyear', 'pjme_2_hrs_lag', 'pjme_4_hrs_lag',\n 'pjme_8_hrs_lag', 'pjme_12_hrs_lag', 'pjme_24_hrs_lag', 'pjme_4_hrs_mean',\n \"pjme_8_hrs_mean\", \"pjme_12_hrs_mean\", \"pjme_24_hrs_mean\", \"pjme_4_hrs_std\",\n \"pjme_8_hrs_std\", \"pjme_12_hrs_std\", \"pjme_24_hrs_std\",\n \"pjme_4_hrs_max\", \"pjme_8_hrs_max\", \"pjme_12_hrs_max\", \"pjme_24_hrs_max\",\n \"pjme_4_hrs_min\", \"pjme_8_hrs_min\", \"pjme_12_hrs_min\", \"pjme_24_hrs_min\"]]\n if label:\n label = energy_data[label]\n return features, label\n return features",
"def select(self, features):\n if 'Weather Type' not in features:\n features.append('Weather Type')\n self.data = self.data[:,[self._getFIdx(f) for f in features]]\n self.featureNames = self.featureNames[[self._getFIdx(f) for f in features]]\n return 0",
"def extract_features(time_series, window):\n if not tsd_common.is_standard_time_series(time_series, window):\n # add your report of this error here...\n\n return []\n\n # spilt time_series\n split_time_series = tsd_common.split_time_series(time_series, window)\n # nomalize time_series\n normalized_split_time_series = tsd_common.normalize_time_series(split_time_series)\n max_min_normalized_time_series = tsd_common.normalize_time_series_by_max_min(split_time_series)\n s_features = statistical_features.get_statistical_features(normalized_split_time_series[4])\n f_features = fitting_features.get_fitting_features(normalized_split_time_series)\n c_features = classification_features.get_classification_features(max_min_normalized_time_series)\n # combine features with types\n features = s_features + f_features + c_features\n return features",
"def get_dataset_features(text):\n return model.extract(text)",
"def get_model_feature(\n model,\n batch_x\n):\n features = model.get_feature(batch_x, training=False)\n return features",
"def extract_features(self, inputs):\n pass",
"def get_features(self):\n if self.strokes is False:\n print('Isolating strokes')\n self.isolate_strokes()\n # List of features to use (sm1 omitted because always nan)\n feature_names = ('zrc', 'centroid',\n 'cm0', 'cm1', 'cm2', 'cm3', 'cm4',\n 'sm0', 'sm2')\n features_list = []\n for istroke in self.strokes:\n if not self.isGoodFrame(istroke):\n continue\n ifeature_dic = self.extract_features_from_frame(istroke)\n ifeature_list = []\n for ifeature in feature_names:\n ifeature_list.append(ifeature_dic[ifeature])\n features_list.append(ifeature_list)\n return {'feature_names': feature_names,\n 'feature_table': np.array(features_list)}",
"def _get_feature_map(self, time_period=False, volume_filter=False):\n if not self.stocks:\n return False\n\n # Load the data from the stock dictionary\n features = []\n symbol_names = []\n historical_price_info = []\n\n if not time_period:\n today = datetime.datetime.now()\n previous = today - datetime.timedelta(days=60)\n time_period = [previous, today]\n\n for stock in self.stocks:\n price_data = self.db.get_stock_prices(\n stock, time_period=time_period, dataframe=True)\n\n if type(price_data) == bool and not price_data:\n continue\n if len(price_data) < 5:\n continue\n\n volatility = self.stock_engine.volatility(\n price_data, dataframe=True)\n\n if volatility[0] < self.VOLATILITY_FILTER:\n continue\n\n stock_feature_dict = self.stock_engine.get_technical_indicators(\n price_data)\n\n if not stock_feature_dict:\n continue\n\n feature_list = []\n for key in list(sorted(stock_feature_dict.keys())):\n feature_list.extend(stock_feature_dict[key])\n\n if np.isnan(feature_list).any() == True:\n\n continue\n\n avg_volume = np.mean(list(price_data['volume'])[-30:])\n\n if volume_filter and avg_volume < volume_filter:\n continue\n\n features.append(feature_list)\n symbol_names.append(stock)\n historical_price_info.append(price_data)\n features, historical, symbols = self._preproc_data(\n features, historical_price_info, symbol_names)\n\n return features, historical, symbols",
"def get_features(self):\n \n # Get the model from cache or disk based on the model_name in request\n self._get_model_by_name()\n \n # Prepare the output\n self.response = self.model.features_df\n self.response[\"sort_order\"] = pd.Series([i+1 for i in range(len(self.response.index))], index=self.response.index)\n self.response = self.response[[\"model_name\", \"sort_order\", \"name\", \"variable_type\", \"data_type\",\\\n \"feature_strategy\", \"strategy_args\"]]\n \n # Send the reponse table description to Qlik\n self._send_table_description(\"features\")\n \n # Debug information is printed to the terminal and logs if the paramater debug = true\n if self.model.debug:\n self._print_log(4)\n \n # Finally send the response\n return self.response",
"def extract_features(self):\n self.extract_features_static()\n self.extract_features_dynamic()",
"def get_features(self):\n return self._features",
"def _extract_data(self) -> np.ndarray:\n \n mats = Material.objects.all()\n \n mat_arrays = []\n for mat in mats: # django queryset -> python list\n mat_features = []\n \n # Add data\n # Some data are missing here.\n #TODO: Delete those if sentences after cleaning the data.\n mat_features.append(mat.model_surface_temperature if mat.model_surface_temperature!=None else 0)\n mat_features.append(mat.melt_temperature if mat.melt_temperature!=None else 0)\n mat_features.append(mat.mold_temperature_range_min if mat.mold_temperature_range_min!=None else 0)\n mat_features.append(mat.mold_temperature_range_max if mat.mold_temperature_range_max!=None else 0)\n mat_features.append(mat.melt_temperature_range_min if mat.melt_temperature_range_min!=None else 0)\n mat_features.append(mat.melt_temperature_range_max if mat.melt_temperature_range_max!=None else 0)\n mat_features.append(mat.absolute_maximum_melt_temperature if mat.absolute_maximum_melt_temperature!=None else 0)\n mat_features.append(mat.ejection_temperature if mat.ejection_temperature!=None else 0)\n mat_features.append(mat.maximum_shear_stress if mat.maximum_shear_stress!=None else 0)\n mat_features.append(mat.maximum_shear_rate if mat.maximum_shear_rate!=None else 0)\n mat_features.append(mat.melt_density if mat.melt_density!=None else 0)\n mat_features.append(mat.solid_density if mat.solid_density!=None else 0)\n mat_features.append(mat.pvt_b5 if mat.pvt_b5!=None else 0)\n mat_features.append(mat.pvt_b6 if mat.pvt_b6!=None else 0)\n mat_features.append(mat.pvt_b1m if mat.pvt_b1m!=None else 0)\n mat_features.append(mat.pvt_b2m if mat.pvt_b2m!=None else 0)\n mat_features.append(mat.pvt_b2m if mat.pvt_b2m!=None else 0)\n mat_features.append(mat.pvt_b4m if mat.pvt_b4m!=None else 0)\n mat_features.append(mat.pvt_b1s if mat.pvt_b1s!=None else 0)\n mat_features.append(mat.pvt_b2s if mat.pvt_b2s!=None else 0)\n mat_features.append(mat.pvt_b3s if mat.pvt_b3s!=None else 0)\n mat_features.append(mat.pvt_b4s if mat.pvt_b4s!=None else 0)\n mat_features.append(mat.pvt_b7 if mat.pvt_b7!=None else 0)\n mat_features.append(mat.pvt_b8 if mat.pvt_b8!=None else 0)\n mat_features.append(mat.pvt_b9 if mat.pvt_b9!=None else 0)\n mat_features.append(mat.elastic_modulus_e1 if mat.elastic_modulus_e1!=None else 0)\n mat_features.append(mat.elastic_modulus_e2 if mat.elastic_modulus_e2!=None else 0)\n mat_features.append(mat.poisson_ratio_v12 if mat.poisson_ratio_v12!=None else 0)\n mat_features.append(mat.poisson_ratio_v23 if mat.poisson_ratio_v23!=None else 0)\n mat_features.append(mat.shear_modulus_g12 if mat.shear_modulus_g12!=None else 0.)\n mat_features.append(mat.thermal_expansion_data_transverse_isotropic_coefficient_alpha1 if mat.thermal_expansion_data_transverse_isotropic_coefficient_alpha1!=None else 0.)\n mat_features.append(mat.thermal_expansion_data_transverse_isotropic_coefficient_alpha2 if mat.thermal_expansion_data_transverse_isotropic_coefficient_alpha2!=None else 0.)\n mat_features.append(mat.seven_params_n if mat.seven_params_n!=None else 0.)\n mat_features.append(mat.seven_params_Tau if mat.seven_params_Tau!=None else 0.)\n mat_features.append(mat.seven_params_D1 if mat.seven_params_D1!=None else 0.)\n mat_features.append(mat.seven_params_D2 if mat.seven_params_D2!=None else 0.)\n mat_features.append(mat.seven_params_D3 if mat.seven_params_D3!=None else 0.)\n mat_features.append(mat.seven_params_A1 if mat.seven_params_A1!=None else 0.)\n mat_features.append(mat.seven_params_A2 if mat.seven_params_A2!=None else 0.)\n mat_features.append(mat.c1 if 
mat.c1!=None else 0.)\n mat_features.append(mat.c2 if mat.c2!=None else 0.)\n mat_features.append(mat.conversion_temperature if mat.conversion_temperature!=None else 0.)\n mat_features.append(mat.MFR_temperature if mat.MFR_temperature!=None else 0.)\n mat_features.append(mat.MFR_loading if mat.MFR_loading!=None else 0.)\n mat_features.append(mat.measured_MFR if mat.measured_MFR!=None else 0.)\n \n mat_arrays.append(mat_features)\n \n # Get numpy arrays.\n mat_arrays = np.array(mat_arrays, dtype=np.float64)\n \n return mat_arrays",
"def parse_weather(data: DataFrame) -> List[WeatherData]:\n parsed_results = []\n\n for index, row in data.iterrows():\n date = sqlite3.Date(index.year, index.month, index.day)\n item = WeatherData(\n date=date,\n average_temp=celsius_to_fahr(row.get('tavg', 0)),\n precipitation=row.get('prcp', 0),\n )\n parsed_results.append(item)\n return parsed_results",
"def weather_data(cities, openweathermap_api_key=openweathermap_api_key):\n L = []\n for c in cities:\n res = requests.get(f'http://api.openweathermap.org/data/2.5/weather?q={c}&appid={openweathermap_api_key}&units=imperial')\n L.append(res.json())\n\n df = pd.DataFrame(L)\n df['lon'] = df['coord'].map(op.itemgetter('lon'))\n df['lat'] = df['coord'].map(op.itemgetter('lat'))\n df['Temprature'] = df['main'].map(op.itemgetter('temp'))\n df['Humidity'] = df['main'].map(op.itemgetter('humidity'))\n df['Wind Speed'] = df['wind'].map(op.itemgetter('speed'))\n return df[['name','lon', 'lat','Temprature','Humidity','Wind Speed']]",
"def features(self) -> List[np.ndarray]:\n if len(self.data) == 0 or self.data[0].features is None:\n return None\n\n return [d.features for d in self.data]",
"def get_features(data, col_list, y_name):\n \n # keep track of numpy values\n feature_matrix = data[col_list + [y_name]].dropna().values\n return feature_matrix[:, :-1], feature_matrix[:, -1]",
"def get_features(self, request, **kwargs):\n raise NotImplementedError()",
"def get_features(self, problem_name=None, user_name=None):\n with self.__orm.session_scope() as session:\n results = self._get_features(session, problem_name, user_name).all()\n feature_dicts = []\n for feature, user_name in results:\n d = {\n \"user\" : user_name,\n \"description\" : feature.description,\n \"md5\" : feature.md5,\n \"created_at\" : feature.created_at,\n }\n feature_metrics = session.query(Metric.name,\n Metric.value).filter(Metric.feature_id ==\n feature.id).all()\n # feature_metrics = feature.metrics\n for metric in feature_metrics:\n d[metric.name] = metric.value\n\n feature_dicts.append(d)\n\n if not feature_dicts:\n print(\"No features found\")\n else:\n return pd.DataFrame(feature_dicts)",
"def featurize(data):\n features = {}\n missing_weight = False\n for fieldname in STATIC_FIELDS:\n # Static fields use -1 to denote that the value was not measured.\n if data[fieldname][0][1] == -1:\n features[fieldname] = NAN_REPLACE\n else:\n features[fieldname] = float(data[fieldname][0][1])\n for fieldname in FIELDS:\n # Time-series fields may or may not be measured, but if they are present\n # in the dataset, then the value will be valid (i.e. nonnegative).\n if fieldname in data:\n values = [float(d[1]) for d in data[fieldname]]\n if -1 in values and fieldname == 'Weight':\n # Record that weight was missing for this record id.\n missing_weight = True\n field_features = set_features_to_nan(fieldname)\n else:\n field_features = {}\n field_features['{}_min'.format(fieldname)] = min(values)\n field_features['{}_max'.format(fieldname)] = max(values)\n field_features['{}_mean'.format(fieldname)] = np.mean(values)\n field_features['{}_first'.format(fieldname)] = values[0]\n field_features['{}_last'.format(fieldname)] = values[-1]\n field_features['{}_diff'.format(fieldname)] = values[-1] - values[0]\n else:\n field_features = set_features_to_nan(fieldname)\n features.update(field_features)\n return features, missing_weight"
] | [
"0.65693516",
"0.6504419",
"0.63095343",
"0.6184907",
"0.6181109",
"0.61156017",
"0.6097132",
"0.60952747",
"0.60912675",
"0.6084227",
"0.6035357",
"0.59760165",
"0.5962229",
"0.5936362",
"0.5921777",
"0.59124935",
"0.59112424",
"0.58922076",
"0.58688956",
"0.58619624",
"0.58587444",
"0.5850264",
"0.5834209",
"0.5826236",
"0.5792523",
"0.5777546",
"0.5776913",
"0.5746392",
"0.5723293",
"0.5713805"
] | 0.7378489 | 0 |
Publish response to kafka topic | def publish_response(class_label):
client = KProducer(config=publisher_config)
client.produce(class_label, PUBLISHER_TOPIC) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def publish(self, node, topic, data={}, on_publish=None, on_response=None):\n pass",
"def produce(self, response, regex, ts):\n self.logger.info(\"Producing message...\")\n\n payload = {\n \"url\": response.url,\n \"latency\": response.elapsed,\n \"status\": response.status_code,\n \"check_time\": ts,\n }\n\n if regex:\n try:\n payload[\"regex_match\"] = bool(re.search(regex, response.text))\n except re.error as e:\n raise e\n\n try:\n self.producer.produce(\n self.topic,\n value=json.dumps(payload, cls=JSONDatetimeEncoder),\n callback=_log_produced,\n )\n self.producer.poll(1)\n except KafkaException as e:\n self.logger.error(\n \"An error occurred while producing a message: %s\", e.args[0].reason\n )",
"def publish(self, topic, msg):\n\t\tself.topic = topic\n\t\tself.msg = msg \n\t\tself.client.publish(self.topic, self.msg)",
"def publish(self, topic, msg):\n formatted_msg = json.dumps(msg)\n self.client.publish(topic, formatted_msg) # json converting cause of mqtt's data transfer limit.",
"def _publish(self, topic_name, message):\n msg = {\n 'op': 'publish',\n 'topic': topic_name,\n 'msg': message\n }\n json_msg = json.dumps(msg)\n self.ws.send(json_msg)",
"def publish_mqtt(self, topic, data={}, on_publish=None, on_response=None, inject_rid=True):\n payload = data\n\n # If this is a dict and we're allowed to inject a request ID, do so\n # Injecting a request ID allows the nodes to respond and us to execute callbacks\n if (type(data) is dict) and inject_rid:\n data['rid'] = str(shortuuid.uuid())\n\n # JSON encode dicts, lists and stuff\n if type(data) in [dict, list, tuple]:\n payload = json.dumps(data)\n\n result, mid = self.mqtt.publish(topic, payload, qos=1)\n\n if on_publish:\n self.publish_callbacks[mid] = on_publish\n\n if on_response and data and data.get('rid', None):\n self.response_callbacks[data['rid']] = on_response\n\n self.publishes.append(mid)\n\n while mid in self.publishes:\n self.wait()",
"def kafka_publish_message(self, message):\n self.kf_sender = self.kf_producer.send(self.kf_topic, value=message.encode('utf-8'));",
"def reply(self, topic, callback):\n \n msg = self.topics[topic].recv()\n rep = callback(msg)\n self.topics[topic].send(rep)",
"def publish(self, topic:str, data:bytes) -> None:\n\t\tself.mqttClient.publish(topic, data)",
"def publish(self, topic, value):\n msg = self.topics[topic]['msg']\n msg.data = value\n self.topics[topic]['publisher'].publish(msg)\n print(\"published \\t{} \\t{}\".format(topic, value))",
"def publish( self, topic, data, qos = 1, retain = False ):\n logging.info( \"Publishing to topic %s\" %topic )\n self.client.publish( topic, data, qos = qos, retain = retain )",
"def output_topic_callback(self, msg):\n with self.callback_lock:\n if self._time_received_input != 0:\n # Get actual time from ROS\n time_now = self.node.get_clock().now().nanoseconds\n\n # Compute the amount of time elapsed from receiving the last\n # message in the input topic\n measure = time_now - self._time_received_input\n\n # Transform from nanoseconds to milliseconds\n measure = measure / (1000 * 1000)\n\n publish_msg = Int64()\n publish_msg.data = int(measure)\n\n # Publish the measurement\n self._publisher.publish(publish_msg)\n\n self._time_received_input = 0",
"def publish(self, topic, payload):\n complete_topic = \"{}/{}\".format(self._base_topic, topic)\n self._client.publish(complete_topic, payload, qos=2)\n logger.info(\"On topic %s published: %s\", complete_topic, payload)",
"def publish_message(producer_instance, topic_name, key, value):\n key_serializer = repr(key).encode()\n value_serializer = repr(value).encode()\n\n producer_instance.send(topic_name, key=key_serializer, value=value_serializer)\n producer_instance.flush()\n print('Message published successfully.')",
"def publish(topic, message):\n if DEBUG:\n print(\"Publish: '\" + message + \"' (topic: '\" + topic + \"')\")\n DATA[\"client\"].publish(topic, message)",
"def publish_and_wait(self, node, topic, data={}):\n pass",
"def post(self):\n s = ScuttlebuttService()\n try:\n topic_dict = simplejson.loads(self.request.body)\n topic = s.CreateTopic(topic_dict)\n self.response.headers['Content-Type'] = 'application/json'\n self.response.out.write(simplejson.dumps(topic.ToDict()))\n except simplejson.JSONDecodeError:\n # HTTP 400 for bad syntax.\n self.response.set_status(\n 400, 'Failed to create topic. Invalid JSON: %s' % self.request.body)\n except Exception, e:\n # HTTP 422 for syntactically correct but semantically wrong.\n self.response.set_status(422, 'Error creating topic: %s' % e)",
"def publish(self, data=None):\n rospy.loginfo(\"Message published on topic %s\", self.topic)",
"def test_publish(self):\n target_arn = 'testing'\n supercuboid_key = 'acd123'\n message_id = '123456'\n receipt_handle = 'a1b2c3d4'\n message = serializer.encodeIngestMessage(supercuboid_key, message_id, receipt_handle)\n self.sns.publish(self.topic_arn, message)\n message = self.sns.subscribe(self.topic_arn)",
"def maybe_notify_lessee(request, response):\n if request.get('pubsub_topic'):\n pubsub.publish(\n pubsub.full_topic_name(\n request['pubsub_project'], request['pubsub_topic']),\n json.dumps(response),\n {},\n )\n metrics.pubsub_messages_sent.increment(fields={'target': 'lessee'})",
"def publish(self, message: None):\n response = self.client.publish(TopicArn=self.params['topic_arn'], Message=message)\n return response",
"def publish(self, message):\n logger.info(\"Publishing to topic [{0}]: {1}\".format(self._topic_name, message))\n self._executor.send(json.dumps({\n 'op': 'publish',\n 'id': 'publish:{0}:{1}'.format(self._topic_name, self._id),\n 'topic': self._topic_name,\n 'msg': message\n }))",
"async def publish(self, msgDict):\n try:\n msgId = msgDict.get(\"id\", None)\n msgType = msgDict.get(\"type\", None)\n msgRetry = msgDict.get(\"retry\", None)\n if msgId:\n self.write('id: {}\\n'.format(msgId))\n if msgType:\n self.write('event: {}\\n'.format(msgType))\n if msgRetry:\n self.write('retry: {}\\n'.format(msgRetry))\n self.write('data: {}\\n\\n'.format(msgDict[\"data\"]))\n await self.flush()\n return True\n except StreamClosedError:\n return False",
"def response_kafka_topic_name(self, response_kafka_topic_name: str):\n\n self._response_kafka_topic_name = response_kafka_topic_name",
"def on_next(self, msg):\n # publish the message to the topics\n retain = msg.retain if hasattr(msg, 'retain') else False\n for (topic, qos) in self.topics:\n self.client.publish(topic, msg, qos, retain)",
"def publish(self, message: str) -> None:",
"def publish(self, node, topic, data={}, on_publish=None, on_response=None):\n logger.debug('Publishing \"%s\" data to node \"%s\"', topic, node.node_id)\n\n logger.debug('Connecting to \"%s:%s\" over TCP socket', node.node_id, self.options['port'])\n conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n conn.connect((node.node_id, self.options['port']))\n\n # JSON encode dicts, lists and stuff\n if type(data) in [dict, list, tuple]:\n data = json.dumps(data)\n\n payload = {\n 'cmd': topic,\n 'data': data,\n }\n payload = json.dumps(payload)\n payload = bytes(payload, 'utf8')\n\n logger.debug('Sending %s bytes of data', len(payload))\n conn.send(payload)\n\n if on_publish:\n logger.debug('Calling publish callback')\n on_publish()\n\n conn.setblocking(0)\n ready = select.select([conn], [], [], self.timeout / 1000)\n payload, data = None, None\n\n if ready[0]:\n payload = conn.recv(8192)\n payload = str(payload, 'utf8')\n logger.debug('Received %s bytes of data' % len(payload))\n\n try:\n data = json.loads(payload)\n except json.decoder.JSONDecodeError as e:\n logger.error('Error while JSON decoding message payload: %s' % e)\n\n if on_response:\n logger.debug('Calling response callback')\n on_response(payload, data)\n\n logger.debug('Closing connection')\n conn.close()\n\n return payload, data",
"def publish_and_wait(self, node, topic, data={}):\n return self.publish(node, topic, data=data)",
"def on_publish( client, userdata, mid ):\n logging.info( \"Data published successfully.\" )",
"def publish_to_simulation(self, topic, message, **kwargs):\n pass"
] | [
"0.6920841",
"0.6638568",
"0.6578799",
"0.6561684",
"0.6525909",
"0.65240884",
"0.6514028",
"0.641655",
"0.62882924",
"0.627717",
"0.62517506",
"0.61733186",
"0.61410475",
"0.6117747",
"0.61167103",
"0.61138386",
"0.61064917",
"0.608827",
"0.6083523",
"0.6068794",
"0.60644025",
"0.6057398",
"0.60570663",
"0.6046443",
"0.5974617",
"0.59727097",
"0.5960435",
"0.5958009",
"0.59568113",
"0.5949957"
] | 0.7494711 | 0 |