Dataset columns (name, type, observed lengths):

query            string, lengths 9 to 9.05k
document         string, lengths 10 to 222k
metadata         dict
negatives        list, length 30
negative_scores  list, length 30
document_score   string, lengths 4 to 10
document_rank    string, 2 classes
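Each row pairs a natural-language query (a test description) with its matching test function in document, plus 30 hard-negative code passages in negatives and their retrieval scores in negative_scores. A minimal loading sketch using the Hugging Face datasets library, assuming the rows are available as a local JSONL export (the path below is hypothetical):

# Minimal loading sketch; the file path is hypothetical and stands in for
# wherever this dataset's rows are actually stored.
from datasets import load_dataset

ds = load_dataset("json", data_files={"train": "data/train.jsonl"})["train"]

row = ds[0]
print(row["query"])              # natural-language test description
print(row["document"][:200])     # positive code passage
print(len(row["negatives"]))     # 30 hard-negative passages
print(row["document_score"], row["document_rank"])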
Check to make sure sklearn's UMAP doesn't use the title param
def test_sklearn_umap_title(self):
    # In TSNEVisualizer, the internal sklearn UMAP transform consumes
    # some but not all kwargs passed in by user. Those not in get_params(),
    # like title, are passed through to YB's finalize method. This test should
    # notify us if UMAP's params change on the sklearn side.
    with pytest.raises(TypeError):
        UMAP(title="custom_title")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_custom_title_umap(self):\n umap = UMAPVisualizer(title=\"custom_title\")\n\n assert umap.title == \"custom_title\"", "def test_umap_mismtached_labels(self):\n ## produce random data\n X, y = make_classification(\n n_samples=200,\n n_features=100,\n n_informative=20,\n n_redundant=10,\n n_classes=3,\n random_state=42,\n )\n\n ## fewer labels than classes\n umap = UMAPVisualizer(random_state=87, labels=[\"a\", \"b\"])\n with pytest.raises(YellowbrickValueError):\n umap.fit(X, y)\n\n ## more labels than classes\n umap = UMAPVisualizer(random_state=87, labels=[\"a\", \"b\", \"c\", \"d\"])\n with pytest.raises(YellowbrickValueError):\n umap.fit(X, y)", "def test_no_target_umap(self):\n ## produce random data\n X, y = make_classification(\n n_samples=200,\n n_features=100,\n n_informative=20,\n n_redundant=10,\n n_classes=3,\n random_state=6897,\n )\n\n ## visualize data with UMAP\n umap = UMAPVisualizer(random_state=64)\n umap.fit(X)\n\n self.assert_images_similar(umap, tol=40)", "def testFalseCapTitle(self):\n val = capTitles(\"victor Ifezue\") \n self.assertNotEqual(val, \"victor Ifezue\")", "def test_sklearn_umap_size(self):\n # In UMAPVisualizer, the internal sklearn UMAP transform consumes\n # some but not all kwargs passed in by user. Those not in get_params(),\n # like size, are passed through to YB's finalize method. This test should\n # notify us if UMAP's params change on the sklearn side.\n with pytest.raises(TypeError):\n UMAP(size=(100, 100))", "def test_umap_unavailable():\n from yellowbrick.text.umap_vis import UMAP\n\n assert UMAP is None\n\n with pytest.raises(\n YellowbrickValueError, match=\"umap package doesn't seem to be installed\"\n ):\n UMAPVisualizer()", "def testMapTitle(self) -> None:\n def testNewTitle(name:str, solution:list[float]):\n self._nameClassifierBuilder._initializeNameMapping()\n title = self._nameClassifierBuilder._getTitle(name)\n self._nameClassifierBuilder._mapTitle(title)\n self.assertEquals(solution, self._nameClassifierBuilder._currentNameMapping)\n\n solution = [1.0,0.0,0.0,0.0,0.0,0.0]\n testNewTitle(\"jslghaldfaCollgja lgn awfggad\", solution)\n \n solution = [0.0,0.0,1.0,0.0,0.0,0.0]\n testNewTitle(\"fsdj Mrs. 
afjdlgaj\", solution)\n\n solution = [0.0,0.0,0.0,0.0,0.0,1.0]\n testNewTitle(\"jslghaldfagja lgn awfggad\", solution)", "def need_labels(self) -> bool:\n return False", "def need_labels(self) -> bool:\n return False", "def test_title(names):", "def conclusion_title_map(self):\n pass", "def need_labels(self) -> bool:\n return True", "def istitle(self) -> bool:\n pass", "def test_labels_encoder_no_classes(self):\n\n class L2UTransformer(object):\n def transform(self, y):\n return np.array([yi.upper() for yi in y])\n\n oz = ClassificationScoreVisualizer(GaussianNB(), encoder=L2UTransformer())\n with pytest.warns(YellowbrickWarning, match=\"could not determine class labels\"):\n assert oz._labels() is None", "def test_make_classification_umap_class_labels(self):\n\n ## produce random data\n X, y = make_classification(\n n_samples=200,\n n_features=100,\n n_informative=20,\n n_redundant=10,\n n_classes=3,\n random_state=42,\n )\n\n ## visualize data with UMAP\n umap = UMAPVisualizer(random_state=87, labels=[\"a\", \"b\", \"c\"])\n umap.fit(X, y)\n\n self.assert_images_similar(umap, tol=40)", "def test_unnamed_parameter(self):\n\n m = Mothur(**self.init_vars)\n m.help('summary.seqs')\n\n return", "def test_quick_method(self):\n corpus = load_hobbies()\n tfidf = TfidfVectorizer()\n\n X = tfidf.fit_transform(corpus.data)\n y = corpus.target\n\n viz = umap(X, y, show=False)\n assert isinstance(viz, UMAPVisualizer)\n\n self.assert_images_similar(viz, tol=50)", "def test_print_title_negativie(capsys, title, result):\n GC.print_title(title)\n out, err = capsys.readouterr()\n print(err)\n assert out != result", "def test_make_classification_umap(self):\n\n ## produce random data\n X, y = make_classification(\n n_samples=200,\n n_features=100,\n n_informative=20,\n n_redundant=10,\n n_classes=3,\n random_state=42,\n )\n\n ## visualize data with UMAP\n umap = UMAPVisualizer(random_state=87)\n umap.fit(X, y)\n\n self.assert_images_similar(umap, tol=40)", "def _check_for_labels(self):\n check = True\n if 'labels' not in self.mapper:\n check = False\n return check", "def test_labels_warning(self):\n with pytest.warns(\n YellowbrickWarning, match=\"both classes and encoder specified\"\n ):\n oz = ClassificationScoreVisualizer(\n GaussianNB(),\n classes=[\"a\", \"b\", \"c\"],\n encoder={0: \"foo\", 1: \"bar\", 2: \"zap\"},\n )\n labels = oz._labels()\n npt.assert_array_equal(labels, [\"foo\", \"bar\", \"zap\"])", "def test_default_parameters() -> None:\n mapie = MapieClassifier()\n assert mapie.estimator is None\n assert mapie.method == \"score\"\n assert mapie.cv == \"prefit\"\n assert mapie.verbose == 0\n assert mapie.n_jobs is None", "def is_bad_title(title):\n bad_examples = [\"under construction\", \"test page\", \"redirect\", \"index of\", \"none \", \"expired\", \"coming soon\",\n \"error \", \"domain pending\", \"at directnic\", \"pending validation\", \"website disabled\",\n \"US Zip Code Information\", # verified we need this, urls like 00000.us, 00001.us end up at zipcode.com\n \"domain default page\", \"non-existent domain\", \"v-webs hosting services\",\n \"be back soon\", \"something went wrong\", \"Lunarpages Web Hosting Placeholder Page\",\n \"Félicitations ! 
Votre domaine a bien été créé chez OVH !\", \"Domaine réservé\",\n \" - For Sale | Undeveloped\", \"Yahoo's Aabaco Small Business: Websites, Ecommerce, Email & Local Listings\",\n \"service unavailable\", \"website disabled\", \"404 Not Found\", \"Not Found\", \"Page cannot be found\"\n ]\n for bad_title in bad_examples:\n if bad_title.lower() in title.lower():\n debug(bad_title)\n return hit(bad_title)\n\n exact_matches = [\"web hosting\", \"webhosting\"]\n for ma in exact_matches:\n if title.replace(\" \", \"\").replace(\"\\t\", \"\").replace(\"\\n\", \"\").replace(\"\\r\", \"\").lower() == ma:\n debug(ma)\n return hit(ma)\n return False", "def test_article_has_no_page_title(self, fake_article_missing_elements):\n\n fake_analysis = PageTitleAnalyzer(title=fake_article_missing_elements.title)\n assert not fake_analysis.has_page_title()", "def need_labels(self) -> None:\n raise NotImplementedError()", "def test_default_sample_weight() -> None:\n mapie = MapieClassifier()\n assert signature(mapie.fit).parameters[\"sample_weight\"].default is None", "def replaces_summary(self):\n return False", "def settitle(self, title):\n self.__title = title\n self.__nonzero = True", "def testConvertMissingLabels(self):\n self.assertEqual(self.data['no_species']['labels'][0]['species'], '-1')\n self.assertEqual(self.data['no_count']['labels'][0]['count'], '-1')\n self.assertEqual(self.data['no_standing']['labels'][0]['standing'], '-1')", "def __init__(self, title=\"\"):\n self.__title = title\n self.__data = []\n if self.__title:\n self.__nonzero = True\n else:\n self.__nonzero = False" ]
[ "0.66912156", "0.64914465", "0.61412466", "0.5810493", "0.57012737", "0.56671464", "0.5628206", "0.5620812", "0.5620812", "0.5588849", "0.5584933", "0.5557151", "0.55520403", "0.5547825", "0.5476415", "0.54417104", "0.54199195", "0.53977454", "0.5372286", "0.53333205", "0.5331825", "0.5239894", "0.5214307", "0.5205799", "0.51816684", "0.5168972", "0.51072043", "0.51027304", "0.50829536", "0.508057" ]
0.8391244
0
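Here the document_score (0.8391244) is higher than every negative_score (maximum roughly 0.669), which is consistent with a document_rank of 0. A hedged sketch of recomputing that rank from the stored scores, under the assumption that the rank counts how many negatives outscore the positive document:

# Assumption: document_rank = number of negatives that outscore the document,
# so 0 means the positive passage ranks first against its 30 negatives.
def recompute_rank(document_score, negative_scores):
    doc = float(document_score)
    return sum(float(s) > doc for s in negative_scores)

# For the row above this returns 0, matching the stored document_rank.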
Check UMAP can accept a custom title (string) from the user
def test_custom_title_umap(self):
    umap = UMAPVisualizer(title="custom_title")

    assert umap.title == "custom_title"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def valid_title(self, title):\n if title in self.timers.keys() and isinstance(title, str) and self.timers[title]['count']>0:\n return True\n else:\n return False", "def validate_title_input(title):\n if len(title) != 0:\n clear()\n return True\n\n else:\n clear()\n print('** Please enter a task title **')\n return False", "def titleValidator(self, title):\n if type(title) != str:\n API.abort(400, error_messages[11]['Int_title'])\n\n # check if the contents of title have characters between a-z and A-Z\n elif not re.match(r\"(^[a-zA-Z_]+$)\", title) or title.isspace():\n API.abort(\n 400, error_messages[12]['wrong_format_title'])\n\n return True", "def istitle(self) -> bool:\n pass", "def test_users_has_users_in_title(self):\n self.browser.get(self.warno_url)\n self.browser.find_element_by_link_text(\"Users\").click()\n self.assertTrue('User' in self.browser.title, 'Users did not have \"User\" in title')", "def test_title(self):\n key = api.portal.get_registry_record(\n 'plone.site_title'\n )\n self.assertEqual(u'Briefy CMS', key)", "def _validate_title(self, attribute: attr.Attribute, value: str):\n\n if not isinstance(value, str) or len(value) <= 0:\n raise ValueError(\n f\"Window title must be a non-empty string, received {value!r}\"\n )", "def test_title(self):\n\n # list instead of string\n self.validator.adata.uns[\"title\"] = [\"title\"]\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: '['title']' in 'uns['title']' is not valid, \"\n \"it must be a string.\"\n ],\n )", "def validateTitle(title):\n \n if not(title) or not(title.strip()):\n return \"You must supply a title.\"\n else:\n return None", "def testFalseCapTitle(self):\n val = capTitles(\"victor Ifezue\") \n self.assertNotEqual(val, \"victor Ifezue\")", "def test_sklearn_umap_title(self):\n # In TSNEVisualizer, the internal sklearn UMAP transform consumes\n # some but not all kwargs passed in by user. Those not in get_params(),\n # like title, are passed through to YB's finalize method. This test should\n # notify us if UMAP's params change on the sklearn side.\n with pytest.raises(TypeError):\n UMAP(title=\"custom_title\")", "def is_bad_title(title):\n bad_examples = [\"under construction\", \"test page\", \"redirect\", \"index of\", \"none \", \"expired\", \"coming soon\",\n \"error \", \"domain pending\", \"at directnic\", \"pending validation\", \"website disabled\",\n \"US Zip Code Information\", # verified we need this, urls like 00000.us, 00001.us end up at zipcode.com\n \"domain default page\", \"non-existent domain\", \"v-webs hosting services\",\n \"be back soon\", \"something went wrong\", \"Lunarpages Web Hosting Placeholder Page\",\n \"Félicitations ! Votre domaine a bien été créé chez OVH !\", \"Domaine r&eacute;serv&eacute;\",\n \" - For Sale | Undeveloped\", \"Yahoo&#39;s Aabaco Small Business: Websites, Ecommerce, Email &amp; Local Listings\",\n \"service unavailable\", \"website disabled\", \"404 Not Found\", \"Not Found\", \"Page cannot be found\"\n ]\n for bad_title in bad_examples:\n if bad_title.lower() in title.lower():\n debug(bad_title)\n return hit(bad_title)\n\n exact_matches = [\"web hosting\", \"webhosting\"]\n for ma in exact_matches:\n if title.replace(\" \", \"\").replace(\"\\t\", \"\").replace(\"\\n\", \"\").replace(\"\\r\", \"\").lower() == ma:\n debug(ma)\n return hit(ma)\n return False", "def input_is_replaceable(title):\n bad_title = re.compile(r\"(^([Pp](age|\\.) 
\\d+|[Ff]ront|[Bb]ack|REUSE)$)|^$\")\n\n return bool(bad_title.match(title))", "def title_exists(form, field):\n if Entry.select().where(Entry.title ** field.data).exists():\n raise ValidationError('That title is already in use.')", "def get_valid_title(title):\n if len(title) >= 254:\n title = title[:254]\n return title", "def is_title_displayed(self):\n return self.driver.wait_for_title_contains(InboxLocators.TITLE, 30)", "def test_title(names):", "def title_contains(title_substring):\n title_substring = title_substring.encode('ascii')\n def f(win):\n t = conv(win.title)\n return title_substring in t\n return f", "def try_create_uniqe_title(self,title,owner):\n if self.valid_title(title):\n for i in range (1,20):\n new_title=title+\"_\"+str(i)\n if self.unique_title(new_title,owner):\n return new_title\n return False\n else:\n return False", "def check_for_metatitle(self, interest_name: str):\n if interest_name.endswith(\"/streaming/p*\"):\n return True\n else:\n return False", "def check_title(self):\n currenttitle = self.driver.title\n assert self.TITLE in currenttitle, 'Title not expected. Actual: ' + currenttitle + ', Expected: ' + self.TITLE", "def is_title_matches(self):\n return \"Python\" in self.driver.title", "def is_title_matches(self):\n return \"EXNESS - Trader Calculator and Currency Converter\" in self.driver.title", "def UserName_availabity():\r\n try:\r\n \r\n UserName=request.args.get(\"UserName\")\r\n user_details=fetch_details(UserName)\r\n user_name=user_details[0]['UserName']\r\n if str(UserName)==str(user_name):\r\n msg=\"UserName is already taken kindly choose another one\"\r\n except IndexError:\r\n msg=\"UserName is available.\"\r\n return msg", "def enter_title():\n valid_data = False\n # used to keep track of the values and change them in other scopes\n input_data = {'title': ''}\n\n while not valid_data:\n input_data['title'] = get_input(\"Title of the task: \")\n if re.match('[\\w]+', input_data['title']):\n valid_data = True\n clean_scr()\n\n return input_data['title']", "def check_valid_title(title):\n title_issues = TitleIssues(title_contains_nsfw=title_contains_nsfw(title))\n return title_issues", "def verifyPageTitle(self, titleToVerify):\n try:\n actualTitle = self.getTitle()\n return self.util.verifyTextContains(actualTitle, titleToVerify)\n except:\n self.log.error(\"Failed to get page title\")\n print_stack()\n return False", "def lookup_space(self, title=None, **kwargs):\n return False", "def test_query_has_query_in_title(self):\n query_url = self.warno_url + '/query'\n self.browser.get(query_url)\n self.assertTrue('Query' in self.browser.title, 'Query did not have \"Query\" in title')", "def handle_title(self, tag, attrs):\n self.title = 'present'" ]
[ "0.6321018", "0.6244966", "0.6110215", "0.6049247", "0.6036075", "0.5973084", "0.5953221", "0.5953062", "0.5943151", "0.5932759", "0.5906794", "0.5882811", "0.5789171", "0.5764922", "0.5761954", "0.5761824", "0.5754397", "0.5731467", "0.57234776", "0.56962955", "0.5676605", "0.5634449", "0.5630614", "0.5609572", "0.5579676", "0.5509376", "0.5482445", "0.546732", "0.54584813", "0.5430679" ]
0.67318124
0
Check UMAP can accept a custom size (tuple of pixels) from the user
def test_custom_size_umap(self):
    umap = UMAPVisualizer(size=(100, 50))

    assert umap._size == (100, 50)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_size(self,x,y):\n assert(x <= 10**3), 'Width larger than 1000' \n assert(y <= 10**3), 'Height larger than 1000' \n assert(x*y <= 3*(10**5)), 'Resolution larger than 300000'", "def test_sklearn_umap_size(self):\n # In UMAPVisualizer, the internal sklearn UMAP transform consumes\n # some but not all kwargs passed in by user. Those not in get_params(),\n # like size, are passed through to YB's finalize method. This test should\n # notify us if UMAP's params change on the sklearn side.\n with pytest.raises(TypeError):\n UMAP(size=(100, 100))", "def _check_size(size):\r\n\r\n if not isinstance(size, (list, tuple)):\r\n raise ValueError(\"Size must be a tuple\")\r\n if len(size) != 2:\r\n raise ValueError(\"Size must be a tuple of length 2\")\r\n if size[0] < 0 or size[1] < 0:\r\n raise ValueError(\"Width and height must be >= 0\")\r\n\r\n return True", "def image_size_exact(self, msg, img_type, height, width,\n target=None):\n height = int(height)\n width = int(width)\n sizes = self._get_sizes(msg, img_type)\n for img in sizes:\n if (img['width'], img['height']) == (width, height):\n return True\n return False", "def check_crop_size(size):\n type_check(size, (int, list, tuple), \"size\")\n if isinstance(size, int):\n check_value(size, (1, FLOAT_MAX_INTEGER))\n elif isinstance(size, (tuple, list)) and len(size) == 2:\n for value in size:\n check_value(value, (1, FLOAT_MAX_INTEGER))\n else:\n raise TypeError(\"Size should be a single integer or a list/tuple (h, w) of length 2.\")", "def is_image_size_64(image):\n return image['height'] == 64 and image['width'] == 64", "def is_valid_size(self, dot_width, dot_height, distance, screen_width, screen_height):\n if dot_width * distance > screen_width or dot_height * distance > screen_height:\n return False\n return True", "def _validate_image_size(size):\n\n error_message = \"Input size (pixels) should be an integers or a tuple of two integers.\"\n\n if type(size) in [list, tuple, np.array]:\n assert len(size) == 2, error_message\n x_size, y_size = size\n\n elif np.issubdtype(type(size), np.number):\n x_size = size\n y_size = size\n\n else:\n raise ValueError(error_message)\n\n assert not x_size % 1 and not y_size % 1, error_message\n\n x_size = int(x_size)\n y_size = int(y_size)\n\n return x_size, y_size", "def check_resize_size(size):\n if isinstance(size, int):\n check_value(size, (1, FLOAT_MAX_INTEGER))\n elif isinstance(size, (tuple, list)) and len(size) == 2:\n for i, value in enumerate(size):\n check_value(value, (1, INT32_MAX), \"size at dim {0}\".format(i))\n else:\n raise TypeError(\"Size should be a single integer or a list/tuple (h, w) of length 2.\")", "def testSize (self):\r\n \r\n perpixel = bytes_per_pixel [self.bih_vals [bih_BitCount]]\r\n width = self.bih_vals [bih_Width]\r\n height = self.bih_vals [bih_Height]\r\n expected = self.bih_vals [bih_SizeImage]\r\n\r\n # Rows always have multiples of 4 bytes\r\n \r\n padding = 3 - ((perpixel * width + 3) % 4)\r\n size = (width * perpixel + padding) * height\r\n\r\n if not size == expected:\r\n print \"Calculated size = %d (<> %d)\" % (size, expected)\r\n print \"***** File size error *****\"", "def isValidTeamSize(size, minimum, maximum) :\n\n return isInteger(size) and int(size) >= minimum and int(size) <= maximum", "def _acceptable_dimensions(self, box):\n return self._min_width < box.x1-box.x0 < self._max_width and\\\n self._min_height < box.y1-box.y0 < self._max_height", "def test_valid_sizes(self):\n for size in settings.MISAGO_AVATARS_SIZES:\n 
self.assertEqual(clean_size(size), size)", "def get_tile_size(self, map_size = None, show_info = None):\n if not map_size: map_size = self.map_size\n w,h = self.img_size\n x_tiles,y_tiles = map_size\n\n tile_raw_w = w / x_tiles\n tile_raw_h = h / y_tiles\n\n if self.debug:\n print(f' ► Raw tile width: {tile_raw_w}\\n ► Raw tile height: {tile_raw_h}')\n\n tile_w = int(round(tile_raw_w))\n tile_h = int(round(tile_raw_h))\n\n if show_info:\n print(f' Image Size: {w} x {h} px\\n Tile Size: {tile_w} x {tile_h} px\\n Map Size: {x_tiles} x {y_tiles} tiles')\n\n error_w = tile_w - tile_raw_w\n error_h = tile_h - tile_raw_h\n print(f'\\n -=ERROR INFO=-\\n Tile Size Width Error: {round(error_w,4)} px \\n Tile Size Height Error: {round(error_h,4)} px \\n Total Width Rounding Error: {round(error_w * x_tiles,4)} px \\n Total Height Rounding Error: {round(error_h * y_tiles,4)} px\\n')\n\n return (tile_raw_w,tile_raw_h)", "def get_image_size(self):", "def size(value):\r\n if not value.isValid():\r\n return '0.0 x 0.0' if isinstance(value, QSizeF) else '0 x 0'\r\n return '{} x {}'.format(value.width(), value.height())", "def calc_image_size(spr):\n return int(max(spr.label_safe_width(), 1)), \\\n int(max(spr.label_safe_height(), 1))", "def IsUsingSizeInPixels(*args, **kwargs):\n return _gdi_.Font_IsUsingSizeInPixels(*args, **kwargs)", "def test_modis_resize(self):\n modis_order = {'mod09a1': {'inputs': 'mod09a1.a2000072.h02v09.005.2008237032813',\n 'products': ['l1']},\n 'resampling_method': 'cc',\n 'resize': {'pixel_size': 30,\n 'pixel_size_units': 'meters'},\n 'format': 'gtiff'}\n\n exc = 'pixel count value is greater than maximum size of'\n\n try:\n api.validation(modis_order, self.staffuser.username)\n except Exception as e:\n assert(exc in str(e))\n else:\n self.fail('Failed MODIS pixel resize test')", "def are_sizes_valid(sizes):\n return all(isinstance(size, int) and size >= 16 and size <= 28 for size in sizes)", "def user_labels_size(*args):\n return _ida_hexrays.user_labels_size(*args)", "def test_summarize_otu_sizes_from_otu_map(self):\r\n otu_map_f = \"\"\"O1\tseq1\r\no2\tseq2\tseq3\tseq4\tseq5\r\no3\tseq5\r\no4\tseq6\tseq7\"\"\".split('\\n')\r\n expected = [(1, 2), (2, 1), (4, 1)]\r\n self.assertEqual(summarize_otu_sizes_from_otu_map(otu_map_f), expected)", "def _check_image_size(self, size):\n if size % 32 == 0:\n return (0, 0)\n else:\n imageBorder = 32 - (size % 32)\n if (imageBorder % 2) == 0:\n return (int(imageBorder / 2), int(imageBorder / 2))\n else:\n return (int(imageBorder / 2), int((imageBorder / 2) + 1))", "def correct_size():\n check50.run(\"./inheritance_test\").stdout(\"size_true.*\").exit(0)", "def size(self, size_input: Tuple[str, str]):\n self.isize = [UIMetric.parse(size_input[0]),\n UIMetric.parse(size_input[1])]", "def is_valid(box, img):\n valid_width = box['top_left_x'] > 0 and box['bottom_right_x'] < img.shape[1]\n valid_height = box['top_left_y'] > 0 and box['bottom_right_y'] < img.shape[0]\n return valid_width and valid_height", "def size(map):\n return map['size']", "def verify_size_content(self, re_size):\n to_alternate = 0\n if re_size['chunck'] < re_size['size']:\n to_alternate = re_size['chunck']\n re_size['chunck'] = re_size['size']\n re_size['size'] = to_alternate\n return re_size", "def test_size_too_small(self):\n min_size = min(settings.MISAGO_AVATARS_SIZES)\n too_small = min_size / 2\n\n self.assertEqual(clean_size(too_small), min_size)", "def createBitmap(self):\n return self.level.has_redundancy and self.size >= 1000 and self.format.type != \"swap\"" ]
[ "0.6711422", "0.6630726", "0.62506163", "0.62452865", "0.6196343", "0.6094031", "0.60335875", "0.60198134", "0.59973264", "0.5955764", "0.58968693", "0.5896098", "0.5810795", "0.5778206", "0.5754338", "0.5711507", "0.57061523", "0.56949794", "0.5684642", "0.5682231", "0.5675218", "0.5654276", "0.56539875", "0.5649742", "0.56239325", "0.5622146", "0.5615051", "0.5585875", "0.55819625", "0.5573733" ]
0.77458096
0
Check UMAP accepts and properly handles custom colors from user
def test_custom_colors_umap(self):
    ## produce random data
    X, y = make_classification(
        n_samples=200,
        n_features=100,
        n_informative=20,
        n_redundant=10,
        n_classes=5,
        random_state=42,
    )

    ## specify a list of custom colors >= n_classes
    purple_blues = ["indigo", "orchid", "plum", "navy", "purple", "blue"]

    ## instantiate the visualizer and check that self.colors is correct
    purple_umap = UMAPVisualizer(colors=purple_blues, random_state=87)
    assert purple_umap.colors == purple_blues

    ## fit the visualizer and check that self.color_values is as long as
    ## n_classes and is the first n_classes items in self.colors
    purple_umap.fit(X, y)
    assert len(purple_umap.color_values_) == len(purple_umap.classes_)
    assert purple_umap.color_values_ == purple_blues[: len(purple_umap.classes_)]

    ## specify a list of custom colors < n_classes
    greens = ["green", "lime", "teal"]

    ## instantiate the visualizer and check that self.colors is correct
    green_umap = UMAPVisualizer(colors=greens, random_state=87)
    assert green_umap.colors == greens

    ## fit the visualizer and check that self.color_values is as long as
    ## n_classes and the user-supplied color list gets recycled as expected
    green_umap.fit(X, y)
    assert len(green_umap.color_values_) == len(green_umap.classes_)
    assert green_umap.color_values_ == ["green", "lime", "teal", "green", "lime"]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_colorstr(arg):\n try:\n assert len(arg) == 6\n for c in arg:\n assert c in COLORMAP\n except AssertionError:\n raise argparse.ArgumentTypeError('%s is not a valid color string' % arg)\n return arg", "def test_is_valid_color_name(self):\n self.assertTrue(is_valid_color_name('black'))\n self.assertTrue(is_valid_color_name('red'))\n self.assertFalse(is_valid_color_name('#aabb11'))\n self.assertFalse(is_valid_color_name('bl(ack'))", "def check_colormap(cmap):\n names = set(['BrBG', 'PiYG', 'PRGn', 'PuOr', 'RdBu', 'RdGy', 'RdYlBu', 'RdYlGn', 'Spectral',\n 'Blues', 'BuGn', 'BuPu', 'GnBu', 'Greens', 'Greys', 'Oranges', 'OrRd', 'PuBu',\n 'PuBuGn', 'PuRd', 'Purples', 'RdPu', 'Reds', 'YlGn', 'YlGnBu', 'YlOrBr', 'YlOrRd',\n 'Accent', 'Dark2', 'Paired', 'Pastel1', 'Pastel2', 'Set1', 'Set2', 'Set3', 'Lightning'])\n if cmap not in names:\n raise Exception(\"Invalid cmap '%s', must be one of %s\" % (cmap, names))\n else:\n return cmap", "def validate_hair_color(passport: map) -> bool:\n if passport.get('hcl'):\n regex = re.compile('#[0-9a-f]{6}')\n match = regex.match(passport['hcl'])\n return bool(match)\n\n return False", "def print_illegal_color_format_screen( enteredBGColor, enteredFGColor, convertedBGColor, convertedFGColor ):\n print \"\"\n print \"Error: are the passed in colors valid?\"\n print \" - passed in background-color '\" + enteredBGColor + \"' was converted to '\" + convertedBGColor + \"'.\"\n print \" - passed in foreground-color '\" + enteredFGColor + \"' was converted to '\" + convertedFGColor + \"'.\"\n print \"\"", "def test_change_color_of_the_device__false():", "def colorOK(colorStr):\n tkWdg = _getTkWdg()\n\n try:\n tkWdg.winfo_rgb(colorStr)\n except tkinter.TclError:\n return False\n return True", "def test_is_valid_color(self):\n self.assertTrue(is_valid_color('black'))\n self.assertTrue(is_valid_color('#aabb11'))\n self.assertTrue(is_valid_color('rgba(23,45,67, .5)'))\n self.assertFalse(is_valid_color('bl(ack'))", "def clean_colors(self):\n err = _(\"Color must be a valid hex triplet.\")\n colors = ['background_color_custom', 'font_color_custom']\n colors2 = colors + ['background_color', 'font_color']\n # If there are custom colors specified in settings, length of\n # self.COLORS will be > 6, so check for validity\n if len(self.COLORS) > 6:\n colors = colors2\n for color in colors:\n c = getattr(self, color)\n l = len(c)\n if l:\n if l != 6:\n raise ValidationError(err)\n else:\n try:\n int(c, 16)\n except ValueError:\n raise ValidationError(err)", "def _iscolorstring(self, color):\n try:\n rgb = self.cv.winfo_rgb(color)\n ok = True\n except TK.TclError:\n ok = False\n return ok", "def test_change_color_of_the_device__true():", "def validate_color(self, field):\n if match(r'^[A-Fa-f0-9]{0,6}$', field.data):\n field.data = field.data.lower()\n else:\n raise ValidationError('Field is not a valid hexadecimal color code.')", "def uniqueish_color(color_data):\n # return plt.cm.gist_ncar(color_data)\n # return plt.cm.binary(color_data)\n return plt.cm.bwr(color_data)", "def test_color(self):\n self._calibration_test(\"color_full\")", "def color_obeject(val):\n if val == 'O':\n color = 'red'\n elif val == '<M8[ns]':\n color = 'blue'\n else:\n color = 'black'\n return 'color: %s' % color", "def validPlayerColor(color):\n if color not in (RED, GREEN, BLUE, YELLOW):\n return False\n else:\n return True", "def test_colorFormatting(self):\n self.assertEqual(irc.parseFormattedText(\"\\x0301yay\\x03\"), A.fg.black[\"yay\"])\n self.assertEqual(\n 
irc.parseFormattedText(\"\\x0301,02yay\\x03\"), A.fg.black[A.bg.blue[\"yay\"]]\n )\n self.assertEqual(\n irc.parseFormattedText(\"\\x0301yay\\x0302yipee\\x03\"),\n A.fg.black[\"yay\", A.fg.blue[\"yipee\"]],\n )", "def test_weirdColorFormatting(self):\n self.assertAssembledEqually(\"\\x031kinda valid\", A.fg.black[\"kinda valid\"])\n self.assertAssembledEqually(\n \"\\x03999,999kinda valid\", A.fg.green[\"9,999kinda valid\"]\n )\n self.assertAssembledEqually(\n \"\\x031,2kinda valid\", A.fg.black[A.bg.blue[\"kinda valid\"]]\n )\n self.assertAssembledEqually(\n \"\\x031,999kinda valid\", A.fg.black[A.bg.green[\"9kinda valid\"]]\n )\n self.assertAssembledEqually(\n \"\\x031,242 is a special number\",\n A.fg.black[A.bg.yellow[\"2 is a special number\"]],\n )\n self.assertAssembledEqually(\"\\x03,02oops\\x03\", A.normal[\",02oops\"])\n self.assertAssembledEqually(\"\\x03wrong\", A.normal[\"wrong\"])\n self.assertAssembledEqually(\"\\x031,hello\", A.fg.black[\"hello\"])\n self.assertAssembledEqually(\"\\x03\\x03\", A.normal)", "def is_color(color):\n # check if color is\n # 1) the default empty value\n # 2) auto\n # 3) a color name from the 16 color palette\n # 4) a color index from the 256 color palette\n # 5) an HTML-style color code\n if (color in ['', 'auto'] or\n color in COLORS.keys() or\n (color.isdigit() and int(color) >= 0 and int(color) <= 255) or\n (color.startswith('#') and (len(color) in [4, 7, 9]) and\n all(c in '01234567890abcdefABCDEF' for c in color[1:]))):\n return color\n raise VdtValueError(color)", "def getColorMapFlags():\n\treturn colorMap_flag", "def is_valid_color(value):\n if is_str(value):\n return is_hex_string(value)\n elif is_tuple_or_list(value):\n return (is_tuple_or_list(value)\n and is_three_channeled(value)\n and has_valid_channel_values(value))\n else:\n return is_str_or_coll(value)", "def test_assembleColor(self):\n self.assertEqual(\n irc.assembleFormattedText(A.fg.red[A.bg.blue[\"hello\"]]),\n \"\\x0f\\x0305,02hello\",\n )", "def _is_same_color(p1: str, p2: str):\n return p1.islower() == p2.islower()", "def EyeColorTest(str):\n\n\tvalidcolors = ['amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth']\n\treturn str in validcolors", "def verify_color(cci):\n\n if cci < -6.0:\n return OrangeColor.GREEN\n elif -6.0 <= cci < -1.0:\n return OrangeColor.YELLOWISH_GREEN\n elif -1.0 <= cci < 2.7:\n return OrangeColor.YELLOW\n elif 2.7 <= cci < 6.0:\n return OrangeColor.LIGHT_ORANGE\n else: # cci >= 6\n return OrangeColor.ORANGE", "def __isValidColor(self, name):\n try:\n if self.__isHexString(name) and len(name) in [3, 6, 9, 12]:\n return True\n return QColor.isValidColor(name)\n except AttributeError:\n if name.startswith(\"#\"):\n if len(name) not in [4, 7, 10, 13]:\n return False\n hexCheckStr = name[1:]\n return self.__isHexString(hexCheckStr)\n else:\n if self.__isHexString(name) and len(name) in [3, 6, 9, 12]:\n return True\n return name in QColor.colorNames()", "def test_assembleForegroundColor(self):\n self.assertEqual(\n irc.assembleFormattedText(A.fg.blue[\"hello\"]), \"\\x0f\\x0302hello\"\n )", "def is_color(s):\n def in_range(i): return 0 <= i <= int('0xFFFFFF', 0)\n\n try:\n if type(s) == int:\n return in_range(s)\n elif type(s) not in (str, bytes):\n return False\n elif s in webcolors.css3_names_to_hex:\n return True\n elif s[0] == '#':\n return in_range(int('0x' + s[1:], 0))\n elif s[0:2] == '0x':\n return in_range(int(s, 0))\n elif len(s) == 6:\n return in_range(int('0x' + s, 0))\n except ValueError:\n return False", "def is_valid_eye_color(eye_color: str) 
-> str:\n return eye_color in [\"amb\", \"blu\", \"brn\", \"gry\", \"grn\", \"hzl\", \"oth\"]", "def _color(self, args):" ]
[ "0.61849034", "0.6094498", "0.60621583", "0.60231525", "0.60187125", "0.5935084", "0.5925285", "0.5918845", "0.5859132", "0.5766543", "0.57401425", "0.57124156", "0.57018787", "0.56962585", "0.56811154", "0.56695724", "0.5624193", "0.5616206", "0.56129676", "0.55991244", "0.55929744", "0.5569163", "0.5556111", "0.5546668", "0.5537667", "0.5535032", "0.55096024", "0.5496014", "0.5474114", "0.5443959" ]
0.68729633
0
Test UMAP integrated visualization on a sklearn classifier dataset
def test_make_classification_umap(self):
    ## produce random data
    X, y = make_classification(
        n_samples=200,
        n_features=100,
        n_informative=20,
        n_redundant=10,
        n_classes=3,
        random_state=42,
    )

    ## visualize data with UMAP
    umap = UMAPVisualizer(random_state=87)
    umap.fit(X, y)

    self.assert_images_similar(umap, tol=40)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_make_classification_umap_class_labels(self):\n\n ## produce random data\n X, y = make_classification(\n n_samples=200,\n n_features=100,\n n_informative=20,\n n_redundant=10,\n n_classes=3,\n random_state=42,\n )\n\n ## visualize data with UMAP\n umap = UMAPVisualizer(random_state=87, labels=[\"a\", \"b\", \"c\"])\n umap.fit(X, y)\n\n self.assert_images_similar(umap, tol=40)", "def test_no_target_umap(self):\n ## produce random data\n X, y = make_classification(\n n_samples=200,\n n_features=100,\n n_informative=20,\n n_redundant=10,\n n_classes=3,\n random_state=6897,\n )\n\n ## visualize data with UMAP\n umap = UMAPVisualizer(random_state=64)\n umap.fit(X)\n\n self.assert_images_similar(umap, tol=40)", "def test_quick_method(self):\n corpus = load_hobbies()\n tfidf = TfidfVectorizer()\n\n X = tfidf.fit_transform(corpus.data)\n y = corpus.target\n\n viz = umap(X, y, show=False)\n assert isinstance(viz, UMAPVisualizer)\n\n self.assert_images_similar(viz, tol=50)", "def test_visualizer_with_pandas(self):\n X, y = make_classification(\n n_samples=200,\n n_features=100,\n n_informative=20,\n n_redundant=10,\n n_classes=3,\n random_state=3020,\n )\n\n X = pandas.DataFrame(X)\n y = pandas.Series(y)\n\n umap = UMAPVisualizer(random_state=64)\n umap.fit(X, y)\n\n self.assert_images_similar(umap, tol=40)", "def map_objects_classifier_evaluation(self):\n df = self.results[(self.results['iou'] > 0.7)]\n y_true = df['true_class']\n y_pred = df['pred_class']\n print(classification_report(y_true, y_pred))\n matrix = confusion_matrix(y_true, y_pred)\n matrix = matrix.astype('float') / matrix.sum(axis=1)[:, np.newaxis]\n import seaborn as sns\n\n plt.figure(figsize=(10, 7))\n sns.set(font_scale=2.4)\n sns.heatmap(matrix, annot=True, annot_kws={'size': 25},\n cmap=plt.cm.Reds)\n # Add labels to the plot\n class_names = ['background', 'building', 'water']\n tick_marks = np.arange(len(class_names))\n tick_marks2 = tick_marks + 0.28\n tick_marks2[0] = tick_marks2[0] - 0.2\n tick_marks = tick_marks + 0.5\n plt.xticks(tick_marks, class_names, rotation=0)\n plt.yticks(tick_marks2, class_names, rotation=90)\n plt.xlabel('Predicted label', labelpad=13)\n plt.ylabel('True label', labelpad=13)\n plt.show()", "def test_umap_mismtached_labels(self):\n ## produce random data\n X, y = make_classification(\n n_samples=200,\n n_features=100,\n n_informative=20,\n n_redundant=10,\n n_classes=3,\n random_state=42,\n )\n\n ## fewer labels than classes\n umap = UMAPVisualizer(random_state=87, labels=[\"a\", \"b\"])\n with pytest.raises(YellowbrickValueError):\n umap.fit(X, y)\n\n ## more labels than classes\n umap = UMAPVisualizer(random_state=87, labels=[\"a\", \"b\", \"c\", \"d\"])\n with pytest.raises(YellowbrickValueError):\n umap.fit(X, y)", "def test_integrated_umap(self):\n tfidf = TfidfVectorizer()\n\n docs = tfidf.fit_transform(corpus.data)\n labels = corpus.target\n\n umap = UMAPVisualizer(random_state=8392, colormap=\"Set1\", alpha=1.0)\n umap.fit_transform(docs, labels)\n\n tol = 55\n self.assert_images_similar(umap, tol=tol)", "def test_custom_colors_umap(self):\n ## produce random data\n X, y = make_classification(\n n_samples=200,\n n_features=100,\n n_informative=20,\n n_redundant=10,\n n_classes=5,\n random_state=42,\n )\n\n ## specify a list of custom colors >= n_classes\n purple_blues = [\"indigo\", \"orchid\", \"plum\", \"navy\", \"purple\", \"blue\"]\n\n ## instantiate the visualizer and check that self.colors is correct\n purple_umap = UMAPVisualizer(colors=purple_blues, random_state=87)\n 
assert purple_umap.colors == purple_blues\n\n ## fit the visualizer and check that self.color_values is as long as\n ## n_classes and is the first n_classes items in self.colors\n purple_umap.fit(X, y)\n assert len(purple_umap.color_values_) == len(purple_umap.classes_)\n assert purple_umap.color_values_ == purple_blues[: len(purple_umap.classes_)]\n\n ## specify a list of custom colors < n_classes\n greens = [\"green\", \"lime\", \"teal\"]\n\n ## instantiate the visualizer and check that self.colors is correct\n green_umap = UMAPVisualizer(colors=greens, random_state=87)\n assert green_umap.colors == greens\n\n ## fit the visualizer and check that self.color_values is as long as\n ## n_classes and the user-supplied color list gets recycled as expected\n green_umap.fit(X, y)\n assert len(green_umap.color_values_) == len(green_umap.classes_)\n assert green_umap.color_values_ == [\"green\", \"lime\", \"teal\", \"green\", \"lime\"]", "def draw_umap(\n data,\n n_neighbors=15,\n min_dist=0.1,\n c=None,\n n_components=2,\n metric=\"euclidean\",\n title=\"\",\n plot=True,\n cmap=None,\n use_plotly=False,\n **kwargs,\n):\n fit = UMAP(\n n_neighbors=n_neighbors,\n min_dist=min_dist,\n n_components=n_components,\n metric=metric,\n random_state=42,\n )\n mapper = fit.fit(data)\n u = fit.transform(data)\n if plot:\n if use_plotly:\n fig = px.scatter(\n x=u[:, 0], y=u[:, 1], color=c, title=title, **kwargs\n )\n fig.update_layout(\n {\n \"plot_bgcolor\": \"rgba(0, 0, 0, 0)\",\n \"paper_bgcolor\": \"rgba(0, 0, 0, 0)\",\n }\n )\n fig.show()\n else:\n fig = plt.figure()\n if n_components == 1:\n ax = fig.add_subplot(111)\n ax.scatter(u[:, 0], range(len(u)), c=c)\n if n_components == 2:\n ax = fig.add_subplot(111)\n scatter = ax.scatter(u[:, 0], u[:, 1], c=c, label=c, cmap=cmap)\n if n_components == 3:\n ax = fig.add_subplot(111, projection=\"3d\")\n ax.scatter(u[:, 0], u[:, 1], u[:, 2], c=c, s=100)\n plt.title(title, fontsize=18)\n legend = ax.legend(*scatter.legend_elements())\n ax.add_artist(legend)\n\n return u, mapper", "def show_umap_bokeh(data, metadata, color_field=None,\n n_neighbors=10, min_dist=0.001, metric='euclidean'):\n if color_field is None:\n dims = 3\n else:\n dims = 2\n um = umap.UMAP(n_neighbors=n_neighbors, n_components=dims,\n min_dist=min_dist, metric=metric)\n vis = um.fit_transform(data)\n\n if color_field is None:\n color = umap_color(vis[:, 2], None, 20)\n color_field = \"Third UMAP dimension\"\n else:\n color = umap_color(metadata, color_field, 20, dtype=int)\n\n scatter_data = pandas.DataFrame({\n 'umap_1': vis[:, 0],\n 'umap_2': vis[:, 1],\n 'color': color,\n 'htid': list(metadata.index),\n 'title': ['<br>'.join(textwrap.wrap(t))\n for t in metadata['title']],\n 'author': list(metadata['author']),\n 'pub_date': list(metadata['pub_date'])\n })\n\n plot_figure = figure(\n title=('UMAP Projection of Phasor vectors for ~1000 random '\n 'HathiTrust volumes (colored by {})'.format(color_field)),\n plot_width=800,\n plot_height=800,\n tools=('pan, wheel_zoom, tap, reset')\n )\n\n plot_figure.add_tools(HoverTool(\n tooltips=(\n \"<div><span style='font-size: 10px'>@htid{safe}</span></div>\"\n \"<div><span style='font-size: 10px'>@author{safe}</span></div>\"\n \"<div><span style='font-size: 10px'>@title{safe}</span></div>\"\n \"<div><span style='font-size: 10px'>@pub_date{safe}</span></div>\"\n )\n ))\n\n plot_figure.circle(\n 'umap_1',\n 'umap_2',\n color='color',\n source=scatter_data,\n )\n\n tap = plot_figure.select(type=TapTool)\n tap.callback = OpenURL(\n 
url='https://babel.hathitrust.org/cgi/pt?id=@htid{safe}'\n )\n show(plot_figure)", "def create_umap(name):\n\tglobal dir\n\tdirec = dir + \"/\" + name + \"/\"\n\tos.chdir(direc + \"representations/\")\n\t\n\t# Palette size of 2x50 required. 1-49 for labeled nat data, 51-100 for labeled syn data, 50 for unlabeled nat data\n\tpalette = sns.color_palette(\"Blues_d\", 30)# Syn data in blue\n\tpalette.extend(sns.dark_palette(\"purple\", 20)) # Unimportant, just a filler\n\tpalette.extend(sns.color_palette(\"Reds_d\", 30))# Nat data in red\n\tpalette.extend(sns.dark_palette(\"purple\", 20))# Unimportant, just a filler\n\tpalette[49]=\"#50B689\"# Unlabeled nat data in green\n\t# print(\"size of palette \" + str(len(palette)))\n\t\n\tfor file in glob.glob(\"*.pt\"):\n\t\t\trepresentation = torch.load(file)\n\t\t\ttarfile = file[:-3] # Removes the .pt ending\n\t\t\ttarfile = \"tar\" + tarfile[4:] + \".log\"\n\t\t\tall_targets = []\n\t\t\twith open(tarfile, \"r\") as f:\n\t\t\t\tfor tar in f:\n\t\t\t\t\tall_targets.append(float(tar.strip()))\n\n\t\t\tsns.set(style='white', context='notebook', rc={'figure.figsize': (14, 10)})\n\t\t\treducer = umap.UMAP()\n\t\t\tembedding = reducer.fit_transform(representation.cpu())\n\t\t\t\n\t\t\tprint(\"scattering\")\n\t\t\t# print(all_targets)\n\t\t\tplt.scatter(embedding[:, 0], embedding[:, 1], c=[palette[int(y-1)] for y in all_targets], alpha=0.8)\n\t\t\tplt.gca().set_aspect('equal', 'datalim')\n\t\t\tplt.title('UMAP projection of cell data', fontsize=24);\n\t\t\tplt.savefig(\"./umap_\" + str(file[4:-3]) + \".png\")\n\t\t\tplt.clf()\n\tos.chdir(\"../../../../\")", "def test_fit() -> None:\n mapie = MapieClassifier()\n mapie.fit(X_toy, y_toy)", "def test(self, test_data_path):\n test_data = read_datafile(test_data_path)\n test_data = self.preprocessor.preprocess(test_data)\n\n data = '__label__' + test_data['claim'].astype(str) + test_data['check_worthiness'].astype(str) + ' ' + \\\n test_data['tweet_text']\n\n output = self.run(data)\n\n df = pd.DataFrame()\n df[\"predicted\"] = output.split()\n df[\"labeled\"] = [d.split()[0] for d in data]\n\n cm = confusion_matrix(df[\"labeled\"], df[\"predicted\"], labels=['__label__11','__label__10','__label__00'])\n\n ax= plt.subplot()\n sns.heatmap(cm, annot=True, ax = ax); #annot=True to annotate cells\n\n ax.set_xlabel('Predicted labels');ax.set_ylabel('True labels'); \n ax.set_title('Confusion Matrix'); \n ax.xaxis.set_ticklabels(['__label__11','__label__10','__label__00']); ax.yaxis.set_ticklabels(['__label__11','__label__10','__label__00']);\n\n plt.show()\n\n return np.sum(cm.diagonal()) / np.sum(cm)", "def predict(model,test_data, show_confusion_matrix=False):\r\n\r\n pick_model = open(model, \"rb\") # 'model.data'\r\n model = pickle.load(pick_model)\r\n pick_model.close()\r\n\r\n pick_in = open(test_data, \"rb\") # 'data_test.data' , be careful not to leak training data here\r\n data = pickle.load(pick_in)\r\n pick_in.close()\r\n\r\n features = []\r\n labels = []\r\n\r\n for feature, label in data:\r\n features.append(feature)\r\n labels.append(label)\r\n\r\n xtest = features\r\n ytest = labels\r\n \r\n accuracy = model.score(xtest, ytest)\r\n print(\"accuracy: \", accuracy)\r\n \r\n if show_confusion_matrix:\r\n prediction = model.predict(xtest)\r\n cm_array = confusion_matrix(ytest, prediction)\r\n sns.heatmap(cm_array, annot=True, cmap='Blues')\r\n plt.show()", "def test_sklearn_umap_title(self):\n # In TSNEVisualizer, the internal sklearn UMAP transform consumes\n # some but not all kwargs passed in by user. 
Those not in get_params(),\n # like title, are passed through to YB's finalize method. This test should\n # notify us if UMAP's params change on the sklearn side.\n with pytest.raises(TypeError):\n UMAP(title=\"custom_title\")", "def show_test_results(true_labels: List[int], predictions: List[int], class_names: List[str]):\n confusion_mtx = confusion_matrix(true_labels, predictions)\n plt.figure(figsize=(10, 8))\n sns.heatmap(confusion_mtx, xticklabels=class_names, yticklabels=class_names,\n annot=True, fmt='g')\n plt.xlabel('Prediction')\n plt.ylabel('Label')\n plt.title(\"Confusion matrix\")\n plt.show()\n print(classification_report(true_labels, predictions, target_names=class_names, digits=DIGITS))", "def test_sklearn_umap_size(self):\n # In UMAPVisualizer, the internal sklearn UMAP transform consumes\n # some but not all kwargs passed in by user. Those not in get_params(),\n # like size, are passed through to YB's finalize method. This test should\n # notify us if UMAP's params change on the sklearn side.\n with pytest.raises(TypeError):\n UMAP(size=(100, 100))", "def represent():\n\tmodel.eval()\n\twith torch.no_grad():\n\n\t\tall_data = []\n\t\tall_targets = []\n\n\t\tfor batch_idx, (data, labels) in enumerate(nat_test_loader):\n\t\t\tall_data.append(data)\n\t\t\tall_targets.append(labels.float()+50) # +50 for nat data, for distinction between nat and syn\n\t\tfor batch_idx, (data, labels) in enumerate(syn_test_loader):\n\t\t\tall_data.append(data)\n\t\t\tall_targets.append(labels.float())\n\n\t\tall_data = torch.cat(all_data, 0) # Merges the list of tensors\n\t\tall_data = all_data.cuda()\n\t\tall_targets = torch.cat(all_targets, 0)\n\n\t\trepresentation = model.representation(all_data)\n\t\t\n\t\ttorch.save(representation, directory + \"/representations/repr\" + str(epoch) + \".pt\")\n\t\twith open(directory + \"/representations/tar\" + str(epoch) + \".log\", \"w\") as f:\n\t\t\tfor t in all_targets:\n\t\t\t\tf.write(str(t.item()) + \"\\n\")\n\n\t\t# Optional: Plotting of the UMAP in each represent()\n\t\t#sns.set(style='white', context='notebook', rc={'figure.figsize': (14, 10)})\n\t\t#reducer = umap.UMAP()\n\t\t#embedding = reducer.fit_transform(representation.cpu())\n\t\t# flatui = [\"#ff0000\", \"#000000\", \"#001800\", \"#003000\", \"#004800\", \"#006000\", \"#007800\", \"#009000\", \"#00a800\", \"#00c000\", \"#00d800\"]\n\t\t# plt.scatter(embedding[:, 0], embedding[:, 1], c=[sns.color_palette(flatui)[x] for x in all_targets.int()])\n\t\t#plt.scatter(embedding[:, 0], embedding[:, 1], c=all_targets.cpu())\n\t\t#plt.gca().set_aspect('equal', 'datalim')\n\t\t#plt.title('UMAP projection of cell data', fontsize=24);\n\t\t#plt.savefig(directory + \"/umap_\" + str(epoch) + \".png\")\n\t\t#plt.clf()", "def test_initialized() -> None:\n MapieClassifier()", "def test_classify(self):\n classifiers, estimates =\\\n ada_boost.train_dataset(self.larger_matrix,\n self.larger_class_labels,\n 9)\n data_to_classify = [1, 0.5]\n classifications = ada_boost.classify(data_to_classify, classifiers)\n expected = np.mat([-1.])\n self.assertEqual(classifications, expected)", "def learnGauss(metricArray): \n fit = gaussReg() \n n= 100 #You should probably change this...\n overlap = 0.3 \n imageSize = [100,400]\n densMap(fit, metricArray, n, overlap, imageSize )\n overlayMap('SmallTile.jpg', 'ContourPlot.jpg') \n # Final map is saved as OverlayMap.jpg", "def scikit_learn_classifier_comparison_example():\n\n # Code source: Gael Varoqueux\n # Andreas Mueller\n # Modified for Documentation merge by Jaques 
Grobler\n # Modified to serve as a MinCq example by Jean-Francis Roy\n # License: BSD 3 clause\n\n h = .02 # step size in the mesh\n\n names = [\"Linear SVM\", \"RBF SVM\", \"AdaBoost\", \"Linear MinCq\", \"RBF MinCq\", \"Stumps MinCq\"]\n classifiers = [\n SVC(kernel=\"linear\", C=0.025),\n SVC(gamma=2, C=1),\n AdaBoostClassifier(),\n MinCqLearner(mu=0.01, voters_type=\"kernel\", kernel=\"linear\"),\n MinCqLearner(mu=0.01, voters_type=\"kernel\", kernel=\"rbf\", gamma=2),\n MinCqLearner(mu=0.01, voters_type=\"stumps\"),\n ]\n\n X, y = make_classification(n_features=2, n_redundant=0, n_informative=2,\n random_state=1, n_clusters_per_class=1)\n\n rng = np.random.RandomState(2)\n X += 2 * rng.uniform(size=X.shape)\n linearly_separable = (X, y)\n\n datasets = [make_moons(noise=0.3, random_state=0),\n make_circles(noise=0.2, factor=0.5, random_state=1),\n linearly_separable\n ]\n\n figure = pl.figure(figsize=(27, 9))\n i = 1\n # iterate over datasets\n for ds in datasets:\n # preprocess dataset, split into training and test part\n X, y = ds\n y[y == 0] = -1\n X = StandardScaler().fit_transform(X)\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.4)\n\n x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5\n y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h),\n np.arange(y_min, y_max, h))\n\n # just plot the dataset first\n cm = pl.cm.RdBu\n cm_bright = ListedColormap(['#FF0000', '#0000FF'])\n ax = pl.subplot(len(datasets), len(classifiers) + 1, i)\n # Plot the training points\n ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)\n # and testing points\n ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6)\n ax.set_xlim(xx.min(), xx.max())\n ax.set_ylim(yy.min(), yy.max())\n ax.set_xticks(())\n ax.set_yticks(())\n i += 1\n\n # iterate over classifiers\n for name, clf in zip(names, classifiers):\n ax = pl.subplot(len(datasets), len(classifiers) + 1, i)\n clf.fit(X_train, y_train)\n score = clf.score(X_test, y_test)\n\n # Plot the decision boundary. 
For that, we will assign a color to each\n # point in the mesh [x_min, m_max]x[y_min, y_max].\n if hasattr(clf, \"decision_function\"):\n Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])\n else:\n Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]\n\n # Put the result into a color plot\n Z = Z.reshape(xx.shape)\n ax.contourf(xx, yy, Z, cmap=cm, alpha=.8)\n\n # Plot also the training points\n ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)\n # and testing points\n ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright,\n alpha=0.6)\n\n ax.set_xlim(xx.min(), xx.max())\n ax.set_ylim(yy.min(), yy.max())\n ax.set_xticks(())\n ax.set_yticks(())\n ax.set_title(name)\n ax.text(xx.max() - .3, yy.min() + .3, ('%.2f' % score).lstrip('0'),\n size=15, horizontalalignment='right')\n i += 1\n\n figure.subplots_adjust(left=.02, right=.98)\n pl.show()", "def test(self, test_instances, test_labels):\n scores = self.classifier.predict(test_instances)\n # TODO: print report", "def visualize_predictions(model : torch.nn.Module, dataSet : Dataset, \r\n axes, device :torch.device, numTestSamples : int,\r\n id_to_color : np.ndarray = train_id_to_color):\r\n model.to(device=device)\r\n model.eval()\r\n\r\n # predictions on random samples\r\n testSamples = np.random.choice(len(dataSet), numTestSamples).tolist()\r\n # _, axes = plt.subplots(numTestSamples, 3, figsize=(3*6, numTestSamples * 4))\r\n \r\n for i, sampleID in enumerate(testSamples):\r\n inputImage, gt = dataSet[sampleID]\r\n\r\n # input rgb image \r\n inputImage = inputImage.to(device)\r\n landscape = inverse_transform(inputImage).permute(1, 2, 0).cpu().detach().numpy()\r\n axes[i, 0].imshow(landscape)\r\n axes[i, 0].set_title(\"Landscape\")\r\n\r\n # groundtruth label image\r\n label_class = gt.cpu().detach().numpy()\r\n axes[i, 1].imshow(id_to_color[label_class])\r\n axes[i, 1].set_title(\"Groudtruth Label\")\r\n\r\n # predicted label image\r\n y_pred = torch.argmax(model(inputImage.unsqueeze(0)), dim=1).squeeze(0)\r\n label_class_predicted = y_pred.cpu().detach().numpy() \r\n axes[i, 2].imshow(id_to_color[label_class_predicted])\r\n axes[i, 2].set_title(\"Predicted Label\")\r\n\r\n plt.show()", "def test(self, training_set, original_test_set, imitation_test_set ):\n\n plt.figure()\n\n training_axis = np.arange(len(training_set))\n original_test_axis = np.arange(len(original_test_set))+len(training_axis)\n imitation_test_axis = np.arange(len(imitation_test_set))+len(training_axis)+len(original_test_set)\n\n training_scores = []\n original_test_scores = []\n imitation_test_scores = []\n\n for signature in training_set:\n vectorized_signature = signature.tolist()\n score = -1*self.model.score(vectorized_signature)\n training_scores.append(score)\n\n for signature in original_test_set:\n vectorized_signature = signature.tolist()\n score = -1*self.model.score(vectorized_signature)\n original_test_scores.append(score)\n\n for signature in imitation_test_set:\n vectorized_signature = signature.tolist()\n score = -1*self.model.score(vectorized_signature)\n imitation_test_scores.append(score)\n\n accuracy, threshold = self.evaluate(training_scores, original_test_scores, imitation_test_scores)\n\n xaxis = np.arange(len(imitation_test_set)+len(training_axis)+len(original_test_set))\n plt.plot( xaxis, threshold*np.ones(len(xaxis)), \"--\", label=\"Threshold\" )\n plt.scatter(training_axis,training_scores, label=\"Training data\")\n plt.scatter(original_test_axis, original_test_scores, c=\"g\", label=\"Original Test 
data\")\n plt.scatter(imitation_test_axis, imitation_test_scores, c=\"r\", label=\"Imitated Test data\")\n plt.legend(loc=\"best\")\n plt.title(f\"{self.user} data. Accuracy={accuracy} \")\n plt.ylabel(\"Score\")\n plt.xlabel(\"File\")\n plt.savefig(f\"{self.n_components}_{self.user}.png\")\n\n Model.accuracies.append(accuracy)", "def test_none_alpha_results() -> None:\n estimator = LogisticRegression()\n estimator.fit(X, y)\n y_pred_est = estimator.predict(X)\n mapie = MapieClassifier(estimator=estimator, cv=\"prefit\")\n mapie.fit(X, y)\n y_pred_mapie = mapie.predict(X)\n np.testing.assert_allclose(y_pred_est, y_pred_mapie)", "def plot_confusion_matrix(self):\r\n interp = ClassificationInterpretation.from_learner(self.learn)\r\n interp.plot_confusion_matrix()", "def summarize_model(clf_, X_tr, X_te, y_tr, y_te, tree=False):\n \n import sklearn.metrics as metrics\n import matplotlib.pyplot as plt\n import pandas as pd\n \n y_hat_tr, y_hat_te = fit_n_pred(clf_, X_tr, X_te, y_tr)\n print('Classification Report:')\n print(metrics.classification_report(y_te, y_hat_te))\n \n if tree:\n fig, ax = plt.subplots(figsize=(10,5), nrows=2)\n\n metrics.plot_confusion_matrix(clf_,X_te,y_te,cmap=\"YlOrRd\", normalize='true',\n ax=ax[0])\n ax[0].set(title='Confusion Matrix')\n ax[0].grid(False)\n\n plot_importance(clf_, X_tr, ax=ax[1])\n plt.tight_layout()\n \n else:\n clf_coef = pd.Series(clf_.coef_[0], index=X_tr.columns, name='Normal')\n abs_coef = pd.Series(abs(clf_.coef_[0]), index=X_tr.columns, name='Absolute')\n posi_coef = pd.Series((clf_coef > 0), name='Positive')\n coef_all = pd.concat([clf_coef, abs_coef, posi_coef], axis=1)\n coef_all.sort_values('Absolute', ascending=True, inplace=True)\n coef_all.tail(20)['Normal'].plot(kind='barh', color=coef_all['Positive'].map({True:'b',False:'r'})\n\n metrics.plot_confusion_matrix(clf_,X_te,y_te,cmap=\"YlOrRd\", normalize='true')\n plt.title('Confusion Matrix')\n plt.grid(False)\n plt.tight_layout()\n\ndef grid_searcher(clf_, params, X_tr, X_te, y_tr, y_te, cv=None, keep_t=False, train_score=True):\n \n \"\"\"Takes any classifier, train/test data for X/y, and dict of parameters to\n iterate over. Optional parameters select for cross-validation tuning, keeping\n time for running the gridsearch, and returning training scores when done.\n Default parameters only return the fitted grid search object. MUST HAVE Timer\n class imported.\"\"\"\n \n from sklearn.model_selection import GridSearchCV\n import numpy as np\n \n ## Instantiate obj. 
with our targets\n grid_s = GridSearchCV(clf_, params, cv=cv, return_train_score=train_score)\n \n ## Time and fit run the 'search'\n time = Timer()\n time.start()\n grid_s.fit(X_tr, y_tr)\n time.stop()\n \n ## Display results\n tr_score = np.mean(grid_s.cv_results_['mean_train_score'])\n te_score = grid_s.score(X_te, y_te)\n print(f'Mean Training Score: {tr_score :.2%}')\n print(f'Mean Test Score: {te_score :.2%}')\n print('Best Parameters:')\n print(grid_s.best_params_)\n \n ## Time keeping and grid obj\n if keep_t:\n lap = time.record().total_seconds()\n print('**********All done!**********')\n return grid_s, lap\n else:\n return grid_s", "def model_visualization(model,X,y,classifier):\n sns.set_context(context='notebook',font_scale=2)\n plt.figure(figsize=(16,9))\n from matplotlib.colors import ListedColormap\n X_set, y_set = X, y\n X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))\n plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape), alpha = 0.6, cmap = ListedColormap(('green', 'blue')))\n plt.xlim(X1.min(), X1.max())\n plt.ylim(X2.min(), X2.max())\n for i, j in enumerate(np.unique(y_set)):\n plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],\n color = ListedColormap(('turquoise', 'blue'))(i), label = j)\n plt.title(\"%s Model Set\" %(model))\n plt.xlabel('PC 1')\n plt.ylabel('PC 2')\n plt.legend()\n plt.savefig('images/{0}.png'.format(model))", "def _classifier(self, test_set):\r\n return self._euclidian_classifier(test_set.features, test_set.targets)" ]
[ "0.7237436", "0.69271594", "0.6770888", "0.6631627", "0.6620112", "0.6457081", "0.64443177", "0.63267493", "0.626038", "0.6186775", "0.59111714", "0.5845833", "0.5778432", "0.57569915", "0.5749673", "0.5714801", "0.564882", "0.5628666", "0.5620671", "0.55739444", "0.55627376", "0.5556546", "0.5546796", "0.5502901", "0.54875493", "0.54632723", "0.54599726", "0.54472303", "0.54449505", "0.5429948" ]
0.7453398
0
Test UMAP integrated visualization with class labels specified
def test_make_classification_umap_class_labels(self): ## produce random data X, y = make_classification( n_samples=200, n_features=100, n_informative=20, n_redundant=10, n_classes=3, random_state=42, ) ## visualize data with UMAP umap = UMAPVisualizer(random_state=87, labels=["a", "b", "c"]) umap.fit(X, y) self.assert_images_similar(umap, tol=40)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_make_classification_umap(self):\n\n ## produce random data\n X, y = make_classification(\n n_samples=200,\n n_features=100,\n n_informative=20,\n n_redundant=10,\n n_classes=3,\n random_state=42,\n )\n\n ## visualize data with UMAP\n umap = UMAPVisualizer(random_state=87)\n umap.fit(X, y)\n\n self.assert_images_similar(umap, tol=40)", "def test_umap_mismtached_labels(self):\n ## produce random data\n X, y = make_classification(\n n_samples=200,\n n_features=100,\n n_informative=20,\n n_redundant=10,\n n_classes=3,\n random_state=42,\n )\n\n ## fewer labels than classes\n umap = UMAPVisualizer(random_state=87, labels=[\"a\", \"b\"])\n with pytest.raises(YellowbrickValueError):\n umap.fit(X, y)\n\n ## more labels than classes\n umap = UMAPVisualizer(random_state=87, labels=[\"a\", \"b\", \"c\", \"d\"])\n with pytest.raises(YellowbrickValueError):\n umap.fit(X, y)", "def map_objects_classifier_evaluation(self):\n df = self.results[(self.results['iou'] > 0.7)]\n y_true = df['true_class']\n y_pred = df['pred_class']\n print(classification_report(y_true, y_pred))\n matrix = confusion_matrix(y_true, y_pred)\n matrix = matrix.astype('float') / matrix.sum(axis=1)[:, np.newaxis]\n import seaborn as sns\n\n plt.figure(figsize=(10, 7))\n sns.set(font_scale=2.4)\n sns.heatmap(matrix, annot=True, annot_kws={'size': 25},\n cmap=plt.cm.Reds)\n # Add labels to the plot\n class_names = ['background', 'building', 'water']\n tick_marks = np.arange(len(class_names))\n tick_marks2 = tick_marks + 0.28\n tick_marks2[0] = tick_marks2[0] - 0.2\n tick_marks = tick_marks + 0.5\n plt.xticks(tick_marks, class_names, rotation=0)\n plt.yticks(tick_marks2, class_names, rotation=90)\n plt.xlabel('Predicted label', labelpad=13)\n plt.ylabel('True label', labelpad=13)\n plt.show()", "def test_custom_colors_umap(self):\n ## produce random data\n X, y = make_classification(\n n_samples=200,\n n_features=100,\n n_informative=20,\n n_redundant=10,\n n_classes=5,\n random_state=42,\n )\n\n ## specify a list of custom colors >= n_classes\n purple_blues = [\"indigo\", \"orchid\", \"plum\", \"navy\", \"purple\", \"blue\"]\n\n ## instantiate the visualizer and check that self.colors is correct\n purple_umap = UMAPVisualizer(colors=purple_blues, random_state=87)\n assert purple_umap.colors == purple_blues\n\n ## fit the visualizer and check that self.color_values is as long as\n ## n_classes and is the first n_classes items in self.colors\n purple_umap.fit(X, y)\n assert len(purple_umap.color_values_) == len(purple_umap.classes_)\n assert purple_umap.color_values_ == purple_blues[: len(purple_umap.classes_)]\n\n ## specify a list of custom colors < n_classes\n greens = [\"green\", \"lime\", \"teal\"]\n\n ## instantiate the visualizer and check that self.colors is correct\n green_umap = UMAPVisualizer(colors=greens, random_state=87)\n assert green_umap.colors == greens\n\n ## fit the visualizer and check that self.color_values is as long as\n ## n_classes and the user-supplied color list gets recycled as expected\n green_umap.fit(X, y)\n assert len(green_umap.color_values_) == len(green_umap.classes_)\n assert green_umap.color_values_ == [\"green\", \"lime\", \"teal\", \"green\", \"lime\"]", "def test_no_target_umap(self):\n ## produce random data\n X, y = make_classification(\n n_samples=200,\n n_features=100,\n n_informative=20,\n n_redundant=10,\n n_classes=3,\n random_state=6897,\n )\n\n ## visualize data with UMAP\n umap = UMAPVisualizer(random_state=64)\n umap.fit(X)\n\n self.assert_images_similar(umap, tol=40)", "def 
test_labels(self):\n self.compliance_tester.test_labels(self.oi)", "def test_labels(self):\n classes = np.array([\"a\", \"b\", \"c\", \"d\", \"e\"])\n y = classes[np.random.randint(0, 5, 100)]\n\n oz = ClassificationScoreVisualizer(GaussianNB, classes=classes)\n npt.assert_array_equal(oz._labels(), classes)\n\n encoder = dict(zip(range(len(classes)), classes))\n oz = ClassificationScoreVisualizer(GaussianNB, encoder=encoder)\n npt.assert_array_equal(oz._labels(), classes)\n\n encoder = LabelEncoder().fit(y)\n oz = ClassificationScoreVisualizer(GaussianNB, encoder=encoder)\n npt.assert_array_equal(oz._labels(), classes)", "def test_quick_method(self):\n corpus = load_hobbies()\n tfidf = TfidfVectorizer()\n\n X = tfidf.fit_transform(corpus.data)\n y = corpus.target\n\n viz = umap(X, y, show=False)\n assert isinstance(viz, UMAPVisualizer)\n\n self.assert_images_similar(viz, tol=50)", "def create_umap(name):\n\tglobal dir\n\tdirec = dir + \"/\" + name + \"/\"\n\tos.chdir(direc + \"representations/\")\n\t\n\t# Palette size of 2x50 required. 1-49 for labeled nat data, 51-100 for labeled syn data, 50 for unlabeled nat data\n\tpalette = sns.color_palette(\"Blues_d\", 30)# Syn data in blue\n\tpalette.extend(sns.dark_palette(\"purple\", 20)) # Unimportant, just a filler\n\tpalette.extend(sns.color_palette(\"Reds_d\", 30))# Nat data in red\n\tpalette.extend(sns.dark_palette(\"purple\", 20))# Unimportant, just a filler\n\tpalette[49]=\"#50B689\"# Unlabeled nat data in green\n\t# print(\"size of palette \" + str(len(palette)))\n\t\n\tfor file in glob.glob(\"*.pt\"):\n\t\t\trepresentation = torch.load(file)\n\t\t\ttarfile = file[:-3] # Removes the .pt ending\n\t\t\ttarfile = \"tar\" + tarfile[4:] + \".log\"\n\t\t\tall_targets = []\n\t\t\twith open(tarfile, \"r\") as f:\n\t\t\t\tfor tar in f:\n\t\t\t\t\tall_targets.append(float(tar.strip()))\n\n\t\t\tsns.set(style='white', context='notebook', rc={'figure.figsize': (14, 10)})\n\t\t\treducer = umap.UMAP()\n\t\t\tembedding = reducer.fit_transform(representation.cpu())\n\t\t\t\n\t\t\tprint(\"scattering\")\n\t\t\t# print(all_targets)\n\t\t\tplt.scatter(embedding[:, 0], embedding[:, 1], c=[palette[int(y-1)] for y in all_targets], alpha=0.8)\n\t\t\tplt.gca().set_aspect('equal', 'datalim')\n\t\t\tplt.title('UMAP projection of cell data', fontsize=24);\n\t\t\tplt.savefig(\"./umap_\" + str(file[4:-3]) + \".png\")\n\t\t\tplt.clf()\n\tos.chdir(\"../../../../\")", "def show_test_results(true_labels: List[int], predictions: List[int], class_names: List[str]):\n confusion_mtx = confusion_matrix(true_labels, predictions)\n plt.figure(figsize=(10, 8))\n sns.heatmap(confusion_mtx, xticklabels=class_names, yticklabels=class_names,\n annot=True, fmt='g')\n plt.xlabel('Prediction')\n plt.ylabel('Label')\n plt.title(\"Confusion matrix\")\n plt.show()\n print(classification_report(true_labels, predictions, target_names=class_names, digits=DIGITS))", "def test_integrated_umap(self):\n tfidf = TfidfVectorizer()\n\n docs = tfidf.fit_transform(corpus.data)\n labels = corpus.target\n\n umap = UMAPVisualizer(random_state=8392, colormap=\"Set1\", alpha=1.0)\n umap.fit_transform(docs, labels)\n\n tol = 55\n self.assert_images_similar(umap, tol=tol)", "def test_labels_warning(self):\n with pytest.warns(\n YellowbrickWarning, match=\"both classes and encoder specified\"\n ):\n oz = ClassificationScoreVisualizer(\n GaussianNB(),\n classes=[\"a\", \"b\", \"c\"],\n encoder={0: \"foo\", 1: \"bar\", 2: \"zap\"},\n )\n labels = oz._labels()\n npt.assert_array_equal(labels, [\"foo\", \"bar\", 
\"zap\"])", "def test_custom_title_umap(self):\n umap = UMAPVisualizer(title=\"custom_title\")\n\n assert umap.title == \"custom_title\"", "def test_labels_encoder_no_classes(self):\n\n class L2UTransformer(object):\n def transform(self, y):\n return np.array([yi.upper() for yi in y])\n\n oz = ClassificationScoreVisualizer(GaussianNB(), encoder=L2UTransformer())\n with pytest.warns(YellowbrickWarning, match=\"could not determine class labels\"):\n assert oz._labels() is None", "def test_sklearn_umap_title(self):\n # In TSNEVisualizer, the internal sklearn UMAP transform consumes\n # some but not all kwargs passed in by user. Those not in get_params(),\n # like title, are passed through to YB's finalize method. This test should\n # notify us if UMAP's params change on the sklearn side.\n with pytest.raises(TypeError):\n UMAP(title=\"custom_title\")", "def draw_umap(\n data,\n n_neighbors=15,\n min_dist=0.1,\n c=None,\n n_components=2,\n metric=\"euclidean\",\n title=\"\",\n plot=True,\n cmap=None,\n use_plotly=False,\n **kwargs,\n):\n fit = UMAP(\n n_neighbors=n_neighbors,\n min_dist=min_dist,\n n_components=n_components,\n metric=metric,\n random_state=42,\n )\n mapper = fit.fit(data)\n u = fit.transform(data)\n if plot:\n if use_plotly:\n fig = px.scatter(\n x=u[:, 0], y=u[:, 1], color=c, title=title, **kwargs\n )\n fig.update_layout(\n {\n \"plot_bgcolor\": \"rgba(0, 0, 0, 0)\",\n \"paper_bgcolor\": \"rgba(0, 0, 0, 0)\",\n }\n )\n fig.show()\n else:\n fig = plt.figure()\n if n_components == 1:\n ax = fig.add_subplot(111)\n ax.scatter(u[:, 0], range(len(u)), c=c)\n if n_components == 2:\n ax = fig.add_subplot(111)\n scatter = ax.scatter(u[:, 0], u[:, 1], c=c, label=c, cmap=cmap)\n if n_components == 3:\n ax = fig.add_subplot(111, projection=\"3d\")\n ax.scatter(u[:, 0], u[:, 1], u[:, 2], c=c, s=100)\n plt.title(title, fontsize=18)\n legend = ax.legend(*scatter.legend_elements())\n ax.add_artist(legend)\n\n return u, mapper", "def show_result(inputs, labels, outputs):\n num_classes = outputs.size(1)\n outputs = outputs.argmax(dim=1).detach().cpu().numpy()\n if num_classes == 2:\n outputs *= 255\n mask = outputs[0].reshape((360, 640))\n fig, ax = plt.subplots(1, 2, figsize=(20, 1 * 5))\n ax[0].imshow(inputs[0, :3, :, ].detach().cpu().numpy().transpose((1, 2, 0)))\n ax[0].set_title('Image')\n ax[1].imshow(labels[0].detach().cpu().numpy().reshape((360, 640)), cmap='gray')\n ax[1].set_title('gt')\n plt.show()\n plt.figure()\n plt.imshow(mask, cmap='gray')\n plt.title('Pred')\n plt.show()", "def vis_class(X, labels, title, file_path=None):\n unique_labels = set(labels)\n colors = [plt.cm.Spectral(each)\n for each in np.linspace(0, 1, len(unique_labels))]\n\n plt.figure(figsize=(15, 12))\n for k, col in zip(unique_labels, colors):\n if k == -1:\n # Black used for noise.\n col = [0, 0, 0, 1]\n\n class_member_mask = (labels == k)\n\n xy = X[class_member_mask]\n plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=tuple(col),\n markeredgecolor='k', markersize=14, label=k)\n plt.text(xy[0, 0], xy[0, 1], str(k), fontsize=18)\n\n # xy = X[class_member_mask & ~core_samples_mask]\n # plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=tuple(col),\n # markeredgecolor='k', markersize=6, label=k)\n plt.title(title)\n plt.legend()\n plt.tight_layout()\n if file_path:\n plt.savefig(file_path, dpi=300)", "def test_decode_labels_warning(self):\n with pytest.warns(\n YellowbrickWarning, match=\"both classes and encoder specified\"\n ):\n oz = ClassificationScoreVisualizer(\n GaussianNB(),\n classes=[\"a\", \"b\", 
\"c\"],\n encoder={0: \"foo\", 1: \"bar\", 2: \"zap\"},\n )\n encoded = oz._decode_labels([0, 1, 2])\n npt.assert_array_equal(encoded, [\"foo\", \"bar\", \"zap\"])", "def test_decode_labels_from_strings(self):\n classes = np.array([\"a\", \"b\", \"c\", \"d\", \"e\"])\n decoded = classes[np.random.randint(0, 5, 100)]\n y = np.array([v.upper() for v in decoded])\n\n oz = ClassificationScoreVisualizer(GaussianNB, classes=classes)\n npt.assert_array_equal(oz._decode_labels(y), decoded)\n\n encoder = {c.upper(): c for c in classes}\n oz = ClassificationScoreVisualizer(GaussianNB, encoder=encoder)\n npt.assert_array_equal(oz._decode_labels(y), decoded)\n\n class L2UTransformer(object):\n def transform(self, y):\n return np.array([yi.upper() for yi in y])\n\n def inverse_transform(self, y):\n return np.array([yi.lower() for yi in y])\n\n oz = ClassificationScoreVisualizer(GaussianNB, encoder=L2UTransformer())\n npt.assert_array_equal(oz._decode_labels(y), decoded)", "def nominal_map(options):\n pass", "def show_umap_bokeh(data, metadata, color_field=None,\n n_neighbors=10, min_dist=0.001, metric='euclidean'):\n if color_field is None:\n dims = 3\n else:\n dims = 2\n um = umap.UMAP(n_neighbors=n_neighbors, n_components=dims,\n min_dist=min_dist, metric=metric)\n vis = um.fit_transform(data)\n\n if color_field is None:\n color = umap_color(vis[:, 2], None, 20)\n color_field = \"Third UMAP dimension\"\n else:\n color = umap_color(metadata, color_field, 20, dtype=int)\n\n scatter_data = pandas.DataFrame({\n 'umap_1': vis[:, 0],\n 'umap_2': vis[:, 1],\n 'color': color,\n 'htid': list(metadata.index),\n 'title': ['<br>'.join(textwrap.wrap(t))\n for t in metadata['title']],\n 'author': list(metadata['author']),\n 'pub_date': list(metadata['pub_date'])\n })\n\n plot_figure = figure(\n title=('UMAP Projection of Phasor vectors for ~1000 random '\n 'HathiTrust volumes (colored by {})'.format(color_field)),\n plot_width=800,\n plot_height=800,\n tools=('pan, wheel_zoom, tap, reset')\n )\n\n plot_figure.add_tools(HoverTool(\n tooltips=(\n \"<div><span style='font-size: 10px'>@htid{safe}</span></div>\"\n \"<div><span style='font-size: 10px'>@author{safe}</span></div>\"\n \"<div><span style='font-size: 10px'>@title{safe}</span></div>\"\n \"<div><span style='font-size: 10px'>@pub_date{safe}</span></div>\"\n )\n ))\n\n plot_figure.circle(\n 'umap_1',\n 'umap_2',\n color='color',\n source=scatter_data,\n )\n\n tap = plot_figure.select(type=TapTool)\n tap.callback = OpenURL(\n url='https://babel.hathitrust.org/cgi/pt?id=@htid{safe}'\n )\n show(plot_figure)", "def change_class_labels(classes):\n u,indices=np.unique(classes,return_inverse=True)\n return u,indices", "def test_labels(ruler: SpaczzRuler) -> None:\n assert all(\n [label in ruler.labels for label in [\"GPE\", \"STREET\", \"DRUG\", \"NAME\", \"BAND\"]]\n )\n assert len(ruler.labels) == 5", "def test_visualizer_with_pandas(self):\n X, y = make_classification(\n n_samples=200,\n n_features=100,\n n_informative=20,\n n_redundant=10,\n n_classes=3,\n random_state=3020,\n )\n\n X = pandas.DataFrame(X)\n y = pandas.Series(y)\n\n umap = UMAPVisualizer(random_state=64)\n umap.fit(X, y)\n\n self.assert_images_similar(umap, tol=40)", "def visclassifier(fun,xTr,yTr):\n\n yTr = np.array(yTr).flatten()\n \n symbols = [\"ko\",\"kx\"]\n marker_symbols = ['o', 'x']\n mycolors = [[0.5, 0.5, 1], [1, 0.5, 0.5]]\n classvals = np.unique(yTr)\n\n plt.figure()\n\n res=300\n xrange = np.linspace(min(xTr[:, 0]), max(xTr[:, 0]),res)\n yrange = np.linspace(min(xTr[:, 1]), 
max(xTr[:, 1]),res)\n pixelX = repmat(xrange, res, 1)\n pixelY = repmat(yrange, res, 1).T\n\n xTe = np.array([pixelX.flatten(), pixelY.flatten()]).T\n\n testpreds = fun(xTe)\n Z = testpreds.reshape(res, res)\n # Z[0,0] = 1 # optional: scale the colors correctly\n plt.contourf(pixelX, pixelY, np.sign(Z), colors=mycolors)\n\n for idx, c in enumerate(classvals):\n plt.scatter(xTr[yTr == c,0],\n xTr[yTr == c,1],\n marker=marker_symbols[idx],\n color='k'\n )\n\n plt.axis('tight')\n plt.show()", "def test(self, test_data_path):\n test_data = read_datafile(test_data_path)\n test_data = self.preprocessor.preprocess(test_data)\n\n data = '__label__' + test_data['claim'].astype(str) + test_data['check_worthiness'].astype(str) + ' ' + \\\n test_data['tweet_text']\n\n output = self.run(data)\n\n df = pd.DataFrame()\n df[\"predicted\"] = output.split()\n df[\"labeled\"] = [d.split()[0] for d in data]\n\n cm = confusion_matrix(df[\"labeled\"], df[\"predicted\"], labels=['__label__11','__label__10','__label__00'])\n\n ax= plt.subplot()\n sns.heatmap(cm, annot=True, ax = ax); #annot=True to annotate cells\n\n ax.set_xlabel('Predicted labels');ax.set_ylabel('True labels'); \n ax.set_title('Confusion Matrix'); \n ax.xaxis.set_ticklabels(['__label__11','__label__10','__label__00']); ax.yaxis.set_ticklabels(['__label__11','__label__10','__label__00']);\n\n plt.show()\n\n return np.sum(cm.diagonal()) / np.sum(cm)", "def uk_map(fig1, indata, clevs, datlons, datlats, mtitle, munits, maskswitch):\n\t\n\tfrom mpl_toolkits import basemap as bm\n\timport matplotlib.cm as cm\n\tfrom mpl_toolkits.basemap import shiftgrid \n\tfrom netCDF4 import Dataset\n\tfrom matplotlib.colors import LightSource\n\timport matplotlib.pyplot as plt\n\timport numpy as np\n\timport hillshade\n\timport set_shade\n\timport colour_map\n\t\n\tif maskswitch==1:\n\t\t# import missing data map for masking out of oceans \n\t\tmissdata = Dataset('/exports/work/geos_cxc/users/ahardin4/output/amibatch/afixa/miss.nc', 'r', format='NETCDF3_CLASSIC')\n\t\t\n\t# create the figure and axes instances.\n\tax = fig1.add_axes([0.1,0.1,0.8,0.8])\n\tm = bm.Basemap(llcrnrlon=-9.5,llcrnrlat=49.5,urcrnrlon=2.5,urcrnrlat=59,rsphere=(6378137.00,6356752.3142),\\\n \tresolution='f',area_thresh=1000.,projection='laea', lat_0=54.5,lon_0=-2.75,ax=ax)\n\tm.drawcoastlines()\n\t\n\t# read in etopo5 topography/bathymetry.\n\turl = 'http://ferret.pmel.noaa.gov/thredds/dodsC/data/PMEL/etopo5.nc'\n\tetopodata = Dataset(url)\n\ttopoin = etopodata.variables['ROSE'][:]\n\tlons = etopodata.variables['ETOPO05_X'][:]\n\tlats = etopodata.variables['ETOPO05_Y'][:]\n\t\n\t# shift data so lons go from -180 to 180 instead of 00 to 360.\n\ttopoin,lons = shiftgrid(180.,topoin,lons,start=False)\n\n\t# transform coordinates\n\tx,y=m(datlons[:,:],datlats[:,:])\n\t# transform to nx x ny regularly spaced 5km native projection grid\n\tnx = int((m.xmax-m.xmin)/5000.)+1; ny = int((m.ymax-m.ymin)/5000.)+1\n\ttopodat = m.transform_scalar(topoin,lons,lats,nx,ny)\n\t\n\t# create light source object for topography\n\tls = LightSource(azdeg = 0, altdeg = 2)\n\t# use set_shade function (also available)\n\trgb = set_shade(topodat)\n\n\t# plot image over map with imshow.\n\tim = m.imshow(rgb)\n\t\n\t# apply function to colormap pointers, can be any function at all, as long as\n\t# 0 remains 0, 1 remains 1, and values increase from one to the other.\n\t\n\t# x^4 is good for pseudo-log plots of rainfall:\n\t#log_jet=cmap_xmap(lambda x: (x*x*x*x), cm.hsv)\n\t\n\t#set to lambda x: x for no 
change:\n\tlog_jet=cmap_xmap(lambda x: (x), cm.jet)\n\t\n\t#apply function to colormap if desired to make whole scale 'hotter' or 'colder'\n\t#example makes colourmap significantly hotter by confining values to upper quarter:\t\n\t#log_jet=cmap_map(lambda x: x/4+0.75, cm.gist_rainbow)\n\t\n\t# mask out oceans, but not lakes. Useful when plotting or comparing against observed\n\tif maskswitch==1:\n\t\tmissmap=missdata.variables['land_map']\n\t\tmissmap2=missdata.variables['land_map']\n\t\t# cut from big mask to small mask if necessary\n\t\t#smallmap=missmap[0,6:46,0:34]\n\t\tsmallmap=missmap[0,:,:]\n\t\tsmallmap2=missmap2[0,:,:]\n\t\t# expand out by one to take into account interpolation\n\t\t\n\t\tfor i in range(1,39):\n\t\t\tfor j in range(1,33):\n\t\t\t\tif smallmap[i,j] == 0.0:\n\t\t\t\t\tsmallmap2[i-1,j]=0.0 \n\t\t\t\t\tsmallmap2[i,j-1]=0.0\n\t\t\t\t\tsmallmap2[i+1,j]=0.0 \n\t\t\t\t\tsmallmap2[i,j+1]=0.0\n\t\t\n\t\t# perform masking\n\t\tindata=np.ma.masked_array(indata,mask=(smallmap2<-0.5))\n\t\tprint smallmap2[0,0], smallmap2[36,0], smallmap2[20,20]\n\t\t#indata[indata<=0.1]=np.nan\n\t# produce semi-transparent contour map\n\tcontourmap=m.contourf(x,y,indata,clevs,cmap=cm.get_cmap(log_jet,len(clevs)-1),extend='both',\n\t\talpha=0.5,origin='lower',rasterized=True)\n\t\t\n\t# produce simple block plot\n\t#contourmap=m.pcolor(x,y,indata,shading='interp',cmap=cm.get_cmap(log_jet,len(clevs)-1),\n\t#\talpha=0.5)\n\t\t\n\t# place colour bar on right\n\tcb = m.colorbar(contourmap,\"right\", size=\"5%\", pad='3%')\n\t# configure colour bar labeling\n\tcl = plt.getp(cb.ax, 'ymajorticklabels')\n\tcontourmap=plt.setp(cl, fontsize=14)\n\n\t# draw parallels and meridians so as not to clash with colour bar placement\n\t# labels = [left,right,top,bottom]\n\tm.drawparallels(np.arange(-70.,80,1.), labels=[1,0,0,1], fontsize=13)\n\tm.drawmeridians(np.arange(351.,362.,2.),labels=[1,0,0,1], fontsize=13)\n\t\n\t# configure title and units\n\tcb.ax.set_xlabel(munits, fontsize=12)\n\tcontourmap=plt.title(mtitle, fontsize=14)", "def labels_to_cityscapes_palette(image):\n classes=ZHANG_classes \n result =np.zeros((img.shape[0], img.shape[1], 3),dtype=np.uint8)\n for key, value in classes.items():\n result[np.where(img == key)] = value\n return result", "def display_missclassified(class_to_idx: Dict[str,int], \n targets: List[int], \n predictions: List[int], \n images: List[np.ndarray], \n gridsize: Tuple[int] = (4,4)):\n fig = plt.figure()\n plot_counter = 1\n mean = np.array([0.485, 0.456, 0.406])\n std = np.array([0.229, 0.224, 0.225])\n idx_to_class = {i:label for i, label in enumerate(class_to_idx)}\n for i in range(len(targets)):\n if plot_counter > gridsize[0]*gridsize[1]:\n break\n \n image = images[i].transpose(1, 2, 0)\n image = ((image * std) + mean) * 255\n image = image.astype(\"uint8\")\n \n image = cv2.resize(image, (128, 128))\n image = cv2.putText(image, idx_to_class[predictions[i]], (0,20), 3, 0.4, (0,0,255), 1)\n if predictions[i] == targets[i]:\n pass\n else:\n ax = fig.add_subplot(gridsize[0], gridsize[1], plot_counter)\n ax.imshow(image)\n plot_counter += 1\n plt.show()" ]
[ "0.68238217", "0.67836696", "0.63707215", "0.6301605", "0.6257042", "0.613644", "0.60741764", "0.59136367", "0.5912499", "0.5887305", "0.5873073", "0.5842747", "0.5841711", "0.58411485", "0.5811801", "0.5750405", "0.5730497", "0.57265025", "0.5669417", "0.5630759", "0.562077", "0.560996", "0.55990505", "0.5511536", "0.55047154", "0.54902273", "0.54802096", "0.54748607", "0.547357", "0.546475" ]
0.75628173
0
Test UMAP when no target or classes are specified
def test_no_target_umap(self): ## produce random data X, y = make_classification( n_samples=200, n_features=100, n_informative=20, n_redundant=10, n_classes=3, random_state=6897, ) ## visualize data with UMAP umap = UMAPVisualizer(random_state=64) umap.fit(X) self.assert_images_similar(umap, tol=40)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_umap_mismtached_labels(self):\n ## produce random data\n X, y = make_classification(\n n_samples=200,\n n_features=100,\n n_informative=20,\n n_redundant=10,\n n_classes=3,\n random_state=42,\n )\n\n ## fewer labels than classes\n umap = UMAPVisualizer(random_state=87, labels=[\"a\", \"b\"])\n with pytest.raises(YellowbrickValueError):\n umap.fit(X, y)\n\n ## more labels than classes\n umap = UMAPVisualizer(random_state=87, labels=[\"a\", \"b\", \"c\", \"d\"])\n with pytest.raises(YellowbrickValueError):\n umap.fit(X, y)", "def test_make_classification_umap_class_labels(self):\n\n ## produce random data\n X, y = make_classification(\n n_samples=200,\n n_features=100,\n n_informative=20,\n n_redundant=10,\n n_classes=3,\n random_state=42,\n )\n\n ## visualize data with UMAP\n umap = UMAPVisualizer(random_state=87, labels=[\"a\", \"b\", \"c\"])\n umap.fit(X, y)\n\n self.assert_images_similar(umap, tol=40)", "def test_make_classification_umap(self):\n\n ## produce random data\n X, y = make_classification(\n n_samples=200,\n n_features=100,\n n_informative=20,\n n_redundant=10,\n n_classes=3,\n random_state=42,\n )\n\n ## visualize data with UMAP\n umap = UMAPVisualizer(random_state=87)\n umap.fit(X, y)\n\n self.assert_images_similar(umap, tol=40)", "def test_umap_unavailable():\n from yellowbrick.text.umap_vis import UMAP\n\n assert UMAP is None\n\n with pytest.raises(\n YellowbrickValueError, match=\"umap package doesn't seem to be installed\"\n ):\n UMAPVisualizer()", "def test_sklearn_umap_title(self):\n # In TSNEVisualizer, the internal sklearn UMAP transform consumes\n # some but not all kwargs passed in by user. Those not in get_params(),\n # like title, are passed through to YB's finalize method. This test should\n # notify us if UMAP's params change on the sklearn side.\n with pytest.raises(TypeError):\n UMAP(title=\"custom_title\")", "def test_quick_method(self):\n corpus = load_hobbies()\n tfidf = TfidfVectorizer()\n\n X = tfidf.fit_transform(corpus.data)\n y = corpus.target\n\n viz = umap(X, y, show=False)\n assert isinstance(viz, UMAPVisualizer)\n\n self.assert_images_similar(viz, tol=50)", "def test_integrated_umap(self):\n tfidf = TfidfVectorizer()\n\n docs = tfidf.fit_transform(corpus.data)\n labels = corpus.target\n\n umap = UMAPVisualizer(random_state=8392, colormap=\"Set1\", alpha=1.0)\n umap.fit_transform(docs, labels)\n\n tol = 55\n self.assert_images_similar(umap, tol=tol)", "def test_sklearn_umap_size(self):\n # In UMAPVisualizer, the internal sklearn UMAP transform consumes\n # some but not all kwargs passed in by user. Those not in get_params(),\n # like size, are passed through to YB's finalize method. This test should\n # notify us if UMAP's params change on the sklearn side.\n with pytest.raises(TypeError):\n UMAP(size=(100, 100))", "def _determine_targets(self, program):\n super()._determine_targets(program)\n # Do not consider libbb files as targets\n for file in (program._files[f] for f in self._match_files if self._match_files[f] > 0):\n file.target = False", "def create_umap(name):\n\tglobal dir\n\tdirec = dir + \"/\" + name + \"/\"\n\tos.chdir(direc + \"representations/\")\n\t\n\t# Palette size of 2x50 required. 
1-49 for labeled nat data, 51-100 for labeled syn data, 50 for unlabeled nat data\n\tpalette = sns.color_palette(\"Blues_d\", 30)# Syn data in blue\n\tpalette.extend(sns.dark_palette(\"purple\", 20)) # Unimportant, just a filler\n\tpalette.extend(sns.color_palette(\"Reds_d\", 30))# Nat data in red\n\tpalette.extend(sns.dark_palette(\"purple\", 20))# Unimportant, just a filler\n\tpalette[49]=\"#50B689\"# Unlabeled nat data in green\n\t# print(\"size of palette \" + str(len(palette)))\n\t\n\tfor file in glob.glob(\"*.pt\"):\n\t\t\trepresentation = torch.load(file)\n\t\t\ttarfile = file[:-3] # Removes the .pt ending\n\t\t\ttarfile = \"tar\" + tarfile[4:] + \".log\"\n\t\t\tall_targets = []\n\t\t\twith open(tarfile, \"r\") as f:\n\t\t\t\tfor tar in f:\n\t\t\t\t\tall_targets.append(float(tar.strip()))\n\n\t\t\tsns.set(style='white', context='notebook', rc={'figure.figsize': (14, 10)})\n\t\t\treducer = umap.UMAP()\n\t\t\tembedding = reducer.fit_transform(representation.cpu())\n\t\t\t\n\t\t\tprint(\"scattering\")\n\t\t\t# print(all_targets)\n\t\t\tplt.scatter(embedding[:, 0], embedding[:, 1], c=[palette[int(y-1)] for y in all_targets], alpha=0.8)\n\t\t\tplt.gca().set_aspect('equal', 'datalim')\n\t\t\tplt.title('UMAP projection of cell data', fontsize=24);\n\t\t\tplt.savefig(\"./umap_\" + str(file[4:-3]) + \".png\")\n\t\t\tplt.clf()\n\tos.chdir(\"../../../../\")", "def check_default_uv_set___fix():\n all_meshes = pm.ls(type=\"mesh\")\n for mesh in all_meshes:\n node = mesh.getParent()\n pm.select(node, r=1)\n uvsets = pm.polyUVSet(node, q=1, auv=1)\n if len(uvsets) == 1 and uvsets[0] != \"map1\":\n pm.polyUVSet(rename=1, newUVSet=\"map1\", uvSet=uvsets[0])\n pm.select(cl=1)", "def test_implemented_targets_derived_from_target(self):\n for key in forcebalance.objective.Implemented_Targets.keys():\n self.logger.debug(\"Assert %s is subclass of target\\n\" % str(forcebalance.objective.Implemented_Targets[key]))\n self.assertTrue(issubclass(forcebalance.objective.Implemented_Targets[key],forcebalance.target.Target))", "def _empty_mapping(self):\r\n return self.type2test()", "def test_custom_colors_umap(self):\n ## produce random data\n X, y = make_classification(\n n_samples=200,\n n_features=100,\n n_informative=20,\n n_redundant=10,\n n_classes=5,\n random_state=42,\n )\n\n ## specify a list of custom colors >= n_classes\n purple_blues = [\"indigo\", \"orchid\", \"plum\", \"navy\", \"purple\", \"blue\"]\n\n ## instantiate the visualizer and check that self.colors is correct\n purple_umap = UMAPVisualizer(colors=purple_blues, random_state=87)\n assert purple_umap.colors == purple_blues\n\n ## fit the visualizer and check that self.color_values is as long as\n ## n_classes and is the first n_classes items in self.colors\n purple_umap.fit(X, y)\n assert len(purple_umap.color_values_) == len(purple_umap.classes_)\n assert purple_umap.color_values_ == purple_blues[: len(purple_umap.classes_)]\n\n ## specify a list of custom colors < n_classes\n greens = [\"green\", \"lime\", \"teal\"]\n\n ## instantiate the visualizer and check that self.colors is correct\n green_umap = UMAPVisualizer(colors=greens, random_state=87)\n assert green_umap.colors == greens\n\n ## fit the visualizer and check that self.color_values is as long as\n ## n_classes and the user-supplied color list gets recycled as expected\n green_umap.fit(X, y)\n assert len(green_umap.color_values_) == len(green_umap.classes_)\n assert green_umap.color_values_ == [\"green\", \"lime\", \"teal\", \"green\", \"lime\"]", "def 
test_map_args_all_none():\n pass", "def test_make_pipeline(self):\n\n umap = UMAPVisualizer() # Should not cause an exception.\n assert umap.transformer_ is not None\n\n assert len(umap.transformer_.steps) == 1", "def targets_placeholder(self):", "def test_implemented_targets_derived_from_target(self):\n for key in forcebalance.objective.Implemented_Targets.keys():\n self.logger.debug(\"Assert %s is subclass of target\\n\" % str(forcebalance.objective.Implemented_Targets[key]))\n assert issubclass(forcebalance.objective.Implemented_Targets[key],forcebalance.target.Target)", "def autofixTargets(self, local_ctx):\n pass", "def compatible(self, other):\n return (hasattr(other, 'tmap') and hasattr(other, 'qmap')\n and hasattr(other, 'umap')\n and super(tqumap, self).compatible(other))", "def test_usearch_handles_unions(self):\r\n\r\n # adapted from test_app.test_cd_hit.test_cdhit_clusters_from_seqs\r\n # Should detect and remove chimeric sequence based\r\n # during ref based detection\r\n\r\n exp_otu_ids = ['0', '1', '2']\r\n\r\n # will retain 'chimera' with union option.\r\n exp_clusters = [['Solemya', 'Solemya_seq2'], ['chimera'],\r\n ['usearch_ecoli_seq', 'usearch_ecoli_seq2']\r\n ]\r\n\r\n app = UsearchOtuPicker(params={'save_intermediate_files': False,\r\n 'db_filepath': self.tmp_ref_database,\r\n 'output_dir': self.temp_dir,\r\n 'remove_usearch_logs': True,\r\n 'reference_chimera_detection': True,\r\n 'de_novo_chimera_detection': True,\r\n 'cluster_size_filtering': False,\r\n 'minlen': 12,\r\n 'w': 12,\r\n 'minsize': 1,\r\n 'percent_id': 0.97,\r\n 'percent_id_err': 0.97,\r\n 'abundance_skew': 2,\r\n 'chimeras_retention': 'union'\r\n })\r\n\r\n obs = app(self.tmp_seq_filepath2)\r\n\r\n obs_otu_ids = sorted(obs.keys())\r\n obs_clusters = sorted(obs.values())\r\n # The relation between otu ids and clusters is abitrary, and\r\n # is not stable due to use of dicts when parsing clusters -- therefore\r\n # just checks that we have the expected group of each\r\n self.assertEqual(obs_otu_ids, exp_otu_ids)\r\n self.assertEqual(obs_clusters, exp_clusters)", "def test_usearch_handles_unions(self):\r\n\r\n # adapted from test_app.test_cd_hit.test_cdhit_clusters_from_seqs\r\n # Should detect and remove chimeric sequence based\r\n # during ref based detection\r\n\r\n exp_otu_ids = ['0', '1', '2']\r\n\r\n # will retain 'chimera' with union option.\r\n exp_clusters = [['Solemya', 'Solemya_seq2'], ['chimera'],\r\n ['usearch_ecoli_seq', 'usearch_ecoli_seq2']\r\n ]\r\n\r\n app = UsearchReferenceOtuPicker(\r\n params={'save_intermediate_files': False,\r\n 'db_filepath':\r\n self.tmp_ref_database,\r\n 'output_dir': self.temp_dir,\r\n 'remove_usearch_logs': True,\r\n 'reference_chimera_detection':\r\n True,\r\n 'de_novo_chimera_detection':\r\n True,\r\n 'cluster_size_filtering':\r\n False,\r\n 'minlen': 12,\r\n 'w': 12,\r\n 'minsize': 1,\r\n 'percent_id': 0.97,\r\n 'percent_id_err': 0.97,\r\n 'abundance_skew': 2,\r\n 'chimeras_retention': 'union'\r\n })\r\n\r\n obs = app(self.tmp_seq_filepath2, self.tmp_otu_ref_database)\r\n\r\n obs_otu_ids = sorted(obs.keys())\r\n obs_clusters = sorted(obs.values())\r\n # The relation between otu ids and clusters is abitrary, and\r\n # is not stable due to use of dicts when parsing clusters -- therefore\r\n # just checks that we have the expected group of each\r\n self.assertEqual(obs_otu_ids, exp_otu_ids)\r\n self.assertEqual(obs_clusters, exp_clusters)", "def is_target(top_container):\n\tif '.' 
not in top_container.get('barcode', ''):\n\t\treturn True\n\telse:\n\t\treturn False", "def get_only_target():\r\n\ttype1_img, type1_label, type2_img, type2_label = load_data_all()\r\n\ttype1_imgs, type1_labels = type1_makeup(type1_img, type1_label, v1 = 100, v2 = 160, masking = True)\r\n\ttype2_imgs, type2_labels = type2_makeup(type2_img, type2_label, v1 = 100, v2 = 55, masking = True)\r\n\r\n\tnew_type1_imgs, new_type1_labels = find_contain_target(type1_imgs, type1_labels)\r\n\tnew_type2_imgs, new_type2_labels = find_contain_target(type2_imgs, type2_labels)\r\n\r\n\treturn {'type1_img' : new_type1_imgs, 'type1_label' : new_type1_labels,\r\n\t\t'type2_img':new_type2_imgs, 'type2_label':new_type2_labels}", "def test_uvmappedtexture(pngfile):\n tex = omf.UVMappedTexture()\n tex.image = pngfile\n with pytest.raises(properties.ValidationError):\n tex.uv_coordinates = [0.0, 1.0, 0.5]\n tex.uv_coordinates = [[0.0, -0.5], [0.5, 1]]\n assert tex.validate()\n tex.uv_coordinates = [[0.0, 0.5], [0.5, np.nan]]\n assert tex.validate()\n\n points = omf.PointSet()\n points.vertices = [[0.0, 0, 0], [1, 1, 1], [2, 2, 2]]\n points.textures = [tex]\n with pytest.raises(properties.ValidationError):\n points.validate()\n points.vertices = [[0.0, 0, 0], [1, 1, 1]]\n assert points.validate()", "def copy_map1_name(source, target):\n\n if not is_matching_type(source, target):\n return\n\n source_uv_name = cmds.getAttr(\"{}.uvSet[0].uvSetName\".format(source))\n\n try:\n cmds.setAttr(\"{}.uvSet[0].uvSetName\".format(target), source_uv_name,\n type=\"string\")\n except RuntimeError:\n logger.debug(\"{} doesn't not have uvs, skipping udpate map1 name\"\n .format(target))\n return", "def test_mapping_switch():\n\tassert nset != oset", "def map_to_ground_truth(overlaps, gt_bbox, gt_class, params):\n #taken from fastai\n\n # for each object, what is the prior of maximum overlap\n gt_to_prior_overlap, gt_to_prior_idx = overlaps.max(1)\n\n # for each prior, what is the object of maximum overlap\n prior_to_gt_overlap, prior_to_gt_idx = overlaps.max(0)\n\n # for priors of max overlap, set a high value to make sure they match\n prior_to_gt_overlap[gt_to_prior_idx] = 1.99\n\n # for each prior, get the actual id of the class it should predict, unmatched anchors (low IOU) should predict background\n matched_gt_class_ids = gt_class[prior_to_gt_idx]\n pos = prior_to_gt_overlap > params.mapping_threshold \n matched_gt_class_ids[~pos] = 100 # background code\n\n # for each matched prior, get the bbox it should predict\n raw_matched_bbox = gt_bbox[prior_to_gt_idx]\n pos_idx = torch.nonzero(pos)[:, 0]\n # which of those max values are actually precise enough?\n gt_bbox_for_matched_anchors = raw_matched_bbox[pos_idx]\n\n # so now we have the GT represented with priors\n return gt_bbox_for_matched_anchors, matched_gt_class_ids, pos_idx", "def test_default_app_map_search_0(self):\n pass", "def test_custom_title_umap(self):\n umap = UMAPVisualizer(title=\"custom_title\")\n\n assert umap.title == \"custom_title\"" ]
[ "0.63227683", "0.62847257", "0.6214535", "0.60885245", "0.58001065", "0.57032645", "0.55417573", "0.5501365", "0.5477659", "0.54495037", "0.5408545", "0.53191584", "0.5308243", "0.52774423", "0.5270059", "0.5262288", "0.52073884", "0.51964533", "0.5167412", "0.5149407", "0.513493", "0.5125447", "0.5125061", "0.5102489", "0.51006883", "0.50945777", "0.5077287", "0.5060843", "0.50516176", "0.5051347" ]
0.713948
0
Test that the user can supply an alpha param on instantiation
def test_alpha_param(self): ## produce random data X, y = make_classification( n_samples=200, n_features=100, n_informative=20, n_redundant=10, n_classes=3, random_state=42, ) ## Instantiate a UMAPVisualizer, provide custom alpha umap = UMAPVisualizer(random_state=64, alpha=0.5) # Test param gets set correctly assert umap.alpha == 0.5 # Mock ax and fit the visualizer umap.ax = mock.MagicMock(autospec=True) umap.fit(X, y) # Test that alpha was passed to internal matplotlib scatterplot _, scatter_kwargs = umap.ax.scatter.call_args assert "alpha" in scatter_kwargs assert scatter_kwargs["alpha"] == 0.5
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_valid_alpha(alpha: Any) -> None:\n check_alpha(alpha=alpha)", "def test_ALPHA(self):\n self.assertIsInstance(constants.ALPHA, int,\n \"constants.ALPHA must be an integer.\")", "def test_invalid_alpha(alpha: Any) -> None:\n with pytest.raises(ValueError, match=r\".*Invalid alpha.*\"):\n check_alpha(alpha=alpha)", "def __init__(self, alpha = 1): \n self.alpha = alpha", "def __init__(self, alpha: int=0.05):\r\n super().__init__()\r\n self.alpha = alpha", "def __init__(self, alpha = 1.0): \n self.alpha = alpha", "def test_check_digits_with_wrong_alphabet(self, _, alpha):\n with self.assertRaises(exceptions.WrongArgumentValueError):\n positional.encode(42, 10, alphabet=alpha)", "def test_Alpha_getter(self):\r\n self.assertEqual(self.mc.Alpha, 0.05)", "def test1ParameterAccess( self ):\n\n from AthExHelloWorld.AthExHelloWorldConf import HelloAlg\n\n HelloWorld = HelloAlg( 'test1ParameterAccess' )\n\n # a not-yet-set variable should raise\n self.assertRaises( AttributeError, getattr, HelloWorld, 'MyInt' )\n\n # a non-existing variable should raise\n self.assertRaises( AttributeError, setattr, HelloWorld, 'MyMy', 1 )", "def test_validParameters(self):\n # stupid/simple inputs\n h = Helix(\"thing\", \"Cu\", 0, 0, 1, 1, 1)\n self.assertEqual(h.getDimension(\"axialPitch\"), 1)\n\n # standard case / inputs ordered well\n h = Helix(\n \"what\",\n \"Cu\",\n Tinput=25.0,\n Thot=425.0,\n id=0.1,\n od=0.35,\n mult=1.0,\n axialPitch=1.123,\n helixDiameter=1.5,\n )\n self.assertTrue(1.123 < h.getDimension(\"axialPitch\") < 1.15)\n\n # inputs ordered crazy\n h = Helix(\n material=\"Cu\",\n id=0.1,\n mult=1.0,\n Tinput=25.0,\n Thot=425.0,\n axialPitch=1.123,\n name=\"stuff\",\n od=0.35,\n helixDiameter=1.5,\n )\n self.assertTrue(1.123 < h.getDimension(\"axialPitch\") < 1.15)\n\n # missing helixDiameter input\n with self.assertRaises(TypeError):\n h = Helix(\n name=\"helix\",\n material=\"Cu\",\n Tinput=25.0,\n Thot=425.0,\n id=0.1,\n od=0.35,\n mult=1.0,\n axialPitch=1.123,\n )", "def __init__(self, alpha=1.0, epsilon=0.05, gamma=0.8, numTraining = 10):\n self.alpha = float(alpha)\n self.epsilon = float(epsilon)\n self.discount = float(gamma)\n self.numTraining = int(numTraining)", "def __init__(self, alpha=1.0, epsilon=0.05, gamma=0.8, numTraining=10):\n self.alpha = float(alpha)\n self.epsilon = float(epsilon)\n self.discount = float(gamma)\n self.numTraining = int(numTraining)", "def test_checkParameters(self):\n self.failUnlessEqual(self.nice.opts['long'], \"Alpha\")\n self.failUnlessEqual(self.nice.opts['another'], \"Beta\")\n self.failUnlessEqual(self.nice.opts['longonly'], \"noshort\")\n self.failUnlessEqual(self.nice.opts['shortless'], \"Gamma\")", "def Alpha(self, alpha):\r\n if alpha >= 0 and alpha <= 1:\r\n self._alpha = alpha\r\n else:\r\n raise ValueError(\"Alpha must be between 0 and 1.\")", "def test_alpha_value_error(self, dmatrix_2x1_with_label, alpha):\n\n dummy_confo_model = DummyLeafNodeScaledConformalPredictor()\n\n with pytest.raises(\n ValueError, match=re.escape(\"alpha must be in range [0 ,1]\")\n ):\n\n dummy_confo_model.calibrate(\n data=dmatrix_2x1_with_label, alpha=alpha, response=np.array([0, 1])\n )", "def __init__(self, name=\"alpha\", attr=None):\n Arg.__init__(self, name, attr)", "def test_init(self):\r\n c = AlphaDiversityCalc(observed_otus)\r\n self.assertEqual(c.Metric, observed_otus)\r\n self.assertEqual(c.Params, {})", "def test_need_params(self):\n\n acme = ACMEAccount(client=self.client)\n # missing acme_id, name\n self.assertRaises(TypeError, acme.create)\n 
# missing name\n self.assertRaises(TypeError, acme.create, 1234)", "def __init__(self, alpha=0.05, lookahead=5):\n self.alpha = alpha\n self.lookahead = lookahead\n self.reset()", "def __init__(self, a=5, b=8, m=26):\n\n self.a = a\n self.b = b\n self.m = m\n self.alpha = list(string.ascii_lowercase)", "def test_Alpha_setter_invalid(self):\r\n self.assertRaises(ValueError, setattr, self.mc, 'Alpha', -5)\r\n self.assertRaises(ValueError, setattr, self.mc, 'Alpha', 2)", "def comp_alpha(self):\n pass", "def test_name_validation(self, attr):\n kwargs = {'kind': POSITIONAL_ONLY, attr: 3}\n with pytest.raises(TypeError) as excinfo:\n FParameter(**kwargs)\n assert excinfo.value.args[0] == \\\n '{} must be a str, not a {}'.format(attr, 3)", "def __init__(self, alpha_calcs, params=None):\r\n self.Params = params or {}\r\n self.Calcs = alpha_calcs", "def test_need_params(self):\n\n acme = ACMEAccount(client=self.client)\n # missing name, acme_server, org_id\n self.assertRaises(TypeError, acme.create)\n # missing acme_server, org_id\n self.assertRaises(TypeError, acme.create, \"name\")\n # missing org_id\n self.assertRaises(TypeError, acme.create, \"name\", \"acme_server\")", "def __init__(self, mixer: Callable, alpha: float = 1.0):\n self.alpha = alpha\n self.aug = mixer", "def test2ParameterValidation( self ):\n\n from AthExHelloWorld.AthExHelloWorldConf import HelloAlg\n\n HelloWorld = HelloAlg( 'test2ParameterValidation' )\n\n # not-allowed conversions\n self.assertRaises( ValueError, setattr, HelloWorld, 'MyInt', 1. )\n self.assertRaises( ValueError, setattr, HelloWorld, 'MyInt', '1' )\n self.assertRaises( TypeError, setattr, HelloWorld, 'MyInt', [1] )\n\n self.assertRaises( ValueError, setattr, HelloWorld, 'MyDouble', '1' )\n self.assertRaises( ValueError, setattr, HelloWorld, 'MyDouble', '1.' )\n self.assertRaises( TypeError, setattr, HelloWorld, 'MyDouble', [1.] )", "def __init__(self, initAlpha=0.01, factor=0.25, dropEvery=10):\n self.initAlpha = initAlpha\n self.factor = factor\n self.dropEvery = dropEvery\n pass", "def test_Alpha_setter(self):\r\n self.mc.Alpha = 0.01\r\n self.assertEqual(self.mc.Alpha, 0.01)", "def test_color__rgba_int_args_invalid_value_without_alpha(self):\n self.assertRaises(ValueError, pygame.Color, 256, 10, 105)\n self.assertRaises(ValueError, pygame.Color, 10, 256, 105)\n self.assertRaises(ValueError, pygame.Color, 10, 105, 256)" ]
[ "0.77863246", "0.6957073", "0.694851", "0.6797293", "0.6712802", "0.66309685", "0.65371954", "0.6380201", "0.62434375", "0.6134172", "0.6118963", "0.61111933", "0.60929245", "0.6078673", "0.60731035", "0.6065588", "0.60491043", "0.6048345", "0.6028508", "0.60014725", "0.5980855", "0.5934449", "0.59311455", "0.58856946", "0.5873914", "0.58697474", "0.58662856", "0.5846294", "0.583874", "0.5827925" ]
0.7568995
1
Test the umap quick method with the hobbies dataset
def test_quick_method(self): corpus = load_hobbies() tfidf = TfidfVectorizer() X = tfidf.fit_transform(corpus.data) y = corpus.target viz = umap(X, y, show=False) assert isinstance(viz, UMAPVisualizer) self.assert_images_similar(viz, tol=50)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test02Something(self):\n\n summaryFile = re.sub(\"\\.csv\",\"\",self.parsedFile)+\"_summary.csv\"\n bmap = self.bm.load_summary(summaryFile,taxaList=[\"10090\"])\n self.assertEqual(bmap['GG11117|c2_g1_i1'][0],'INT1_MOUSE')\n self.assertEqual(bmap['GG11117|c2_g1_i1'][1],'68510')\n \n bmap = self.bm.load_summary(summaryFile,taxaList=[\"10090\"],trinityGene=True)\n self.assertEqual(bmap['GG11117|c2_g1'][0],'INT1_MOUSE')\n\n bmap = self.bm.load_summary(summaryFile,taxaList=[\"10090\"],trinityGene=True,best=False)\n self.assertEqual(bmap['GG11117|c2_g1'][0][0],'INT1_MOUSE')\n self.assertEqual(bmap['GG11117|c2_g1'][0][4],0.0)", "def test_get_boat(self):\n pass", "def test_get_learners(self):\n pass", "def test_machine_learning():", "def test_intent_classifier_get_testing_samples(self):\n pass", "def test_get_bans(self):\n pass", "def test_intent_classifier_vaporise(self):\n pass", "def test_something(self):\n\n allure.dynamic.title(\"Testing compute_ranks\")\n allure.dynamic.severity(allure.severity_level.NORMAL)\n allure.dynamic.description_html('<h3>Codewars badge:</h3>'\n '<img src=\"https://www.codewars.com/users/myFirstCode'\n '/badges/large\">'\n '<h3>Test Description:</h3>'\n \"<p>Test the function taht organizes a sports league in a \"\n \"round-robin-system. Each team meets all other teams. \"\n \"In your league a win gives a team 2 points, a draw gives \"\n \"both teams 1 point. After some games you have to compute \"\n \"the order of the teams in your league. You use the following \"\n \"criteria to arrange the teams:</p>\"\n \"<ul><li>- Points</li>\"\n \"<li>- Scoring differential (the difference between goals \"\n \"scored and those conceded)</li>\"\n \"<li>- Goals scored</li></ul>\")\n\n test_data = [\n (6,\n [[0, 5, 2, 2],\n [1, 4, 0, 2],\n [2, 3, 1, 2],\n [1, 5, 2, 2],\n [2, 0, 1, 1],\n [3, 4, 1, 1],\n [2, 5, 0, 2],\n [3, 1, 1, 1],\n [4, 0, 2, 0]],\n [4, 4, 6, 3, 1, 2]),\n (6,\n [[0, 5, 2, 0],\n [1, 4, 2, 2],\n [2, 3, 1, 3],\n [1, 5, 0, 0],\n [2, 0, 2, 1],\n [3, 4, 3, 1]],\n [2, 3, 4, 1, 5, 6]),\n (4,\n [[0, 3, 1, 1],\n [1, 2, 2, 2],\n [1, 3, 2, 0],\n [2, 0, 2, 0]],\n [3, 1, 1, 3]),\n (10,\n [],\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]),\n (8,\n [[0, 7, 2, 0]],\n [1, 2, 2, 2, 2, 2, 2, 8])\n ]\n\n for data in test_data:\n number = data[0]\n games = data[1]\n expected = data[2]\n actual_result = compute_ranks(number, games)\n print_log(number=number,\n games=games,\n expected=expected,\n actual_result=actual_result)\n\n with allure.step(\"Enter a test data and verify the result:\"):\n self.assertEqual(expected, actual_result)", "def test_university():\n test_path = tempfile.mkdtemp()\n x_train, metadata = university(test_path)\n try:\n assert x_train.shape == (62, 17)\n except:\n shutil.rmtree(test_path)\n raise()", "def test_api():\n # person id for one long time employee\n content = get_person(10050)\n assert content['preferredName'].endswith('immel')", "def testBeliefs1sk(self):", "def test_Bernoulli_NB_estimators():", "def test_create_boat(self):\n pass", "def test_intent_classifier_get_details(self):\n pass", "def test_intent_classifier_test(self):\n pass", "def test_get_foods(self):\n pass", "def test_intent_classifier_add_testing_samples(self):\n pass", "def test_smith():\n ranks = [[1,3,2],\n [2,1,3],\n [3,2,1]]\n \n ranks = np.array(ranks)\n out = smith_set(ranks=ranks)\n assert 0 in out\n assert 1 in out\n assert 2 in out\n assert len(out) == 3\n return", "def test_intent_classifier_get_details_all(self):\n pass", "def test_how_many_friends(self):\n expected = [\n (1, 3), 
(2, 3), (3, 3), (5, 3), (8, 3),\n (0, 2), (4, 2), (6, 2), (7, 2), (9, 1),\n ]\n self.assertEqual(expected, self.users.how_many_friends())", "def test_summarize_recipe(self):\n pass", "def test_is_ghibli_api_request_working(self):\n\n films = retrieveMoviesFromGhibliAPI()\n self.assertIs(type(films), list)\n\n for film in films:\n self.assertTrue('people' in film)", "def test_get_similar_recipes(self):\n pass", "def test_books_model_name(self):\n data = self.data1\n self.assertEqual(str(data),\"The Ultimate Guide to using YouTube Live to Engage Your Audience\")", "def test_21_29(self):\r\n husband = Person()\r\n husband.id = \"I01\"\r\n husband.name = \"Jotaro /Kujo/\"\r\n husband.gender = \"F\"\r\n husband.birthDate = \"29 SEP 1918\"\r\n husband.age = 101\r\n husband.alive = True\r\n husband.death = \"N/A\"\r\n husband.child = [\"I03\"]\r\n husband.spouse = [\"I02\"]\r\n\r\n wife = Person()\r\n wife.id = \"I02\"\r\n wife.name = \"Marry Sue\"\r\n wife.gender = \"M\"\r\n wife.birthDate = \"12 Jan 1988\"\r\n wife.age = 80\r\n wife.alive = False\r\n wife.death = \"23 JAN 2020\"\r\n wife.child = []\r\n wife.spouse = [\"I01\"]\r\n\r\n test_family = Family()\r\n test_family.id = \"F01\"\r\n test_family.married = \"29 SEP 1993\"\r\n test_family.divorce = \"25 JAN 2020\"\r\n test_family.husbandID = \"I01\"\r\n test_family.husbandName = \"Morgan Sue\"\r\n test_family.wifeID = \"I02\"\r\n test_family.wifeName = \"Marry Sue\"\r\n test_family.chidren = [\"I03\"]\r\n\r\n personList = [husband, wife]\r\n familiList = [test_family]\r\n\r\n self.assertEqual(us21(personList, familiList), \"Husband in family is not male or wife in family is not female\")\r\n self.assertEqual(us29(personList), ['I02'])", "def test_basketballteams_get(self):\n pass", "def test_make_classification_umap(self):\n\n ## produce random data\n X, y = make_classification(\n n_samples=200,\n n_features=100,\n n_informative=20,\n n_redundant=10,\n n_classes=3,\n random_state=42,\n )\n\n ## visualize data with UMAP\n umap = UMAPVisualizer(random_state=87)\n umap.fit(X, y)\n\n self.assert_images_similar(umap, tol=40)", "def test_check_yahtzee_bonus_true(self):\n\n yahtzee_bonus_fixtures = [[1, 1, 1, 1, 1],\n [2, 2, 2, 2, 2],\n [3, 3, 3, 3, 3],\n [4, 4, 4, 4, 4],\n [5, 5, 5, 5, 5],\n [6, 6, 6, 6, 6],\n ]\n\n for fixture in yahtzee_bonus_fixtures:\n score = self.roll.check_yahtzee_bonus(fixture)\n\n self.assertEqual(score, 50)\n self.assertEqual(len(fixture), 5)", "def testSanityChecksMetacommunityApplication(self):\n self.assertEqual(self.t1.get_species_richness(), self.t4.get_species_richness(4))\n self.assertEqual(74, self.t4.get_species_richness(4))\n self.assertEqual(1, self.t2.get_species_richness(2))\n self.assertEqual(self.t2.get_species_richness(2), self.t3.get_species_richness(3))", "def test_intent_classifier_retrieve(self):\n pass" ]
[ "0.590538", "0.58260053", "0.5768046", "0.57615876", "0.56201154", "0.5576002", "0.5468112", "0.5445976", "0.5409438", "0.5399367", "0.53626776", "0.5362031", "0.53528714", "0.5347987", "0.5343849", "0.531972", "0.5312878", "0.5309637", "0.53079206", "0.5297613", "0.5290937", "0.5282928", "0.52753836", "0.52727807", "0.5270128", "0.5268202", "0.5263162", "0.526115", "0.5254704", "0.52546996" ]
0.699782
0
A wrapper that computes the dot product for a multidimensional image, as is often used in the pipeline. The input image should be C-contiguous.
def dot_image(image, B):
    imshape = image.shape
    if not image.flags['C_CONTIGUOUS']:
        raise TypeError('Error: cannot deal with non-C-contiguous image')
    output = gemm(1.0, image.reshape((np.prod(imshape[:-1]), imshape[-1])), B)
    return output.reshape(imshape[:-1] + (B.shape[1],))
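For reference, a minimal self-contained sketch of the same reshape-multiply-reshape pattern using plain numpy. In the snippet above, np and gemm are assumed to come from the surrounding module (numpy and a BLAS-style matrix-multiply helper); here np.dot stands in for gemm, and the function name is illustrative rather than part of the original code.

import numpy as np

def dot_image_sketch(image, B):
    # image: array of shape (..., C), e.g. (H, W, C); B: matrix of shape (C, K)
    if not image.flags['C_CONTIGUOUS']:
        raise TypeError('cannot deal with a non-C-contiguous image')
    imshape = image.shape
    # collapse the leading dimensions into rows, multiply, then restore them
    flat = image.reshape(np.prod(imshape[:-1]), imshape[-1])
    out = np.dot(flat, B)
    return out.reshape(imshape[:-1] + (B.shape[1],))

# usage: project a 4x5 image with 3 channels down to 2 channels per pixel
img = np.ascontiguousarray(np.random.rand(4, 5, 3))
W = np.random.rand(3, 2)
assert dot_image_sketch(img, W).shape == (4, 5, 2)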
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dot(self):\n self.img[self.l_i / 2, self.l_i / 2] = 1.", "def dot(self, vec):\n pass", "def dot(a, b, out=None):\n\n if len(a.shape) != 2 or len(b.shape) != 2:\n raise (ValueError, \"only 2-D matrices supported\")\n\n if a.shape[1] != b.shape[0]:\n raise (ValueError,\n \"last dimension of `a` does not match first dimension of `b`\")\n\n l, m, n = a.shape[0], a.shape[1], b.shape[1]\n\n if out is not None:\n if out.shape != (l, n):\n raise (ValueError, \"`out` array does not have the correct shape\")\n else:\n f = tb.openFile('dot.h5', 'w')\n filters = tb.Filters(complevel=5, complib='blosc')\n out = f.createCArray(f.root, 'out', tb.Atom.from_dtype(a.dtype),\n shape=(l, n), filters=filters)\n\n # Compute a good block size\n buffersize = OOC_BUFFER_SIZE\n bl = math.sqrt(buffersize / out.dtype.itemsize)\n bl = 2**int(math.log(bl, 2))\n for i in range(0, l, bl):\n for j in range(0, n, bl):\n for k in range(0, m, bl):\n a0 = a[i:min(i+bl, l), k:min(k+bl, m)]\n b0 = b[k:min(k+bl, m), j:min(j+bl, n)]\n out[i:i+bl, j:j+bl] += np.dot(a0, b0)\n\n return out", "def dot(*Ms):\n N = Ms[0]\n for M in Ms[1:]:\n N = np.dot(N, M)\n return N", "def dot(a, b):\n return np.vdot(a.arr,b.arr)", "def dot_product(x, kernel):\n return K.squeeze(K.dot(x, K.expand_dims(kernel)), axis=-1)", "def get_cell_dot(self):\n\n cell_dot = np.zeros((3, 3))\n\n for m in range(3):\n for n in range(3):\n cell_dot[m, n] = np.dot(self.cell[m], self.cell[n])\n\n return cell_dot", "def dot(array1, array2):\n return Nd4jArray(array1.array.mmul(array2.array))", "def dot(self, x):\n pass", "def reshape_dot(A, X, Y, tag=None):\n badshape = False\n ashape = (1,) if A.shape == () else A.shape\n xshape = (1,) if X.shape == () else X.shape\n if A.shape == ():\n incshape = X.shape\n elif X.shape == ():\n incshape = A.shape\n elif X.ndim == 1:\n badshape = ashape[-1] != xshape[0]\n incshape = ashape[:-1]\n else:\n badshape = ashape[-1] != xshape[-2]\n incshape = ashape[:-1] + xshape[:-2] + xshape[-1:]\n\n if (badshape or incshape != Y.shape) and incshape != ():\n raise ValueError('shape mismatch in %s: %s x %s -> %s' % (\n tag, A.shape, X.shape, Y.shape))\n\n # If the result is scalar, we'll reshape it so Y[...] 
+= inc works\n return incshape == ()", "def dot(self,n_,x,y): # 3\n if x is None: raise TypeError(\"Invalid type for argument x\")\n if x is None:\n x_ = None\n else:\n try:\n x_ = memoryview(x)\n except TypeError:\n try:\n _tmparr_x = array.array(\"d\",x)\n except TypeError:\n raise TypeError(\"Argument x has wrong type\")\n else:\n x_ = memoryview(_tmparr_x)\n \n else:\n if x_.format != \"d\":\n x_ = memoryview(array.array(\"d\",x))\n \n if x_ is not None and len(x_) != (n_):\n raise ValueError(\"Array argument x has wrong length\")\n if y is None: raise TypeError(\"Invalid type for argument y\")\n if y is None:\n y_ = None\n else:\n try:\n y_ = memoryview(y)\n except TypeError:\n try:\n _tmparr_y = array.array(\"d\",y)\n except TypeError:\n raise TypeError(\"Argument y has wrong type\")\n else:\n y_ = memoryview(_tmparr_y)\n \n else:\n if y_.format != \"d\":\n y_ = memoryview(array.array(\"d\",y))\n \n if y_ is not None and len(y_) != (n_):\n raise ValueError(\"Array argument y has wrong length\")\n res,resargs = self.__obj.dot(n_,x_,y_)\n if res != 0:\n raise Error(rescode(res),\"\")\n _xty_return_value = resargs\n return _xty_return_value", "def multi_dot(arrays, out=None):\n\n n = len(arrays)\n\n if n < 2:\n checker_throw_value_error(\"multi_dot\", \"arrays\", n, \">1\")\n\n result = arrays[0]\n for id in range(1, n):\n result = dpnp.dot(result, arrays[id])\n\n return result", "def dot(x,y):\n\treturn sum([xi*yi for (xi,yi) in zip(x,y)])", "def dot_product(x, kernel):\n if K.backend() == 'tensorflow':\n # K.expand_dims 默认axis=-1\n return K.squeeze(K.dot(x, K.expand_dims(kernel)), axis=-1)\n else:\n return K.dot(x, kernel)", "def _dot(a, b):\n return np.einsum('ijk,ikl->ijl', a, b)", "def dot_product(x, kernel):\n if K.backend() == 'tensorflow':\n return K.squeeze(K.dot(x, K.expand_dims(kernel)), axis=-1)\n else:\n return K.dot(x, kernel)", "def dot_product(x, kernel):\n if K.backend() == 'tensorflow':\n return K.squeeze(K.dot(x, K.expand_dims(kernel)), axis=-1)\n else:\n return K.dot(x, kernel)", "def dot_product(x, kernel):\n if K.backend() == 'tensorflow':\n return K.squeeze(K.dot(x, K.expand_dims(kernel)), axis=-1)\n else:\n return K.dot(x, kernel)", "def dot_product(x, kernel):\n if K.backend() == 'tensorflow':\n return K.squeeze(K.dot(x, K.expand_dims(kernel)), axis=-1)\n else:\n return K.dot(x, kernel)", "def dot_product(x, kernel):\r\n if K.backend() == 'tensorflow':\r\n return K.squeeze(K.dot(x, K.expand_dims(kernel)), axis=-1)\r\n else:\r\n return K.dot(x, kernel)", "def dot(pepx1, pepx2):\n\n Ls = pepx1.shape\n assert pepx2.shape==Ls, '[dot]: sizes of pepx1 and pepx2 are not equal'\n new_pepx = np.empty(Ls, dtype=np.object)\n new_lams = np.empty(pepx1.lambdas.shape, dtype=np.object)\n\n # if np.all([ pepx1[i].ndim==3 and pepx2[i].ndim==3 for i in np.ndenumerate(pepx1) ]):\n # return peps_dot(pepx1,pepx2)\n # else:\n for idx in np.ndindex(Ls):\n len_dp1 = len(pepx1.phys_bonds[idx])\n len_dp2 = len(pepx2.phys_bonds[idx])\n ax1 = [0,2,4,6] + range(8, 8+len_dp1)\n ax2 = [1,3,5,7] + range(8+len_dp1-1,8+len_dp1+len_dp2-1)\n ax2[-len_dp2] = ax1[-1] # contract vertical bonds (mpx1 down with mpx2 up)\n new_site = np.einsum(pepx1[idx],ax1,pepx2[idx],ax2)\n new_pepx[idx] = tf.reshape(new_site,'ii,ii,ii,ii,...',group_ellipsis=False)\n\n i,j = idx\n for xx in range(new_lams.shape[2]):\n new_lams[i,j,xx] = np.outer(pepx1.lambdas[i,j,xx], pepx2.lambdas[i,j,xx]).reshape(-1)\n # print new_lams[i,j,xx].shape\n\n return PEPX_GL(new_pepx,new_lams) #,pepx1.phys_bonds)", "def dot(a, b):\n raise 
NotImplementedError", "def convert_dot(g, op, block):\n\n # x, y should be 1D or 2D tensor\n # when it's 2D tensor, the first dimension means batch dimension\n x = g.get_node(op.input(\"X\")[0])\n y = g.get_node(op.input(\"Y\")[0])\n\n out = _op.sum(_op.multiply(x, y), axis=[-1], keepdims=True)\n g.add_node(op.output(\"Out\")[0], out)", "def dot( v1, v2 ):\n return sum( x*y for x,y in izip(v1,v2) )", "def dot_as_einsum(x: JaxExpression, y: JaxExpression, params: Params) -> Einsum:\n dimension_numbers = params['dimension_numbers']\n (x_contract, y_contract), (x_batch, y_batch) = dimension_numbers\n x_ndim, y_ndim = len(x.shape), len(y.shape)\n letter_iter = einsum.einsum_letters()\n x_dims = ''.join(it.islice(letter_iter, x_ndim))\n y_dims = list(it.islice(letter_iter, y_ndim))\n for x_dim, y_dim in zip(x_contract + x_batch, y_contract + y_batch):\n y_dims[y_dim] = x_dims[x_dim]\n y_dims = ''.join(y_dims)\n out_batch_dims = [x_dims[dim] for dim in x_batch]\n out_dims = out_batch_dims + ([xd for xd in x_dims if xd not in y_dims] +\n [yd for yd in y_dims if yd not in x_dims])\n out_dims = ''.join(out_dims)\n return Einsum(f'{x_dims},{y_dims}->{out_dims}', (x, y))", "def dot(self, vec):\n if not isinstance(vec, self.__class__):\n raise TypeError('Dot product operand must be a VectorArray')\n if self.nV != 1 and vec.nV != 1 and self.nV != vec.nV:\n raise ValueError('Dot product operands must have the same '\n 'number of elements.')\n return np.sum((getattr(self, d)*getattr(vec, d) for d in self.dims), 1)", "def dot(self):\n return self.__dot", "def dot_product(x, kernel):\n if K.backend() == 'tensorflow':\n # todo: check that this is correct\n return K.squeeze(K.dot(x, K.expand_dims(kernel)), axis=-1)\n else:\n return K.dot(x, kernel)", "def vdot(x, v, pub):\n x_flatten = x.flatten()\n v_flatten = v.flatten()\n mul_res = paillier_gpu.mul_impl(v_flatten, x_flatten)\n\n return paillier_gpu.sum_impl(mul_res)", "def dot(self, other):\n checkVector(self, other)\n dots = self.client.map(_call_dot, self.vecDask, other.vecDask, pure=False)\n # Adding all the results together\n dot = 0.0\n for future, result in daskD.as_completed(dots, with_results=True):\n dot += np.float64(result)\n return dot" ]
[ "0.6049258", "0.5814721", "0.5721821", "0.57042724", "0.5681423", "0.567958", "0.564207", "0.5631767", "0.5609443", "0.5586889", "0.5583641", "0.55492735", "0.54939", "0.54839337", "0.54573774", "0.5454269", "0.5454269", "0.5454269", "0.5454269", "0.54445654", "0.5423928", "0.54198736", "0.5416624", "0.5404385", "0.5403481", "0.5402115", "0.53976643", "0.5390436", "0.5386033", "0.53315365" ]
0.69416124
0
An mpi implementation of the mean over different nodes.
def mpi_mean(data):
    s_local = data.sum(0)
    m = np.empty_like(s_local)
    mpi.COMM.Allreduce(s_local, m)
    num_data = mpi.COMM.allreduce(data.shape[0])
    m /= float(num_data)
    return m
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mean_photon_v(pk1,pk2,pk3,mu1,mu2,mu3):\n return pk1*mu1 + pk2*mu2 + pk3*mu3", "def mpi_avg_grads(module):\n if num_procs()==1:\n return\n for p in module.parameters():\n p_grad = p.grad.cpu()\n p_grad_numpy = p_grad.numpy() # numpy view of tensor data\n avg_p_grad = mpi_avg(p_grad_numpy)\n p_grad_numpy[:] = avg_p_grad[:]\n p.grad.copy_(p_grad)", "def mean_photon_a(P,mu):\n return np.dot(P, mu)", "def ensemble_mean(self):\n return self.mean(dim='mem')", "def reduce(nodes):\r\n accum = tc.mean(nodes.mailbox['m'], 1).cuda()\r\n return {'h': accum}", "def compute_cluster_ensemble(var, indicesOnCluster, maxIndices, indicesToParticle): #{{{\n\n num_clusters = maxIndices.shape[0]\n if len(var.shape) == 1:\n meanvar = np.zeros((num_clusters,))\n elif len(var.shape) == 2:\n meanvar = np.zeros((var.shape[0],num_clusters))\n else:\n warnings.warn('did not have correct shape for ' + str(var) + ' with len(var.shape)='+ str(len(var.shape)))\n meanvar = None\n\n for aCluster, maxInd in enumerate(maxIndices):\n # get particles in cluster\n particles = indicesToParticle[indicesOnCluster[aCluster,0:maxInd]]\n\n # compute mean depending upon size of array\n if len(var.shape) == 1:\n meanvar[aCluster] = np.mean(var[particles])\n if len(var.shape) == 2:\n meanvar[:,aCluster] = np.mean(var[:,particles], axis=1)\n\n return meanvar #}}}", "def ensemble_mean(self):\n self.cube = self.cube_ensemble_mean(self.cube)\n self.processes.append('ensemble_mean')\n return self.cube", "def ensemble_mean(self):\n new_cubelist = []\n for cube in self.cubelist:\n new_cubelist.append(self.cube_ensemble_mean(cube))\n self.cubelist = iris.cube.CubeList(new_cubelist)\n self.processes.append('ensemble_mean')\n return self.cubelist", "def mean(self):\n return np.average(self.particles, weights=self.weights, axis=0)", "def _mean(self,gp):\r\n return self.gp_link.transf(gp)", "def _mean(self,gp):\r\n return self.gp_link.transf(gp)", "def _mean(self,gp):\r\n return self.gp_link.transf(gp)", "def _mean(self,gp):\r\n return self.gp_link.transf(gp)", "def _mean(self,gp):\r\n return self.gp_link.transf(gp)", "def get_mean(self):\r\n for i in range(1,len(self.data[0])):\r\n self.prom.append(np.mean(self.data[:,i]))", "def _compute_global_mean(self, dataset, session, limit=None):\n _dataset = dataset\n mean = 0.\n if isinstance(limit, int):\n _dataset = _dataset[:limit]\n if isinstance(_dataset, np.ndarray) and not self.global_mean_pc:\n mean = np.mean(_dataset)\n else:\n # Iterate in case of non numpy data\n for i in range(len(dataset)):\n if not self.global_mean_pc:\n mean += np.mean(dataset[i]) / len(dataset)\n else:\n mean += (np.mean(dataset[i], axis=(0, 1),\n keepdims=True) / len(dataset))[0][0]\n self.global_mean.assign(mean, session)\n return mean", "def _compute_mean(index, M, R, rake):\r\n mean = (a1[index] + _compute_linear_magnitude_term(index, M) + _compute_quadratic_magnitude_term(index, M) +\r\n _compute_logarithmic_distance_term(index, M, R) + _compute_faulting_style_term(index, rake))\r\n\r\n return mean", "def _compute_mean(index, M, R, rake):\r\n mean = (a1[index] + _compute_linear_magnitude_term(index, M) + _compute_quadratic_magnitude_term(index, M) +\r\n _compute_logarithmic_distance_term(index, M, R) + _compute_faulting_style_term(index, rake))\r\n\r\n return mean", "def _compute_global_mean(self, dataset, session, limit=None):\n _dataset = dataset\n mean = 0.\n if isinstance(limit, int):\n _dataset = _dataset[:limit]\n if isinstance(_dataset, np.ndarray):\n mean = np.mean(_dataset)\n else:\n # Iterate in case 
of non numpy data\n for i in range(len(dataset)):\n mean += np.mean(dataset[i]) / len(dataset)\n self.global_mean.assign(mean, session)\n return mean", "def _compute_batch_moments(x):\n return torch.mean(x, dim=(0, 2, 3), keepdim=True), torch.var(x, dim=(0, 2, 3), keepdim=True)", "def matrix_mean(matrix):\n return sum(map(mean,matrix))", "def mape(x, y):\n return statistics.mean(ape(x, y))", "def mean_allcnnc():\n # TODO implement pre forward hook to adapt to arbitary image size for other data sets than cifar100\n return nn.Sequential(\n nn.AvgPool2d(kernel_size=(6, 6)),\n flatten()\n )", "def get_mean(self):\n raise NotImplementedError(\"This is an abstract method and needs to be implemented in derived classes.\")", "def test_mean_metric_broadcast(nan_strategy):\n metric = MeanMetric(nan_strategy=nan_strategy)\n\n x = torch.arange(5).float()\n x[1] = torch.tensor(float(\"nan\"))\n w = torch.arange(5).float()\n\n metric.update(x, w)\n res = metric.compute()\n assert round(res.item(), 4) == 3.2222 # (0*0 + 2*2 + 3*3 + 4*4) / (0 + 2 + 3 + 4)\n\n x = torch.arange(5).float()\n w = torch.arange(5).float()\n w[1] = torch.tensor(float(\"nan\"))\n\n metric.update(x, w)\n res = metric.compute()\n assert round(res.item(), 4) == 3.2222 # (0*0 + 2*2 + 3*3 + 4*4) / (0 + 2 + 3 + 4)", "def mean_cluster(self, labelled_cluster):\n sum_of_points = self.sum_cluster(labelled_cluster)\n size_cluster = len(labelled_cluster)\n if self.sigma_cl1:\n size_cluster += np.sqrt(2)*self.sigma_cl1*np.random.randn()\n mean_of_points = sum_of_points * (1.0 / size_cluster)\n return mean_of_points", "def _mean_pooling(self, x, x_mask):\n return torch.mean(x, 1)\n # x_lens = x_mask.data.eq(0).long().sum(dim=1)\n # if self.use_cuda:\n # weights = Variable(torch.ones(x.size()).cuda() / x_lens.unsqueeze(1).float())\n # else:\n # weights = Variable(torch.ones(x.size()) / x_lens.unsqueeze(1).float())\n # weights.data.masked_fill_(x_mask.data, 0.0)\n # output = torch.bmm(x.transpose(1, 2), weights.unsqueeze(2)).squeeze(2)\n # return output", "def _calc_u_matrix_means(self) -> None:\n for u_node in itertools.product(\n range(self.n_rows * 2 - 1), range(self.n_columns * 2 - 1)\n ):\n if not (u_node[0] % 2) and not (u_node[1] % 2):\n # SOM nodes -> mean over 2-4 values\n\n nodelist = []\n if u_node[0] > 0:\n nodelist.append((u_node[0] - 1, u_node[1]))\n if u_node[0] < self.n_rows * 2 - 2:\n nodelist.append((u_node[0] + 1, u_node[1]))\n if u_node[1] > 0:\n nodelist.append((u_node[0], u_node[1] - 1))\n if u_node[1] < self.n_columns * 2 - 2:\n nodelist.append((u_node[0], u_node[1] + 1))\n self.u_matrix[u_node] = self._get_u_mean(nodelist)\n\n elif (u_node[0] % 2) and (u_node[1] % 2):\n # mean over four\n\n self.u_matrix[u_node] = self._get_u_mean(\n [\n (u_node[0] - 1, u_node[1]),\n (u_node[0] + 1, u_node[1]),\n (u_node[0], u_node[1] - 1),\n (u_node[0], u_node[1] + 1),\n ]\n )", "def _get_u_mean(self, nodelist: List[Tuple[int, int]]) -> Optional[float]:\n meanlist = [self.u_matrix[u_node] for u_node in nodelist]\n u_mean = None\n if self.u_mean_mode_ == \"mean\":\n u_mean = np.mean(meanlist)\n elif self.u_mean_mode_ == \"median\":\n u_mean = np.median(meanlist)\n elif self.u_mean_mode_ == \"min\":\n u_mean = np.min(meanlist)\n elif self.u_mean_mode_ == \"max\":\n u_mean = np.max(meanlist)\n return u_mean", "def convert_mean(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n mx_axis = attrs.get(\"axis\", None)\n axes = convert_string_to_list(str(mx_axis)) if mx_axis is not None else None\n\n keepdims = 
get_boolean_attribute_value(attrs, \"keepdims\")\n\n if axes is not None:\n node = onnx.helper.make_node(\n 'ReduceMean',\n inputs=input_nodes,\n outputs=[name],\n axes=axes,\n keepdims=keepdims,\n name=name\n )\n\n return [node]\n else:\n node = onnx.helper.make_node(\n 'ReduceMean',\n inputs=input_nodes,\n outputs=[name],\n keepdims=keepdims,\n name=name\n )\n\n return [node]" ]
[ "0.6522529", "0.63845176", "0.6283875", "0.62613827", "0.61774707", "0.6149247", "0.6097366", "0.60199577", "0.6002639", "0.5977115", "0.5977115", "0.5977115", "0.5977115", "0.5977115", "0.58853865", "0.5786438", "0.5728149", "0.5728149", "0.5717552", "0.5708904", "0.5685021", "0.56758475", "0.5612727", "0.56060284", "0.56002855", "0.5592658", "0.55770844", "0.5576861", "0.5573748", "0.5555922" ]
0.73965716
0
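As a minimal, illustrative sketch (assumptions: mpi4py and NumPy are installed; the function name, script name and launch command below are hypothetical, not taken from the dataset record), the Allreduce-based mean pattern shown above can be exercised like this:

# Illustrative sketch only -- mirrors the mpi_mean pattern using mpi4py directly.
# Launch with several ranks, e.g.: mpiexec -n 4 python allreduce_mean_demo.py
import numpy as np
from mpi4py import MPI

comm = MPI.COMM_WORLD

def allreduce_mean(data, comm=comm):
    # data: local (n_local, d) block held by this rank
    s_local = data.sum(0)                    # per-feature sum of the local block
    total = np.empty_like(s_local)
    comm.Allreduce(s_local, total)           # element-wise sum across ranks (op defaults to SUM)
    n_total = comm.allreduce(data.shape[0])  # total number of rows across all ranks
    return total / float(n_total)

if __name__ == "__main__":
    local_block = np.random.randn(10, 3)
    print(comm.Get_rank(), allreduce_mean(local_block))

Every rank ends up with the same global per-feature mean, which is what lets the std and covariance records that follow reuse the same pattern.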
An mpi implementation of the std over different nodes.
def mpi_std(data):
    m = mpi_mean(data)
    data_centered = data - m
    data_centered **= 2
    std_local = data_centered.sum(0)
    std = np.empty_like(std_local)
    mpi.COMM.Allreduce(std_local, std)
    num_data = mpi.COMM.allreduce(data.shape[0])
    std /= float(num_data)
    return std
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parallel_generate_particle_distribution(self, max_loop = np.inf, Ncore = 1, outfile=None):\n \n self.pos = np.zeros((self.N_part, 3))\n self.vel = np.zeros((self.N_part, 3))\n \n \n # start running\n nmax = self.N_part / Ncore\n #pool = Pool(processes = Ncore)\n #pool.apply_async(_while_loop,)\n #result = pool.map(_while_loop, args=(self, nmax, max_loop,))\n #print result.get(timeout = 100)\n #p = Process(target=_while_loop, args=(nmax, max_loop,))\n jobs = []\n for i in np.arange(Ncore):\n p = multiprocessing.Process(target=_while_loop, args=(self, nmax, max_loop, \n Ncore, outfile,))\n jobs.append(p)\n p.start()\n \n for p in jobs:\n p.join()\n \n #results = [None]*self.N_part\n #results = [OUTPUT.get() for p in jobs]\n \n #results = np.array(results)\n \n #pos = results[:,0]\n #pos = pos.reshape(self.N_part,3)\n #self.pos = pos\n \n #vel = results[:,1]\n #vel = vel.reshape(self.N_part,3)\n #self.vel = vel\n \n \n #if (not outfile == None):\n # self.write_pd(outfile)\n # combine to a single output\n bash_command = \"cat \"\n for i in np.arange(Ncore) + 1:\n temp_name = outfile + \"_%02i_\"%(i) + \".temp\"\n bash_command = bash_command + temp_name + \" \"\n bash_command = bash_command + \"> \" + outfile\n os.system(bash_command)\n \n # now remove temporary files\n bash_command = \"rm \"\n for i in np.arange(Ncore) + 1:\n temp_name = outfile + \"_%02i_\"%(i) + \".temp\"\n bash_command = bash_command + temp_name + \" \"\n os.system(bash_command)\n \n bash_command = \"sed -i -e '1i#m x y z vx vy vz\\' \" + outfile\n os.system(bash_command)\n self.load_particle_ic(outfile)\n \n return self.pos, self.vel", "def _compute_global_std(self, dataset, session, limit=None):\n _dataset = dataset\n std = 0.\n if isinstance(limit, int):\n _dataset = _dataset[:limit]\n if isinstance(_dataset, np.ndarray) and not self.global_std_pc:\n std = np.std(_dataset)\n else:\n for i in range(len(dataset)):\n if not self.global_std_pc:\n std += np.std(dataset[i]) / len(dataset)\n else:\n std += (np.std(dataset[i], axis=(0, 1),\n keepdims=True) / len(dataset))[0][0]\n self.global_std.assign(std, session)\n return std", "def _compute_global_std(self, dataset, session, limit=None):\n _dataset = dataset\n std = 0.\n if isinstance(limit, int):\n _dataset = _dataset[:limit]\n if isinstance(_dataset, np.ndarray):\n std = np.std(_dataset)\n else:\n for i in range(len(dataset)):\n std += np.std(dataset[i]) / len(dataset)\n self.global_std.assign(std, session)\n return std", "def Std(data):\n return data.std()", "def build_std(self):\n param = self.param\n meansp = self.mean()\n stdsp = self.std()\n num_noise = 200\n noise = np.random.normal(1,0.005,(num_noise,self.wvl.size)) # add 0.5% variance to signal at all wavelengths\n # should be at every sp in utc, but for now, use mean sp\n sp_arr = meansp*noise\n #import code; code.interact(local=locals())\n par_noisy = np.array(list(map(lambda tt:param(sp_arr[tt,:],self.wvl),xrange(num_noise))))\n notaxis = tuple(np.where(par_noisy.shape != self.npar)[0])\n stdpar = np.nanstd(par_noisy,axis=notaxis)\n self.stdpar = stdpar\n return stdpar", "def mpirun(self):\n comm = MPI.COMM_WORLD\n rank = comm.Get_rank()\n size = comm.Get_size()\n print rank \n print size\n data = []\n dcds = self.getdcds()\n for i in range(0, len(dcds)):\n pid = i % size \n if pid == rank:\n dcd = dcds[i]\n dcdpath = self.d + \"/\" + dcd\n data.extend(self.metric(self.dcdtopsf(dcd), dcdpath))\n self.write(data)", "def std(self) -> \"Stream[float]\":\n return self.agg(lambda x: np.std(x, 
ddof=1)).astype(\"float\")", "def Means_Stds(self):\n self.means=[] # list taking care for the means of ll experiments\n self.stds=[] # list taking care fro the Stds of all experiments\n for replica in self.exper(): # remember self.exper, from above returns ListExperiments\n mean, Std = self._ReplicaStats(replica.T) # here calculates the means and Stds. WE have to transpose the matrix. .T stands for transpose\n self.means.append(mean) # the calculted data for each experiment is gethered in one place\n self.stds.append(Std)\n #print(self.means, self.stds)\n return self.means, self.stds", "def gather_ps(rank, size, comm, k_allmodels, P21_allmodels, PHII_allmodels,\n first_snap_allmodels, last_snap_allmodels):\n\n def generate_tag(rank):\n tag = int(rank*100)\n\n return tag\n\n # Rank 0 will gather the wavenumber bins/power spectra from all other\n # ranks. \n if rank == 0:\n k_master = []\n P21_master = []\n PHII_master = []\n\n # Go through each model. \n for model_number in range(len(k_allmodels)):\n\n k_master.append([])\n P21_master.append([])\n PHII_master.append([])\n\n model_k = k_allmodels[model_number]\n model_P21 = P21_allmodels[model_number]\n model_PHII = PHII_allmodels[model_number]\n\n num_snaps = last_snap_allmodels[model_number] - \\\n first_snap_allmodels[model_number]\n rank_count = 0\n my_count = 0\n\n # Then go through each snapshot.\n # In the main data loop (``generate_data()``) the snapshots are\n # scatter sequentially. Hence when we gather, we get snap0 from\n # rank 0, snap1 from rank 1 etc. So we increase rank_count for each\n # snapshot and then reset it when we reach `size`.\n for snap_idx in range(num_snaps):\n\n if rank_count == 0:\n this_k = model_k[my_count] \n this_P21 = model_P21[my_count] \n this_PHII = model_PHII[my_count] \n my_count += 1\n else:\n # Each rank will use a unique tag.\n tag = generate_tag(rank_count) \n\n # Then the tag is offset for each data array. \n this_k = comm.recv(source = rank_count,\n tag = tag)\n this_P21 = comm.recv(source = rank_count,\n tag = tag+1)\n this_PHII = comm.recv(source = rank_count,\n tag = tag+2)\n\n # Now we have the data, append it to the master.\n k_master[model_number].append(this_k)\n P21_master[model_number].append(this_P21)\n PHII_master[model_number].append(this_PHII)\n\n rank_count += 1\n if rank_count == size:\n rank_count = 0\n\n # Snapshot Loop.\n # Model Loop.\n\n return k_master, P21_master, PHII_master\n\n else:\n\n # For all other ranks, go through the power spectra it calculated and\n # send it back to the root rank.\n for model_number in range(len(k_allmodels)):\n for idx in range(len(P21_allmodels[model_number])):\n\n tag = generate_tag(rank) \n\n k_this_idx = k_allmodels[model_number][idx]\n P21_this_idx = P21_allmodels[model_number][idx]\n PHII_this_idx = PHII_allmodels[model_number][idx]\n\n comm.send(k_this_idx, dest = 0, tag = tag)\n comm.send(P21_this_idx, dest = 0, tag = tag+1)\n comm.send(PHII_this_idx, dest = 0, tag = tag+2)\n\n # Non-zero ranks return junk.\n return None, None, None", "def mpi_run(custom_command=\"\"):\n\n from mpi4py import MPI\n\n comm = MPI.COMM_WORLD\n nprocs = comm.Get_size()\n rank = comm.Get_rank()\n # this will be the master cpu. 
This guy will create - or append - a folder,\n # being sure to be the first to do so.\n if rank == 0:\n # First initialisation\n cosmo, data, command_line, success = safe_initialisation(\n custom_command, comm, nprocs)\n\n regexp = re.match(\".*__(\\w*).txt\", data.out_name)\n suffix = regexp.groups()[0]\n # Send an \"OK\" signal to all the other processes, actually giving the\n # suffix of this master chain. All the other will add 1 to this number\n for index in range(1, nprocs):\n comm.send(suffix, dest=index, tag=1)\n else:\n # If the rank is not 0, it is a slave process. It waits to receive the\n # \"OK\" message, which is immediatly discarded.\n suffix = comm.recv(source=0, tag=1)\n\n # If a failed message was passed, exit the process\n if suffix == 'failed':\n return\n\n # Concatenate the rank to the suffix, and not the opposite, this should\n # avoid any conflicting name\n if not custom_command:\n custom_command = \" \".join(sys.argv[1:])\n custom_command += \" --chain-number %s\" % str(rank)+suffix\n cosmo, data, command_line, success = initialise(custom_command)\n\n import sampler\n sampler.run(cosmo, data, command_line)\n\n return", "def std(self):\n return self.std", "def get_std(self):\n return self.serie.std()", "def std(self):\n return self._lift(\"std\")", "def object_communicator():\n comm = MPI.COMM_WORLD", "def comm_times_single(ns, send_host, recv_host):\n\n return run_on_hosts((send_host, recv_host),\n '''python %sape/timings/communication/mpi_run_single.py \"%s\" %s %s'''%(\n ape_dir, str(ns), send_host, recv_host))", "def _std(self):\n\n\t\t#print opt.hess_inv.todense()\n\t\td = 1E-7\n\t\ttheta = self.theta\n\n\t\tTheta = np.copy(theta)\n\t\tTheta[0] = Theta[0] + d\n\t\taa1 = self.objfxn(tuple(Theta))\n\t\tTheta = np.copy(theta)\n\t\tTheta[0] = Theta[0] - d\n\t\taa2 = self.objfxn(tuple(Theta))\n\t\taa3 = self.objfxn(tuple(theta))\n\n\t\tself.stda = 1/np.sqrt((aa1 - 2*aa3 + aa2)/d**2)\n\n\t\tTheta = np.copy(theta)\n\t\tTheta[1] = Theta[1] + d\n\t\tbb1 = self.objfxn(tuple(Theta))\n\t\tTheta = np.copy(theta)\n\t\tTheta[1] = Theta[1] - d\n\t\tbb2 = self.objfxn(tuple(Theta))\n\t\tbb3 = self.objfxn(tuple(theta))\n\n\t\tself.stdb = 1/np.sqrt((bb1 - 2*bb3 + bb2)/d**2)\n\n\t\td = 1E-9\n\t\tTheta = np.copy(theta)\n\t\tTheta[2] = Theta[2] + d\n\t\tcc1 = self.objfxn(tuple(Theta))\n\t\tTheta = np.copy(theta)\n\t\tTheta[2] = Theta[2] - d\n\t\tcc2 = self.objfxn(tuple(Theta))\n\t\tcc3 = self.objfxn(tuple(theta))\n\n\t\tself.stdc = 1/np.sqrt((cc1 - 2*cc3 + cc2)/d**2)\n\n\t\treturn self", "def sim_split_no_mig(params, ns):\n #5 parameters\t\n nuA, nu1, nu2, nu3, T1 = params\n sts = moments.LinearSystem_1D.steady_state_1D(ns[0] + ns[1] + ns[2])\n fs = moments.Spectrum(sts)\n fs = moments.Manips.split_1D_to_2D(fs, ns[0], ns[1] + ns[2])\n fs = moments.Manips.split_2D_to_3D_2(fs, ns[1], ns[2])\n ## Population function for T1\n nu_T1 = [nu1, nu2, nu3]\n fs.integrate(nu_T1, T1) \n return fs", "def _ReplicaStats(self, myreplica):\n \n means=[None]*len(myreplica) # creating an empty list for the means with the length of my timepoints indexes\n std=[None]*len(myreplica) # creating an empty list for the std\n for i in range(len(myreplica)):\n means[i]=np.mean(myreplica[i]) # numpy is calculating the means and std for every row and then add it to the list\n std[i]=np.std(myreplica[i])\n #print(means, std)\n return means, std", "def initialize_mpi():\n # Check for environment variables set by mpirun. 
Variables are from\n # http://docs.roguewave.com/threadspotter/2012.1/linux/manual_html/apas03.html\n variables = ['PMI_RANK', 'OMPI_COMM_WORLD_RANK', 'OMPI_MCA_ns_nds_vpid',\n 'PMI_ID', 'SLURM_PROCID', 'LAMRANK', 'MPI_RANKID',\n 'MP_CHILD', 'MP_RANK', 'MPIRUN_RANK']\n use_mpi = False\n for var in variables:\n if var in os.environ:\n use_mpi = True\n break\n if not use_mpi:\n return None\n\n # Initialize MPI\n from mpi4py import MPI\n MPI.COMM_WORLD.barrier()\n mpicomm = MPI.COMM_WORLD\n\n # Override sys.excepthook to abort MPI on exception\n def mpi_excepthook(type, value, traceback):\n sys.__excepthook__(type, value, traceback)\n sys.stdout.flush()\n sys.stderr.flush()\n if mpicomm.size > 1:\n mpicomm.Abort(1)\n # Use our eception handler\n sys.excepthook = mpi_excepthook\n\n # Catch sigterm signals\n def handle_signal(signal, frame):\n if mpicomm.size > 1:\n mpicomm.Abort(1)\n for sig in [signal.SIGINT, signal.SIGTERM, signal.SIGABRT]:\n signal.signal(sig, handle_signal)\n\n return mpicomm", "def Skopt5DStats(numIters, numRuns):\n\n comm = MPI.COMM_WORLD\n rank = comm.Get_rank()\n\n assert comm.Get_size() == numRuns, \"Please ensure there is one process running per run i.e \" + str(numRuns) + \" processes.\"\n \n # Define the problem bounds.\n skoptBounds = [(10, 1300), (40, 230), (0, 90), (0, 90), (0, 90)]\n\n # Use the seedlist from the other runs.\n seedList = [843484, 61806, 570442, 867402, 192390, 60563, 899483, 732848, 243267, 439621] \n\n if rank == 0:\n timeList = []\n bestFoMList = []\n\n # Define which solver will be used.\n optimiser = skopt.Optimizer(skoptBounds, base_estimator = \"RF\", n_initial_points = int(np.ceil(numIters/10)), random_state = seedList[rank])\n\n # Start timing.\n startTime = time.time()\n bestFoM = 0\n\n # Start optimisation.\n for iteration in range(numIters):\n\n # Find out which point to sample next.\n nextParams = optimiser.ask()\n\n # Evaluate the objective function.\n nextFoM = FitnessSkopt5D(nextParams)\n\n if abs(nextFoM) > bestFoM:\n bestFoM = abs(nextFoM)\n \n # Update the model.\n optimiser.tell(nextParams, nextFoM)\n\n # One run complete.\n timeElapsed = time.time() - startTime\n # Run complete. Send results to main process. Tags are unique identifiers.\n if rank != 0:\n comm.send(timeElapsed, dest = 0, tag = 1)\n comm.send(bestFoM, dest = 0, tag = 2)\n \n # Wait for all the processes to end.\n comm.Barrier()\n \n if rank == 0:\n # Add own data first.\n bestFoMList.append(bestFoM)\n timeList.append(timeElapsed)\n\n for process in range(comm.Get_size() - 1):\n # Get the data.\n individualTime = None\n individualTime = comm.recv(individualTime, source = process + 1, tag = 1)\n\n individualFoM = None\n individualFoM = comm.recv(individualFoM, source = process + 1, tag = 2)\n\n bestFoMList.append(individualFoM)\n timeList.append(individualTime)\n\n avgRuntime = np.average(timeList)\n avgFoM = np.average(bestFoMList)\n avgFoMPerTime = np.average(np.divide(bestFoMList, timeList))\n avgFoMPerIter = np.average(np.divide(bestFoMList, numIters))\n absBestFoM = np.max(bestFoMList)\n\n print(\"Bayesian optimisation 5D testing complete! 
Here are the stats:\")\n print(\"Average runtime per run (s): \" + str(avgRuntime))\n print(\"Average FoM: \" + str(avgFoM))\n print(\"Average FoM per unit time: \" + str(avgFoMPerTime))\n print(\"Average FoM per unit iteration: \" + str(avgFoMPerIter))\n print(\"Absolute best FoM determined: \" + str(absBestFoM))\n print(\"------------------------------------------------------------------------------------------------------------------\")\n \n return", "def sim_split_no_mig_size(params, ns):\n #9 parameters\t\n nuA, nu1a, nu1b, nu2a, nu2b, nu3a, nu3b, T1, T2 = params\n sts = moments.LinearSystem_1D.steady_state_1D(ns[0] + ns[1] + ns[2])\n fs = moments.Spectrum(sts)\n fs = moments.Manips.split_1D_to_2D(fs, ns[0], ns[1] + ns[2])\n fs = moments.Manips.split_2D_to_3D_2(fs, ns[1], ns[2])\n ## Population function for T1\n nu_T1 = [nu1a, nu2a, nu3a]\n fs.integrate(nu_T1, T1)\n ## Population function for T2\n nu_T2 = [nu1b, nu2b, nu3b]\n fs.integrate(nu_T2, T2) \n return fs", "def split_symmig_all(params, ns):\n #10 parameters \n nu1, nuA, nu2, nu3, m1_1, m2_1, m2_2, m2_3, T1, T2 = params\n sts = moments.LinearSystem_1D.steady_state_1D(ns[0] + ns[1] + ns[2])\n fs = moments.Spectrum(sts)\n fs = moments.Manips.split_1D_to_2D(fs, ns[0], ns[1] + ns[2])\n ## Population function and migration matrix for T1\n nu_T1 = [nu1, nuA]\n mig1 = numpy.array([[0, m1_1],[m1_1, 0]])\n fs.integrate(nu_T1, T1, m=mig1)\n fs = moments.Manips.split_2D_to_3D_2(fs, ns[1], ns[2])\n ## Population function and migration matrix for T2\n nu_T2 = [nu1, nu2, nu3]\n mig2 = numpy.array([[0, m2_1, m2_3],[m2_1, 0, m2_2], [m2_3, m2_2, 0]]) \n fs.integrate(nu_T2, T2, m=mig2) \n return fs", "def stdAxisPoints(self, var):\n varID = var.id\n var = genutil.statistics.std(var, axis=\"(%s)\" % self.axis.id)\n var.id = varID\n return var", "def scatter_work(array, mpi_rank, mpi_size, root=0, dtype=np.int32):\n if mpi_rank == root:\n print(f\"Scattering array to {mpi_size} ranks\")\n scatter_total = array.size\n mod = scatter_total % mpi_size\n if mod != 0:\n print(\"Padding array for scattering...\")\n pad = -1 * np.ones(mpi_size - mod, dtype=dtype)\n array = np.concatenate((array, pad))\n scatter_total += mpi_size - mod\n assert scatter_total % mpi_size == 0\n assert scatter_total == array.size\n else:\n scatter_total = None\n\n scatter_total = comm.bcast(scatter_total, root=root)\n subset = np.empty(scatter_total//mpi_size, dtype=dtype)\n comm.Scatter(array, subset, root=root)\n\n return subset", "def test_3():\n \n\n # Functions wrapped by agents\n def f(in_streams, out_streams):\n multiply_and_add(in_streams[0], out_streams[0],\n multiplicand=2, addend=1)\n\n def g(in_streams, out_streams):\n t = Stream('t')\n filter_then_square(in_streams[0], t,\n filter_threshold=20)\n print_stream(t, name='p1')\n\n def sums(in_streams, out_streams):\n s = Stream('s')\n sum_window(in_streams[0], s, window_size=3, step_size=3)\n print_stream(s, name=' p2')\n\n processes = \\\n {\n 'source_process':\n {'in_stream_names_types': [('in', 'i')],\n 'out_stream_names_types': [('out', 'i')],\n 'compute_func': f,\n 'sources':\n {'acceleration':\n {'type': 'i',\n 'func': source_thread_target\n },\n }\n },\n 'process_1':\n {'in_stream_names_types': [('in', 'i')],\n 'out_stream_names_types': [],\n 'compute_func': g,\n 'sources': {}\n },\n 'process_2':\n {'in_stream_names_types': [('in', 'i')],\n 'out_stream_names_types': [],\n 'compute_func': sums,\n 'sources': {}\n }\n }\n \n connections = \\\n {\n 'source_process' :\n {\n 'out' : [('process_1', 'in'), 
('process_2', 'in')],\n 'acceleration' : [('source_process', 'in')]\n },\n 'process_1':\n {\n },\n 'process_2':\n {\n }\n }\n\n multicore(processes, connections)", "def std(state, avg=None, keepdims=True, is_log=False):\n return std_raw(state.particles, state.log_weights, avg, keepdims, is_log)", "def std(self):\n return self._summarize(lambda c: c.std)", "def test_2():\n \n # Functions wrapped by agents\n def f(in_streams, out_streams):\n multiply_and_add(in_streams[0], out_streams[0],\n multiplicand=2, addend=1)\n\n def g(in_streams, out_streams):\n filter_then_square(in_streams[0], out_streams[0],\n filter_threshold=20)\n\n def h(in_streams, out_streams):\n s = Stream('s')\n sum_window(in_streams[0], s, window_size=3, step_size=3)\n print_stream(s, name=s.name)\n \n\n # Specify processes and connections.\n processes = \\\n {\n 'source_process':\n {'in_stream_names_types': [('in', 'i')],\n 'out_stream_names_types': [('out', 'i')],\n 'compute_func': f,\n 'sources':\n {'acceleration':\n {'type': 'i',\n 'func': source_thread_target\n },\n },\n 'actuators': {}\n },\n 'filter_and_square_process':\n {'in_stream_names_types': [('in', 'i')],\n 'out_stream_names_types': [('filtered', 'i')],\n 'compute_func': g,\n 'sources': {},\n 'actuators': {}\n },\n 'aggregate_and_output_process':\n {'in_stream_names_types': [('in', 'i')],\n 'out_stream_names_types': [],\n 'compute_func': h,\n 'sources': {},\n 'actuators': {}\n }\n }\n \n connections = \\\n {\n 'source_process' :\n {\n 'out' : [('filter_and_square_process', 'in')],\n 'acceleration' : [('source_process', 'in')]\n },\n 'filter_and_square_process' :\n {\n 'filtered' : [('aggregate_and_output_process', 'in')],\n },\n 'aggregate_and_output_process':\n {}\n }\n\n multicore(processes, connections)", "def ol_mpi_send(data, dest: int, tag: int):\n import numba_mpi\n\n def impl(data, dest: int, tag: int) -> None:\n \"\"\"reduce a single number across all cores\"\"\"\n status = numba_mpi.send(data, dest, tag)\n assert status == 0\n\n return impl", "def make_parallel_MPI(function):\n\n def wrapper(*args, **kwargs):\n\n # Checks that the essential paremeters are there\n assert not kwargs['out_allPartTypes'] is None\n assert not kwargs['simulation_name'] is None\n\n # Generate a simulation object and oush it to **kwargs\n sim = Simulation(simulation_name=kwargs['simulation_name'])\n kwargs['simulation'] = sim\n\n # Set-up the MPI allocation schedule\n process = 0\n process_iterator = itertools.product(sim.clusterIDAllowed, sim.redshiftAllowed)\n\n for halo_num, redshift in process_iterator:\n\n if process % size == rank:\n\n cluster_obj = Cluster(clusterID=int(halo_num), redshift=redshift_str2num(redshift))\n file_name = sim.cluster_prefix + sim.halo_Num(halo_num) + redshift\n fileCompletePath = sim.pathSave + '/' + sim.simulation + '_output/collective_output/' + file_name + '.hdf5'\n\n kwargs['cluster'] = cluster_obj\n kwargs['fileCompletePath'] = fileCompletePath\n\n print('CPU ({}/{}) is processing halo {} @ z = {} ------ process ID: {}'.format(rank, size, cluster_obj.clusterID, cluster_obj.redshift, process))\n # Each CPU loops over all apertures - this avoids concurrence in file reading\n # The loop over apertures is defined explicitly in the wrapped function.\n function(*args, **kwargs)\n\n process += 1\n\n return wrapper" ]
[ "0.54028803", "0.5319086", "0.5299202", "0.5285267", "0.52729636", "0.5233467", "0.5097652", "0.5074314", "0.50097334", "0.50081116", "0.49924487", "0.49458593", "0.49275267", "0.490736", "0.489474", "0.4864296", "0.48584995", "0.4855858", "0.4847231", "0.4846435", "0.4841479", "0.48264945", "0.4823134", "0.48230973", "0.48227027", "0.48221096", "0.48196456", "0.48183885", "0.48045975", "0.4799078" ]
0.7032368
0
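A single-process, NumPy-only check (illustrative; not part of the record) of the accumulation used above — note that the snippet as shown accumulates the per-feature biased variance and does not take a square root:

# Illustrative check: centering, squaring and dividing by N gives the biased per-feature variance.
import numpy as np

data = np.random.randn(200, 5)
centered_sq = (data - data.mean(0)) ** 2
variance_like = centered_sq.sum(0) / data.shape[0]

assert np.allclose(variance_like, np.var(data, axis=0))  # np.var uses ddof=0 by default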
An mpi implementation of the covariance matrix over different nodes.
def mpi_cov(data):
    m = mpi_mean(data)
    data_centered = data - m
    cov_local = dot(data_centered.T, data_centered)
    covmat = np.empty_like(cov_local)
    mpi.COMM.Allreduce(cov_local, covmat)
    num_data = mpi.COMM.allreduce(data.shape[0])
    covmat /= float(num_data)
    return covmat
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _mn_cov_ ( self , size = -1 , root = False ) :\n #\n if size <= 0 : size = len ( self )\n size = min ( size , len ( self ) ) \n #\n from array import array\n matrix = array ( 'd' , [ 0 for i in range(0, size * size) ] )\n self.mnemat ( matrix , size )\n #\n import ostap.math.linalg\n from ostap.core.core import Ostap \n mtrx = Ostap.Math.SymMatrix ( size )() \n for i in range ( 0 , size ) :\n for j in range ( i , size ) : \n mtrx [ i , j ] = matrix [ i * size + j ]\n \n return mtrx", "def covariance(data_matrix):\n return np.asmatrix(np.cov(data_matrix, rowvar=0))", "def _covariance_matrix_prob_v1(self, merged_df, prob_vector):\n total_cov = merged_df.groupby(CYCLE_LABEL, as_index=True).cov()\n cov_matrix = 0\n for i in range(5):\n cov_matrix += total_cov.loc[i, :] * prob_vector[:, i]\n return cov_matrix", "def covariance_matrix(self):\n\n cov_filename = self.covariance_filename\n cov_press, cov_data = self._co_star_read(cov_filename)\n\n # \"Fix\" covariances that are not positive definite\n if not np.all(np.linalg.eigvals(cov_data) > 0):\n warnings.warn(\"Covariance matrix for species {} is not positive definite, modifying eigenvals\".format(self.species))\n\n # Get eigen values and vector from matrix\n eigval, eigvec = np.linalg.eig(cov_data)\n\n # Find negative eigen values and set to the media\n eigval[np.where(eigval < 0)] = np.median(eigval)\n\n # Reconstruct matrix with modified eigen values\n cov_data = eigvec @ np.diag(eigval) @ np.linalg.inv(eigvec)\n\n return cov_data", "def muscovite():\n\n rho = 2834.\n\n C = np.zeros((6,6), dtype=float)\n C[0,0] = 181.; C[0,1] = 48.8; C[0,2] = 25.6; C[0,3] = 0.; C[0,4] = -14.2; C[0,5] = 0.\n C[1,0] = C[0,1]; C[1,1] = 178.4; C[1,2] = 21.2; C[1,3] = 0.; C[1,4] = 1.1; C[1,5] = 0.\n C[2,0] = C[0,2]; C[2,1] = C[1,2]; C[2,2] = 58.6; C[2,3] = 0.; C[2,4] = 1.; C[2,5] = 0.\n C[3,0] = C[0,3]; C[3,1] = C[1,3]; C[3,2] = C[2,3]; C[3,3] = 16.5; C[3,4] = 0.; C[3,5] = -5.2\n C[4,0] = C[0,4]; C[4,1] = C[1,4]; C[4,2] = C[2,4]; C[4,3] = C[3,4]; C[4,4] = 19.5; C[4,5] = 0.\n C[5,0] = C[0,5]; C[5,1] = C[1,5]; C[5,2] = C[2,5]; C[5,3] = C[3,5]; C[5,4] = C[4,5]; C[5,5] = 72.\n\n return C, rho", "def _getCovMat(self, cov_expr):\n # store the expression\n self.expr = cov_expr\n # create a PETSC matrix for cov_mat\n cov_mat = PETSc.Mat().create()\n cov_mat.setType('aij')\n cov_mat.setSizes(self.domain.getNodes(), self.domain.getNodes())\n cov_mat.setUp()\n\n # scalar valued function is evaluated in this variable\n cov_ij = np.empty((1), dtype=float)\n # the points to evalute the expression\n xycor = np.empty((4), dtype=float)\n\n print '---------------------------'\n print '---------------------------'\n print ' Building Covariance Matrix'\n print '---------------------------'\n print '---------------------------'\n # Loop through global nodes and build the matrix for i < j because of\n # symmetric nature.\n for node_i in range(0, self.domain.getNodes()):\n # global node node_i\n for node_j in range(node_i, self.domain.getNodes()):\n # global node node_j\n temp_cov_ij = 0\n for elem_i in self.node_to_elem[node_i]:\n # elem_i : element attached to node_i\n # x1 : x co-ordinate of the centroid of element elem_i\n x1 = self.c_centroid_array[elem_i].x()\n # y1 : x co-ordinate of the centroid of element elem_i\n y1 = self.c_centroid_array[elem_i].y()\n for elem_j in self.node_to_elem[node_j]:\n # elem_j : element attached to node_j\n # x2 : x co-ordinate for the centroid of element elem_j\n x2 = self.c_centroid_array[elem_j].x()\n # y2 : y co-ordinate for the 
centroid of element elem_j\n y2 = self.c_centroid_array[elem_j].y()\n xycor[0] = x1\n xycor[1] = x2\n xycor[2] = y1\n xycor[3] = y2\n # evaluate the expression\n cov_expr.eval(cov_ij, xycor)\n if cov_ij[0] > 0:\n temp_cov_ij += (1.0 / 3) * (1.0 / 3) * \\\n cov_ij[0] * \\\n self.c_volume_array[elem_i] * \\\n self.c_volume_array[elem_j]\n\n cov_mat.setValue(node_i, node_j, temp_cov_ij)\n cov_mat.setValue(node_j, node_i, temp_cov_ij)\n cov_mat.assemblyBegin()\n cov_mat.assemblyEnd()\n print '---------------------------'\n print '---------------------------'\n print ' Finished Covariance Matrix'\n print '---------------------------'\n print '---------------------------'\n\n return cov_mat", "def compute_covariance_matrix(Xs, sigma_2):\n m, d = Xs.shape\n t1 = np.reshape(np.tile(Xs, m), (m, m, d))\n t2 = np.reshape(np.tile(Xs, (m, 1)), (m, m, d))\n K1 = np.linalg.norm(t1 - t2, axis=2)\n coeff = 0.1\n Sigma = np.ones((m, m)) - coeff*K1\n return Sigma", "def make_covariance_mixte_matrix(points1, points2, kernel):\n\n dim = len(points1)\n p1 = np.reshape(points1, (dim, -1, 1))\n p2 = np.reshape(points2, (dim, 1, -1))\n \n return kernel(p1, p2)", "def covariance(self,pt0,pt1):\n #raise Exception()\n cov = self.nugget\n for vario in self.variograms:\n cov += vario.covariance(pt0,pt1)\n return cov", "def covariance_matrix(self):\n\n self._order_observations()\n self.cov_matrix = self._compute_covariance_matrix(\n self.list_observations, self.list_observations)\n\n self.cov_matrix += np.diag(np.array([self.noise] * self.n_observation))\n\n return self.cov_matrix", "def get_cov_matrix_parameters(self):\n cov = numpy.diag(numpy.zeros(self.get_num_parameters()))\n i = 0\n for p in self.parameters:\n cov[i,i] = p.get_covariance()\n i += 1\n return cov", "def get_process_covariance_matrix(dt):\n # a = np.array([\n # [0.25 * dt ** 4, 0.5 * dt ** 3, 0.5 * dt ** 2],\n # [0.5 * dt ** 3, dt ** 2, dt],\n # [0.5 * dt ** 2, dt, 1]\n # ])\n\n a = np.array([\n [dt ** 6 / 36., dt ** 5 / 24., dt ** 4 / 6.],\n [dt ** 5 / 24., 0.25 * dt ** 4, 0.5 * dt ** 3],\n [dt ** 4 / 6., 0.5 * dt ** 3, dt ** 2]\n ])\n return a", "def calcCovarianceMatrix(data):\n # Create covariance matrix and array to store the mean values for x_mean, y_mean, z_mean\n C = np.zeros((data.shape[1], data.shape[1]))\n mean_xyz = []\n # Calculate all mean values\n for i in range(0, data.shape[1]):\n mean_xyz.append(data[:,i].mean())\n mean_xyz = np.array(mean_xyz)\n # Check whether dimensions agree \n if data[:,0].size != data[:,1].size or data[:,0].size != data[:,2].size:\n print \"X, Y and Z must be of same dimensions.\"\n else:\n # For each row in covariance matrix C\n for i in range(0, C.shape[0]):\n # For each column in covariance matrix C\n for j in range(0, C.shape[1]):\n C[i,j] = 0\n # For each point in the dataset, access x, y, z-values\n for point in data:\n # For each point, access x,y and z in all combinations (xx, xy, xz, yx, yy, yz etc)\n C[i][j] = C[i][j] + (point[i]-mean_xyz[i])*(point[j]-mean_xyz[j])\n # Divide by the total number of points \n C = (1.0/data.shape[0]) * C\n return C", "def covariance_matrix(self,x,y,names=None,cov=None):\n if not isinstance(x,np.ndarray):\n x = np.array(x)\n if not isinstance(y,np.ndarray):\n y = np.array(y)\n assert x.shape[0] == y.shape[0]\n\n if names is not None:\n assert x.shape[0] == len(names)\n c = np.zeros((len(names),len(names)))\n np.fill_diagonal(c,self.contribution)\n cov = Cov(x=c,names=names)\n elif cov is not None:\n assert cov.shape[0] == x.shape[0]\n names = cov.row_names\n c = 
np.zeros((len(names),1)) + self.contribution\n cont = Cov(x=c,names=names,isdiagonal=True)\n cov += cont\n\n else:\n raise Exception(\"Vario2d.covariance_matrix() requires either\" +\n \"names or cov arg\")\n rc = self.rotation_coefs\n for i1,(n1,x1,y1) in enumerate(zip(names,x,y)):\n dx = x1 - x[i1+1:]\n dy = y1 - y[i1+1:]\n dxx,dyy = self._apply_rotation(dx,dy)\n h = np.sqrt(dxx*dxx + dyy*dyy)\n\n h[h<0.0] = 0.0\n h = self._h_function(h)\n if np.any(np.isnan(h)):\n raise Exception(\"nans in h for i1 {0}\".format(i1))\n cov.x[i1,i1+1:] += h\n for i in range(len(names)):\n cov.x[i+1:,i] = cov.x[i,i+1:]\n return cov", "def make_covariance_matrix(points, kernel):\n\n dim = len(points)\n p1 = np.reshape(points, (dim, 1, -1))\n p2 = np.reshape(points, (dim, -1, 1))\n\n return kernel(p1, p2)", "def covariance_matrix(self,x,y,names=None,cov=None):\n if not isinstance(x,np.ndarray):\n x = np.array(x)\n if not isinstance(y,np.ndarray):\n y = np.array(y)\n assert x.shape[0] == y.shape[0]\n\n if names is not None:\n assert x.shape[0] == len(names)\n c = np.zeros((len(names),len(names)))\n np.fill_diagonal(c,self.nugget)\n cov = Cov(x=c,names=names)\n elif cov is not None:\n assert cov.shape[0] == x.shape[0]\n names = cov.row_names\n c = np.zeros((len(names),1))\n c += self.nugget\n cont = Cov(x=c,names=names,isdiagonal=True)\n cov += cont\n\n else:\n raise Exception(\"GeoStruct.covariance_matrix() requires either \" +\n \"names or cov arg\")\n for v in self.variograms:\n v.covariance_matrix(x,y,cov=cov)\n return cov", "def get_cov_matrix_state_pars(self):\n cov = numpy.diag(numpy.zeros(self.get_num_variables() + self.get_num_parameters()))\n i = 0\n for v in self.variables:\n cov[i,i] = v.get_covariance()\n i += 1\n for p in self.parameters:\n cov[i,i] = p.get_covariance()\n i += 1\n return cov", "def get_covariance(self):\n ...", "def _compute_total_covariance_matrix(self) -> tf.Tensor:\n total_covariance_matrix = self.total_c_phi\\\n + tf.matmul(self.s_matrix_inv,\n tf.matmul(self.t_matrix, self.s_matrix_inv))\n return total_covariance_matrix", "def get_cov_matrix_states(self):\n cov = numpy.diag(numpy.zeros(self.get_num_variables()))\n i = 0\n for v in self.variables:\n cov[i,i] = v.get_covariance()\n i += 1\n return cov", "def get_cov(self, npar=None, **args):\n return get_par(self, dummy='cov_mat', npar=npar, **args)", "def covariance(self, point_one, point_two):\n raise NotImplementedError(\"C++ wrapper currently does not support computing covariance quantities.\")", "def build_covariance_matrix (numpy_cloud, reduce_by_center_of_mass=True ):\r\n\r\n # build a sum over all points\r\n sum_xyz = np.sum (numpy_cloud, axis=0 )\r\n\r\n # and normalize it to get center of mass\r\n mass_center = sum_xyz / numpy_cloud.shape[0]\r\n\r\n # reduce point cloud by center of mass\r\n if (reduce_by_center_of_mass ):\r\n numpy_cloud_reduced = np.subtract (numpy_cloud[:, 0:3], mass_center )\r\n else:\r\n numpy_cloud_reduced = numpy_cloud.copy ()\r\n\r\n # build ATA matrix\r\n a_transposed_a = np.zeros ((3, 3 ))\r\n\r\n for point in numpy_cloud_reduced:\r\n a_transposed_a[0, 0] = a_transposed_a[0, 0] + np.float_power(point[0], 2 )\r\n a_transposed_a[0, 1] = a_transposed_a[0, 1] + point[0] * point[1]\r\n a_transposed_a[0, 2] = a_transposed_a[0, 2] + point[0] * point[2]\r\n\r\n a_transposed_a[1, 0] = a_transposed_a[1, 0] + point[0] * point[1]\r\n a_transposed_a[1, 1] = a_transposed_a[1, 1] + np.float_power(point[1], 2 )\r\n a_transposed_a[1, 2] = a_transposed_a[1, 2] + point[1] * point[2]\r\n\r\n a_transposed_a[2, 
0] = a_transposed_a[2, 0] + point[0] * point[2]\r\n a_transposed_a[2, 1] = a_transposed_a[2, 1] + point[2] * point[1]\r\n a_transposed_a[2, 2] = a_transposed_a[2, 2] + np.float_power(point[2], 2 )\r\n\r\n return a_transposed_a, mass_center", "def covariance(mtrx):\r\n\r\n # Average column of matrix\r\n T = np.transpose(mtrx)\r\n ave = np.zeros(len(mtrx))\r\n mtrx = np.asarray(mtrx)\r\n\r\n if isinstance(mtrx, np.ndarray):\r\n ave = average(T)\r\n\r\n for col in T:\r\n if type(mtrx) == list:\r\n # If data isn't standardized\r\n ave += np.asarray(col)\r\n\r\n\r\n if len(mtrx[0]) > len(mtrx):\r\n for moreRows in range(len(mtrx[0]), len(mtrx)):\r\n mtrx[moreRows] = np.asarray(mtrx[moreRows])\r\n\r\n ave /= len(mtrx[0])\r\n\r\n\r\n phi = T - ave\r\n # Covariance matrix\r\n return np.dot(np.transpose(phi), phi)", "def get_cov_matrix_outputs(self):\n cov = numpy.diag(numpy.zeros(self.get_num_measured_outputs()))\n i = 0\n for o in self.outputs:\n if o.is_measured_output():\n cov[i,i] = o.get_covariance()\n i += 1\n return cov", "def getCovarianceNoiseMatrix(self):\n return np.dot ( self.getB().T, self.getB() )", "def get_covariance(self):\n x = self.particles[:, 0]\n y = self.particles[:, 1]\n X = np.stack((x, y), axis=0)\n return np.cov(X)", "def _compute_covariance_matrix(self, list_obs_1, list_obs_2):\n\n assert isinstance(list_obs_1, list)\n assert isinstance(list_obs_2, list)\n\n cov_matrix = np.zeros((len(list_obs_1), len(list_obs_2)))\n cov_matrix_flat = [\n (i, j, self.covariance(xi, yj))\n for (i, xi) in enumerate(list_obs_1)\n for (j, yj) in enumerate(list_obs_2)\n ]\n for coord_value in cov_matrix_flat:\n cov_matrix[coord_value[:2]] = coord_value[2]\n\n return cov_matrix", "def create_covariance_matrix(cls, coordinates):\n number_of_conformations = coordinates.shape[0]\n number_of_atoms = coordinates.shape[1]\n coordinates_per_conformation = number_of_atoms * 3\n covariance_matrix = numpy.zeros((coordinates_per_conformation, coordinates_per_conformation))\n coordinates = coordinates.reshape((number_of_conformations, coordinates_per_conformation))\n # Mean structure\n mean = coordinates.mean(0)\n # Changed for efficiency\n for coords in coordinates:\n deviations = coords - mean\n covariance_matrix += numpy.outer(deviations, deviations)\n return covariance_matrix / number_of_conformations", "def set_cov(self):\n v_mpart = self.d_vars['MPart']\n n_mpart = len(v_mpart)\n for p in combinations_with_replacement(range(n_mpart), 2):\n self.add_parameter('Cov', p[0], p[1])\n\n m_cov = np.zeros((n_mpart, n_mpart))\n return m_cov" ]
[ "0.66558284", "0.63560295", "0.63454884", "0.63399667", "0.62891805", "0.6275255", "0.6123064", "0.60660565", "0.60626185", "0.60455185", "0.6044615", "0.60191244", "0.600188", "0.59990466", "0.59840465", "0.5940753", "0.59398955", "0.5937423", "0.5902132", "0.589381", "0.58827657", "0.58815646", "0.58783513", "0.58779544", "0.58717394", "0.58355266", "0.5824194", "0.5811688", "0.58054966", "0.5797053" ]
0.7496815
0
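Similarly, on a single process the centre-then-Gram-matrix pattern of mpi_cov reduces to the biased sample covariance; a quick NumPy-only sanity check (illustrative, not part of the record):

# Illustrative check: X_c.T @ X_c / N equals the biased sample covariance matrix.
import numpy as np

data = np.random.randn(200, 5)
centered = data - data.mean(0)
covmat = centered.T @ centered / data.shape[0]

assert np.allclose(covmat, np.cov(data, rowvar=False, bias=True))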
Creates Hypercube objects for every index in the multidimensional self.hypercubes list.
def create_grids_structure(self):
    for indices, hypercube in np.ndenumerate(self.hypercubes):
        self.hypercubes[indices] = Hypercube(coords=indices)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_hypercubes_parents_indices(self):\n for hypercube in self.hypercubes.flatten():\n coordinates = []\n for coord in hypercube.coords:\n coordinates.append([2 * coord, 2 * coord + 1])\n for indices in list(itertools.product(*coordinates)):\n hypercube.parent_hypercubes_indices.append(tuple(indices))", "def set_hypercubes_classes(self):\n print(\"Setting the BaseGrid hypercubes' classes.\")\n list_of_all_hc = list(self.hypercubes.flatten())\n print(\"Number of hypercubes: \" + str(len(list_of_all_hc)))\n for hypercube in list_of_all_hc:\n hypercube.set_hypercube_class()\n if self.child_grid:\n self.child_grid.set_hypercubes_classes()", "def set_hypercubes_classes(self):\n print(\"Setting the Hypercubes' classes of grid at level: \" + str(self.level))\n for hypercube in self.hypercubes.flatten():\n coordinates = []\n for coord in hypercube.coords:\n coordinates.append([2 * coord, 2 * coord + 1])\n parents_list = []\n for indices in list(itertools.product(*coordinates)):\n parents_list.append(self.parent_hypercubes[tuple(reversed(indices))])\n hypercube.set_lower_level_hypercube_class(parent_hypercubes=parents_list, threshold=self.threshold)\n if self.child_grid:\n self.child_grid.set_hypercubes_classes()", "def compute_centers_of_hypercubes(self):\n for hc in self.hypercubes.flatten():\n for i in range(self.dims - 1, -1, -1):\n index = self.dims - (i + 1)\n hc.center[i] = (hc.coords[index] + 0.5) * self.hypercube_measurements[index]", "def index_as_cube(self):\n return _IndexAsCubeSlicer(self)", "def compute_centers_of_hypercubes(self):\n for hypercube in self.hypercubes.flatten():\n sums = np.zeros((len(hypercube.coords)))\n for coords in hypercube.parent_hypercubes_indices:\n for index, summ in enumerate(sums):\n sums[index] += self.parent_hypercubes[coords].center[index]\n hypercube.center = [x / 4 for x in sums]", "def mlab_plt_cube(xmin, xmax, ymin, ymax, zmin, zmax):\n faces = cube_faces(xmin, xmax, ymin, ymax, zmin, zmax)\n for grid in faces:\n x, y, z = grid\n mlab.mesh(x, y, z, opacity=0.1, color=(0.1, 0.2, 0.3))", "def gen_hypercube(samples, N):\n\n np.random.seed(4654562)\n hypercube = lhs(N, samples=samples)\n\n return hypercube", "def initialize_cells(self):\n for loc in np.ndindex(*self.shape): # TODO: see if nested for loop is faster than this\n c = Cell(loc, self)\n self.cells.append(c)", "def test_2_1_3D_cube_init(self):\n check = [(0, 0, 0), (1, 1, 1), (1, 0, 0), (1, 1, 0), (1, 0, 1),\n (0, 1, 0), (0, 1, 1), (0, 0, 1), (0.5, 0.5, 0.5)]\n\n nn_checks = {\n (1, 1, 1): [(1, 1, 0), (0, 1, 1), (1, 0, 0), (0, 0, 1), (1, 0, 1),\n (0.5, 0.5, 0.5), (0, 1, 0)],\n (1, 0, 1): [(1, 0, 0), (0, 0, 1), (0, 0, 0), (0.5, 0.5, 0.5),\n (1, 1, 1)],\n (0.5, 0.5, 0.5): [(1, 1, 0), (0, 1, 1), (0, 1, 0), (1, 0, 0),\n (0, 0, 1), (1, 0, 1), (0, 0, 0), (1, 1, 1)]}\n\n init_triangulation(3, 0, check, nn_checks)", "def generate_cube():\n \n num_voxels = 31\n\n data_x = []\n data_y = []\n data_z = []\n data_intensity = []\n\n volume = numpy.zeros((num_voxels, num_voxels, num_voxels))\n\n for x in range(num_voxels):\n for y in range(num_voxels):\n for z in range(num_voxels):\n\n if 5 < x < 10 and 5 < y < 10:\n data_x.append(x)\n data_y.append(y)\n data_z.append(z)\n data_intensity.append(200.0)\n\n volume[x,y,z] = 200.0\n\n\n return data_x, data_y, data_z, data_intensity, volume", "def build_sites(self):\n for i in range(self.shape[0]):\n for j in range(self.shape[1]):\n for k in range(self.shape[2]):\n for s,site in enumerate(self.cell.sites):\n newsite = copy.deepcopy(site)\n coordinate = 
self.cell.a1*i+\\\n self.cell.a2*j+\\\n self.cell.a3*k\n newsite.coordinate += coordinate\n self.sites[i,j,k,s] = newsite", "def __init__(self, cube_size, time_range):\n\n # cubesize is in z,y,x for interactions with tile/image data\n self.zdim, self.ydim, self.xdim = self.cubesize = [cube_size[2], cube_size[1], cube_size[0]]\n self.time_range = time_range\n self._newcube = False", "def genCubes():\n offset = vpy.vector(.5, .5, .5)\n size = vpy.vector(.2, .2, .2)\n B1 = vpy.box(pos=vpy.vector(0, 0, 0)-offset,\n color=vpy.vector(0, 0, 0), size=size, make_trail=True)\n B2 = vpy.box(pos=vpy.vector(0, 0, 1)-offset,\n color=vpy.vector(0, 0, 1), size=size, make_trail=True)\n B3 = vpy.box(pos=vpy.vector(0, 1, 1)-offset,\n color=vpy.vector(0, 1, 1), size=size, make_trail=True)\n B4 = vpy.box(pos=vpy.vector(0, 1, 0)-offset,\n color=vpy.vector(0, 1, 0), size=size, make_trail=True)\n\n B5 = vpy.box(pos=vpy.vector(1, 0, 0)-offset,\n color=vpy.vector(1, 0, 0), size=size, make_trail=True)\n B6 = vpy.box(pos=vpy.vector(1, 0, 1)-offset,\n color=vpy.vector(1, 0, 1), size=size, make_trail=True)\n B7 = vpy.box(pos=vpy.vector(1, 1, 0)-offset,\n color=vpy.vector(1, 1, 0), size=size, make_trail=True)\n B8 = vpy.box(pos=vpy.vector(1, 1, 1)-offset,\n color=vpy.vector(1, 1, 1), size=size, make_trail=True)\n\n return [B1, B2, B3, B4, B5, B6, B7, B8]", "def cube_array(self):\n cube_sides = {}\n\n for side in SIDES:\n cube_sides[side] = []\n \n # Todo Break this loop into helper functions for clarity and simplicity\n for coord in COORDS_3:\n for cubie in self.cubies:\n # Making sure that the cubes cubies are processed in the correct order\n if np.array_equal(cubie.coordinates, coord): \n \n \n for side in SIDES:\n if cubie.in_side(side):\n for face in cubie.faces:\n \n # Checking that the face of the cubie has the same norm as the side we are processing\n if np.array_equal(face.norm, NORMS[side]):\n cube_sides[side].append(face.colour)\n\n new_list = [cube_sides[\"U\"], cube_sides[\"F\"], reversal(cube_sides[\"R\"]), reversal(cube_sides[\"B\"]),\n cube_sides[\"L\"], reversal(cube_sides[\"D\"])]\n \n final_list = [nine_to_3x3(side) for side in new_list]\n return final_list", "def cube_colors(self, cubes):\n n = cubes.shape[0]\n col = np.zeros((n ** 3, 3))\n terrain_col = (66, 244, 72)\n empty_col = self.background\n for i in range(n):\n for j in range(n):\n for k in range(n):\n c = cubes[i, j, k]\n col[i * n ** 2 + j * n + k] = empty_col if c.state == 'empty' else terrain_col\n self.wireframe_col = col", "def test_1_1_2D_cube_init(self): # TODO: REMOVE FUNC AFTER SPLIT\n check = [(0, 0), (1, 1), (1, 0), (0, 1), (0.5, 0.5)]\n\n nn_checks = {(0.5, 0.5): [(0, 1), (1, 0), (0, 0), (1, 1)],\n (0, 1): [(0, 0), (1, 1), (0.5, 0.5)]}\n\n init_triangulation(2, 0, check, nn_checks)", "def new_mesh_set(self, all_meshes):\n if isinstance(all_meshes, Mesh):\n mesh_tp = []\n mesh_tp.append(all_meshes)\n all_meshes = mesh_tp\n\n if not isinstance(all_meshes, list):\n raise TypeError(\"Please send a list of mesh to update_mesh\")\n self.all_meshes = all_meshes\n\n # Remove previous actors from the scene\n for actor in self.mesh_actors:\n self.parent_window.ren.RemoveActor(actor)\n self.mesh_actors = list()\n\n # Create the geometry of a point (the coordinate) points = vtkPoints()\n for i, mesh in enumerate(self.all_meshes):\n if mesh.time.size != 1:\n raise IndexError(\"Mesh should be from one frame only\")\n\n points = vtkPoints()\n for j in range(mesh.channel.size):\n # points.InsertNextPoint([0, 0, 0])\n 
points.InsertNextPoint(mesh.data[:3, j, 0].tolist())\n\n # Create an array for each triangle\n draw_patch = not mesh.automatic_triangles and not self.force_wireframe\n if draw_patch:\n poly_type = vtkPolygon\n n_ids = 3\n color = self.patch_color[i]\n else:\n poly_type = vtkPolyLine\n n_ids = 4\n color = self.mesh_color\n cells = vtkCellArray()\n\n # Create the polygons\n for j in range(mesh.triangles.shape[1]):\n poly = poly_type()\n poly.GetPointIds().SetNumberOfIds(n_ids) # make a tri\n for k in range(len(mesh.triangles[:, j])):\n poly.GetPointIds().SetId(k, mesh.triangles[k, j])\n if not draw_patch:\n poly.GetPointIds().SetId(3, mesh.triangles[0, j]) # Close the triangle\n cells.InsertNextCell(poly)\n\n poly_data = vtkPolyData()\n poly_data.SetPoints(points)\n if draw_patch:\n poly_data.SetPolys(cells)\n else:\n poly_data.SetLines(cells)\n\n mapper = vtkPolyDataMapper()\n mapper.SetInputData(poly_data)\n\n # Create an actor\n self.mesh_actors.append(vtkActor())\n self.mesh_actors[i].SetMapper(mapper)\n self.mesh_actors[i].GetProperty().SetColor(color)\n self.mesh_actors[i].GetProperty().SetOpacity(self.mesh_opacity)\n\n self.parent_window.ren.AddActor(self.mesh_actors[i])\n\n # Update marker position\n self.update_mesh(self.all_meshes)", "def _reinit_indexes(self):\n print('Reinitializing indexes...')\n for identity in self.groundtruth_metadata.keys():\n self.groundtruth_metadata[identity]['index'] = 0\n print('Indexes reinitialized!')", "def _make_multi_level(cube, time_promote=False):\n height1 = iris.coords.DimCoord([10], \"height\", units=\"m\")\n height1.attributes[\"positive\"] = \"up\"\n height2 = iris.coords.DimCoord([20], \"height\", units=\"m\")\n height2.attributes[\"positive\"] = \"up\"\n cube1 = cube.copy()\n cube2 = cube.copy()\n cube1.add_aux_coord(height1)\n cube2.add_aux_coord(height2)\n new_cube = iris.cube.CubeList([cube1, cube2]).merge_cube()\n if time_promote:\n new_cube = iris.util.new_axis(new_cube, \"time\")\n return new_cube", "def create_subgrid(self)->list:\n return [subgrid.Subgrid(i) for i in range(0, 9)]", "def generaCubo(self):\r\n #Use Panda predefined format for vertex coordinate only\r\n format = GeomVertexFormat.getV3()\r\n \r\n #Build Vertex data using the created format. 
Vertex will never change so I use Static attribute \r\n vdata = GeomVertexData('CuboData', format, Geom.UHStatic)\r\n \r\n #I will have to write vertex data so I create a writer for these data\r\n vertex = GeomVertexWriter(vdata, 'vertex')\r\n \r\n #I now use the writer to add vertex data\r\n vertex.addData3f(0, 0, 0)\r\n vertex.addData3f(1, 1, 1)\r\n vertex.addData3f(0, 1, 1)\r\n vertex.addData3f(0, 1, 0)\r\n vertex.addData3f(0, 0, 1)\r\n vertex.addData3f(1, 0, 0)\r\n vertex.addData3f(1, 0, 1)\r\n vertex.addData3f(1, 1, 0)\r\n \r\n #I now create 12 triangles\r\n prim = GeomTriangles(Geom.UHStatic)\r\n\r\n #and then I add vertex to them\r\n #Next time use addVertices(0,1,2) !!!\r\n prim.addVertex(7)\r\n prim.addVertex(0)\r\n prim.addVertex(5)\r\n prim.closePrimitive()\r\n \r\n prim.addVertex(3)\r\n prim.addVertex(0)\r\n prim.addVertex(7)\r\n prim.closePrimitive()\r\n \r\n prim.addVertex(2)\r\n prim.addVertex(6)\r\n prim.addVertex(4)\r\n prim.closePrimitive()\r\n\r\n prim.addVertex(1)\r\n prim.addVertex(6)\r\n prim.addVertex(2)\r\n prim.closePrimitive()\r\n\r\n prim.addVertex(7)\r\n prim.addVertex(2)\r\n prim.addVertex(3)\r\n prim.closePrimitive()\r\n\r\n prim.addVertex(1)\r\n prim.addVertex(2)\r\n prim.addVertex(7)\r\n prim.closePrimitive()\r\n\r\n prim.addVertex(3)\r\n prim.addVertex(4)\r\n prim.addVertex(0)\r\n prim.closePrimitive()\r\n\r\n prim.addVertex(2)\r\n prim.addVertex(4)\r\n prim.addVertex(3)\r\n prim.closePrimitive()\r\n\r\n prim.addVertex(0)\r\n prim.addVertex(6)\r\n prim.addVertex(5)\r\n prim.closePrimitive()\r\n\r\n prim.addVertex(4)\r\n prim.addVertex(6)\r\n prim.addVertex(0)\r\n prim.closePrimitive()\r\n\r\n prim.addVertex(5)\r\n prim.addVertex(1)\r\n prim.addVertex(7)\r\n prim.closePrimitive()\r\n\r\n prim.addVertex(6)\r\n prim.addVertex(1)\r\n prim.addVertex(5)\r\n prim.closePrimitive()\r\n\r\n #Create a Geom to bing vertex data to primitives\r\n geom = Geom(vdata)\r\n geom.addPrimitive(prim)\r\n\r\n #Create a node for the Geom in order to be able to render it\r\n node = GeomNode('gnode')\r\n node.addGeom(geom)\r\n\r\n #Adde the node to the scene graph == render it!\r\n nodePath = render.attachNewNode(node)\r\n \r\n #is this needed?\r\n nodePath.setPos( 0, 5, 0)\r\n \r\n self.camera.lookAt(nodePath)\r\n \r\n base.setBackgroundColor( .0, .0, .0 )\r\n \r\n taskMgr.add(self.SpinCameraTask, \"SpinCameraTask\")", "def __init__(self, board):\n self.board = board\n self.tile_rhombus_list = [TilePolygon(tile) for tile in self.board.list_of_tiles]\n self.cube_walls_list = [TilePolygon(wall, wall.color) for wall in self.board.list_of_cube_walls]", "def _getitem3d(self, index):\n\n lovects = self._getlovects()\n hivects = self._gethivects()\n fields = self._getfields()\n\n ix = index[0]\n iy = index[1]\n iz = index[2]\n\n if len(fields[0].shape) > self.dim:\n ncomps = fields[0].shape[-1]\n else:\n ncomps = 1\n\n if len(index) > self.dim:\n if ncomps > 1:\n ic = index[-1]\n else:\n raise Exception('Too many indices given')\n else:\n ic = None\n\n nx = hivects[0,:].max() - self.nghosts\n ny = hivects[1,:].max() - self.nghosts\n nz = hivects[2,:].max() - self.nghosts\n\n if npes > 1:\n nx = comm_world.allreduce(nx, op=mpi.MAX)\n ny = comm_world.allreduce(ny, op=mpi.MAX)\n nz = comm_world.allreduce(nz, op=mpi.MAX)\n\n if isinstance(ix, slice):\n ixstart = max(ix.start or -self.nghosts, -self.nghosts)\n ixstop = min(ix.stop or nx + 1 + self.nghosts, nx + self.overlaps[0] + self.nghosts)\n else:\n ixstart = ix\n ixstop = ix + 1\n if isinstance(iy, slice):\n iystart = max(iy.start or 
-self.nghosts, -self.nghosts)\n iystop = min(iy.stop or ny + 1 + self.nghosts, ny + self.overlaps[1] + self.nghosts)\n else:\n iystart = iy\n iystop = iy + 1\n if isinstance(iz, slice):\n izstart = max(iz.start or -self.nghosts, -self.nghosts)\n izstop = min(iz.stop or nz + 1 + self.nghosts, nz + self.overlaps[2] + self.nghosts)\n else:\n izstart = iz\n izstop = iz + 1\n\n # --- Setup the size of the array to be returned and create it.\n # --- Space is added for multiple components if needed.\n sss = (max(0, ixstop - ixstart),\n max(0, iystop - iystart),\n max(0, izstop - izstart))\n if ncomps > 1 and ic is None:\n sss = tuple(list(sss) + [ncomps])\n resultglobal = np.zeros(sss, dtype=_libwarpx._numpy_real_dtype)\n\n datalist = []\n for i in range(len(fields)):\n\n # --- The ix1, 2 etc are relative to global indexing\n ix1 = max(ixstart, lovects[0,i])\n ix2 = min(ixstop, lovects[0,i] + fields[i].shape[0])\n iy1 = max(iystart, lovects[1,i])\n iy2 = min(iystop, lovects[1,i] + fields[i].shape[1])\n iz1 = max(izstart, lovects[2,i])\n iz2 = min(izstop, lovects[2,i] + fields[i].shape[2])\n\n if ix1 < ix2 and iy1 < iy2 and iz1 < iz2:\n\n sss = (slice(ix1 - lovects[0,i], ix2 - lovects[0,i]),\n slice(iy1 - lovects[1,i], iy2 - lovects[1,i]),\n slice(iz1 - lovects[2,i], iz2 - lovects[2,i]))\n if ic is not None:\n sss = tuple(list(sss) + [ic])\n\n vslice = (slice(ix1 - ixstart, ix2 - ixstart),\n slice(iy1 - iystart, iy2 - iystart),\n slice(iz1 - izstart, iz2 - izstart))\n\n datalist.append((vslice, fields[i][sss]))\n\n if npes == 1:\n all_datalist = [datalist]\n else:\n all_datalist = comm_world.allgather(datalist)\n\n for datalist in all_datalist:\n for vslice, ff in datalist:\n resultglobal[vslice] = ff\n\n # --- Now remove any of the reduced dimensions.\n sss = [slice(None), slice(None), slice(None)]\n if not isinstance(ix, slice):\n sss[0] = 0\n if not isinstance(iy, slice):\n sss[1] = 0\n if not isinstance(iz, slice):\n sss[2] = 0\n\n return resultglobal[tuple(sss)]", "def create_grid(grid):\r\n for i in range(4):\r\n grid.append([0,0,0,0])", "def itercubes(self, **kwargs):\n for ifuslot in self.fplane.ifuslots:\n yield ifuslot, self.extract_ifu_sensitivity_cube(ifuslot, \n **kwargs)", "def mesh(self, initial_obj=None):\n mesh = initial_obj if initial_obj else 0.\n for size in reversed(self.sizes):\n mesh = [mesh] * size\n return mesh", "def build_subsets(self):\n\t\tself.all = h.SectionList()\n\t\tself.all.wholetree(sec=self.soma)\n\n\t\t# morphological section lists\n\t\tself.axon_list = []\n\t\tself.axosomatic_list = []\n\t\tself.apicalshaftoblique_list = []\n\t\tself.apicaltree_list = []\n\t\tself.tuft_list = []\n\t\tself.soma_list = []\n\t\tself.basal_list = []\n\n\t\tself.axon_list.append(hillock)\n\t\tself.axon_list.append(iseg)\n\t\tself.axon_list.append(axon)\n\n\t\tself.axosomatic_list.append(soma)\n\t\tself.axosomatic_list.append(basal)\n\t\tself.axosomatic_list.append(hillock)\n\t\tself.axosomatic_list.append(iseg)\n\t\tself.axosomatic_list.append(axon)\n\n\t\tself.apicalshaftoblique_list.append(apical)\n\n\t\tself.apicaltree_list.append(apical)\n\t\tself.apicaltree_list.append(tuft)\n\n\t\tself.tuft_list.append(tuft)\n\n\t\tself.soma_list.append(soma)\n\n\t\tself.basal_list.append(basal)\n\n\t# Create lists of cell parts that contain each ion channel type\n\t\tself.nat_list = []\n\t\tself.kslow_list = []\n\t\tself.kfast_list = []\n\t\tself.ih_list = 
[]\n\n\t\tself.ih_list.append(basal)\n\t\tself.ih_list.append(apical)\n\t\tself.ih_list.append(tuft)\n\n\t\tself.excsyn_list.append(basal)\n\t\tself.excsyn_list.append(apical)\n\t\tself.excsyn_list.append(tuft)\n\n\t\tself.inhdendsyn_list.append(basal)\n\t\tself.inhdendsyn_list.append(apical)\n\n\t\tself.inhsomasyn_list.append(soma)\n\n\t\tself.nat_list.append(soma)\n\t\tself.nat_list.append(hillock)\n\t\tself.nat_list.append(iseg)\n\t\tself.nat_list.append(apical)\n\t\tself.nat_list.append(tuft)\n\n\t\tself.kfast_list.append(soma)\n\t\tself.kfast_list.append(apical)\n\t\tself.kfast_list.append(tuft)\n\n\t\tself.kslow_list.append(soma)\n\t\tself.kslow_list.append(apical)\n\t\tself.kslow_list.append(tuft)", "def __init__(self, browser, config=None):\n super(WhooshIndexer, self).__init__()\n\n self.root_path = config.get(\"search\", \"index_path\")\n self.browser = browser\n self.cube = browser.cube\n self.path = os.path.join(self.root_path, str(self.cube))\n self.logger = self.browser.logger\n\n # FIXME: this is redundant, make one index per dimension or something\n # FIXME: the above requirement needs cubes browser to provide list of\n # all dimension values, which is not currently implemented nor in the\n # API definition", "def testCube(self):\n cube = {i:(i^1,i^2,i^4) for i in range(8)}\n self.check(cube,6)" ]
[ "0.6719218", "0.6329061", "0.6187306", "0.60999596", "0.5802039", "0.5746891", "0.57240033", "0.56811094", "0.5670855", "0.56600827", "0.5632086", "0.5601353", "0.55559343", "0.5540782", "0.551118", "0.53804505", "0.5376794", "0.53545433", "0.5327541", "0.5314851", "0.530495", "0.5304603", "0.52949655", "0.52722067", "0.52693754", "0.526155", "0.5235018", "0.52221066", "0.52100474", "0.5200855" ]
0.8409945
0
Creates a LowerLevelGrid when possible; a level equal to one means that this is the coarsest level.
def create_lower_level_grid(self):
    if self.level == 1:
        return False
    else:
        return LowerLevelGrid(level=self.level - 1, parent_hypercubes_number=self.hypercubes_number,
                              parent_hypercubes=self.hypercubes, dims=self.dims)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def min_level(self):\n return self.__min", "def grid_levels(self, nlevels):\n for j in np.arange(nlevels):\n if j in self.gridded['levels'].keys():\n continue\n self.gridded['levels'][j] = self.grid_param(self.levels[j])\n self.jmax = max(self.gridded['levels'].keys())\n if self.verbose:\n print('Gridded the first %d energy levels.' % (self.jmax))\n print('Use self.grid_levels() to read in more.\\n')\n return", "def GLDAS025LandGrid():\n return GLDAS025Grids(only_land=True)", "def define_grid():\n grid_left = np.array([[-13.1000000000000, -35.5000000000000, -48.3000000000000, -60, -16.9000000000000,\n -34.8000000000000, -67.5000000000000, -46.1000000000000, -59.8000000000000,\n -14.2000000000000, -28.3000000000000, -42.3000000000000, -67.6000000000000,\n -50.5000000000000, -14.6000000000000, -60.9000000000000, -31.6000000000000,\n -5.10000000000000, -65.6000000000000, -41.8000000000000, -55.1000000000000,\n -22.7000000000000, -5.80000000000000, -49.2000000000000, -34.5000000000000,\n -61.5500000000000, -63.6000000000000, -40.4000000000000, -48.7000000000000,\n -21.8000000000000, -58.2000000000000, -7, -36.3000000000000, -48.1000000000000,\n -56.8000000000000, -7.30000000000000, -22.2000000000000, -36.8000000000000,\n -46.8000000000000],\n [-67.7000000000000, -60, -55.1000000000000, -51.8000000000000, -51.6000000000000,\n -49.3000000000000, -47.1000000000000, -43.7000000000000, -39.6000000000000,\n -39.1000000000000, -31.2000000000000, -30.7000000000000, -30.1000000000000,\n -24.4000000000000, -22.7000000000000, -18.7000000000000, -16.9000000000000,\n -12.6000000000000, -10.8000000000000, -10.2000000000000, -4.01000000000000, 1.20000000000000,\n 2.80000000000000, 3.70000000000000, 3.90000000000000, 6.20000000000000, 8.30000000000000,\n 11.8000000000000, 14.5000000000000, 16, 18.2000000000000, 18.4000000000000, 19.9000000000000,\n 24.6000000000000, 28.5200000000000, 33.8000000000000, 35, 35.4000000000000,\n 35.6000000000000],\n [69.1000000000000, 66, 58.2000000000000, 48, 78, 71.7000000000000, 31, 61.1000000000000,\n 53.3000000000000, 81.1000000000000, 76, 70.2000000000000, 41.2000000000000, 64.4000000000000,\n 80.2000000000000, 50.9000000000000, 75.2000000000000, 77.3000000000000, 37.8000000000000, 67,\n 53.2000000000000, 72, 74.8000000000000, 54.7000000000000, 66.5000000000000, 35.9000000000000,\n 25.7000000000000, 60.7000000000000, 50.5000000000000, 68.9000000000000, 27.3000000000000,\n 70.3000000000000, 59.6000000000000, 44, 20.8000000000000, 61.7000000000000, 57.2000000000000,\n 47, 36]])\n stn_left = np.array([[-14.6, -13.2, -11.7, -9.10, -11.7, -13.2, -7.90, -10],\n [-15.1, -15.1, -15.1, -12.6, -12.6, -12.6, -9.40, -10.1],\n [-5.40, -7.20, -8.70, -8.70, -7.50, -5.10, -10.3, -7.80]])\n grid_right = np.copy(grid_left)\n grid_right[0, :] = grid_right[0, :] * -1\n stn_right = np.copy(stn_left)\n stn_right[0, :] = stn_right[0, :] * -1\n\n return grid_left, grid_right, stn_left, stn_right", "def test_hunger_grid_create(self):\n self.grid = Hunger_Grid.hunger_grid()\n self.grid.newGrid = Hunger_Grid.hunger_grid().create_hunger_grid(M=30, N=30, P_LAVA = 1.0)\n self.assertTrue(self.grid.newGrid.size == 900, \"Grid size is incorrect\")\n self.assertTrue(self.grid.newGrid[2, 2] == 1, \"Lava chance is not acting correctly\")\n self.assertTrue(self.grid.newGrid[-3, -3] == 1, \"Lava chance is not acting correctly\")", "def grid_levels(self, nlevels):\n for j in np.arange(nlevels):\n if j in self.gridded['levels'].keys():\n continue\n self.gridded['levels'][j] = self.grid_param(self.levels[j],\n 
self.method)\n self.jmax = max(self.gridded['levels'].keys())\n if self.verbose:\n print('Gridded the first %d energy levels.' % (self.jmax))\n print('Use self.grid_levels() to read in more.\\n')\n return", "def gen_level(\n root_path,\n floor_file = 'floor.lines',\n walls_file = 'walls.lines',\n windows_file = 'windows.lines',\n doors_file = 'doors.lines',\n handrails_file = 'handrails.lines',\n stairs_file = 'stairs.lines'):\n \n def internal(\n floor_thickness = .3,\n walls_thickness = .2, \n walls_height = 3, \n windows_fn = default_window(),\n doors_fn = default_door(),\n doors_height = 2.3,\n handrails_height = 1.2,\n handrails_thickness = .1):\n \n floor = gen_floor(root_path + floor_file, floor_thickness)\n \n walls = w8.gen_walls(\n root_path + walls_file, \n walls_thickness, \n walls_height + floor_thickness, \n external = False)\n \n windows = gen_windows(\n root_path + windows_file, \n windows_fn,\n walls_height + floor_thickness)\n \n doors, doorways = gen_doors(\n root_path + doors_file, \n doors_fn, \n doors_height)\n \n handrails = w8.gen_walls(\n root_path + handrails_file, \n handrails_thickness, \n handrails_height + floor_thickness,\n external = False)\n \n stair_foot = gen_stairs_foot(root_path + stairs_file)\n walls = DIFFERENCE([walls, T(3)(floor_thickness)(doorways)])\n \n return walls, windows, doors, handrails, floor, stair_foot\n \n return internal", "def lower_row_invariant(self, target_row, target_col):\n # replace with your code\n if self.get_number(target_row, target_col) != 0:\n print 'Error 1: Current number is not 0'\n return False\n current = 0\n for row in range(target_row + 1, self.get_height()):\n if target_col == self.get_width() - 1:\n current = self._grid[row][0]\n else:\n current = self._grid[row - 1][-1] + 1\n column = self._grid[row]\n for grid in column:\n if grid != current:\n print 'Error 2'\n return False\n current += 1\n if target_col != self.get_width() - 1:\n current = self._grid[target_row][target_col + 1]\n for grid in self._grid[target_row][target_col + 1:]:\n if grid != current:\n print 'Error 3'\n return False\n current += 1\n return True", "def lower_row_invariant(self, target_row, target_col):\r\n # Tile zero is positioned at (i,j).\r\n # All tiles in rows i+1 or below are positioned at their solved location.\r\n # All tiles in row i to the right of position (i,j) are positioned at their solved location.\r\n solved_lower = False\r\n solved_grid = [[col + self.get_width() * row\r\n for col in range(self.get_width())]\r\n for row in range(self._height)]\r\n if self._grid[target_row][target_col] == 0:\r\n solved_lower = True\r\n \r\n for row in range(target_row + 1, self._height):\r\n for col in range(self._width):\r\n if self._grid[row][col] != solved_grid[row][col]:\r\n solved_lower = False\r\n \r\n for col in range(target_col + 1, self._width):\r\n if self._grid[target_row][col] != solved_grid[target_row][col]:\r\n solved_lower = False\r\n \r\n return solved_lower", "def grow_if_needed(self, points: List[Point]):\n if any(p not in self.grid for p in points):\n right = self.grid.right + 1\n bottom = self.grid.bottom + 1\n for y_pos in range(self.grid.top, bottom):\n new_point = Point(right, y_pos)\n self.grid[new_point] = self.get_geologic_level(new_point)\n for x_pos in range(self.grid.left, right + 1):\n new_point = Point(x_pos, bottom)\n self.grid[new_point] = self.get_geologic_level(new_point)", "def createLevelMap(self):\n for a in self.hierarchy.iterkeys():\n self.lvl = 0\n self.calcLevel(a)\n if self.lvl > 
self.levelMap.highestLevel: self.levelMap.highestLevel = self.lvl\n self.levelMap.addLevelData(AgentName=a, Level=self.lvl)", "def createLevelMap(self):\n\t\tfor a in self.hierarchy.iterkeys():\n\t\t\tself.lvl = 0\n\t\t\tself.calcLevel(a)\n\t\t\tif self.lvl > self.levelMap.highestLevel: self.levelMap.highestLevel = self.lvl\n\t\t\tself.levelMap.addLevelData(AgentName=a, Level=self.lvl)", "def generate_level(level):\n seed = level * 69420 # multiply by 69420 to not have the seeds too close to each other\n random.seed(seed)\n dimensions = get_map_size(level)\n level_map = np.full(dimensions, -1)\n while -1 in level_map:\n choice = random.choice(np.argwhere(level_map == -1))\n next_index = (choice[0], choice[1])\n # get indices of the tiles next to the current index\n left_index, up_index, right_index, down_index = get_direction_indices(next_index)\n left = tile_needs_connection(left_index, level_map, has_connection_right)\n up = tile_needs_connection(up_index, level_map, has_connection_down)\n right = tile_needs_connection(right_index, level_map, has_connection_left)\n down = tile_needs_connection(down_index, level_map, has_connection_up)\n level_map[next_index] = get_tile(left, up, right, down)\n return un_solve(level_map)", "def create_level(self, name):\n \n # Create a level object\n level = Level()\n size_y=8\n size_x=10\n # Separates static and non static parts\n # This will speed up network games, since only the non static part will be\n # sent on the network\n level_static = soya.World(level)\n \n # Load 3 materials (= textures) for files ./materials{grass|ground|snow}.data\n \n ground = soya.Material.get(\"block2\")\n \n \n # Creates a landscape, from the heighmap \"./images/map.png\"\n # The landscape is in the static part (=level_static), because it won't change along the game.\n land = soya.Land(level_static)\n land.y =0.0\n land.from_image(soya.Image.get(\"floor.png\"))\n \n # Sets how high is the landscape\n land.multiply_height(-0.0)\n \n # These values are trade of between quality and speed\n land.map_size = 8\n land.scale_factor = 1.5\n land.texture_factor = 1.0\n \n # Set the texture on the landscape, according to the height\n # (i.e. 
height 0.0 to 15.0 are textured with grass, ...)\n \n land.set_material_layer(ground, 0.0, 25.0)\n \n # squares where the player starts\n # Note that this is stored in physical, not abstract, coordinates.\n always_clear=[(-1,-1),(-2,-1),(0,-1),(-1,-2),(-1,0)]\n cube = soya.Shape.get(\"cube\")\n \n # r and c represent the cube positions in the grid,\n # while x and y represent the physical coordinates in the world.\n # Note the simple formula: r = x + self.size_x , c = y + self.size_y\n border_row, border_col = 2*size_x - 2, 2*size_y - 2\n for r, x in enumerate(range(-size_x,size_x-1)):\n for c, y in enumerate(range(-size_y,size_y-1)):\n bx = x +128\n by = y +128 \n if (r % 2 == 0 and c % 2 == 0) or \\\n (r == 0 or c == 0 or r == border_row or c == border_col ):\n # This is a wall block\n block = soya.Volume(level_static, cube)\n block.scale(1.0, 1.0, 1.0)\n block.set_xyz(bx, 0.5, by) \n elif random() < 0.8 and not (x, y) in always_clear:\n # A soft block\n block = SoftBox()\n level.add_mobile(block)\n block.scale(1.0, 1.0,1.0)\n block.set_xyz(bx, 0.5, by)\n \n # Creates a light in the level, similar to a sun (=a directional light)\n sun = soya.Light(level_static)\n sun.directional = 1\n sun.diffuse = (1.0, 0.8, 0.4, 1.0)\n sun.rotate_vertical(-45.0)\n \n # Creates a sky atmosphere, with fog\n atmosphere = soya.SkyAtmosphere()\n atmosphere.ambient = (0.3, 0.3, 0.4, 1.0)\n atmosphere.fog = 1\n atmosphere.fog_type = 0\n atmosphere.fog_start = 40.0\n atmosphere.fog_end = 50.0\n atmosphere.fog_color = atmosphere.bg_color = (0.2, 0.5, 0.7, 1.0)\n atmosphere.skyplane = 1\n atmosphere.sky_color = (1.5, 1.0, 0.8, 1.0)\n \n # Set the atmosphere to the level\n level.atmosphere = atmosphere\n \n # Save the level as \"./worlds/level_demo.data\" (remember, levels are subclasses of worlds)\n level_static.filename = level.name = name+\"_bbomber_static\"\n level_static.save()\n level.filename = level.name = name+\"_bbomber\"\n level.save()", "def setup_level_1() -> object:\n #create level object\n level = Level()\n\n #create vertical walls for level\n create_and_add_vertical_walls_to_list(4, 39, 4, level.wall_list)\n create_and_add_vertical_walls_to_list(4, 25, 19, level.wall_list)\n create_and_add_vertical_walls_to_list(33, 54, 19, level.wall_list)\n create_and_add_vertical_walls_to_list(4, 25, 34, level.wall_list)\n create_and_add_vertical_walls_to_list(33, 54, 34, level.wall_list)\n create_and_add_vertical_walls_to_list(14, 25, 54, level.wall_list)\n create_and_add_vertical_walls_to_list(33, 44, 54, level.wall_list)\n create_and_add_vertical_walls_to_list(14, 45, 74, level.wall_list)\n create_and_add_vertical_walls_to_list(54, settings.HEIGHT, 23, level.wall_list)\n create_and_add_vertical_walls_to_list(54, settings.HEIGHT, 30, level.wall_list)\n\n #create horizontal walls for level\n create_and_add_horiontal_walls_to_list(4, 34, 4, level.wall_list)\n create_and_add_horiontal_walls_to_list(4, 9, 19, level.wall_list)\n create_and_add_horiontal_walls_to_list(15, 24, 19, level.wall_list)\n create_and_add_horiontal_walls_to_list(30, 54, 19, level.wall_list)\n create_and_add_horiontal_walls_to_list(54, 74, 14, level.wall_list)\n create_and_add_horiontal_walls_to_list(4, 24, 39, level.wall_list)\n create_and_add_horiontal_walls_to_list(30, 54, 39, level.wall_list)\n create_and_add_horiontal_walls_to_list(54, 74, 44, level.wall_list)\n create_and_add_horiontal_walls_to_list(19, 24, 54, level.wall_list)\n create_and_add_horiontal_walls_to_list(30, 35, 54, level.wall_list)\n\n #create knight character for 
level\n create_and_add_character_to_list(\"pics\\prison_guard.png\", 0.2, 270, 470, level.character_list)\n\n #knight asks for bribe\n guard_convo = Dialogue(300, 500, 150, 50, \"I know who you are...\\n if you pay me,\\n I'll turn a blind eye.\")\n level.dialogue_list.append(guard_convo)\n\n #create coin item to bribe knight character\n create_and_add_item_to_list(\"pics\\gold_1.png\", 0.5, 400, 250, level.item_list)\n\n #create prompts and info for rooms for object\n cell = RoomInfo(120, 100, \"Dungeon cell. There's a note and key. Someone's waiting for you in the garden.\")\n level.room_info_list.append(cell)\n guard_room = RoomInfo(450, 280, \"Guardroom. There's the unconconsious bodies of the guards. Your saviours must've gone to great lengths...\")\n level.room_info_list.append(guard_room)\n torture_chamber = RoomInfo(120, 280, \"Torture chamber. You've been here before. They were questioning you, but you didn't answer.\")\n level.room_info_list.append(torture_chamber)\n battle_room = RoomInfo(650, 280, \"Battle room. You see that your captors are fighting revolutionaries- those who seek to bring back a lost king.\")\n level.room_info_list.append(battle_room)\n stairwell = RoomInfo(220, 520, \"Stairwell. There's a lone guard who doesn't look surprised to see you\")\n level.room_info_list.append(stairwell)\n\n return level", "def generate_latitudinal_level_title(grid, field, level):\n time_str = generate_grid_time_begin(grid).isoformat() + \"Z\"\n disp = grid.y[\"data\"][level] / 1000.0\n if disp >= 0:\n direction = \"north\"\n else:\n direction = \"south\"\n disp = -disp\n l1 = f\"{generate_grid_name(grid)} {disp:.1f} km {direction} of origin {time_str} \"\n field_name = generate_field_name(grid, field)\n return l1 + \"\\n\" + field_name", "def __init__(self, level):\n self.level = level\n self.my_map = {}\n self.my_level = []\n self.my_grid = []", "def lower_binary_tree(self):\n return self.min_linear_extension().binary_search_tree_shape(left_to_right=False)", "def create_grid(self):\n\n # If called when a grid already exists create a new grid\n if self.grid:\n self.grid = []\n\n grid_pen = QPen(QColor(215, 215, 215), 1)\n w = 10000\n h = 10000\n self.addLine(-10000, 0, 10000, 0, QPen(QColor(0, 0, 0), 2))\n self.addLine(0, -10000, 0, 10000, QPen(QColor(0, 0, 0), 2))\n\n w = int(w / self.grid_spacing) * self.grid_spacing\n h = int(h / self.grid_spacing) * self.grid_spacing\n for i in range(-w, w, self.grid_spacing):\n if i == 0:\n pass\n else:\n line = self.addLine(-w, i, w, i, grid_pen)\n line.setZValue(-1)\n self.grid.append(line)\n for i in range(-h, h, self.grid_spacing):\n if i == 0:\n pass\n else:\n line = self.addLine(i, -h, i, h, grid_pen)\n line.setZValue(-1)\n self.grid.append(line)\n\n self.grid_built = True", "def build_lower_zeros(self):\r\n for row in range(self.SIZE - 1, 0, -1):\r\n self.__obtain_zero(row, 0, 0)\r\n\r\n for col in range(1, self.SIZE - 1):\r\n for row in range(self.SIZE - 1, col, -1):\r\n self.__obtain_zero(row, col, row - 1)", "def generate_map(self):\n map = Map.Map(50, 80, 1000, 10, 6)\n\n #here we can map out our larger map structure\n if self.level < 2:\n map.make_greathall()\n elif self.level >= 2 and self.level < 20:\n map.make_map()\n elif self.level >= 20:\n map.make_cave()\n else:\n map.make_map()\n return map", "def best_unexplored_lower_bound(self):\n if self._unexplored_nodes:\n return min(node.lower_bound for node in self._unexplored_nodes)\n else:\n return 0.0", "def initialize_grid(self):\r\n for i in range(self.height):\r\n for j in 
range(self.width):\r\n self.grid[i][j] = 0\r\n \r\n # fill up unvisited cells\r\n for r in range(self.height):\r\n for c in range(self.width):\r\n if r % 2 == 0 and c % 2 == 0:\r\n self.unvisited.append((r,c))\r\n\r\n self.visited = []\r\n self.path = dict()\r\n self.generated = False", "def increase_left_boundary(self):\n self.L = self.L - 1.0\n self.Ne = self.Ne + 1", "def initial_level(self):\n return self.get(self._names[\"initial_level\"])", "def _buildGridPoints(self):\n self.spacings = []\n for level in xrange(self.depth):\n levelSpacings = []\n refLevel = level + 1\n level = 2**level\n axisData = []\n for axis in self.size:\n spacing = axis / (level+1)\n levelSpacings.append(spacing)\n axisData.append([gridValue*spacing for gridValue in xrange(1, level+1)])\n pointList = [((i, j, k), np.array([axisData[0][i], axisData[1][j], axisData[2][k]]))\n for i in xrange(level)\n for j in xrange(level)\n for k in xrange(level)]\n self.grid[refLevel] = {point[0]: point[1] for point in pointList}\n self.spacings.append(levelSpacings)", "def setup_random_influence_level():\r\n \r\n global INFLUENCE_LEVELS\r\n number = random.uniform(0,1)\r\n index = 0\r\n if number<=0.9:\r\n index = 0\r\n else:\r\n index = 1\r\n return INFLUENCE_LEVELS[index]", "def setupLevel(self):\n\n self.state = GameState.SETUP\n\n # vado a leggere il dizionario corrispondente\n # al numero di livello corrente facendo in modo\n # che se il numero di livello richiesto non esiste\n # carico quello più vicino a quello richiesto\n if self.levelIndex>= len(levels):\n self.levelIndex = len(levels) -1\n elif self.levelIndex <0:\n self.levelIndex = 0\n\n level = levels[self.levelIndex]\n\n # nome del livello\n self.level_name = level.get(\"name\", \"Livello %s\" % (self.levelIndex+1))\n\n # dimensione del labirinto (numero di righe e di colonne)\n self.nrows = level.get(\"nrows\", 20)\n self.ncols = level.get(\"ncols\", 20)\n\n # l'algoritmo di generazione del labirinto supporta solo un numero di\n # righe e di colonne dispari, quindi approssimiamo le dimensioni ai\n # valori dispari più vicini\n if self.nrows % 2 == 0:\n self.nrows+=1\n if self.ncols % 2 == 0:\n self.ncols+=1\n\n\n # fattore di scala del labirinto\n # attenzione che, fattori di scala molto\n # grandi, rallentano le prestazioni di gioco\n self.scale = level.get(\"scale\", 30)\n\n background_image_filename = level.get(\"background_image\", None)\n if background_image_filename!=None:\n self.background_image = pygame.image.load(background_image_filename).convert()\n else:\n self.background_image = None\n\n # parametri usati dall'algoritmo di generazione del labirinto\n # si veda https://en.wikipedia.org/wiki/Maze_generation_algorithm\n self.maze_density = level.get(\"maze_density\", Game.MAZE_DENSITY)\n self.maze_complexity = level.get(\"maze_complexity\", Game.MAZE_COMPLEXITY)\n\n # colore delle monete\n self.coin_color = level.get(\"coin_color\", Game.YELLOW)\n\n # tempo a disposizione per completare il livello\n self.time = level.get(\"time\", 240)\n self.clockTime = level.get(\"clock\", 80)\n\n # numero di nemici\n self.numEnemies = level.get(\"num_enemies\", 0)\n\n # numero di ricaricatori temporali\n self.numTimeReloaders = level.get(\"time_reloaders\", 0)\n\n # numero di bombe \"distruggi monete\"\n self.bonus_bombs = level.get(\"bombs\", [])\n # numero di bombe \"distruggi muri\"\n self.bonus_wall_bombs = level.get(\"wall_bombs\", [])\n # numero di bombe \"distruggi nemici\"\n self.bonus_enemy_killers = level.get(\"enemy_killers\", [])\n # numero di pizze che 
rendono i nemici golosi di monete\n self.bonus_greedy_enemies = level.get(\"greedy_enemies\", 0)\n # numero di portali (teletrasporto del giocatore)\n self.bonus_portals = level.get(\"portals\", 0)\n\n # proiettili a disposizione del giocatore per un certo periodo di tempo\n self.bonus_player_bullets = level.get(\"player_bullets\", [])\n\n #numero di bonus che rendono il giocatore invisibile per un certo periodo di tempo\n self.bonus_invisibility_players = level.get(\"invisibility_players\", [])\n\n # numero di shooters (nemici che sparano contro il giocatore)\n self.numShooters = level.get(\"num_shooters\" , [])\n\n\n # suoni di collisione\n self.sound_explosion = pygame.mixer.Sound(\"Effects/smc-wwvi/big_explosion.ogg\")\n self.sound_bomb_explosion = pygame.mixer.Sound(\"Effects/smc-wwvi/bombexplosion.ogg\")\n\n\n # suono della moneta raccolta\n #self.sound_coin = pygame.mixer.Sound(\"Effects/SFX/beep_7.wav\")\n self.sound_coin = pygame.mixer.Sound(\"Effects/jute-dh/gold.wav\")\n\n # suono del timeReloader\n self.sound_time_reloader = pygame.mixer.Sound(\"Effects/SFX/echo_5.wav\")\n\n # suono di collisione con enemy killer\n self.sound_enemy_killer = pygame.mixer.Sound(\"Effects/smc-wwvi/big_explosion.ogg\")\n\n # suono dell'invisibility player\n self.sound_invisibility_player = pygame.mixer.Sound(\"Effects/sound_effects/trekscan.wav\")\n\n # suono del teletrasporto\n self.sound_portal = pygame.mixer.Sound(\"Effects/sound_effects/trekscan.wav\")\n\n # suono dell'arma presa e del proiettile sparato\n self.sound_weapon = pygame.mixer.Sound(\"Effects/jute-dh/hit_2m.wav\")\n\n # suono dei greedy enemies\n self.sound_greedy_enemies = pygame.mixer.Sound(\"Effects/sound_effects/squeak2.wav\")\n\n # suono del levello completato\n self.sound_completed_level = pygame.mixer.Sound(\"Effects/sound_effects/level_completed.wav\")\n\n #\n # IMMAGINI DEGLI SPRITE DI GIOCO: CONFIGURABILE DA FILE DI CONFIGURAZIONE!!\n #\n\n # immagine delle pareti del labirinto\n self.wall_filename = level.get(\"wall\", \"Backgrounds/Dim/Boards.jpg\")\n\n # immagine dei nemici del labirinto\n self.enemies_filename = level.get(\"enemies\", \"Sprites/Animals/duck.png\")\n\n # immagine dei nemici del labirinto che possono anche sparare\n # di default gli shooters hanno lo stesso aspetto dei nemici normali\n self.shooters_filename = level.get(\"shooters\", self.enemies_filename)\n\n # immagine della bomba distruggi monete\n self.bomb_filename = level.get(\"bomb\", \"Sprites/bomb_bonus.png\")\n # immagine della bomba distruggi muri\n self.wall_bomb_filename = level.get(\"wall_bomb\", \"Sprites/bomb_wall_bonus.png\")\n\n self.time_reloaders_filename = level.get(\"time_reloader\", \"Sprites/clessidra.png\")\n self.enemy_killers_filename = level.get(\"enemy_killer\", \"Sprites/skull2.png\")\n self.greedy_enemies_filename = level.get(\"greedy_enemy\", \"Sprites/pizza.png\")\n self.portals_filename = level.get(\"portal\", \"Sprites/CrawlStone/portal.png\")\n self.invisibility_players_filename = level.get(\"invisibility_player\", \"Sprites/CrawlStone/wizard_hat_2.png\")\n\n # lo sprite che fornisce i proiettili ha la stessa immagine dei proiettili\n self.player_bullets_filename = level.get(\"player_bullet\", \"Sprites/CrawlStone/apple.png\")\n self.bonus_player_bullets_filename = self.player_bullets_filename\n\n self.shooters_bullets_filename = level.get(\"shooter_bullet\", \"Sprites/CrawlStone/apple.png\")\n\n #\n # GRUPPI DI SPRITES\n #\n\n # i muri del mio labirinto\n self.walls = pygame.sprite.Group()\n\n # i nemici\n 
self.enemies = pygame.sprite.Group()\n\n # i nemici che sparano fanno parte dello stesso gruppo dei nemici!\n #self.shooters = pygame.sprite.Group()\n\n # le bombe\n self.bombs = pygame.sprite.Group()\n\n # gli attivatori/disattivatori di nemici golosi\n self.greedyEnemies = pygame.sprite.Group()\n\n # le bombe che spaccano i muri\n self.wallBombs = pygame.sprite.Group()\n\n # i ricaritori temporali\n self.timeReloaders = pygame.sprite.Group()\n\n # le monete da raccogliere\n self.coins = pygame.sprite.Group()\n\n # i killer dei nemici\n self.enemyKillers = pygame.sprite.Group()\n\n # i portali per spostarsi in nuove aree\n self.portals = pygame.sprite.Group()\n\n # i nemici che rendono invisibile il giocatore\n self.invisibilityPlayers = pygame.sprite.Group()\n\n # i proiettili sparati dal giocatore\n self.playerBullets = pygame.sprite.Group()\n\n # i proiettili sparati dagli shooters\n self.shooterBullets = pygame.sprite.Group()\n\n # il bonus che fornisce proiettili sparati dal giocatore\n self.bonusPlayerBullets = pygame.sprite.Group()\n\n\n self.free_locations = []\n\n # genero il labirinto che prescinde dai fattori di scala\n self.maze = self.generate_maze()\n #print(self.maze)\n\n # il giocatore e i nemici hanno una dimensione che dipende dal fattore di scala\n self.player = pygame.sprite.GroupSingle(Player(int(self.scale * 0.8), int(self.scale * 0.8),\n self.scale, 1,\n \"Sprites/pac-classic/ghost-red-front.png\",\n )\n )\n self.player.sprite.setWalls(self.walls)\n # imposto le immagini del giocatore sulla base della posizione\n # l'ordine è UP, DOWN , RIGHT, LEFT\n\n self.player.sprite.setImages([\n [\"Sprites/pac-classic/ghost-red-rear.png\",\n \"Sprites/pac-classic/ghost-red-front.png\",\n \"Sprites/pac-classic/ghost-red-right.png\",\n \"Sprites/pac-classic/ghost-red-left.png\",\n ],\n\n [\"Sprites/pac-classic/ghost-orange-rear.png\",\n \"Sprites/pac-classic/ghost-orange-front.png\",\n \"Sprites/pac-classic/ghost-orange-right.png\",\n \"Sprites/pac-classic/ghost-orange-left.png\",\n ],\n\n [\"Sprites/pac-classic/ghost-lblue-rear.png\",\n \"Sprites/pac-classic/ghost-lblue-front.png\",\n \"Sprites/pac-classic/ghost-lblue-right.png\",\n \"Sprites/pac-classic/ghost-lblue-left.png\",\n ],\n\n ]\n )\n\n\n\n\n #\n # CREAZIONE DEGLI SPRITES\n #\n\n # CREO I MIEI NEMICI\n self.createEnemies(self.numEnemies,self.enemies_filename,self.enemies)\n\n # CREO I MIEI NEMICI CHE SPARANO che aggiungo allo stesso gruppo dei nemici!\n self.createShooters(self.numShooters, self.shooters_filename, self.shooters_bullets_filename,self.shooterBullets,\n self.sound_weapon, self.enemies)\n\n # CREO LE BOMBE che sono ObjectDestroyer che distruggono le monete\n self.createObjectDestroyers(self.bonus_bombs,self.bomb_filename,self.bombs, self.coins)\n\n\n # CREO LE WALL BOMBS che sono WallDestroyer che consentono di distruggere i muri\n # interni del labirinto\n self.createInnerObjectDestroyers(self.ncols, self.nrows,self.bonus_wall_bombs,\n self.wall_bomb_filename,self.wallBombs,self.walls)\n # CREO GLI ENEMY KILLERS che sono ObjectDestroyer che consentono di eliminare i nemici\n self.createObjectDestroyers(self.bonus_enemy_killers, self.enemy_killers_filename, self.enemyKillers, self.enemies)\n\n # Creo GREEDY_ENEMIES come ENEMY che consentono di rendere, alternativamente, i nemici golosi di monete oppure no\n self.createEnemies(self.bonus_greedy_enemies, self.greedy_enemies_filename, self.greedyEnemies)\n\n # Alternativamente potrei creare GREED ENEMIES come ObjectDestroyer che in realtà non distruggono 
niente, ma rendono \"golosi\"\n # i nemici che stanno intorno a loro in modo che inizino a mangiare monete. Se stanno già mangiando\n # monete, al contrario, dovrebbero smettere. CHIEDERLO COME ESERCIZIO\n\n # CREO I TIME RELOADERS che consentono di ripristinare il tempo\n self.createEnemies(self.numTimeReloaders, self.time_reloaders_filename, self.timeReloaders)\n\n # CREO I PORTALI che consentono di trasferirsi in una nuova locazione random\n self.createEnemies(self.bonus_portals, self.portals_filename, self.portals)\n\n # CREO I TIME LIMITED POWERS, come quello che rende invisibile il giocatore\n self.createTimeLimitedPowers(self.bonus_invisibility_players, self.invisibility_players_filename, self.invisibilityPlayers)\n # e come il ricaricatore di proiettili\n self.createTimeLimitedPowers(self.bonus_player_bullets, self.bonus_player_bullets_filename, self.bonusPlayerBullets)\n\n self.mazeSurf = pygame.Surface((self.ncols * self.scale, self.nrows * self.scale))\n # disegno il labirinto coi suoi muri\n self.drawMaze()\n\n self.scrollSurface = self.mazeSurf.copy()\n #self.scrollSurface.fill((0, 0, 0))\n\n pos = random.choice(self.free_locations)\n print(\"Loc Player:%s\" % str(pos))\n\n self.player.sprite.setPosition(pos)\n\n # imposto posizione e movimento iniziale\n # ai vari gruppi di sprites\n\n self.setInitialPosition(self.enemies.sprites())\n self.setInitialPosition(self.bombs.sprites())\n self.setInitialPosition(self.wallBombs.sprites())\n self.setInitialPosition(self.timeReloaders.sprites())\n self.setInitialPosition(self.enemyKillers.sprites())\n self.setInitialPosition(self.greedyEnemies.sprites())\n self.setInitialPosition(self.portals.sprites())\n self.setInitialPosition(self.invisibilityPlayers.sprites())\n self.setInitialPosition(self.bonusPlayerBullets.sprites())\n\n #self.setInitialPosition(self.shooters.sprites())\n\n # normalmente i nemici non mangiano monete...\n self.enemies_eater = False\n\n\n # a inizio livello si dà tempo di 5 secondi al Giocatore per divincolarsi\n # da eventuali nemici che compaiono negli immediati dintorni\n # della posizione (casuale) in cui si viene a trovare\n # il giocatore a inizio livello\n self.player.sprite.addPower(PlayerPowers.INVISIBILITY, (self.time,5))\n\n # imposto la musica del livello e la mando in esecuzione\n self.music = level.get(\"music\", \"./Music/Soundimage/Techno-Gameplay_Looping.ogg\")\n pygame.mixer.music.load(self.music)\n # mando in esecuzione in modalità loop (valore -1)\n pygame.mixer.music.play(-1)\n\n # barra di stato del gioco con informazioni sul punteggio\n self.setupGamebarSurface()", "def _create_grid(self):\n\n # Check if hull dimensions are sensible for deck-dimensions (rows & lanes)\n grid = np.zeros((self.rows, self.lanes), dtype=np.int)\n if self.rows > self.hull_catheti_length and self.lanes >= self.hull_catheti_length * 2:\n for i in range(self.hull_catheti_length):\n t = (self.hull_catheti_length - i)\n grid[i] += np.hstack([-np.ones(t, dtype=np.int), np.zeros(self.lanes - t, dtype=np.int)])\n grid[i] += np.hstack([np.zeros(self.lanes - t, dtype=np.int), -np.ones(t, dtype=np.int)])\n else:\n logging.getLogger(__name__).error(\"Ship hull does not match grid dimensions -> return without hull\")\n return grid", "def calc_levels_left(char_name):\n with open(SPREADSHEET_PATH + 'starting_lvls.json', 'r') as lvls_file:\n char_lvls = json.load(lvls_file)\n\n char_starting_level = char_lvls[char_name]['starting_lvl']\n return LEVEL_MAX - char_starting_level" ]
[ "0.5543817", "0.53682214", "0.5357687", "0.5343803", "0.53416336", "0.52594876", "0.52290356", "0.52202016", "0.5219884", "0.519104", "0.51719373", "0.5154888", "0.51536214", "0.51400846", "0.51250356", "0.51171875", "0.5115629", "0.5093552", "0.5093239", "0.5017504", "0.50111085", "0.4979691", "0.4971924", "0.49637565", "0.49529", "0.493379", "0.49336314", "0.49313787", "0.4926549", "0.49217224" ]
0.8483062
0
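
The record above pairs a docstring query with a create_lower_level_grid method that recurses until the coarsest level (level == 1). As a rough, self-contained sketch of that recursive-coarsening pattern — the real LowerLevelGrid class is not part of the record, so the constructor body below is an assumption that only mirrors the arguments visible in the snippet:

class LowerLevelGrid:
    # Stand-in for the class referenced in the record; only the recursion is reproduced here.
    def __init__(self, level, parent_hypercubes_number, parent_hypercubes, dims):
        self.level = level
        self.hypercubes_number = parent_hypercubes_number
        self.hypercubes = parent_hypercubes
        self.dims = dims
        self.child_grid = self.create_lower_level_grid()  # recurse toward level 1

    def create_lower_level_grid(self):
        if self.level == 1:
            return False
        return LowerLevelGrid(level=self.level - 1,
                              parent_hypercubes_number=self.hypercubes_number,
                              parent_hypercubes=self.hypercubes,
                              dims=self.dims)

top = LowerLevelGrid(level=3, parent_hypercubes_number=8, parent_hypercubes=[], dims=2)
assert top.child_grid.child_grid.child_grid is False  # levels 3 -> 2 -> 1, then stop
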
Given an Example, finds the class of its nearest nonempty Hypercube. First, it gathers the data (center coordinates and the class) of each parent Hypercube whose coordinates are listed in parents_indices. Then it returns the class of the nearest nonempty parent Hypercube.
def nearest_neighbours_class(self, example_coords, parents_indices):
    print("Computing the nearest neighbours class.")
    parents_data = [(self.hypercubes[parent].center, self.hypercubes[parent].hypercube_class)
                    for parent in parents_indices]
    distances = sorted([(distance.euclidean(example_coords, parent[0]), parent[1])
                        for parent in parents_data if not parent[1] == EMPTY_HYPERCUBE_INDICATOR])
    return distances[0][1]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def classify(self, example_coords, hypercubes_coords):\n print(\"Classifying an observation with coordinates; \" + str(example_coords))\n hypercubes_coords = tuple([int(x / 2) for x in hypercubes_coords])\n if self.hypercubes[hypercubes_coords].hypercube_class == EMPTY_HYPERCUBE_INDICATOR:\n returned_class = self.child_grid.classify(example_coords=example_coords, hypercubes_coords=hypercubes_coords)\n if returned_class[0] == -1:\n returned_class = self.nearest_neighbours_class(example_coords, returned_class[1])\n return returned_class\n elif self.hypercubes[hypercubes_coords].hypercube_class == MIXED_HYPERCUBE_INDICATOR:\n return -1, self.hypercubes[hypercubes_coords].parent_hypercubes_indices\n # -1 is a flag indicating that we need to compute distances\n else:\n return self.hypercubes[hypercubes_coords].hypercube_class", "def test(self, example_coords):\n print(\"Predicting the class of an observation with coordinates: \" + str(example_coords))\n hypercubes_coords = tuple(\n [int(example_coords[i] / self.hypercube_measurements[i]) for i in range(self.dims - 1, -1, -1)])\n if self.hypercubes[hypercubes_coords].hypercube_class is not EMPTY_HYPERCUBE_INDICATOR:\n return self.hypercubes[hypercubes_coords].hypercube_class\n else:\n print(\"Observation with coordinates \" + str(example_coords) + \" falls within an empty cube.\")\n returned_class = self.child_grid.classify(example_coords=example_coords, hypercubes_coords=hypercubes_coords)\n if returned_class[0] == -1:\n returned_class = self.nearest_neighbours_class(example_coords=example_coords, parents_indices=returned_class[1])\n return returned_class", "def nearest_common_parent(self, go_ids=None):\n # Take the element at maximum depth\n ncp = max(self.common_parents(go_ids=go_ids), key=lambda t: self.dict_go[t].depth)\n return ncp", "def parent_classes(self):\n response = check_defined(self, inspect.stack()[0][3])\n if not response:\n return response\n root_node = list(nx.topological_sort(self.se.full_class_only_graph))\n # When a schema is not a tree with only one root node\n # Set \"Thing\" as the root node by default\n if 'http://schema.org/Thing' in root_node:\n root_node = 'http://schema.org/Thing'\n else:\n root_node = root_node[0]\n paths = nx.all_simple_paths(self.se.full_class_only_graph,\n source=root_node,\n target=self.uri)\n paths = [_path[:-1] for _path in paths]\n result = restructure_output(self,\n paths,\n inspect.stack()[0][3],\n self.output_type)\n return result", "def get_examples(ds_data, network, parents, verbose=1, **params):\n # Parameters\n classes = params.setdefault('classes', [-1,0,1])\n target = params.setdefault('target', int(1.2e6))\n slice_len = params.setdefault('slice_len', 330)\n \n assert not target % len(classes)\n \n G = np.mean(ds_data, axis=0) \n examples = np.zeros((target, 5, slice_len, 1))\n labels = np.zeros((target, len(classes)))\n count = 0\n \n if verbose > 0:\n print('Generating {} training examples'.format(target))\n bar = pb.ProgressBar(max_value=target,\n widgets=[pb.Percentage(), ' - ',\n pb.Bar(), ' - ',\n pb.ETA()])\n \n for c in classes:\n \n pairs = np.argwhere(network == c)\n reps = int(target/len(classes)/pairs.shape[0]) + 1\n pair_idx = np.repeat(np.arange(pairs.shape[0]), reps)\n pair_idx = np.random.permutation(pair_idx)[:target//len(classes)]\n start_idx = np.random.randint(\n 0, ds_data.shape[1]-slice_len, size=target//len(classes))\n \n for i in range(pair_idx.size):\n \n n1 = pairs[pair_idx[i]][0]\n n2 = pairs[pair_idx[i]][1]\n assert(network[n1,n2] == c)\n \n start = 
start_idx[i]\n end = start + slice_len\n \n p1 = np.mean(ds_data[parents[n1], start:end], axis=0)\n p2 = np.mean(ds_data[parents[n2], start:end], axis=0)\n \n examples[count,:,:,0] = np.vstack((\n p1, \n ds_data[n1][start:end], \n G[start:end], \n ds_data[n2][start:end], \n p2\n ))\n \n labels[count,:] = np.equal(classes, c, dtype=np.int32)\n \n if verbose > 0:\n bar.update(count)\n count +=1\n \n if verbose > 0:\n bar.finish()\n print(\n 'Generated examples of shape:', examples.shape,\n '\\nGenerated labels of shape:', labels.shape,\n '\\nThere are {} classes: {}'.format(len(classes), classes)\n )\n \n assert not np.isnan(examples).any()\n return examples, labels", "def test_closest_parent(self):\n self.assertEqual(self.vectors.closest_parent('dog.n.01'), 'canine.n.02')\n self.assertEqual(self.vectors.closest_parent('mammal.n.01'), None)", "def update(self, example, hypercubes_coords):\n hypercubes_coords = tuple([int(x / 2) for x in hypercubes_coords])\n new_class = self.hypercubes[hypercubes_coords].update_lower_level(example_class=example.class_id, example_count=1, threshold=self.threshold)\n print(\"Update. Changed class of \" + str(hypercubes_coords) + \" to: \" + str(new_class))\n if self.child_grid:\n self.child_grid.update(example=example, hypercubes_coords=hypercubes_coords)", "def _find_positive_ancestor(self, refdata, seedindex): \n \n seedval = refdata[seedindex]\n if seedval > self.row_priors[seedindex]: \n return seedindex, -seedval/self.row_priors[seedindex]\n \n # find parents of seed\n parents = self.parents\n seedparents = parents[seedindex]\n parents_len = len(seedparents)\n if parents_len == 0:\n return None, 0\n elif parents_len == 1:\n return self._find_positive_ancestor(refdata, seedparents[0])\n elif parents_len == 2:\n # handle special case when there are only two items\n # instead of doing a general query and sort, pick best of two \n r0 = self._find_positive_ancestor(refdata, seedparents[0])\n r1 = self._find_positive_ancestor(refdata, seedparents[1])\n if r1[1] < r0[1]:\n return r1 \n return r0 \n \n # study multiple paths toward root, return most enriched\n result = [self._find_positive_ancestor(refdata, _) for _ in seedparents] \n return min(result, key=itemgetter(1))", "def get_parent_by_coord(x, y, w, h, states: [State]) -> State:\n parents = [state for state in states if is_state_a_child_by_coord(x, y, w, h, state)]\n if not parents:\n return None\n parents.sort(key = lambda st: st.x, reverse=True)\n return parents[0]", "def get_parents_of_class(class_uri, endpoint):\n query = \"\"\"\n select distinct ?c where{\n <%s> rdfs:subClassOf ?c.\n }\n \"\"\" % class_uri\n results = run_query(query=query, endpoint=endpoint)\n classes = [r['c']['value'] for r in results]\n return classes", "def find_closest(self, inputs, nb_closest=5):\r\n result = np.zeros((2,6))\r\n output = self.out.clone()\r\n op = output.transpose(0,1)\r\n ip = op[inputs]\r\n input_prob = ip.transpose(0,1) \r\n cos = nn.CosineSimilarity(0)\r\n \r\n for in_col, token_id in enumerate(inputs.data.cpu().numpy()):\r\n cosines = None\r\n \r\n for o_col, prob in enumerate(output.data.cpu().numpy()):\r\n # if token_id == o_col:\r\n # continue\r\n c = cos (input_prob.data[:,in_col], output.data[:,o_col])\r\n if (cosines is not None):\r\n cosines = torch.cat([cosines,c])\r\n else :\r\n cosines = c\r\n \r\n #print(cosines.size())\r\n res = torch.topk(cosines, nb_closest+1)\r\n #print (type(res[1]))\r\n res = Variable (res[1])\r\n #print (type (res))\r\n res = res.data.cpu().numpy()\r\n result = 
np.concatenate((result,[res]),axis=0)\r\n \r\n result = np.delete(result, 0, 0)\r\n result = np.delete(result , 0, 0)\r\n return (cuda(Variable(torch.from_numpy(result))))\r\n \r\n #raise NotImplementedError('Implement the find_closest method of the model')\r", "def update(self, example, hypercubes_coords=None):\n hypercubes_coords = tuple(\n [int(example.coords[i] / self.hypercube_measurements[i]) for i in range(self.dims - 1, -1, -1)])\n new_class = self.hypercubes[hypercubes_coords].update_basic(example_list=[example])\n print(\"Update. Changed class of \" + str(hypercubes_coords) + \" to: \" + str(new_class))\n if self.child_grid:\n self.child_grid.update(example=example, hypercubes_coords=hypercubes_coords)", "def find_root(self):\r\n self.find_parents()\r\n index = 0\r\n while len(self.vertices[index].parents)>0:\r\n index = self.vertices[index].parents[0]\r\n return index", "def ID3(self,data,classData,featureNames, parentMajority):\n\t\t\n\t\tnData = len(data)\n\t\tnClasses = len(classData)\n\n\t\t# base case 1: if D is empty, return the parentMajority class\n\t\tif nData==0 and nClasses==0:\n\t\t\t return parentMajority\n\n\t\t# get the number of features\n\t\tnFeatures = 0\n\t\tif nData != 0:\n\t\t\tnFeatures = len(data[0])\n\t\t\t\n\t\t# find the majority of target value\n\t\tmajority = self.majority_class(classData)\n\n\t\t# base case 2: if d is empty (no features), return the majority class\n\t\tif nFeatures == 0 :\n\t\t\treturn majority\n\n\t\t# base case 3: if all instances have the same target value, return the first target value\n\t\telif classData.count(classData[0]) == nData:\n\t\t\treturn classData[0]\n\t\t\n\t\t# general case to recursively build the tree\n\t\telse:\n\n\t\t\t# Choose the best feature based on information gain\n\t\t\tgain = np.zeros(nFeatures)\n\t\t\tfor feature in range(nFeatures):\n\t\t\t\tgain[feature] = self.info_gain(data,classData,feature)\n\t\t\tbestFeature = np.argmax(gain)\n\t\t\tbestFeatureName = featureNames[bestFeature]\n\t\t\t\n\t\t\ttree = {bestFeatureName:{}}\n\t\t\t#print \"The tree %s afer the best feature %s\" % (tree, bestFeatureName)\n\n\t\t\t# Load the bestFeature's possible values into a list\n\t\t\tvalues = []\n\t\t\tfor i in range(len(self.featureValues[bestFeatureName])):\n\t\t\t\tvalues.append(self.featureValues[bestFeatureName][i])\n\t\t\t#print \"The best feature %s values %s\" % (bestFeatureName, str(values))\n\n\t\t\t# Partition the original datapoints based on the best feature possible values\n\t\t\t# and then recursively invoke ID algorithm to build subtrees\n\t\t\tfor value in values:\n\t\t\t\tnewData = []\n\t\t\t\tnewClassData = []\n\t\t\t\tindex = 0\n\n\t\t\t\t# partition the data\n\t\t\t\tfor datapoint in data:\n\t\t\t\t\tif datapoint[bestFeature]==value:\n\t\t\t\t\t\tif bestFeature==0:\n\t\t\t\t\t\t\tnewdatapoint = datapoint[1:]\n\t\t\t\t\t\t\tnewNames = featureNames[1:]\n\t\t\t\t\t\telif bestFeature==nFeatures:\n\t\t\t\t\t\t\tnewdatapoint = datapoint[:-1]\n\t\t\t\t\t\t\tnewNames = featureNames[:-1]\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tnewdatapoint = datapoint[:bestFeature]\n\t\t\t\t\t\t\tnewdatapoint.extend(datapoint[bestFeature+1:])\n\t\t\t\t\t\t\tnewNames = featureNames[:bestFeature]\n\t\t\t\t\t\t\tnewNames.extend(featureNames[bestFeature+1:])\n\n\t\t\t\t\t\tnewData.append(newdatapoint)\n\t\t\t\t\t\tnewClassData.append(classData[index])\n\t\t\t\t\tindex += 1\n\n\t\t\t\t# Now do recursive call to build the subtrees\n\t\t\t\tsubtree = self.ID3(newData,newClassData,newNames, majority)\n\n\t\t\t\t# Add the subtree on to 
the tree\n\t\t\t\t#print \"The subtree %s for the current tree %s\" % ( subtree, tree,)\n\t\t\t\ttree[bestFeatureName][value] = subtree\n\n\t\t\treturn tree", "def getNearestCentreClass(centers, driver):\n driver = driver.reshape((1, 2))\n assert (centers.shape[1] == 6)\n assert (driver.shape == (1, 2))\n\n mask = centers[:, -1] == 1\n tmp = centers[mask]\n assert (tmp.shape[1] == centers.shape[1])\n\n dis = np.linalg.norm(tmp[:, 0:2].copy() - driver)\n mini = np.argmin(dis)\n _class = tmp[mini, 4]\n tmp[mini, 5] = 0\n centers[mask] = tmp\n return _class, centers", "def set_hypercubes_classes(self):\n print(\"Setting the Hypercubes' classes of grid at level: \" + str(self.level))\n for hypercube in self.hypercubes.flatten():\n coordinates = []\n for coord in hypercube.coords:\n coordinates.append([2 * coord, 2 * coord + 1])\n parents_list = []\n for indices in list(itertools.product(*coordinates)):\n parents_list.append(self.parent_hypercubes[tuple(reversed(indices))])\n hypercube.set_lower_level_hypercube_class(parent_hypercubes=parents_list, threshold=self.threshold)\n if self.child_grid:\n self.child_grid.set_hypercubes_classes()", "def get_shapes(self):\n colours = [[None for j in range(self.cellcounts[0])] for i in range(self.cellcounts[1])]\n X = []\n backmap = {}\n rad = 3\n for k,((i,y),(j,x)) in enumerate(self.mids):\n chunk = self.img[y-rad:y+rad, x-rad:x+rad,:]\n m = np.mean(np.mean(chunk, axis=0), axis=0).astype(np.uint16)\n colours[i][j] = m\n X.append(m)\n backmap[k] = (i,j)\n print(np.shape(X))\n Z = linkage(X, 'ward')\n Q = fcluster(Z, self.thresh, criterion='distance')\n\n closenesses = []\n for k,cls in enumerate(Q):\n i,j = backmap[k]\n closenesses.append( np.sqrt(np.sum( (colours[i][j] - self.ideal_bg)**2) ) )\n minidx = np.argmin(closenesses)\n bgcls = Q[minidx]\n\n blibs = [(255,0,0), (0,255,0), (0,0,255), (255,255,0), (0,255,255), (255,0,255)]\n img4 = self.img2.copy()\n for k,((i,y),(j,x)) in enumerate(self.mids):\n cls = Q[k]\n if cls == bgcls:\n continue\n col = blibs[(cls-1)]\n img4 = cv2.circle(img4, (x,y), 5, col, 2)\n\n write_img(\"./out/test_classes.png\", img4)\n self.classimg = img4\n\n A = np.zeros(shape=self.cellcounts, dtype=np.uint8)\n mx = np.max(Q)\n for k,cls in enumerate(Q):\n if cls == bgcls:\n continue\n\n if cls == mx:\n plotcls = bgcls\n else:\n plotcls = cls\n i,j = backmap[k]\n A[i][j] = plotcls\n\n self.res = A", "def find_extent_of_top_class(local_list=local_list):\n for i in range(len(local_list) + 1):\n j = -(i+1) # 1 indexed\n if j == -1:\n test_class = local_list[j][0]\n current_class = local_list[j][0]\n if j == -len(local_list):\n # if the whole of local_list is the same class 9(this accounts for zero indexing)\n j = j -1\n break\n if not (current_class == test_class):\n #print(j)\n break\n no_of_members_of_test_class = -j - 1\n return no_of_members_of_test_class", "def cluster_hierarchically(active_sites):\n\n\n cls, sc = agglomerative(active_sites)\n\n return cls", "def find_neighbor(self, test_example):\n n = test_example.size\n # Create list of tuples --> [(training_example, distance to test_example), ... 
]\n dists = []\n for i, e in enumerate(self.train_examples):\n dist = self.euclidean_dist(test_example, e, n)\n class_value = self.train_examples.get_class_value_at(i)\n dists.append((class_value, dist))\n dists.sort(key=itemgetter(1))\n neighbors = [dists[i][0] for i in xrange(self.k)] # closest self.k neighbors\n return neighbors", "def find_parent_of(self, *args):\n return _ida_hexrays.citem_t_find_parent_of(self, *args)", "def set_hypercubes_parents_indices(self):\n for hypercube in self.hypercubes.flatten():\n coordinates = []\n for coord in hypercube.coords:\n coordinates.append([2 * coord, 2 * coord + 1])\n for indices in list(itertools.product(*coordinates)):\n hypercube.parent_hypercubes_indices.append(tuple(indices))", "def _classify(self, example):\n neighbors = self.find_neighbor(example)\n class_label = self.find_response(neighbors)\n return class_label", "def get_coparents(request):\n building_id = request.GET.get('building_id', '')\n node = BuildingSnapshot.objects.get(pk=building_id)\n\n # we need to climb up 'root's parents to find the other matched\n # snapshots\n root, proto_result = _parent_tree_coparents(node)\n\n if node.canonical_building and node.co_parent:\n proto_result.append(node.co_parent)\n elif node.co_parent and node.co_parent.canonical_building:\n proto_result.append(node)\n\n while node.children.first():\n child = node.children.first()\n if child.co_parent:\n proto_result.append(child.co_parent)\n node = child\n\n result = map(lambda b: b.to_dict(), proto_result)\n\n tip = root.tip\n tree = tip.parent_tree + [tip]\n tree = map(lambda b: b.to_dict(), tree)\n response = {\n 'status': 'success',\n 'coparents': result,\n 'match_tree': tree,\n 'tip': tip.to_dict(),\n }\n\n return response", "def closest_cluster(cluster_averages, example):\n\n # just do a normal search for a minimum\n best_index = -1\n best_distance = 10000000\n for (i, cluster_average) in enumerate(cluster_averages):\n distance = dist(cluster_average, example)\n\n # easy trick to get a single comparison and negation to check for\n # invalid number too\n if distance < best_distance:\n best_index = i\n best_distance = distance\n\n return best_index", "def batch_update(self, parents_classes_dict):\n print(\"Updating the LowerLevelGrid with a batch of examples\")\n for (class_id, coords), examples in parents_classes_dict.items():\n coords = tuple([int(x / 2) for x in coords])\n self.hypercubes[coords].update_lower_level(example_class=class_id, example_count=len(examples), threshold=self.threshold)\n if self.child_grid:\n self.child_grid.batch_update(parents_classes_dict=parents_classes_dict)", "def get_parents(target, concept_map):\n parents = []\n target_index = concept_map[CONCEPTS_STR].index(target)\n for row in range(len(concept_map[ADJ_MAT_STR])): \n # get value in adjMat for each row at target concept's col\n val = concept_map[ADJ_MAT_STR][row][target_index] \n if val > 0 and target_index != row: # don't care concepts are their own parents\n # print('parent found at {}, {}'.format(row, target_index)) # TODO remove\n parents.append(concept_map[CONCEPTS_STR][row])\n return parents", "def get_pseudo_label(self, anchors, box_cls_batch, box_delta_batch, gt_instances, scale_weight, enforce_back=False, back_thre=0.3, fore_thre=0.7, IOU_thre=0.5):\n with torch.no_grad():\n anchors = type(anchors[0]).cat(anchors).tensor\n device = anchors.device\n N = len(gt_instances)\n weight_flatten = [torch.ones((permute_to_N_HWA_K(x, self.num_classes)).shape[0:2]).to(device)*scale_weight[i] for i, x in 
enumerate(box_cls_batch)]\n weight_flatten = torch.cat(weight_flatten, dim=1).view(-1)\n pred_logits_collect = []\n pred_boxes_collect = []\n pseudo_target_logits_collect = []\n pseudo_target_boxes_collect = []\n weight_logits_collect = []\n weight_boxes_collect = []\n # For each image in the batch:\n for i in range(N):\n # Aggregate box_cls and box_delta for each scale.\n box_cls = [box_cls[i:i+1] for box_cls in box_cls_batch]\n box_delta = [box_delta[i:i+1] for box_delta in box_delta_batch]\n pred_class_logits, pred_anchor_deltas = permute_all_cls_and_box_to_N_HWA_K_and_concat(box_cls, box_delta, self.num_classes) # Shapes: (N x R, K) and (N x R, 4), respectively.\n pred_box = self.box2box_transform.apply_deltas(pred_anchor_deltas, anchors)\n gt_boxes = gt_instances[i].gt_boxes\n gt_labels = gt_instances[i].gt_classes\n # Initial the pseudo_targets\n with torch.no_grad():\n pseudo_target_logits = pred_class_logits.clone().to(pred_class_logits.device)\n pseudo_target_logits = pseudo_target_logits.sigmoid()\n pseudo_target_boxes = pred_box.clone().to(pred_box.device)\n # Step 1: For each object, assgin groud-truth to the predicted boxes of the highest IoU, to prevent the case that there are missing detections.\n # For convenience, we use Matcher provided by D2 to achieve this step. We use a high fore_thre to get the highest IoU match.\n matcher = Matcher([back_thre, fore_thre], [-1, 0, 1], allow_low_quality_matches=True)\n with torch.no_grad():\n match_quality_matrix = pairwise_iou(gt_boxes, Boxes(anchors))\n matched_idxs, anchor_labels = matcher(match_quality_matrix)\n del match_quality_matrix\n # Assign groud-truth predictions to the selected anchors.\n selected_anchor = anchor_labels == 1\n pseudo_target_logits[selected_anchor] = 0\n pseudo_target_logits[selected_anchor, gt_labels[matched_idxs[selected_anchor]]] = 1\n pseudo_target_boxes[selected_anchor] = gt_boxes.tensor[matched_idxs[selected_anchor]]\n # If enforce_back is enabled, background-anchors are also included in the pseudo-labels.\n # background-anchors are anchors which are far away from any objects.\n # By enableing enforce_back, we enforce the background-anchors to detect nothing.\n if enforce_back:\n background_idxs = anchor_labels == -1\n pseudo_target_logits[background_idxs] = 0\n pseudo_back_logits = pseudo_target_logits[background_idxs].clone().to(pseudo_target_logits.device)\n pred_class_back_logits = pred_class_logits[background_idxs]\n weight_back_logits = weight_flatten[background_idxs]\n\n # Step 2: Conduct NMS process, filter out eliminated dectections.\n # Only apply constraints on detections kept after NMS.\n logits_sigmoid = pseudo_target_logits.flatten()\n num_topk = min(self.topk_candidates, pseudo_target_boxes.size(0))\n predicted_prob, topk_idxs = logits_sigmoid.sort(descending=True)\n predicted_prob = predicted_prob[: num_topk]\n topk_idxs = topk_idxs[:num_topk]\n keep_idxs = predicted_prob > self.score_threshold\n predicted_prob = predicted_prob[keep_idxs]\n topk_idxs = topk_idxs[keep_idxs]\n anchor_idxs = topk_idxs // self.num_classes\n\n pseudo_target_logits = pseudo_target_logits[anchor_idxs]\n pseudo_target_boxes = pseudo_target_boxes[anchor_idxs]\n pred_box = pred_box[anchor_idxs]\n pred_class_logits = pred_class_logits[anchor_idxs]\n weight_logits = weight_flatten[anchor_idxs]\n weight_boxes = weight_flatten[anchor_idxs]\n gt_labels = gt_instances[i].gt_classes\n\n # Step 3: Match the rest detections with the ground-truth objects and assign pseudo-targets based on the matching.\n # If IoU > 
IOU_thre, assign ground-truth cls and box as the target.\n # Else, assign background as targets.\n matcher = Matcher([IOU_thre], [0, 1], allow_low_quality_matches=False)\n\n match_quality_matrix = pairwise_iou(gt_boxes, Boxes(pseudo_target_boxes))\n matched_idxs, anchor_labels = matcher(match_quality_matrix)\n del match_quality_matrix\n\n target = torch.zeros(((anchor_labels == 1).sum(), 80), dtype=pred_box.dtype, device=pred_box.device)\n target[torch.arange((anchor_labels == 1).sum()), gt_labels[matched_idxs[anchor_labels == 1]]] = 1.0\n pseudo_target_logits[anchor_labels == 1] = target\n pseudo_target_boxes[anchor_labels == 1] = gt_boxes.tensor[matched_idxs[anchor_labels == 1]]\n pseudo_target_boxes = pseudo_target_boxes[anchor_labels == 1]\n pred_box = pred_box[anchor_labels == 1]\n pseudo_target_logits[anchor_labels == 0] = 0\n weight_boxes = weight_boxes[anchor_labels == 1]\n if enforce_back:\n pseudo_target_logits = torch.cat([pseudo_back_logits, pseudo_target_logits], dim=0)\n pred_class_logits = torch.cat([pred_class_back_logits, pred_class_logits], dim=0)\n weight_logits = torch.cat([weight_back_logits, weight_logits], dim=0)\n pseudo_target_boxes_collect.append(pseudo_target_boxes)\n pseudo_target_logits_collect.append(pseudo_target_logits)\n pred_boxes_collect.append(pred_box)\n pred_logits_collect.append(pred_class_logits)\n weight_logits_collect.append(weight_logits)\n weight_boxes_collect.append(weight_boxes)\n return torch.cat(pred_logits_collect), torch.cat(pred_boxes_collect), torch.cat(pseudo_target_logits_collect), torch.cat(pseudo_target_boxes_collect), torch.cat(weight_logits_collect), torch.cat(weight_boxes_collect)", "def calculate_crowding(solutions: list):\n\n def crowding_distance(all_solutions: list, idx: int, measure_idx: int):\n \"\"\"\n Function that calculates the crowding distance (cuboid) for a certain solution.\n\n Parameters\n ------------\n :param all_solutions: list\n All solutions.\n :param idx: int\n Index indicating the objective solution for which the crowding will be calculated.\n :param measure_idx: int\n Indicates the index at which the scores of a certain objective function are found.\n \"\"\"\n # Get target function values\n measured_values = [solution.values[measure_idx] for solution in all_solutions]\n f_max = max(measured_values)\n f_min = min(measured_values)\n\n # If all the solutions are the same crowding is 0\n if f_max == f_min:\n return 0\n\n # Calculate crowding distance\n distance = (measured_values[idx + 1] - measured_values[idx - 1]) / \\\n (max(measured_values) - min(measured_values))\n\n return distance\n\n # Get the number of target functions\n num_objectives = len(solutions[0].values)\n\n for measure in range(num_objectives):\n\n # Sort solutions based on measure value (ascending)\n solutions = sorted(solutions, key=lambda solution: solution.values[measure])\n\n # Select limits to infinite\n solutions[0].crowding_distance, solutions[len(solutions) - 1].crowding_distance = infinite, infinite\n\n # Calculate crowding distance for target function\n for i in range(1, len(solutions) - 1):\n solutions[i].crowding_distance += crowding_distance(all_solutions=solutions,\n idx=i, measure_idx=measure)", "def derive_class_hierarchy():\n logger.info('Deriving class hierarchy ...')\n data = statistics.get_json_data('classes')\n\n hierarchy = defaultdict(dict)\n keys = ['i', 's', 'ai', 'as', 'sc', 'sb', 'r']\n\n for cid in data:\n for key in keys:\n if key in data[cid] and data[cid][key]:\n hierarchy[cid][key] = data[cid][key]\n\n 
statistics.update_json_data('classes/hierarchy', hierarchy)\n statistics.update_split_json_data('classes/hierarchy', hierarchy, 1000)" ]
[ "0.60938364", "0.5795412", "0.5308555", "0.5189766", "0.5157798", "0.50664556", "0.49884278", "0.49517813", "0.48900566", "0.48891792", "0.48850486", "0.48730367", "0.48566884", "0.48272377", "0.48231125", "0.47985056", "0.47855023", "0.47668108", "0.47496378", "0.47456798", "0.47077027", "0.47010013", "0.46897787", "0.4674194", "0.46642837", "0.4649147", "0.46001503", "0.45964602", "0.45795494", "0.45658836" ]
0.75999105
0
Adds provided Example to the correct Hypercube in BasicGrid
def add_example_to_grid(self, example):
    indices = tuple([int(example.coords[x] / self.hypercube_measurements[x]) for x in range(self.dims - 1, -1, -1)])
    self.hypercubes[indices].add_example(example)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def example(self, example):\n self._example = example\n return self", "def add(self, sample, **kwargs):\n if not self.samples:\n self.init(sample)\n self.samples.append(sample)", "def batch_update(self, examples):\n print(\"Updating the BaseGrid with a batch of examples\")\n examples_grouping_dict = {}\n for example in examples:\n hypercubes_coords = tuple(\n [int(example.coords[i] / self.hypercube_measurements[i]) for i in range(self.dims - 1, -1, -1)])\n examples_grouping_dict[(example.class_id, hypercubes_coords)] = examples_grouping_dict.get((example.class_id, hypercubes_coords), [])\n examples_grouping_dict[(example.class_id, hypercubes_coords)].append(example)\n for (class_id, hypercubes_coords), example_list in examples_grouping_dict.items():\n self.hypercubes[hypercubes_coords].update_basic(example_list=example_list)\n if self.child_grid:\n self.child_grid.batch_update(examples=examples_grouping_dict)", "def instantiate_examples(self, examples : [dict]) -> [dict]:\n raise NotImplementedError()", "def update(self, example, hypercubes_coords=None):\n hypercubes_coords = tuple(\n [int(example.coords[i] / self.hypercube_measurements[i]) for i in range(self.dims - 1, -1, -1)])\n new_class = self.hypercubes[hypercubes_coords].update_basic(example_list=[example])\n print(\"Update. Changed class of \" + str(hypercubes_coords) + \" to: \" + str(new_class))\n if self.child_grid:\n self.child_grid.update(example=example, hypercubes_coords=hypercubes_coords)", "def add_alpha_helix(self, alpha_helix):\n assert self.default_model is not None\n self.default_model.add_alpha_helix(alpha_helix)", "def target(self, example):\n raise NotImplementedError()", "def register():\n\n def grid_sampler(g, input, grid, mode, padding_mode, align_corners):\n # mode\n # 'bilinear' : onnx::Constant[value={0}]\n # 'nearest' : onnx::Constant[value={1}]\n # 'bicubic' : onnx::Constant[value={2}]\n # padding_mode\n # 'zeros' : onnx::Constant[value={0}]\n # 'border' : onnx::Constant[value={1}]\n # 'reflection' : onnx::Constant[value={2}]\n mode = symbolic_helper._maybe_get_const(mode, \"i\")\n padding_mode = symbolic_helper._maybe_get_const(padding_mode, \"i\")\n mode_str = [\"bilinear\", \"nearest\", \"bicubic\"][mode]\n padding_mode_str = [\"zeros\", \"border\", \"reflection\"][padding_mode]\n align_corners = int(symbolic_helper._maybe_get_const(align_corners, \"b\"))\n\n # From opset v13 onward, the output shape can be specified with\n # (N, C, H, W) (N, H_out, W_out, 2) => (N, C, H_out, W_out)\n # input_shape = input.type().sizes()\n # gird_shape = grid.type().sizes()\n # output_shape = input_shape[:2] + gird_shape[1:3]\n # g.op(...).setType(input.type().with_sizes(output_shape))\n\n return g.op(\n \"com.microsoft::GridSample\",\n input,\n grid,\n mode_s=mode_str,\n padding_mode_s=padding_mode_str,\n align_corners_i=align_corners,\n )\n\n _reg(grid_sampler)\n\n def inverse(g, self):\n return g.op(\"com.microsoft::Inverse\", self).setType(self.type())\n\n _reg(inverse)\n\n @torch.onnx.symbolic_helper.parse_args(\"v\", \"s\")\n def gelu(g, self: torch._C.Value, approximate: str = \"none\"):\n # Use microsoft::Gelu for performance if possible. 
It only supports approximate == \"none\"\n if approximate == \"none\":\n return g.op(\"com.microsoft::Gelu\", self).setType(self.type())\n return torch.onnx.symbolic_opset9.gelu(g, self, approximate)\n\n _reg(gelu)\n\n def triu(g, self, diagonal):\n return g.op(\"com.microsoft::Trilu\", self, diagonal, upper_i=1).setType(self.type())\n\n _reg(triu)\n\n def tril(g, self, diagonal):\n return g.op(\"com.microsoft::Trilu\", self, diagonal, upper_i=0).setType(self.type())\n\n _reg(tril)", "def register():\n # DeepOBS\n deepobs.pytorch.testproblems.two_d_quadratic = two_d_quadratic\n\n # for CockpitPlotter\n if \"scalar\" in deepobs.config.DATA_SET_NAMING.keys():\n assert deepobs.config.DATA_SET_NAMING[\"scalar\"] == \"Scalar\"\n else:\n deepobs.config.DATA_SET_NAMING[\"scalar\"] = \"Scalar\"\n\n if \"deep\" in deepobs.config.TP_NAMING.keys():\n assert deepobs.config.TP_NAMING[\"deep\"] == \"Deep\"\n else:\n deepobs.config.TP_NAMING[\"deep\"] = \"deep\"\n\n # BackOBS\n backobs.utils.ALL += (two_d_quadratic,)\n backobs.utils.REGRESSION += (two_d_quadratic,)\n backobs.utils.SUPPORTED += (two_d_quadratic,)\n backobs.integration.SUPPORTED += (two_d_quadratic,)", "def insert(self, sample, *args):\n raise NotImplementedError", "def add_alpha_helix(self, alpha_helix):\n assert isinstance(alpha_helix, AlphaHelix)\n self.alpha_helix_list.append(alpha_helix)\n alpha_helix.model = self", "def add_helix(self, helix):\n id = helix.lattice_num\n if (id not in self.helix_list):\n self.helix_list[id] = helix", "def setSampledExamplesLabels(self, X, Y):\n\n exName = \"X\"\n labelsName = \"y\"\n\n self.addDataField(exName, X)\n self.addDataField(labelsName, Y)\n self.setDefaultExamplesName(exName)\n self.setLabelsName(labelsName)", "def add_selected_to_examples(self, *args):\n\t\tself.log('Boom. 
Adding Selected to examples')\n\t\ttext_bucket = ''\n\t\tselection = cmds.ls(selection = True, long = True)\n\t\tself.log('selection is: %s' % selection)\n\t\ttext_bucket = cmds.scrollField(self.example_comments, query = True, text = True)\n\t\tif text_bucket:\n\t\t\tself.log('text_bucket is TRUE:: %s' % text_bucket)\n\t\t\tfor selected in selection:\n\t\t\t\ttext_bucket += ( \", \" + selected)\n\t\telse:\n\t\t\tfor selected in selection:\n\t\t\t\ttext_bucket += (selected + ', ')\n\t\t\ttext_bucket = text_bucket.rstrip(', ')\n\n\t\tcmds.scrollField(self.example_comments, edit = True, text = text_bucket)\n\n\n\n\t\tself.update_subcategory('example_comments_text')", "def example(km_ij, jobs, params_groups, persons_gi):\n groups, zones = persons_gi.shape\n example = Example(groups, zones)\n example.set_array('km_ij', km_ij)\n example.set_array('jobs_j', jobs)\n example.set_array('param_g', params_groups)\n example.set_array('persons_gi', persons_gi)\n return example", "def set_help_examples(self, examples):\n\n self.help_examples.extend(examples)", "def __add__(self, obj):\n if isinstance(obj, vtk.vtkProp3D):\n self.AddPart(obj)\n\n self.actors.append(obj)\n\n if hasattr(obj, \"scalarbar\") and obj.scalarbar is not None:\n if self.scalarbar is None:\n self.scalarbar = obj.scalarbar\n return self\n\n def unpack_group(scalarbar):\n if isinstance(scalarbar, Group):\n return scalarbar.unpack()\n else:\n return scalarbar\n\n if isinstance(self.scalarbar, Group):\n self.scalarbar += unpack_group(obj.scalarbar)\n else:\n self.scalarbar = Group([unpack_group(self.scalarbar), unpack_group(obj.scalarbar)])\n self.pipeline = vedo.utils.OperationNode(\"add mesh\", parents=[self, obj], c=\"#f08080\")\n return self", "def create_grids_structure(self):\n for indices, hypercube in np.ndenumerate(self.hypercubes):\n self.hypercubes[indices] = Hypercube(coords=indices)", "def modify_image(self, example, target_label):\n raise NotImplementedError()", "def update(self, example, hypercubes_coords):\n hypercubes_coords = tuple([int(x / 2) for x in hypercubes_coords])\n new_class = self.hypercubes[hypercubes_coords].update_lower_level(example_class=example.class_id, example_count=1, threshold=self.threshold)\n print(\"Update. 
Changed class of \" + str(hypercubes_coords) + \" to: \" + str(new_class))\n if self.child_grid:\n self.child_grid.update(example=example, hypercubes_coords=hypercubes_coords)", "def add(topcell, subcell, center=(0,0)):\n topcell.add(gdspy.CellReference(subcell, origin=center))", "def example(self):\n self.set_call_layout()\n how_many = self.nb_examples\n columns = self.layout_args if self.layout_args \\\n else default_layout_args\n exo_layout = self.layout\n ref_class = self.solution\n\n how_many_samples = self.nb_examples if self.nb_examples \\\n else len(self.scenarios)\n\n # can provide 3 args (convenient when it's the same as correction) or just 2\n columns = columns[:2]\n c1, c2 = columns\n #print(\"Using columns={}\".format(columns))\n table = Table(style=font_style)\n html = table.header()\n\n sample_scenarios = self.scenarios[:how_many_samples]\n for i, scenario in enumerate(sample_scenarios):\n # first step has to be a constructor\n assert len(scenario)>=1 and scenario[0][0] == '__init__'\n \n methodname, args_obj = scenario[0]\n # always render the classname\n args_obj.render_function_name(self.name)\n\n # start of scenario\n legend = CellLegend(\"Scénario {}\".format(i+1))\n html += TableRow(\n cells=[TableCell(legend, colspan=4, tag='th',\n style='text-align:center')],\n style=header_font_style).html()\n cells = [ TableCell(CellLegend(x), tag='th') for x in ('Appel', 'Attendu')]\n html += TableRow(cells = cells).html()\n \n ref_args = args_obj.clone(self.copy_mode)\n ref_args.render_function_name(self.name)\n ref_obj = ref_args.init_obj(ref_class)\n cells = (TableCell(args_obj, layout=self.layout, width=c1),\n TableCell(CellLegend('-')))\n html += TableRow(cells=cells).html()\n\n for methodname, args_obj in scenario[1:]:\n ref_args = args_obj.clone(self.copy_mode)\n ref_args.render_function_name(methodname)\n ref_result = ref_args.call_obj(ref_obj, methodname)\n cells = ( TableCell(ref_args, layout=self.layout, width=c1),\n TableCell(ref_result, layout=self.layout, width=c2))\n html += TableRow(cells=cells).html()\n\n html += table.footer()\n return HTML(html)", "def append_inst(self, inst):\n inst.basic_block = self\n self.insts.append(inst)", "def test_add(self):\n gm = GridMap(threshold=2, bit_depth=8)\n gm.add(14, 7, \"hello world\")\n gm.add(0, 0, \"DEADBEEF\")\n gm.add(255, 255, \"meh\")\n for x in range(256):\n for y in range(256):\n gm.add(x, y, \"\")\n gm.add(0, 255, None)\n gm.add(15, 31, 3.14598)", "def is_example(self, is_example):\n self._is_example = is_example", "def add_exercise( self, exercise ):\n self.exercises.append( exercise )", "def example():\n sampler = DoomSampler()\n # Current architecture works on batches of 32 levels\n # Noise shape is (batch_size, 100)\n noise_shape = [32, 100]\n # Input Feature vector shape is (batch_size, 7) - Refer to DoomLevelsGAN/network_architecture.py\n feat_shape = [32, 7]\n\n # Generating random noise and input features as example\n noise = np.random.random(size=noise_shape)\n input_features = np.random.random(size=feat_shape)\n out_features = sampler.get_features_of(noise, input_features)\n\n # Retrieving samples\n levels = sampler.last_generated_samples\n\n # Printing feature values\n for f_name, f_list in out_features.items():\n print(\"Feature: {} \\n {}\".format(f_name, f_list))", "def gen_hypercube(samples, N):\n\n np.random.seed(4654562)\n hypercube = lhs(N, samples=samples)\n\n return hypercube", "def draw_samples_grid_vae(model,\n \t\t\t\t\t num_row=15,\n \t\t\t\t\t num_colum=15,\n \t\t\t\t\t images_size=(28, 
28)):\n\n grid_x = norm.ppf(np.linspace(0.05, 0.95, num_colum))\n grid_y = norm.ppf(np.linspace(0.05, 0.95, num_row))\n\n figure = np.zeros((images_size[0] * num_colum, images_size[1] * num_row))\n for i, y_i in enumerate(grid_x):\n for j, x_i in enumerate(grid_y):\n z_sample = np.array([[x_i, y_i]])\n\n x_sample = model.q_x(torch.from_numpy(z_sample).float()).view(\n images_size).cpu().data.numpy()\n\n image = x_sample\n figure[i * images_size[0]: (i + 1) * images_size[0],\n j * images_size[1]: (j + 1) * images_size[1]] = image\n\n return figure", "def add_samples(self, samples):\n samples = [samples] if isinstance(samples, Sample) else samples\n for sample in samples:\n if isinstance(sample, Sample):\n self._samples.append(sample)\n self[SAMPLE_EDIT_FLAG_KEY] = True\n else:\n _LOGGER.warning(\"not a peppy.Sample object, not adding\")" ]
[ "0.6088038", "0.5691631", "0.5638335", "0.5430962", "0.53936327", "0.53669506", "0.5353391", "0.51910037", "0.5160521", "0.5118814", "0.5102909", "0.5083791", "0.50791067", "0.5072332", "0.5069205", "0.50586724", "0.50537837", "0.5049797", "0.50126547", "0.50125664", "0.49871612", "0.49855286", "0.49554482", "0.49501476", "0.49406394", "0.49340612", "0.49136165", "0.4908869", "0.488405", "0.48833957" ]
0.8240919
0
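The add_example_to_grid document above maps an observation's continuous coordinates to a grid cell by integer division, walking the dimensions in reverse so the index tuple is axis-reversed relative to the coordinates. A minimal standalone sketch of just that index computation (the helper name and the 2-D values are assumptions):

def coords_to_indices(coords, measurements):
    dims = len(coords)
    # reverse-order integer division, as in the retrieved method
    return tuple(int(coords[x] / measurements[x]) for x in range(dims - 1, -1, -1))

coords = (1.2, 0.3)          # continuous coordinates of one Example (assumed)
measurements = (0.5, 0.5)    # edge length of a hypercube in each dimension (assumed)
print(coords_to_indices(coords, measurements))  # (0, 2) -- note the reversed axis order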
Sets classes for all hypercubes in self and in its child (LowerLevelGrid).
def set_hypercubes_classes(self):
    print("Setting the BaseGrid hypercubes' classes.")
    list_of_all_hc = list(self.hypercubes.flatten())
    print("Number of hypercubes: " + str(len(list_of_all_hc)))
    for hypercube in list_of_all_hc:
        hypercube.set_hypercube_class()
    if self.child_grid:
        self.child_grid.set_hypercubes_classes()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_hypercubes_classes(self):\n print(\"Setting the Hypercubes' classes of grid at level: \" + str(self.level))\n for hypercube in self.hypercubes.flatten():\n coordinates = []\n for coord in hypercube.coords:\n coordinates.append([2 * coord, 2 * coord + 1])\n parents_list = []\n for indices in list(itertools.product(*coordinates)):\n parents_list.append(self.parent_hypercubes[tuple(reversed(indices))])\n hypercube.set_lower_level_hypercube_class(parent_hypercubes=parents_list, threshold=self.threshold)\n if self.child_grid:\n self.child_grid.set_hypercubes_classes()", "def update(self, example, hypercubes_coords):\n hypercubes_coords = tuple([int(x / 2) for x in hypercubes_coords])\n new_class = self.hypercubes[hypercubes_coords].update_lower_level(example_class=example.class_id, example_count=1, threshold=self.threshold)\n print(\"Update. Changed class of \" + str(hypercubes_coords) + \" to: \" + str(new_class))\n if self.child_grid:\n self.child_grid.update(example=example, hypercubes_coords=hypercubes_coords)", "def batch_update(self, parents_classes_dict):\n print(\"Updating the LowerLevelGrid with a batch of examples\")\n for (class_id, coords), examples in parents_classes_dict.items():\n coords = tuple([int(x / 2) for x in coords])\n self.hypercubes[coords].update_lower_level(example_class=class_id, example_count=len(examples), threshold=self.threshold)\n if self.child_grid:\n self.child_grid.batch_update(parents_classes_dict=parents_classes_dict)", "def update(self, example, hypercubes_coords=None):\n hypercubes_coords = tuple(\n [int(example.coords[i] / self.hypercube_measurements[i]) for i in range(self.dims - 1, -1, -1)])\n new_class = self.hypercubes[hypercubes_coords].update_basic(example_list=[example])\n print(\"Update. Changed class of \" + str(hypercubes_coords) + \" to: \" + str(new_class))\n if self.child_grid:\n self.child_grid.update(example=example, hypercubes_coords=hypercubes_coords)", "def setup_class(self):\n\n class SubFLRW(FLRW):\n def w(self, z):\n return super().w(z)\n\n self.cls = SubFLRW\n # H0, Om0, Ode0\n self.cls_args = (70 * u.km / u.s / u.Mpc, 0.27 * u.one, 0.689 * u.one)\n self.cls_kwargs = dict(Tcmb0=3.0 * u.K, name=self.__class__.__name__, meta={\"a\": \"b\"})", "def __init__(self, initial_grid):\n part_1.Grid.__init__(self, initial_grid)\n self.turn_on_corners()", "def setup_class(self):\n class SubCosmology(Cosmology):\n\n H0 = Parameter(unit=u.km / u.s / u.Mpc)\n Tcmb0 = Parameter(unit=u.K)\n\n def __init__(self, H0, Tcmb0=0*u.K, name=None, meta=None):\n super().__init__(name=name, meta=meta)\n self._H0 = H0\n self._Tcmb0 = Tcmb0\n\n self.cls = SubCosmology\n self.cls_args = (70 * (u.km / u.s / u.Mpc), 2.7 * u.K)\n self.cls_kwargs = dict(name=self.__class__.__name__, meta={\"a\": \"b\"})", "def create_grids_structure(self):\n for indices, hypercube in np.ndenumerate(self.hypercubes):\n self.hypercubes[indices] = Hypercube(coords=indices)", "def set_grid(self,ug):\n self.grd=ug\n self.set_topology()", "def initialize_grid(self) -> None:\n for i in range(self.grid_size[0]):\n for j in range(self.grid_size[1]):\n self.set(i, j, self.base_color)", "def set_sclasses(self, w: Wrapper, classes: Any) -> None:\n w.setProperty(self.style_sclass_property, f\" {' '.join(set(classes))} \")", "def __init__(self):\n\n for layer in self._layer_class_map:\n setattr(self, layer, self._layer_class_map[layer]())", "def __init__(self, grid_size=7, num_bboxes=2, num_classes=20):\r\n super(Loss, self).__init__()\r\n self.S = grid_size\r\n self.B = num_bboxes\r\n 
self.C = num_classes", "def __init__(self, grid_cols: int = 8, **attrs: Any) -> None:\n\n super().__init__(**attrs)\n\n self.grid_cols = grid_cols\n self.forced_width = self.grid_cols * 4 - 1 + self.sidelength\n self.width = self.forced_width\n\n self._layer_functions = [foreground, background]\n\n self.layer = 0", "def set_config_layers_by_class(self, class_name, **items):\n for layer in self._layers:\n if layer.__class__.__name__.lower().startswith(class_name.lower()):\n self.set_config_layer(layer.name, **items)", "def set_hypercubes_parents_indices(self):\n for hypercube in self.hypercubes.flatten():\n coordinates = []\n for coord in hypercube.coords:\n coordinates.append([2 * coord, 2 * coord + 1])\n for indices in list(itertools.product(*coordinates)):\n hypercube.parent_hypercubes_indices.append(tuple(indices))", "def setUpClass(cls):\n\t\t\n\t\tww.weightwatcher.torch = torch\t\t\n\t\treturn", "def update_class(self):\n neighbors_set = list(set(self.neighbors))\n counts = np.array([self.neighbors.count(n) for n in neighbors_set])\n probs = (counts / counts.sum()) * (1-self.mutation_prob)\n probs = np.append(probs, self.mutation_prob)\n neighbors_set.append(np.random.choice(np.arange(1, self.board.classes))) \n\n self.cell_class = np.random.choice(neighbors_set, p=probs)", "def create_lower_level_grid(self):\n if self.level == 1:\n return False\n else:\n return LowerLevelGrid(level=self.level - 1, parent_hypercubes_number=self.hypercubes_number, parent_hypercubes=self.hypercubes, dims=self.dims)", "def customise_grid(self):\n\n # Agent colours, agent tooltips and grid initialisation\n mapper = linear_cmap(field_name='category',\n palette=['blue', 'red', 'green', 'orange', 'purple'] ,\n low=0,\n high=4)\n TOOLTIPS = [(\"Residential utility\", \"@res_utility\"),\n ('Local composition', '@local_comp'),\n ('Neighbourhood composition', '@n_comp'),\n (\"School utility\", \"@school_utility\"),\n ('Distance', '@dist_school'),\n ('School composition', '@s_comp'),\n ('School composition utility', '@school_comp_utility')]\n hover = HoverTool(names=[\"households\", \"schools\"], tooltips=TOOLTIPS)\n self.grid = figure(x_range=(self.model.grid.x_min - 1,\n self.model.grid.x_max),\n y_range=(self.model.grid.y_min - 1,\n self.model.grid.y_max),\n tools=[hover, 'tap', 'pan',\n WheelZoomTool()],\n tooltips=TOOLTIPS, output_backend=\"webgl\")\n\n # Set WheelZoomTool active by default if not lattice\n if self.params['case'].lower() != 'lattice':\n self.grid.toolbar.active_scroll = self.grid.select_one(\n WheelZoomTool)\n\n # Add a background map using OpenStreetMap (Google Maps is too\n # computationally expensive and cannot zoom properly)\n self.grid.add_tile(get_provider('OSM'))\n\n self.grid.axis.visible = False\n self.grid.grid.visible = False\n # Function to highlight all households that are currently enrolled in\n # the same school.\n self.source.selected.on_change(\"indices\", self.select_households)\n\n # Plot households\n self.grid.circle(x=\"x\",\n y=\"y\",\n size=5,\n view=self.household_view,\n source=self.source,\n fill_color=mapper,\n line_color='black',\n alpha=0.8,\n nonselection_fill_alpha=0.2,\n selection_fill_alpha=1,\n name='households')\n\n # Plot schools\n self.grid.circle(x=\"x\",\n y=\"y\",\n size=7,\n source=self.source,\n view=self.school_view,\n fill_color='yellow',\n line_color='black',\n name='schools')\n\n # Plot neighbourhoods\n self.grid.patches('x',\n 'y',\n source=self.source,\n view=self.neighbourhood_view,\n fill_color=None,\n line_color=\"black\",\n line_width=2,\n 
hover_alpha=0,\n hover_line_color=None,\n name='neighbourhoods',\n selection_fill_alpha=0)", "def setUpClass(cls):\n\n Base._Base__nb_objects = 0\n cls.b1 = Base()\n cls.b2 = Base()\n cls.b3 = Base(22)\n cls.b4 = Base(2.2)\n cls.b5 = Base(\"two\")\n cls.r1 = Rectangle(10, 7, 2, 8)\n cls.r2 = Rectangle(2, 4)", "def process_class(self, parent, cls):\n if cls.typemap.flat_name in self.class_map:\n raise RuntimeError(\"process_class: class {} already exists in class_map\"\n .format(cls.typemap.flat_name))\n self.class_map[cls.typemap.flat_name] = cls\n for var in cls.variables:\n self.add_var_getter_setter(parent, cls, var)\n cls.functions = self.define_function_suffix(cls.functions)", "def setUpClass(cls):\n # Call the setUpClass method(s) of the parent class(es)", "def set_class_list(self, L):\n\t\tself.class_list = L", "def __init__(self, hparams):\n super(ThreeLayerClassifier, self).__init__()\n self.hparams = hparams\n self.layer_1 = torch.nn.Linear(self.hparams[\"input_size\"], 128)\n self.layer_2 = torch.nn.Linear(128, 256)\n self.layer_3 = torch.nn.Linear(256, self.hparams[\"targets\"])", "def draw(self):\n pos = Point([2,2])\n\n if self.classes == None:\n classes = self.lumpy.get_class_list()\n else:\n classes = [make_thing(self.lumpy, cls) for cls in self.classes]\n\n # find the classes that have no parents, and find the\n # height of each tree\n roots = [c for c in classes if c.parents == []]\n for root in roots:\n root.set_height()\n\n # for all the leaf nodes, compute the distance to\n # the parent\n leafs = [c for c in classes if c.childs == []]\n for leaf in leafs:\n leaf.set_depth()\n\n # if we're drawing all the classes, start with the roots;\n # otherwise draw the classes we were given.\n if self.classes == None:\n drawn = self.draw_classes(roots, pos)\n else:\n drawn = self.draw_classes(classes, pos)\n \n self.draw_arrows()\n\n # configure the scroll region\n bbox = Canvas.bbox(self.canvas, ALL)\n self.canvas.configure(scrollregion=bbox)", "def configure_grid(self):\r\n\r\n for r in range(3):\r\n self.rowconfigure(r, weight=1)\r\n for c in range(3):\r\n self.columnconfigure(c, weight=1)", "def CSSClasses(self):", "def cla(self):\n # Don't forget to call the base class\n Axes.cla(self)\n \n x_min = 0\n y_min = 0\n x_max = 1\n y_max = 1\n x_spacing = 0.1\n y_spacing = 0.1\n self.xaxis.set_minor_locator(NullLocator())\n self.yaxis.set_minor_locator(NullLocator())\n self.xaxis.set_ticks_position('bottom')\n self.yaxis.set_ticks_position('left')\n Axes.set_xlim(self, x_min, x_max)\n Axes.set_ylim(self, y_min, y_max)\n self.xaxis.set_ticks(np.arange(x_min, x_max+x_spacing, x_spacing))\n self.yaxis.set_ticks(np.arange(y_min, y_max+y_spacing, y_spacing))", "def __init__(self):\n self._grid = [[None]]" ]
[ "0.89251757", "0.6874839", "0.64062953", "0.62887317", "0.5722013", "0.5634838", "0.54932666", "0.54521555", "0.54472536", "0.54459906", "0.5339989", "0.5315919", "0.52430695", "0.52424365", "0.5172487", "0.5148295", "0.51459616", "0.50925523", "0.5066024", "0.5049924", "0.503075", "0.50297284", "0.5006576", "0.5001757", "0.49713388", "0.49333596", "0.4920067", "0.4916158", "0.49113762", "0.4909638" ]
0.8686745
1
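The set_hypercubes_classes record above follows a simple delegation pattern: label every cube at the current level, then recurse into the child grid. A toy, runnable sketch of that pattern (class names and the placeholder labelling rule are assumptions, not the dataset's code):

class ToyCube:
    def __init__(self):
        self.hypercube_class = None
    def set_hypercube_class(self):
        self.hypercube_class = "majority"   # placeholder rule only

class ToyGrid:
    def __init__(self, cubes, child=None):
        self.cubes = cubes
        self.child_grid = child
    def set_hypercubes_classes(self):
        for cube in self.cubes:             # label every cube at this level
            cube.set_hypercube_class()
        if self.child_grid:                 # then recurse one level down
            self.child_grid.set_hypercubes_classes()

leaf = ToyGrid([ToyCube()])
root = ToyGrid([ToyCube(), ToyCube()], child=leaf)
root.set_hypercubes_classes()               # every cube in both grids gets a label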
Computes every hypercube's center by taking the midpoint between the beginning and the end of the hypercube in every dimension.
def compute_centers_of_hypercubes(self):
    for hc in self.hypercubes.flatten():
        for i in range(self.dims - 1, -1, -1):
            index = self.dims - (i + 1)
            hc.center[i] = (hc.coords[index] + 0.5) * self.hypercube_measurements[index]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_centers_of_hypercubes(self):\n for hypercube in self.hypercubes.flatten():\n sums = np.zeros((len(hypercube.coords)))\n for coords in hypercube.parent_hypercubes_indices:\n for index, summ in enumerate(sums):\n sums[index] += self.parent_hypercubes[coords].center[index]\n hypercube.center = [x / 4 for x in sums]", "def center(box):\n x_center = box[:, 0] + (box[:, 2] - box[:, 0]) // 2\n y_center = box[:, 1] + (box[:, 3] - box[:, 1]) // 2\n return torch.stack((x_center, y_center)).t().to(box.device)", "def centre(arrayin):\r\n ny = arrayin.shape[0]\r\n nx = arrayin.shape[1]\r\n cy = 0.0\r\n cx = 0.0\r\n for i in range(ny):\r\n for j in range(nx):\r\n cy += np.float64(arrayin[i,j]) * np.float64(i - ny/2 + 1)\r\n cx += np.float64(arrayin[i,j]) * np.float64(j - nx/2 + 1)\r\n cx = cx / np.sum(arrayin)\r\n cy = cy / np.sum(arrayin)\r\n arrayout = np.roll(arrayin ,-int(cy),0)\r\n arrayout = np.roll(arrayout,-int(cx),1)\r\n return [arrayout,cy,cx]", "def find_center(self):\n x = np.int(np.rint((len(self.grid[0][0]))/2))\n center = np.array([x, x, x])\n self.grid[center[0]][center[1]][center[2]] = 1\n return self.grid, center", "def center(self):\n bounds = self.bounds\n x = (bounds[1] + bounds[0]) / 2\n y = (bounds[3] + bounds[2]) / 2\n z = (bounds[5] + bounds[4]) / 2\n return [x, y, z]", "def center_size(boxes):\n concat = P.Concat(1)\n return concat(((boxes[:, 2:] + boxes[:, :2])/2, # cx, cy\n boxes[:, 2:] - boxes[:, :2])) # w, h", "def _face_center(mesh, face):\n center = [0, 0, 0]\n for vert in face.vertices:\n center = _list_plus(vert, center)\n new_list = [x / len(face.vertices) for x in center]\n return new_list", "def center_size(boxes):\n return torch.cat([(boxes[:, :2] + boxes[:, 2:])/2, # cx, cy\n boxes[:, :2] - boxes[:, 2:]], 1) # w, h", "def center(self):\n points = set()\n for face in self._points:\n points.update(face)\n x_points = [point[0] for point in points]\n y_points = [point[1] for point in points]\n z_points = [point[2] for point in points]\n return \\\n (np.average(x_points), np.average(y_points), np.average(z_points))", "def calculate_center(self):\n return [(self.startX + self.endX) / 2., (self.startY + self.endY) / 2.]", "def to_center_form(boxes):\n x_min, y_min = boxes[:, 0], boxes[:, 1]\n x_max, y_max = boxes[:, 2], boxes[:, 3]\n center_x = (x_max + x_min) / 2.\n center_y = (y_max + y_min) / 2.\n width = x_max - x_min\n height = y_max - y_min\n return np.concatenate([center_x[:, None], center_y[:, None],\n width[:, None], height[:, None]], axis=1)", "def center(self):\n try: \n return self._center\n except AttributeError:\n self._center = vector(ZZ, [0]*self.ambient_dim())\n for v in self.vertex_generator(): self._center += v.vector()\n self._center /= self.n_vertices()\n return self._center", "def get_square_centers(self):\n x_values = np.arange(-2, 4, 2) * np.ones(self.GRID_SHAPE)\n y_values = np.arange(2, -4, -2).reshape((3, 1)) * np.ones(self.GRID_SHAPE)\n x_values *= self.spacing\n x_values += self.center[0] # add x-coordinate for grid center\n y_values *= self.spacing\n y_values += self.center[1] # add y-coordinate for grid center\n return np.dstack((x_values, y_values))", "def populateCenters(matrix, row, col, frame, midRange, roughness, perturbance):\n maxIndex = matrix.shape[0]-1\n quarterRange = midRange/2\n\n pf = perturbanceFactor(matrix.shape[0], midRange, perturbance)\n noiseLevel = roughness * pf\n\n \"\"\"\n For each subdivided cube, getIndexRef is used to get the indicies, and center is used\n to determine the points that should be 
averaged and the point to be set. \n setValue does the calculations.\n \"\"\"\n indexRef = getIndexRef(row, col, frame, quarterRange, maxIndex)\n setValue(matrix, center, indexRef, noiseLevel)\n \n indexRef = getIndexRef(row, col, frame + midRange, quarterRange, maxIndex)\n setValue(matrix, center, indexRef, noiseLevel)\n \n indexRef = getIndexRef(row, col + midRange, frame, quarterRange, maxIndex)\n setValue(matrix, center, indexRef, noiseLevel)\n \n indexRef = getIndexRef(row + midRange, col, frame, quarterRange, maxIndex)\n setValue(matrix, center, indexRef, noiseLevel)\n \n indexRef = getIndexRef(row + midRange, col + midRange, frame, quarterRange, maxIndex)\n setValue(matrix, center, indexRef, noiseLevel)\n \n indexRef = getIndexRef(row + midRange, col, frame + midRange, quarterRange, maxIndex)\n setValue(matrix, center, indexRef, noiseLevel)\n \n indexRef = getIndexRef(row, col + midRange, frame + midRange, quarterRange, maxIndex)\n setValue(matrix, center, indexRef, noiseLevel)\n \n indexRef = getIndexRef(row + midRange, col + midRange, frame + midRange, quarterRange, maxIndex)\n setValue(matrix, center, indexRef, noiseLevel)\n \n\n #printAllowCancel(matrix)", "def gen_center_points(x_res, y_res, dim):\n center_points = []\n\n for x in range(math.floor(dim[0] / x_res)):\n for y in range(math.floor(dim[1] / y_res)):\n x = (x + 1) * x_res\n y = (y + 1) * y_res\n center_points.append((x, y))\n\n return center_points", "def center_image_grid_anchors(image_grid):\n for image in image_grid:\n center_image_anchor(image)", "def center_of_mass(im_binary, x_offset=0, y_offset=0):\n n = np.sum(im_binary)\n\n x = np.arange(im_binary.shape[1]) + x_offset\n y = np.arange(im_binary.shape[0]) + y_offset\n xv, yv = np.meshgrid(x, y)\n cx = np.sum(xv[im_binary]) / n\n cy = np.sum(yv[im_binary]) / n\n\n return cx, cy", "def center(self):\n return np.array([0,0,1/self.C+self.pos()])", "def compute_cell_center(seg_img: np.ndarray, labels: np.ndarray, results: np.ndarray) \\\n -> np.ndarray:\n for label in labels:\n if label != 0:\n all_points_z, all_points_x, all_points_y = np.where(seg_img == label)\n avg_z = np.round(np.mean(all_points_z))\n avg_x = np.round(np.mean(all_points_x))\n avg_y = np.round(np.mean(all_points_y))\n results[label] = [avg_z, avg_x, avg_y]\n\n return results", "def center(self):\n cyl = (len(self.cells) - 1) / 2 # Lower and upper bound of list slices\n cxl = (len(self.cells[0]) - 1) / 2\n cyu = len(self.cells) / 2 + 1\n cxu = len(self.cells[0]) / 2 + 1\n\n # candidates are all the cells in the middle,\n # accounting for even dimensions\n candidates = []\n\n for r in self.cells[cyl:cyu]:\n candidates += r[cxl:cxu]\n\n # center is the candidate with the most carrots\n center = max(candidates, key=lambda c: c.carrots)\n\n return center", "def get_center(self):\n return center_points(np.expand_dims(self.best_box, axis=0))[0]", "def center(self):\n # minz to offset the heights to 0\n mz = (self.maxz-self.minz)/2\n #mz = self.minz\n return (self.minx + self.width / 2, self.miny + self.height / 2, mz)", "def center(self):\n return np.sum(self.bbox, 0) / 2", "def get_cell_center_coordinates(self):\n import numpy as np\n x1, x2, x3 = np.ix_(*self.cell_center_coordinates)\n if self.geometry == 'cartesian':\n x, y, z = x1, x2, x3\n elif self.geometry == 'spherical':\n x = x1 * np.sin(x2) * np.cos(x3)\n y = x1 * np.sin(x2) * np.sin(x3)\n z = x1 * np.cos(x2)\n return x, y, z", "def cell_center_fast(seg_img: np.ndarray, labels: np.ndarray) -> np.ndarray:\n array_max_idx = max(labels)\n 
results = np.zeros((array_max_idx + 1, 3))\n results = compute_cell_center(seg_img, labels, results)\n\n return results", "def _get_centers(self, lwidth, lheight, batch_size):\n x_left, y_left = tf.meshgrid(tf.range(0, lheight), tf.range(0, lwidth))\n x_y = K.stack([x_left, y_left], axis = -1)\n x_y = tf.cast(x_y, dtype = self.dtype)/tf.cast(lwidth, dtype = self.dtype)\n x_y = tf.repeat(tf.expand_dims(tf.repeat(tf.expand_dims(x_y, axis = -2), self._num, axis = -2), axis = 0), batch_size, axis = 0)\n return x_y", "def center_size(boxes):\n wh = boxes[:, 2:] - boxes[:, :2] + 1.0\n if isinstance(boxes, np.ndarray):\n return np.column_stack((boxes[:, :2] + 0.5 * wh, wh))\n return torch.cat((boxes[:, :2] + 0.5 * wh, wh), 1)", "def center(x):\n return x - x.mean()", "def center(self, x):\n\n shape = x.shape\n nx = shape[1]\n ny = shape[0]\n hnx = nx // 2\n hny = ny // 2\n\n temp = x[0:hny, 0:hnx].copy()\n x[0:hny, 0:hnx] = x[hny:ny, hnx:nx].copy()\n x[hny:ny, hnx:nx] = temp\n\n temp = x[0:hny, hnx:nx].copy()\n x[0:hny, hnx:nx] = x[hny:ny, 0:hnx].copy()\n x[hny:ny, 0:hnx] = temp", "def center(image):\n\tsize = image.shape\n\thalf = int(np.ceil(size[0]/2))\n\timage = np.roll(np.roll(image, half, 0), half, 1)\n\treturn image" ]
[ "0.74476457", "0.6830486", "0.67943937", "0.6649868", "0.6605242", "0.6537745", "0.652884", "0.64794374", "0.64205414", "0.64118415", "0.638748", "0.6345151", "0.63197005", "0.63122505", "0.6282437", "0.6226204", "0.6219615", "0.62132144", "0.6184002", "0.61678565", "0.6144855", "0.61306214", "0.6125305", "0.6096988", "0.6093893", "0.608182", "0.60609406", "0.60495055", "0.6035089", "0.6028542" ]
0.8032393
0
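The centre computation above reduces to center[i] = (coords[dims - (i + 1)] + 0.5) * measurement[dims - (i + 1)]: the cell index is shifted by half a cell and scaled by the cell's edge length, with the axis order reversed. A worked 2-D example with assumed values:

dims = 2
measurements = (0.5, 1.0)   # assumed edge length per dimension
coords = (3, 1)             # assumed grid index of one hypercube (reversed axis order)

center = [0.0] * dims
for i in range(dims - 1, -1, -1):
    index = dims - (i + 1)
    center[i] = (coords[index] + 0.5) * measurements[index]

print(center)               # [1.5, 1.75]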
Predicts the class of an observation with given coordinates.
def test(self, example_coords):
    print("Predicting the class of an observation with coordinates: " + str(example_coords))
    hypercubes_coords = tuple(
        [int(example_coords[i] / self.hypercube_measurements[i]) for i in range(self.dims - 1, -1, -1)])
    if self.hypercubes[hypercubes_coords].hypercube_class is not EMPTY_HYPERCUBE_INDICATOR:
        return self.hypercubes[hypercubes_coords].hypercube_class
    else:
        print("Observation with coordinates " + str(example_coords) + " falls within an empty cube.")
        returned_class = self.child_grid.classify(example_coords=example_coords, hypercubes_coords=hypercubes_coords)
        if returned_class[0] == -1:
            returned_class = self.nearest_neighbours_class(example_coords=example_coords, parents_indices=returned_class[1])
        return returned_class
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def predict(self, coordinates):\n check_is_fitted(self, [\"force_\"])\n force_east, force_north = self.force_coords\n east, north = n_1d_arrays(coordinates, n=2)\n cast = np.broadcast(*coordinates[:2])\n npoints = cast.size\n components = (\n np.empty(npoints, dtype=east.dtype),\n np.empty(npoints, dtype=east.dtype),\n )\n if parse_engine(self.engine) == \"numba\":\n components = predict_2d_numba(\n east,\n north,\n force_east,\n force_north,\n self.mindist,\n self.poisson,\n self.force_,\n components[0],\n components[1],\n )\n else:\n components = predict_2d_numpy(\n east,\n north,\n force_east,\n force_north,\n self.mindist,\n self.poisson,\n self.force_,\n components[0],\n components[1],\n )\n return tuple(comp.reshape(cast.shape) for comp in components)", "def predict(self, x, **kwargs):\n kwargs = self.filter_sk_params(Sequential.predict_classes, kwargs)\n classes = self.model.predict_classes(x, **kwargs)\n return self.classes_[classes]", "def predict(self, coordinates):\n check_is_fitted(self, [\"region_\"])\n return tuple(comp.predict(coordinates) for comp in self.components)", "def predict(self, obs):\n pass", "def predict(self, X: np.ndarray):\n return np.apply_along_axis(self.estimate_class, 1, X)", "def predict(self, X):\n ...", "def predict(self, X):\n ...", "def predict(self, X):\n ...", "def predict(self, X):\n\n\t\tn_samples = X.shape[0]\n\t\tpredicted = np.zeros(n_samples)\n\n\t\tfor i in xrange(n_samples):\n\t\t\tpredicted[i] = self.classify_example(X[i])\n\n\t\treturn predicted", "def predict(self, X):", "def predict(self, X):", "def predict(self, x):\n \n\n return predictions", "def predict(self, X):\n res = self.predict_proba(X)\n positive_mask = res >= 0.5\n negative_mask = res < 0.5\n res[positive_mask] = self.POSITIVE_CLASS\n res[negative_mask] = self.NEGATIVE_CLASS\n return res", "def predict(self, obs):\n return self.model(obs)", "def predict(self, x):\n # *** START CODE HERE ***\n return self.clf.predict_classes(x.reshape(x.shape[0], 28, 28, 1))\n # *** END CODE HERE ***", "def predict(self, X):\n (t0, t1, t2) = self.theta\n g = lambda x: t0 + t1 * x[0] + t2 * x[1]\n return np.array([\n self.classes[1] if g(x) > 0 else self.classes[0]\n for x in X\n ])", "def predict(self,X):\n return self.classifier.predict(X)", "def predict(self,X):\n return self.classifier.predict(X)", "def predict(self,X):\n return self.classifier.predict(X)", "def predict(self,X):\n return self.classifier.predict(X)", "def predict(self, X):\n pass", "def predict(self, X):\n pass", "def predict(self, X):\n pass", "def predict(self, X):\n raise NotImplementedError", "def predict(self, x):\n raise NotImplementedError(\"Please Implement this method\")", "def predict(self,X): \n return self._predict(X)", "def predict(self, X):\n\t\tR = self.predict_soft(X)\t\t\t\t\t\t\t\t\t\t\t# compute soft output values\n\t\tY = R.argmax(1)\t\t\t\t\t\t\t\t\t\t\t\t\t\t# get index of maximum response\n\t\treturn self.classes[Y]\t\t\t\t\t\t\t\t\t\t\t\t# convert to saved class values", "def predict(self, X, check_input=True):\n if check_input:\n X = check_array(X)\n proba = self.predict_proba(X)\n return self.classes_.take(np.argmax(proba, axis=1), axis=0)", "def predict(self, predPoints=None):", "def predict(self, X):\n return self.classifier.predict(X)" ]
[ "0.69743305", "0.67340916", "0.6664514", "0.6588236", "0.6583106", "0.65313673", "0.65313673", "0.65313673", "0.6526987", "0.650605", "0.650605", "0.6498759", "0.6485663", "0.64440244", "0.6431313", "0.6431298", "0.6427786", "0.6427786", "0.6427786", "0.6427786", "0.6419594", "0.6419594", "0.6419594", "0.64144224", "0.6404117", "0.6390411", "0.6385287", "0.63809925", "0.6379946", "0.63605297" ]
0.67905843
1
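The test method above resolves a prediction in three steps: direct lookup in the cube the point falls into, a classify call on the child grid when that cube is empty, and a nearest-neighbour rescue when the child grid also returns -1. A runnable toy version of that fallback chain (all names, dictionaries and labels below are assumptions, not the dataset's code):

EMPTY = None

fine_grid = {(0, 2): "A", (1, 1): EMPTY}             # hypercube index -> class
coarse_grid = {(0, 1): "B", (0, 0): (-1, [(0, 0)])}  # -1 marks an undecided cube

def predict(fine_idx, coarse_idx, knn=lambda parents: "knn-label"):
    if fine_grid.get(fine_idx) is not EMPTY:
        return fine_grid[fine_idx]                   # step 1: direct hit
    result = coarse_grid[coarse_idx]                 # step 2: coarser grid decides
    if isinstance(result, tuple) and result[0] == -1:
        return knn(result[1])                        # step 3: nearest-neighbour rescue
    return result

print(predict((0, 2), (0, 1)))   # "A"
print(predict((1, 1), (0, 1)))   # "B"
print(predict((1, 1), (0, 0)))   # "knn-label"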
Updates the grid with a given Example. If the Grid has a child, it is also forced to update itself with the new observation.
def update(self, example, hypercubes_coords=None):
    hypercubes_coords = tuple(
        [int(example.coords[i] / self.hypercube_measurements[i]) for i in range(self.dims - 1, -1, -1)])
    new_class = self.hypercubes[hypercubes_coords].update_basic(example_list=[example])
    print("Update. Changed class of " + str(hypercubes_coords) + " to: " + str(new_class))
    if self.child_grid:
        self.child_grid.update(example=example, hypercubes_coords=hypercubes_coords)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def batch_update(self, examples):\n print(\"Updating the BaseGrid with a batch of examples\")\n examples_grouping_dict = {}\n for example in examples:\n hypercubes_coords = tuple(\n [int(example.coords[i] / self.hypercube_measurements[i]) for i in range(self.dims - 1, -1, -1)])\n examples_grouping_dict[(example.class_id, hypercubes_coords)] = examples_grouping_dict.get((example.class_id, hypercubes_coords), [])\n examples_grouping_dict[(example.class_id, hypercubes_coords)].append(example)\n for (class_id, hypercubes_coords), example_list in examples_grouping_dict.items():\n self.hypercubes[hypercubes_coords].update_basic(example_list=example_list)\n if self.child_grid:\n self.child_grid.batch_update(examples=examples_grouping_dict)", "def add_example_to_grid(self, example):\n indices = tuple([int(example.coords[x] / self.hypercube_measurements[x]) for x in range(self.dims - 1, -1, -1)])\n self.hypercubes[indices].add_example(example)", "def update(self, example, hypercubes_coords):\n hypercubes_coords = tuple([int(x / 2) for x in hypercubes_coords])\n new_class = self.hypercubes[hypercubes_coords].update_lower_level(example_class=example.class_id, example_count=1, threshold=self.threshold)\n print(\"Update. Changed class of \" + str(hypercubes_coords) + \" to: \" + str(new_class))\n if self.child_grid:\n self.child_grid.update(example=example, hypercubes_coords=hypercubes_coords)", "def example(self, example):\n self._example = example\n return self", "def update_example(self, path: str):\n\n if not path.startswith(os.sep):\n raise ValueError(\"Path to fault-data must be absolute\")\n\n # Update info in the model for the currently loaded example\n path = path.rstrip(os.sep)\n zone_name, fault_time = utils.path_to_zone_and_timestamp(path)\n self.event_dir = path\n self.zone_name = zone_name\n self.fault_time = fault_time\n\n # Split up the path into it's constituent pieces\n tokens = path.split(os.sep)\n dt = datetime.strptime(f\"{tokens[-2]} {tokens[-1]}\", \"%Y_%m_%d %H%M%S.%f\")\n zone = tokens[-3]\n\n # Save the root data path into the rfwtools configuration. 
Windows is weird, C: doesn't get handled correctly.\n if platform.system() == \"Windows\":\n data_dir = os.path.join(tokens[0], os.sep, *tokens[1:-3])\n else:\n data_dir = os.path.join(os.path.sep, *tokens[:-3])\n Config().data_dir = data_dir\n\n # Update the example the model is currently loading\n self.example = Example(zone=zone, dt=dt, cavity_conf=math.nan, fault_conf=math.nan, cavity_label=\"\",\n fault_label=\"\", label_source=\"\")", "def update_observable(self):\n self.scenario.update_observable()", "def update(self):\n diff = self._diff()\n if not diff:\n # Nothing to do!\n return\n self.parent.update_node(self, diff)", "def batch_update(self, parents_classes_dict):\n print(\"Updating the LowerLevelGrid with a batch of examples\")\n for (class_id, coords), examples in parents_classes_dict.items():\n coords = tuple([int(x / 2) for x in coords])\n self.hypercubes[coords].update_lower_level(example_class=class_id, example_count=len(examples), threshold=self.threshold)\n if self.child_grid:\n self.child_grid.batch_update(parents_classes_dict=parents_classes_dict)", "def update_grid(self):\n # Check to see if we have moved squares\n _new_grid = self.calc_grid()\n if _new_grid == self._grid:\n return\n # Remove from old square and add to new square\n self.target._grid[self._grid][self._type].discard(self)\n self.target._grid[_new_grid][self._type].add(self)\n # Update coordinates\n self._grid = _new_grid", "def update(self, parent):\r\n pass", "def update(self):\n self.grid.update()\n sleep(self.update_rate)", "def update_positions(self, grid):\r\n self.grid = grid", "def updateObservation(self, obs):\n self.settingsDb.updateObservation(self.observationsTableName(), obs)", "def update(self, feature_col, feature_value, node_type, nodes, children=[]):\n self.feature_col = feature_col\n self.feature_value = feature_value\n self.node_type = node_type\n self.nodes = nodes\n self.children = children", "def mutate(self, child):\n return child", "def update_cell(self, x, y, value):\n x1, y1 = self.transpose_coordinates(x, y)\n if self.is_in_field(x1, y1):\n self._cells[y1][x1] = value\n return True\n return False", "def change_child_value(self, place, hasElement ):\n raise NotImplementedError", "def replace_child(self, oldchild, newchild):\n index = self._subtrees.index(oldchild)\n self._subtrees[index] = newchild\n self._remove_child_widget(oldchild)\n self._add_child_widget(newchild)\n self.update(newchild)", "def _modelUpdated(self, *args, **kwargs):\n topLeft = self.index(column=0)\n bottomRight = self.index(column=1)\n model = self.model()\n if model is not None:\n model.dataChanged.emit(topLeft, bottomRight)", "def grid(self, grid):\n\n self._grid = grid", "def update_cell(self, cell):\n\n if cell.uuid not in self._cells:\n error_str = \"Trying to update a non-existing cell with uuid: \"\\\n + str(cell.uuid)\n raise KeyError(error_str)\n\n if not isinstance(cell, Cell):\n error_str = \"Trying to update an object with the wrong type. 
\"\\\n + \"Cell expected.\"\n raise TypeError(error_str)\n\n cell_to_update = self._cells[cell.uuid]\n\n cell_to_update.data = cell.data\n cell_to_update.points = cell.points", "def updateCurrentValue(self, value):\n xsnap = None\n ysnap = None\n \n if value != self.endValue():\n xsnap = self.targetObject().isXSnappedToGrid()\n ysnap = self.targetObject().isYSnappedToGrid()\n \n self.targetObject().setXSnapToGrid(False)\n self.targetObject().setYSnapToGrid(False)\n \n super(XNodeAnimation, self).updateCurrentValue(value)\n \n if value != self.endValue():\n self.targetObject().setXSnapToGrid(xsnap)\n self.targetObject().setYSnapToGrid(ysnap)", "def update_grid(self):\n if self.game_over:\n return\n if self.active_piece is None:\n self.place_new_piece()\n if self.piece_collision_exists(self.active_piece):\n self.handle_active_piece_collision()\n self.place_new_piece()\n self.shift_cells(self.active_piece, self.current_direction)\n self.active_piece = TransformPiece.shift_coordinates(self.active_piece, self.current_direction)\n self.merge_with_completed_rows()\n if self.is_game_won():\n self.game_over = True", "def update_grid(comp, grid):\n while not comp.waiting and not comp.halt_status:\n time.sleep(0.001)\n\n while not comp.output.empty():\n # Get 3 results\n x = comp.output.get()\n y = comp.output.get()\n b = comp.output.get()\n\n if x == -1 and y == 0:\n print(f\"Score = {b}\")\n\n grid[x][y] = b", "def update_obstacles(self, new_obs):\n self.obstacles = new_obs", "def update_state(self, context: GANContext) -> None:\n updater = lambda value: lambda: self._metric.update_state(value)\n for real_xy, noise in context.dataset:\n real_x, real_y = real_xy\n\n g_inputs = noise\n if len(context.generator_model.inputs) == 2:\n g_inputs = [noise, real_y]\n\n fake = context.generator_model(\n g_inputs, training=context.log_eval_mode == LogEvalMode.TRAIN\n )\n\n # check the resolution is the same as the one passed as input\n resolution = real_x.shape[1]\n if resolution != self.resolution:\n raise ValueError(\n \"Image resolution is not the same as the input resolution.\"\n )\n\n scores = sliced_wasserstein_distance(\n real_x,\n fake,\n resolution_min=self.resolution_min,\n patches_per_image=self.patches_per_image,\n use_svd=self.use_svd,\n patch_size=self.patch_size,\n random_projection_dim=self.random_projection_dim,\n random_sampling_count=self.random_sampling_count,\n )\n\n fake_scores = []\n\n for i, couple in enumerate(scores):\n self.children_real_fake[i][0].update_state(context, couple[0])\n self.children_real_fake[i][1].update_state(context, couple[1])\n fake_scores.append(tf.expand_dims(couple[1], axis=0))\n\n fake_scores = tf.concat(fake_scores, axis=0)\n\n self._distribute_strategy.experimental_run_v2(updater(fake_scores))", "def updatetree(self):\n if self.node:\n self.node.update()\n self.draw()", "def update_board(self, value, row=-1, col=-1, cell=-1):\n\n if row != -1 and col != -1 and cell == -1:\n _row,_col = row,col\n\n elif row == -1 and col == -1 and type(cell) == tuple:\n _row,_col = cell\n\n else:\n raise Exception(\"you must provide either row and column or a cell tuple\")\n\n group = self.calc_group(_row, _col)\n\n self.rows[_row].discard(value)\n self.columns[_col].discard(value)\n self.groups[group].discard(value)\n\n self.board[_row][_col] = value", "def update(self, x):\n pass", "def update_observation(self, g, inf_nodes, node, label, c):\n # rigorously speaking,\n # root sampler should be updated, for example\n # earliet node might be updated, or uninfected nodes get 
removed\n # print('update observation, self.root_sampler', self.root_sampler)\n self._update_root_sampler(inf_nodes, c)\n\n new_samples = self.sampler.update_samples(\n inf_nodes,\n {node: label},\n root_sampler=self.root_sampler\n )\n\n if not self.sampler.with_resampling:\n # should be deprecated because trees are re-sampled\n self.error_estimator.update_trees(new_samples, {node: label})\n else:\n # re-build the matrix because trees are re-sampled\n self.error_estimator.build_matrix(self.sampler._samples)" ]
[ "0.6287198", "0.59800756", "0.5757915", "0.54690313", "0.52610976", "0.51572514", "0.5034711", "0.50117636", "0.49751922", "0.48433962", "0.48404443", "0.4838931", "0.47301564", "0.47202694", "0.46636343", "0.46548712", "0.46405032", "0.46102586", "0.46008292", "0.45953625", "0.45883283", "0.4571117", "0.4570679", "0.4559546", "0.45521694", "0.45440254", "0.4509946", "0.45027164", "0.44915238", "0.4489712" ]
0.61649644
1
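In the update record above, a single new Example touches exactly one cube per grid level: the base grid recomputes that cube's class and then hands the same Example to its child. A simplified, runnable sketch of the cascade (the halving of coordinates per level follows the related lower-level update listed among the negatives; everything else is assumed):

def cascade_update(levels, coords):
    """Return the cube index touched at each grid level for one new example."""
    touched = [coords]
    for _ in range(levels - 1):
        coords = tuple(c // 2 for c in coords)   # each child grid is twice as coarse
        touched.append(coords)
    return touched

print(cascade_update(3, (5, 2)))   # [(5, 2), (2, 1), (1, 0)]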
Updates the grid with a given batch of Examples. It groups the Examples by class_id and by the Hypercubes containing them. If the Grid has a child, it is also forced to update itself.
def batch_update(self, examples):
    print("Updating the BaseGrid with a batch of examples")
    examples_grouping_dict = {}
    for example in examples:
        hypercubes_coords = tuple(
            [int(example.coords[i] / self.hypercube_measurements[i]) for i in range(self.dims - 1, -1, -1)])
        examples_grouping_dict[(example.class_id, hypercubes_coords)] = examples_grouping_dict.get((example.class_id, hypercubes_coords), [])
        examples_grouping_dict[(example.class_id, hypercubes_coords)].append(example)
    for (class_id, hypercubes_coords), example_list in examples_grouping_dict.items():
        self.hypercubes[hypercubes_coords].update_basic(example_list=example_list)
    if self.child_grid:
        self.child_grid.batch_update(examples=examples_grouping_dict)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def batch_update(self, parents_classes_dict):\n print(\"Updating the LowerLevelGrid with a batch of examples\")\n for (class_id, coords), examples in parents_classes_dict.items():\n coords = tuple([int(x / 2) for x in coords])\n self.hypercubes[coords].update_lower_level(example_class=class_id, example_count=len(examples), threshold=self.threshold)\n if self.child_grid:\n self.child_grid.batch_update(parents_classes_dict=parents_classes_dict)", "def update(self, example, hypercubes_coords=None):\n hypercubes_coords = tuple(\n [int(example.coords[i] / self.hypercube_measurements[i]) for i in range(self.dims - 1, -1, -1)])\n new_class = self.hypercubes[hypercubes_coords].update_basic(example_list=[example])\n print(\"Update. Changed class of \" + str(hypercubes_coords) + \" to: \" + str(new_class))\n if self.child_grid:\n self.child_grid.update(example=example, hypercubes_coords=hypercubes_coords)", "def update(self, example, hypercubes_coords):\n hypercubes_coords = tuple([int(x / 2) for x in hypercubes_coords])\n new_class = self.hypercubes[hypercubes_coords].update_lower_level(example_class=example.class_id, example_count=1, threshold=self.threshold)\n print(\"Update. Changed class of \" + str(hypercubes_coords) + \" to: \" + str(new_class))\n if self.child_grid:\n self.child_grid.update(example=example, hypercubes_coords=hypercubes_coords)", "def set_hypercubes_classes(self):\n print(\"Setting the BaseGrid hypercubes' classes.\")\n list_of_all_hc = list(self.hypercubes.flatten())\n print(\"Number of hypercubes: \" + str(len(list_of_all_hc)))\n for hypercube in list_of_all_hc:\n hypercube.set_hypercube_class()\n if self.child_grid:\n self.child_grid.set_hypercubes_classes()", "def update_eligs(self, *args):\n self.splitGD.update_eligs()", "def update_eligs(self, *args):\n self.splitGD.update_eligs()", "def update_grid(self):\n # Check to see if we have moved squares\n _new_grid = self.calc_grid()\n if _new_grid == self._grid:\n return\n # Remove from old square and add to new square\n self.target._grid[self._grid][self._type].discard(self)\n self.target._grid[_new_grid][self._type].add(self)\n # Update coordinates\n self._grid = _new_grid", "def set_hypercubes_classes(self):\n print(\"Setting the Hypercubes' classes of grid at level: \" + str(self.level))\n for hypercube in self.hypercubes.flatten():\n coordinates = []\n for coord in hypercube.coords:\n coordinates.append([2 * coord, 2 * coord + 1])\n parents_list = []\n for indices in list(itertools.product(*coordinates)):\n parents_list.append(self.parent_hypercubes[tuple(reversed(indices))])\n hypercube.set_lower_level_hypercube_class(parent_hypercubes=parents_list, threshold=self.threshold)\n if self.child_grid:\n self.child_grid.set_hypercubes_classes()", "def _update(self):\n # clear group before rebuild\n self.clear()\n\n # build configuration groups\n self._config_names = []\n for i in range(self._n_configs):\n config_name = f\"config{i+1:02}\"\n self._config_names.append(config_name)\n self._build_config_group(config_name)\n\n # reset active configuration if necessary\n if not all(cname in self._config_names for cname in self._active_config):\n self._active_config = (self._config_names[0],)\n\n # build datasets\n self._build_datasets()", "def add_example_to_grid(self, example):\n indices = tuple([int(example.coords[x] / self.hypercube_measurements[x]) for x in range(self.dims - 1, -1, -1)])\n self.hypercubes[indices].add_example(example)", "def update_batch(self, *args, **kwargs):\n pass", "def 
fill_batch_queue(self):\n\t\twhile True:\n\t\t\tif self._hps.mode.value != 'decode':\n\t\t\t\t# Get bucketing_cache_size-many batches of Examples into a list, then sort\n\t\t\t\tinputs = []\n\t\t\t\tfor _ in xrange(self._hps.batch_size.value * self._bucketing_cache_size):\n\t\t\t\t\tinputs.append(self._example_queue.get())\n\t\t\t\tinputs = sorted(inputs, key=lambda inp: inp.enc_len) # sort by length of encoder sequence\n\n\t\t\t\t# Group the sorted Examples into batches, optionally shuffle the batches, and place in the batch queue.\n\t\t\t\tbatches = []\n\t\t\t\tfor i in xrange(0, len(inputs), self._hps.batch_size.value):\n\t\t\t\t\tbatches.append(inputs[i:i + self._hps.batch_size.value])\n\t\t\t\t\n\t\t\t\tfor b in batches: # each b is a list of Example objects\n\t\t\t\t\tself._batch_queue.put(Batch(b, self._hps, self._vocab))\n\n\t\t\telse: # beam search decode mode\n\t\t\t\tex = self._example_queue.get()\n\t\t\t\tb = [ex for _ in xrange(self._hps.batch_size.value)]\n\t\t\t\tself._batch_queue.put(Batch(b, self._hps, self._vocab))", "def _update_batch_size(configs, batch_size):\n configs[\"train_config\"].batch_size = max(1, int(round(batch_size)))", "def flow(self, batch_size=32):\n nb_batches = int(len(self.image_ids_in_subset) / batch_size) + 1\n while True:\n # Before each epoch we shuffle the images' ids\n random.shuffle(self.image_ids_in_subset)\n\n for i in range(nb_batches):\n # We first get all the image ids for the next batch\n current_bach = self.image_ids_in_subset[i*batch_size:(i+1)*batch_size]\n X_batch = []\n Y_batch = []\n\n for image_id in current_bach:\n # Load the image and resize it. We get a PIL Image object\n img = image.load_img(self.get_img_path(int(image_id)), grayscale=False, target_size=(cfg.IMAGE.IMG_SIZE, cfg.IMAGE.IMG_SIZE))\n # Cast the Image object to a numpy array and put the channel has the last dimension\n img_arr = image.img_to_array(img, data_format='channels_last')\n X_batch.append(img_arr)\n # Y_batch.append(self.id_to_label[image_id])\n Y_batch.append(self.get_labels(image_id))\n\n # resize X_batch in (batch_size, IMG_HEIGHT, IMG_WIDTH, 3)\n X_batch = np.reshape(X_batch, (-1, cfg.IMAGE.IMG_SIZE, cfg.IMAGE.IMG_SIZE, 3))\n # resize Y_batch in (None, nb_classes)\n Y_batch = np.reshape(Y_batch, (-1, self.nb_classes))\n\n # substract mean values from imagenet\n X_batch = preprocess_input(X_batch, data_format='channels_last')\n yield(X_batch, Y_batch)", "def update(self, batch: Batch[T]) -> typing.Self:\n molecules = (record.get_molecule() for record in batch)\n self._molecules.update(map(self._key_maker.get_key, molecules))\n self._batches.add(batch.get_identity_key())\n self._num += 1\n return self", "def batch_size(self, batch_size: ConfigNodePropertyInteger):\n\n self._batch_size = batch_size", "def update_supergroups_by_id(self, group_ids, kind):\n\n updated = set(map(int, group_ids))\n\n logger = logging.getLogger(__name__)\n debug = logger.debug\n\n debug('updating %s: %r', kind, updated)\n\n groups = self['__store']\n\n # print(kind)\n existing = getattr(self, kind + 's')\n debug('existing %s: %r', kind, updated)\n # print(updated, existing)\n\n if updated != existing:\n\n group_lookup = {\n group.group_id: group.name\n for group in groups\n }\n\n db = groups.db\n\n to_remove = existing - updated\n if to_remove:\n debug('removing %s %r from %r', kind, to_remove, self.name)\n cmd = 'delete from subgroups where subgroup_id=%s and group_id in %s'\n db(cmd, self.group_id, to_remove)\n\n for group_id in to_remove:\n audit(\n 'remove %s' % kind,\n 
group_lookup.get(\n group_id,\n 'unknown (%s)' % group_id,\n ),\n self.name\n )\n\n to_add = updated - existing\n if to_add:\n debug('adding %s %r to %r', kind, to_add, self.name)\n cmd = 'insert into subgroups (group_id, subgroup_id) values (%s, %s)'\n sequence = zip(to_add, [self.group_id] * len(to_add))\n db.execute_many(cmd, sequence)\n\n for subgroup_id in to_add:\n audit(\n 'add %s' % kind,\n group_lookup.get(\n subgroup_id,\n 'unknown (%s)' % subgroup_id,\n ),\n self.name\n )\n\n else:\n debug('%s unchanged', kind)", "def _batch_update(self, query, mutation):\n logger.info(\"Performing batch update on %s. Mutation: %s\", query, mutation)\n modified = 0\n for doc in self.instances.find(query):\n with lock_instance(doc['_id']):\n pre_update_doc = self.instances.find_one({'_id' : doc['_id']})\n result = self.instances.update_one({'_id': doc['_id']}, mutation)\n assert result.modified_count == 1\n modified += 1\n updated_doc = self.instances.find_one({'_id': doc['_id']})\n instance = FixtureInstance.deserialize_mongodoc(updated_doc)\n try:\n self.axdb_client.update_fixture_instance(instance.axdbdoc())\n except Exception:\n logger.exception(\"Failed to persist updates for %s. Undoing cache update\", instance)\n self.instances.replace_one({'_id' : instance.id}, pre_update_doc)\n raise\n logger.info(\"%s fixture instances modified\", modified)", "def _update_num_batches(self):\n # maximum possible number of batches is equal to number of whole times\n # batch_size divides in to the number of data points which can be\n # found using integer division\n possible_num_batches = self.inputs.shape[0] // self.batch_size\n if self.max_num_batches == -1:\n self.num_batches = possible_num_batches\n else:\n self.num_batches = min(self.max_num_batches, possible_num_batches)", "async def split_large_groups_impl(config):\n async with create_sessionmaker(config)() as dbsession:\n progress = ClickIndeterminate(\"Splitting large groups\")\n progress.start()\n splitting = True\n stmt = select(Group).options(selectinload(Group.items), selectinload(Group.children))\n while splitting:\n splitting = False\n result = await dbsession.execute(stmt)\n for group in result.scalars():\n if len(group.children) == 0:\n if len(group.items) > 120 and len(group.items) < 300: # noqa: PLR2004\n if split_by_year(config, dbsession, group):\n splitting = True\n else:\n split_by_similarity(dbsession, group)\n splitting = True\n elif len(group.items) >= 300: # noqa: PLR2004\n if split_by_attribute(dbsession, group, \"concepts\"):\n splitting = True\n elif split_by_attribute(dbsession, group, \"subjects\"):\n splitting = True\n elif split_by_attribute(dbsession, group, \"materials\"):\n splitting = True\n elif split_by_attribute(dbsession, group, \"techniques\"):\n splitting = True\n elif split_by_year(config, dbsession, group):\n splitting = True\n else:\n split_by_similarity(dbsession, group)\n splitting = True\n await dbsession.commit()\n progress.stop()", "def hook_store_batch_size(module):\n if self._batch_size == {}:\n batch_axis = 0\n batch_size = module.input0.shape[batch_axis]\n\n for group in param_groups:\n group_id = id(group)\n\n if self._verbose:\n print(f\"Group {group_id}: Store 'batch_size'\")\n\n self._batch_size[group_id] = batch_size", "def modify_instance_groups(self, instance_group_ids, new_sizes):\r\n if type(instance_group_ids) != types.ListType:\r\n instance_group_ids = [instance_group_ids]\r\n if type(new_sizes) != types.ListType:\r\n new_sizes = [new_sizes]\r\n\r\n instance_groups = zip(instance_group_ids, 
new_sizes)\r\n\r\n params = {}\r\n for k, ig in enumerate(instance_groups):\r\n #could be wrong - the example amazon gives uses InstanceRequestCount,\r\n #while the api documentation says InstanceCount\r\n params['InstanceGroups.member.%d.InstanceGroupId' % (k+1) ] = ig[0]\r\n params['InstanceGroups.member.%d.InstanceCount' % (k+1) ] = ig[1]\r\n\r\n return self.get_object('ModifyInstanceGroups', params, ModifyInstanceGroupsResponse, verb='POST')", "def batch_size(self) -> int:\n ...", "def set_grids(self, core_size, patch_shape, psf_model_shape):\n # core foo\n ravel_size = patch_shape[0] * patch_shape[1]\n self.core_shape = (core_size, core_size)\n xcenter = (patch_shape[0] - 1) / 2\n ycenter = (patch_shape[1] - 1) / 2\n buff = (core_size - 1) / 2\n xcore = xcenter - buff, xcenter + buff + 1\n ycore = ycenter - buff, ycenter + buff + 1\n core_ind = np.arange(ravel_size, dtype=np.int).reshape(patch_shape)\n self.core_ind = core_ind[xcore[0]:xcore[1], ycore[0]:ycore[1]].ravel()\n\n # grid defs\n self.psf_grid, self.patch_grid = get_grids(patch_shape, psf_model_shape)", "def update(self, output_projections, hiddens, contexts):\n\n # (batch, beam_width, num_classes)\n _, _, num_classes = output_projections.size()\n\n # (batch, beam_width * num_classes, 1)\n class_probabilities = F.softmax(output_projections, dim=-1)\n class_probabilities = torch.log(class_probabilities.view(self._batch_size, -1, 1))\n\n if len(self.sequences.size()) == 2:\n self.sequences = self.sequences.unsqueeze(2) # (batch_size, beam_width, 1)\n\n # each return is (batch, beam_width, 1)\n # class_probabilities = (batch, beam_width * num_classes, 1)\n\n # prob_cur, indices = torch.topk(class_probabilities, self._beam_width, dim=1)\n update_indices, update_log_prob = self.fold(class_probabilities, num_classes)\n\n nth_beam = update_indices / num_classes # used for selecting the hidden state and context vector\n update_seq = update_indices % num_classes\n\n assert self.sequences.size(0) == update_seq.size(0) and self.sequences.size(1) == update_seq.size(1)\n print(\"self.sequences size (before update): \" + str(self.sequences.size()))\n self.sequences = torch.cat([self.sequences, update_seq], 2)\n print(\"self.sequences size: \" + str(self.sequences.size()))\n assert self.seq_log_prob.size() == update_seq.size()\n self.seq_log_prob += update_log_prob\n\n # update the next hidden states and the context vector\n self.hiddens = self.batched_index_select(hiddens, nth_beam.data)\n self.contexts = self.batched_index_select(contexts, nth_beam.data)", "def set_batch_size(self, batch_size):\n final_sz = self.full_dataset_size % batch_size\n if not self.final_batch:\n self.dataset_size = self.full_dataset_size - final_sz\n self.enqueuer.set_num_elements(self.dataset_size)\n self.batch_size = batch_size", "def grow(self, batch_ids, **combo_runner_opts):\n if isinstance(batch_ids, int):\n batch_ids = (batch_ids,)\n\n combo_runner_core(\n grow,\n combos=((\"batch_number\", batch_ids),),\n constants={\"verbosity\": 0, \"crop\": self},\n **combo_runner_opts,\n )", "def update_nodes(self):\n raise NotImplementedError('ERROR: sweeper has to implement update_nodes(self)')", "def update_target_groups():\n\n # detect which region the explorer(s) are located\n for j in range(NUM_OF_SHARDS):\n key_explorer = \"explorers_\" + str(j)\n array_instance_ip = parse_network_config(key_explorer)\n array_instance_id = retrieve_instance_id(array_instance_ip)\n\n reg = retrieve_instance_region(array_instance_ip[0])\n # all nodes registered for the same 
endpoints should be located in the same region, if not, exit\n verify_nodes_same_region(reg, array_instance_ip)\n\n elbv2_client = boto3.client('elbv2', region_name=reg)\n\n array_target_group = create_name_target_group(j, ID_DOMAIN_NAME)\n pp.pprint(array_target_group)\n\n # 1/3 - retrieve target group arn\n print(\"==== retrieve target group arn\")\n dict_tg_arn = dict()\n for tg in array_target_group:\n resp = elbv2_client.describe_target_groups(Names=[tg])\n tg_arn = resp[\"TargetGroups\"][0][\"TargetGroupArn\"]\n dict_tg_arn[tg] = tg_arn\n pp.pprint(dict_tg_arn)\n\n # 2/3 - find all the instances\n print(\"==== find all the instances current registered\")\n dict_tg_instanceid = defaultdict(list)\n for tg in array_target_group:\n resp = elbv2_client.describe_target_health(TargetGroupArn=dict_tg_arn[tg])\n num_of_targets = len(resp[\"TargetHealthDescriptions\"])\n for k in range(num_of_targets):\n instance_id = resp[\"TargetHealthDescriptions\"][k][\"Target\"][\"Id\"]\n dict_tg_instanceid[tg].append(instance_id)\n pp.pprint(dict_tg_instanceid)\n\n # 3/3 - deregister all instances, then we can have a clean and nice target group\n print(\"==== deregister all instances\")\n for tg in array_target_group:\n for instance_id in dict_tg_instanceid[tg]:\n try:\n resp = elbv2_client.deregister_targets(TargetGroupArn=dict_tg_arn[tg],\n Targets=[{'Id': instance_id}])\n except Exception as e:\n print(\"Unexpected error to deregister the instance: %s\" % e)\n\n # 3/3 - register instances into the tg\n print(\"==== register all instances\")\n # outer for loop: loop through 2 tg, https and wss\n # inner loop: add every single instance id into each tg\n for tg in array_target_group:\n for instance in array_instance_id:\n response = elbv2_client.register_targets(\n TargetGroupArn=dict_tg_arn[tg],\n Targets=[{'Id': instance, }, ]\n )", "def create_grids_structure(self):\n for indices, hypercube in np.ndenumerate(self.hypercubes):\n self.hypercubes[indices] = Hypercube(coords=indices)" ]
[ "0.7303846", "0.6537551", "0.64356464", "0.5361293", "0.52692604", "0.52692604", "0.5262458", "0.523727", "0.5224487", "0.5210136", "0.5149948", "0.5143078", "0.5071514", "0.50149363", "0.5011419", "0.49959013", "0.49740377", "0.48971123", "0.48322254", "0.48213798", "0.47948405", "0.4770699", "0.4752018", "0.47495633", "0.4722465", "0.47167626", "0.46872178", "0.46771047", "0.46766132", "0.46558616" ]
0.8305461
0
Computes coordinates of the 2^dims parent Hypercubes for every Hypercube in the grid. Each coarser Hypercube consists of 2^dims finer-level Hypercubes.
def set_hypercubes_parents_indices(self): for hypercube in self.hypercubes.flatten(): coordinates = [] for coord in hypercube.coords: coordinates.append([2 * coord, 2 * coord + 1]) for indices in list(itertools.product(*coordinates)): hypercube.parent_hypercubes_indices.append(tuple(indices))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_centers_of_hypercubes(self):\n for hypercube in self.hypercubes.flatten():\n sums = np.zeros((len(hypercube.coords)))\n for coords in hypercube.parent_hypercubes_indices:\n for index, summ in enumerate(sums):\n sums[index] += self.parent_hypercubes[coords].center[index]\n hypercube.center = [x / 4 for x in sums]", "def compute_centers_of_hypercubes(self):\n for hc in self.hypercubes.flatten():\n for i in range(self.dims - 1, -1, -1):\n index = self.dims - (i + 1)\n hc.center[i] = (hc.coords[index] + 0.5) * self.hypercube_measurements[index]", "def create_grids_structure(self):\n for indices, hypercube in np.ndenumerate(self.hypercubes):\n self.hypercubes[indices] = Hypercube(coords=indices)", "def get_neighbours(self):\n shape=self.cubeshape[1:]\n neighboursx=np.arange(self.xpos-(self.blocksize-1)/2,(self.xpos+(self.blocksize-1)/2)+1,dtype='int' )\n neighboursx=[x if (x>=0) & (x<=shape[1]-1) else np.nan for x in neighboursx ]\n neighboursy=np.arange(self.ypos-(self.blocksize-1)/2,(self.ypos+(self.blocksize-1)/2)+1,dtype='int' )\n neighboursy=[y if (y>=0) & (y<=shape[0]-1) else np.nan for y in neighboursy ]\n keys=[np.ravel_multi_index([y,x], shape) if np.all(np.isfinite(np.asarray([y,x]))) else np.nan for y in neighboursy for x in neighboursx]\n\n return keys", "def cell_edges3d_cartesian(self, axis2, axis3):", "def set_hypercubes_classes(self):\n print(\"Setting the Hypercubes' classes of grid at level: \" + str(self.level))\n for hypercube in self.hypercubes.flatten():\n coordinates = []\n for coord in hypercube.coords:\n coordinates.append([2 * coord, 2 * coord + 1])\n parents_list = []\n for indices in list(itertools.product(*coordinates)):\n parents_list.append(self.parent_hypercubes[tuple(reversed(indices))])\n hypercube.set_lower_level_hypercube_class(parent_hypercubes=parents_list, threshold=self.threshold)\n if self.child_grid:\n self.child_grid.set_hypercubes_classes()", "def get_grid_locations(self, top_left, other_pos):\n cell_x = torch.floor(((other_pos[:, 0] - top_left[:, 0]) / self.neighborhood_size) *self.grid_size)\n\n # Added this part to implementation, otherwise the pooling is going to run into an indexing error\n cell_x[cell_x == self.grid_size] -= 1\n cell_y = torch.floor(((top_left[:, 1] - other_pos[:, 1]) / self.neighborhood_size) *self.grid_size)\n cell_y[cell_y == self.grid_size] -= 1\n grid_pos = cell_x + cell_y * self.grid_size\n\n return grid_pos", "def cell_edges2d_cartesian(self, axis2):", "def sub_grid(unique_x, xpos, ypos, zpos, axvel, ayvel, azvel):\n\n n = sgs\n ssgh_array = []\n\n i = 0\n while i + n + ((n - 1) * unique_x) < len(xpos):\n # Makes sure that subgrid can be fromed\n sxp = 0\n syp = 0\n szp = 0\n sxv = 0\n syv = 0\n szv = 0\n\n for j in range(n):\n for k in range(n):\n sxp += xpos[i + j + (k * unique_x)]\n syp += ypos[i + j + (k * unique_x)]\n szp += zpos\n sxv += axvel[i + j + (k * unique_x)]\n syv += ayvel[i + j + (k * unique_x)]\n szv += azvel[i + j + (k * unique_x)]\n ssgh_array.append([sxp, syp, szp, sxv, syv, szv])\n\n if (i + n) < len(xpos):\n i += n\n else:\n pl = unique_x - (i % unique_x)\n i += pl + ((n - 1) * unique_x)\n\n return np.array(ssgh_array) / sqr(n)", "def calc_grid(self):\n return int(self._posn.x / cell_size), int(self._posn.y / cell_size)", "def test_parent_with_iterables(self):\n def makeCubesAndGrp():\n cmds.file(new=1, f=1)\n cubes = []\n for x in range(10):\n cubes.append(pm.polyCube()[0])\n group = pm.group(empty=True)\n return cubes, group\n\n cubes, group = makeCubesAndGrp()\n res1 = 
pm.parent(cubes[:4], group)\n self.assertEqual(cubes[:4], res1)\n res2 = pm.parent(cubes, group)\n self.assertEqual(cubes[4:], res2)\n\n cubes, group = makeCubesAndGrp()\n res1 = pm.parent(cubes[:4] + [group])\n self.assertEqual(cubes[:4], res1)\n res2 = pm.parent(cubes, group)\n self.assertEqual(cubes[4:], res2)\n\n cubes, group = makeCubesAndGrp()\n res1 = pm.parent(cubes[0], cubes[1], cubes[2], cubes[3], group)\n self.assertEqual(cubes[:4], res1)\n res2 = pm.parent(cubes, group)\n self.assertEqual(cubes[4:], res2)\n\n cubes, group = makeCubesAndGrp()\n res1 = pm.parent(cubes[0], cubes[1], [cubes[2], cubes[3], group])\n self.assertEqual(cubes[:4], res1)\n res2 = pm.parent(cubes, group)\n self.assertEqual(cubes[4:], res2)\n\n cubes, group = makeCubesAndGrp()\n res1 = pm.parent([cubes[0], cubes[1]], cubes[2], [cubes[3], group])\n self.assertEqual(cubes[:4], res1)\n res2 = pm.parent(cubes, group)\n self.assertEqual(cubes[4:], res2)", "def generate_all_locations(grid, shape):", "def cell_edges(self):", "def _buildGridPoints(self):\n self.spacings = []\n for level in xrange(self.depth):\n levelSpacings = []\n refLevel = level + 1\n level = 2**level\n axisData = []\n for axis in self.size:\n spacing = axis / (level+1)\n levelSpacings.append(spacing)\n axisData.append([gridValue*spacing for gridValue in xrange(1, level+1)])\n pointList = [((i, j, k), np.array([axisData[0][i], axisData[1][j], axisData[2][k]]))\n for i in xrange(level)\n for j in xrange(level)\n for k in xrange(level)]\n self.grid[refLevel] = {point[0]: point[1] for point in pointList}\n self.spacings.append(levelSpacings)", "def ij_coordinates(self):\n\n x = np.arange(self.nx)\n y = np.arange(self.ny)\n return np.meshgrid(x, y)", "def _calcOrderedCellVertexIDs(self):\n ids = numerix.zeros((8, self.nx, self.ny, self.nz), 'l')\n indices = numerix.indices((self.nx, self.ny, self.nz))\n ids[1] = indices[0] + (indices[1] + (indices[2] + 1) * (self.ny + 1) + 1) * (self.nx + 1)\n ids[0] = ids[1] + 1\n ids[3] = indices[0] + (indices[1] + (indices[2] + 1) * (self.ny + 1)) * (self.nx + 1)\n ids[2] = ids[3] + 1\n ids[5] = indices[0] + (indices[1] + indices[2] * (self.ny + 1) + 1) * (self.nx + 1)\n ids[4] = ids[5] + 1\n ids[7] = indices[0] + (indices[1] + indices[2] * (self.ny + 1)) * (self.nx + 1)\n ids[6] = ids[7] + 1\n\n return numerix.reshape(ids.swapaxes(1, 3), (8, self.numberOfCells))", "def get_base_coords(self):\n num_bases = len(self.tour)\n base_coords = np.zeros((num_bases,3), dtype=float)\n for i,base in enumerate(self.tour):\n helix_num = base.h\n helix_pos = base.p\n helix = self.helix_list[helix_num]\n base_coords[i] = base.coordinates\n return base_coords", "def grid_coords(self):\n return [(x, y) for y in range(self.height) for x in range(self.width)]", "def co_vertexes(self):\n theta = self.orientation + np.pi / 2\n shifts = np.array([np.cos(theta), np.sin(theta)]) * self.b\n return self.coords + (shifts[:, None] * [-1, 1]).T", "def cart_coords2d(self):\r\n\r\n mgx, mgy = self.meshup2d()\r\n coords = np.column_stack((mgx.flatten(), mgy.flatten()))\r\n\r\n return coords", "def _cal_grid_coordinates(self, nc_handle):\n print(\"calculating grid coordinates\")\n #\n x = np.zeros(self._grid[\"counts\"][0], dtype=float)\n y = np.zeros(self._grid[\"counts\"][1], dtype=float)\n z = np.zeros(self._grid[\"counts\"][2], dtype=float)\n \n for i in range(self._grid[\"counts\"][0]):\n x[i] = self._grid[\"origin\"][0] + i*self._grid[\"d0\"][0]\n\n for j in range(self._grid[\"counts\"][1]):\n y[j] = self._grid[\"origin\"][1] + 
j*self._grid[\"d1\"][1]\n\n for k in range(self._grid[\"counts\"][2]):\n z[k] = self._grid[\"origin\"][2] + k*self._grid[\"d2\"][2]\n\n self._set_grid_key_value(\"x\", x)\n self._set_grid_key_value(\"y\", y)\n self._set_grid_key_value(\"z\", z)\n\n for key in [\"x\", \"y\", \"z\"]:\n self._write_to_nc(nc_handle, key, self._grid[key])\n return None", "def find_edges(self):\n self.edges = [deepcopy(self.grid[0]), [], deepcopy(self.grid[-1]), []]\n for g in self.grid:\n self.edges[3].append(g[0])\n self.edges[1].append(g[-1])\n self.edges[2]\n self.edges[3]", "def _get_grid_cell_indexes(proj, xs, ys, bounding_box):\n # Unpack values from the projection\n eq_rad = proj.semi_major_axis\n polar_rad = proj.semi_minor_axis\n h = proj.perspective_point_height + eq_rad\n lon0 = proj.longitude_of_projection_origin\n \n # Unpack values from the area we want to grab the data\n min_lat, min_lon = bounding_box.sw_corner()\n max_lat, max_lon = bounding_box.ne_corner()\n \n with np.errstate(invalid='ignore'):\n # Calculate the lat and lon grids\n xs, ys = np.meshgrid(xs, ys)\n a_vals = np.power(np.sin(xs), 2.0) + \\\n np.power(np.cos(xs), 2.0) * (np.power(np.cos(ys), 2.0) + \\\n eq_rad * eq_rad / polar_rad / polar_rad * np.power(np.sin(ys), 2.0))\n b_vals = -2 * h * np.cos(xs) * np.cos(ys)\n c_val = h * h - eq_rad * eq_rad\n \n rs = (-b_vals - np.sqrt(np.power(b_vals, 2.0) - 4 * a_vals * c_val)) / (2 * a_vals)\n \n sx = rs * np.cos(xs) * np.cos(ys)\n sy = -rs * np.sin(xs)\n sz = rs * np.cos(xs) * np.sin(ys)\n \n lats = np.arctan((eq_rad *eq_rad * sz) \\\n / (polar_rad * polar_rad * np.sqrt(np.power(h - sx, 2.0) + np.power(sy, 2.0))))\n lats = np.degrees(lats)\n \n lons = np.radians(lon0) - np.arctan(sy / (h - sx))\n lons = np.degrees(lons)\n \n # Flatten the arrays so we get a 1D list of indexes\n lats = lats.flatten()\n lons = lons.flatten()\n \n # Filter out values not in our bounding box\n lats = np.where(np.logical_and(lats >= min_lat, lats <= max_lat))[0]\n lons = np.where(np.logical_and(lons >= min_lon, lons <= max_lon))[0]\n idxs = list(set(lons).intersection(set(lats)))\n \n return idxs", "def build_sites(self):\n for i in range(self.shape[0]):\n for j in range(self.shape[1]):\n for k in range(self.shape[2]):\n for s,site in enumerate(self.cell.sites):\n newsite = copy.deepcopy(site)\n coordinate = self.cell.a1*i+\\\n self.cell.a2*j+\\\n self.cell.a3*k\n newsite.coordinate += coordinate\n self.sites[i,j,k,s] = newsite", "def cube_vertices(x, y, z, n):\n #def cube_vertices(self):\n # \"\"\" Return the vertices of the cube at position x, y, z with size 2*n.\n #\n # \"\"\"\n # return [\n # x-n,y+n,z-n, x-n,y+n,z+n, x+n,y+n,z+n, x+n,y+n,z-n, # top\n # x-n,y-n,z-n, x+n,y-n,z-n, x+n,y-n,z+n, x-n,y-n,z+n, # bottom\n # x-n,y-n,z-n, x-n,y-n,z+n, x-n,y+n,z+n, x-n,y+n,z-n, # left\n # x+n,y-n,z+n, x+n,y-n,z-n, x+n,y+n,z-n, x+n,y+n,z+n, # right\n # x-n,y-n,z+n, x+n,y-n,z+n, x+n,y+n,z+n, x-n,y+n,z+n, # front\n # x+n,y-n,z-n, x-n,y-n,z-n, x-n,y+n,z-n, x+n,y+n,z-n, # back\n # ]\n return [\n x-n,y+n,z-n, x-n,y+n,z+n, x+n,y+n,z+n, x+n,y+n,z-n, # top\n x-n,y-n,z-n, x+n,y-n,z-n, x+n,y-n,z+n, x-n,y-n,z+n, # bottom\n x-n,y-n,z-n, x-n,y-n,z+n, x-n,y+n,z+n, x-n,y+n,z-n, # left\n x+n,y-n,z+n, x+n,y-n,z-n, x+n,y+n,z-n, x+n,y+n,z+n, # right\n x-n,y-n,z+n, x+n,y-n,z+n, x+n,y+n,z+n, x-n,y+n,z+n, # front\n x+n,y-n,z-n, x-n,y-n,z-n, x-n,y+n,z-n, x+n,y+n,z-n, # back\n ]", "def test_1_2_2D_cube_splits(self):\n check = [(0, 0), (1, 1), (1, 0), (0, 1), (0.5, 0.5), (0.0, 0.5),\n (0.5, 0.0),\n (0.25, 0.25), (1.0, 0.5), (0.5, 1.0), 
(0.75, 0.75),\n (0.75, 0.25),\n (0.25, 0.75), (0.5, 0.25), (0.25, 0.5), (0.375, 0.375),\n (0.0, 0.25),\n (0.25, 0.0), (0.125, 0.125), (0.125, 0.375), (0.375, 0.125),\n (0.5, 0.75), (0.75, 0.5), (0.625, 0.625), (1.0, 0.75),\n (0.75, 1.0),\n (0.875, 0.875), (0.875, 0.625), (0.625, 0.875), (0.625, 0.375),\n (1.0, 0.25), (0.75, 0.0), (0.875, 0.125), (0.875, 0.375),\n (0.625, 0.125), (0.375, 0.625), (0.0, 0.75), (0.25, 1.0),\n (0.125, 0.875), (0.125, 0.625), (0.375, 0.875)]\n\n nn_checks = {(0, 0): [(0.25, 0.0), (0.0, 0.25), (0.125, 0.125)],\n (0.625, 0.375): [(0.5, 0.5), (0.75, 0.25), (0.75, 0.5),\n (0.5, 0.25)],\n (0, 1): [(0.25, 1.0), (0.125, 0.875),(0.0, 0.75)],\n (0.625, 0.125): [(0.5, 0.0), (0.75, 0.25), (0.75, 0.0),\n (0.5, 0.25)]}\n\n\n init_triangulation(2, 2, check, nn_checks)", "def getSubdivisionNodes(self, signature):\n x, y, z = signature[0], signature[1], signature[2]\n return [(2*x+1, 2*y, 2*z), (2*x, 2*y, 2*z),\n (2*x+1, 2*y+1, 2*z), (2*x, 2*y, 2*z+1),\n (2*x+1, 2*y+1, 2*z+1), (2*x, 2*y+1, 2*z),\n (2*x+1, 2*y, 2*z+1), (2*x, 2*y+1, 2*z+1)]", "def get_cells(self):\r\n return \\\r\n (self.nx-1 if self.nx>1 else 1)* \\\r\n (self.ny-1 if self.ny>1 else 1)* \\\r\n (self.nz-1 if self.nz>1 else 1)", "def set_hypercubes_classes(self):\n print(\"Setting the BaseGrid hypercubes' classes.\")\n list_of_all_hc = list(self.hypercubes.flatten())\n print(\"Number of hypercubes: \" + str(len(list_of_all_hc)))\n for hypercube in list_of_all_hc:\n hypercube.set_hypercube_class()\n if self.child_grid:\n self.child_grid.set_hypercubes_classes()", "def cart_coords(self):\r\n\r\n mgx, mgy, mgz = self.meshup()\r\n coords = np.column_stack((np.ravel(mgx, order='C'), np.ravel(mgy, order='C'), np.ravel(mgz, order='C')))\r\n\r\n return coords" ]
[ "0.68306977", "0.6475379", "0.62905055", "0.58867306", "0.5867285", "0.5828118", "0.56448174", "0.5489253", "0.5457213", "0.54442", "0.54378206", "0.54122716", "0.5392055", "0.5390636", "0.5355757", "0.5286187", "0.5280051", "0.5264198", "0.5259125", "0.5256561", "0.52493584", "0.5245283", "0.5233753", "0.521964", "0.52196205", "0.5200668", "0.52003723", "0.51856107", "0.5176147", "0.5160404" ]
0.7526032
0
Sets classes for all hypercubes in self and in its child (another LowerLevelGrid) if it exists.
def set_hypercubes_classes(self): print("Setting the Hypercubes' classes of grid at level: " + str(self.level)) for hypercube in self.hypercubes.flatten(): coordinates = [] for coord in hypercube.coords: coordinates.append([2 * coord, 2 * coord + 1]) parents_list = [] for indices in list(itertools.product(*coordinates)): parents_list.append(self.parent_hypercubes[tuple(reversed(indices))]) hypercube.set_lower_level_hypercube_class(parent_hypercubes=parents_list, threshold=self.threshold) if self.child_grid: self.child_grid.set_hypercubes_classes()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_hypercubes_classes(self):\n print(\"Setting the BaseGrid hypercubes' classes.\")\n list_of_all_hc = list(self.hypercubes.flatten())\n print(\"Number of hypercubes: \" + str(len(list_of_all_hc)))\n for hypercube in list_of_all_hc:\n hypercube.set_hypercube_class()\n if self.child_grid:\n self.child_grid.set_hypercubes_classes()", "def update(self, example, hypercubes_coords):\n hypercubes_coords = tuple([int(x / 2) for x in hypercubes_coords])\n new_class = self.hypercubes[hypercubes_coords].update_lower_level(example_class=example.class_id, example_count=1, threshold=self.threshold)\n print(\"Update. Changed class of \" + str(hypercubes_coords) + \" to: \" + str(new_class))\n if self.child_grid:\n self.child_grid.update(example=example, hypercubes_coords=hypercubes_coords)", "def update(self, example, hypercubes_coords=None):\n hypercubes_coords = tuple(\n [int(example.coords[i] / self.hypercube_measurements[i]) for i in range(self.dims - 1, -1, -1)])\n new_class = self.hypercubes[hypercubes_coords].update_basic(example_list=[example])\n print(\"Update. Changed class of \" + str(hypercubes_coords) + \" to: \" + str(new_class))\n if self.child_grid:\n self.child_grid.update(example=example, hypercubes_coords=hypercubes_coords)", "def batch_update(self, parents_classes_dict):\n print(\"Updating the LowerLevelGrid with a batch of examples\")\n for (class_id, coords), examples in parents_classes_dict.items():\n coords = tuple([int(x / 2) for x in coords])\n self.hypercubes[coords].update_lower_level(example_class=class_id, example_count=len(examples), threshold=self.threshold)\n if self.child_grid:\n self.child_grid.batch_update(parents_classes_dict=parents_classes_dict)", "def create_grids_structure(self):\n for indices, hypercube in np.ndenumerate(self.hypercubes):\n self.hypercubes[indices] = Hypercube(coords=indices)", "def __init__(self, initial_grid):\n part_1.Grid.__init__(self, initial_grid)\n self.turn_on_corners()", "def setup_class(self):\n\n class SubFLRW(FLRW):\n def w(self, z):\n return super().w(z)\n\n self.cls = SubFLRW\n # H0, Om0, Ode0\n self.cls_args = (70 * u.km / u.s / u.Mpc, 0.27 * u.one, 0.689 * u.one)\n self.cls_kwargs = dict(Tcmb0=3.0 * u.K, name=self.__class__.__name__, meta={\"a\": \"b\"})", "def initialize_grid(self) -> None:\n for i in range(self.grid_size[0]):\n for j in range(self.grid_size[1]):\n self.set(i, j, self.base_color)", "def setup_class(self):\n class SubCosmology(Cosmology):\n\n H0 = Parameter(unit=u.km / u.s / u.Mpc)\n Tcmb0 = Parameter(unit=u.K)\n\n def __init__(self, H0, Tcmb0=0*u.K, name=None, meta=None):\n super().__init__(name=name, meta=meta)\n self._H0 = H0\n self._Tcmb0 = Tcmb0\n\n self.cls = SubCosmology\n self.cls_args = (70 * (u.km / u.s / u.Mpc), 2.7 * u.K)\n self.cls_kwargs = dict(name=self.__class__.__name__, meta={\"a\": \"b\"})", "def process_class(self, parent, cls):\n if cls.typemap.flat_name in self.class_map:\n raise RuntimeError(\"process_class: class {} already exists in class_map\"\n .format(cls.typemap.flat_name))\n self.class_map[cls.typemap.flat_name] = cls\n for var in cls.variables:\n self.add_var_getter_setter(parent, cls, var)\n cls.functions = self.define_function_suffix(cls.functions)", "def set_grid(self,ug):\n self.grd=ug\n self.set_topology()", "def update_class(self):\n neighbors_set = list(set(self.neighbors))\n counts = np.array([self.neighbors.count(n) for n in neighbors_set])\n probs = (counts / counts.sum()) * (1-self.mutation_prob)\n probs = np.append(probs, self.mutation_prob)\n 
neighbors_set.append(np.random.choice(np.arange(1, self.board.classes))) \n\n self.cell_class = np.random.choice(neighbors_set, p=probs)", "def __init__(self):\n\n for layer in self._layer_class_map:\n setattr(self, layer, self._layer_class_map[layer]())", "def _absorb_classes(self, ground_to='in'):\n # Find number of new class for inside and outside set\n currnt_classes = np.unique(self.classes)\n new_class_in = currnt_classes.max() + 1\n new_class_out = currnt_classes.max() + 2\n \n # Ground node to new_class\n if ground_to == 'in':\n self.classes[self.ground_node] = new_class_in\n elif ground_to == 'out':\n self.classes[self.ground_node] = new_class_out\n else:\n self.classes[self.ground_node] = -1\n \n cond_out = self.x[:self.ground_node] > self.threshold\n self.classes[:self.ground_node][cond_out] = new_class_out\n cond_out = self.x[self.ground_node:] > self.threshold\n self.classes[self.ground_node+1:][cond_out] = new_class_out\n \n cond_in = self.x[self.ground_node:] <= self.threshold\n self.classes[self.ground_node+1:][cond_in] = new_class_in\n cond_in = self.x[:self.ground_node] <= self.threshold\n self.classes[:self.ground_node][cond_in] = new_class_in", "def set_hypercubes_parents_indices(self):\n for hypercube in self.hypercubes.flatten():\n coordinates = []\n for coord in hypercube.coords:\n coordinates.append([2 * coord, 2 * coord + 1])\n for indices in list(itertools.product(*coordinates)):\n hypercube.parent_hypercubes_indices.append(tuple(indices))", "def set_config_layers_by_class(self, class_name, **items):\n for layer in self._layers:\n if layer.__class__.__name__.lower().startswith(class_name.lower()):\n self.set_config_layer(layer.name, **items)", "def set_sclasses(self, w: Wrapper, classes: Any) -> None:\n w.setProperty(self.style_sclass_property, f\" {' '.join(set(classes))} \")", "def customise_grid(self):\n\n # Agent colours, agent tooltips and grid initialisation\n mapper = linear_cmap(field_name='category',\n palette=['blue', 'red', 'green', 'orange', 'purple'] ,\n low=0,\n high=4)\n TOOLTIPS = [(\"Residential utility\", \"@res_utility\"),\n ('Local composition', '@local_comp'),\n ('Neighbourhood composition', '@n_comp'),\n (\"School utility\", \"@school_utility\"),\n ('Distance', '@dist_school'),\n ('School composition', '@s_comp'),\n ('School composition utility', '@school_comp_utility')]\n hover = HoverTool(names=[\"households\", \"schools\"], tooltips=TOOLTIPS)\n self.grid = figure(x_range=(self.model.grid.x_min - 1,\n self.model.grid.x_max),\n y_range=(self.model.grid.y_min - 1,\n self.model.grid.y_max),\n tools=[hover, 'tap', 'pan',\n WheelZoomTool()],\n tooltips=TOOLTIPS, output_backend=\"webgl\")\n\n # Set WheelZoomTool active by default if not lattice\n if self.params['case'].lower() != 'lattice':\n self.grid.toolbar.active_scroll = self.grid.select_one(\n WheelZoomTool)\n\n # Add a background map using OpenStreetMap (Google Maps is too\n # computationally expensive and cannot zoom properly)\n self.grid.add_tile(get_provider('OSM'))\n\n self.grid.axis.visible = False\n self.grid.grid.visible = False\n # Function to highlight all households that are currently enrolled in\n # the same school.\n self.source.selected.on_change(\"indices\", self.select_households)\n\n # Plot households\n self.grid.circle(x=\"x\",\n y=\"y\",\n size=5,\n view=self.household_view,\n source=self.source,\n fill_color=mapper,\n line_color='black',\n alpha=0.8,\n nonselection_fill_alpha=0.2,\n selection_fill_alpha=1,\n name='households')\n\n # Plot schools\n 
self.grid.circle(x=\"x\",\n y=\"y\",\n size=7,\n source=self.source,\n view=self.school_view,\n fill_color='yellow',\n line_color='black',\n name='schools')\n\n # Plot neighbourhoods\n self.grid.patches('x',\n 'y',\n source=self.source,\n view=self.neighbourhood_view,\n fill_color=None,\n line_color=\"black\",\n line_width=2,\n hover_alpha=0,\n hover_line_color=None,\n name='neighbourhoods',\n selection_fill_alpha=0)", "def __init__(self, grid_cols: int = 8, **attrs: Any) -> None:\n\n super().__init__(**attrs)\n\n self.grid_cols = grid_cols\n self.forced_width = self.grid_cols * 4 - 1 + self.sidelength\n self.width = self.forced_width\n\n self._layer_functions = [foreground, background]\n\n self.layer = 0", "def __init__(self, num_classes, **kwargs):\n super(DeepLook, self).__init__()\n\n # Make config\n kwargs['num_classes'] = num_classes\n self.configs = make_configs(**kwargs)\n\n # Build submodules\n self.build_modules()", "def __init__(self, grid_size=7, num_bboxes=2, num_classes=20):\r\n super(Loss, self).__init__()\r\n self.S = grid_size\r\n self.B = num_bboxes\r\n self.C = num_classes", "def build_etl_classes(self):\n\n self.clear_etl_classes()\n\n for config in list(self.configs.values()):\n\n etl_class = self.build(config)\n\n self.add_etl_class(etl_class)", "def child_classes(self):\n response = check_defined(self, inspect.stack()[0][3])\n if not response:\n return response\n children = self.se.full_class_only_graph.successors(self.uri)\n result = restructure_output(self,\n children,\n inspect.stack()[0][3],\n self.output_type)\n return result", "def batch_update(self, examples):\n print(\"Updating the BaseGrid with a batch of examples\")\n examples_grouping_dict = {}\n for example in examples:\n hypercubes_coords = tuple(\n [int(example.coords[i] / self.hypercube_measurements[i]) for i in range(self.dims - 1, -1, -1)])\n examples_grouping_dict[(example.class_id, hypercubes_coords)] = examples_grouping_dict.get((example.class_id, hypercubes_coords), [])\n examples_grouping_dict[(example.class_id, hypercubes_coords)].append(example)\n for (class_id, hypercubes_coords), example_list in examples_grouping_dict.items():\n self.hypercubes[hypercubes_coords].update_basic(example_list=example_list)\n if self.child_grid:\n self.child_grid.batch_update(examples=examples_grouping_dict)", "def create_lower_level_grid(self):\n if self.level == 1:\n return False\n else:\n return LowerLevelGrid(level=self.level - 1, parent_hypercubes_number=self.hypercubes_number, parent_hypercubes=self.hypercubes, dims=self.dims)", "def register_classes():\n CoaddSplit.register_class()\n CoaddSplit_SG.register_class()", "def init_other_vars(self):\n self.endIndexOfClassConditionalProbability = self.numOfGridsIn1D \\\n * self.numOfGridsIn1D * self.numOfClasses\n self.endIndexOfObjectProbability \\\n = self.endIndexOfClassConditionalProbability \\\n + self.numOfGridsIn1D*self.numOfGridsIn1D*self.numOfBoxesPerGrid\n # Class Conditional Probability: P(class | object),\n self.classConditionalProbability = np.zeros([\n self.numOfGridsIn1D, self.numOfGridsIn1D, self.numOfClasses\n ])\n # P(object): Object probability, i.e. 
the probability of an\n self.objectProbability = np.zeros([\n self.numOfGridsIn1D, self.numOfGridsIn1D, self.numOfBoxesPerGrid\n ])\n # Box data (x, y, w, h)\n self.boxData = np.zeros([\n self.numOfGridsIn1D, self.numOfGridsIn1D, self.numOfBoxesPerGrid, 4\n ])\n # Offset to add to x and y values to convert from within-grid\n # coordinates to image coordinates\n self.offsetY = np.tile(\n np.arange(self.numOfGridsIn1D)[:, np.newaxis, np.newaxis],\n (1, self.numOfGridsIn1D, self.numOfBoxesPerGrid)\n )\n self.offsetX = np.transpose(self.offsetY, (1, 0, 2))\n # Most probable classes per grid\n self.maxProbableClasses = np.zeros([\n self.numOfGridsIn1D, self.numOfGridsIn1D, self.numOfBoxesPerGrid\n ])\n # Probabilities of most probable classes per grid\n self.maxProbableClassProbabilities = np.zeros([\n self.numOfGridsIn1D, self.numOfGridsIn1D, self.numOfBoxesPerGrid\n ])\n # The probability of an object present, and it being each class\n self.objectClassProbability = np.zeros([\n self.numOfGridsIn1D, self.numOfGridsIn1D, self.numOfBoxesPerGrid,\n self.numOfClasses\n ])", "def __init__(self, class_graph: class_dependency.JavaClassDependencyGraph):\n super().__init__()\n\n # Create list of all targets using class nodes\n # so we don't miss targets with no dependencies (edges).\n for class_node in class_graph.nodes:\n if len(class_node.build_targets) > _MAX_CONCURRENT_BUILD_TARGETS:\n continue\n for build_target in class_node.build_targets:\n self.add_node_if_new(build_target)\n\n for begin_class, end_class in class_graph.edges:\n if len(begin_class.build_targets) > _MAX_CONCURRENT_BUILD_TARGETS:\n continue\n if len(end_class.build_targets) > _MAX_CONCURRENT_BUILD_TARGETS:\n continue\n for begin_target in begin_class.build_targets:\n for end_target in end_class.build_targets:\n # Avoid intra-target deps.\n if begin_target == end_target:\n continue\n\n self.add_edge_if_new(begin_target, end_target)\n\n begin_target_node = self.get_node_by_key(begin_target)\n end_target_node = self.get_node_by_key(end_target)\n assert begin_target_node is not None\n assert end_target_node is not None\n begin_target_node.add_class(begin_class)\n end_target_node.add_class(end_class)\n begin_target_node.add_class_dependency_edge(\n end_target_node, begin_class, end_class)", "def _add_class_assignments(self, rawdata):\n gt_overlaps = rawdata['gt_overlaps'].toarray()\n # max overlap with gt over classes (columns)\n max_overlaps = gt_overlaps.max(axis=1)\n # gt class that had the max overlap\n max_classes = gt_overlaps.argmax(axis=1)\n rawdata['max_classes'] = max_classes\n rawdata['max_overlaps'] = max_overlaps\n # sanity checks\n # if max overlap is 0, the class must be background (class 0)\n zero_inds = np.where(max_overlaps == 0)[0]\n assert all(max_classes[zero_inds] == 0)\n # if max overlap > 0, the class must be a fg class (not class 0)\n nonzero_inds = np.where(max_overlaps > 0)[0]\n assert all(max_classes[nonzero_inds] != 0)", "def _add_class_assignments(self, roidb):\n for entry in roidb:\n gt_overlaps = entry['gt_overlaps'].toarray()\n # max overlap with gt over classes (columns)\n max_overlaps = gt_overlaps.max(axis=1)\n # gt class that had the max overlap\n max_classes = gt_overlaps.argmax(axis=1)\n entry['max_classes'] = max_classes\n entry['max_overlaps'] = max_overlaps\n # sanity checks\n # if max overlap is 0, the class must be background (class 0)\n zero_inds = np.where(max_overlaps == 0)[0]\n assert all(max_classes[zero_inds] == 0)\n # if max overlap > 0, the class must be a fg class (not class 0)\n 
nonzero_inds = np.where(max_overlaps > 0)[0]\n assert all(max_classes[nonzero_inds] != 0)" ]
[ "0.8647008", "0.70186275", "0.64768374", "0.6208812", "0.548963", "0.5472785", "0.5408511", "0.5372353", "0.5281182", "0.5218629", "0.5192152", "0.51507455", "0.51295465", "0.5093066", "0.50306475", "0.5026904", "0.49976724", "0.49723232", "0.49324986", "0.49259773", "0.48838702", "0.48737717", "0.48580557", "0.4855247", "0.48462647", "0.48409274", "0.4830536", "0.48291406", "0.48252785", "0.48211467" ]
0.88763803
0
Computes every hypercube's center by taking the midpoint between the beginning and the end of the hypercube in every dimension.
def compute_centers_of_hypercubes(self): for hypercube in self.hypercubes.flatten(): sums = np.zeros((len(hypercube.coords))) for coords in hypercube.parent_hypercubes_indices: for index, summ in enumerate(sums): sums[index] += self.parent_hypercubes[coords].center[index] hypercube.center = [x / 4 for x in sums]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_centers_of_hypercubes(self):\n for hc in self.hypercubes.flatten():\n for i in range(self.dims - 1, -1, -1):\n index = self.dims - (i + 1)\n hc.center[i] = (hc.coords[index] + 0.5) * self.hypercube_measurements[index]", "def center(box):\n x_center = box[:, 0] + (box[:, 2] - box[:, 0]) // 2\n y_center = box[:, 1] + (box[:, 3] - box[:, 1]) // 2\n return torch.stack((x_center, y_center)).t().to(box.device)", "def centre(arrayin):\r\n ny = arrayin.shape[0]\r\n nx = arrayin.shape[1]\r\n cy = 0.0\r\n cx = 0.0\r\n for i in range(ny):\r\n for j in range(nx):\r\n cy += np.float64(arrayin[i,j]) * np.float64(i - ny/2 + 1)\r\n cx += np.float64(arrayin[i,j]) * np.float64(j - nx/2 + 1)\r\n cx = cx / np.sum(arrayin)\r\n cy = cy / np.sum(arrayin)\r\n arrayout = np.roll(arrayin ,-int(cy),0)\r\n arrayout = np.roll(arrayout,-int(cx),1)\r\n return [arrayout,cy,cx]", "def find_center(self):\n x = np.int(np.rint((len(self.grid[0][0]))/2))\n center = np.array([x, x, x])\n self.grid[center[0]][center[1]][center[2]] = 1\n return self.grid, center", "def center(self):\n bounds = self.bounds\n x = (bounds[1] + bounds[0]) / 2\n y = (bounds[3] + bounds[2]) / 2\n z = (bounds[5] + bounds[4]) / 2\n return [x, y, z]", "def center_size(boxes):\n concat = P.Concat(1)\n return concat(((boxes[:, 2:] + boxes[:, :2])/2, # cx, cy\n boxes[:, 2:] - boxes[:, :2])) # w, h", "def _face_center(mesh, face):\n center = [0, 0, 0]\n for vert in face.vertices:\n center = _list_plus(vert, center)\n new_list = [x / len(face.vertices) for x in center]\n return new_list", "def center_size(boxes):\n return torch.cat([(boxes[:, :2] + boxes[:, 2:])/2, # cx, cy\n boxes[:, :2] - boxes[:, 2:]], 1) # w, h", "def center(self):\n points = set()\n for face in self._points:\n points.update(face)\n x_points = [point[0] for point in points]\n y_points = [point[1] for point in points]\n z_points = [point[2] for point in points]\n return \\\n (np.average(x_points), np.average(y_points), np.average(z_points))", "def calculate_center(self):\n return [(self.startX + self.endX) / 2., (self.startY + self.endY) / 2.]", "def to_center_form(boxes):\n x_min, y_min = boxes[:, 0], boxes[:, 1]\n x_max, y_max = boxes[:, 2], boxes[:, 3]\n center_x = (x_max + x_min) / 2.\n center_y = (y_max + y_min) / 2.\n width = x_max - x_min\n height = y_max - y_min\n return np.concatenate([center_x[:, None], center_y[:, None],\n width[:, None], height[:, None]], axis=1)", "def center(self):\n try: \n return self._center\n except AttributeError:\n self._center = vector(ZZ, [0]*self.ambient_dim())\n for v in self.vertex_generator(): self._center += v.vector()\n self._center /= self.n_vertices()\n return self._center", "def get_square_centers(self):\n x_values = np.arange(-2, 4, 2) * np.ones(self.GRID_SHAPE)\n y_values = np.arange(2, -4, -2).reshape((3, 1)) * np.ones(self.GRID_SHAPE)\n x_values *= self.spacing\n x_values += self.center[0] # add x-coordinate for grid center\n y_values *= self.spacing\n y_values += self.center[1] # add y-coordinate for grid center\n return np.dstack((x_values, y_values))", "def populateCenters(matrix, row, col, frame, midRange, roughness, perturbance):\n maxIndex = matrix.shape[0]-1\n quarterRange = midRange/2\n\n pf = perturbanceFactor(matrix.shape[0], midRange, perturbance)\n noiseLevel = roughness * pf\n\n \"\"\"\n For each subdivided cube, getIndexRef is used to get the indicies, and center is used\n to determine the points that should be averaged and the point to be set. 
\n setValue does the calculations.\n \"\"\"\n indexRef = getIndexRef(row, col, frame, quarterRange, maxIndex)\n setValue(matrix, center, indexRef, noiseLevel)\n \n indexRef = getIndexRef(row, col, frame + midRange, quarterRange, maxIndex)\n setValue(matrix, center, indexRef, noiseLevel)\n \n indexRef = getIndexRef(row, col + midRange, frame, quarterRange, maxIndex)\n setValue(matrix, center, indexRef, noiseLevel)\n \n indexRef = getIndexRef(row + midRange, col, frame, quarterRange, maxIndex)\n setValue(matrix, center, indexRef, noiseLevel)\n \n indexRef = getIndexRef(row + midRange, col + midRange, frame, quarterRange, maxIndex)\n setValue(matrix, center, indexRef, noiseLevel)\n \n indexRef = getIndexRef(row + midRange, col, frame + midRange, quarterRange, maxIndex)\n setValue(matrix, center, indexRef, noiseLevel)\n \n indexRef = getIndexRef(row, col + midRange, frame + midRange, quarterRange, maxIndex)\n setValue(matrix, center, indexRef, noiseLevel)\n \n indexRef = getIndexRef(row + midRange, col + midRange, frame + midRange, quarterRange, maxIndex)\n setValue(matrix, center, indexRef, noiseLevel)\n \n\n #printAllowCancel(matrix)", "def gen_center_points(x_res, y_res, dim):\n center_points = []\n\n for x in range(math.floor(dim[0] / x_res)):\n for y in range(math.floor(dim[1] / y_res)):\n x = (x + 1) * x_res\n y = (y + 1) * y_res\n center_points.append((x, y))\n\n return center_points", "def center_image_grid_anchors(image_grid):\n for image in image_grid:\n center_image_anchor(image)", "def center_of_mass(im_binary, x_offset=0, y_offset=0):\n n = np.sum(im_binary)\n\n x = np.arange(im_binary.shape[1]) + x_offset\n y = np.arange(im_binary.shape[0]) + y_offset\n xv, yv = np.meshgrid(x, y)\n cx = np.sum(xv[im_binary]) / n\n cy = np.sum(yv[im_binary]) / n\n\n return cx, cy", "def center(self):\n return np.array([0,0,1/self.C+self.pos()])", "def compute_cell_center(seg_img: np.ndarray, labels: np.ndarray, results: np.ndarray) \\\n -> np.ndarray:\n for label in labels:\n if label != 0:\n all_points_z, all_points_x, all_points_y = np.where(seg_img == label)\n avg_z = np.round(np.mean(all_points_z))\n avg_x = np.round(np.mean(all_points_x))\n avg_y = np.round(np.mean(all_points_y))\n results[label] = [avg_z, avg_x, avg_y]\n\n return results", "def center(self):\n cyl = (len(self.cells) - 1) / 2 # Lower and upper bound of list slices\n cxl = (len(self.cells[0]) - 1) / 2\n cyu = len(self.cells) / 2 + 1\n cxu = len(self.cells[0]) / 2 + 1\n\n # candidates are all the cells in the middle,\n # accounting for even dimensions\n candidates = []\n\n for r in self.cells[cyl:cyu]:\n candidates += r[cxl:cxu]\n\n # center is the candidate with the most carrots\n center = max(candidates, key=lambda c: c.carrots)\n\n return center", "def get_center(self):\n return center_points(np.expand_dims(self.best_box, axis=0))[0]", "def center(self):\n # minz to offset the heights to 0\n mz = (self.maxz-self.minz)/2\n #mz = self.minz\n return (self.minx + self.width / 2, self.miny + self.height / 2, mz)", "def center(self):\n return np.sum(self.bbox, 0) / 2", "def get_cell_center_coordinates(self):\n import numpy as np\n x1, x2, x3 = np.ix_(*self.cell_center_coordinates)\n if self.geometry == 'cartesian':\n x, y, z = x1, x2, x3\n elif self.geometry == 'spherical':\n x = x1 * np.sin(x2) * np.cos(x3)\n y = x1 * np.sin(x2) * np.sin(x3)\n z = x1 * np.cos(x2)\n return x, y, z", "def cell_center_fast(seg_img: np.ndarray, labels: np.ndarray) -> np.ndarray:\n array_max_idx = max(labels)\n results = np.zeros((array_max_idx + 1, 
3))\n results = compute_cell_center(seg_img, labels, results)\n\n return results", "def _get_centers(self, lwidth, lheight, batch_size):\n x_left, y_left = tf.meshgrid(tf.range(0, lheight), tf.range(0, lwidth))\n x_y = K.stack([x_left, y_left], axis = -1)\n x_y = tf.cast(x_y, dtype = self.dtype)/tf.cast(lwidth, dtype = self.dtype)\n x_y = tf.repeat(tf.expand_dims(tf.repeat(tf.expand_dims(x_y, axis = -2), self._num, axis = -2), axis = 0), batch_size, axis = 0)\n return x_y", "def center_size(boxes):\n wh = boxes[:, 2:] - boxes[:, :2] + 1.0\n if isinstance(boxes, np.ndarray):\n return np.column_stack((boxes[:, :2] + 0.5 * wh, wh))\n return torch.cat((boxes[:, :2] + 0.5 * wh, wh), 1)", "def center(x):\n return x - x.mean()", "def center(self, x):\n\n shape = x.shape\n nx = shape[1]\n ny = shape[0]\n hnx = nx // 2\n hny = ny // 2\n\n temp = x[0:hny, 0:hnx].copy()\n x[0:hny, 0:hnx] = x[hny:ny, hnx:nx].copy()\n x[hny:ny, hnx:nx] = temp\n\n temp = x[0:hny, hnx:nx].copy()\n x[0:hny, hnx:nx] = x[hny:ny, 0:hnx].copy()\n x[hny:ny, 0:hnx] = temp", "def center(image):\n\tsize = image.shape\n\thalf = int(np.ceil(size[0]/2))\n\timage = np.roll(np.roll(image, half, 0), half, 1)\n\treturn image" ]
[ "0.8033822", "0.68309146", "0.67945725", "0.6649882", "0.6605336", "0.6538168", "0.6528375", "0.64801854", "0.6419897", "0.6411588", "0.63880193", "0.63441837", "0.63206303", "0.6310798", "0.6282664", "0.6226101", "0.6219449", "0.62121934", "0.61837673", "0.6167192", "0.6144268", "0.6130036", "0.6124794", "0.60982823", "0.60936165", "0.60833555", "0.6060997", "0.60480255", "0.60332584", "0.6026207" ]
0.74494195
1
Classifies an observation with the given coordinates.
def classify(self, example_coords, hypercubes_coords): print("Classifying an observation with coordinates; " + str(example_coords)) hypercubes_coords = tuple([int(x / 2) for x in hypercubes_coords]) if self.hypercubes[hypercubes_coords].hypercube_class == EMPTY_HYPERCUBE_INDICATOR: returned_class = self.child_grid.classify(example_coords=example_coords, hypercubes_coords=hypercubes_coords) if returned_class[0] == -1: returned_class = self.nearest_neighbours_class(example_coords, returned_class[1]) return returned_class elif self.hypercubes[hypercubes_coords].hypercube_class == MIXED_HYPERCUBE_INDICATOR: return -1, self.hypercubes[hypercubes_coords].parent_hypercubes_indices # -1 is a flag indicating that we need to compute distances else: return self.hypercubes[hypercubes_coords].hypercube_class
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test(self, example_coords):\n print(\"Predicting the class of an observation with coordinates: \" + str(example_coords))\n hypercubes_coords = tuple(\n [int(example_coords[i] / self.hypercube_measurements[i]) for i in range(self.dims - 1, -1, -1)])\n if self.hypercubes[hypercubes_coords].hypercube_class is not EMPTY_HYPERCUBE_INDICATOR:\n return self.hypercubes[hypercubes_coords].hypercube_class\n else:\n print(\"Observation with coordinates \" + str(example_coords) + \" falls within an empty cube.\")\n returned_class = self.child_grid.classify(example_coords=example_coords, hypercubes_coords=hypercubes_coords)\n if returned_class[0] == -1:\n returned_class = self.nearest_neighbours_class(example_coords=example_coords, parents_indices=returned_class[1])\n return returned_class", "def __init__(self, coordinates):\n self.coordinates = coordinates", "def coordinates(self):", "def classification_score(self, x, y):\t\n\t\tpass", "def predict(self, coordinates):\n check_is_fitted(self, [\"force_\"])\n force_east, force_north = self.force_coords\n east, north = n_1d_arrays(coordinates, n=2)\n cast = np.broadcast(*coordinates[:2])\n npoints = cast.size\n components = (\n np.empty(npoints, dtype=east.dtype),\n np.empty(npoints, dtype=east.dtype),\n )\n if parse_engine(self.engine) == \"numba\":\n components = predict_2d_numba(\n east,\n north,\n force_east,\n force_north,\n self.mindist,\n self.poisson,\n self.force_,\n components[0],\n components[1],\n )\n else:\n components = predict_2d_numpy(\n east,\n north,\n force_east,\n force_north,\n self.mindist,\n self.poisson,\n self.force_,\n components[0],\n components[1],\n )\n return tuple(comp.reshape(cast.shape) for comp in components)", "def classify(self, X):\n x = convert_data( X )\n return self.internal.classify(*get_data(x))", "def coordinates(self, coordinates):\n\n self._coordinates = coordinates", "def __init__(\n self, coordinates, features, original_labels=None, inverse_maps=None,\n ):\n self.coordinates = coordinates\n self.features = features\n self.original_labels = original_labels\n self.inverse_maps = inverse_maps", "def set_coordinates(self, coordinates):\n self.coordinates = coordinates", "def test_toy_dataset_predictions() -> None:\n clf = GaussianNB().fit(X_toy, y_toy)\n mapie = MapieClassifier(estimator=clf, cv=\"prefit\").fit(X_toy, y_toy)\n _, y_ps = mapie.predict(X_toy, alpha=0.2)\n np.testing.assert_allclose(\n classification_coverage_score(y_toy, y_ps[:, :, 0]), 7/9\n )\n np.testing.assert_allclose(y_ps[:, :, 0], y_toy_mapie)", "def __init__(self,coordinate,label=None,color=None,markersize=80,marker='o'):\n self.coordinate = np.array(coordinate)\n self.label = label\n self.color = color\n self.marker = marker\n self.markersize = markersize", "def _setCoords(self, coords, label='', overwrite=False):\n\n n_atoms = self._n_atoms\n if n_atoms:\n if coords.shape[-2] != n_atoms:\n raise ValueError('coords array has incorrect number of atoms')\n else:\n self._n_atoms = n_atoms = coords.shape[-2]\n\n ndim = coords.ndim\n shape = coords.shape\n if self._coords is None or overwrite or (ndim == 3 and shape[0] > 1):\n if ndim == 2:\n self._coords = coords.reshape((1, n_atoms, 3))\n self._cslabels = [str(label)]\n self._n_csets = n_csets = 1\n\n else:\n self._coords = coords\n self._n_csets = n_csets = shape[0]\n\n\n if isinstance(label, list):\n if len(label) == n_csets:\n self._cslabels = list(label)\n\n else:\n self._cslabels = [''] * n_csets\n LOGGER.warn('Number of labels does not match number '\n 'of coordinate sets.')\n 
else:\n self._cslabels = [str(label)] * n_csets\n self._acsi = 0\n self._setTimeStamp()\n\n else:\n acsi = self._acsi\n if ndim == 2:\n self._coords[acsi] = coords\n else:\n self._coords[acsi] = coords[0]\n self._setTimeStamp(acsi)\n self._cslabels[acsi] = str(label)", "def annotate_image(file_path, coordinates):\n \n # Load raw image\n image = Image.open(file_path)\n image_width, image_height = image.size\n image_side = image_width if image_width >= image_height else image_height\n\n # Annotate image\n image_draw = ImageDraw.Draw(image)\n image_coordinates = coordinates[0]\n image = e_utils.display_body_parts(image, image_draw, image_coordinates, image_height=image_height, image_width=image_width, marker_radius=int(image_side/150))\n image = e_utils.display_segments(image, image_draw, image_coordinates, image_height=image_height, image_width=image_width, segment_width=int(image_side/100))\n \n # Save annotated image\n image.save(normpath(file_path.split('.')[0] + '_tracked.png'))", "def annotate_image(file_path, coordinates):\n\n # Load raw image\n from PIL import Image, ImageDraw\n image = Image.open(file_path)\n image_width, image_height = image.size\n image_side = image_width if image_width >= image_height else image_height\n\n # Annotate image\n image_draw = ImageDraw.Draw(image)\n image_coordinates = coordinates[0]\n image = helpers.display_body_parts(image, image_draw, image_coordinates, image_height=image_height,\n image_width=image_width, marker_radius=int(image_side / 150))\n image = helpers.display_segments(image, image_draw, image_coordinates, image_height=image_height,\n image_width=image_width, segment_width=int(image_side / 100))\n\n # Save annotated image\n image.save(os.path.normpath(file_path.split('.')[0] + '_tracked.png'))", "def predict(self, coordinates):\n check_is_fitted(self, [\"region_\"])\n return tuple(comp.predict(coordinates) for comp in self.components)", "def transform_to(self, coordinates) -> np.ndarray:\n return (coordinates - self._o) @ self.T.T", "def change_coords(self, coords):\n self.__corrds[self.X_CORD], self.__corrds[self.Y_CORD] = coords\n if type(coords) != tuple:\n raise ValueError('coords must be a tuple of the form (x, y)')\n\n # if False in [type(cord) != float for cord in coords]:\n # raise ValueError('coords must be a tuple of the form (x, y)')", "def classify_incidents(in_features, date_field, report_location, repeatdist,\n spatial_bands, temporal_bands, out_lines_dir,\n out_lines_name, *args):\n try:\n # Fix for potential issue with xlsx files as report locations\n if not path.isdir(report_location):\n report_location = path.dirname(report_location)\n\n # Build sorted lists of band values\n spatial_bands = [float(b) for b in spatial_bands.split(';')]\n temporal_bands = [float(b) for b in temporal_bands.split(';')]\n\n repeatdist = float(repeatdist)\n spatial_bands.append(repeatdist)\n\n spatial_bands = list(set(spatial_bands))\n temporal_bands = list(set(temporal_bands))\n\n spatial_bands.sort()\n temporal_bands.sort()\n\n arcpy.env.overwriteOutput = True\n\n # Report run time used for file names\n now = dt.strftime(dt.now(), \"%Y-%m-%d_%H-%M-%S\")\n now_nice = dt.strftime(dt.now(), \"%Y-%m-%d %H:%M:%S\")\n\n # Check for and delete existing fields necessary for classification\n reset_fields(in_features)\n\n # Get name of OID field\n oidname = arcpy.Describe(in_features).oidFieldName\n\n # Get sorted list of unique incident date values\n with arcpy.da.SearchCursor(in_features, date_field) as rows:\n date_vals = [row[0] for row in rows]\n\n 
date_vals = list(set(date_vals))\n date_vals.sort()\n\n # Range of incident dates\n min_date = date_vals[0]\n max_date = date_vals[-1]\n\n # Keep track of origins and nrs\n oids = []\n nrids = []\n rids = []\n\n # Connecting line segments and table rows\n new_lines = []\n new_rows = []\n\n # Build empty dictionary to hold type tallies\n type_counts = {}\n for sband in spatial_bands:\n type_counts[sband] = {}\n for tband in temporal_bands:\n type_counts[sband][tband] = {'oids': [],\n 'nrids': [],\n 'rids': []}\n\n # Value lists for half life calculations\n all_distances = {}\n for sband in spatial_bands:\n all_distances[sband] = []\n\n all_lives = {}\n for tband in temporal_bands:\n all_lives[tband] = []\n\n found_connections = []\n\n # Build table of all records within the max spatial band of anther feature\n near_table = arcpy.GenerateNearTable_analysis(in_features, in_features, search_radius=temporal_bands[-1], closest='ALL', method='GEODESIC')\n\n # Identify and process relevent near features\n with arcpy.da.SearchCursor(near_table, field_names=['IN_FID', 'NEAR_FID', 'NEAR_DIST']) as nearrows:\n\n # Process each identified connection within the spatial bands\n for nearrow in nearrows:\n dist = nearrow[2]\n if not dist <= spatial_bands[-1]:\n continue\n\n links= []\n\n # Find the two features that are part of the connection\n where_clause = \"\"\"{} in ({},{})\"\"\".format(oidname, nearrow[0], nearrow[1])\n fields = [oidname, date_field, z_value_field, 'SHAPE@X','SHAPE@Y']\n with arcpy.da.UpdateCursor(in_features, field_names=fields, where_clause=where_clause) as cur_link:\n for feat in cur_link:\n # Calculate the z values of each incident in the pair\n zval = feat[1] - min_date\n feat[2] = zval.days\n cur_link.updateRow(feat)\n links.append([feat[0], feat[1], feat[3], feat[4], feat[2]])\n\n # Identify which feature is the oldest and id it as the source\n if links[0][1] > links[1][1]:\n oid, odate, ox, oy, oz = links[1]\n fid, fdate, fx, fy, fz = links[0]\n\n else:\n oid, odate, ox, oy, oz = links[0]\n fid, fdate, fx, fy, fz = links[1]\n\n # test for new connection\n if (oid, fid) in found_connections:\n continue\n\n # Calculate the days between the two dates\n datediff = fdate - odate\n daydiff = datediff.days\n\n # only process rows within defined temporal bands\n if daydiff > temporal_bands[-1]:\n continue\n\n # Identify the spatial bands that are covered by this relationship and create a connecting line feature\n link_found = False\n for sband in spatial_bands:\n if dist <= sband:\n for tband in temporal_bands:\n if daydiff <= tband:\n if not link_found:\n # track distances and lives for half measures\n all_distances[sband].append(dist)\n all_lives[tband].append(daydiff)\n incident_sband = sband\n incident_tband = tband\n\n link_found = True\n\n # id classification\n if oid not in type_counts[sband][tband]['oids']:\n type_counts[sband][tband]['oids'].append(oid)\n if dist <= spatial_bands[0]:\n if fid not in type_counts[sband][tband]['rids']:\n type_counts[sband][tband]['rids'].append(fid)\n elif fid not in type_counts[sband][tband]['nrids']:\n type_counts[sband][tband]['nrids'].append(fid)\n\n if link_found:\n found_connections.append((oid, fid))\n\n # create connecting line from x, y, z values of two pts\n end = arcpy.Point(X=fx, Y=fy, Z=fz)\n start = arcpy.Point(X=ox, Y=oy, Z=oz)\n vertices = arcpy.Array([start, end])\n feature = arcpy.Polyline(vertices, None, True, False)\n new_lines.append([fid, oid, dist, daydiff, incident_sband, incident_tband, feature])\n\n # Delete near 
table\n arcpy.Delete_management(near_table)\n\n # Create feature class for connecting lines\n sr = arcpy.Describe(in_features).spatialReference\n connectors = arcpy.CreateFeatureclass_management(out_lines_dir,\n out_lines_name,\n 'POLYLINE',\n has_z='ENABLED',\n spatial_reference=sr)\n arcpy.AddField_management(connectors, 'FEATUREID', \"LONG\")\n arcpy.AddField_management(connectors, origin_feat_field, \"LONG\")\n arcpy.AddField_management(connectors, dist_orig_field, \"FLOAT\")\n arcpy.AddField_management(connectors, 'RPTDAYS', \"FLOAT\")\n arcpy.AddField_management(connectors, spatial_band_field, \"FLOAT\")\n arcpy.AddField_management(connectors, temporal_band_field, \"FLOAT\")\n\n # Insert connecting line features from the array of values\n fields = ['FEATUREID', origin_feat_field, dist_orig_field, 'RPTDAYS', spatial_band_field, temporal_band_field, 'SHAPE@']\n with arcpy.da.InsertCursor(connectors, fields) as rows:\n for new_line in new_lines:\n rows.insertRow(new_line)\n\n # Manage classification fields\n fieldnames = []\n for sband in spatial_bands:\n for tband in temporal_bands:\n fieldnames.append('s{}t{}'.format(int(sband), int(tband)))\n\n cur_fields = [f.name for f in arcpy.ListFields(in_features)]\n for fieldname in fieldnames:\n if fieldname in cur_fields:\n arcpy.DeleteField_management(in_features, fieldname)\n arcpy.AddField_management(in_features, fieldname, 'TEXT', field_length=2)\n\n # Classify & count incidents by type\n for sband in spatial_bands:\n for tband in temporal_bands:\n band = type_counts[sband][tband]\n type_counts[sband][tband]['oids'] = [id for id in band['oids'] if id not in band['nrids'] and id not in band['rids']]\n type_counts[sband][tband]['nrids'] = [id for id in band['nrids'] if id not in band['rids']]\n\n fields = [\"OID@\", date_field, z_value_field]\n fields.extend(fieldnames)\n\n with arcpy.da.UpdateCursor(in_features, fields) as rows:\n inc_count = 0\n for row in rows:\n inc_count += 1\n\n # calc z value if missing\n if not row[2]:\n zval = row[1] - min_date\n row[2] = zval.days\n\n classifications = []\n\n for sband in spatial_bands:\n for tband in temporal_bands:\n if row[0] in type_counts[sband][tband]['nrids']:\n classifications.append('NR')\n elif row[0] in type_counts[sband][tband]['rids']:\n classifications.append('R')\n elif row[0] in type_counts[sband][tband]['oids']:\n classifications.append('O')\n else:\n classifications.append(None)\n row[3:] = classifications\n\n rows.updateRow(row)\n\n # Build empty dictionary to hold spatial and temporal band tallies\n band_counts = {}\n for sband in spatial_bands:\n band_counts[sband] = {}\n for tband in temporal_bands:\n band_counts[sband][tband] = 0\n\n for sband in spatial_bands:\n for tband in temporal_bands:\n if sband == spatial_bands[0]:\n band_counts[sband][tband] = len(type_counts[sband][tband]['rids'])\n else:\n band_counts[sband][tband] = len(type_counts[sband][tband]['nrids'])\n\n # Get unit of feature class spatial reference system\n try:\n unit = units[sr.linearUnitName]\n except KeyError:\n unit = ''\n\n # Get half-life and half-distance\n test_distances = []\n half_distances = {}\n for sband in spatial_bands:\n test_distances.extend(all_distances[sband])\n test_distances.sort()\n if len(test_distances) > 0:\n half_distances[sband] = test_distances[int(len(test_distances)/2)]\n else:\n half_distances[sband] = 'Not Calculated'\n\n test_lives = []\n half_lives = {}\n for tband in temporal_bands:\n test_lives.extend(all_lives[tband])\n test_lives.sort()\n if len(test_lives) > 0:\n 
half_lives[tband] = test_lives[int(len(test_lives)/2)]\n else:\n half_lives[tband] = 'Not Calculated'\n\n # Build report content\n report_header = ('Repeat and Near Repeat Incident Summary\\n'\n 'Created {}\\n'.format(now_nice))\n\n data_info = ('Data Source: {}\\n'\n 'Incident Date Range: {} - {}\\n'\n '# Incidents Processed: {}'.format(in_features, min_date, max_date, inc_count))\n\n## inc_type_reports = ''\n## console_type_rpts = ''\n##\n## for sband in spatial_bands:\n## for tband in temporal_bands:\n## cnt_o = len(type_counts[sband][tband]['oids'])\n## cnt_n = len(type_counts[sband][tband]['nrids'])\n## cnt_r = len(type_counts[sband][tband]['rids'])\n##\n## perc_o = \"{:.1f}\".format(100.0*float(cnt_o)/inc_count)\n## perc_n = \"{:.1f}\".format(100.0*float(cnt_n)/inc_count)\n## perc_r = \"{:.1f}\".format(100.0*float(cnt_r)/inc_count)\n##\n## inc_type_reports += ('Count and percentage of each type of incident in spatial band {}{} and temporal band {} days\\n'\n## ', Count, Percentage\\n'\n## 'All Incidents,{}, 100\\n'\n## 'Originators,{},{}\\n'\n## 'Near Repeats,{},{}\\n'\n## 'Repeats,{},{}\\n\\n'.format(sband, unit, tband,\n## inc_count,\n## cnt_o, perc_o,\n## cnt_n, perc_n,\n## cnt_r, perc_r))\n## console_type_rpts += ('Count and percentage of each type of incident in spatial band {}{} and temporal band {} days\\n'\n## ' Count Percentage\\n'\n## 'All Incidents {:^10} {:^13}\\n'\n## 'Originators {:^10} {:^13}\\n'\n## 'Near Repeats {:^10} {:^13}\\n'\n## 'Repeats {:^10} {:^13}\\n\\n'.format(sband, unit, tband,\n## inc_count, 100,\n## cnt_o, perc_o,\n## cnt_n, perc_n,\n## cnt_r, perc_r))\n\n half_lives_str = 'Estimated incident half-life\\n'\n half_lives_str_console = 'Estimated incident half-life\\n'\n for tband in temporal_bands:\n half_lives_str += '{} days temporal band, {:.1f} days\\n'.format(tband, half_lives[tband])\n half_lives_str_console += '{} days temporal band: {:.1f} days\\n'.format(tband, half_lives[tband])\n\n half_distance_str = 'Estimated incident half-distance\\n'\n half_distance_str_console = 'Estimated incident half-distance\\n'\n for sband in spatial_bands[1:]:\n half_distance_str += '{0} {1} spatial band, {2:.1f} {1}\\n'.format(sband, unit, half_distances[sband])\n half_distance_str_console += '{0} {1} spatial band: {2:.1f} {1}\\n'.format(sband, unit, half_distances[sband])\n\n temp_band_strs = [\"<={} days\".format(b) for b in temporal_bands]\n temporal_band_labels = ','.join(temp_band_strs)\n console_tband_labels = ' '.join(['{:^12}'.format(bnd) for bnd in temp_band_strs])\n\n counts_title = 'Number of Repeat and Near-Repeat incidents per spatial and temporal band\\n'\n percent_title = 'Percentage of all incidents classified as Repeat or Near-Repeat and appearing in each spatial and temporal band\\n'\n\n counts_header = ',{}\\n'.format(temporal_band_labels)\n console_counts_header = ' {}'.format(console_tband_labels)\n\n percent_header = ',{}\\n'.format(temporal_band_labels)\n console_perc_header = ' {}'.format(console_tband_labels)\n\n counts_table = \"\"\n percent_table = \"\"\n console_count = \"\"\n console_perc = \"\"\n\n row_sum = [0 for tband in temporal_bands]\n\n for sband in spatial_bands:\n\n # get temporal bands and their incident counts\n vals = [band_counts[sband][tband] for tband in temporal_bands]\n\n # Get spatial band count in each temporal band\n # Sums include counts from smaller bands\n## row_counts = [vals[tband] for tband in temporal_bands]\n## try:\n## row_sums = [sum(row_counts[0:i]) for i in xrange(1,len(row_counts)+1)]\n## except:\n## 
row_sums = [sum(row_counts[0:i]) for i in range(1,len(row_counts)+1)]\n##\n## row_sum = [x + y for (x, y) in zip(row_sums, row_sum)]\n row_perc = [100.0 * float(val)/inc_count for val in vals]\n\n # append counts & percentages to the table\n if sband == spatial_bands[0]:\n counts_table += '<={} {},{}\\n'.format(sband, unit, ','.join([str(cnt) for cnt in vals]))\n console_count += '{:>25} {}\\n'.format('<={} {}'.format(sband, unit), ' '.join(['{:^12}'.format(cnt) for cnt in vals]))\n percent_table += '<={} {},{}\\n'.format(sband, unit, ','.join([\"{:.1f}\".format(prc) for prc in row_perc]))\n console_perc += '{:>25} {}\\n'.format('<={} {}'.format(sband, unit), ' '.join(['{:^12}'.format(\"{:.1f}\".format(prc)) for prc in row_perc]))\n else:\n counts_table += '>{} to {} {},{}\\n'.format(spatial_bands[0], sband, unit, ','.join([str(cnt) for cnt in vals]))\n console_count += '{:>25} {}\\n'.format('>{} to {} {}'.format(spatial_bands[0], sband, unit), ' '.join(['{:^12}'.format(cnt) for cnt in vals]))\n percent_table += '>{} to {} {},{}\\n'.format(spatial_bands[0], sband, unit, ','.join([\"{:.1f}\".format(prc) for prc in row_perc]))\n console_perc += '{:>25} {}\\n'.format('>{} to {} {}'.format(spatial_bands[0], sband, unit), ' '.join(['{:^12}'.format(\"{:.1f}\".format(prc)) for prc in row_perc]))\n\n # Write report\n reportname = path.join(report_location, \"{}_{}.csv\".format('Summary', now))\n with open(reportname, 'w') as report:\n\n report.write(report_header)\n report.write('\\n')\n report.write(data_info)\n report.write('\\n')\n report.write(half_distance_str)\n report.write('\\n')\n report.write(half_lives_str)\n report.write('\\n')\n## report.write(inc_type_reports)\n report.write(counts_title)\n report.write(counts_header)\n report.write(counts_table)\n report.write('\\n')\n report.write(percent_title)\n report.write(percent_header)\n report.write(percent_table)\n\n arcpy.SetParameterAsText(9, path.join(out_lines_dir, out_lines_name))\n arcpy.AddMessage(\"\\nView incident summary report: {}\\n\".format(reportname))\n\n arcpy.AddMessage(report_header)\n arcpy.AddMessage('')\n arcpy.AddMessage(data_info)\n arcpy.AddMessage('')\n arcpy.AddMessage(half_distance_str_console)\n arcpy.AddMessage('')\n arcpy.AddMessage(half_lives_str_console)\n arcpy.AddMessage('')\n## arcpy.AddMessage(console_type_rpts)\n arcpy.AddMessage(counts_title)\n arcpy.AddMessage(console_counts_header)\n arcpy.AddMessage(console_count)\n arcpy.AddMessage('')\n arcpy.AddMessage(percent_title)\n arcpy.AddMessage(console_perc_header)\n arcpy.AddMessage(console_perc)\n\n## print(\"\\nView incident summary report: {}\\n\".format(reportname))\n##\n## print(report_header)\n## print('')\n## print(data_info)\n## print('')\n## print(half_distance_str_console)\n## print('')\n## print(half_lives_str_console)\n## print('')\n#### arcpy.AddMessage(console_type_rpts)\n## print(counts_title)\n## print(console_counts_header)\n## print(console_count)\n## print('')\n## print(percent_title)\n## print(console_perc_header)\n## print(console_perc)\n\n except arcpy.ExecuteError:\n # Get the tool error messages\n msgs = arcpy.GetMessages()\n arcpy.AddError(msgs)\n print(msgs)\n\n except:\n # Return error messages for use in script tool or Python Window\n arcpy.AddError(str(sys.exc_info()[1]))\n\n # Print Python error messages for use in Python / Python Window\n print(str(sys.exc_info()[1]) + \"\\n\")", "def __init__(self,prediction, x1, y1, x2, y2):\n self.prediction = prediction\n self.x1 = x1\n self.y1 = y1\n self.x2 = x2\n self.y2 = y2", "def 
make_annotation_dict(coords, pairs=None,\n nodes_class=None,\n nodes_class_color_mapper=None,\n ):\n\n annotations = {}\n new_nodes = convert_nodes_tys_to_nap(coords)\n annotations['nodes_coords'] = new_nodes\n if nodes_class is not None:\n annotations['nodes_class'] = nodes_class\n if nodes_class_color_mapper is not None:\n annotations['nodes_class_color_mapper'] = nodes_class_color_mapper\n if pairs is not None:\n annotations['edges_coords'] = convert_edges_tys_to_nap(new_nodes, pairs)\n return annotations", "def get_causal_labels(posX, posY, Xrange=32, Yrange=32, nclasses=8):\n x_label = posX // (Xrange/nclasses)\n y_label = posY // (Yrange/nclasses)\n xy_class = nclasses * x_label + y_label\n return xy_class", "def classify(self, data):\n abstract", "def coords_plot(self):\n self.load_coords()\n x = []\n y = []\n px = [] \n for item in self.coords:\n if item[1] >52.10 and item[1] <52.4 and item[2]>20.8 and item [2] <21.4:\n x.append(item[1])\n y.append(item[2])\n px.append(item[3])\n plt.scatter(x,y,c=px,s=150,alpha=0.3)\n plt.show()", "def piece_encompasses_coordinates(self, coordinates) -> Model or bool:\n if len(coordinates) != 2:\n raise IndexError(\"Coordinates consist of x and y\")\n for piece in self.pieces:\n if piece.encompasses_coordinates(coordinates):\n return piece\n return False", "def loc_from_tuple(self, coords):\n self.x, self.y = coords", "def set_observed_class(self, cell):\n try:\n if cell.coords in [c.coords for c in self.clses[0]]:\n self.obs = self.clses[0]\n elif cell.coords in [c.coords for c in self.clses[1]]:\n self.obs = self.clses[1]\n elif cell.coords in [c.coords for c in self.clses[2]]:\n self.obs = self.clses[2]\n elif cell.coords in [c.coords for c in self.clses[3]]:\n self.obs = self.clses[3]\n else:\n print self.clses \n raise OutOfBoardError()\n except IndexError:\n print self.clses \n raise OutOfBoardError()", "def save_coords(self, coords):\n pos = f'{coords[0]}, {coords[1]}, {coords[2]}'\n descr = str(\" \".join(coords[3:]))\n\n if not self.check_coords_file():\n file = open(self.coords_file, 'x')\n \n if not self.__check_coordinates(pos):\n file = open(self.coords_file, 'a')\n file.write(f'{str(pos)} - {descr}\\n')\n file.close()\n return True\n return False", "def _annotation_from_index(self, index, _COCO):\n im_ann = _COCO.loadImgs(index)[0]\n width = im_ann['width']\n height = im_ann['height']\n\n annIds = _COCO.getAnnIds(imgIds=index, iscrowd=None)\n objs = _COCO.loadAnns(annIds)\n # Sanitize bboxes -- some are invalid\n valid_objs = []\n for obj in objs:\n x1 = np.max((0, obj['bbox'][0]))\n y1 = np.max((0, obj['bbox'][1]))\n x2 = np.min((width - 1, x1 + np.max((0, obj['bbox'][2] - 1))))\n y2 = np.min((height - 1, y1 + np.max((0, obj['bbox'][3] - 1))))\n if obj['area'] > 0 and x2 >= x1 and y2 >= y1:\n obj['clean_bbox'] = [x1, y1, x2, y2]\n valid_objs.append(obj)\n objs = valid_objs\n num_objs = len(objs)\n\n res = np.zeros((num_objs, 5))\n\n # Lookup table to map from COCO category ids to our internal class\n # indices\n for ix, obj in enumerate(objs):\n cls = self._thing_dataset_id_to_contiguous_id[obj['category_id']]\n res[ix, 0:4] = obj['clean_bbox']\n res[ix, 4] = cls\n\n return res", "def coordinates(self):\n # TODO: Add the feature where coordinates come from multiple sources.\n # Consider whether or not you'd want to output the categorical\n # variable indicating the source of the coordinate data or\n # make the user place coordinates a different property entirely.\n try:\n bounding_box = array(\n self.status.place\n 
[\"bounding_box\"]\n [\"coordinates\"]\n ).squeeze()\n centroid = bounding_box.mean(axis=0)\n return centroid\n except AttributeError:\n return zeros(2)", "def classify(self, example):\n raise NotImplementedError()" ]
[ "0.59496695", "0.54452467", "0.5402337", "0.5321679", "0.5184538", "0.5163346", "0.51437443", "0.5075722", "0.5045529", "0.5041442", "0.5035127", "0.5005401", "0.49902293", "0.49848723", "0.49844524", "0.49838632", "0.4979044", "0.49766052", "0.49506316", "0.49488333", "0.49362886", "0.49309915", "0.491365", "0.4897143", "0.48812866", "0.4871709", "0.4833823", "0.48204356", "0.480635", "0.48013702" ]
0.62354344
0
Tests that adding two users works
def testAdd2(self): self.assertEquals(models.SUCCESS, self.users.add("userC", "password")) self.assertEquals(models.SUCCESS, self.users.add("userD", "password"))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_011_add_same_user(self):\n testflow.step(ADD_USR_MSG, TEST_USER1)\n assert not USER_CLI.run('add', TEST_USER1)[0]", "def testAdd1(self):\n self.assertEquals(models.SUCCESS, self.users.add(\"userA\", \"password\"))", "def test_add_user(self):\n pass", "def testAddExists(self):\n self.assertEquals(models.SUCCESS, self.users.add(\"userB\", \"password\"))\n self.assertEquals(models.ERR_USER_EXISTS, self.users.add(\"userB\", \"password\"))", "def test_addUser(self):\n self.new_user.saveUser()\n self.assertEqual(len(User.users_list),1)", "def test_resource_user_resource_add_users_post(self):\n pass", "def test_add_duplicate(self, api):\n self.builder.add_user(api.get_user())\n resp = api.add_user(api.get_user())\n self.builder.del_user(api.get_user())\n assert resp.status_code == 304", "def test_teams_add_user_to_team_v2(self):\n pass", "def test_user_creation_is_successful(self):\n user_1 = User.objects.get(pk=self.user_1.id)\n user_2 = User.objects.get(pk=self.user_2.id)\n user_count = User.objects.count()\n\n self.assertEqual(user_1.first_name, \"John\")\n self.assertEqual(user_2.first_name, \"Kent\")\n self.assertEqual(user_count, 2)", "def test_resource_user_resource_add_user_post(self):\n pass", "def test_main_add_user(self):\n with self.client:\n response = self.client.post(\n '/',\n data=dict(username='michael', email='[email protected]'),\n follow_redirects=True\n )\n self.assertEqual(response.status_code, 200)\n self.assertIn(b'<h1>All Users</h1>', response.data)\n self.assertNotIn(b'<p>No users!</p>', response.data)\n self.assertIn(b'michael', response.data)", "def test_teams_add_user_to_team_v1(self):\n pass", "def test_manage_user(self):\r\n # First with a new user\r\n user_data = dict(user_id=1, screen_name='twitter')\r\n token = dict(oauth_token='token', oauth_token_secret='secret')\r\n user = manage_user(token, user_data, None)\r\n assert user.email_addr == user_data['screen_name'], user\r\n assert user.name == user_data['screen_name'], user\r\n assert user.fullname == user_data['screen_name'], user\r\n assert user.twitter_user_id == user_data['user_id'], user\r\n\r\n # Second with the same user\r\n user = manage_user(token, user_data, None)\r\n assert user.email_addr == user_data['screen_name'], user\r\n assert user.name == user_data['screen_name'], user\r\n assert user.fullname == user_data['screen_name'], user\r\n assert user.twitter_user_id == user_data['user_id'], user\r\n\r\n # Finally with a user that already is in the system\r\n user_data = dict(user_id=10, screen_name=self.name)\r\n token = dict(oauth_token='token2', oauth_token_secret='secret2')\r\n user = manage_user(token, user_data, None)\r\n err_msg = \"It should return the same user\"\r\n assert user.twitter_user_id == 10, err_msg", "def test_add_user_as_user(self):\n filepath = os.path.join(\n os.getcwd(), self.hiarc_util.TEST_FILE_PATH, 'Test.txt')\n\n u1 = self.hiarc_users.create_user(self.hiarc_util.create_user())\n u2 = self.hiarc_users.create_user(self.hiarc_util.create_user())\n c1 = self.hiarc_collections.create_collection(\n self.hiarc_util.create_collection())\n\n self.assertRaises(hiarc.rest.ApiException,\n self.hiarc_collections.add_user_to_collection,\n hiarc.AddUserToCollectionRequest(u2.key, hiarc.AccessLevel.READ_ONLY), c1.key, x_hiarc_user_key=u1.key)\n\n self.hiarc_collections.add_user_to_collection(\n hiarc.AddUserToCollectionRequest(u1.key, hiarc.AccessLevel.READ_ONLY), c1.key)\n self.assertRaises(hiarc.rest.ApiException,\n self.hiarc_collections.add_user_to_collection,\n 
hiarc.AddUserToCollectionRequest(u2.key, hiarc.AccessLevel.READ_ONLY), c1.key, x_hiarc_user_key=u1.key)\n\n u3 = self.hiarc_users.create_user(self.hiarc_util.create_user())\n self.hiarc_collections.add_user_to_collection(\n hiarc.AddUserToCollectionRequest(u3.key, hiarc.AccessLevel.READ_WRITE), c1.key)\n self.hiarc_collections.add_user_to_collection(\n hiarc.AddUserToCollectionRequest(u2.key, hiarc.AccessLevel.READ_ONLY), c1.key, x_hiarc_user_key=u3.key)", "def testAddLongUsernameAndPassword(self):\n original_username = \"thisgonnabelong\"\n longer_username = original_username*10\n original_password = \"thisalsogonnabelong\"\n longer_password = original_password*10\n self.assertEquals(models.ERR_BAD_USERNAME, self.users.add(longer_username, longer_password))", "def test_adduser(self):\n self.run_function(\"group.add\", [self._group], gid=self._gid)\n self.run_function(\"user.add\", [self._user])\n self.assertTrue(self.run_function(\"group.adduser\", [self._group, self._user]))\n group_info = self.run_function(\"group.info\", [self._group])\n self.assertIn(self._user, str(group_info[\"members\"]))\n # try add a non existing user\n self.assertFalse(\n self.run_function(\"group.adduser\", [self._group, self._no_user])\n )\n # try add a user to non existing group\n self.assertFalse(\n self.run_function(\"group.adduser\", [self._no_group, self._user])\n )\n # try add a non existing user to a non existing group\n self.assertFalse(\n self.run_function(\"group.adduser\", [self._no_group, self._no_user])\n )", "def test_manage_user(self):\r\n # First with a new user\r\n user_data = dict(id='1', name='google',\r\n email='[email protected]')\r\n token = 't'\r\n user = manage_user(token, user_data, None)\r\n assert user.email_addr == user_data['email'], user\r\n assert user.name == user_data['name'], user\r\n assert user.fullname == user_data['name'], user\r\n assert user.google_user_id == user_data['id'], user\r\n\r\n # Second with the same user\r\n user = manage_user(token, user_data, None)\r\n assert user.email_addr == user_data['email'], user\r\n assert user.name == user_data['name'], user\r\n assert user.fullname == user_data['name'], user\r\n assert user.google_user_id == user_data['id'], user\r\n\r\n # Finally with a user that already is in the system\r\n user_data = dict(id='10', name=self.name,\r\n email=self.email_addr)\r\n token = 'tA'\r\n user = manage_user(token, user_data, None)\r\n err_msg = \"User should be the same\"\r\n print user.google_user_id\r\n assert user.google_user_id == '10', err_msg", "def test_020_add_user_to_group(self):\n testflow.step(\"Adding user %s to group %s\", TEST_USER1, TEST_GROUP1)\n assert MANAGE_CLI.run(\n 'useradd',\n TEST_GROUP1,\n user=TEST_USER1\n )[0], \"Failed to add user to group '%s'\" % TEST_GROUP1\n\n testflow.step(\"Adding nonexisting user to group %s\", TEST_GROUP1)\n assert not MANAGE_CLI.run(\n 'useradd',\n TEST_GROUP1,\n user='nonsense'\n )[0], \"Possible to add nonexisting user to group\"\n\n testflow.step(\"Adding user %s to nonexisting group\", TEST_USER2)\n assert not MANAGE_CLI.run(\n 'useradd',\n 'nonsense',\n user=TEST_USER2\n )[0], \"Possible to add user to nonexisting group\"", "def test_add_user(self, api):\n resp = api.add_user(api.get_user())\n self.builder.delete_user(api.get_user())\n assert resp.status_code == 201", "def test_add_users_doesnt_add_duplicate_entry(self):\r\n role = CourseStaffRole(self.course_key)\r\n role.add_users(self.student)\r\n self.assertTrue(role.has_user(self.student))\r\n # Call add_users a second time, 
then remove just once.\r\n role.add_users(self.student)\r\n role.remove_users(self.student)\r\n self.assertFalse(role.has_user(self.student))", "def test_000_add_user(self):\n # This case is always passed because it's tested in setup_module,\n # If setup module fails, this case will never run\n pass", "def testLoginTwice(self):\n self.assertEquals(models.SUCCESS, self.users.add(\"userG2\", \"password\"))\n self.assertEquals(self.users.login(\"userG2\", \"password\"), 2)\n self.assertEquals(self.users.login(\"userG2\", \"password\"), 3)", "def test_create_multiple_users(self):\r\n self._auto_auth()\r\n self._auto_auth()\r\n self.assertEqual(User.objects.all().count(), 2)", "def test_add_user(self):\n with self.client:\n auth_header = login_test_user(self.client)\n response = self.client.post('/users', \n data=json.dumps(dict(\n username=\"neil\",\n email=\"[email protected]\",\n password=\"password123\"\n )),\n content_type='application/json',\n headers=auth_header\n )\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 201)\n self.assertIn('[email protected] was added!', data['message'])\n self.assertIn('success', data['status'])", "def test_append_user(self):\n print('(' + self.test_append_user.__name__+')',\n self.test_append_user.__doc__)\n new_username = self.connection.append_user(\n NEW_PATIENT_USERNAME, NEW_PATIENT)\n # test appended ok\n self.assertIsNotNone(new_username)\n # check appended the same user data\n self.assertEqual(new_username, NEW_PATIENT_USERNAME)\n # check the added user in db has the same data\n get_new_patient = self.connection.get_user(new_username)\n self.assertDictContainsSubset(\n NEW_PATIENT['restricted_profile'], get_new_patient['restricted_profile'])\n self.assertDictContainsSubset(\n NEW_PATIENT['public_profile'], get_new_patient['public_profile'])", "def test_add_customer(self):\r\n create_empty_db()\r\n add_customer(**user_1)\r\n query = Customer.get(Customer.customer_id == user_1['customer_id'])\r\n self.assertEqual(user_1['name'], query.customer_name)\r\n self.assertEqual(user_1['lastname'], query.customer_last_name)\r\n self.assertEqual(user_1['home_address'], query.customer_address)\r\n self.assertEqual(user_1['phone_number'], query.customer_phone)\r\n self.assertEqual(user_1['email_address'], query.customer_email)\r\n self.assertEqual(user_1['status'], query.customer_status)\r\n self.assertEqual(user_1['credit_limit'], query.customer_limit)\r\n\r\n # add another person\r\n add_customer(**user_2)\r\n query = Customer.get(Customer.customer_id == user_2['customer_id'])\r\n self.assertEqual(user_2['name'], query.customer_name)\r\n self.assertEqual(user_2['lastname'], query.customer_last_name)\r\n self.assertEqual(user_2['home_address'], query.customer_address)\r\n self.assertEqual(user_2['phone_number'], query.customer_phone)\r\n self.assertEqual(user_2['email_address'], query.customer_email)\r\n self.assertEqual(user_2['status'], query.customer_status)\r\n self.assertEqual(user_2['credit_limit'], query.customer_limit)\r\n\r\n # add a duplicate person\r\n with self.assertRaises(ValueError):\r\n add_customer(**user_2)\r\n drop_db()", "def add(self, user: U) -> None:\n ...", "def test_can_register_new_user(self):\n user_count = User.objects.count()\n self.register_bob()\n self.assertTrue(User.objects.count() == user_count + 1)", "def test_register_twice(self):\n body, code = self.post(f\"/users\", bob, {\"phone\": \"+441234567890\", **bob_creds})\n self.assertEqual(400, code)\n self.assertEqual({\"error\": \"User already 
exists.\"}, body)", "def test_user_add(self):\n\n result = self.client.post(\"/login\", data={\"user_email\": \"[email protected]\", \"user_password\": \"1234\"},\n follow_redirects=True)\n self.assertIn(b\"Bobby\", result.data)" ]
[ "0.83015925", "0.7707303", "0.77038246", "0.74918866", "0.73776263", "0.7374491", "0.732083", "0.7282199", "0.72698605", "0.7215631", "0.71915513", "0.7155373", "0.71463656", "0.7135748", "0.71296144", "0.7112154", "0.71116185", "0.71065176", "0.7069175", "0.7047732", "0.6975203", "0.69720644", "0.69568574", "0.6911281", "0.6892542", "0.6892322", "0.68694067", "0.686139", "0.68606913", "0.68602836" ]
0.8096824
1
Tests that adding a user with an empty username fails
def testAddEmptyUsername(self): self.assertEquals(models.ERR_BAD_USERNAME, self.users.add("", "password"))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def testAddNoneUsernameAndPassword(self):\n self.assertEquals(models.ERR_BAD_USERNAME, self.users.add(\"\", \"\"))", "def testAddNoneUsername(self):\n self.assertEquals(models.ERR_BAD_USERNAME, self.users.add(None, \"password\"))", "def testAddNoneUsernameAndPassword(self):\n self.assertEquals(models.ERR_BAD_USERNAME, self.users.add(None, None))", "def test_create_user_invalid_username(self):\r\n print(\"Create user invalid username (already taken)\")\r\n u_id = 3\r\n username = \"100\"\r\n password = \"test9999\"\r\n u_type = 1\r\n\r\n prev_noUsers = len(User.query.all())\r\n self.assertEqual(self.system.create_user(u_id, username, password, u_type), 0)\r\n curr_noUsers = len(User.query.all())\r\n self.assertEqual(prev_noUsers, curr_noUsers)", "def test_username_nodigits(self):\n response = self.signup_a_user(self.username_nodigits)\n self.assertEqual(response.data['errors']['username'],\n [\"username is invalid\"]\n )\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def testAddEmptyPassword(self):\n self.assertEquals(models.SUCCESS, self.users.add(\"userE\", \"\"))", "def test_user_empty_username(self):\n data = json.dumps({\n \"username\" : \"\", \"email\" : \"[email protected]\",\n \"password\" : \"12345678\", \"confirm_password\" : \"12345678\"})\n response = self.app.post(\n '/api/v3/users', data=data,\n content_type='application/json',\n headers=self.admin_header)\n self.assertEqual(response.status_code, 400)", "def test_empty_user_name_field(self):\r\n result=self.user.get_user_register(\"Stephen\",\" Ochieng\",\"[email protected]\",\"stephenochieng\",\"eat\")\r\n self.assertEqual(2,result,\"Fill in the username field please\")", "def test_username_not_in_use(self):\n self.request.json_body = {'username': 'newuser'}\n self.datautils.create_user({'username': 'testuser', 'password': 'testpass'})\n result = users_post_view(self.request)['d']\n self.assertIsInstance(result, dict)\n self.assertEqual(result, error_dict('api_errors', 'username, email, and password are all required string fields'))", "def test_empty_username():\n expect_error(register, InputError, \"\", \"abcdef\", \"A\", \"A\", \"A\")", "def test_signup_missing_username(self):\n\n invalid_u = User.signup(\"[email protected]\", None, \"testpass\", \"Test\", \"User\", None)\n \n uid = 99999\n invalid_u.id = uid\n\n with self.assertRaises(exc.IntegrityError) as context:\n db.session.commit()", "def test_username_not_unique(self, client, users):\n data = factory.build(dict, FACTORY_CLASS=UserFactory, username=users[1].username)\n url = reverse('users:create')\n response = client.post(url, data)\n assert response.status_code == 200\n assert 'A user with that username already exists.' 
in str(response.content)", "def test_create_user_empty_string(self):\n res = self.client.post(CREATE_USER_URL, {\n **self.mock_user,\n 'name': ''\n })\n\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n\n user = get_user_model().objects.get(**res.data)\n\n self.assertTrue(user.check_password(self.mock_user['password']))\n self.assertNotEqual('', user.name)", "def test_create_user_fails_with_no_username(self):\n user = get_user_model().objects.create(\n email='[email protected]',\n first_name='Test',\n password='pass123456!'\n )\n\n users = User.objects.filter(username='Test')\n\n self.assertEqual(len(users), 0)", "def test_unique_username(self):\n user = User(name=\"thealphadollar\")\n g.db.add(user)\n g.db.commit()\n\n user_field = Field(\"thealphadollar\")\n\n with self.assertRaises(ValidationError):\n unique_username(None, user_field)", "def testAddNonePassword(self):\n self.assertEquals(models.ERR_BAD_PASSWORD, self.users.add(\"userF\", None))", "def test_create_user_with_preexisting_username(self):\n data = {\n 'username': 'test_user',\n 'email': '[email protected]',\n 'password': 'testpassword'\n }\n response = self.client.post(self.create_url, data, format='json')\n self.assertEqual(User.objects.count(), 1)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(len(response.data['username']), 1)", "def test_create_no_username(self):\n\n # If there is no username, email will be used instead\n properties = self.portal.portal_properties.site_properties\n properties.manage_changeProperties(use_email_as_login=True)\n\n user = api.user.create(\n email='[email protected]',\n password='secret'\n )\n\n self.assertEquals(user.getUserName(), '[email protected]')\n\n # But if using emails as a username is disabled, we should get\n # an error\n properties.manage_changeProperties(use_email_as_login=False)\n\n self.assertRaises(\n ValueError,\n api.user.create,\n email='[email protected]', password='secret'\n )", "def test_create_user_with_no_username(self):\n data = {\n 'username': '',\n 'email': '[email protected]',\n 'password': 'foobar'\n }\n response = self.client.post(self.create_url, data, format='json')\n self.assertEqual(User.objects.count(), 1)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(len(response.data['username']), 1)", "def test_user_not_in_db_before_addition(self):\n\n username = 'testuser'\n user = User.query.filter_by(username=username).first()\n self.assertTrue(user.__str__(), None)", "def test_registeration_no_username(self):\n response = self.signup_a_user(self.user_lacks_username)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertNotIn(\"token\", response.data)", "def test_username_not_unique(self, client, users):\n user = users[0]\n data = factory.build(dict, FACTORY_CLASS=UserFactory, username=users[1].username)\n url = reverse('users:update', args=(user.pk,))\n response = client.post(url, data)\n assert response.status_code == 200\n assert 'A user with that username already exists.' 
in str(response.content)", "def test_username_not_unique(bot):\n expect_error(register, InputError, bot.username, \"abcdef\", \"a\", \"a\", \"a\")", "def test_user_signup_with_invalid_first_name(self):\n pass", "def test_username_in_use(self):\n self.request.json_body = {'username': 'testuser'}\n self.datautils.create_user({'username': 'testuser', 'password': 'testpass'})\n result = users_post_view(self.request)['d']\n self.assertIsInstance(result, dict)\n self.assertEqual(result, error_dict('verification_error', 'username already in use: testuser'))", "def test_admin_cannot_create_user_with_invalid_username(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n user = dict(\n name='Summer Love',\n username='love summer',\n password='Andela8',\n role='attendant'\n )\n\n resp = self.client.post(\n '/api/v1/users',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n\n reply = json.loads(resp.data.decode())\n\n self.assertEqual(reply['message'], 'Enter username in a correct string format no spaces, (johndoe)!')\n self.assertEqual(resp.status_code, 400)", "def test_signup_missing_first_name(self):\n\n invalid_u = User.signup(\"[email protected]\", \"testuser\", \"testpass\", None, \"User\", None)\n \n uid = 99999\n invalid_u.id = uid\n\n with self.assertRaises(exc.IntegrityError) as context:\n db.session.commit()", "def test_empty_username_field(self):\n self.empty_username = {'user': {\n \"username\": \"\",\n \"email\": \"[email protected]\",\n \"password\": \"Password123\"\n }}\n response = self.client.post(\n self.reg_url,\n self.empty_username,\n format=\"json\")\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertIn(b\"may not be blank\", response.content)", "def test_incorrect_user_registeration_duplicate_username(self):\n with self.client:\n response = self.client.post('/users/signup', data=dict(\n username='eschoppik',password='doesnotmatter',name=\"anything\",email=\"[email protected]\"))\n self.assertIn(b'Username has been taken', response.data)\n self.assertIn('/users/signup', request.url)", "def test_username_taken(self):\n self.datautils.create_user({'username': 'newuser'})\n self.assertEqual(1, self.session.query(User).count())\n self.request.json_body = deepcopy(self.new_account)\n result = users_post_view(self.request)['d']\n self.assertEqual(result, error_dict('verification_error',\n 'username already in use: %s' % self.new_account['username']))" ]
[ "0.8778913", "0.87578815", "0.85605466", "0.83670336", "0.81206423", "0.7980297", "0.7915649", "0.775719", "0.7717246", "0.7714081", "0.7688214", "0.7678973", "0.7670885", "0.7654914", "0.7652749", "0.7632635", "0.7626589", "0.7626313", "0.76021594", "0.7547543", "0.7526477", "0.7453451", "0.7447838", "0.7424433", "0.7402294", "0.7398379", "0.73659307", "0.73530257", "0.73424125", "0.7329217" ]
0.90195215
0
Tests that adding a user with a None password fails
def testAddNonePassword(self): self.assertEquals(models.ERR_BAD_PASSWORD, self.users.add("userF", None))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def testAddNoneUsernameAndPassword(self):\n self.assertEquals(models.ERR_BAD_USERNAME, self.users.add(None, None))", "def testAddNoneUsernameAndPassword(self):\n self.assertEquals(models.ERR_BAD_USERNAME, self.users.add(\"\", \"\"))", "def testAddNoneUsername(self):\n self.assertEquals(models.ERR_BAD_USERNAME, self.users.add(None, \"password\"))", "def testAddEmptyPassword(self):\n self.assertEquals(models.SUCCESS, self.users.add(\"userE\", \"\"))", "def test_create_user_invalid_password(self):\r\n print(\"Create user invalid password (empty)\")\r\n u_id = 3\r\n username = \"newtestuser\"\r\n password = \"\"\r\n u_type = 1\r\n\r\n prev_noUsers = len(User.query.all())\r\n self.assertEqual(self.system.create_user(u_id, username, password, u_type), 0)\r\n curr_noUsers = len(User.query.all())\r\n self.assertEqual(prev_noUsers, curr_noUsers)", "def testAddEmptyUsername(self):\n self.assertEquals(models.ERR_BAD_USERNAME, self.users.add(\"\", \"password\"))", "def test_register_user_with_empty_password(self, app):\n data = RegisterUser.random()\n setattr(data, \"password\", None)\n res = app.register.register(\n data=data, type_response=RegisterUserResponseInvalid\n )\n assert res.status_code == 400\n assert res.data.message == ResponseText.MESSAGE_REGISTER_USER_INVALID", "def test_signup_missing_password(self):\n with self.assertRaises(ValueError) as context:\n invalid_u = User.signup(\"[email protected]\", \"testuser\", None, \"Test\", \"User\", None)", "def test_creation_without_password(self, user):\n with pytest.raises(mongoengine.errors.ValidationError):\n user.save()", "def testLoginBadUsernameAndPassword(self):\n self.assertEquals(models.SUCCESS, self.users.add(\"userJ\", \"password\"))\n self.assertEquals(models.ERR_BAD_CREDENTIALS, self.users.login(\"nobody_user\", \"nobody_password\"))", "def test_registeration_for_a_super_user_no_password(self):\n with self.assertRaisesMessage(TypeError,\n 'Superusers must have a password.'):\n User.objects.create_superuser(\n 'jey',\n '[email protected]',\n None\n )", "def test_empty_password_field(self):\r\n result=self.user.get_user_register(\"Stephen\",\" Ochieng\",\"[email protected]\",\"stephenochieng\",\"eat\"\")\r\n self.assertEqual(2,result,\"Fill in the password field please\")", "def test_user_creation(self):\n user = UserModel.objects.create_user(\n username=\"saimer\"\n )\n self.assertEqual(user.email, \"\")\n self.assertEqual(user.username, \"saimer\")\n self.assertFalse(user.has_usable_password())", "def test_invalid_user_without_email(self):\n email = ''\n password = None\n with self.assertRaises(ValueError):\n self.user_manager.create_user(email=email, password=password)", "def test_user_empty_password(self):\n data = json.dumps({\n \"username\" : \"lenny\", \"email\" : \"[email protected]\",\n \"password\" : \"\", \"confirm_password\" : \"secret12345\"})\n response = self.app.post(\n '/api/v3/users', data=data,\n content_type='application/json',\n headers=self.admin_header)\n self.assertEqual(response.status_code, 400)", "def test_create_user_with_no_password(self):\n data = {\n 'username': 'foobar',\n 'email': '[email protected]',\n 'password': ''\n }\n response = self.client.post(self.create_url, data, format='json')\n self.assertEqual(User.objects.count(), 1)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(len(response.data['password']), 1)", "def test_create_user_invalid_username(self):\r\n print(\"Create user invalid username (already taken)\")\r\n u_id = 3\r\n username = \"100\"\r\n 
password = \"test9999\"\r\n u_type = 1\r\n\r\n prev_noUsers = len(User.query.all())\r\n self.assertEqual(self.system.create_user(u_id, username, password, u_type), 0)\r\n curr_noUsers = len(User.query.all())\r\n self.assertEqual(prev_noUsers, curr_noUsers)", "def test_create_user_empty_string(self):\n res = self.client.post(CREATE_USER_URL, {\n **self.mock_user,\n 'name': ''\n })\n\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n\n user = get_user_model().objects.get(**res.data)\n\n self.assertTrue(user.check_password(self.mock_user['password']))\n self.assertNotEqual('', user.name)", "def testLoginPassword(self):\n self.assertEquals(models.SUCCESS, self.users.add(\"userI\", \"password\"))\n self.assertEquals(models.ERR_BAD_CREDENTIALS, self.users.login(\"userI\", \"passw0rd\"))", "def test_create_account_failed_no_password(self):\n data = self.user_data.copy()\n data.pop('password')\n response = self.client.post(self.url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response.data.get('message').get('password')[0], 'This field is required.')", "def test_add_user(self):\n pass", "def test_registeration_no_password(self):\n response = self.signup_a_user(self.user_lacks_password)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response.data[\"errors\"][\"password\"],\n [\"This field may not be blank.\"]\n )\n self.assertNotIn(\"token\", response.data)", "def test_private_create_user_without_email(self):\n with pytest.raises(ValueError) as exinfo:\n EmailUser.objects._create_user(None, None, False, False)\n self.assertIn('email must be set', str(exinfo.value))", "def test_create_user(self):\n pass", "def test_create_user(self):\n pass", "def test_create_user(self):\n pass", "def test_invalid_password(self):\n pass", "def test_set_user_password(self):\n pass", "def test_32_oauth_password(self):\r\n user = User(email_addr=\"[email protected]\",\r\n name=self.user.username,\r\n passwd_hash=None,\r\n fullname=self.user.fullname,\r\n api_key=\"api-key\")\r\n db.session.add(user)\r\n db.session.commit()\r\n res = self.signin()\r\n assert \"Ooops, we didn't find you in the system\" in res.data, res.data", "def test_user_empty_conf_password(self):\n data = json.dumps({\n \"username\" : \"lenny\", \"email\" : \"[email protected]\",\n \"password\" : \"secret\", \"confirm_password\" : \"\"})\n response = self.app.post(\n '/api/v3/users', data=data,\n content_type='application/json',\n headers=self.admin_header)\n self.assertEqual(response.status_code, 400)" ]
[ "0.8673927", "0.86445725", "0.8449593", "0.8156031", "0.80684835", "0.7944337", "0.772304", "0.77000314", "0.7635938", "0.75678897", "0.7556625", "0.751811", "0.7516171", "0.7498143", "0.74943274", "0.74489343", "0.73932636", "0.73721427", "0.7327254", "0.7301202", "0.7286358", "0.72841966", "0.72415423", "0.7234427", "0.7234427", "0.7234427", "0.72194225", "0.72084594", "0.7201584", "0.71988165" ]
0.8799864
0
Tests that adding a user with a None username fails
def testAddNoneUsername(self): self.assertEquals(models.ERR_BAD_USERNAME, self.users.add(None, "password"))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def testAddNoneUsernameAndPassword(self):\n self.assertEquals(models.ERR_BAD_USERNAME, self.users.add(None, None))", "def testAddNoneUsernameAndPassword(self):\n self.assertEquals(models.ERR_BAD_USERNAME, self.users.add(\"\", \"\"))", "def testAddEmptyUsername(self):\n self.assertEquals(models.ERR_BAD_USERNAME, self.users.add(\"\", \"password\"))", "def test_create_user_invalid_username(self):\r\n print(\"Create user invalid username (already taken)\")\r\n u_id = 3\r\n username = \"100\"\r\n password = \"test9999\"\r\n u_type = 1\r\n\r\n prev_noUsers = len(User.query.all())\r\n self.assertEqual(self.system.create_user(u_id, username, password, u_type), 0)\r\n curr_noUsers = len(User.query.all())\r\n self.assertEqual(prev_noUsers, curr_noUsers)", "def testAddNonePassword(self):\n self.assertEquals(models.ERR_BAD_PASSWORD, self.users.add(\"userF\", None))", "def test_username_nodigits(self):\n response = self.signup_a_user(self.username_nodigits)\n self.assertEqual(response.data['errors']['username'],\n [\"username is invalid\"]\n )\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def test_signup_missing_username(self):\n\n invalid_u = User.signup(\"[email protected]\", None, \"testpass\", \"Test\", \"User\", None)\n \n uid = 99999\n invalid_u.id = uid\n\n with self.assertRaises(exc.IntegrityError) as context:\n db.session.commit()", "def test_create_no_username(self):\n\n # If there is no username, email will be used instead\n properties = self.portal.portal_properties.site_properties\n properties.manage_changeProperties(use_email_as_login=True)\n\n user = api.user.create(\n email='[email protected]',\n password='secret'\n )\n\n self.assertEquals(user.getUserName(), '[email protected]')\n\n # But if using emails as a username is disabled, we should get\n # an error\n properties.manage_changeProperties(use_email_as_login=False)\n\n self.assertRaises(\n ValueError,\n api.user.create,\n email='[email protected]', password='secret'\n )", "def test_user_not_in_db_before_addition(self):\n\n username = 'testuser'\n user = User.query.filter_by(username=username).first()\n self.assertTrue(user.__str__(), None)", "def testAddEmptyPassword(self):\n self.assertEquals(models.SUCCESS, self.users.add(\"userE\", \"\"))", "def test_add_user_with_none_exist_user_id(\n self, app, auth_user, none_exist_user=1000\n ):\n data = UserInfo.random()\n us_info = app.user_info.add_user_info(\n user_id=none_exist_user,\n data=data,\n header=auth_user.header,\n type_response=MessageResponse,\n )\n assert us_info.status_code == 404, \"Check status code\"\n assert us_info.data.message == ResponseText.MESSAGE_USER_NOT_FOUND", "def test_empty_user_name_field(self):\r\n result=self.user.get_user_register(\"Stephen\",\" Ochieng\",\"[email protected]\",\"stephenochieng\",\"eat\")\r\n self.assertEqual(2,result,\"Fill in the username field please\")", "def test_user_empty_username(self):\n data = json.dumps({\n \"username\" : \"\", \"email\" : \"[email protected]\",\n \"password\" : \"12345678\", \"confirm_password\" : \"12345678\"})\n response = self.app.post(\n '/api/v3/users', data=data,\n content_type='application/json',\n headers=self.admin_header)\n self.assertEqual(response.status_code, 400)", "def test_registeration_no_username(self):\n response = self.signup_a_user(self.user_lacks_username)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertNotIn(\"token\", response.data)", "def test_create_user_with_no_username(self):\n data = {\n 'username': '',\n 'email': 
'[email protected]',\n 'password': 'foobar'\n }\n response = self.client.post(self.create_url, data, format='json')\n self.assertEqual(User.objects.count(), 1)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(len(response.data['username']), 1)", "def test_create_user_fails_with_no_username(self):\n user = get_user_model().objects.create(\n email='[email protected]',\n first_name='Test',\n password='pass123456!'\n )\n\n users = User.objects.filter(username='Test')\n\n self.assertEqual(len(users), 0)", "def test_add_user(self):\n pass", "def test_unique_username(self):\n user = User(name=\"thealphadollar\")\n g.db.add(user)\n g.db.commit()\n\n user_field = Field(\"thealphadollar\")\n\n with self.assertRaises(ValidationError):\n unique_username(None, user_field)", "def test_username_not_in_use(self):\n self.request.json_body = {'username': 'newuser'}\n self.datautils.create_user({'username': 'testuser', 'password': 'testpass'})\n result = users_post_view(self.request)['d']\n self.assertIsInstance(result, dict)\n self.assertEqual(result, error_dict('api_errors', 'username, email, and password are all required string fields'))", "def test_create_user_with_preexisting_username(self):\n data = {\n 'username': 'test_user',\n 'email': '[email protected]',\n 'password': 'testpassword'\n }\n response = self.client.post(self.create_url, data, format='json')\n self.assertEqual(User.objects.count(), 1)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(len(response.data['username']), 1)", "def test_signup_missing_first_name(self):\n\n invalid_u = User.signup(\"[email protected]\", \"testuser\", \"testpass\", None, \"User\", None)\n \n uid = 99999\n invalid_u.id = uid\n\n with self.assertRaises(exc.IntegrityError) as context:\n db.session.commit()", "def test_username_not_unique(self, client, users):\n data = factory.build(dict, FACTORY_CLASS=UserFactory, username=users[1].username)\n url = reverse('users:create')\n response = client.post(url, data)\n assert response.status_code == 200\n assert 'A user with that username already exists.' 
in str(response.content)", "def test_create_user_empty_string(self):\n res = self.client.post(CREATE_USER_URL, {\n **self.mock_user,\n 'name': ''\n })\n\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n\n user = get_user_model().objects.get(**res.data)\n\n self.assertTrue(user.check_password(self.mock_user['password']))\n self.assertNotEqual('', user.name)", "def test_register_user_with_empty_data(self, app):\n data = RegisterUser.random()\n setattr(data, \"username\", None)\n res = app.register.register(\n data=data, type_response=RegisterUserResponseInvalid\n )\n assert res.status_code == 400\n assert res.data.message == ResponseText.MESSAGE_REGISTER_USER_INVALID", "def test_000_add_user(self):\n # This case is always passed because it's tested in setup_module,\n # If setup module fails, this case will never run\n pass", "def test_08_create_user_not_exists(self):\n\n _, user = self.get_random_item(models.User)\n utils.create_user(user, session=self.session)\n success, error = utils.create_user(user, session=self.session)\n self.assertFalse(success)\n self.assertTrue(error)", "def test_append_existing_user(self):\n print('(' + self.test_append_existing_user.__name__+')',\n self.test_append_existing_user.__doc__)\n self.assertIsNone(self.connection.append_user(\n PATIENT_USERNAME, NEW_PATIENT))", "def test_user1_method1():\n assert u is not None, \"Could not create a new User object\"", "def test_modify_nonexist_username(self):\n print('(' + self.test_modify_nonexist_username.__name__+')',\n self.test_modify_nonexist_username.__doc__)\n self.assertIsNone(self.connection.modify_user(\n NON_EXIST_PATIENT_USERNAME, PATIENT['public_profile'],\n PATIENT['restricted_profile']))", "def test_nonexistent_user(self):\n nonexistent_username = \"nonexistent user\"\n self.retired_username = get_retired_username_by_username(nonexistent_username)\n data = {'username': nonexistent_username}\n headers = self.build_jwt_headers(self.superuser)\n response = self.client.post(self.url, data, **headers)\n self.assert_response_correct(response, 404, None)" ]
[ "0.8658032", "0.8558699", "0.83201635", "0.7840982", "0.7833403", "0.77367413", "0.76577866", "0.7565609", "0.749817", "0.746846", "0.74393463", "0.7334507", "0.730809", "0.72923476", "0.72747236", "0.7273885", "0.7257682", "0.72494537", "0.7236431", "0.72091144", "0.7205949", "0.7165705", "0.71388113", "0.7136547", "0.71322596", "0.711769", "0.7104719", "0.7088036", "0.7035591", "0.69959503" ]
0.88843197
0
Tests that adding a user with both username and password as None fails
def testAddNoneUsernameAndPassword(self): self.assertEquals(models.ERR_BAD_USERNAME, self.users.add(None, None))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def testAddNoneUsernameAndPassword(self):\n self.assertEquals(models.ERR_BAD_USERNAME, self.users.add(\"\", \"\"))", "def testAddNoneUsername(self):\n self.assertEquals(models.ERR_BAD_USERNAME, self.users.add(None, \"password\"))", "def testAddNonePassword(self):\n self.assertEquals(models.ERR_BAD_PASSWORD, self.users.add(\"userF\", None))", "def testAddEmptyUsername(self):\n self.assertEquals(models.ERR_BAD_USERNAME, self.users.add(\"\", \"password\"))", "def testAddEmptyPassword(self):\n self.assertEquals(models.SUCCESS, self.users.add(\"userE\", \"\"))", "def test_create_user_invalid_password(self):\r\n print(\"Create user invalid password (empty)\")\r\n u_id = 3\r\n username = \"newtestuser\"\r\n password = \"\"\r\n u_type = 1\r\n\r\n prev_noUsers = len(User.query.all())\r\n self.assertEqual(self.system.create_user(u_id, username, password, u_type), 0)\r\n curr_noUsers = len(User.query.all())\r\n self.assertEqual(prev_noUsers, curr_noUsers)", "def test_create_user_invalid_username(self):\r\n print(\"Create user invalid username (already taken)\")\r\n u_id = 3\r\n username = \"100\"\r\n password = \"test9999\"\r\n u_type = 1\r\n\r\n prev_noUsers = len(User.query.all())\r\n self.assertEqual(self.system.create_user(u_id, username, password, u_type), 0)\r\n curr_noUsers = len(User.query.all())\r\n self.assertEqual(prev_noUsers, curr_noUsers)", "def test_create_user_empty_string(self):\n res = self.client.post(CREATE_USER_URL, {\n **self.mock_user,\n 'name': ''\n })\n\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n\n user = get_user_model().objects.get(**res.data)\n\n self.assertTrue(user.check_password(self.mock_user['password']))\n self.assertNotEqual('', user.name)", "def testLoginBadUsernameAndPassword(self):\n self.assertEquals(models.SUCCESS, self.users.add(\"userJ\", \"password\"))\n self.assertEquals(models.ERR_BAD_CREDENTIALS, self.users.login(\"nobody_user\", \"nobody_password\"))", "def test_user_creation(self):\n user = UserModel.objects.create_user(\n username=\"saimer\"\n )\n self.assertEqual(user.email, \"\")\n self.assertEqual(user.username, \"saimer\")\n self.assertFalse(user.has_usable_password())", "def test_create_user_with_no_password(self):\n data = {\n 'username': 'foobar',\n 'email': '[email protected]',\n 'password': ''\n }\n response = self.client.post(self.create_url, data, format='json')\n self.assertEqual(User.objects.count(), 1)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(len(response.data['password']), 1)", "def test_create_no_username(self):\n\n # If there is no username, email will be used instead\n properties = self.portal.portal_properties.site_properties\n properties.manage_changeProperties(use_email_as_login=True)\n\n user = api.user.create(\n email='[email protected]',\n password='secret'\n )\n\n self.assertEquals(user.getUserName(), '[email protected]')\n\n # But if using emails as a username is disabled, we should get\n # an error\n properties.manage_changeProperties(use_email_as_login=False)\n\n self.assertRaises(\n ValueError,\n api.user.create,\n email='[email protected]', password='secret'\n )", "def test_add_user(self):\n pass", "def test_register_user_with_empty_password(self, app):\n data = RegisterUser.random()\n setattr(data, \"password\", None)\n res = app.register.register(\n data=data, type_response=RegisterUserResponseInvalid\n )\n assert res.status_code == 400\n assert res.data.message == ResponseText.MESSAGE_REGISTER_USER_INVALID", "def test_append_existing_user(self):\n 
print('(' + self.test_append_existing_user.__name__+')',\n self.test_append_existing_user.__doc__)\n self.assertIsNone(self.connection.append_user(\n PATIENT_USERNAME, NEW_PATIENT))", "def test_creation_without_password(self, user):\n with pytest.raises(mongoengine.errors.ValidationError):\n user.save()", "def test_signup_missing_password(self):\n with self.assertRaises(ValueError) as context:\n invalid_u = User.signup(\"[email protected]\", \"testuser\", None, \"Test\", \"User\", None)", "def test_invalid_user_without_email(self):\n email = ''\n password = None\n with self.assertRaises(ValueError):\n self.user_manager.create_user(email=email, password=password)", "def test_create_user(self):\n pass", "def test_create_user(self):\n pass", "def test_create_user(self):\n pass", "def test_admin_cannot_create_user_with_empty_fields(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n user = dict(\n name='',\n username='',\n password='',\n role=''\n )\n\n resp = self.client.post(\n '/api/v1/users',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n\n reply = json.loads(resp.data.decode())\n\n self.assertEqual(reply['message'], 'Please input all fields!')\n self.assertEqual(resp.status_code, 400)", "def testAddLongUsernameAndPassword(self):\n original_username = \"thisgonnabelong\"\n longer_username = original_username*10\n original_password = \"thisalsogonnabelong\"\n longer_password = original_password*10\n self.assertEquals(models.ERR_BAD_USERNAME, self.users.add(longer_username, longer_password))", "def test_signup_missing_username(self):\n\n invalid_u = User.signup(\"[email protected]\", None, \"testpass\", \"Test\", \"User\", None)\n \n uid = 99999\n invalid_u.id = uid\n\n with self.assertRaises(exc.IntegrityError) as context:\n db.session.commit()", "def test_create_user_same_username(self):\n first_name = \"a\"\n last_name = \"a\"\n username = \"a\"\n email = \"a\"\n password = \"a\"\n\n manager = UserManager()\n result = manager.create(first_name, last_name, username, email, password)\n self.assertFalse(result)", "def test_create_user_with_no_username(self):\n data = {\n 'username': '',\n 'email': '[email protected]',\n 'password': 'foobar'\n }\n response = self.client.post(self.create_url, data, format='json')\n self.assertEqual(User.objects.count(), 1)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(len(response.data['username']), 1)", "def test_create_user_missing_fields(self):\n payload = {\n 'email': 'email',\n 'password': ''\n }\n res = self.client.post(CREATE_USER_API, payload)\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_000_add_user(self):\n # This case is always passed because it's tested in setup_module,\n # If setup module fails, this case will never run\n pass", "def test_add_user_with_none_exist_user_id(\n self, app, auth_user, none_exist_user=1000\n ):\n data = UserInfo.random()\n us_info = app.user_info.add_user_info(\n user_id=none_exist_user,\n data=data,\n header=auth_user.header,\n type_response=MessageResponse,\n )\n assert us_info.status_code == 404, \"Check status code\"\n assert us_info.data.message == ResponseText.MESSAGE_USER_NOT_FOUND", "def test_add_basic(self, db_session: Session) -> None:\n user_service = get_user_service(db_session)\n profile = RandomDbAdder().random_profile(db_session)\n basic_user_dict = InputDictGenerator().random_basic_user(profile.name)\n\n response = 
user_service.add_user(**basic_user_dict)\n\n assert response == {\n \"status\": \"success\",\n \"code\": 200,\n \"data\": {\"message\": f\"User '{basic_user_dict['username']}' successfully added to database.\"},\n }\n actual_user = db_session.query(Users).filter_by(username=basic_user_dict[\"username\"]).one()\n assert actual_user.id.startswith(\"us-\")\n assert actual_user.auth_type == AuthType.basic\n assert actual_user.role == \"user\"\n assert actual_user.username == basic_user_dict[\"username\"]\n assert actual_user.password_hash != basic_user_dict[\"password\"]\n assert actual_user.email is None\n assert actual_user.identity_provider_id is None\n assert actual_user.profile_id == profile.id\n assert actual_user.budget is None\n assert actual_user.name is None" ]
[ "0.86757123", "0.85757244", "0.82967484", "0.8035735", "0.781305", "0.7600376", "0.752247", "0.74228996", "0.7413409", "0.73913527", "0.7388319", "0.7348273", "0.73237", "0.72724587", "0.7259575", "0.723713", "0.7212342", "0.72081846", "0.71922207", "0.71922207", "0.71922207", "0.7154959", "0.71448636", "0.71033", "0.70942545", "0.7067317", "0.7067138", "0.70656663", "0.7063352", "0.70563406" ]
0.8776377
0
Tests that adding a user with both a blank username and password fails
def testAddNoneUsernameAndPassword(self):
    self.assertEquals(models.ERR_BAD_USERNAME, self.users.add("", ""))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def testAddNoneUsernameAndPassword(self):\n self.assertEquals(models.ERR_BAD_USERNAME, self.users.add(None, None))", "def testAddEmptyUsername(self):\n self.assertEquals(models.ERR_BAD_USERNAME, self.users.add(\"\", \"password\"))", "def testAddNoneUsername(self):\n self.assertEquals(models.ERR_BAD_USERNAME, self.users.add(None, \"password\"))", "def test_create_user_invalid_username(self):\r\n print(\"Create user invalid username (already taken)\")\r\n u_id = 3\r\n username = \"100\"\r\n password = \"test9999\"\r\n u_type = 1\r\n\r\n prev_noUsers = len(User.query.all())\r\n self.assertEqual(self.system.create_user(u_id, username, password, u_type), 0)\r\n curr_noUsers = len(User.query.all())\r\n self.assertEqual(prev_noUsers, curr_noUsers)", "def test_create_user_invalid_password(self):\r\n print(\"Create user invalid password (empty)\")\r\n u_id = 3\r\n username = \"newtestuser\"\r\n password = \"\"\r\n u_type = 1\r\n\r\n prev_noUsers = len(User.query.all())\r\n self.assertEqual(self.system.create_user(u_id, username, password, u_type), 0)\r\n curr_noUsers = len(User.query.all())\r\n self.assertEqual(prev_noUsers, curr_noUsers)", "def testAddEmptyPassword(self):\n self.assertEquals(models.SUCCESS, self.users.add(\"userE\", \"\"))", "def testAddNonePassword(self):\n self.assertEquals(models.ERR_BAD_PASSWORD, self.users.add(\"userF\", None))", "def testLoginBadUsernameAndPassword(self):\n self.assertEquals(models.SUCCESS, self.users.add(\"userJ\", \"password\"))\n self.assertEquals(models.ERR_BAD_CREDENTIALS, self.users.login(\"nobody_user\", \"nobody_password\"))", "def test_create_user_empty_string(self):\n res = self.client.post(CREATE_USER_URL, {\n **self.mock_user,\n 'name': ''\n })\n\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n\n user = get_user_model().objects.get(**res.data)\n\n self.assertTrue(user.check_password(self.mock_user['password']))\n self.assertNotEqual('', user.name)", "def test_user_creation(self):\n user = UserModel.objects.create_user(\n username=\"saimer\"\n )\n self.assertEqual(user.email, \"\")\n self.assertEqual(user.username, \"saimer\")\n self.assertFalse(user.has_usable_password())", "def test_create_user_with_no_password(self):\n data = {\n 'username': 'foobar',\n 'email': '[email protected]',\n 'password': ''\n }\n response = self.client.post(self.create_url, data, format='json')\n self.assertEqual(User.objects.count(), 1)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(len(response.data['password']), 1)", "def test_signup_missing_password(self):\n with self.assertRaises(ValueError) as context:\n invalid_u = User.signup(\"[email protected]\", \"testuser\", None, \"Test\", \"User\", None)", "def testAddLongUsernameAndPassword(self):\n original_username = \"thisgonnabelong\"\n longer_username = original_username*10\n original_password = \"thisalsogonnabelong\"\n longer_password = original_password*10\n self.assertEquals(models.ERR_BAD_USERNAME, self.users.add(longer_username, longer_password))", "def test_creation_without_password(self, user):\n with pytest.raises(mongoengine.errors.ValidationError):\n user.save()", "def test_username_not_in_use(self):\n self.request.json_body = {'username': 'newuser'}\n self.datautils.create_user({'username': 'testuser', 'password': 'testpass'})\n result = users_post_view(self.request)['d']\n self.assertIsInstance(result, dict)\n self.assertEqual(result, error_dict('api_errors', 'username, email, and password are all required string fields'))", "def 
test_admin_cannot_create_user_with_empty_fields(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n user = dict(\n name='',\n username='',\n password='',\n role=''\n )\n\n resp = self.client.post(\n '/api/v1/users',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n\n reply = json.loads(resp.data.decode())\n\n self.assertEqual(reply['message'], 'Please input all fields!')\n self.assertEqual(resp.status_code, 400)", "def test_create_user(self):\n self.assertEqual(self.new_user.first_name, 'John')\n self.assertEqual(self.new_user.last_name, 'Doe')\n self.assertEqual(self.new_user.username, 'john_doe')\n self.assertEqual(self.new_user.email, '[email protected]')\n self.assertTrue(self.new_user.check_password('test_password'))\n self.assertFalse(self.new_user.is_staff)\n self.assertFalse(self.new_user.is_superuser)\n self.assertFalse(self.new_user.is_active)\n\n with self.assertRaises(ValueError):\n User.objects.create_user(\n first_name='', last_name='', username='', email='', bio='', password=''\n )", "def test_username_nodigits(self):\n response = self.signup_a_user(self.username_nodigits)\n self.assertEqual(response.data['errors']['username'],\n [\"username is invalid\"]\n )\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def test_create_user_same_username(self):\n first_name = \"a\"\n last_name = \"a\"\n username = \"a\"\n email = \"a\"\n password = \"a\"\n\n manager = UserManager()\n result = manager.create(first_name, last_name, username, email, password)\n self.assertFalse(result)", "def test_admin_cannot_create_user_with_invalid_username(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n user = dict(\n name='Summer Love',\n username='love summer',\n password='Andela8',\n role='attendant'\n )\n\n resp = self.client.post(\n '/api/v1/users',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n\n reply = json.loads(resp.data.decode())\n\n self.assertEqual(reply['message'], 'Enter username in a correct string format no spaces, (johndoe)!')\n self.assertEqual(resp.status_code, 400)", "def test_invalid_user_without_email(self):\n email = ''\n password = None\n with self.assertRaises(ValueError):\n self.user_manager.create_user(email=email, password=password)", "def test_signup_missing_username(self):\n\n invalid_u = User.signup(\"[email protected]\", None, \"testpass\", \"Test\", \"User\", None)\n \n uid = 99999\n invalid_u.id = uid\n\n with self.assertRaises(exc.IntegrityError) as context:\n db.session.commit()", "def test_create_no_username(self):\n\n # If there is no username, email will be used instead\n properties = self.portal.portal_properties.site_properties\n properties.manage_changeProperties(use_email_as_login=True)\n\n user = api.user.create(\n email='[email protected]',\n password='secret'\n )\n\n self.assertEquals(user.getUserName(), '[email protected]')\n\n # But if using emails as a username is disabled, we should get\n # an error\n properties.manage_changeProperties(use_email_as_login=False)\n\n self.assertRaises(\n ValueError,\n api.user.create,\n email='[email protected]', password='secret'\n )", "def test_user_empty_username(self):\n data = json.dumps({\n \"username\" : \"\", \"email\" : \"[email protected]\",\n \"password\" : \"12345678\", \"confirm_password\" : \"12345678\"})\n response = self.app.post(\n '/api/v3/users', 
data=data,\n content_type='application/json',\n headers=self.admin_header)\n self.assertEqual(response.status_code, 400)", "def test_register_user_with_empty_password(self, app):\n data = RegisterUser.random()\n setattr(data, \"password\", None)\n res = app.register.register(\n data=data, type_response=RegisterUserResponseInvalid\n )\n assert res.status_code == 400\n assert res.data.message == ResponseText.MESSAGE_REGISTER_USER_INVALID", "def test_create_user_fails_with_no_username(self):\n user = get_user_model().objects.create(\n email='[email protected]',\n first_name='Test',\n password='pass123456!'\n )\n\n users = User.objects.filter(username='Test')\n\n self.assertEqual(len(users), 0)", "def test_should_create_user_when_give_password_and_username(self):\n data = {'username': 'zhe_xu', 'password': '123'}\n response = self.client.post('/api/users', data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(User.objects.count(), 1)\n self.assertTrue(check_password('123', User.objects.get().password))\n self.assertNotEqual('123', User.objects.get().password)", "def test_create_user_missing_fields(self):\n payload = {\n 'email': 'email',\n 'password': ''\n }\n res = self.client.post(CREATE_USER_API, payload)\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_user_existence(self):\n\n credentials = {\n 'email': '[email protected]',\n 'password': 'Testpass12',\n 'name': 'Test Name'\n }\n get_user_model().objects.create_user(**credentials)\n\n # Check that this is a bad request since the user does already exists.\n response = self.client.post(URL_CREATE_USER, credentials)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def test_create_user_with_no_username(self):\n data = {\n 'username': '',\n 'email': '[email protected]',\n 'password': 'foobar'\n }\n response = self.client.post(self.create_url, data, format='json')\n self.assertEqual(User.objects.count(), 1)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(len(response.data['username']), 1)" ]
[ "0.8665212", "0.85453916", "0.8401641", "0.81767356", "0.8146304", "0.80946857", "0.80876476", "0.79441357", "0.78285795", "0.7711268", "0.7669809", "0.76175237", "0.7617285", "0.75937957", "0.75469226", "0.75368714", "0.7527013", "0.7497341", "0.7478632", "0.74692166", "0.746301", "0.746027", "0.74560523", "0.74537927", "0.7443093", "0.74323225", "0.7417465", "0.74069536", "0.739865", "0.738838" ]
0.88185066
0
Tests that adding a user with a long username fails
def testAddLongUsername(self):
    original_username = "thiswillbelong"
    longer_username = original_username*10
    self.assertEquals(models.ERR_BAD_USERNAME, self.users.add(longer_username, "password"))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def testAddLongUsernameAndPassword(self):\n original_username = \"thisgonnabelong\"\n longer_username = original_username*10\n original_password = \"thisalsogonnabelong\"\n longer_password = original_password*10\n self.assertEquals(models.ERR_BAD_USERNAME, self.users.add(longer_username, longer_password))", "def test_create_user_with_too_long_username(self):\n data = {\n 'username': 'foo'*11,\n 'email': '[email protected]',\n 'password': 'foobar'\n }\n response = self.client.post(self.create_url, data, format='json')\n self.assertEqual(User.objects.count(), 1)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(len(response.data['username']), 1)", "def test_long_username():\n expect_error(register, InputError,\n \"a\" * (MAX_USERNAME + 1), \"abcdef\", \"a\", \"a\", \"a\")", "def test_create_user_invalid_username(self):\r\n print(\"Create user invalid username (already taken)\")\r\n u_id = 3\r\n username = \"100\"\r\n password = \"test9999\"\r\n u_type = 1\r\n\r\n prev_noUsers = len(User.query.all())\r\n self.assertEqual(self.system.create_user(u_id, username, password, u_type), 0)\r\n curr_noUsers = len(User.query.all())\r\n self.assertEqual(prev_noUsers, curr_noUsers)", "def testAddEmptyUsername(self):\n self.assertEquals(models.ERR_BAD_USERNAME, self.users.add(\"\", \"password\"))", "def test_username_not_unique(bot):\n expect_error(register, InputError, bot.username, \"abcdef\", \"a\", \"a\", \"a\")", "def test_unique_username(self):\n user = User(name=\"thealphadollar\")\n g.db.add(user)\n g.db.commit()\n\n user_field = Field(\"thealphadollar\")\n\n with self.assertRaises(ValidationError):\n unique_username(None, user_field)", "def testAddNoneUsername(self):\n self.assertEquals(models.ERR_BAD_USERNAME, self.users.add(None, \"password\"))", "def test_signup_dupe_username(self):\n\n invalid_u = User.signup(\"[email protected]\", \"allison\", \"testpass\", \"Test\", \"User\", None)\n \n uid = 99999\n invalid_u.id = uid\n\n with self.assertRaises(exc.IntegrityError) as context:\n db.session.commit()", "def testAddNoneUsernameAndPassword(self):\n self.assertEquals(models.ERR_BAD_USERNAME, self.users.add(\"\", \"\"))", "def test_admin_cannot_create_users_with_same_username(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n user = dict(\n name='Summer Love',\n username='love',\n password='Andela8',\n role='attendant'\n )\n resp = self.client.post(\n '/api/v1/users',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n\n user = dict(\n name='Paul Love',\n username='love',\n password='Andela8',\n role='attendant'\n )\n resp = self.client.post(\n '/api/v1/users',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'This username is already taken!')\n self.assertEqual(resp.status_code, 400)", "def test_admin_cannot_create_user_with_invalid_username(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n user = dict(\n name='Summer Love',\n username='love summer',\n password='Andela8',\n role='attendant'\n )\n\n resp = self.client.post(\n '/api/v1/users',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n\n reply = json.loads(resp.data.decode())\n\n self.assertEqual(reply['message'], 'Enter username 
in a correct string format no spaces, (johndoe)!')\n self.assertEqual(resp.status_code, 400)", "def testAddLongPassword(self):\n original_password = \"thiswillbelong\"\n longer_password = original_password*10\n self.assertEquals(models.ERR_BAD_PASSWORD, self.users.add(\"paulinarocks\", longer_password))", "def testAddNoneUsernameAndPassword(self):\n self.assertEquals(models.ERR_BAD_USERNAME, self.users.add(None, None))", "def test_signup_missing_username(self):\n\n invalid_u = User.signup(\"[email protected]\", None, \"testpass\", \"Test\", \"User\", None)\n \n uid = 99999\n invalid_u.id = uid\n\n with self.assertRaises(exc.IntegrityError) as context:\n db.session.commit()", "def test_password_too_short(self):\n\t\tpayload = {\n\t\t'email': '[email protected]',\n\t\t'password': 'pw',\n\t\t'name': 'test'\n\t\t}\n\n\t\tres = self.client.post(CREATE_USER_URL, payload)\n\t\tself.assertEqual(res.status_code,status.HTTP_400_BAD_REQUEST)\n\t\tuser_exists = get_user_model().objects.filter(\n\t\t\temail = payload['email']\n\t\t\t).exists()\n\n\t\tself.assertFalse(user_exists)", "def test_password_too_short(self):\n payload = {'email': '[email protected]', 'password': 'fu'}\n res = self.client.post(CREATE_USER_URL, payload)\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)\n user_exists = get_user_model().objects.filter(\n email=payload['email']\n ).exists()\n self.assertFalse(user_exists)", "def test_password_too_short(self):\r\n payload = {\r\n 'email': '[email protected]',\r\n 'password': 'pw',\r\n 'name': 'Maks'\r\n }\r\n\r\n res = self.client.post(CREATE_USER_URL, payload)\r\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)\r\n\r\n user_exists = get_user_model().objects.filter(\r\n email=payload['email']\r\n ).exists()\r\n self.assertFalse(user_exists)", "def test_signup_missing_last_name(self):\n\n invalid_u = User.signup(\"[email protected]\", \"testuser\", \"testpass\", \"Test\", None, None)\n \n uid = 99999\n invalid_u.id = uid\n\n with self.assertRaises(exc.IntegrityError) as context:\n db.session.commit()", "def test_user_cannot_register_with_short_username(self):\n response = self.client.post(SIGNUP_URL,\n data=json.dumps(\n {'username': 'dan', 'email': '[email protected]', 'password': 'pass12345'}),\n content_type='application/json')\n self.assertEqual(response.status_code, 400)\n result = json.loads(response.data.decode())\n self.assertEqual(result[\"message\"],\n \"Username should be atleast 4 characters\")", "def test_username_in_use(self):\n self.request.json_body = {'username': 'testuser'}\n self.datautils.create_user({'username': 'testuser', 'password': 'testpass'})\n result = users_post_view(self.request)['d']\n self.assertIsInstance(result, dict)\n self.assertEqual(result, error_dict('verification_error', 'username already in use: testuser'))", "def test_register_user_incorrect(self):\n result = self.client.post(\"/users\", data={\"username\":\"test_user1\"}, follow_redirects=True)\n self.assertEqual(result.status_code, 200)\n self.assertIn(b\"Sorry! 
That username is already in use!\", result.data)", "def test_long_username(self):\r\n\r\n self.url_params['username'] = 'username' * 4\r\n response = self.client.post(self.url, self.url_params)\r\n\r\n # Status code should be 400.\r\n self.assertEqual(response.status_code, 400)\r\n\r\n obj = json.loads(response.content)\r\n self.assertEqual(\r\n obj['value'],\r\n \"Username cannot be more than 30 characters long\",\r\n )", "def test_username_taken(self):\n self.datautils.create_user({'username': 'newuser'})\n self.assertEqual(1, self.session.query(User).count())\n self.request.json_body = deepcopy(self.new_account)\n result = users_post_view(self.request)['d']\n self.assertEqual(result, error_dict('verification_error',\n 'username already in use: %s' % self.new_account['username']))", "def test_registeration_duplicate_username(self):\n self.signup_a_user(self.user_data)\n response_duplicate = self.signup_a_user(\n self.user_data_duplicate_username)\n self.assertEqual(response_duplicate.status_code,\n status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response_duplicate.data[\"errors\"][\"username\"],\n [\"user with this username already exists.\"])\n self.assertNotIn(\"token\", response_duplicate.data)", "def test_password_too_short(self):\n payload = {'email': '[email protected]', 'password': 'pw'}\n \n res = self.client.post(CREATE_USER_URL, payload)\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)\n \n user_exists = get_user_model().objects.filter(\n email=payload['email']\n ).exists()\n \n self.assertFalse(user_exists)", "def test_shib_id_as_username(self):\n JenkinsUser.objects.create(username=\"CBIV/Ddgyl825NoRetYfRNAQl0E=42\")", "def test_incorrect_user_registeration_duplicate_username(self):\n with self.client:\n response = self.client.post('/users/signup', data=dict(\n username='eschoppik',password='doesnotmatter',name=\"anything\",email=\"[email protected]\"))\n self.assertIn(b'Username has been taken', response.data)\n self.assertIn('/users/signup', request.url)", "def test_username_not_unique(self, client, users):\n data = factory.build(dict, FACTORY_CLASS=UserFactory, username=users[1].username)\n url = reverse('users:create')\n response = client.post(url, data)\n assert response.status_code == 200\n assert 'A user with that username already exists.' in str(response.content)", "def test_password_too_short(self):\n payload = {\n 'email': '[email protected]',\n 'password': 'pw'\n }\n res = self.client.post(CREATE_USER_API, payload)\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)\n user_exists = get_user_model().objects.filter(\n email=payload['email']\n ).exists()\n self.assertFalse(user_exists)" ]
[ "0.83422065", "0.7946055", "0.78104156", "0.780026", "0.7580546", "0.7476788", "0.74197304", "0.74022245", "0.7361944", "0.73097354", "0.7270362", "0.7230703", "0.7222272", "0.72112274", "0.7194711", "0.718886", "0.71489906", "0.7133822", "0.71329373", "0.71161985", "0.7106094", "0.7100197", "0.7097026", "0.7096739", "0.7060606", "0.70586044", "0.7028061", "0.70214856", "0.70107836", "0.6985737" ]
0.8756425
0
Tests that adding a user with a long password fails
def testAddLongPassword(self):
    original_password = "thiswillbelong"
    longer_password = original_password*10
    self.assertEquals(models.ERR_BAD_PASSWORD, self.users.add("paulinarocks", longer_password))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def testAddLongUsernameAndPassword(self):\n original_username = \"thisgonnabelong\"\n longer_username = original_username*10\n original_password = \"thisalsogonnabelong\"\n longer_password = original_password*10\n self.assertEquals(models.ERR_BAD_USERNAME, self.users.add(longer_username, longer_password))", "def test_password_too_short(self):\n\t\tpayload = {\n\t\t'email': '[email protected]',\n\t\t'password': 'pw',\n\t\t'name': 'test'\n\t\t}\n\n\t\tres = self.client.post(CREATE_USER_URL, payload)\n\t\tself.assertEqual(res.status_code,status.HTTP_400_BAD_REQUEST)\n\t\tuser_exists = get_user_model().objects.filter(\n\t\t\temail = payload['email']\n\t\t\t).exists()\n\n\t\tself.assertFalse(user_exists)", "def test_password_too_short(self):\n payload = {'email': '[email protected]', 'password': 'pw'}\n \n res = self.client.post(CREATE_USER_URL, payload)\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)\n \n user_exists = get_user_model().objects.filter(\n email=payload['email']\n ).exists()\n \n self.assertFalse(user_exists)", "def test_password_too_short(self):\n # requirments for creating user\n payload = {\n 'email': '[email protected]',\n 'password': 'pwd',\n 'name': 'Test',\n }\n\n # this will do a HTTP POST request and create a user\n response = self.client.post(CREATE_USER_URL, payload)\n\n # Ensure that statuscode returns a HTTP400 bad request\n # becos must exist before we can ckeck password\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n # chech if user exists true else false\n user_exists = get_user_model().objects.filter(\n email=payload['email']\n ).exists()\n\n self.assertFalse(user_exists)", "def test_password_too_short(self):\n payload = {'email': '[email protected]', 'password': 'fu'}\n res = self.client.post(CREATE_USER_URL, payload)\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)\n user_exists = get_user_model().objects.filter(\n email=payload['email']\n ).exists()\n self.assertFalse(user_exists)", "def test_password_too_short(self):\r\n payload = {\r\n 'email': '[email protected]',\r\n 'password': 'pw',\r\n 'name': 'Maks'\r\n }\r\n\r\n res = self.client.post(CREATE_USER_URL, payload)\r\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)\r\n\r\n user_exists = get_user_model().objects.filter(\r\n email=payload['email']\r\n ).exists()\r\n self.assertFalse(user_exists)", "def test_password_too_short(self):\n\n payload = {\n \"email\": \"[email protected]\",\n \"name\": \"Test\",\n 'password': 'tTTt'\n }\n res = self.client.post(CREATE_USER_URL, payload)\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)\n\n user_exitst = get_user_model().objects.filter(\n email=payload['email']\n ).exists()\n\n self.assertFalse(user_exitst)", "def test_create_user_password_too_short(self):\n res = self.client.post(CREATE_USER_URL, {\n **self.mock_user,\n 'password': 'pw'\n })\n\n db_user = get_user_model().objects.filter(\n email=self.mock_user['email']\n )\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertFalse(db_user)", "def test_password_too_short(self):\n payload = {'email': '[email protected]', 'password': '123'}\n\n res = self.client.post(CREATE_USER_URL, payload)\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)\n user_exists = get_user_model().objects.filter(\n email=payload['email']\n ).exists()\n self.assertFalse(user_exists)", "def test_password_too_short(self):\n payload = {\n 'email': '[email protected]',\n 'password': 'pw',\n 'name': 'test 
Name'\n }\n\n res = self.client.post(CREATE_USER_URL, payload)\n\n self.assertEquals(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_password_too_short(self):\n payload = {\n 'email': '[email protected]',\n 'password': 'pw'\n }\n res = self.client.post(CREATE_USER_API, payload)\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)\n user_exists = get_user_model().objects.filter(\n email=payload['email']\n ).exists()\n self.assertFalse(user_exists)", "def testAddLongUsername(self):\n original_username = \"thiswillbelong\"\n longer_username = original_username*10\n self.assertEquals(models.ERR_BAD_USERNAME, self.users.add(longer_username, \"password\"))", "def test_create_user_password_too_short(self):\n data = {\n \"firstname\": \"John\",\n \"lastname\": \"Doe\",\n \"password\": \"short\",\n \"password_repeat\": \"short\",\n }\n res = self.post(url=\"/users\", data=data)\n self.assertException(res, exc.PasswordTooShort)\n\n users = User.query.all()\n self.assertEqual(len(users), 5)", "def test_password_too_short(self):\n\n credentials = {\n 'email': '[email protected]',\n 'password': 'pw',\n 'name': 'Test Name'\n }\n response = self.client.post(URL_CREATE_USER, credentials)\n\n # Check that this is a bad request since the password was too short.\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n is_user_created = get_user_model().objects.filter(\n email=credentials['email']\n ).exists()\n\n self.assertFalse(is_user_created)\n self.assertEqual(response.data['password'][0].code, 'min_length')", "def test_create_user_invalid_password(self):\r\n print(\"Create user invalid password (empty)\")\r\n u_id = 3\r\n username = \"newtestuser\"\r\n password = \"\"\r\n u_type = 1\r\n\r\n prev_noUsers = len(User.query.all())\r\n self.assertEqual(self.system.create_user(u_id, username, password, u_type), 0)\r\n curr_noUsers = len(User.query.all())\r\n self.assertEqual(prev_noUsers, curr_noUsers)", "def test_long_password():\n expect_error(register, InputError,\n \"abcdef\", \"a\" * (MIN_PASSWORD - 1), \"a\", \"A\", \"a\")", "def test_invalid_password(self):\n pass", "def test_password_length(self):\n payload = {\n 'email': '[email protected]',\n 'name': \"hello\",\n 'password': 'pw',\n }\n\n res = self.client.post(CREATE_USER_URL, payload)\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)\n\n user_exists = get_user_model().objects.filter(\n email=payload['email']\n ).exists()\n self.assertFalse(user_exists)", "def test_password_too_short(self):\n\n payload = {\n \"user\": {\n \"email\": \"[email protected]\",\n \"password\": \"us\",\n \"is_doctor\": False,\n \"is_hospital_admin\": True\n },\n 'first_name': 'Test1',\n 'last_name': 'JustUser2'\n }\n\n response = self.client.post(HOSPITAL_ADMIN_REGISTER_URL, payload, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def test_user_short_passwords(self):\n data = json.dumps({\n \"username\" : \"moses\", \"email\" : \"[email protected]\",\n \"password\" : \"1234567\", \"confirm_password\" : \"1234567\"})\n response = self.app.post(\n '/api/v3/users', data=data,\n content_type='application/json',\n headers=self.admin_header)\n self.assertEqual(response.status_code, 400)", "def testAddNonePassword(self):\n self.assertEquals(models.ERR_BAD_PASSWORD, self.users.add(\"userF\", None))", "def test_create_user_with_short_password(self):\n data = {\n 'email': '[email protected]',\n 'password': 'foo'\n }\n\n response = self.client.post(self.create_url, data, format='json')\n 
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(len(response.data['password']), 1)", "def test_invalid_length_for_new_password():\n user = User(email=\"[email protected]\", user_type=0)\n user_password = \"ILoveHTML\"\n user.SetPassword(user_password)\n\n new_password1 = \"pwd\"\n with pytest.raises(ValueError):\n user.SetPassword(new_password1)\n assert not user.VerifyPassword(new_password1)\n assert user.VerifyPassword(user_password)\n\n new_password2 = \"I love meatball and tuna.\"\n with pytest.raises(ValueError):\n user.SetPassword(new_password2)\n assert not user.VerifyPassword(new_password2)\n assert user.VerifyPassword(user_password)", "def test_32_oauth_password(self):\r\n user = User(email_addr=\"[email protected]\",\r\n name=self.user.username,\r\n passwd_hash=None,\r\n fullname=self.user.fullname,\r\n api_key=\"api-key\")\r\n db.session.add(user)\r\n db.session.commit()\r\n res = self.signin()\r\n assert \"Ooops, we didn't find you in the system\" in res.data, res.data", "def testLoginPassword(self):\n self.assertEquals(models.SUCCESS, self.users.add(\"userI\", \"password\"))\n self.assertEquals(models.ERR_BAD_CREDENTIALS, self.users.login(\"userI\", \"passw0rd\"))", "def test_password_is_to_short(setup_client):\n client = setup_client\n payload = {\n 'email': '[email protected]',\n 'password': 'pw',\n 'role': 'Supplier',\n 'name': 'Test name'\n }\n res = client.post(CREATE_USER_URL, payload)\n assert res.status_code == status.HTTP_400_BAD_REQUEST\n user_exists = get_user_model().objects.filter(\n email=payload['email']).exists()\n assert not user_exists", "def test_creation_with_password(self, user):\n user.password = \"is_god\"\n user.save()", "def testLoginBadUsernameAndPassword(self):\n self.assertEquals(models.SUCCESS, self.users.add(\"userJ\", \"password\"))\n self.assertEquals(models.ERR_BAD_CREDENTIALS, self.users.login(\"nobody_user\", \"nobody_password\"))", "def test_set_user_password(self):\n pass", "def test_valid_password(self):\n pass_field = Field(\"\".join(['x' * (int(self.app.config['MAX_PWD_LEN']))]))\n\n valid_password(None, pass_field)" ]
[ "0.8364861", "0.81454116", "0.8075492", "0.8066621", "0.8062283", "0.80539316", "0.8046353", "0.80201375", "0.80164695", "0.7999124", "0.7964631", "0.7932641", "0.7927489", "0.78352296", "0.7782757", "0.766781", "0.7564408", "0.7523772", "0.74742943", "0.74702775", "0.74666864", "0.7464871", "0.74171937", "0.74119437", "0.74079806", "0.7405213", "0.73640287", "0.73601025", "0.7334002", "0.72789043" ]
0.8502937
0
Tests that adding a user with both a long username and long password fails
def testAddLongUsernameAndPassword(self):
    original_username = "thisgonnabelong"
    longer_username = original_username*10
    original_password = "thisalsogonnabelong"
    longer_password = original_password*10
    self.assertEquals(models.ERR_BAD_USERNAME, self.users.add(longer_username, longer_password))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def testAddLongUsername(self):\n original_username = \"thiswillbelong\"\n longer_username = original_username*10\n self.assertEquals(models.ERR_BAD_USERNAME, self.users.add(longer_username, \"password\"))", "def testAddLongPassword(self):\n original_password = \"thiswillbelong\"\n longer_password = original_password*10\n self.assertEquals(models.ERR_BAD_PASSWORD, self.users.add(\"paulinarocks\", longer_password))", "def test_password_too_short(self):\n\t\tpayload = {\n\t\t'email': '[email protected]',\n\t\t'password': 'pw',\n\t\t'name': 'test'\n\t\t}\n\n\t\tres = self.client.post(CREATE_USER_URL, payload)\n\t\tself.assertEqual(res.status_code,status.HTTP_400_BAD_REQUEST)\n\t\tuser_exists = get_user_model().objects.filter(\n\t\t\temail = payload['email']\n\t\t\t).exists()\n\n\t\tself.assertFalse(user_exists)", "def test_password_too_short(self):\n payload = {'email': '[email protected]', 'password': 'fu'}\n res = self.client.post(CREATE_USER_URL, payload)\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)\n user_exists = get_user_model().objects.filter(\n email=payload['email']\n ).exists()\n self.assertFalse(user_exists)", "def test_password_too_short(self):\r\n payload = {\r\n 'email': '[email protected]',\r\n 'password': 'pw',\r\n 'name': 'Maks'\r\n }\r\n\r\n res = self.client.post(CREATE_USER_URL, payload)\r\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)\r\n\r\n user_exists = get_user_model().objects.filter(\r\n email=payload['email']\r\n ).exists()\r\n self.assertFalse(user_exists)", "def test_password_too_short(self):\n payload = {'email': '[email protected]', 'password': 'pw'}\n \n res = self.client.post(CREATE_USER_URL, payload)\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)\n \n user_exists = get_user_model().objects.filter(\n email=payload['email']\n ).exists()\n \n self.assertFalse(user_exists)", "def test_create_user_password_too_short(self):\n res = self.client.post(CREATE_USER_URL, {\n **self.mock_user,\n 'password': 'pw'\n })\n\n db_user = get_user_model().objects.filter(\n email=self.mock_user['email']\n )\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertFalse(db_user)", "def test_password_too_short(self):\n payload = {\n 'email': '[email protected]',\n 'password': 'pw',\n 'name': 'test Name'\n }\n\n res = self.client.post(CREATE_USER_URL, payload)\n\n self.assertEquals(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_password_too_short(self):\n payload = {'email': '[email protected]', 'password': '123'}\n\n res = self.client.post(CREATE_USER_URL, payload)\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)\n user_exists = get_user_model().objects.filter(\n email=payload['email']\n ).exists()\n self.assertFalse(user_exists)", "def test_password_too_short(self):\n # requirments for creating user\n payload = {\n 'email': '[email protected]',\n 'password': 'pwd',\n 'name': 'Test',\n }\n\n # this will do a HTTP POST request and create a user\n response = self.client.post(CREATE_USER_URL, payload)\n\n # Ensure that statuscode returns a HTTP400 bad request\n # becos must exist before we can ckeck password\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n # chech if user exists true else false\n user_exists = get_user_model().objects.filter(\n email=payload['email']\n ).exists()\n\n self.assertFalse(user_exists)", "def test_password_too_short(self):\n payload = {\n 'email': '[email protected]',\n 'password': 'pw'\n }\n res = 
self.client.post(CREATE_USER_API, payload)\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)\n user_exists = get_user_model().objects.filter(\n email=payload['email']\n ).exists()\n self.assertFalse(user_exists)", "def test_password_too_short(self):\n\n payload = {\n \"email\": \"[email protected]\",\n \"name\": \"Test\",\n 'password': 'tTTt'\n }\n res = self.client.post(CREATE_USER_URL, payload)\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)\n\n user_exitst = get_user_model().objects.filter(\n email=payload['email']\n ).exists()\n\n self.assertFalse(user_exitst)", "def test_create_user_with_too_long_username(self):\n data = {\n 'username': 'foo'*11,\n 'email': '[email protected]',\n 'password': 'foobar'\n }\n response = self.client.post(self.create_url, data, format='json')\n self.assertEqual(User.objects.count(), 1)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(len(response.data['username']), 1)", "def test_password_too_short(self):\n\n credentials = {\n 'email': '[email protected]',\n 'password': 'pw',\n 'name': 'Test Name'\n }\n response = self.client.post(URL_CREATE_USER, credentials)\n\n # Check that this is a bad request since the password was too short.\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n is_user_created = get_user_model().objects.filter(\n email=credentials['email']\n ).exists()\n\n self.assertFalse(is_user_created)\n self.assertEqual(response.data['password'][0].code, 'min_length')", "def test_create_user_password_too_short(self):\n data = {\n \"firstname\": \"John\",\n \"lastname\": \"Doe\",\n \"password\": \"short\",\n \"password_repeat\": \"short\",\n }\n res = self.post(url=\"/users\", data=data)\n self.assertException(res, exc.PasswordTooShort)\n\n users = User.query.all()\n self.assertEqual(len(users), 5)", "def testLoginBadUsernameAndPassword(self):\n self.assertEquals(models.SUCCESS, self.users.add(\"userJ\", \"password\"))\n self.assertEquals(models.ERR_BAD_CREDENTIALS, self.users.login(\"nobody_user\", \"nobody_password\"))", "def test_create_user_invalid_password(self):\r\n print(\"Create user invalid password (empty)\")\r\n u_id = 3\r\n username = \"newtestuser\"\r\n password = \"\"\r\n u_type = 1\r\n\r\n prev_noUsers = len(User.query.all())\r\n self.assertEqual(self.system.create_user(u_id, username, password, u_type), 0)\r\n curr_noUsers = len(User.query.all())\r\n self.assertEqual(prev_noUsers, curr_noUsers)", "def testAddNoneUsernameAndPassword(self):\n self.assertEquals(models.ERR_BAD_USERNAME, self.users.add(\"\", \"\"))", "def test_create_user_with_short_password(self):\n data = {\n 'email': '[email protected]',\n 'password': 'foo'\n }\n\n response = self.client.post(self.create_url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(len(response.data['password']), 1)", "def testAddNoneUsernameAndPassword(self):\n self.assertEquals(models.ERR_BAD_USERNAME, self.users.add(None, None))", "def test_create_user_with_short_password(self):\n data = {\n 'username': 'foobar',\n 'email': '[email protected]',\n 'password': 'foo'\n }\n response = self.client.post(self.create_url, data, format='json')\n self.assertEqual(User.objects.count(), 1)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(len(response.data['password']), 1)", "def test_create_user_invalid_username(self):\r\n print(\"Create user invalid username (already taken)\")\r\n u_id = 3\r\n username = 
\"100\"\r\n password = \"test9999\"\r\n u_type = 1\r\n\r\n prev_noUsers = len(User.query.all())\r\n self.assertEqual(self.system.create_user(u_id, username, password, u_type), 0)\r\n curr_noUsers = len(User.query.all())\r\n self.assertEqual(prev_noUsers, curr_noUsers)", "def test_long_username():\n expect_error(register, InputError,\n \"a\" * (MAX_USERNAME + 1), \"abcdef\", \"a\", \"a\", \"a\")", "def test_password_is_to_short(setup_client):\n client = setup_client\n payload = {\n 'email': '[email protected]',\n 'password': 'pw',\n 'role': 'Supplier',\n 'name': 'Test name'\n }\n res = client.post(CREATE_USER_URL, payload)\n assert res.status_code == status.HTTP_400_BAD_REQUEST\n user_exists = get_user_model().objects.filter(\n email=payload['email']).exists()\n assert not user_exists", "def test_user_short_passwords(self):\n data = json.dumps({\n \"username\" : \"moses\", \"email\" : \"[email protected]\",\n \"password\" : \"1234567\", \"confirm_password\" : \"1234567\"})\n response = self.app.post(\n '/api/v3/users', data=data,\n content_type='application/json',\n headers=self.admin_header)\n self.assertEqual(response.status_code, 400)", "def test_password_length(self):\n payload = {\n 'email': '[email protected]',\n 'name': \"hello\",\n 'password': 'pw',\n }\n\n res = self.client.post(CREATE_USER_URL, payload)\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)\n\n user_exists = get_user_model().objects.filter(\n email=payload['email']\n ).exists()\n self.assertFalse(user_exists)", "def testAddEmptyUsername(self):\n self.assertEquals(models.ERR_BAD_USERNAME, self.users.add(\"\", \"password\"))", "def test_password_too_short(self):\n\n payload = {\n \"user\": {\n \"email\": \"[email protected]\",\n \"password\": \"us\",\n \"is_doctor\": False,\n \"is_hospital_admin\": True\n },\n 'first_name': 'Test1',\n 'last_name': 'JustUser2'\n }\n\n response = self.client.post(HOSPITAL_ADMIN_REGISTER_URL, payload, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def testAddNonePassword(self):\n self.assertEquals(models.ERR_BAD_PASSWORD, self.users.add(\"userF\", None))", "def test_new_user(new_user):\n user, _ = new_user\n\n assert_that(repr(user)).matches(r\"\\<User '[\\w_-]+?'\\>\")\n assert_that(user.salt).is_length(32)\n assert_that(len(user.hashed_password)).is_greater_than(50)" ]
[ "0.8243537", "0.7912154", "0.7906771", "0.78553146", "0.7854444", "0.7844873", "0.7770756", "0.77600276", "0.7758058", "0.7728684", "0.77277994", "0.7720663", "0.7621893", "0.7594183", "0.7564204", "0.7557341", "0.7486266", "0.74427015", "0.7390739", "0.7349027", "0.7303733", "0.7292742", "0.72695243", "0.72308105", "0.7195562", "0.71663225", "0.7157816", "0.7147181", "0.7141715", "0.7113219" ]
0.8709033
0
Tests that logging in with both an invalid username and invalid password fails
def testLoginBadUsernameAndPassword(self):
    self.assertEquals(models.SUCCESS, self.users.add("userJ", "password"))
    self.assertEquals(models.ERR_BAD_CREDENTIALS, self.users.login("nobody_user", "nobody_password"))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_wrong_login_input(self):\n self.user.list_of_accounts = [{'username': 'dalton',\n 'pwd': 'chromelegend',\n 'email': '[email protected]'}]\n msg = self.user.login(\"[email protected]\", \"legendchrome\")\n self.assertEqual(msg, \"Invalid email, password combination\")", "def test_login_wrong_credentials(self):\n\t\tdata = {'username' : 'nonexistentuser', 'password' : 'nopasswordlol'}\n\t\tresponse = self.login(data)\n\n\t\terror_text = \"Unable to log in with provided credentials.\"\n\n\t\tself.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\t\ttry:\n\t\t\tif error_text not in response.data[\"non_field_errors\"]:\n\t\t\t\tself.fail(\"Error text must be : '\" + error_text + \"'\")\n\t\texcept AttributeError:\n\t\t\tself.fail(\"There must be at least one entry in 'non_field_errors'\")", "def test_valid_username_invalid_password(self):\n response = self.client.post(reverse('users:login'), {'username': self.user['username'], 'password': '1sfsdf'})\n self.assertEqual(response.status_code, 200)\n self.assertFormError(response, 'form', None, ERROR_MSG)", "def test_auth_user_fail_bad_username(self):\n\n self.assertFalse(User.authenticate(\"invalid\", \"allison\"))", "def test_incorrect_username(db_session):\n assert (login(\"unknown\", \"unknown\") == \"login.failed\")", "def test_invalid_username_invalid_password(self):\n response = self.client.post(\n reverse('users:login'), {\n 'username': self.create_user_data()['username'],\n 'password': self.create_user_data()['password1']\n }\n )\n self.assertEqual(response.status_code, 200)\n self.assertFormError(response, 'form', None, ERROR_MSG)", "def test_incorrect_username(self):\n input = (\"\", \"password\")\n if is_travis():\n self.login_test(*input, True)\n else:\n self.login_test(*input)", "def test_authenticate_invalid_username(self):\r\n print(\"Authenticate user invalid username\")\r\n username = \"test9999user\"\r\n password = \"password\"\r\n\r\n self.assertEqual(self.system.authenticate(username, password), False)", "def test_incorrect_login(self):\n response = self.client.post(\n '/users/login',\n data=dict(username=\"dsadsa\", password=\"dsadsadsa\"),\n follow_redirects=True\n )\n self.assertIn(b\"Username and password do not match\", response.data)", "def test_user_authenticate_invalid_username(self):\n\n user = User.authenticate(\"wrong_username\", \"password\")\n\n self.assertEqual(user, False)", "def test_authenticate_invalid_password(self):\r\n print(\"Authenticate user invalid password (wrong)\")\r\n username = \"admin\"\r\n password = \"password9999\"\r\n\r\n self.assertEqual(self.system.authenticate(username, password), False)", "def test_login_wrong_credentials_false(self):\n res = self.client().post('/api/v1/auth/signup', data=self.user)\n self.assertEqual(res.status_code, 201)\n logins = {\n \"Email\": \"[email protected]\",\n \"Password\": \"pass4567\"\n }\n resp = self.client().post('/api/v1/auth/login', data=logins)\n self.assertEqual(resp.status_code, 400)\n resp = resp.get_json()\n self.assertEqual(resp['error'],\n 'Invalid password/email combination')", "def test_valid_login_form_but_failed_authentication(self):\n\n\n\t\tpass", "def test_invalid_username_valid_password(self):\n response = self.client.post(reverse('users:login'), {'username': 'xyzabe', 'password': self.user['password1']})\n self.assertEqual(response.status_code, 200)\n self.assertFormError(response, 'form', None, ERROR_MSG)", "def test_invalid_credentials(self):\n self.user = {\"username\": \"testuserother\", \"password\": 
\"testpassword\"}\n response = self.app.post(\"/auth/login/\", data=self.user)\n self.assertEqual(response.status_code, 403)\n\n output = json.loads(response.data.decode('utf-8'))\n self.assertIn(\"Error: Invalid username and/or password.\",\n output[\"message\"])\n\n self.user = {\"username\": \"testuser\", \"password\": \"invalid\"}\n response = self.app.post(\"/auth/login/\", data=self.user)\n self.assertEqual(response.status_code, 403)\n\n output = json.loads(response.data.decode('utf-8'))\n self.assertIn(\"Error: Invalid username and/or password.\",\n output[\"message\"])", "def test_login_with_bad_username(self):\n resp = self.client.post(\n reverse('login'),\n json.dumps({\n \"username\": \"\",\n \"password\": self.PASSWORD,\n }),\n content_type=\"application/json\"\n )\n assert resp.status_code == 403, resp.content.decode('utf-8')\n assert not self.is_authenticated(self.user)", "def test_authenticate_user_with_incorrect_username(self):\n data = {\n 'username': 'test_user_2',\n 'password': 'testpassword'\n }\n response = self.client.post(self.authenticate_url, data, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(len(response.data['non_field_errors']), 1)", "def test_failed_login_keeps_username(self):\n resp = self.app.get('/login_handler?login=manager&password=badpassword',\n status=302)\n resp = resp.follow(status=200)\n ok_('Invalid Password' in resp, resp)\n eq_(resp.form['login'].value, 'manager')", "def test_wrong_password_login(self):\n\n self.client.post(\"/api/v2/auth/signup\", data=json.dumps(\n self.generic_user), content_type=\"application/json\")\n\n wrong_info = {\"email\": \"[email protected]\",\n \"password\": \"obviouslyfake\"}\n\n res = self.client.post(\n \"/api/v2/auth/login\", data=json.dumps(wrong_info), content_type=\"application/json\")\n result = json.loads(res.data)\n self.assertEqual(\n result[\"Error\"], \"Incorrect credentials. Please try again\")\n self.assertEqual(res.status_code, 401)", "def test_incorrect_password(self):\n input = (\"admin\", \"\")\n if is_travis():\n self.login_test(*input, True)\n else:\n self.login_test(*input)", "def test_auth_user_fail_bad_password(self):\n\n self.assertFalse(User.authenticate(self.user1.username, \"invalid\"))", "def test_invalid_password(self):\n pass", "def test_login_missing_password(self):\n response = self.client.post('/api/v2/auth/login',\n data=json.dumps(users[7]),\n content_type='application/json')\n self.assertEqual(response.status_code, 400)\n assert 'Missing required parameter ', str(response.data)", "def test_login_route_incorrect(self):\n result = self.client.post(\"/login\",\n data={\"username\":\"test_user1\", \"password\":\"test_pass2\"}, follow_redirects=True)\n self.assertEqual(result.status_code, 200)\n self.assertIn(b\"The password you inputed for test_user1 is incorrect. 
Try again!\", result.data)", "def test_invalid_login(self):\n self.assertRaises(\n session.AuthenticationError,\n lambda: self.session.authenticate('[email protected]', 'wrongsecret')\n )", "def test_login_wrong_username(self):\n res = self.client.post('api/v2/auth/signup', json=self.user,\n headers={'Content-Type': 'application/json'})\n \n res_other = self.client.post('/api/v2/auth/login', json={\n 'username': 'MrMan', 'password': 'Aw3someSauce'}, headers={'Content-Type': 'application/json'})\n data_other = res_other.get_json()\n\n self.assertEqual(res_other.status_code, 401)\n self.assertEqual(data_other['error'], 'User not found: Please register')", "def test_authenticate_just_username(self):\n \n self.assertRaises(\n ValueError, \n self.authenticator.authenticate, \n username=u'thruflo'\n )", "def test_failedLoginPassword(self):\n login = self.failureResultOf(\n self.portal.login(\n credentials.UsernamePassword(b\"bob\", b\"h3llo\"), self, ITestable\n )\n )\n self.assertTrue(login)\n self.assertEqual(error.UnauthorizedLogin, login.type)", "def test_invalid_password(self):\n self.signup('Bo', 'Theo', '[email protected]', 'Bo1995', 'Bo1995')\n rv = self.login('[email protected]', 'Bo1905')\n self.assertIn(b'Invalid password! Please try again', rv.data)", "def test_login_with_bad_password(self):\n resp = self.client.post(\n reverse('login'),\n json.dumps({\n \"username\": self.USERNAME,\n \"password\": \"\",\n }),\n content_type=\"application/json\"\n )\n assert resp.status_code == 403, resp.content.decode('utf-8')\n assert not self.is_authenticated(self.user)" ]
[ "0.82186335", "0.81208724", "0.80137014", "0.8002412", "0.7950306", "0.791697", "0.7853422", "0.78504646", "0.7841491", "0.7744492", "0.7737247", "0.7733766", "0.7724799", "0.7675062", "0.7597364", "0.7575842", "0.75734806", "0.7559961", "0.755887", "0.7550253", "0.7546143", "0.7530981", "0.75290334", "0.7482402", "0.7475716", "0.7468349", "0.74528927", "0.74475676", "0.7444211", "0.74422073" ]
0.8395619
0
Constructs a master spec graph.
def master_spec_graph(master_spec):
    if not isinstance(master_spec, spec_pb2.MasterSpec):
        raise TypeError("master_spec_graph() expects a MasterSpec input.")

    graph = pygraphviz.AGraph(directed=True)

    graph.node_attr.update(shape="box",
                           style="filled",
                           fillcolor="white",
                           fontname="roboto, helvetica, arial",
                           fontsize=11)
    graph.edge_attr.update(fontname="roboto, helvetica, arial", fontsize=11)

    for component in master_spec.component:
        graph.add_node(component.name, label=_component_contents(component))

    for component in master_spec.component:
        for linked_feature in component.linked_feature:
            graph.add_edge(linked_feature.source_component, component.name,
                           label=_linked_feature_label(linked_feature))

    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        return graph.draw(format="svg", prog="dot")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _createMaster(self, *args, **kwds):\n raise NotImplementedError", "def construct_master(self, master_scenarios):\n lr_instance = self.tsdro.lr_instance\n\n self.master, self.stage1_vars = lr_instance.construct_stage1()\n if self.method == \"RO\":\n self.wass_mult = 0\n else:\n self.wass_mult = self.master.addVar(name=\"wass_multiplier\", lb=0)\n self.epi_vars = self.master.addVars(self.tsdro.samples.keys(), lb=0,\n name=\"epi_vars\")\n objexpr_stage1 = lr_instance.get_objective_stage1(self.stage1_vars)\n objexpr_stage2 = quicksum(\n self.tsdro.probs[sample_name] * self.epi_vars[sample_name]\n for sample_name in self.tsdro.samples.keys())\n\n self.objexpr_master = (objexpr_stage1\n + self.tsdro.wass_rad * self.wass_mult\n + objexpr_stage2)\n self.master.setObjective(self.objexpr_master, GRB.MINIMIZE)\n\n for scenario_name in master_scenarios:\n scenario = self.tsdro.scenarios[scenario_name]\n curr_vars, _ = lr_instance.add_stage2(\n self.master, self.stage1_vars, scenario, scenario_name)\n for sample_name, sample in self.tsdro.samples.items():\n objexpr_stage2 = lr_instance.get_objective_stage2(curr_vars,\n scenario)\n if scenario_name == sample_name or self.method == \"RO\":\n scenario_distance = 0\n else:\n scenario_distance = sc.get_scenario_distance(\n scenario, sample, lr_instance)\n rhs = objexpr_stage2 - self.wass_mult * scenario_distance\n self.master.addLConstr(self.epi_vars[sample_name], \">\", rhs,\n name=(\"epi_constr_\" + str(scenario_name)\n + \"_\" + str(sample_name)))\n self.stage2_vars[scenario_name] = curr_vars\n return", "def test_create_platfrom(self):\n # procedure object\n proc1 = Procedure(\"procedure 1\", \"proc1\")\n proc2 = Procedure(\"procedure 2\", \"proc2\")\n # list of procedures\n proList = [proc1, proc2]\n # observable property object\n obs1 = ObservableProperty(\"obs-property1\", \"obs-property\")\n obs2 = ObservableProperty(\"obs-property2\", \"obs-property2\")\n obs3 = ObservableProperty(\"obs-property3\", \"obs-property3\")\n # list of observable properties\n obsList = [obs1, obs2]\n obsList2 =[obs1,obs2]\n # sensor object\n s1 = Sensor(\"Sensor 1\", \"first sensor\", obsList, proList)\n s2 = Sensor(\"Sensor 2\", \"second sensor\", obsList2, proList)\n s3 = Sensor(\"Sensor 3\", \"second sensor\", obsList2, proList)\n act1 = Actuator(\"Actuator 1\", \"first actuator\",[],[])\n act2 = Actuator(\"Actuator 2\", \"second actuator\",[],[])\n act3 = Actuator(\"Actuator 3\", \"third actuator\",[],[])\n #list of actuators\n actList =[act1,act2,act3]\n #list of sensors\n senList = [s1,s2]\n # platform object\n p1 = Platform(\"platform 1\", \"p1\", senList, actList,[])\n p1.add_sensor(s3)\n\n this_graph = cfg.get_graph()\n #print(this_graph.serialize(format='turtle'))\n print(this_graph.serialize(format=\"ttl\").decode('utf-8'))", "def build_graph(self):\n self.import_tree(ZOO_PATH, self.import_zoo, self.verify_zoos)\n self.import_tree(WILD_PATH, self.import_wild, self.verify_wilds)\n self.import_tree(PANDA_PATH, self.import_redpanda, self.verify_pandas)\n self.import_tree(MEDIA_PATH, self.import_media, self.verify_media)", "def _build_graph(self):\n pass", "def __init__(self):\n\n self._mh = MasterHead.get_head()", "def build_subsets(self):\n self.all = nrn.SectionList()\n self.all.wholetree(sec=self.soma)", "def build_graph(self):\n pass", "def build_subsets(self):\n\t\tself.all = h.SectionList()\n\t\tself.all.wholetree(sec=self.soma)\n\n\t\t# morphological section lists\n\t\tself.axon_list = []\n\t\tself.axosomatic_list = 
[]\n\t\tself.apicalshaftoblique_list = []\n\t\tself.apicaltree_list = []\n\t\tself.tuft_list = []\n\t\tself.soma_list = []\n\t\tself.basal_list = []\n\n\t\tself.axon_list.append(hillock)\n\t\tself.axon_list.append(iseg)\n\t\tself.axon_list.append(axon)\n\n\t\tself.axosomatic_list.append(soma)\n\t\tself.axosomatic_list.append(basal)\n\t\tself.axosomatic_list.append(hillock)\n\t\tself.axosomatic_list.append(iseg)\n\t\tself.axosomatic_list.append(axon)\n\n\t\tself.apicalshaftoblique_list.append(apical)\n\n\t\tself.apicaltree_list.append(apical)\n\t\tself.apicaltree_list.append(tuft)\n\n\t\tself.tuft_list.append(tuft)\n\n\t\tself.soma_list.append(soma)\n\n\t\tself.basal_list.append(basal)\n\n\t# Create lists of cell parts that contain each ion channel type\n\t\tself.nat_list = []\n\t\tself.kslow_list = []\n\t\tself.kfast_list = []\n\t\tself.ih_list = []\n\n\t\tself.ih_list.append(basal)\n\t\tself.ih_list.append(apical)\n\t\tself.ih_list.append(tuft)\n\n\t\tself.excsyn_list.append(basal)\n\t\tself.excsyn_list.append(apical)\n\t\tself.excsyn_list.append(tuft)\n\n\t\tself.inhdendsyn_list.append(basal)\n\t\tself.inhdendsyn_list.append(apical)\n\n\t\tself.inhsomasyn_list.append(soma)\n\n\t\tself.nat_list.append(soma)\n\t\tself.nat_list.append(hillock)\n\t\tself.nat_list.append(iseg)\n\t\tself.nat_list.append(apical)\n\t\tself.nat_list.append(tuft)\n\n\t\tself.kfast_list.append(soma)\n\t\tself.kfast_list.append(apical)\n\t\tself.kfast_list.append(tuft)\n\n\t\tself.kslow_list.append(soma)\n\t\tself.kslow_list.append(apical)\n\t\tself.kslow_list.append(tuft)", "def generate(self):\n self.graph_repl = self.master.graph_repl", "def _setup_graph(self):\n pass", "def _setup_graph(self):\n pass", "def __build_graph(self):\n all_matches = self.___matches()\n # make new matrix of the student classes and their general studies matrix\n split_reqs = [reqs.split(', ') for reqs in all_matches['ALL'].as_matrix() ]\n rep_values = [line.count(\",\") + 1 for line in all_matches['ALL']]\n CLS = np.repeat(all_matches['FULL'].as_matrix(), rep_values )\n REQ = np.array(list(chain.from_iterable(split_reqs)))\n graph = pd.DataFrame([CLS, REQ]).T\n graph.columns = ['CLS','REQ']\n graph = graph.drop_duplicates()\n return graph", "def build_masters(opts):\n logger.info(\"Reading designspace file...\")\n ds = DesignSpaceDocument.fromfile(opts.dsPath)\n validateDesignspaceDoc(ds)\n master_paths = [s.path for s in ds.sources]\n\n logger.info(\"Building local OTFs for master font paths...\")\n curDir = os.getcwd()\n dsDir = os.path.dirname(opts.dsPath)\n\n for master_path in master_paths:\n master_path = os.path.join(dsDir, master_path)\n masterDir = os.path.dirname(master_path)\n ufoName = os.path.basename(master_path)\n otfName = os.path.splitext(ufoName)[0]\n otfName = f\"{otfName}.otf\"\n\n if masterDir:\n os.chdir(masterDir)\n\n makeotf(['-nshw', '-f', ufoName, '-o', otfName,\n '-r', '-nS'] + opts.mkot)\n logger.info(f\"Built OTF font for {master_path}\")\n generalizeCFF(otfName)\n os.chdir(curDir)", "def genStartGraph(Xs, numnodes, td, fitinfo):\n if fitinfo.startGraph==\"cn_valid\":\n graph = conceptualNetwork(Xs, numnodes, td=td, valid=True, fitinfo=fitinfo)\n elif fitinfo.startGraph==\"pf_valid\":\n graph = pathfinder(Xs, numnodes, valid=True, td=td)\n elif (fitinfo.startGraph==\"rw\" or fitinfo.startGraph==\"nrw\"):\n graph = naiveRandomWalk(Xs,numnodes)\n elif fitinfo.startGraph==\"fully_connected\":\n graph = fullyConnected(numnodes)\n elif fitinfo.startGraph==\"empty_graph\":\n graph = 
np.zeros((numnodes,numnodes)).astype(int) # useless...\n else:\n graph = np.copy(fitinfo.startGraph) # assume a graph has been passed as a starting point\n return graph", "def gen_graph(self):", "def build_graph(self):\n self.__create_placeholders()\n self.__create_encoder()\n self.__create_latent()\n self.__create_decoder()\n self.__create_loss()\n self.__create_generate()\n self.__create_reconstruct()\n self.__create_optimizer()\n self.__create_summary()", "def _construct_graph(self):\n raise NotImplementedError", "def setup(self):\n self.vert1 = TestVertex(10, \"New AbstractConstrainedVertex 1\")\n self.vert2 = TestVertex(5, \"New AbstractConstrainedVertex 2\")\n self.vert3 = TestVertex(3, \"New AbstractConstrainedVertex 3\")\n self.edge1 = SimpleApplicationEdge(self.vert1, self.vert2,\n None, \"First edge\")\n self.edge2 = SimpleApplicationEdge(self.vert2, self.vert1,\n None, \"Second edge\")\n self.edge3 = SimpleApplicationEdge(self.vert1, self.vert3,\n None, \"Third edge\")\n self.verts = [self.vert1, self.vert2, self.vert3]\n self.edges = [self.edge1, self.edge2, self.edge3]\n self.graph = ApplicationGraph(\"Graph\", self.verts, self.edges)\n\n flops = 1000\n (e, ne, n, w, sw, s) = range(6)\n\n processors = list()\n for i in range(18):\n processors.append(Processor(i, flops))\n\n links = list()\n links.append(Link(0, 0, 0, 0, 1, s, s))\n\n _sdram = SDRAM(128 * (2**20))\n\n links = list()\n\n links.append(Link(0, 0, 0, 1, 1, n, n))\n links.append(Link(0, 1, 1, 1, 0, s, s))\n links.append(Link(1, 1, 2, 0, 0, e, e))\n links.append(Link(1, 0, 3, 0, 1, w, w))\n r = Router(links, False, 100, 1024)\n\n ip = \"192.162.240.253\"\n chips = list()\n for x in range(5):\n for y in range(5):\n chips.append(Chip(x, y, processors, r, _sdram, 0, 0, ip))\n\n self.machine = Machine(chips)\n self.bp = BasicPartitioner()", "def build_head(self):\n stages = [f'stage{i}' for i in range(1, 7)]\n for stage in stages:\n block = getattr(self.arch, stage)\n PAF, CFM = block.keys()\n PAF = build_blocks(block[PAF], 'head')\n CFM = build_blocks(block[CFM], 'head')\n setattr(self, f\"{stage}_PAF\", PAF)\n setattr(self, f\"{stage}_CFM\", CFM)", "def make_graph_from_spec(graphtype, args):\n parsed = parse_graph_argument(graphtype, args)\n assert parsed['graphtype'] == graphtype\n return obtain_graph(parsed)", "def build_graph(self):\n raise NotImplementedError", "def setUp(self):\n self.nC4H10O = Species(\n label='n-C4H10O',\n conformer=Conformer(\n E0=(-317.807, 'kJ/mol'),\n modes=[\n IdealGasTranslation(mass=(74.07, \"g/mol\")),\n NonlinearRotor(inertia=([41.5091, 215.751, 233.258], \"amu*angstrom^2\"), symmetry=1),\n HarmonicOscillator(frequencies=(\n [240.915, 341.933, 500.066, 728.41, 809.987, 833.93, 926.308, 948.571, 1009.3, 1031.46, 1076,\n 1118.4, 1184.66, 1251.36, 1314.36, 1321.42, 1381.17, 1396.5, 1400.54, 1448.08, 1480.18, 1485.34,\n 1492.24, 1494.99, 1586.16, 2949.01, 2963.03, 2986.19, 2988.1, 2995.27, 3026.03, 3049.05, 3053.47,\n 3054.83, 3778.88], \"cm^-1\")),\n HinderedRotor(inertia=(0.854054, \"amu*angstrom^2\"), symmetry=1, fourier=(\n [[0.25183, -1.37378, -2.8379, 0.0305112, 0.0028088],\n [0.458307, 0.542121, -0.599366, -0.00283925, 0.0398529]], \"kJ/mol\")),\n HinderedRotor(inertia=(8.79408, \"amu*angstrom^2\"), symmetry=1, fourier=(\n [[0.26871, -0.59533, -8.15002, -0.294325, -0.145357],\n [1.1884, 0.99479, -0.940416, -0.186538, 0.0309834]], \"kJ/mol\")),\n HinderedRotor(inertia=(7.88153, \"amu*angstrom^2\"), symmetry=1, fourier=(\n [[-4.67373, 2.03735, -6.25993, -0.27325, -0.048748],\n 
[-0.982845, 1.76637, -1.57619, 0.474364, -0.000681718]], \"kJ/mol\")),\n HinderedRotor(inertia=(2.81525, \"amu*angstrom^2\"), symmetry=3, barrier=(2.96807, \"kcal/mol\")),\n ],\n spin_multiplicity=1,\n optical_isomers=1,\n ),\n molecular_weight=(74.07, \"g/mol\"),\n transport_data=TransportData(sigma=(5.94, 'angstrom'), epsilon=(559, 'K')),\n energy_transfer_model=SingleExponentialDown(alpha0=(447.5 * 0.011962, \"kJ/mol\"), T0=(300, \"K\"), n=0.85),\n )\n\n self.nC4H10O.from_smiles('CCCCO')\n\n self.nC4H8 = Species(\n label='n-C4H8',\n conformer=Conformer(\n E0=(-17.8832, 'kJ/mol'),\n modes=[\n IdealGasTranslation(mass=(56.06, \"g/mol\")),\n NonlinearRotor(inertia=([22.2748, 122.4, 125.198], \"amu*angstrom^2\"), symmetry=1),\n HarmonicOscillator(frequencies=(\n [308.537, 418.67, 636.246, 788.665, 848.906, 936.762, 979.97, 1009.48, 1024.22, 1082.96, 1186.38,\n 1277.55, 1307.65, 1332.87, 1396.67, 1439.09, 1469.71, 1484.45, 1493.19, 1691.49, 2972.12, 2994.31,\n 3018.48, 3056.87, 3062.76, 3079.38, 3093.54, 3174.52], \"cm^-1\")),\n HinderedRotor(inertia=(5.28338, \"amu*angstrom^2\"), symmetry=1, fourier=(\n [[-0.579364, -0.28241, -4.46469, 0.143368, 0.126756],\n [1.01804, -0.494628, -0.00318651, -0.245289, 0.193728]], \"kJ/mol\")),\n HinderedRotor(inertia=(2.60818, \"amu*angstrom^2\"), symmetry=3, fourier=(\n [[0.0400372, 0.0301986, -6.4787, -0.0248675, -0.0324753],\n [0.0312541, 0.0538, -0.493785, 0.0965968, 0.125292]], \"kJ/mol\")),\n ],\n spin_multiplicity=1,\n optical_isomers=1,\n ),\n )\n\n self.nC4H8.from_smiles('CCC=C')\n\n self.H2O = Species(\n label='H2O',\n conformer=Conformer(\n E0=(-269.598, 'kJ/mol'),\n modes=[\n IdealGasTranslation(mass=(18.01, \"g/mol\")),\n NonlinearRotor(inertia=([0.630578, 1.15529, 1.78586], \"amu*angstrom^2\"), symmetry=2),\n HarmonicOscillator(frequencies=([1622.09, 3771.85, 3867.85], \"cm^-1\")),\n ],\n spin_multiplicity=1,\n optical_isomers=1,\n ),\n )\n\n self.H2O.from_smiles('O')\n\n self.N2 = Species(\n label='N2',\n molecular_weight=(28.04, \"g/mol\"),\n transport_data=TransportData(sigma=(3.41, \"angstrom\"), epsilon=(124, \"K\")),\n energy_transfer_model=None,\n )\n\n self.N2.from_smiles('N#N')\n\n logging.error('to TS')\n\n self.TS = TransitionState(\n label='TS',\n conformer=Conformer(\n E0=(-42.4373, \"kJ/mol\"),\n modes=[\n IdealGasTranslation(mass=(74.07, \"g/mol\")),\n NonlinearRotor(inertia=([40.518, 232.666, 246.092], \"u*angstrom**2\"), symmetry=1, quantum=False),\n HarmonicOscillator(frequencies=(\n [134.289, 302.326, 351.792, 407.986, 443.419, 583.988, 699.001, 766.1, 777.969, 829.671, 949.753,\n 994.731, 1013.59, 1073.98, 1103.79, 1171.89, 1225.91, 1280.67, 1335.08, 1373.9, 1392.32, 1417.43,\n 1469.51, 1481.61, 1490.16, 1503.73, 1573.16, 2972.85, 2984.3, 3003.67, 3045.78, 3051.77, 3082.37,\n 3090.44, 3190.73, 3708.52], \"kayser\")),\n HinderedRotor(inertia=(2.68206, \"amu*angstrom^2\"), symmetry=3, barrier=(3.35244, \"kcal/mol\")),\n HinderedRotor(inertia=(9.77669, \"amu*angstrom^2\"), symmetry=1, fourier=(\n [[0.208938, -1.55291, -4.05398, -0.105798, -0.104752],\n [2.00518, -0.020767, -0.333595, 0.137791, -0.274578]], \"kJ/mol\")),\n ],\n spin_multiplicity=1,\n optical_isomers=1,\n ),\n frequency=(-2038.34, 'cm^-1'),\n )\n\n self.reaction = Reaction(\n label='dehydration',\n reactants=[self.nC4H10O],\n products=[self.nC4H8, self.H2O],\n transition_state=self.TS,\n kinetics=Arrhenius(A=(0.0387, 'm^3/(mol*s)'), n=2.7, Ea=(2.6192e4, 'J/mol'), T0=(1, 'K'))\n )\n\n self.network = Network(\n label='n-butanol',\n 
isomers=[Configuration(self.nC4H10O)],\n reactants=[],\n products=[Configuration(self.nC4H8, self.H2O)],\n path_reactions=[self.reaction],\n bath_gas={self.N2: 1.0},\n )\n\n self.pdepnetwork = deepcopy(self.network)\n self.pdepnetwork.__class__ = PDepNetwork\n self.pdepnetwork.source = [self.pdepnetwork.isomers[0].species[0]]\n self.pdepnetwork.index = 1\n self.pdepnetwork.explored = []", "def get_master_nodes(self):\n default = 3\n master_nodes_count = input('enter number of master nodes\\n'\n 'default [3]: ')\n master_nodes_count = set_values(master_nodes_count, default, check='integer')\n master_keys = ['name','ip','mac']\n self.inventory_dict['csah']['vars']['master_nodes'] = []\n for num in range(master_nodes_count):\n master_values = []\n default = 'etcd-{}'.format(num)\n master_name = input('enter the master {} node name \\n'\n 'default [{}]: '.format(num, default))\n master_name = set_values(master_name, default)\n master_ip = get_ip(node_name=master_name, ip_type='os')\n master_mac = get_network_device_mac(node_name=master_name, ip_type='idrac')\n master_values.append(master_name)\n master_values.append(master_ip)\n master_values.append(master_mac)\n master_node_dict_pairs = dict(zip(master_keys, master_values))\n logging.info('adding {} values as name: {} ip: {} mac: {}'.format(master_name, master_name,\n master_ip, master_mac)) \n self.inventory_dict['csah']['vars']['master_nodes'].append(master_node_dict_pairs)\n self.clear_screen()\n self.inventory_dict['csah']['vars']['number_of_masters'] = master_nodes_count", "def make_sims(self):\n self.sims = [Simulation(conf=c) for c in self.sim_confs]", "def make_graph(self):\n\n # the root node\n self.graph.node(self.playbook_filename, style=\"dotted\", id=\"root_node\")\n\n # loop through the plays\n for play_counter, play in enumerate(self.playbook.get_plays(), 1):\n\n # the load basedir is relative to the playbook path\n if play._included_path is not None:\n self.data_loader.set_basedir(play._included_path)\n else:\n self.data_loader.set_basedir(self.playbook._basedir)\n self.display.vvv(\"Loader basedir set to {}\".format(self.data_loader.get_basedir()))\n\n play_vars = self.variable_manager.get_vars(play)\n play_hosts = [h.get_name() for h in self.inventory_manager.get_hosts(self.template(play.hosts, play_vars))]\n play_name = \"Play #{}: {} ({})\".format(play_counter, clean_name(play.get_name()), len(play_hosts))\n play_name = self.template(play_name, play_vars)\n\n self.display.banner(\"Graphing \" + play_name)\n\n play_id = \"play_\" + str(uuid.uuid4())\n\n self.graph_representation.add_node(play_id)\n\n with self.graph.subgraph(name=play_name) as play_subgraph:\n color, play_font_color = get_play_colors(play)\n # play node\n play_subgraph.node(play_name, id=play_id, style=\"filled\", shape=\"box\", color=color,\n fontcolor=play_font_color, tooltip=\" \".join(play_hosts))\n\n # edge from root node to plays\n play_edge_id = \"edge_\" + str(uuid.uuid4())\n play_subgraph.edge(self.playbook_filename, play_name, id=play_edge_id, style=\"bold\",\n label=str(play_counter), color=color, fontcolor=color)\n\n # loop through the pre_tasks\n self.display.v(\"Graphing pre_tasks...\")\n nb_pre_tasks = 0\n for pre_task_block in play.pre_tasks:\n nb_pre_tasks = self._include_tasks_in_blocks(current_play=play, graph=play_subgraph,\n parent_node_name=play_name, parent_node_id=play_id,\n block=pre_task_block, color=color,\n current_counter=nb_pre_tasks, play_vars=play_vars,\n node_name_prefix=\"[pre_task] \")\n\n # loop through the roles\n 
self.display.v(\"Graphing roles...\")\n role_number = 0\n for role in play.get_roles():\n # Don't insert tasks from ``import/include_role``, preventing duplicate graphing\n if role.from_include:\n continue\n\n # the role object doesn't inherit the tags from the play. So we add it manually.\n role.tags = role.tags + play.tags\n if not role.evaluate_tags(only_tags=self.options.tags, skip_tags=self.options.skip_tags,\n all_vars=play_vars):\n self.display.vv(\"The role '{}' is skipped due to the tags.\".format(role.get_name()))\n # Go to the next role\n continue\n\n role_number += 1\n role_name = \"[role] \" + clean_name(role.get_name())\n\n with self.graph.subgraph(name=role_name, node_attr={}) as role_subgraph:\n current_counter = role_number + nb_pre_tasks\n role_id = \"role_\" + str(uuid.uuid4())\n edge_id = \"edge_\" + str(uuid.uuid4())\n\n role_subgraph.node(role_name, id=role_id)\n # edge from play to role\n role_subgraph.edge(play_name, role_name, label=str(current_counter), color=color,\n fontcolor=color, id=edge_id)\n\n self.graph_representation.add_link(play_id, edge_id)\n self.graph_representation.add_link(edge_id, role_id)\n\n # loop through the tasks of the roles\n if self.options.include_role_tasks:\n role_tasks_counter = 0\n for block in role.compile(play):\n role_tasks_counter = self._include_tasks_in_blocks(current_play=play,\n graph=role_subgraph,\n parent_node_name=role_name,\n parent_node_id=role_id, block=block,\n color=color, play_vars=play_vars,\n current_counter=role_tasks_counter,\n node_name_prefix=\"[task] \")\n role_tasks_counter += 1\n self.display.v(\"{} roles added to the graph\".format(role_number))\n\n # loop through the tasks\n self.display.v(\"Graphing tasks...\")\n nb_tasks = 0\n for task_block in play.tasks:\n nb_tasks = self._include_tasks_in_blocks(current_play=play, graph=play_subgraph,\n parent_node_name=play_name, parent_node_id=play_id,\n block=task_block, color=color,\n current_counter=role_number + nb_pre_tasks,\n play_vars=play_vars, node_name_prefix=\"[task] \")\n\n # loop through the post_tasks\n self.display.v(\"Graphing post_tasks...\")\n for post_task_block in play.post_tasks:\n self._include_tasks_in_blocks(current_play=play, graph=play_subgraph, parent_node_name=play_name,\n parent_node_id=play_id, block=post_task_block, color=color,\n current_counter=nb_tasks, play_vars=play_vars,\n node_name_prefix=\"[post_task] \")\n\n self.display.banner(\"Done graphing {}\".format(play_name))\n self.display.display(\"\") # just an empty line\n # moving to the next play", "def build_object_spec(client_factory, root_folder, traversal_specs):\r\n object_spec = client_factory.create('ns0:ObjectSpec')\r\n object_spec.obj = root_folder\r\n object_spec.skip = False\r\n object_spec.selectSet = traversal_specs\r\n return object_spec", "def _build_graphs(self):\n g1 = self._build_graph1()\n g2 = self._build_graph2(g1)\n return g1, g2", "def _setup_graph_def(self):\n raise NotImplementedError", "def build(self, spec, prefix):\n make()" ]
[ "0.5888808", "0.57467175", "0.57278377", "0.5650693", "0.55837756", "0.5575762", "0.5521239", "0.5461212", "0.54128194", "0.53838843", "0.5381022", "0.5381022", "0.53774726", "0.53684235", "0.53078026", "0.5304308", "0.5303614", "0.5298643", "0.52795887", "0.52760595", "0.5237828", "0.5233417", "0.5232891", "0.522899", "0.5223168", "0.52121603", "0.5190136", "0.5189212", "0.51836276", "0.51779395" ]
0.7268906
0
Fetch child nodes for a given Zookeeper path.
def _get_zk_path_children(self, zk_conn, zk_path, name_for_error): children = [] try: children = zk_conn.get_children(zk_path) except NoNodeError: self.log.info('No zookeeper node at %s', zk_path) except Exception: self.log.exception('Could not read %s from %s', name_for_error, zk_path) return children
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_child_znodes(cluster_name, path):\n zoo_client = ZookeeperService.get_zoo_client(cluster_name)\n child_znodes = []\n\n children = zoo_client.get_children(path)\n # iter child nodes and convert to dict with extra info\n for child in children:\n child_path = os.path.join(path, child)\n data, _ = zoo_client.get(child_path)\n # node\n node = {\"path\": child_path, \"value\": data}\n node[\"name\"] = child_path.rsplit('/', 1)[-1]\n child_znodes.append(node)\n return child_znodes", "def children(self, path):\n url = u'/'.join(\n [self.conf[\"api\"], \"path\", escape_path(path).strip('/'), \"@children\"])\n params = {}\n self.logger.info(path)\n self.logger.debug(url)\n return self._get_iter(url, params)", "def getChildNodes( self, path ):\n\n return self.db.childNodes( path )", "def getChildren(self, path):\n \n self._sharedState.lock.acquire()\n try:\n try:\n self.update(path)\n children = list()\n entries = self._client.list(self._workingCopyPath + path, recurse=False)\n for entry in entries:\n entryPath = entry[0].path[self._workingPathLength:]\n formerEntry = self._sharedState.getFromCache(path)\n if formerEntry is None:\n newEntry = _Info(entry[0])\n else:\n newEntry = _Info(entry[0])\n newEntry.logMessage = formerEntry.logMessage # creation date and owner do not change\n self._sharedState.addToCache(entryPath, newEntry)\n children.append(entryPath)\n del children[0] # First item is always the queried path\n return children\n except ClientError, error:\n raise SubversionError(error)\n finally:\n self._sharedState.lock.release()", "def get_children(cur, node):\n sql = \"\"\"\n SELECT\n *\n FROM\n nodes\n WHERE\n parent=%s\n ORDER BY\n position;\n \"\"\"\n cur.execute(sql, (str(node), ))\n for result in cur:\n yield NodeData(**result)", "def cluster_list_children(cluster_name, znode, headers=None):\n\n return cluster_znode_children(cluster_name,\n \"/\",\n headers=headers or request.headers)", "def get_child_nodes(node):\r\n return list(iter_child_nodes(node))", "def retrieve(self, path):\n if len(path) == 0:\n for leaf in self.leaves:\n yield leaf\n else:\n next_index = path[0]\n if self._is_variable(next_index):\n for leaf in self._retrieve_variable(path[1:]):\n yield leaf\n else:\n next_disc_tree = self._child_at_index(next_index)\n if next_disc_tree is None:\n return\n else:\n for leaf in next_disc_tree.retrieve(path[1:]):\n yield leaf", "def cluster_znode_children(cluster_name, znode, headers=None):\n\n _zclient = get_client(cluster_name,\n headers or request.headers)\n zchildren = _zclient.get_children(znode)\n return make_response(str(zchildren),\n 200)", "def get_children(self, refobj):\n children = cmds.listConnections(\"%s.children\" % refobj, d=False)\n if not children:\n children = []\n return children", "def get_children(self):\n std = self._std\n bld = self._bld\n cls = self.__class__\n\n root = self.get_sobj()\n cit = std.NewChildIterator(root)\n cit.InitEx(0)\n\n children = []\n while cit.More():\n node = cls(std, bld, cit.Value().GetID(), self)\n if node.is_alive():\n children.append(node)\n cit.Next()\n return children", "def Children( cls, pid ):\n\t\tres = []\n\t\tpid = int(pid)\n\t\tfor cpid, cmd in cls.List().items():\n\t\t\tppid = int(cls.Status(cpid)[\"ppid\"])\n\t\t\tif ppid == pid:\n\t\t\t\tres.append( (cpid, None, cmd))\n\t\treturn res", "def _get_nodes(self, selector):\r\n arr = []\r\n def traverse(cont):\r\n children = cont.get_children()\r\n for n in xrange(len(children)):\r\n child = children[n]\r\n if child.node_type == selector:\r\n 
arr.append(child)\r\n elif child.node_type != 'Shape':\r\n traverse(child)\r\n traverse(self)\r\n return arr", "def do_select_children(self, node_id):\n try:\n _children = self.tree.children(node_id)\n except NodeIDAbsentError:\n _children = None\n\n return _children", "def get_children(self):\n raise NotImplementedError()", "def get_children(self):\n\n pass", "def getChildren(self):\n return self.directories.values()", "def node_lookup_bulk(self, paths):\n\n placeholders = ','.join('?' for path in paths)\n q = \"select node from nodes where path in (%s)\" % placeholders\n self.execute(q, paths)\n r = self.fetchall()\n if r is not None:\n return [row[0] for row in r]\n return None", "async def get_child_ids(db, post_id):\n sql = \"SELECT id FROM hive_posts WHERE parent_id = :id AND is_deleted = '0'\"\n return await db.query_col(sql, id=post_id)", "def GetChildren(self, *args, **kwargs):\n pass", "def _get_child_from_path(self, path):\n\n keys = path.split(\".\")\n\n this_child = self\n\n for key in keys:\n\n try:\n\n this_child = this_child._get_child(key)\n\n except KeyError:\n\n raise KeyError(\"Child %s not found\" % path)\n\n return this_child", "def get_nodes(self, parent, keys, limit):\n queue = deque(parent.children)\n\n while len(queue) != 0:\n node = queue.popleft()\n if node.real:\n keys.append(node.value)\n\n if len(keys) == limit:\n break\n\n queue.extend(node.children)", "def get_children(self):\n return self.children", "def _fetchObjectChildren(self, obj, obj_path):\n obj_children = []\n path_strings = []\n tree_items = []\n\n is_attr_list = [False] * len(obj_children)\n\n # Object attributes\n # Needed to handle errors while getting object's attributes\n # Related with spyder-ide/spyder#6728 and spyder-ide/spyder#9959\n for attr_name in dir(obj):\n try:\n attr_value = getattr(obj, attr_name)\n obj_children.append((attr_name, attr_value))\n path_strings.append('{}.{}'.format(obj_path, attr_name)\n if obj_path else attr_name)\n is_attr_list.append(True)\n except Exception:\n # Attribute could not be get\n pass\n assert len(obj_children) == len(path_strings), \"sanity check\"\n\n for item, path_str, is_attr in zip(obj_children, path_strings,\n is_attr_list):\n name, child_obj = item\n tree_items.append(TreeItem(child_obj, name, path_str, is_attr))\n\n return tree_items", "def get_all_children_id_list_from_redis_by_pk(gmac_id):\n try:\n gmac = GoogleMapsAddressComponent.objects.get(pk=gmac_id)\n conn = get_redis_connection()\n key = GoogleMapsAddressComponent.get_redis_all_children_key(gmac_id)\n length = conn.llen(key)\n return conn.lrange(key, 0, length)\n except GoogleMapsAddressComponent.DoesNotExist:\n return None", "def _get_children(self, x):\n try:\n return x._pfp__children\n\n except AttributeError:\n return []", "def getchildren(self):\n return self.root.getchildren()", "def iter_child_nodes(predicate, cursor):\n return (c for c in cursor.get_children() if predicate(c))", "def _target_hosts(self, paths):\n for path in paths:\n response = self.api_client.get(path)\n self.assertHttpOK(response)\n content = json.loads(response.content)\n (volume_node,) = content[\"volume\"][\"volume_nodes\"]\n yield volume_node[\"host_label\"]", "def get_catalog_nodes(self, *args, **kwargs):\n # Implemented from kitosid template for -\n # osid.resource.BinHierarchySession.get_bin_nodes\n return self._get_provider_session('catalog_hierarchy_session').get_catalog_nodes(*args, **kwargs)" ]
[ "0.75089127", "0.7355353", "0.6786223", "0.6630263", "0.6162781", "0.59692436", "0.59392494", "0.5907962", "0.58292747", "0.5684854", "0.5647041", "0.5617625", "0.5610342", "0.56037736", "0.55999404", "0.55986404", "0.5597942", "0.5592955", "0.55828", "0.5567267", "0.5557384", "0.5537331", "0.5523012", "0.55208576", "0.5504054", "0.54941094", "0.5474764", "0.54732835", "0.5467073", "0.54586107" ]
0.75423163
1
Fetch Consumer Group offsets from Zookeeper. Also fetch consumer_groups, topics, and partitions if not already specified in consumer_groups.
def _get_zk_consumer_offsets(self, zk_hosts_ports, consumer_groups=None, zk_prefix=''): zk_consumer_offsets = {} # Construct the Zookeeper path pattern # /consumers/[groupId]/offsets/[topic]/[partitionId] zk_path_consumer = zk_prefix + '/consumers/' zk_path_topic_tmpl = zk_path_consumer + '{group}/offsets/' zk_path_partition_tmpl = zk_path_topic_tmpl + '{topic}/' zk_conn = KazooClient(zk_hosts_ports, timeout=self.zk_timeout) zk_conn.start() try: if consumer_groups is None: # If consumer groups aren't specified, fetch them from ZK consumer_groups = {consumer_group: None for consumer_group in self._get_zk_path_children(zk_conn, zk_path_consumer, 'consumer groups')} for consumer_group, topics in consumer_groups.iteritems(): if topics is None: # If topics are't specified, fetch them from ZK zk_path_topics = zk_path_topic_tmpl.format(group=consumer_group) topics = {topic: None for topic in self._get_zk_path_children(zk_conn, zk_path_topics, 'topics')} for topic, partitions in topics.iteritems(): if partitions is not None: partitions = set(partitions) # defend against bad user input else: # If partitions aren't specified, fetch them from ZK zk_path_partitions = zk_path_partition_tmpl.format( group=consumer_group, topic=topic) # Zookeeper returns the partition IDs as strings because # they are extracted from the node path partitions = [int(x) for x in self._get_zk_path_children( zk_conn, zk_path_partitions, 'partitions')] # Fetch consumer offsets for each partition from ZK for partition in partitions: zk_path = (zk_path_partition_tmpl + '{partition}/').format( group=consumer_group, topic=topic, partition=partition) try: consumer_offset = int(zk_conn.get(zk_path)[0]) key = (consumer_group, topic, partition) zk_consumer_offsets[key] = consumer_offset except NoNodeError: self.log.info('No zookeeper node at %s', zk_path) except Exception: self.log.exception('Could not read consumer offset from %s', zk_path) finally: try: zk_conn.stop() zk_conn.close() except Exception: self.log.exception('Error cleaning up Zookeeper connection') return zk_consumer_offsets
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_zk_consumer_offsets(self, zk_hosts_ports, consumer_groups=None, zk_prefix=''):\n zk_consumer_offsets = {}\n\n # Construct the Zookeeper path pattern\n # /consumers/[groupId]/offsets/[topic]/[partitionId]\n zk_path_consumer = zk_prefix + '/consumers/'\n zk_path_topic_tmpl = zk_path_consumer + '{group}/offsets/'\n zk_path_partition_tmpl = zk_path_topic_tmpl + '{topic}/'\n\n zk_conn = KazooClient(zk_hosts_ports, timeout=self._zk_timeout)\n zk_conn.start()\n try:\n if consumer_groups is None:\n # If consumer groups aren't specified, fetch them from ZK\n consumer_groups = {consumer_group: None for consumer_group in\n self._get_zk_path_children(zk_conn, zk_path_consumer, 'consumer groups')}\n\n for consumer_group, topics in consumer_groups.iteritems():\n if topics is None:\n # If topics are't specified, fetch them from ZK\n zk_path_topics = zk_path_topic_tmpl.format(group=consumer_group)\n topics = {topic: None for topic in\n self._get_zk_path_children(zk_conn, zk_path_topics, 'topics')}\n consumer_groups[consumer_group] = topics\n\n for topic, partitions in topics.iteritems():\n if partitions is not None:\n partitions = set(partitions) # defend against bad user input\n else:\n # If partitions aren't specified, fetch them from ZK\n zk_path_partitions = zk_path_partition_tmpl.format(\n group=consumer_group, topic=topic)\n # Zookeeper returns the partition IDs as strings because\n # they are extracted from the node path\n partitions = [int(x) for x in self._get_zk_path_children(\n zk_conn, zk_path_partitions, 'partitions')]\n consumer_groups[consumer_group][topic] = partitions\n\n # Fetch consumer offsets for each partition from ZK\n for partition in partitions:\n zk_path = (zk_path_partition_tmpl + '{partition}/').format(\n group=consumer_group, topic=topic, partition=partition)\n try:\n consumer_offset = int(zk_conn.get(zk_path)[0])\n key = (consumer_group, topic, partition)\n zk_consumer_offsets[key] = consumer_offset\n except NoNodeError:\n self.log.info('No zookeeper node at %s', zk_path)\n except Exception:\n self.log.exception('Could not read consumer offset from %s', zk_path)\n finally:\n try:\n zk_conn.stop()\n zk_conn.close()\n except Exception:\n self.log.exception('Error cleaning up Zookeeper connection')\n return zk_consumer_offsets, consumer_groups", "def _get_kafka_consumer_offsets(self, instance, consumer_groups):\n consumer_offsets = {}\n topics = defaultdict(set)\n\n cli = self._get_kafka_client(instance)\n\n for consumer_group, topic_partitions in consumer_groups.iteritems():\n try:\n coordinator_id = self._get_group_coordinator(cli, consumer_group)\n if coordinator_id:\n offsets = self._get_consumer_offsets(cli, consumer_group, topic_partitions, coordinator_id)\n else:\n offsets = self._get_consumer_offsets(cli, consumer_group, topic_partitions)\n self.log.info(\"unable to find group coordinator for %s\", consumer_group)\n\n for (topic, partition), offset in offsets.iteritems():\n topics[topic].update([partition])\n key = (consumer_group, topic, partition)\n consumer_offsets[key] = offset\n except Exception:\n self.log.exception('Could not read consumer offsets from kafka.')\n\n return consumer_offsets, topics", "def _list_consumer_group_offsets_send_request(self, group_id, group_coordinator_id, partitions=None):\n version = self.kafka_client._matching_api_version(OffsetFetchRequest)\n if version <= 3:\n if partitions is None:\n if version <= 1:\n raise ValueError(\n \"\"\"OffsetFetchRequest_v{} requires specifying the\n partitions for which to fetch offsets. 
Omitting the\n partitions is only supported on brokers >= 0.10.2.\n For details, see KIP-88.\"\"\".format(\n version\n )\n )\n topics_partitions = None\n else:\n # transform from [TopicPartition(\"t1\", 1), TopicPartition(\"t1\", 2)] to [(\"t1\", [1, 2])]\n topics_partitions_dict = defaultdict(set)\n for topic, partition in partitions:\n topics_partitions_dict[topic].add(partition)\n topics_partitions = list(six.iteritems(topics_partitions_dict))\n request = OffsetFetchRequest[version](group_id, topics_partitions)\n else:\n raise NotImplementedError(\n \"Support for OffsetFetchRequest_v{} has not yet been added to KafkaAdminClient.\".format(version)\n )\n return self._send_request_to_node(group_coordinator_id, request, wakeup=False)", "def _get_consumer_offsets(self):\n # Store the list of futures on the object because some of the callbacks create/store additional futures and they\n # don't have access to variables scoped to this method, only to the object scope\n self._consumer_futures = []\n\n if self._monitor_unlisted_consumer_groups:\n for broker in self.kafka_client._client.cluster.brokers():\n # FIXME: This is using a workaround to skip socket wakeup, which causes blocking\n # (see https://github.com/dpkp/kafka-python/issues/2286).\n # Once https://github.com/dpkp/kafka-python/pull/2335 is merged in, we can use the official\n # implementation for this function instead.\n list_groups_future = self._list_consumer_groups_send_request(broker.nodeId)\n list_groups_future.add_callback(self._list_groups_callback, broker.nodeId)\n self._consumer_futures.append(list_groups_future)\n elif self._consumer_groups:\n self.validate_consumer_groups()\n for consumer_group in self._consumer_groups:\n find_coordinator_future = self._find_coordinator_id_send_request(consumer_group)\n find_coordinator_future.add_callback(self._find_coordinator_callback, consumer_group)\n self._consumer_futures.append(find_coordinator_future)\n else:\n raise ConfigurationError(\n \"Cannot fetch consumer offsets because no consumer_groups are specified and \"\n \"monitor_unlisted_consumer_groups is %s.\" % self._monitor_unlisted_consumer_groups\n )\n\n # Loop until all futures resolved.\n self.kafka_client._wait_for_futures(self._consumer_futures)\n del self._consumer_futures # since it's reset on every check run, no sense holding the reference between runs", "def _list_groups_callback(self, broker_id, response):\n for consumer_group, group_type in self.kafka_client._list_consumer_groups_process_response(response):\n # consumer groups from Kafka < 0.9 that store their offset in Kafka don't use Kafka for group-coordination\n # so their group_type is empty\n if group_type in ('consumer', ''):\n single_group_offsets_future = self._list_consumer_group_offsets_send_request(\n group_id=consumer_group, group_coordinator_id=broker_id\n )\n single_group_offsets_future.add_callback(self._single_group_offsets_callback, consumer_group)\n self._consumer_futures.append(single_group_offsets_future)", "def _get_offsets_based_on_config(self, client, zk_conn, zk_prefix, log_name):\n\n zk_path_partition_tmpl = zk_prefix + '/consumers/%s/offsets/%s/'\n zk_path_offset_tmpl = zk_path_partition_tmpl + '%s'\n\n consumer_offsets = defaultdict(dict)\n topic, group_id = log_name[\"topic\"], log_name[\"group\"]\n partitions = self._get_all_partitions(client, topic)\n\n # Remember the topic partitions that we've see so that we can\n # look up their broker offsets later\n for partition in partitions:\n zk_path_offset = zk_path_offset_tmpl % (group_id, 
topic, partition)\n try:\n offset = int(zk_conn.get(zk_path_offset)[0])\n consumer_offsets[(topic, partition)] = offset\n except NoNodeError:\n logger.warn('No zookeeper node at %s' % zk_path_offset)\n except Exception:\n logger.exception('Could not read consumer offset from %s' % zk_path_offset)\n\n return consumer_offsets", "def _find_coordinator_callback(self, consumer_group, response):\n coordinator_id = self.kafka_client._find_coordinator_id_process_response(response)\n topics = self._consumer_groups[consumer_group]\n if not topics:\n topic_partitions = None # None signals to fetch all known offsets for the consumer group\n else:\n # transform [(\"t1\", [1, 2])] into [TopicPartition(\"t1\", 1), TopicPartition(\"t1\", 2)]\n topic_partitions = []\n for topic, partitions in topics.items():\n if not partitions: # If partitions aren't specified, fetch all partitions in the topic\n partitions = self.kafka_client._client.cluster.partitions_for_topic(topic)\n topic_partitions.extend([TopicPartition(topic, p) for p in partitions])\n single_group_offsets_future = self._list_consumer_group_offsets_send_request(\n group_id=consumer_group, group_coordinator_id=coordinator_id, partitions=topic_partitions\n )\n single_group_offsets_future.add_callback(self._single_group_offsets_callback, consumer_group)\n self._consumer_futures.append(single_group_offsets_future)", "def _single_group_offsets_callback(self, consumer_group, response):\n single_group_offsets = self.kafka_client._list_consumer_group_offsets_process_response(response)\n self.log.debug(\"Single group offsets: %s\", single_group_offsets)\n for (topic, partition), (offset, _metadata) in single_group_offsets.items():\n # If the OffsetFetchRequest explicitly specified partitions, the offset could returned as -1, meaning there\n # is no recorded offset for that partition... 
for example, if the partition doesn't exist in the cluster.\n # So ignore it.\n if offset == -1:\n self.kafka_client._client.cluster.request_update() # force metadata update on next poll()\n continue\n key = (consumer_group, topic, partition)\n self._consumer_offsets[key] = offset", "def _get_broker_offsets(self, instance, topics):\n\n # Connect to Kafka\n highwater_offsets = {}\n topic_partitions_without_a_leader = []\n topics_to_fetch = defaultdict(set)\n cli = self._get_kafka_client(instance)\n\n for topic, partitions in topics.iteritems():\n # if no partitions are provided\n # we're falling back to all available partitions (?)\n if len(partitions) == 0:\n partitions = cli.cluster.available_partitions_for_topic(topic)\n topics_to_fetch[topic].update(partitions)\n\n\n leader_tp = defaultdict(lambda: defaultdict(set))\n for topic, partitions in topics_to_fetch.iteritems():\n for partition in partitions:\n partition_leader = cli.cluster.leader_for_partition(TopicPartition(topic, partition))\n if partition_leader is not None and partition_leader > -1:\n leader_tp[partition_leader][topic].update([partition])\n\n max_offsets = 1\n for node_id, tps in leader_tp.iteritems():\n # Construct the OffsetRequest\n request = OffsetRequest[0](\n replica_id=-1,\n topics=[\n (topic, [\n (partition, OffsetResetStrategy.LATEST, max_offsets) for partition in partitions])\n for topic, partitions in tps.iteritems()])\n\n response = self._make_blocking_req(cli, request, node_id=node_id)\n offsets, unled = self._process_highwater_offsets(request, instance, node_id, response)\n highwater_offsets.update(offsets)\n topic_partitions_without_a_leader.extend(unled)\n\n return highwater_offsets, list(set(topic_partitions_without_a_leader))", "def _get_coordinator_for_group(self, consumer_group):\n if self.consumer_group_to_brokers.get(consumer_group) is None:\n yield self.load_consumer_metadata_for_group(consumer_group)\n\n returnValue(self.consumer_group_to_brokers.get(consumer_group))", "def _report_consumer_offsets_and_lag(self, contexts_limit):\n reported_contexts = 0\n self.log.debug(\"Reporting consumer offsets and lag metrics\")\n for (consumer_group, topic, partition), consumer_offset in self._consumer_offsets.items():\n if reported_contexts >= contexts_limit:\n self.log.debug(\n \"Reported contexts number %s greater than or equal to contexts limit of %s, returning\",\n str(reported_contexts),\n str(contexts_limit),\n )\n return\n consumer_group_tags = ['topic:%s' % topic, 'partition:%s' % partition, 'consumer_group:%s' % consumer_group]\n consumer_group_tags.extend(self._custom_tags)\n\n partitions = self.kafka_client._client.cluster.partitions_for_topic(topic)\n self.log.debug(\"Received partitions %s for topic %s\", partitions, topic)\n if partitions is not None and partition in partitions:\n # report consumer offset if the partition is valid because even if leaderless the consumer offset will\n # be valid once the leader failover completes\n self.gauge('consumer_offset', consumer_offset, tags=consumer_group_tags)\n reported_contexts += 1\n\n if (topic, partition) not in self._highwater_offsets:\n self.log.warning(\n \"Consumer group: %s has offsets for topic: %s partition: %s, but no stored highwater offset \"\n \"(likely the partition is in the middle of leader failover) so cannot calculate consumer lag.\",\n consumer_group,\n topic,\n partition,\n )\n continue\n producer_offset = self._highwater_offsets[(topic, partition)]\n consumer_lag = producer_offset - consumer_offset\n if reported_contexts < 
contexts_limit:\n self.gauge('consumer_lag', consumer_lag, tags=consumer_group_tags)\n reported_contexts += 1\n\n if consumer_lag < 0:\n # this will effectively result in data loss, so emit an event for max visibility\n title = \"Negative consumer lag for group: {}.\".format(consumer_group)\n message = (\n \"Consumer group: {}, topic: {}, partition: {} has negative consumer lag. This should never \"\n \"happen and will result in the consumer skipping new messages until the lag turns \"\n \"positive.\".format(consumer_group, topic, partition)\n )\n key = \"{}:{}:{}\".format(consumer_group, topic, partition)\n self.send_event(title, message, consumer_group_tags, 'consumer_lag', key, severity=\"error\")\n self.log.debug(message)\n\n if reported_contexts >= contexts_limit:\n continue\n if not self._data_streams_enabled:\n continue\n timestamps = self._broker_timestamps[\"{}_{}\".format(topic, partition)]\n # The producer timestamp can be not set if there was an error fetching broker offsets.\n producer_timestamp = timestamps.get(producer_offset, None)\n consumer_timestamp = self._get_interpolated_timestamp(timestamps, consumer_offset)\n if consumer_timestamp is None or producer_timestamp is None:\n continue\n lag = producer_timestamp - consumer_timestamp\n self.gauge('consumer_lag_seconds', lag, tags=consumer_group_tags)\n reported_contexts += 1\n else:\n if partitions is None:\n msg = (\n \"Consumer group: %s has offsets for topic: %s, partition: %s, but that topic has no partitions \"\n \"in the cluster, so skipping reporting these offsets.\"\n )\n else:\n msg = (\n \"Consumer group: %s has offsets for topic: %s, partition: %s, but that topic partition isn't \"\n \"included in the cluster partitions, so skipping reporting these offsets.\"\n )\n self.log.warning(msg, consumer_group, topic, partition)\n self.kafka_client._client.cluster.request_update() # force metadata update on next poll()", "def send_offset_fetch_request(self, group, payloads=None,\n fail_on_error=True, callback=None):\n encoder = partial(KafkaCodec.encode_offset_fetch_request,\n group=group)\n decoder = KafkaCodec.decode_offset_fetch_response\n resps = yield self._send_broker_aware_request(\n payloads, encoder, decoder, consumer_group=group)\n\n returnValue(self._handle_responses(\n resps, fail_on_error, callback, group))", "def fetch_offset_limits(self, offsets_before, max_offsets=1):\n requests = defaultdict(list) # one request for each broker\n for part in itervalues(self.partitions):\n requests[part.leader].append(PartitionOffsetRequest(\n self.name, part.id, offsets_before, max_offsets\n ))\n output = {}\n for broker, reqs in iteritems(requests):\n res = broker.request_offset_limits(reqs)\n output.update(res.topics[self.name])\n return output", "def _get_highwater_offsets(self):\n highwater_futures = [] # No need to store on object because the callbacks don't create additional futures\n\n # If we aren't fetching all broker highwater offsets, then construct the unique set of topic partitions for\n # which this run of the check has at least once saved consumer offset. 
This is later used as a filter for\n # excluding partitions.\n if not self._monitor_all_broker_highwatermarks:\n tps_with_consumer_offset = {(topic, partition) for (_, topic, partition) in self._consumer_offsets}\n\n for batch in self.batchify(self.kafka_client._client.cluster.brokers(), self._broker_requests_batch_size):\n for broker in batch:\n broker_led_partitions = self.kafka_client._client.cluster.partitions_for_broker(broker.nodeId)\n if broker_led_partitions is None:\n continue\n\n # Take the partitions for which this broker is the leader and group them by topic in order to construct\n # the OffsetRequest while simultaneously filtering out partitions we want to exclude\n partitions_grouped_by_topic = defaultdict(list)\n for topic, partition in broker_led_partitions:\n # No sense fetching highwater offsets for internal topics\n if topic not in KAFKA_INTERNAL_TOPICS and (\n self._monitor_all_broker_highwatermarks or (topic, partition) in tps_with_consumer_offset\n ):\n partitions_grouped_by_topic[topic].append(partition)\n\n # Construct the OffsetRequest\n max_offsets = 1\n request = OffsetRequest[0](\n replica_id=-1,\n topics=[\n (topic, [(partition, OffsetResetStrategy.LATEST, max_offsets) for partition in partitions])\n for topic, partitions in partitions_grouped_by_topic.items()\n ],\n )\n\n # We can disable wakeup here because it is the same thread doing both polling and sending. Also, it\n # is possible that the wakeup itself could block if a large number of sends were processed beforehand.\n highwater_future = self._send_request_to_node(node_id=broker.nodeId, request=request, wakeup=False)\n\n highwater_future.add_callback(self._highwater_offsets_callback)\n highwater_futures.append(highwater_future)\n\n # Loop until all futures resolved.\n self.kafka_client._wait_for_futures(highwater_futures)", "def load_consumer_metadata_for_group(self, group):\n group = _coerce_consumer_group(group)\n log.debug(\"%r: load_consumer_metadata_for_group: %r\", self, group)\n\n # If we are already loading the metadata for this group, then\n # just return the outstanding deferred\n if group in self.coordinator_fetches:\n return self.coordinator_fetches[group]\n\n # No outstanding request, create a new one\n requestId = self._next_id()\n request = KafkaCodec.encode_consumermetadata_request(\n self._clientIdBytes, requestId, group)\n\n # Callbacks for the request deferred...\n def _handleConsumerMetadataResponse(response, group):\n # Clear the outstanding fetch\n self.coordinator_fetches.pop(group, None)\n # Decode the response (returns ConsumerMetadataResponse)\n c_m_resp = KafkaCodec.decode_consumermetadata_response(response)\n log.debug(\"%r: c_m_resp: %r\", self, c_m_resp)\n if c_m_resp.error:\n # Raise the appropriate error\n resp_err = kafka_errors.get(\n c_m_resp.error, UnknownError)(c_m_resp)\n raise resp_err\n\n self.consumer_group_to_brokers[group] = \\\n BrokerMetadata(c_m_resp.node_id, c_m_resp.host,\n c_m_resp.port)\n return True\n\n def _handleConsumerMetadataErr(err, group):\n # Clear the outstanding fetch\n self.coordinator_fetches.pop(group, None)\n log.error(\"Failed to retrieve consumer metadata \"\n \"for group: %s Error:%r\", group, err)\n # Clear any stored value for the group's coordinator\n self.reset_consumer_group_metadata(group)\n raise ConsumerCoordinatorNotAvailableError(\n \"Coordinator for group: %s not available\" % (group))\n\n # Send the request, add the handlers\n d = self._send_broker_unaware_request(requestId, request)\n # Save the deferred under the fetches 
for this group\n self.coordinator_fetches[group] = d\n d.addCallback(_handleConsumerMetadataResponse, group)\n d.addErrback(_handleConsumerMetadataErr, group)\n return d", "def _highwater_offsets_callback(self, response):\n if type(response) not in OffsetResponse:\n raise RuntimeError(\"response type should be OffsetResponse, but instead was %s.\" % type(response))\n for topic, partitions_data in response.topics:\n for partition, error_code, offsets in partitions_data:\n error_type = kafka_errors.for_code(error_code)\n if error_type is kafka_errors.NoError:\n self._highwater_offsets[(topic, partition)] = offsets[0]\n if self._data_streams_enabled:\n timestamps = self._broker_timestamps[\"{}_{}\".format(topic, partition)]\n timestamps[offsets[0]] = time()\n # If there's too many timestamps, we delete the oldest\n if len(timestamps) > MAX_TIMESTAMPS:\n del timestamps[min(timestamps)]\n elif error_type is kafka_errors.NotLeaderForPartitionError:\n self.log.warning(\n \"Kafka broker returned %s (error_code %s) for topic %s, partition: %s. This should only happen \"\n \"if the broker that was the partition leader when kafka_admin_client last fetched metadata is \"\n \"no longer the leader.\",\n error_type.message,\n error_type.errno,\n topic,\n partition,\n )\n self.kafka_client._client.cluster.request_update() # force metadata update on next poll()\n elif error_type is kafka_errors.UnknownTopicOrPartitionError:\n self.log.warning(\n \"Kafka broker returned %s (error_code %s) for topic: %s, partition: %s. This should only \"\n \"happen if the topic is currently being deleted or the check configuration lists non-existent \"\n \"topic partitions.\",\n error_type.message,\n error_type.errno,\n topic,\n partition,\n )\n else:\n raise error_type(\n \"Unexpected error encountered while attempting to fetch the highwater offsets for topic: %s, \"\n \"partition: %s.\" % (topic, partition)\n )", "def encode_offset_fetch_request(cls, group, payloads, from_kafka=False):\n version = 1 if from_kafka else 0\n return kafka.protocol.commit.OffsetFetchRequest[version](\n consumer_group=group,\n topics=[(\n topic,\n list(topic_payloads.keys()))\n for topic, topic_payloads in six.iteritems(group_by_topic_and_partition(payloads))])", "def get_offset_start(brokers, topic=mjolnir.kafka.TOPIC_RESULT):\n consumer = kafka.KafkaConsumer(bootstrap_servers=brokers, api_version=mjolnir.kafka.BROKER_VERSION)\n parts = consumer.partitions_for_topic(topic)\n if parts is None:\n return None\n partitions = [kafka.TopicPartition(topic, p) for p in parts]\n consumer.assign(partitions)\n return [consumer.position(p) for p in partitions]", "def _get_highwater_offsets(self, kafka_hosts_ports):\n kafka_conn = SimpleClient(kafka_hosts_ports, timeout=self.kafka_timeout)\n try:\n broker_topics_partitions = kafka_conn.topics_to_brokers.keys()\n # batch a bunch of requests into a single network call\n offsets_request = [OffsetRequestPayload(topic, partition, -1, 1)\n for topic, partition in broker_topics_partitions]\n offsets_response = kafka_conn.send_offset_request(offsets_request)\n highwater_offsets = {(x.topic, x.partition): x.offsets[0] for x in offsets_response}\n finally:\n try:\n kafka_conn.close()\n except Exception:\n self.log.exception('Error cleaning up Kafka connection')\n return highwater_offsets", "def describe_group(args, topic):\n global bootstrap\n out = ()\n\n consumer = KafkaConsumer(\n bootstrap_servers=bootstrap,\n group_id=\"backbeat-replication-group-{0}\".format(args.destination),\n enable_auto_commit=False,\n )\n 
topics = consumer.topics()\n if not topic in topics:\n return False\n\n for part in consumer.partitions_for_topic(topic):\n tp = TopicPartition(topic, part)\n consumer.assign([tp])\n committed = consumer.committed(tp)\n consumer.seek_to_end(tp)\n last_offset = consumer.position(tp)\n try:\n out += (\n {\n \"topic\": topic,\n \"partition\": part,\n \"committed\": committed,\n \"last_offset\": last_offset,\n \"lag\": (last_offset - committed),\n },\n )\n except TypeError:\n sys.stderr.write(\"bad/missing info on consumer group (doesn't exist?)\\n\")\n sys.exit(1)\n\n consumer.close(autocommit=False)\n return out", "def start_exited_consumers(kafka, p):\n for i in TOPICS[\"data\"]:\n kafka.initialize_consumer(topic=i[\"topic\"], config=i[\"config\"], partition=int(i[\"partition\"]))", "def send_offset_commit_request(self, group, payloads=None,\n fail_on_error=True, callback=None,\n group_generation_id=-1,\n consumer_id=''):\n group = _coerce_consumer_group(group)\n encoder = partial(KafkaCodec.encode_offset_commit_request,\n group=group, group_generation_id=group_generation_id,\n consumer_id=consumer_id)\n decoder = KafkaCodec.decode_offset_commit_response\n resps = yield self._send_broker_aware_request(\n payloads, encoder, decoder, consumer_group=group)\n\n returnValue(self._handle_responses(\n resps, fail_on_error, callback, group))", "def encode_offset_commit_request_kafka(cls, group, payloads):\n return kafka.protocol.commit.OffsetCommitRequest[2](\n consumer_group=group,\n consumer_group_generation_id=kafka.protocol.commit.OffsetCommitRequest[2].DEFAULT_GENERATION_ID,\n consumer_id='',\n retention_time=kafka.protocol.commit.OffsetCommitRequest[2].DEFAULT_RETENTION_TIME,\n topics=[(\n topic,\n [(\n partition,\n payload.offset,\n payload.metadata)\n for partition, payload in six.iteritems(topic_payloads)])\n for topic, topic_payloads in six.iteritems(group_by_topic_and_partition(payloads))])", "def dump_from_offset(args, topic, part, offset, end, nownow=None):\n global bootstrap, globals\n\n consumer = KafkaConsumer(\n bootstrap_servers=bootstrap,\n group_id=\"backbeat-replication-group-{0}\".format(args.destination),\n enable_auto_commit=False,\n consumer_timeout_ms=int(args.timeout),\n )\n partition = TopicPartition(topic, part)\n consumer.assign([partition])\n consumer.seek(partition, offset)\n\n ttl_bytes = 0\n o_count = 0\n queuelist = {}\n for msg in consumer:\n if msg.offset > end:\n break\n else:\n logline = json.loads(msg.value)\n if args.bucket and logline[\"bucket\"] != args.bucket:\n continue\n if \"canary\" in logline:\n continue\n value = json.loads(logline[\"value\"])\n for backend in value[\"replicationInfo\"][\"backends\"]:\n if backend[\"site\"] == args.destination:\n queue_time = nownow - (float(msg.timestamp) / 1000)\n if globals[\"max_q_time\"] < queue_time:\n globals[\"max_q_time\"] = queue_time\n\n line = \"\"\n if args.csv:\n line += '\"{0}'.format(value[\"dataStoreName\"])\n if args.bucket == False:\n line += \":{0}\".format(logline[\"bucket\"])\n line += '\", {0}, \"{1}\"'.format(part, value[\"key\"])\n if (\n \"isDeleteMarker\" in value\n and value[\"isDeleteMarker\"] == True\n ):\n line += ', \"(delete)\"'\n else:\n line += \", {0}\".format(int(value[\"content-length\"]))\n ttl_bytes += int(value[\"content-length\"])\n line += \", {0:.2f}\".format(round(queue_time, 2))\n o_count += 1\n queuelist[\n \"{0}{1}\".format(str(int(msg.timestamp)), value[\"key\"])\n ] = line\n break # we hit the backend we're looking for. 
stop.\n else:\n line += \"src: {0}\".format(value[\"dataStoreName\"])\n if args.bucket == False:\n line += \":{0}\".format(logline[\"bucket\"])\n line += ', part: {0}, key: \"{1}\"'.format(part, value[\"key\"])\n if (\n \"isDeleteMarker\" in value\n and value[\"isDeleteMarker\"] == True\n ):\n line += \" (delete)\"\n else:\n line += \", size: {0}\".format(\n sizeof_fmt10(int(value[\"content-length\"]))\n )\n ttl_bytes += int(value[\"content-length\"])\n line += \", {0:.2f} sec\".format(round(queue_time, 2))\n o_count += 1\n queuelist[\n \"{0}{1}\".format(str(int(msg.timestamp)), value[\"key\"])\n ] = line\n break # we hit the backend we're looking for. stop.\n\n return o_count, ttl_bytes, queuelist", "def test_kafka_group_io_dataset_auto_offset_reset():\n\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgglobaloffsetearliest\",\n servers=\"localhost:9092\",\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"auto.offset.reset=earliest\",\n ],\n )\n assert np.all(\n sorted(k.numpy() for (k, _) in dataset)\n == sorted((\"D\" + str(i)).encode() for i in range(100))\n )\n\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgglobaloffsetlatest\",\n servers=\"localhost:9092\",\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"auto.offset.reset=latest\",\n ],\n )\n assert np.all(sorted(k.numpy() for (k, _) in dataset) == [])\n\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgtopicoffsetearliest\",\n servers=\"localhost:9092\",\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"conf.topic.auto.offset.reset=earliest\",\n ],\n )\n assert np.all(\n sorted(k.numpy() for (k, _) in dataset)\n == sorted((\"D\" + str(i)).encode() for i in range(100))\n )\n\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgtopicoffsetlatest\",\n servers=\"localhost:9092\",\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"conf.topic.auto.offset.reset=latest\",\n ],\n )\n assert np.all(sorted(k.numpy() for (k, _) in dataset) == [])", "def test_describe_consumer_group_does_not_exist(kafka_admin_client):\n with pytest.raises(GroupCoordinatorNotAvailableError):\n group_description = kafka_admin_client.describe_consumer_groups(['test'])", "def encode_offset_commit_request(cls, group, payloads):\n return kafka.protocol.commit.OffsetCommitRequest[0](\n consumer_group=group,\n topics=[(\n topic,\n [(\n partition,\n payload.offset,\n payload.metadata)\n for partition, payload in six.iteritems(topic_payloads)])\n for topic, topic_payloads in six.iteritems(group_by_topic_and_partition(payloads))])", "def decode_offset_fetch_response(cls, response):\n return [\n kafka.structs.OffsetFetchResponsePayload(\n topic, partition, offset, metadata, error\n )\n for topic, partitions in response.topics\n for partition, offset, metadata, error in partitions\n ]", "def test_describe_consumer_group_exists(kafka_admin_client, kafka_consumer_factory, topic):\n consumers = {}\n stop = {}\n threads = {}\n random_group_id = 'test-group-' + random_string(6)\n group_id_list = [random_group_id, random_group_id + '_2']\n generations = {group_id_list[0]: set(), group_id_list[1]: set()}\n def consumer_thread(i, group_id):\n assert i not in consumers\n assert i not in stop\n stop[i] 
= Event()\n consumers[i] = kafka_consumer_factory(group_id=group_id)\n while not stop[i].is_set():\n consumers[i].poll(20)\n consumers[i].close()\n consumers[i] = None\n stop[i] = None\n\n num_consumers = 3\n for i in range(num_consumers):\n group_id = group_id_list[i % 2]\n t = Thread(target=consumer_thread, args=(i, group_id,))\n t.start()\n threads[i] = t\n\n try:\n timeout = time() + 35\n while True:\n for c in range(num_consumers):\n\n # Verify all consumers have been created\n if c not in consumers:\n break\n\n # Verify all consumers have an assignment\n elif not consumers[c].assignment():\n break\n\n # If all consumers exist and have an assignment\n else:\n\n info('All consumers have assignment... checking for stable group')\n # Verify all consumers are in the same generation\n # then log state and break while loop\n\n for consumer in consumers.values():\n generations[consumer.config['group_id']].add(consumer._coordinator._generation.generation_id)\n\n is_same_generation = any([len(consumer_generation) == 1 for consumer_generation in generations.values()])\n\n # New generation assignment is not complete until\n # coordinator.rejoining = False\n rejoining = any([consumer._coordinator.rejoining\n for consumer in list(consumers.values())])\n\n if not rejoining and is_same_generation:\n break\n else:\n sleep(1)\n assert time() < timeout, \"timeout waiting for assignments\"\n\n info('Group stabilized; verifying assignment')\n output = kafka_admin_client.describe_consumer_groups(group_id_list)\n assert len(output) == 2\n consumer_groups = set()\n for consumer_group in output:\n assert(consumer_group.group in group_id_list)\n if consumer_group.group == group_id_list[0]:\n assert(len(consumer_group.members) == 2)\n else:\n assert(len(consumer_group.members) == 1)\n for member in consumer_group.members:\n assert(member.member_metadata.subscription[0] == topic)\n assert(member.member_assignment.assignment[0][0] == topic)\n consumer_groups.add(consumer_group.group)\n assert(sorted(list(consumer_groups)) == group_id_list)\n finally:\n info('Shutting down %s consumers', num_consumers)\n for c in range(num_consumers):\n info('Stopping consumer %s', c)\n stop[c].set()\n threads[c].join()\n threads[c] = None", "def _setup_consumer(self):\n # <WTF> https://github.com/dpkp/kafka-python/issues/601\n self.available_topics = self.client.topics()\n # </WTF>\n\n # might as well use it\n assert self.topic in self.available_topics\n\n if (self.start_params is None) != (self.end_params is None):\n raise ValueError(\"Both start and end params must be set or both must be None\")\n\n if self.start_params is None:\n # setup partitions to read through\n # TODO not checked with multiple partitions since inheriting from foxglove\n # An offset is assigned to make repeatability (via a locking file) possible later on.\n # and it's easier to terminate the fetch loop this way.\n p_id = self.client.partitions_for_topic(self.topic)\n topic_partitions = [TopicPartition(topic=self.topic, partition=p) for p in list(p_id)]\n starts = self.client.beginning_offsets(topic_partitions)\n ends = self.client.end_offsets(topic_partitions)\n\n self.start_p_offsets = {\n tp: OffsetAndTimestamp(offset=offset, timestamp=None)\n for tp, offset in starts.items()\n }\n self.end_p_offsets = {\n tp: OffsetAndTimestamp(offset=offset - 1, timestamp=None)\n for tp, offset in ends.items()\n }\n\n else:\n # TODO - this code was inherited from Foxglove and hasn't be checked through\n # setup start and end partitions and offsets\n # 
self.client.seek_to_beginning()\n # datetime is only start/end implemented\n assert isinstance(self.start_params, datetime) and isinstance(self.end_params, datetime)\n start = int(self.start_params.timestamp() * 1000)\n end = int(self.end_params.timestamp() * 1000)\n\n partitions = self.client.partitions_for_topic(self.topic)\n tx = {TopicPartition(topic=self.topic, partition=p): start for p in list(partitions)}\n self.start_p_offsets = self.client.offsets_for_times(tx)\n\n # if you give a timestamp after the last record it returns None\n for tp, offset_details in self.start_p_offsets.items():\n if offset_details is None:\n raise ValueError(\"Start date outside of available messages\")\n\n tx = {TopicPartition(topic=self.topic, partition=p): end for p in list(partitions)}\n self.end_p_offsets = self.client.offsets_for_times(tx)\n\n # as above - out of range, for end offset give something useful\n for tp, offset_details in self.end_p_offsets.items():\n if offset_details is None:\n # go to last message. I'm not 100% sure this is correct\n end_offsets = self.client.end_offsets([tp])\n offset = end_offsets[tp] - 1\n self.end_p_offsets[tp] = OffsetAndTimestamp(offset=offset, timestamp=None)" ]
[ "0.7943811", "0.7267268", "0.7246765", "0.71853805", "0.71067864", "0.67003626", "0.663087", "0.6584401", "0.62624973", "0.6203875", "0.6060059", "0.6012397", "0.5937667", "0.5835406", "0.57882696", "0.57616556", "0.56687766", "0.56449854", "0.5552708", "0.5517917", "0.54876196", "0.5362944", "0.53622097", "0.53610003", "0.52980214", "0.52169186", "0.5177145", "0.5138825", "0.5126622", "0.5030959" ]
0.79819244
0
Widget-specific CSS class
def css_class(self): css_type = self.widget_type css_title = normalizer.normalize(self.data.title) return ('faceted-checkboxtree-widget ' 'faceted-{0}-widget section-{1}').format(css_type, css_title)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def style_widget( self, a_widget ):\n widget_type = type( a_widget )\n\n if widget_type == Tk.Label:\n self.style_label( a_widget )\n # elif widget_type == class( 'tkinter.Button' ):\n\n elif widget_type == Tk.Button:\n #rint( \" type widget Tk.Button\" )\n self.style_button( a_widget )\n\n elif widget_type == Tk.Entry:\n #rint( \" type widget Tk.Entry\" )\n self.style_entry( a_widget )\n\n elif widget_type == Tk.Frame:\n #rint( \" type widget Tk.Entry\" )\n self.style_frame( a_widget )\n\n elif widget_type == Tk.LabelFrame:\n #rint( \" type widget Tk.Entry\" )\n self.style_labelframe( a_widget )\n\n elif widget_type == Tk.Checkbutton:\n #rint( \" type widget Tk.Entry\" )\n self.style_checkbutton( a_widget )\n\n elif widget_type == Tk.ttk.Combobox:\n #rint( \" type widget Tk.Entry\" )\n self.style_combobox( a_widget )\n\n else:\n print( \"******************** no dispacth for this type widget\" )\n print( f\"style_widget -- str>>{widget_type}<< repr >>{repr( a_widget)}<<\" )", "def getWidget(self):", "def CSSClasses(self):", "def styled_widget(translated_name):\n\n def render(self, value):\n if getattr(self, 'pubsub_is_registered', False):\n pubsub_singleton.publish(RENDER_TOPIC, self)\n\n def inner(cls):\n def cls_render(value):\n if RENDER_TOPIC in pubsub_singleton:\n pubsub_singleton.publish(RENDER_TOPIC, cls)\n\n if not hasattr(cls, 'SETTINGS'):\n cls.SETTINGS = {}\n for key, value in style_settings(translated_name).items():\n cls.SETTINGS.setdefault(key, value)\n cls.on_cls_setting_colors = cls_render\n cls.on_cls_setting_fonts = cls_render\n cls.on_cls_setting_style_defines = cls_render\n cls.on_setting_colors = render\n cls.on_setting_fonts = render\n cls.on_setting_style_defines = render\n return cls\n return inner", "def create_widgets(self):", "def create_widgets( self ):", "def set_style(self):", "def getWidgetClass(self):\n\t\treturn AbstraccionWindowWidget", "def get_widget_cls(cls, **kwargs):\n return cls.widget_class", "def setWidget(self, widget: QtWidgets.QWidget):\n super().setWidget(widget)\n if globalstuff.theme == 'dark':\n w = self.widget()\n w.setPalette(globalstuff.textpal)\n if hasattr(w, 'TreeWidget'):\n w.TreeWidget.setStyleSheet(globalstuff.treeqss)", "def XPGetWidgetClassFunc(inWidgetClass):\n pass", "def get_css_class(self):\n\n return None", "def init_widget(self):", "def getWidgetClassName(self, tagName):\n if tagName == \"labelframe\":\n className = \"TkLabelFrame\"\n elif tagName == \"optionmenu\":\n className = \"TkOptionMenu\"\n elif tagName == \"toplevel\":\n className = \"TkToplevel\"\n else:\n className = \"Tk\" + tagName.capitalize()\n\n return className", "def create_widget(self):\n pass", "def get_widget(self):\n\t\treturn None", "def decorate(self,widget,level): \n\n w = widget\n if level == False: return\n \n if type(w.style.background) != int:\n w.background = Background(w,self) \n \n if level == 'app': return\n \n for k,v in list(w.style.__dict__.items()):\n if k in ('border','margin','padding'):\n for kk in ('top','bottom','left','right'):\n setattr(w.style,'%s_%s'%(k,kk),v)\n\n w.paint = self.paint(w,w.paint)\n w.event = self.event(w,w.event)\n w.update = self.update(w,w.update)\n w.resize = self.resize(w,w.resize)\n w.open = self.open(w,w.open)", "def style_button( self, a_widget ):\n a_widget.config( self.button_dict )\n #print( \"problem with fg in styling look into \" )\n\n # if True: # old style without dict -- configure or config\n # pass\n # # for testing without dict windows only green and red seem to ever show\n # 
a_widget.configure( activebackground ='red' )\n # a_widget.configure( activeforeground ='blue' )\n # a_widget.configure( bg ='green' )\n # a_widget.configure( fg ='yellow' )\n # a_widget.configure( highlightcolor ='orange' )", "def add_class_to_widget(widget, *css_classes):\n css_string = \" \".join(css_classes)\n if 'class' in widget.attrs:\n widget.attrs['class'] += ' {} '.format(css_string)\n else:\n widget.attrs['class'] = css_string", "def _lookup_class(r, widget):\n\n page_cols = current.s3db.get_config(r.tablename, \"profile_cols\")\n if not page_cols:\n page_cols = 2\n widget_cols = widget.get(\"colspan\", 1)\n span = int(12 / page_cols) * widget_cols\n\n # Default (=foundation)\n return \"profile-widget medium-%s columns\" % span", "def predefined_style(self, style):\n if style == 'minimal':\n self.style(box_style='', border_visible=False)\n self.play_stop_toggle.button_style = ''\n self.play_stop_toggle.font_weight = 'normal'\n self.play_options_toggle.button_style = ''\n format_box(self.loop_interval_box, '', False, 'black', 'solid', 1,\n 10, '0.1cm', '0.1cm')\n if self.index_style == 'buttons':\n self.index_wid.button_plus.button_style = ''\n self.index_wid.button_plus.font_weight = 'normal'\n self.index_wid.button_minus.button_style = ''\n self.index_wid.button_minus.font_weight = 'normal'\n self.index_wid.index_text.background_color = None\n elif self.index_style == 'slider':\n self.index_wid.slider.slider_color = None\n self.index_wid.slider.background_color = None\n self._toggle_play_style = ''\n self._toggle_stop_style = ''\n elif (style == 'info' or style == 'success' or style == 'danger' or\n style == 'warning'):\n self.style(box_style=style, border_visible=False)\n self.play_stop_toggle.button_style = 'success'\n self.play_stop_toggle.font_weight = 'bold'\n self.play_options_toggle.button_style = 'info'\n format_box(self.loop_interval_box, 'info', True,\n map_styles_to_hex_colours('info'), 'solid', 1, 10,\n '0.1cm', '0.1cm')\n if self.index_style == 'buttons':\n self.index_wid.button_plus.button_style = 'primary'\n self.index_wid.button_plus.font_weight = 'bold'\n self.index_wid.button_minus.button_style = 'primary'\n self.index_wid.button_minus.font_weight = 'bold'\n self.index_wid.index_text.background_color = \\\n map_styles_to_hex_colours(style, True)\n elif self.index_style == 'slider':\n self.index_wid.slider.slider_color = \\\n map_styles_to_hex_colours(style)\n self.index_wid.slider.background_color = \\\n map_styles_to_hex_colours(style)\n self._toggle_play_style = 'success'\n self._toggle_stop_style = 'danger'\n else:\n raise ValueError('style must be minimal or info or success or '\n 'danger or warning')", "def widgets(self):\r\n return resources.Widgets(self)", "def Custom(\n owner: QWidget,\n name: str,\n widget: QWidget\n):\n return widget", "def get_widget(self):\r\n return None", "def widget(self, widget_id):\r\n return resources.Widget(self, widget_id)", "def XPHideWidget(inWidget):\n pass", "def widget(self, p_int): # real signature unknown; restored from __doc__\n pass", "def widget(self, p_int): # real signature unknown; restored from __doc__\n pass", "def predefined_style(self, style):\n if style == 'minimal':\n self.style(box_style='', border_visible=False)\n self.play_stop_toggle.button_style = ''\n self.play_stop_toggle.font_weight = 'normal'\n self.play_options_toggle.button_style = ''\n _format_box(self.loop_interval_box, '', False, 'black', 'solid', 1,\n 10, '0.1cm', '0.1cm')\n if self.index_style == 'buttons':\n 
self.index_wid.button_plus.button_style = ''\n self.index_wid.button_plus.font_weight = 'normal'\n self.index_wid.button_minus.button_style = ''\n self.index_wid.button_minus.font_weight = 'normal'\n self.index_wid.index_text.background_color = None\n elif self.index_style == 'slider':\n self.index_wid.slider.slider_color = None\n self.index_wid.slider.background_color = None\n self._toggle_play_style = ''\n self._toggle_stop_style = ''\n elif (style == 'info' or style == 'success' or style == 'danger' or\n style == 'warning'):\n self.style(box_style=style, border_visible=False)\n self.play_stop_toggle.button_style = 'success'\n self.play_stop_toggle.font_weight = 'bold'\n self.play_options_toggle.button_style = 'info'\n _format_box(self.loop_interval_box, 'info', True,\n _map_styles_to_hex_colours('info'), 'solid', 1, 10,\n '0.1cm', '0.1cm')\n if self.index_style == 'buttons':\n self.index_wid.button_plus.button_style = 'primary'\n self.index_wid.button_plus.font_weight = 'bold'\n self.index_wid.button_minus.button_style = 'primary'\n self.index_wid.button_minus.font_weight = 'bold'\n self.index_wid.index_text.background_color = \\\n _map_styles_to_hex_colours(style, True)\n elif self.index_style == 'slider':\n self.index_wid.slider.slider_color = \\\n _map_styles_to_hex_colours(style)\n self.index_wid.slider.background_color = \\\n _map_styles_to_hex_colours(style)\n self._toggle_play_style = 'success'\n self._toggle_stop_style = 'danger'\n else:\n raise ValueError('style must be minimal or info or success or '\n 'danger or warning')", "def update_style(self):\n pass" ]
[ "0.6704201", "0.6648001", "0.6635693", "0.65978724", "0.65335196", "0.6479021", "0.64238656", "0.6410524", "0.63951045", "0.6247971", "0.6211813", "0.6205558", "0.6154018", "0.6119156", "0.60828286", "0.6058856", "0.5991134", "0.5858697", "0.5830774", "0.5830671", "0.58251435", "0.5801665", "0.57993764", "0.57738805", "0.57612705", "0.5743955", "0.5725701", "0.5725701", "0.5723986", "0.56925535" ]
0.7314865
1
Return True if key in self.default
def selected(self, key): default = self.default if not default: return False for item in default: if compare(key, item) == 0: return True return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def has_key(self, key):\n return key in self", "def has(self, key):", "def __contains__(self, key):\n return self._lookup(key).value is not None", "def __contains__(self, key):\n found = True\n try:\n self.__getitem__(key)\n except:\n found = False\n return found", "def __contains__(self, key):\n try:\n if self[key]:\n return True\n except KeyError:\n return False", "def __contains__(self, key):\n return key in self.keys", "def has(self, key):\n return False", "def __contains__(self, key):\n return key in self._mappings.keys()", "def containsKey(self, key):\n return get(key) != None", "def __contains__(self, key):\n try:\n self._get(key)\n return True\n except Exception:\n return False", "def __contains__(self, key):\n\n return key in self.keys_set", "def contains(self, key):\n if key in self.key_list:\n return True\n return False", "def __contains__(self, key):\n return key in self._group._opts", "def has(self, key) -> bool:\r\n if self.get(key) is not None:\r\n return True\r\n return False", "def has_default(self):\r\n return self.default is not None", "def contains(self, key: int) -> bool:\n if key in self.d:\n return True\n else:\n return False", "def has(self, name):\n return name in self._defaults", "def __contains__(self, key):\n try:\n self[key]\n return True\n except:\n return False", "def has_key(self, key):\n return self.contains(key)", "def __contains__(self, key):\n return key in self._opts or key in self._groups", "def __contains__(self, key):\n return self.contains(key)", "def __contains__(self, key):\n return key in self._get_storage()", "def __contains__(self, key):\n return self.__getitem__(key)", "def bool(self, key: str, def_: Union[builtins.bool, T] = False) -> Union[builtins.bool, T]:\n try:\n return BOOL_LOOKUP[self.get(key).casefold()]\n except KeyError:\n return def_", "def __contains__(self, key):\n\t\treturn key in self.__dStore", "def has_default(self):\r\n return self._default is not None", "def contains(self, key):\n try:\n self.keyvaluepair_set.get(key=key)\n return True\n except KeyValuePair.DoesNotExist:\n return False", "def contains(self, key):\n\n return key in self.keys()", "def __contains__(self, val):\n if self.lookup.get(val, 0) > 0:\n return True\n else:\n return False", "def get_bool(self, key, default):\n value = self.get(key, default)\n if isinstance(value, bool):\n return value\n return value.lower() in (\"true\", \"t\", \"yes\", \"y\")" ]
[ "0.7080448", "0.6991802", "0.6945687", "0.693497", "0.69090164", "0.6869424", "0.6864009", "0.6844763", "0.68307567", "0.6808193", "0.68030185", "0.68011004", "0.6780968", "0.67448217", "0.6737738", "0.6706005", "0.6704852", "0.6698478", "0.66980475", "0.6686528", "0.6682971", "0.66828555", "0.6678057", "0.66229355", "0.6600105", "0.6561241", "0.6540234", "0.6516251", "0.6515231", "0.6497362" ]
0.8206618
1
Get the value from the form and return a catalog query dict
def query(self, form): query = {} index = self.data.get('index', '') index = index.encode('utf-8', 'replace') if not self.operator_visible: operator = self.operator else: operator = form.get(self.data.getId() + '-operator', self.operator) operator = operator.encode('utf-8', 'replace') if not index: return query if self.hidden: value = self.default else: value = form.get(self.data.getId(), '') if not value: return query catalog = getToolByName(self.context, 'portal_catalog') if index in catalog.Indexes: if catalog.Indexes[index].meta_type == 'BooleanIndex': if value == 'False': value = False elif value == 'True': value = True query[index] = {'query': value, 'operator': operator} return query
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def query(self, form):\n query = {}\n index = self.data.get('index', '')\n index = index.encode('utf-8', 'replace')\n\n if not self.operator_visible:\n operator = self.operator\n else:\n operator = form.get(self.data.getId() + '-operator', self.operator)\n\n operator = operator.encode('utf-8', 'replace')\n\n if not index:\n return query\n\n if self.hidden:\n value = self.default\n else:\n value = form.get(self.data.getId(), '')\n\n value = atdx_normalize(value)\n\n if not value:\n return query\n\n catalog = getToolByName(self.context, 'portal_catalog')\n if index in catalog.Indexes:\n if catalog.Indexes[index].meta_type == 'BooleanIndex':\n if value == 'False':\n value = False\n elif value == 'True':\n value = True\n\n query[index] = {'query': value, 'operator': operator}\n return query", "def get_value(self, query_dict, k): \n if k in query_dict:\n return query_dict[k]\n return ''", "def search_form_servee(context, cl):\r\n return {\r\n \"request\": context[\"request\"],\r\n \"cl\": cl,\r\n \"show_result_count\": cl.result_count != cl.full_result_count,\r\n \"search_var\": \"q\"\r\n }", "def form_get(var, convert=None, default=_DEFAULT):\n\n return _parameter_get(request.form, var, convert, default)", "def query_view_data(self):\n if not self.valid:\n raise MudderyError(\"Invalid form: %s.\" % self.form_name)\n\n self.form = None\n self.key = None\n if self.record:\n try:\n # Query record's data.\n instance = self.form_class.Meta.model.objects.get(pk=self.record)\n self.form = self.form_class(instance=instance)\n self.key = getattr(instance, \"key\", None)\n except Exception, e:\n self.form = None\n\n if not self.form:\n # Get empty data.\n self.form = self.form_class()", "def gval( k,i ):\n if code_info.form.has_key( k+str(i) ):\n return code_info.form[ k+str(i) ].value\n return None", "def get_data(self, form_name, expr, data=None):\n if data:\n self.update_cleaned_data(data, form_name=form_name)\n data = self.cleaned_data\n return expr.evaluate(data=data, context=self.context)", "def getform():\n form = cgi.FieldStorage()\n host = form.getvalue('host')\n user = form.getvalue('user')\n passwd = form.getvalue('passwd')\n cert = form.getvalue('cert')\n proxy = form.getvalue('proxy')\n name = form.getvalue('name')\n return (host, user, passwd, cert, proxy, name)", "def get_form_details_sqli(form):\n details = {}\n # get the form action (target url)\n action = form.attrs.get(\"action\").lower()\n # get the form method (POST, GET, etc.)\n method = form.attrs.get(\"method\", \"get\").lower()\n # get all the input details such as type and name\n inputs = []\n cookies = {}\n for input_tag in form.find_all(\"input\"):\n input_type = input_tag.attrs.get(\"type\", \"text\")\n input_name = input_tag.attrs.get(\"name\")\n if input_name == 'csrf' or input_name == 'PHPSESSID':\n cookies[input_name] = input_tag.attrs.get(\"value\")\n inputs.append({\"type\": input_type, \"name\": input_name})\n # put everything to the resulting dictionary\n #print(cookies)\n details[\"action\"] = action\n details[\"method\"] = method\n details[\"inputs\"] = inputs\n return details,cookies", "def get_one(self, *args, **kw):\n #this would probably only be realized as a json stream\n tmpl_context.widget = self.edit_form\n pks = self.provider.get_primary_fields(self.model)\n kw = {}\n for i, pk in enumerate(pks):\n kw[pk] = args[i]\n value = self.edit_filler.get_value(kw)\n return dict(value=value,model=self.model.__name__)", "def loadCgiParameter(self):\n if self.usecgi:\n self.form=FieldStorage(keep_blank_values=1)\n 
self.path = self.cgiparam(name='path',nvl='/')\n else:\n # Form inhalte holen\n qs = self.query_string\n\n parsed = parse_qs(qs)\n self.form = dict()\n\n for key in parsed.keys():\n for val in parsed.get(key):\n self.form[key] = val\n try:\n self.path=parsed.get('path')[0]\n except: \n self.form = {'path':'/root'}\n \n self.path = self.cgiparam('path','/root')", "def _form_data(self, response):\n SQFI_audit_type = response.xpath(self.filters[6]).extract_first()\n SQFI_audit_type_val = response.xpath(self.filters[7]).extract_first()\n food_sector_categories = response.xpath(self.filters[8]).extract_first()\n food_sector_categories_val = response.xpath(self.filters[9]).extract()\n audit_rating = response.xpath(self.filters[10]).extract_first()\n audit_rating_val = response.xpath(self.filters[11]).extract()\n country = response.xpath(self.filters[12]).extract_first()\n country_val = response.xpath(self.filters[13]).extract()\n form_data = {\n SQFI_audit_type: SQFI_audit_type_val,\n food_sector_categories: food_sector_categories_val,\n audit_rating: audit_rating_val,\n country: country_val,\n }\n return form_data", "def query(self, queryString, value):\n return", "def get(self, request, app, model, field_path, *args, **kwargs):\n Model = apps.get_model(app, model)\n field = get_fields_from_path(Model, field_path)[-1]\n qs = self.get_queryset(Model, field_path)\n if field.is_relation:\n results = self.get_relation_choices(field, qs, field_path)\n if self.q:\n results = results.filter(**{'pk': self.q})\n else:\n results = self.get_choices(qs, field_path)\n if self.q:\n results = [x for x in results if self.q in x]\n\n query_string = get_query_string(request.GET, remove=[field_path])\n return http.HttpResponse(json.dumps({\n 'results': [dict(id=self.get_result_value(x, field_path, query_string), text=self.get_result_label(x))\n for x in results]\n }))", "def get_form(self, *args, **kwargs):\n form_kwargs = {\"label\": self.input_label, \"help_text\": self.input_help_text}\n form_kwargs.update(kwargs)\n return self.lookup_response_class.get_form(*args, **form_kwargs)", "def get_value(self, dictionary):\n # We override the default field access in order to support\n # lists in HTML forms.\n if html.is_html_input(dictionary):\n return html.parse_html_list(dictionary, prefix=self.field_name)\n return dictionary.get(self.field_name, empty)", "def get_patient_lookup_results(form):\n args = form.data\n data = {k: v for (k, v) in args.items() if not is_empty_form_value(v)}\n if (not form.validate()) or (len(data) == 0):\n return {\"has_form_data\": False, \"tb\": None}\n where = \"where \" + (\" and \".join(\"(X.[{}] = ?)\".format(k) for k, _ in data.items()))\n params = [v for _, v in data.items()]\n q = (\"select top 1000 X.* \" +\n \"from {schema}.PersonCombined X \".format(schema=app_schema) +\n where)\n tb = pd.read_sql(q, engine, params=params)\n return {\"has_form_data\": True, \"tb\": tb}", "def query(self, value, fieldname=None, *args, **kwargs):\n raise NotImplementedError", "def search():\n\n # POST\n if request.method == \"POST\":\n\n # validate form submission\n if not request.form.get(\"intervention\"):\n return render_template(\"results.html\", results=entries.values())\n ''' \n elif not request.form.get(\"setting\"):\n return apology(\"missing setting\")\n elif not request.form.get(\"emrpref\"):\n return apology(\"missing emr pref\")\n elif not request.form.get(\"budget\"):\n return apology(\"missing budget\")'''\n \n results = []\n for k in entries:\n print('entries', 
entries[k]['Keywords'])\n print('term', request.form.get(\"intervention\"))\n if request.form.get(\"intervention\") in entries[k]['Keywords']:\n print('ya')\n results.append(entries[k])\n\n\n return render_template(\"results.html\", results=results)\n\n\n # GET\n else:\n return render_template(\"search.html\")", "def get(self):\n return get_msg_form(config['filter_form_values_path'])", "def request_value(self) -> global___Expression:", "def feature_layer_query(form):\n\n resource = None\n if \"resource\" in form.vars:\n resource = form.vars.resource\n # Remove the module from name\n form.vars.resource = resource[len(form.vars.module) + 1:]\n\n #if \"advanced\" in form.vars:\n # # We should use the query field as-is\n # pass\n\n #if resource:\n # # We build query from helpers\n # if \"filter_field\" in form.vars and \"filter_value\" in form.vars:\n # if \"deleted\" in db[resource]:\n # form.vars.query = \"(db[%s].deleted == False) & (db[%s][%s] == '%s')\" % (resource, resource, filter_field, filter_value)\n # else:\n # form.vars.query = \"(db[%s][%s] == '%s')\" % (resource, filter_field, filter_value)\n # else:\n # if \"deleted\" in db[resource]:\n # # All undeleted members of the resource\n # form.vars.query = \"(db[%s].deleted == False)\" % (resource)\n # else:\n # # All members of the resource\n # form.vars.query = \"(db[%s].id > 0)\" % (resource)\n if not resource:\n # Resource is mandatory if not in advanced mode\n session.error = T(\"Need to specify a Resource!\")\n\n return", "def pop_form(env):\n if 'wsgi.input' not in env:\n return None\n post_env = env.copy()\n post_env['QUERY_STRING'] = ''\n form = cgi.FieldStorage(\n fp=env.pop('wsgi.input'),\n environ=post_env,\n keep_blank_values=True\n )\n return {k: form[k].value for k in form}", "def get_data(self, query):\n result = input(\"{}: \".format(query))\n return result", "def getfield(form, fieldname):\n try:\n return form[fieldname]\n except KeyError:\n return None", "def get(self, field: str, value: str):\n data = {\n 'query': {\n 'object': 'EEXPENSES',\n 'select': {\n 'field': [\n 'RECORDNO',\n 'RECORDID',\n 'WHENCREATED',\n 'WHENPOSTED',\n 'TOTALENTERED',\n 'STATE',\n 'TOTALDUE',\n 'DESCRIPTION',\n 'CURRENCY',\n 'BASECURR',\n 'MEMO'\n ]\n },\n 'filter': {\n 'equalto': {\n 'field': field,\n 'value': value\n }\n },\n 'pagesize': '2000'\n }\n }\n\n return self.format_and_send_request(data)['data']", "def cgiparam(self,name=None,nvl='',noneifnotused=False): \n if self.form is None:\n self.logger.debug('Form not defined, nvl returnd')\n return nvl\n \n # Wurde Spezielle CGI Verarbeitung gewuenscht\n if isinstance(self.form,dict):\n return self.form.get(name,nvl)\n\n # wenn Parameter nicht definiert\n # null-value zurueckgeben\n if name not in self.form:\n if noneifnotused:\n return None\n else:\n return nvl\n\n value = self.form.getvalue(name)\n \n if value is None:\n value = nvl\n else:\n if isinstance(value,list): \n try:\n value = value[0]\n except: value = nvl\n\n auxValue = value if name != 'password' else '*' * len(value)\n self.logger.debug('Get from CGI: \"{}\"=\"{}\"'.format(name,auxValue))\n\n return value", "def getresult(request):\r\n\r\n form = forms.SearchForm(request.POST)\r\n form.find_minmax_criteria(request.POST)\r\n\r\n if form.is_valid():\r\n\r\n headers, results = search.query(form.to_criteria(), form.cleaned_data['sector'], form.cleaned_data['exchange'], form.cleaned_data['show_result'])\r\n\r\n # show result in response\r\n return render_to_response('search/result.html', {\r\n 'headers': headers,\r\n 
'results': results\r\n })\r\n\r\n else:\r\n # show error message in response\r\n return render_to_response('search/result-error.html', {\r\n 'message': 'Please enter details correctly.',\r\n 'form': form\r\n })", "def get_entry(name, req, form):\n entry = {'requirement': name, 'form': form}\n form.initial['credits_needed'] = 0\n if req:\n form.instance = req\n form.initial['credits_needed'] = req.credits_needed\n return entry", "def get_name(data_store, form_value):\n if form_value:\n data_store[\"name\"] = form_value\n return \"hello\"\n else:\n return \"error\"" ]
[ "0.659652", "0.59559315", "0.5930462", "0.5637737", "0.5619595", "0.559166", "0.5567393", "0.556528", "0.5523497", "0.5457537", "0.543612", "0.5396403", "0.5385721", "0.536719", "0.5355745", "0.5342777", "0.5341243", "0.5340816", "0.53347945", "0.5322574", "0.5317065", "0.52952385", "0.52942055", "0.5289461", "0.5273229", "0.5218948", "0.5215105", "0.51898366", "0.51879215", "0.5148497" ]
0.65017927
1
Predicts whether examples are anomalies.
def predict(X, epsilon, gaussian, **kwargs): p = gaussian(X=X, **kwargs) return is_anomaly(p, threshold=epsilon)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def predict_evidences(self, X):", "def predictFailures (self) :\n \n while self.traceData :\n\n if self.traceData [0] == self.traceType :\n\n self.totalEvents += 1\n\n if random.random () < self.recall :\n\n self.predictedEvents += 1\n self.pTraceHandle.write (\"%d\\t%d\\t%d\\n\" % (self.traceData [0], self.traceData [1], self.traceData [2]))\n self.correctPredictions += 1\n self.totalPredictions += 1\n\n self.readNextTraceLine (self.fTraceHandle)\n\n if self.precision < 1 :\n\n wrongPredictions = int ((float (self.correctPredictions * (1 - self.precision)) / self.precision) + 0.5)\n \n interval = int ((self.endTime - self.startTime) / wrongPredictions)\n start = self.startTime\n end = start + interval\n\n for i in range (wrongPredictions) :\n \n self.pTraceHandle.write (\"%d\\t%d\\t%d\\n\" % (0, random.randint (0, self.totalNodes - 1), \\\n random.randint (start, end - 1)))\n self.totalPredictions += 1\n start = end\n end = start + interval", "def get_predictions():\n\n print(\"OK1\");\n print(\"OK2\");\n return;", "def a_test_predict_is_nans():\n model = ARIMAX(formula=\"y ~ x1\", data=data, ar=2, ma=2, family=Exponential())\n x = model.fit()\n x.summary()\n assert(len(model.predict_is(h=5).values[np.isnan(model.predict_is(h=5).values)]) == 0)", "def a_test_predict_nans():\n model = ARIMAX(formula=\"y ~ x1\", data=data, ar=2, ma=2, family=Exponential())\n x = model.fit()\n x.summary()\n assert(len(model.predict(h=5, oos_data=data_oos).values[np.isnan(model.predict(h=5, \n oos_data=data_oos).values)]) == 0)", "def a_test_predict_is_length():\n model = ARIMAX(formula=\"y ~ x1\", data=data, ar=2, ma=2, family=Exponential())\n x = model.fit()\n assert(model.predict_is(h=5).shape[0] == 5)", "def anomaly():\n\n #Load anomaly dataset\n anomaly_data = LoadDataset(\"dataset/kaggle_anomalies/\",0)\n anomaly_data, anomaly_label, val, val_label = anomaly_data.load_data()\n for i in range (len(anomaly_label)):\n anomaly_label[i] = anomaly_label[i] + 5\n\n #Concatinate test and anomaly\n test_anomaly_data = np.vstack((test_data,anomaly_data))\n test_anomaly_label = np.hstack((test_label, anomaly_label))\n\n \"\"\"# Get k-means cluster distance\n cluster_model = KMeansClustering()\n cluster_model.train(encoded_train,None)\n cluster_dist = cluster_model.transform(encoded_test_anomaly)\n\n correct = 0\n wrong = 0\n total = 0\n for i in range(len(cluster_dist)):\n min_distance = np.amin(cluster_dist[i])\n if(min_distance > 4):\n if(test_anomaly_label[i] > 4):\n correct = correct +1\n else:\n wrong = wrong +1\n\n print(\"Dist \",min_distance,\" True label \", test_anomaly_label[i])\n print(\"Found \",correct,\" anomalies and \",wrong,\" wrong\")\n\n decoded = auto.predict(test_anomaly_data)\n errors = []\n # loop over all original images and their corresponding\n # reconstructions\n for (image, recon) in zip(test_anomaly_data, decoded):\n \t# compute the mean squared error between the ground-truth image\n \t# and the reconstructed image, then add it to our list of errors\n \tmse = np.mean((image - recon) ** 2)\n \terrors.append(mse)\n # compute the q-th quantile of the errors which serves as our\n # threshold to identify anomalies -- any data point that our model\n # reconstructed with > threshold error will be marked as an outlier\n thresh = np.quantile(errors, 0.4)\n idxs = np.where(np.array(errors) >= thresh)[0]\n print(\"[INFO] mse threshold: {}\".format(thresh))\n print(\"[INFO] {} outliers found\".format(len(idxs)))\n correct = 0\n wrong = 0\n for i in idxs:\n if(test_anomaly_label[i] > 4):\n 
correct = correct +1\n else:\n wrong = wrong +1\n print(\"Found \",correct,\" anomalies and \",wrong,\" wrong\")\n\n ds = np.zeros(len(test_anomaly_data))\n for i in idxs:\n ds[i] = 1\n tsne(enc, test_anomaly_data,ds,\"anomaly_plot\",\"anomaly_plot\")\"\"\"", "def a_test2_predict_is_nans():\n model = ARIMAX(formula=\"y ~ x1 + x2\", data=data, ar=2, ma=2, family=Exponential())\n x = model.fit()\n x.summary()\n assert(len(model.predict_is(h=5).values[np.isnan(model.predict_is(h=5).values)]) == 0)", "def test_predict(self):\n\n docs = self.docs\n for m in self.models:\n preds = m.predict(docs)\n self.assertTrue(isinstance(preds, turicreate.SArray))\n self.assertEqual(len(preds), len(docs))\n self.assertEqual(preds.dtype, int)\n\n preds = m.predict(docs, output_type=\"probability\")\n self.assertTrue(isinstance(preds, turicreate.SArray))\n self.assertTrue(len(preds) == len(docs))\n s = preds.apply(lambda x: sum(x))\n self.assertTrue((s.apply(lambda x: abs(x - 1)) < 0.000001).all())\n\n # Test predictions when docs have new words\n new_docs = turicreate.SArray([{\"-1,-1\": 3.0, \"0,4\": 5.0, \"0,3\": 2.0}])\n preds = m.predict(new_docs)\n self.assertEqual(len(preds), len(new_docs))\n\n # Test additional burnin. Ideally we could show that things\n # converge as you increase burnin.\n preds_no_burnin = m.predict(docs, output_type=\"probability\", num_burnin=0)\n self.assertEqual(len(preds_no_burnin), len(docs))", "def model_predictions_assertions(model):\n predictions = model.predict([\n markup.load_query(\"Medium Beers pizza from oz pizza\",\n query_factory=QUERY_FACTORY).query])[0]\n assert len(predictions) <= 6\n for prediction in predictions:\n if prediction: # non entities are predicted as NoneType\n assert prediction.entity.type in {\n 'category', 'cuisine', 'dish', 'option', 'restaurant', 'sys_number'\n }", "def a_test_predict_length():\n model = ARIMAX(formula=\"y ~ x1\", data=data, ar=2, ma=2, family=Exponential())\n x = model.fit()\n x.summary()\n assert(model.predict(h=5, oos_data=data_oos).shape[0] == 5)", "def a_test2_predict_is_length():\n model = ARIMAX(formula=\"y ~ x1 + x2\", data=data, ar=2, ma=2, family=Exponential())\n x = model.fit()\n assert(model.predict_is(h=5).shape[0] == 5)", "def predict_example(ex: lit_types.JsonDict) -> lit_types.JsonDict:\n # Logit values for ['unknown', 'elephant', 'ant', 'whale'].\n logits = np.zeros((len(ANIMALS),))\n for db_rec in self._dataset.examples:\n animal_index = ANIMALS.index(db_rec['animal'])\n for field_name in self._dataset.spec():\n if ex[field_name] is None or db_rec[field_name] is None:\n continue\n if field_name == 'animal':\n continue\n field_spec_value = self._dataset.spec()[field_name]\n if (isinstance(field_spec_value, lit_types.CategoryLabel) or\n isinstance(field_spec_value, lit_types.Boolean)) and (\n ex[field_name] == db_rec[field_name]):\n logits[animal_index] += 1\n if isinstance(field_spec_value, lit_types.Scalar):\n logits[animal_index] += 1.0 - abs(ex[field_name] -\n db_rec[field_name])\n return scipy_special.softmax(logits)", "def test_predict(self):\n self.regression_single.predict(self.X_test)\n self.assertTrue(len(self.regression_single.y_pred))\n self.regression_boston.predict(self.boston_x_test)\n self.assertTrue(len(self.regression_boston.y_pred))", "def test_ebm_unknown_value_at_predict():\n X = np.array(\n [[0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 0, 0], [1, 0, 0, 0], [0, 0, 0, 1]],\n dtype=np.uint8,\n )\n\n X_test = np.array([[0, 1, 0, 0], [0, 0, 1, 1], [1, 0, 0, 0]], dtype=np.uint8)\n\n y = np.array([0, 1, 1, 1, 
1], dtype=np.uint8)\n\n clf = ExplainableBoostingClassifier()\n clf.fit(X, y)\n clf.predict(X_test)\n\n valid_ebm(clf)", "def test_output_is_counterfactuals(self):\n\n output = self._gen.generate(\n example=self._example,\n model=self._model,\n dataset=self._dataset,\n config=self._config)\n self.assertGreaterEqual(len(output), 1)\n target_prediction = self._predict_and_return_argmax_label(self._example)\n for cf_example in output:\n cf_prediction = self._predict_and_return_argmax_label(cf_example)\n self.assertNotEqual(cf_prediction, target_prediction)", "def a_test2_predict_nans():\n model = ARIMAX(formula=\"y ~ x1 + x2\", data=data, ar=2, ma=2, family=Exponential())\n x = model.fit()\n x.summary()\n assert(len(model.predict(h=5, oos_data=data_oos).values[np.isnan(model.predict(h=5, \n oos_data=data_oos).values)]) == 0)", "def test_nn_predicts_accurate_results(self):\n self.nn.train_nn(self.X_train, self.y_train, 6, 10, 0.06)\n accuracy = 0\n X_test, y_test = load_data(\"../data/testdata.mat.tar.gz\")\n for i in range(len(X_test[:100])):\n out = self.nn.forward_prop(X_test[i])[0][-1]\n if np.argmax(out) == np.where(y_test[i])[0][0]:\n accuracy += 1\n else:\n print(\"Incorrect\", np.argmax(out))\n print(\"accuracy: \", accuracy)\n self.assertGreaterEqual(accuracy, 70)", "def predict(self):\n\n global pos\n pos = (pos + 1) % len(ue_data) # iterate through entire list one by one in cycle manner and will be updated when live feed will be coming through KPIMON to influxDB\n sample = ue_data[pos]\n ue_df = pd.DataFrame([sample], columns=db.data.columns)\n val = predict_anomaly(self, ue_df)\n if (val is not None) and (len(val) > 2):\n msg_to_ts(self, val)", "def is_artificial(self):\n\t\treturn 0", "def mae(y_true: np.ndarray, y_pred: np.ndarray):\n return np.mean(np.abs(y_true - y_pred))", "def show_predictions(result, invalid, event):\n\n s.sendReply(\n event,\n f'Predictions: {result if result else \"none\"}\\n'\n f'Invalid: {\", \".join(invalid) if invalid else \"none\"}',\n )\n\n if result:\n s.sendReply(\n event,\n \"Note: The models do not take news or current events into account. 
\"\n \"Some models work better for different stocks based on their behaviour, \"\n \"try following the predictions for a while to determine which is best.\",\n )", "def test_response_value(predict, y):\r\n print(\"test_response_value()...\", end = \"\")\r\n if len(set(y)) == 1:\r\n assert (predict == y).all()\r\n print(\"Passed!\")", "def test_predict():\n\n tpot_obj = TPOTClassifier()\n\n try:\n tpot_obj.predict(testing_features)\n assert False # Should be unreachable\n except ValueError:\n pass", "def check_prediction(self):\n predicted_scores = self.sess.run(self.NET.output_with_relu, feed_dict={self.NET.input: self.test_image if len(self.test_image.shape)==4 else [self.test_image]})\n self.original_confidence = np.max(predicted_scores)\n if np.argmax(predicted_scores,1) != self.original_label:\n print(\"Network's Prediction is Already Incorrect!\")\n return True\n else:\n return False", "def test_test_value_oob_gets_error_message(self):\n res = predict_model.predict(test_value=-1)\n assert res == self.err_msg\n res = predict_model.predict(test_value=40283)\n assert res == self.err_msg", "def test_04_predict(self):\n today = date.today()\n log_file = os.path.join(LOG_DIR, \"{}-predict-{}-{}.log\".format(LOG_PREFIX, today.year, today.month))\n\n ## update the log\n y_pred = [0]\n y_proba = [0.6,0.4]\n runtime = \"00:00:02\"\n model_version = 0.1\n country = \"india\"\n target_date = '2018-01-05'\n\n update_predict_log(country, y_pred,y_proba,target_date,runtime,\n model_version, test=True, prefix=LOG_PREFIX)\n\n df = pd.read_csv(log_file)\n logged_y_pred = [literal_eval(i) for i in df['y_pred'].copy()][-1]\n self.assertEqual(y_pred,logged_y_pred)", "def a_test2_predict_length():\n model = ARIMAX(formula=\"y ~ x1 + x2\", data=data, ar=2, ma=2, family=Exponential())\n x = model.fit()\n x.summary()\n assert(model.predict(h=5, oos_data=data_oos).shape[0] == 5)", "def test_model_outcome(predicted, actual, planned):\n if not isinstance(predicted, pd.DataFrame):\n predicted = pd.DataFrame(predicted, columns=[\"PREDICTED_TRIP_DURATION\"])\n if not isinstance(actual, pd.DataFrame):\n actual = pd.DataFrame(actual, columns=[\"ACTUAL_TRIP_DURATION\"])\n if not isinstance(planned, pd.DataFrame):\n planned = pd.DataFrame(planned, columns=[\"PLANNED_TRIP_DURATION\"])\n # Initialise the combined dataframe\n combined = pd.concat([predicted, actual, planned], axis=1)\n # Calculate the actual delay\n actual_delay = combined[\"PLANNED_TRIP_DURATION\"] - combined[\"ACTUAL_TRIP_DURATION\"]\n # Calculate the predicted delay\n predicted_delay = combined[\"PLANNED_TRIP_DURATION\"] - combined[\"PREDICTED_TRIP_DURATION\"]\n # Calculate the difference in delay\n delay_diff = actual_delay - predicted_delay\n # Combine the delays into a single dataframe\n combined_delay = pd.concat([pd.DataFrame(actual_delay, columns=['Actual_Delay']),\n pd.DataFrame(predicted_delay, columns=['Predicted_Delay']),\n pd.DataFrame(delay_diff, columns=['Difference_In_Delay'])], axis=1)\n # Obtain the index of the max and min values of the actual, predicted and difference delays\n actual_max_index = combined_delay[\"Actual_Delay\"].argmax()\n actual_min_index = combined_delay[\"Actual_Delay\"].argmin()\n predicted_max_index = combined_delay[\"Predicted_Delay\"].argmax()\n predicted_min_index = combined_delay[\"Predicted_Delay\"].argmin()\n delay_diff_max_index = combined_delay[\"Difference_In_Delay\"].argmax()\n delay_diff_min_index = combined_delay[\"Difference_In_Delay\"].argmin()\n # Get the Mean Absolute Error\n MAE = 
metrics.mean_absolute_error(combined[\"ACTUAL_TRIP_DURATION\"], combined[\"PREDICTED_TRIP_DURATION\"])\n # Get the R2 Score\n R2 = metrics.r2_score(combined[\"ACTUAL_TRIP_DURATION\"], combined[\"PREDICTED_TRIP_DURATION\"])\n # Get the Root Mean Squared Error\n RMSE = metrics.mean_squared_error(combined[\"ACTUAL_TRIP_DURATION\"], combined[\"PREDICTED_TRIP_DURATION\"],\n squared=False)\n # Get the Median Absolute Error\n MEDAE = metrics.median_absolute_error(combined[\"ACTUAL_TRIP_DURATION\"], combined[\"PREDICTED_TRIP_DURATION\"])\n # Get the Mean Squared Error Log Value\n MSLE = metrics.mean_squared_log_error(combined[\"ACTUAL_TRIP_DURATION\"], combined[\"PREDICTED_TRIP_DURATION\"])\n # Build Dictionary\n pass_val = {\"combined\": combined,\n \"combined_delay\": combined_delay,\n \"actual_max_index\": actual_max_index,\n \"actual_min_index\": actual_min_index,\n \"predicted_max_index\": predicted_max_index,\n \"predicted_min_index\": predicted_min_index,\n \"delay_diff_max_index\": delay_diff_max_index,\n \"delay_diff_min_index\": delay_diff_min_index,\n \"MAE\": MAE,\n \"R2\": R2,\n \"MEDAE\": MEDAE,\n \"RMSE\": RMSE,\n \"MSLE\": MSLE}\n # Return Dictionary\n return pass_val", "def anomaly(self):\n return self._anomaly(result_count=1, failure_amount=1)" ]
[ "0.6425468", "0.6234993", "0.6177747", "0.61675006", "0.61499196", "0.6123187", "0.6110093", "0.61028236", "0.6091568", "0.6053405", "0.6050498", "0.60391706", "0.6036215", "0.60330534", "0.6031875", "0.59863746", "0.5979558", "0.5956812", "0.5931241", "0.5925367", "0.58910334", "0.5889601", "0.5782532", "0.5763313", "0.5743234", "0.5728338", "0.57206243", "0.5712959", "0.57002455", "0.5697233" ]
0.6635491
0
Register requirement syntax for later use; returns an ID for retrieving the syntax
def _register_requirement_syntax(self, syntax): syntaxId = self.nextSyntaxId self.nextSyntaxId += 1 return syntaxId
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def interpret_requirement(string):\n string_list = split(string, sep=' ')\n \n requirement = Requirement(points, degree, majors, levels, max_non_degree)\n return requirement", "def syntax_text():", "def reqid(self) -> str:", "def reqid(self) -> str:", "def identifier(self):", "def instruction():\n def treat_syn(acc):\n for ac in acc:\n vs = [t.value for t in ac]\n v_expand = Instruction._expand_slashdash(vs)\n c.synonym_add(v_expand)\n return ()\n def treat_instruct(acc):\n keyword,ls = acc\n instruct[keyword.value] = Instruction._param_value(ls)\n return ()\n keyword_instruct = (first_word(\"\"\"exit timelimit printgoal dump \n ontored read library error warning\"\"\") + \n Parse.next_token().possibly())\n return (c.bracket(next_word('synonym') + Instruction._syn().treat(treat_syn) |\n c.bracket(keyword_instruct.treat(treat_instruct))))", "def get_id(self):\n return \"no_requirements_plugin\"", "def install_syntax_functions(self):\n self.syntax_functions[':head'] = head_prediction_generator\n self.syntax_functions[':optional'] = optional_prediction_generator\n self.syntax_functions[':sequence'] = sequence_prediction_generator\n self.syntax_functions[':any'] = any_prediction_generator", "def register_specify_shape_c_code(typ, code, version=(),\r\n c_support_code_apply=None):\r\n SpecifyShape.c_code_and_version[typ] = (code, version, c_support_code_apply)", "def get_definition_id(self, usage_id):\n raise NotImplementedError()", "def grr_id(line: Text) -> Text:\n del line # Unused.\n return magics_impl.grr_id_impl()", "def get_identifier(self):", "def register_shape_i_c_code(typ, code, version=()):\r\n Shape_i.c_code_and_version[typ] = (code, version)", "def _add_spec(self, requirement_name, spec_str):\n spec_str = spec_str or '>=0.0.0'\n spec_str = spec_str.replace(' ', '')\n spec_str = '~' + spec_str.replace('.x', '.0') if '.x' in spec_str else spec_str\n self.versions_spec[requirement_name].add(spec_str)", "def eidr_identifier(title):\n pass", "def parse_requirement(req_text):\n req_text = req_text.strip()\n if not req_text:\n return None\n if req_text[0] == \"#\":\n return None\n return pkg_resources.Requirement.parse(req_text)", "def create_schema_example_id(argval):\n if argval[0] == '/':\n # ID for the first argument is just the schema name\n return get_schema_name(argval)\n else:\n # This will cause pytest to create labels of the form:\n # SCHEMA_NAME-example\n # If there are multiple examples within a single schema, the\n # examples will be numbered automatically to distinguish them\n return \"example\"", "def _id(self):\n result = ''\n while self.current_char is not None and self.current_char.isalnum():\n result += self.current_char\n self.advance()\n\n if self.current_char == '(' and self.is_declaration is False:\n self.advance()\n token = self.RESERVED_KEYWORDS.get(\n result.upper(), Token(CALL, result))\n else:\n token = self.RESERVED_KEYWORDS.get(\n result.upper(), Token(ID, result))\n self.is_declaration = False\n if token.type in (PROCEDURE, FUNCTION):\n self.is_declaration = True\n return token", "def require(name):", "def __check_external_code__(self, line, name):\n line, _ = self.find_vars_in_str(line)\n words = line.split()\n self.E_str = f\"check_{name}_command\"\n\n corr_syn = f\"The correct syntax for running a bit of {name} code is:\\n\\n\"\n corr_syn += f\" {name} \" + \" {\\n\\n ...\\n\\n }\"\n\n # Check the braces are opened and closed properly\n if self.file_ltxt[self.line_num+1] != \"{\":\n self.print_error(f\"You must open a bracket for the {name} 
command\"+\"\\n\\n\"\n + corr_syn)\n\n # Get the filetxt after the command\n rest_filetxt = '\\n'.join(self.file_ltxt[self.line_num:])\n if gen_parse.get_bracket_close(rest_filetxt, \"{\", \"}\") == -1:\n self.print_error(f\"You must close a brace in the {name} command.\"+\"\\n\\n\"\n + corr_syn)\n\n # Find where the little script ends\n brack_num, new_lines = 1, []\n for end_line, new_line in enumerate(self.file_ltxt[self.line_num+2:]):\n if new_line == '{': brack_num += 1\n elif new_line == '}': brack_num -= 1\n\n if brack_num > 0: new_lines.append(new_line)\n elif brack_num == 0: break\n\n end_line += self.line_num + 2\n\n return end_line", "def registerIdentifier(self, name):\n assert mathUtils.isAString(name)\n assert name not in self._registeredIdentifiers\n # don't allow adding identifiers if existing jobs are already running, I think?\n assert not self._prefixToIdentifiers\n self._registeredIdentifiers.add(name)", "def registration_definition_id(self) -> str:\n return pulumi.get(self, \"registration_definition_id\")", "def identifier(self):\n raise NotImplementedError", "def get_id(self):\n return \"non_existing_and_existing_module_required_plugin\"", "def get_exp_identifier(dataset_type, FOLD, AR, AUTHOR, POST):\n global FEATURES_STR\n global AR_TYPE\n return dataset_type + '-' + AR_TYPE + '-fo' + str(FOLD) + '-fe' +\\\n FEATURES_STR + '-ar' + str(AR) + '-a' + str(AUTHOR) + '-p' + str(POST)", "def createRequirementLike(\n self,\n functionName: str,\n body: ast.AST,\n lineno: int,\n name: Optional[str] = None,\n prob: Optional[float] = None,\n ):\n propTransformer = PropositionTransformer(self.filename)\n newBody, self.nextSyntaxId = propTransformer.transform(body, self.nextSyntaxId)\n newBody = self.visit(newBody)\n requirementId = self._register_requirement_syntax(body)\n\n return ast.Expr(\n value=ast.Call(\n func=ast.Name(functionName, loadCtx),\n args=[\n ast.Constant(requirementId), # requirement IDre\n newBody, # body\n ast.Constant(lineno), # line number\n ast.Constant(name), # requirement name\n ],\n keywords=[ast.keyword(arg=\"prob\", value=ast.Constant(prob))]\n if prob is not None\n else [],\n )\n )", "def rule_01_set_job_id(session):\n\n my_id = \"\".join(\"%02x\" % random.randint(0,255) for _ in xrange(4))\n\n session[\"config\"][\"tags\"][\"instavpn\"] = my_id\n show.output(\"Instavpn Task ID\", \"is %s\" % my_id)\n\n return True", "def ident(self):\r\n text = self.component.get(\"id\", \"\")\r\n # strip surrounding curly braces from id\r\n return re.sub(\"[{}]\", \"\", text)", "def create(pat: str, resource_registration_endpoint: str,\n name: str, scopes: List[str],\n description: str = None, icon_uri: str = None, typ: str = None,\n secure: bool = False) -> str:\n\n payload = {\"name\": name , \"resource_scopes\": scopes }\n dict_insert_if_exists(payload, \"description\", description)\n dict_insert_if_exists(payload, \"icon_uri\", icon_uri)\n dict_insert_if_exists(payload, \"type\", typ)\n\n headers = {\n 'Content-Type': \"application/json\",\n 'Authorization': \"Bearer \"+pat,\n }\n\n disable_warnings_if_debug(secure)\n response = request(\"POST\", resource_registration_endpoint, json=payload, headers=headers, verify=secure)\n\n if not is_ok(response):\n raise Exception(\"An error occurred while registering the resource: \"+str(response.status_code)+\":\"+str(response.reason)+\":\"+str(response.text))\n\n\n try:\n return response.json()[\"_id\"]\n except Exception as e:\n raise Exception(\"Call to registration endpoint returned unexpected value: 
'\"+response.text+\"'\"+\". Error: \"+str(e))", "def build_id():\n return \"test123\"" ]
[ "0.5907982", "0.5524388", "0.5376405", "0.5376405", "0.5335209", "0.5323732", "0.52494186", "0.5154274", "0.5144197", "0.51264733", "0.50688785", "0.5062202", "0.5050767", "0.5009953", "0.50040245", "0.49779958", "0.49704483", "0.4951111", "0.49311674", "0.49255788", "0.48836946", "0.48785788", "0.48430648", "0.48342973", "0.48288175", "0.4824134", "0.48230925", "0.48140177", "0.48077804", "0.47841972" ]
0.7798727
0
Given an expression, create an atomic proposition factory.
def _create_atomic_proposition_factory(self, node): lineNum = ast.Constant(node.lineno) ast.copy_location(lineNum, node) closure = ast.Lambda(noArgs, node) ast.copy_location(closure, node) syntaxId = self._register_requirement_syntax(node) syntaxIdConst = ast.Constant(syntaxId) ast.copy_location(syntaxIdConst, node) ap = ast.Call( func=ast.Name(id=ATOMIC_PROPOSITION, ctx=loadCtx), args=[closure], keywords=[ ast.keyword(arg="syntaxId", value=syntaxIdConst), ], ) ast.copy_location(ap, node) return ap
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_expr(self, exprcls, ast, params=None, nopush=False):\n if params is None:\n expr = exprcls(self.current_parent, ast=ast)\n else:\n expr = exprcls(self.current_parent, ast=ast, **params)\n if not nopush:\n self.push_state(expr)\n return expr", "def parse_expression(expression: str) -> nodes.ExpNode:\r\n\r\n tokens = tokenize(expression)\r\n node = build_expression_tree(tokens)\r\n\r\n return node", "def make_positive(expression: Expr) -> Expr:\n if expression.op == '~':\n new_expression = Expr(expression.args[0].op, *expression.args[0].args)\n return new_expression\n return expression", "def expression_maker(ex_names, stat_type):\n if len(ex_names) == 1:\n expression = part_expression(ex_names[0], stat_type)\n else:\n current_part = ex_names.pop(-1)\n expression = (expression_maker(ex_names, stat_type) + ','\n + part_expresion(current_part, stat_type))\n\n return expression", "def construct_persona(x):\n return Persona(x)", "def getFactoryEvaluateExpressionOnly(self):\n # factory function for evaluateExpressionOnly\n def evaluateExpressionOnly_factory(expression):\n return self.evaluateExpressionOnly(expression)\n\n return evaluateExpressionOnly_factory", "def compile(expression: str) -> Compiled:\r\n e = Compiled(expression)\r\n e.tokenize()\r\n return e", "def __init__(self, expression, result, is_singleton=False):\n\n self.expr = expression\n self.result = result\n self.is_singleton = is_singleton", "def main(expression):\n\n exception = parse_expression(expression)\n return calc(poland_notation(exception))", "def _make_executor(self, expr=None):\n raise NotImplementedError()", "def expression_to_english_NP_tree(expr):\n if isinstance(expr, ApplicationExpression):\n preds = list(expr.predicates())\n if ((len(preds) != 1) or (preds[0].name not in common_nouns)): # for now, only common nouns allowed\n raise GenerationError(\"Head of restrictor predicate must be common noun.\") \n return SynTree(deepcopy(default_featstructs['NP']), [SynTree(deepcopy(default_featstructs['NBar']), [SynTree(default_featstructs['N'], [SynTree(preds[0].name, [])])])])\n elif isinstance(expr, AndExpression):\n adj_preds = list(expr.first.predicates())\n if ((len(adj_preds) != 1) or (adj_preds[0].name not in adjectives)): # for now, only adjectives can pre-modify NPs\n raise GenerationError(\"Modifier of NP must be an adjective.\")\n NP_tree = expression_to_english_NP_tree(expr.second)\n adj_subtree = SynTree(default_featstructs['AdjP'], [SynTree(default_featstructs['AdjBar'], [SynTree(default_featstructs['Adj'], [SynTree(adj_preds[0].name, [])])])])\n NP_tree.children = [SynTree(deepcopy(NP_tree.children[0].label), [adj_subtree, NP_tree.children[0]])]\n return NP_tree\n else:\n raise GenerationError(\"Invalid NP expression.\")", "def indirectedTransactionFactory(*a):\n return self.store.newTransaction(*a)", "def build_ast(expression):\n\n # use a directed graph to store the tree\n G = DiGraph()\n\n stack = []\n\n for n in expression:\n # Since the graph does not maintain the order of adding nodes/edges\n # add an extra attribute 'pos' so we can always sort to the correct order\n if isinstance(n, OperatorNode):\n if n.ttype == ept.TOK_TYPE_OP_IN:\n arg2 = stack.pop()\n arg1 = stack.pop()\n G.add_node(arg1, pos=1)\n G.add_node(arg2, pos=2)\n G.add_edge(arg1, n)\n G.add_edge(arg2, n)\n else:\n arg1 = stack.pop()\n G.add_node(arg1, pos=1)\n G.add_edge(arg1, n)\n\n elif isinstance(n, FunctionNode):\n args = [stack.pop() for _ in range(n.num_args)]\n args.reverse()\n for i, a in enumerate(args):\n 
G.add_node(a, pos=i)\n G.add_edge(a, n)\n # for i in range(n.num_args):\n # G.add_edge(stack.pop(),n)\n else:\n G.add_node(n, pos=0)\n\n stack.append(n)\n\n return G, stack.pop()", "def from_string_expr(cls, expr):\n if \"*\" in expr:\n ch = \"*\"\n op = \"cross\"\n elif \"+\" in expr:\n ch = \"+\"\n op = \"blend\"\n elif \"/\" in expr:\n ch = \"/\"\n op = \"nest\"\n factors = [cls(s.strip()) for s in expr.split(ch)]\n return cls(op=op, factors=factors)", "def __init__(__self__, *,\n expression: str,\n name: str):\n pulumi.set(__self__, \"expression\", expression)\n pulumi.set(__self__, \"name\", name)", "def __init__(__self__, *,\n expression: str,\n name: str):\n pulumi.set(__self__, \"expression\", expression)\n pulumi.set(__self__, \"name\", name)", "def factory(process_id):\n factory_classes = {\n # 'primer template creation': TODO,\n # 'reagent creation': TODO,\n 'primer working plate creation': PrimerWorkingPlateCreationProcess,\n 'sample plating': SamplePlatingProcess,\n 'reagent creation': ReagentCreationProcess,\n 'gDNA extraction': GDNAExtractionProcess,\n '16S library prep': LibraryPrep16SProcess,\n 'shotgun library prep': LibraryPrepShotgunProcess,\n 'quantification': QuantificationProcess,\n 'gDNA normalization': NormalizationProcess,\n 'compress gDNA plates': GDNAPlateCompressionProcess,\n 'pooling': PoolingProcess,\n 'sequencing': SequencingProcess}\n\n with sql_connection.TRN as TRN:\n sql = \"\"\"SELECT description\n FROM qiita.process_type\n JOIN qiita.process USING (process_type_id)\n WHERE process_id = %s\"\"\"\n TRN.add(sql, [process_id])\n p_type = TRN.execute_fetchlast()\n constructor = factory_classes[p_type]\n\n if constructor._table == 'qiita.process':\n instance = constructor(process_id)\n else:\n sql = \"\"\"SELECT {}\n FROM {}\n WHERE process_id = %s\"\"\".format(\n constructor._id_column, constructor._table)\n TRN.add(sql, [process_id])\n subclass_id = TRN.execute_fetchlast()\n instance = constructor(subclass_id)\n\n return instance", "def create_expression_network(\n self, fin_expression, column=\"tpm\", tfs=None, bindingfile=None\n ):\n # Convert to a list of filename(s)\n if isinstance(fin_expression, str):\n fin_expression = [fin_expression]\n\n # Read all expression input files and take the mean expression per gene\n re_column = re.compile(fr\"^{column}$\", re.IGNORECASE)\n expression = pd.DataFrame(\n pd.concat(\n [\n pd.read_table(f, index_col=0).filter(regex=re_column)\n for f in fin_expression\n ],\n axis=1,\n ).mean(1),\n columns=[column],\n )\n expression[column] = np.log2(expression[column] + 1e-5)\n\n genes = pd.read_table(\n self.gene_bed, usecols=[3], comment=\"#\", names=[\"name\"], index_col=0\n )\n overlap = len(genes.index.intersection(expression.index))\n if overlap / expression.shape[0] < 0.1:\n logger.error(\n \"gene annotation identifiers do not seem to match between annotation and expression files!\"\n )\n sample_exp = \", \".join(expression.sample(5).index.values)\n sample_gene = \", \".join(genes.sample(5).index.values)\n logger.error(f\"expression sample: {sample_exp}\")\n logger.error(f\"annotation sample: {sample_gene}\")\n sys.exit(1)\n\n # Create the TF list, based on valid transcription factors\n if tfs is None:\n try:\n act = pd.read_hdf(bindingfile, key=\"_factor_activity\")\n if \"factor\" in act.columns:\n act = act.set_index(\"factor\")\n tfs = list(set(act.index.tolist()))\n except KeyError:\n tffile = os.path.join(PACKAGE_DIR, \"db\", \"tfs.txt\")\n tfs = pd.read_csv(tffile, header=None)[0].tolist()\n\n # Save TFs and 
targets as temporary files\n idx = expression.index[expression.index.isin(tfs)]\n tmp = expression.loc[idx]\n if tmp.shape[0] == 0:\n logger.error(\n \"None of the transcription factors are found in your expression file.\"\n )\n logger.error(\n \"If you have human data, please make sure you use HGNC symbols (gene names).\"\n )\n logger.error(\n \"If you have non-human data, you have to create a custom motif to gene mapping.\"\n )\n logger.error(\"See this link for one possibility to create this file: \")\n logger.error(\n \"https://gimmemotifs.readthedocs.io/en/stable/reference.html#command-gimme-motif2factors\"\n )\n logger.error(\n \"If you use a custom motif mapping, you will also have (re)run `gimme binding` with this file.\"\n )\n sys.exit(1)\n\n tf_fname = self._save_temp_expression(tmp, \"tf\")\n target_fname = self._save_temp_expression(expression, \"target\")\n\n # Read files (delayed) and merge on 'key' to create a Cartesian product\n # combining all TFs with all target genes.\n a = dd.read_parquet(tf_fname)\n b = dd.read_parquet(target_fname)\n network = a.merge(b, how=\"outer\")\n\n # Use one-column index that contains TF and target genes.\n # This is necessary for dask, as dask cannot merge on a MultiIndex.\n # Otherwise this would be an inefficient and unnecessary step.\n network[\"tf_target\"] = network[\"tf\"] + \"_\" + network[\"target\"]\n network = network[\n [\"tf\", \"target\", \"tf_target\", \"tf_expression\", \"target_expression\"]\n ]\n\n return network", "def create_operator(statement_a, operator, statement_b):\n return S(statement_a=statement_a, operator=operator, statement_b=statement_b)", "def create_dummy_expression(extra_constraints=None):\n return expression.Expression(\n basic_expression.BasicExpression([]),\n basic_expression.BasicExpression([]), extra_constraints)", "def preprocess(expression_string: str):\n return expression_string.replace(\"(\", \"(X\")", "def indirectedTransactionFactory(*a, **b):\n return self.store.newTransaction(*a, **b)", "def create_contract(symbol, sec_type, exch, prim_exch, curr):\n contract = Contract()\n contract.m_symbol = symbol\n contract.m_secType = sec_type\n contract.m_exchange = exch\n contract.m_primaryExch = prim_exch\n contract.m_currency = curr\n return contract", "def create_contract(symbol, sec_type, exch, prim_exch, curr):\n contract = Contract()\n contract.m_symbol = symbol\n contract.m_secType = sec_type\n contract.m_exchange = exch\n contract.m_primaryExch = prim_exch\n contract.m_currency = curr\n return contract", "def instantiate(formula, instantiation_map):\n if is_constant(formula.root):\n return Formula(formula.root)\n\n if is_variable(formula.root):\n return instantiation_map[formula.root] if formula.root in instantiation_map else formula\n\n first = instantiate(formula.first, instantiation_map)\n if is_unary(formula.root):\n return Formula(formula.root, first)\n\n second = instantiate(formula.second, instantiation_map)\n if is_binary(formula.root):\n return Formula(formula.root, first, second)\n\n return Formula(formula.root, first, second, instantiate(formula.third, instantiation_map))", "def atom_to_plush_gene(self, atom):\n is_literal = False\n proc_atom = None\n if callable(atom):\n # If it is callable, then it is likely a function that will\n # produce a literal.\n fn_element = atom()\n if callable(fn_element): # It's another function!\n proc_atom = fn_element()\n else:\n proc_atom = fn_element\n is_literal = True\n else:\n # If atom is not callable, then it is the instruction/literal.\n proc_atom 
= atom\n is_literal = not isinstance(proc_atom, Instruction)\n\n return Gene(proc_atom, is_literal, self.generate_close_count())", "def create_dummy_expression(extra_constraints=None):\n return expression.ConstrainedExpression(\n expression.ExplicitExpression(\n basic_expression.BasicExpression([]),\n basic_expression.BasicExpression([])),\n extra_constraints=extra_constraints)", "def compile(expression):", "def prep_equation(self):\n \n # This transforms the equation into an expression for sympy.\n prepped_equation = self.equation.replace(\"=\", \"-(\") + \")\"\n\n # This transforms the equation string into a sympy-readable equation.\n transformations = standard_transformations + (implicit_multiplication_application,)\n prepped_equation = parse_expr(prepped_equation, transformations=transformations)\n\n return prepped_equation", "def create_dummy_expression(value):\n basic_expression_object = basic_expression.BasicExpression(\n [term.TensorTerm(value)])\n return expression.ExplicitExpression(basic_expression_object,\n basic_expression_object)" ]
[ "0.5734446", "0.5312159", "0.5248564", "0.5139162", "0.5127595", "0.51051027", "0.5091432", "0.5089576", "0.49904886", "0.49830782", "0.49765256", "0.49529302", "0.49464598", "0.4907458", "0.485304", "0.485304", "0.48462987", "0.4785017", "0.47622085", "0.47553635", "0.47486255", "0.47308165", "0.4727981", "0.4727981", "0.469839", "0.46872643", "0.46604693", "0.45996132", "0.45918143", "0.45905444" ]
0.7372538
0
Convert a BoolOp node (`and`, `or`) to a corresponding proposition factory
def visit_BoolOp(self, node: ast.BoolOp) -> ast.AST: # 1. wrap each operand with a lambda function operands = [] for operand in node.values: o = self.visit(operand) if self.is_proposition_factory(o): # if the operand is already an temporal requirement factory, keep it operands.append(self.visit(o)) continue # if the operand is not an temporal requirement factory, make it an AP closure = self._create_atomic_proposition_factory(o) operands.append(closure) # 2. create a function call and pass operands boolOpToFunctionName = { ast.Or: "PropositionOr", ast.And: "PropositionAnd", } funcId = boolOpToFunctionName.get(type(node.op)) newNode = ast.Call( func=ast.Name(id=funcId, ctx=ast.Load()), # pass a list of operands as the first argument args=[ast.copy_location(ast.List(elts=operands, ctx=ast.Load()), node)], keywords=[], ) return ast.copy_location(newNode, node)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def constraint_from_bool_op(self, node):\n is_and = isinstance(node.op, ast.And)\n out_constraints = []\n with self.scope.subscope():\n values = []\n left = node.values[:-1]\n for condition in left:\n new_value, constraint = self.constraint_from_condition(condition)\n out_constraints.append(constraint)\n if is_and:\n self.add_constraint(condition, constraint)\n values.append(constrain_value(new_value, FALSY_CONSTRAINT))\n else:\n self.add_constraint(condition, constraint.invert())\n values.append(constrain_value(new_value, TRUTHY_CONSTRAINT))\n right_value, constraint = self._visit_possible_constraint(node.values[-1])\n values.append(right_value)\n out_constraints.append(constraint)\n constraint_cls = AndConstraint if is_and else OrConstraint\n constraint = reduce(constraint_cls, reversed(out_constraints))\n return unite_values(*values), constraint", "def convert_broadcast_logical_and(node, **kwargs):\n return create_basic_op_node('And', node, kwargs)", "def convert_broadcast_logical_or(node, **kwargs):\n return create_basic_op_node('Or', node, kwargs)", "def bool_ops(self, ctx: Context) -> Iterator[AnnotatedExpression]:\n for left, right in combinations(ctx.expressions_by_type(bool), 2):\n yield AnnotatedExpression(\n ast.BoolOp(op=ast.And(), values=[left.expr, right.expr]),\n TypeAnnotation(bool),\n )\n yield AnnotatedExpression(\n ast.BoolOp(op=ast.Or(), values=[left.expr, right.expr]),\n TypeAnnotation(bool),\n )", "def _(obj: And, visitor: BooleanExpressionVisitor[T]) -> T:\n left_result: T = visit(obj.left, visitor=visitor)\n right_result: T = visit(obj.right, visitor=visitor)\n return visitor.visit_and(left_result=left_result, right_result=right_result)", "def pl_true(exp, model={}):\n op, args = exp.op, exp.args\n if exp == TRUE:\n return True\n elif exp == FALSE:\n return False\n elif is_prop_symbol(op):\n return model.get(exp)\n elif op == '~':\n p = pl_true(args[0], model)\n if p is None: return None\n else: return not p\n elif op == '|':\n result = False\n for arg in args:\n p = pl_true(arg, model)\n if p is True: return True\n if p is None: result = None\n return result\n elif op == '&':\n result = True\n for arg in args:\n p = pl_true(arg, model)\n if p is False: return False\n if p is None: result = None\n return result\n p, q = args\n if op == '>>':\n return pl_true(~p | q, model)\n elif op == '<<':\n return pl_true(p | ~q, model)\n pt = pl_true(p, model)\n if pt is None: return None\n qt = pl_true(q, model)\n if qt is None: return None\n if op == '<=>':\n return pt == qt\n elif op == '^':\n return pt != qt\n else:\n raise ValueError, \"illegal operator in logic expression\" + str(exp)", "def convert_binary_logical_op(g, op, block):\n\n ipt0 = g.get_node(op.input(\"X\")[0])\n ipt1 = g.get_node(op.input(\"Y\")[0])\n op_func = get_relay_op(op.type)\n out = op_func(ipt0, ipt1)\n g.add_node(op.output(\"Out\")[0], out)", "def get_bprop_logical_and(self):\n\n def bprop(x, y, out, dout):\n return zeros_like(x), zeros_like(y)\n return bprop", "def to_bool(data, true_label, **kwargs):\n return Component(\n \"ToBool\",\n arguments={\n 'data': Component.of(data),\n 'true_label': Component.of(true_label)\n },\n options={\n \n },\n constraints=kwargs)", "def _op(\n x: Union[bool, dts.Boolean, tps.BooleanValue],\n y: Union[bool, dts.Boolean, tps.BooleanValue],\n ) -> T:", "def test_expression_and_or(self):\n\n # Checks several examples with \"and\" and \"or\" operators\n expression = BooleanExpression(\"NORMAL\", or_(and_(models.Network.label != \"network_3\", 
models.Network.multi_host == True), models.Network.label == \"network_3\"))\n value = expression.evaluate(KeyedTuple([{\"label\": \"network_3\", \"multi_host\": False}], [\"networks\"]))\n self.assertTrue(value, \"complex expression (1)\")\n\n expression = BooleanExpression(\"NORMAL\", or_(and_(models.Network.label != \"network_3\", models.Network.multi_host == True), models.Network.label == \"network_3\"))\n value = expression.evaluate(KeyedTuple([{\"label\": \"network_2\", \"multi_host\": True}], [\"networks\"]))\n self.assertTrue(value, \"complex expression (2)\")\n\n expression = BooleanExpression(\"NORMAL\", or_(and_(models.Network.label != \"network_3\", models.Network.multi_host == True), models.Network.label == \"network_3\"))\n value = expression.evaluate(KeyedTuple([{\"label\": \"network_2\", \"multi_host\": False}], [\"networks\"]))\n self.assertFalse(value, \"complex expression (3)\")", "def get_bprop_logical_or(self):\n\n def bprop(x, y, out, dout):\n return zeros_like(x), zeros_like(y)\n return bprop", "def evalBoolean(tree):\n # check if children the children is a \"or\" or a \"and\" tokken\n if (tree.children[0].data == \"or\"):\n return evalBoolean(tree.children[0].children[0]) or evalBoolean(tree.children[0].children[1])\n if (tree.children[0].data) == \"and\":\n return evalBoolean(tree.children[0].children[0]) and evalBoolean(tree.children[0].children[1])\n \n # set var1\n if(tree.children[0].data == \"integer\"):\n var1 = evalInteger(tree.children[0])\n elif(tree.children[0].data == \"variable\"):\n var1 = getValue(tree.children[0].children[0].value)\n\n # set var2\n if(tree.children[2].data == \"integer\"):\n var2 = evalInteger(tree.children[2])\n elif(tree.children[2].data == \"variable\"):\n var2 = getValue(tree.children[2].children[0].value)\n\n if(tree.children[1].children[0].data == \"greater\"):\n return var1 > var2\n if(tree.children[1].children[0].data == \"less\"):\n return var1 < var2\n if(tree.children[1].children[0].data == \"equals\"):\n return var1 == var2\n if(tree.children[1].children[0].data == \"nequal\"):\n return var1 != var2\n\n print(\"ERROR : UNEXPECTED TOKKEN\")\n return False", "def parse_program(program, bools_dict):\n bools = {'A':True,'B':True,'C':True,'D':True,'T':False,'J':False}\n bools.update(bools_dict)\n print(bools)\n for instruction in program:\n parts = instruction.split(' ')\n if parts[0] == 'WALK':\n return\n instr, arg0, arg1 = parts[0], parts[1], parts[2]\n if instr == 'NOT':\n new_bool = not bools[arg0]\n print(f'{instruction}: {arg1}={new_bool}')\n elif instr == 'OR':\n new_bool = bools[arg0] or bools[arg1]\n print(f'{instruction} : {arg1}={new_bool}')\n elif instr == 'AND':\n new_bool = bools[arg0] and bools[arg1]\n print(f'{instruction}: {arg1}={new_bool}')\n bools[arg1] = new_bool", "def to_not_and_or(formula: Formula) -> Formula:\r\n # Task 3.5\r\n\r\n map_operators = {'->': Formula.parse('(~p|q)'),\r\n '+': Formula.parse('((p&~q)|(~p&q))'),\r\n '<->': Formula.parse('~((p&~q)|(~p&q))'),\r\n '-&': Formula.parse('~(p&q)'),\r\n '-|': Formula.parse('~(p|q)'),\r\n 'F': Formula.parse('(p&~p)'),\r\n 'T': Formula.parse('~(p&~p)')}\r\n return formula.substitute_operators(map_operators)", "def _build_logical_op(op):\n def logical_op(self, other):\n \"\"\"`plist` logical operation. **Logical operations perform set operations on `plist`s.**\n\n **IMPORTANT:** `plist` logical operations between two `plist`s perform `set` operations\n on the two `plist`s. 
Logical operations between a `plist` and any other type attempts\n to perform that operation on the values in the `plist` and `other` itself.\n\n `logical_op` is not callable directly from `plist`. It implements the various\n python logical operations: `&`, `|`, `^`, etc. The logical operators\n can be called directly with their corresponding 'magic' functions,\n `plist.__and__`, `plist.__or__`, `plist.__xor__`, etc., but are generally just\n called implicitly.\n\n Examples:\n ```python\n foos = plist([pdict(foo=0, bar=0), pdict(foo=1, bar=1), pdict(foo=2, bar=0)])\n (foos.bar == 0).baz = 3 + (foos.bar == 0).foo\n (foos.bar == 1).baz = 6\n\n assert (((foos.bar == 0) & (foos.baz == 3)).aslist() ==\n [{'baz': 3, 'foo': 0, 'bar': 0}])\n\n assert (((foos.bar == 0) | (foos.baz == 3)).aslist() ==\n [{'bar': 0, 'baz': 3, 'foo': 0}, {'bar': 0, 'baz': 5, 'foo': 2}])\n\n assert (((foos.bar == 0) ^ (foos.baz == 3)).aslist() ==\n [{'bar': 0, 'baz': 5, 'foo': 2}])\n\n by_bar = foos.bar.groupby()\n\n assert (((by_bar.bar == 0) & (by_bar.bar == 1)).aslist() ==\n [[], []])\n assert (((by_bar.bar == 0) & (by_bar.bar <= 1)).aslist() ==\n [[{'bar': 0, 'baz': 3, 'foo': 0}, {'bar': 0, 'baz': 5, 'foo': 2}], []])\n\n assert (((by_bar.baz == 3) | (by_bar.baz == 6)).aslist() ==\n [[{'bar': 0, 'baz': 3, 'foo': 0}], [{'bar': 1, 'baz': 6, 'foo': 1}]])\n assert (((by_bar.baz == 6) | (by_bar.baz <= 4)).aslist() ==\n [[{'bar': 0, 'baz': 3, 'foo': 0}], [{'bar': 1, 'baz': 6, 'foo': 1}]])\n\n assert (((by_bar.baz == 3) ^ (by_bar.baz == 6)).aslist() ==\n [[{'bar': 0, 'baz': 3, 'foo': 0}], [{'bar': 1, 'baz': 6, 'foo': 1}]])\n assert (((by_bar.baz == 6) ^ (by_bar.bar <= 4)).aslist() ==\n [[{'bar': 0, 'baz': 3, 'foo': 0}, {'bar': 0, 'baz': 5, 'foo': 2}], []])\n ```\n\n Logical operations can be applied element-wise if `other` is not a `plist`:\n ```python\n assert ((foos.baz & 1).aslist() ==\n [1, 0, 1])\n assert ((by_bar.baz | 1).aslist() ==\n [[3, 5], [7]])\n assert ((1 ^ by_bar.baz).aslist() ==\n [[2, 4], [7]])\n ```\n\n Args:\n other: Object to perform the logical operation with.\n\n Returns:\n New `plist`, merging `self` and `other` according to the operation provided\n to `_build_logical_op`.\n \"\"\"\n if isinstance(other, plist):\n if len(self) == len(other):\n try:\n return plist([op(x, o) for x, o in zip(self, other)])\n except Exception:\n pass\n self_flat = self.ungroup(-1)\n other_flat = other.ungroup(-1)\n ids = op(set([id(x) for x in self_flat]),\n set([id(x) for x in other_flat]))\n if op is operator.__and__ or op is operator.__iand__:\n return plist([x for x in self_flat if id(x) in ids]) # Don't pass root -- we are uprooting\n else:\n return plist(\n [ids.remove(id(x)) or x for x in self_flat if id(x) in ids] +\n [ids.remove(id(x)) or x for x in other_flat if id(x) in ids]\n ) # Don't pass root -- we are uprooting\n else:\n return plist([op(x, other) for x in self], root=self.__root__)\n\n return logical_op", "def _parse_boolean(\n value_expr: str, target_expr: str, ref_parts: List[str],\n auto_id: mapry.py.generate.AutoID) -> str:\n uid = auto_id.next_identifier()\n\n return _PARSE_BOOLEAN_TPL.render(\n uid=uid,\n value_expr=value_expr,\n ref_parts=ref_parts,\n target_expr=target_expr)", "def bool_op(\n self,\n opstring: str,\n precedence: int = 0,\n python_impl: Optional[Callable[..., Any]] = None,\n ) -> Callable[[Any], Operators]:\n return self.op(\n opstring,\n precedence=precedence,\n is_comparison=True,\n python_impl=python_impl,\n )", "def simplify_and_node(parse_str=None, location=None, 
tokens=None):\n if len(tokens) == 1:\n return tokens[0]\n else:\n return AndNode(tokens.asList())", "def _to_ops(from_op):\n\n for to_op in OPERATORS:\n if to_op and isinstance(from_op, ast.Not):\n # 'not' can only be removed but not replaced with\n # '+', '-' or '~' b/c that may lead to strange results\n pass\n elif isinstance(from_op, ast.UAdd) and (to_op is None):\n # '+1' => '1' yields equivalent mutations\n pass\n else:\n yield to_op", "def visit_and(self, left_result: T, right_result: T) -> T:", "def veval_ast_bool_op(astc : 'AstContext', local_field : 'values.Field', graph : 'graphs.Graph', context : 'functions.VEvalContext' = None):\n assert(isinstance(astc.nast, gast.gast.BoolOp))\n lineprop = utils.LineProperty(astc.lineno, astc.filename)\n\n multiaryop = nodes.MultiaryOpType.Unknown\n if isinstance(astc.nast.op, gast.And):\n multiaryop = nodes.MultiaryOpType.And\n if isinstance(astc.nast.op, gast.Or):\n multiaryop = nodes.MultiaryOpType.Or\n\n values_list = [veval_ast(astc.c(value_), local_field, graph, context) for value_ in astc.nast.values]\n values_list_value = [utils.try_get_value(value_, 'multiary', lineprop) for value_ in values_list]\n\n node = nodes.NodeMultiaryOp(values_list_value, multiaryop)\n\n ret_value = veval_multiary.veval(multiaryop, values_list_value)\n node.set_outputs([ret_value])\n graph.add_node(node)\n\n return values.Object(ret_value)", "def convert_broadcast_logical_xor(node, **kwargs):\n return create_basic_op_node('Xor', node, kwargs)", "def simplify_logical_and_not(node, leaf_transform):\n # the leaf_function transforms a leaf by extracting\n # its single input (decapsulating LogicalNot) and \n # calling leaf_transform on this input\n def leaf_function(op):\n return leaf_transform(op.get_input(0))\n\n result = simplify_logical_tree(node,\n op_predicate=(lambda op: isinstance(op, LogicalAnd)),\n leaf_predicate=(lambda op: isinstance(op, LogicalNot)),\n result_ctor=lambda op, op_list: LogicalNot(logical_reduce(\n list(map(leaf_function, op_list)),\n LogicalOr,\n precision=node.get_precision()\n ), precision=node.get_precision())\n )\n forward_attributes(node, result)\n return result", "def test_for_bool(self, parse_input_mocked_metadata):\n bb = parse_input_mocked_metadata(\n \"for bool b in [True, False]\\n\\tUnaryGate(b, 0) | 0\"\n )\n assert np.all(\n bb._forvar[\"b\"] == np.array([True, False])\n )", "def _(obj: Or, visitor: BooleanExpressionVisitor[T]) -> T:\n left_result: T = visit(obj.left, visitor=visitor)\n right_result: T = visit(obj.right, visitor=visitor)\n return visitor.visit_or(left_result=left_result, right_result=right_result)", "def __and__(self, other: Any) -> Operators:\n return self.operate(and_, other)", "def _logical_and(*args):\n args_ = [_static_value(x) for x in args]\n if any(x is not None and not bool(x) for x in args_):\n return constant_op.constant(False)\n if all(x is not None and bool(x) for x in args_):\n return constant_op.constant(True)\n if len(args) == 2:\n return math_ops.logical_and(*args)\n return math_ops.reduce_all(args)", "def test_predicate9(self):\n xpb = XPathBuilder()\n xp = xpb.foo.bar[(xpb.attr('foo') == 'bar') | xpb.foobar]\n exp = '/foo/bar[@foo = \"bar\" or /foobar]'\n self.assertEqual(xp.tostring(), exp)", "def test_evaluate_and_expression(self):\n value = self.evaluate_common(\"false and false\")\n self.assertTrue(\n value.type_code == edm.SimpleType.Boolean, \"Expected Boolean\")\n self.assertTrue(value.value is False, \"Expected False\")\n try:\n value = self.evaluate_common(\"false and 0\")\n 
self.fail(\"Integer promotion to Boolean\")\n except odata.EvaluationError:\n pass\n value = self.evaluate_common(\"false and true\")\n self.assertTrue(value.value is False, \"Expected False\")\n value = self.evaluate_common(\"true and false\")\n self.assertTrue(value.value is False, \"Expected False\")\n value = self.evaluate_common(\"true and true\")\n self.assertTrue(value.value is True, \"Expected True\")\n value = self.evaluate_common(\"true and null\")\n self.assertTrue(\n value.type_code == edm.SimpleType.Boolean, \"Expected Boolean\")\n self.assertTrue(value.value is False, \"Expected False\")\n value = self.evaluate_common(\"false and null\")\n self.assertTrue(value.value is False, \"Expected False\")\n value = self.evaluate_common(\"false and false\")\n self.assertTrue(value.value is False, \"Expected False\")" ]
[ "0.7032627", "0.6538599", "0.6263333", "0.62044555", "0.58645797", "0.58232236", "0.58098066", "0.57785386", "0.56813306", "0.56503755", "0.5618404", "0.55552155", "0.55338264", "0.54900324", "0.5486236", "0.5473619", "0.54251933", "0.5424429", "0.54230464", "0.53802073", "0.53562266", "0.53231174", "0.53201216", "0.5290002", "0.5284267", "0.52811885", "0.5269472", "0.52568567", "0.524819", "0.5238259" ]
0.7547726
0
Create a list of statements that define the precondition and invariant checkers
def makeGuardCheckers( self, args: ast.arguments, preconditions: List[s.Precondition], invariants: List[s.Invariant], ) -> List[ast.AST]: # Statements that check preconditions are satisfied preconditionChecks = [] for precondition in preconditions: call = ast.Call( ast.Name("PreconditionViolation", loadCtx), [ ast.Name(behaviorArgName, loadCtx), ast.Constant(precondition.lineno), ], [], ) throw = ast.Raise(exc=call, cause=None) check = ast.If( test=ast.UnaryOp(ast.Not(), self.visit(precondition.value)), body=[throw], orelse=[], ) chained_throw = ast.Raise(exc=call, cause=ast.Name("e", loadCtx)) catch = ast.ExceptHandler( type=ast.Name("RejectionException", loadCtx), name="e", body=[chained_throw], ) wrapped_check = ast.Try( body=[check], handlers=[catch], orelse=[], finalbody=[] ) preconditionChecks.append(ast.copy_location(wrapped_check, precondition)) definePreconditionChecker = ast.FunctionDef( checkPreconditionsName, args, preconditionChecks or [ast.Pass()], [], None ) # Statements that check invariants are satisfied invariantChecks = [] for invariant in invariants: call = ast.Call( ast.Name("InvariantViolation", loadCtx), [ast.Name(behaviorArgName, loadCtx), ast.Constant(invariant.lineno)], [], ) throw = ast.Raise(exc=call, cause=None) check = ast.If( test=ast.UnaryOp(ast.Not(), self.visit(invariant.value)), body=[throw], orelse=[], ) chained_throw = ast.Raise(exc=call, cause=ast.Name("e", loadCtx)) catch = ast.ExceptHandler( type=ast.Name("RejectionException", loadCtx), name="e", body=[chained_throw], ) wrapped_check = ast.Try( body=[check], handlers=[catch], orelse=[], finalbody=[] ) invariantChecks.append(ast.copy_location(wrapped_check, invariant)) defineInvariantChecker = ast.FunctionDef( checkInvariantsName, args, invariantChecks or [ast.Pass()], [], None ) # assemble function body preamble preamble = [ definePreconditionChecker, defineInvariantChecker, ] return preamble
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def conditions():\n pass", "def separatePreconditionsAndInvariants(\n self, header: List[Union[s.Precondition, s.Invariant]]\n ) -> Tuple[List[s.Precondition], List[s.Invariant]]:\n preconditions: List[s.Precondition] = []\n invariants: List[s.Invariant] = []\n for n in header:\n if isinstance(n, s.Precondition):\n preconditions.append(n)\n elif isinstance(n, s.Invariant):\n invariants.append(n)\n else:\n assert False, f\"Unexpected node type {n.__class__.__name__}\"\n return (preconditions, invariants)", "def verifications(sv):\r\n verif_no_special(sv) # check no special character remains (fatal error) \r\n verif_functions(sv) # check if all functions are defined (fatal error)\r\n verif_similar_names(sv) # check similar names for typing errors (warning)\r\n verif_stochastic(sv) # make sure stochastic objects are not used in expressions\r\n verif_unused(sv) # check physical and logical configurations match \r", "def testConditionReasons(self):\n \n state = State.from_problem(self.prob)\n\n relevantVars = []\n drive = self.dom.get_action(\"drive\")\n with drive.instantiate([\"agent\", \"tru1\", \"apt1\"], self.prob):\n self.assert_(state.is_satisfied(drive.precondition, relevantVars))\n\n relevantVars = set(relevantVars)\n \n s1 = StateVariable(self.prob.functions[\"city-of\"][0], [self.prob[\"pos1\"]])\n s2 = StateVariable(self.prob.functions[\"city-of\"][0], [self.prob[\"apt1\"]])\n s3 = StateVariable(self.prob.functions[\"location-of\"][0], [self.prob[\"tru1\"]])\n \n self.assertEqual(len(relevantVars), 3)\n self.assert_(s1 in relevantVars)\n self.assert_(s2 in relevantVars)\n self.assert_(s3 in relevantVars)", "def _assert_preconditions(preconditions: List[List[Contract]], resolved_kwargs: Mapping[str, Any],\n func: CallableT) -> Optional[BaseException]:\n exception = None # type: Optional[BaseException]\n\n # Assert the preconditions in groups. 
This is necessary to implement \"require else\" logic when a class\n # weakens the preconditions of its base class.\n\n for group in preconditions:\n exception = None\n\n for contract in group:\n assert exception is None, \"No exception as long as pre-condition group is satisfiable.\"\n\n condition_kwargs = select_condition_kwargs(contract=contract, resolved_kwargs=resolved_kwargs)\n\n if inspect.iscoroutinefunction(contract.condition):\n raise ValueError(\"Unexpected coroutine (async) condition {} for a sync function {}.\".format(\n contract.condition, func))\n\n check = contract.condition(**condition_kwargs)\n\n if inspect.iscoroutine(check):\n raise ValueError(\"Unexpected coroutine resulting from the condition {} for a sync function {}.\".format(\n contract.condition, func))\n\n if not_check(check=check, contract=contract):\n exception = _create_violation_error(contract=contract, resolved_kwargs=resolved_kwargs)\n break\n\n # The group of preconditions was satisfied, no need to check the other groups.\n if exception is None:\n break\n\n return exception", "def generate_constraints():\n return list(chain(collect_rows(), collect_columns(), collect_blocks()))", "def add_constraints(self, constraints):\n for const in constraints:\n self.add_constraint(const.type, const.column, const.check_clause)", "def add_precondition_to_checker(checker: CallableT, contract: Contract) -> None:\n # Add the precondition to the list of preconditions stored at the checker\n assert hasattr(checker, \"__preconditions__\")\n preconditions = getattr(checker, \"__preconditions__\")\n assert isinstance(preconditions, list)\n assert len(preconditions) <= 1, \\\n (\"At most a single group of preconditions expected when wrapping with a contract checker. \"\n \"The preconditions are merged only in the DBC metaclass. \"\n \"The current number of precondition groups: {}\").format(len(preconditions))\n\n if len(preconditions) == 0:\n # Create the first group if there is no group so far, i.e. this is the first decorator.\n preconditions.append([])\n\n preconditions[0].append(contract)", "def check_all_constraints(csp) :\n constraints=csp.get_all_constraints()\n for constraint in constraints:\n var1 = constraint.var1\n var2 = constraint.var2\n val1=csp.get_assigned_value(var1)\n val2=csp.get_assigned_value(var2)\n if val1!=None and val2!=None:\n if not constraint.check(val1,val2):\n return False\n return True", "def __init__(self, clause_list):\n raw_clauses = {Clause(c) for c in clause_list}\n # We do some initial sanity checking. \n # If a clause is empty, then it\n # cannot be satisfied, and the entire problem is False.\n # If a clause is true, it can be dropped. 
\n self.clauses = set()\n for c in raw_clauses:\n if c.isfalse:\n # Unsatisfiable.\n self.clauses = {c}\n break\n elif c.istrue:\n pass\n else:\n self.clauses.add(c)", "def solution(i, literals, clauses):\n valuation_list = binary_list(i, literals)\n num_true_clauses = 0\n\n for c in clauses:\n num_true_clauses += is_clause_satisfied(valuation_list, c)\n\n return valuation_list, num_true_clauses", "def check_all_constraints(csp) :\n\n for constraint in csp.get_all_constraints():\n assigned1 = csp.get_assigned_value(constraint.var1)\n assigned2 = csp.get_assigned_value(constraint.var2)\n check = constraint.check(assigned1,assigned2)\n if check==False and assigned1!=None and assigned2!=None:\n return False \n return True", "def clauses(self):\n for state in self.states:\n yield Xor(\n AssertStateVariable(self, state),\n DeassertStateVariable(self, state))\n for other_state in (self.states - set([state])):\n yield Implies(\n AssertStateVariable(self, state),\n DeassertStateVariable(self, other_state))", "def test_available(self):\n feature_guard = _make_requires(True, \"Error text\")\n results = []\n\n @feature_guard\n def inner():\n results.append(True)\n return True\n\n assert inner() is True\n assert [True] == results", "def check_constraints(Px,pk1,pk2,mu1,mu2,mu3):\n # Constraint 1: Check polarisation basis probabilities are valid.\n if (Px >= 1.0 or Px <= 0.0):\n print(\"Error! Constraint 1 < Px < 0: \", Px)\n exit(1)\n # Constraint 2: Check probability of pulse with intensity 1 is in bounds.\n if (pk1 >= 1.0 or pk1 <= 0.0):\n print(\"Error! Constraint 1 < pk1 < 0: \", pk1)\n exit(1)\n # Constraint 3: Check probability of pulse with intensity 2 is in bounds.\n if (pk2 >= 1.0 or pk2 <= 0.0):\n print(\"Error! Constraint 1 < pk2 < 0: \", pk2)\n exit(1)\n # Constraint 4: Check sum of probabilities for intensity 1 & 2 are less\n # than unity.\n if ((pk1 + pk2) >= 1.0):\n print(\"Error! Constraint (pk1 + pk2) < 1: \", pk1 + pk2)\n exit(1)\n # Constraint 5: Check value of intensity 1 is in bounds.\n if (mu1 >= 1.0 or mu1 <= 0.0):\n print(\"Error! Constraint 1 < mu1 < 0: \", mu1)\n exit(1)\n # Constraint 6: Check value of intensity 2 is in bounds.\n if (mu2 >= 1.0 or mu2 <= 0.0):\n print(\"Error! Constraint 1 < mu2 < 0: \", mu2)\n exit(1)\n # Constraint 7: Check values of all intensities are in bounds.\n if ((mu1 - mu3) <= mu2):\n print(\"Error! Constraint (mu1-mu3) > mu2: \", (mu1-mu3), mu2)\n exit(1)\n # Constraint 8: Check values of intensities 2 & 3 are in bounds.\n if (mu2 <= mu3):\n print(\"Error! 
Constraint mu2 > mu3: \", mu2, mu3)\n exit(1)\n return None", "def test_eval(self):\n # expr and expr\n base = abs_path('./specs/')\n ps = Parser(base + 'script3-6.py', base)\n ps.main(verbose=False)\n self.assertEqual(ps.wrangler.counter, 2)\n\n # expr or expr\n ps.spec['constraints'] = [{\"block\": \"D\", \"condition\": \"a == if or B == b1\"}]\n ps._parse_constraints()\n ps.main(verbose=False)\n self.assertEqual(ps.wrangler.counter, 6)\n\n # expr and (expr or expr)\n ps.spec['constraints'] = [{\"block\": \"D\", \"condition\": \"a == if and (B == b1 or B == b2)\"}]\n ps._parse_constraints()\n ps.main(verbose=False)\n self.assertEqual(ps.wrangler.counter, 4)\n\n # testing !=\n ps.spec['constraints'] = [{\"block\": \"D\", \"condition\": \"a != if\"}]\n ps._parse_constraints()\n ps.main(verbose=False)\n self.assertEqual(ps.wrangler.counter, 4)\n\n # testing >=\n ps.spec['constraints'] = [{\"block\": \"D\", \"condition\": \"a.index >= 1\"}]\n ps._parse_constraints()\n ps.main(verbose=False)\n self.assertEqual(ps.wrangler.counter, 4)\n\n # testing index\n ps.spec['constraints'] = [{\"block\": \"D\", \"condition\": \"b.index == 1\"}]\n ps._parse_constraints()\n ps.main(verbose=False)\n self.assertEqual(ps.wrangler.counter, 4)\n\n # testing option with integer type\n ps.spec['constraints'] = [{\"block\": \"D\", \"condition\": \"b == 0\"}]\n ps._parse_constraints()\n ps.main(verbose=False)\n self.assertEqual(ps.wrangler.counter, 4)\n\n # testing option with float type\n ps.spec['constraints'] = [{\"block\": \"D\", \"condition\": \"b == 1.5\"}]\n ps._parse_constraints()\n ps.main(verbose=False)\n self.assertEqual(ps.wrangler.counter, 4)\n\n # testing unmade decision\n ps.spec['constraints'] = [{\"block\": \"A\", \"condition\": \"b.index == 0\"}]\n ps._parse_constraints()\n ps.main(verbose=False)\n self.assertEqual(ps.wrangler.counter, 0)\n\n # testing if the decision is made when the block depends on a variable\n # inside the block\n ps.spec['constraints'] = [{\"block\": \"B\", \"condition\": \"b.index == 0\"}]\n ps._parse_constraints()\n ps.main(verbose=False)\n self.assertEqual(ps.wrangler.counter, 0)", "def init_constraint_list(self):\n constraints = []\n for row, equ_val, rhs_val in \\\n zip(self.matrix, self.equ_vec, self.rhs_vec):\n\n constraints.append({'type': self.get_eq_type(equ_val),\n 'fun': lambda x: rhs_val - np.dot(row, x)})\n\n bounds = Bounds(self.low_bounds, self.upper_bounds)\n\n return constraints, bounds", "def asp_problem(choices: TextIO, rules: TextIO = None, positives: TextIO = None, main: Union[str, TextIO] = '', outf: TextIO = None):\n if outf is None:\n outf = sys.stdout\n\n def asp_section(name):\n print('\\n' + ('%' * 30) + '\\n%%%% ' + name + '\\n', file=outf)\n\n def asp_comment(txt: str):\n print(ASPenc.comment(txt), file=outf)\n\n actions = set()\n asp_section('Choices')\n asp_comment('constraints in choices are in all possible constraints')\n print(ASPenc.rule(ASPenc.all_constraintsp('C'), [ASPenc.choicep('_', 'C')]), file=outf)\n outf.write('\\n')\n choices2asp(choices, actions, out=outf)\n\n if positives is not None:\n asp_section('Positive')\n asp_comment('positive constraints holds by assumption')\n print(ASPenc.rule(ASPenc.holdsp('C'), [ASPenc.fixedp('C')]), file=outf)\n outf.write('\\n')\n asp_comment('positive constraints constraints in choices are in all possible constraints')\n print(ASPenc.rule(ASPenc.all_constraintsp('C'), [ASPenc.fixedp('C')]), file=outf)\n constraints2asp(positives, actions, outf=outf)\n outf.write('\\n')\n\n asp_section('Actions')\n 
for a in actions:\n print(ASPenc.all_actionsp(ASPenc.const(a)) + '.', file=outf)\n\n asp_section('Declare rules')\n asp_comment(f'selected constraints holds')\n print(ASPenc.rule(ASPenc.holdsp('C'), [ASPenc.selectedp('C')]), file=outf)\n outf.write('\\n')\n if rules is not None:\n rules2asp(rules, outf=outf)\n\n asp_section('Constraints structure')\n max_arity = 3\n for i in range(1, max_arity+1):\n asp_comment(f'Constraints of arity {i}')\n avars = [f'A{j}' for j in range(1, i+1)]\n rhead = ASPenc.constraint_namep('C', 'P', i)\n rbody = [\n ASPenc.all_constraintsp('C'),\n 'C = ' + ASPenc.constraint('P', avars, quote=False),\n ]\n print(ASPenc.rule(rhead, rbody), file=outf)\n for j in range(i):\n rhead = ASPenc.constraint_actionp('C', avars[j])\n print(ASPenc.rule(rhead, rbody), file=outf)\n\n asp_section('Main')\n if isinstance(main, str):\n outf.write(main)\n else:\n outf.write(main.read())\n\n asp_comment('print selected constraints')\n print(r'#show ${predicate_selected}/1.', file=outf)", "def conditions(self):\n return self._separated_constructs(RuleCondition)", "def test_condition_syntax(self):\n\n base = abs_path('./specs/')\n ps = Parser(base+'script3-1.py', base)\n\n spec = {'constraints': [{'block': 'A', 'condition': 'B=b1'}]}\n with self.assertRaises(ParseError):\n read_wrapper(spec, ps)\n\n spec = {'constraints': [{'block': 'A', 'condition': 'B b1'}]}\n with self.assertRaises(ParseError):\n read_wrapper(spec, ps)\n\n spec = {'constraints': [{'block': 'A', 'condition': 'B == 2.5'}]}\n read_wrapper(spec, ps)", "def getChecks(self):\r\n raise AbstractError\r\n return []", "async def _assert_preconditions_async(preconditions: List[List[Contract]],\n resolved_kwargs: Mapping[str, Any]) -> Optional[BaseException]:\n exception = None # type: Optional[BaseException]\n\n # Assert the preconditions in groups. 
This is necessary to implement \"require else\" logic when a class\n # weakens the preconditions of its base class.\n\n for group in preconditions:\n exception = None\n\n for contract in group:\n assert exception is None, \"No exception as long as pre-condition group is satisfiable.\"\n\n condition_kwargs = select_condition_kwargs(contract=contract, resolved_kwargs=resolved_kwargs)\n\n if inspect.iscoroutinefunction(contract.condition):\n check = await contract.condition(**condition_kwargs)\n else:\n check_or_coroutine = contract.condition(**condition_kwargs)\n if inspect.iscoroutine(check_or_coroutine):\n check = await check_or_coroutine\n else:\n check = check_or_coroutine\n\n if not_check(check=check, contract=contract):\n exception = _create_violation_error(contract=contract, resolved_kwargs=resolved_kwargs)\n break\n\n # The group of preconditions was satisfied, no need to check the other groups.\n if exception is None:\n break\n\n return exception", "def violated_constraints(\n self, x: Dict[str, ArrayType], p: Dict[str, ArrayType]\n ) -> Tuple:\n x = self.opt.decision_variables.dict2vec(x)\n p = self.opt.parameters.dict2vec(p)\n\n @dataclass\n class ViolatedConstraint:\n label: str\n ctype: str\n diff: cs.DM\n pattern: cs.DM\n\n def __str__(self):\n return f\"\\n{self.label} [{self.ctype}]:\\n{self.pattern}\\n\"\n\n def __repr__(self):\n info = str(self)\n max_width = max(len(line) for line in info.split(\"\\n\"))\n return \"=\" * max_width + info + \"-\" * max_width + \"\\n\"\n\n @property\n def verbose_info(self):\n info = str(self)\n info += f\"{self.diff}\\n\"\n return info\n\n lin_eq_violated_constraints = []\n for label, sx_var in self.opt.lin_eq_constraints.items():\n fun = cs.Function(\"fun\", [self.opt.x, self.opt.p], [sx_var])\n diff = fun(x, p)\n lin_eq_violated_constraints.append(\n ViolatedConstraint(label, \"lin_eq\", diff, diff >= 0.0)\n )\n\n eq_violated_constraints = []\n for label, sx_var in self.opt.eq_constraints.items():\n fun = cs.Function(\"fun\", [self.opt.x, self.opt.p], [sx_var])\n diff = fun(x, p)\n eq_violated_constraints.append(\n ViolatedConstraint(label, \"eq\", diff, diff >= 0.0)\n )\n\n lin_ineq_violated_constraints = []\n for label, sx_var in self.opt.lin_ineq_constraints.items():\n fun = cs.Function(\"fun\", [self.opt.x, self.opt.p], [sx_var])\n diff = fun(x, p)\n lin_ineq_violated_constraints.append(\n ViolatedConstraint(label, \"lin_ineq\", diff, diff >= 0.0)\n )\n\n ineq_violated_constraints = []\n for label, sx_var in self.opt.ineq_constraints.items():\n fun = cs.Function(\"fun\", [self.opt.x, self.opt.p], [sx_var])\n diff = fun(x, p)\n ineq_violated_constraints.append(\n ViolatedConstraint(label, \"ineq\", diff, diff >= 0.0)\n )\n\n return (\n lin_eq_violated_constraints,\n eq_violated_constraints,\n lin_ineq_violated_constraints,\n ineq_violated_constraints,\n )", "def precondition(self, *args, **kwargs):\n pass", "def initializeConditions(self, conditions):\n try:\n new = self.preprocessConditions(conditions)\n evaluate(new) # check if valid\n self.conditions = conditions\n self.check = new\n except:\n try:\n new = self.preprocessConditions(self.conditions)\n evaluate(new)\n self.check = new\n except:\n self.conditions = \"True\"\n self.check = \"lambda x, y, z=0: True\"", "def cond_actions(clause):\n return cdr(clause)", "def check_precond(self, kb, args):\n # check for positive clauses\n for clause in self.precond_pos:\n if self.substitute(clause, args) not in kb.clauses:\n return False\n # check for negative clauses\n for clause in 
self.precond_neg:\n if self.substitute(clause, args) in kb.clauses:\n return False\n return True", "def check(filename, lines):\n Condition = namedtuple('Condition', ('mandatory', 'fix', 'message'))\n conditions = (\n # doxygen comment in header. [0] to [7]\n Condition(True, True, 'doxygen comments are not from 1st line.'),\n Condition(True, True, '@file is not in 2nd line.'),\n Condition(True, True, '@brief is not in 3rd line.'),\n Condition(True, False, '@author is not mentioned.'),\n Condition(True, False, '@date is not mentioned.'),\n Condition(True, False, '@version is not mentioned.'),\n Condition(True, False, '@remark is not mentioned.'),\n Condition(True, False, 'doxygen comments do not close.'),\n Condition(True, False, 'no blank line after doxygen comment.'),\n # start include guard (only in .hpp files). [9] and [10]\n Condition(False, False, 'wrong place include guard check.'),\n Condition(False, True, 'wrong place include guard start.'),\n # include macro (optional). [11]\n Condition(False, False, '#include is not in correct place.'),\n # start and end namespace ken3. [12] and [13]\n Condition(True, False, 'namespace ken3 does not start.'),\n Condition(True, False, 'namespace ken3 does not end.'),\n # end include guard (only in .hpp files). [14]\n Condition(False, False, 'wrong place include guard end.'),\n )\n\n indices = create_indices(filename, lines)\n assert len(conditions) == len(indices)\n\n last_index = -1\n for index, condition in zip(indices, conditions):\n if index is None:\n if condition.mandatory:\n yield (filename, condition.message)\n else:\n pass\n else:\n if condition.fix and index == (last_index + 1):\n last_index = index\n elif (not condition.fix) and index > last_index:\n last_index = index\n else:\n yield (filename, condition.message)\n\n if filename.endswith('.hpp'):\n if any(indices[i] is None for i in (9, 10, 14)):\n yield (filename, 'not correct include guard.')\n else:\n if any(indices[i] is not None for i in (9, 10, 14)):\n yield (filename, 'not necessary include guard.')\n\n if any('\\t' in line for line in lines):\n yield (filename, 'includes \\\\t.')", "def initial_conditions(self):\n pass", "def check_parameters(\n expr: Variable,\n *conditions: Iterable[Variable],\n msg: str = \"\",\n can_be_replaced_by_ninf: bool = True,\n):\n # pt.all does not accept True/False, but accepts np.array(True)/np.array(False)\n conditions_ = [\n cond if (cond is not True and cond is not False) else np.array(cond) for cond in conditions\n ]\n all_true_scalar = pt.all([pt.all(cond) for cond in conditions_])\n\n return CheckParameterValue(msg, can_be_replaced_by_ninf)(expr, all_true_scalar)" ]
[ "0.5991565", "0.58791965", "0.5860759", "0.5577513", "0.55355036", "0.54914343", "0.5464114", "0.54137397", "0.53726864", "0.5349494", "0.5334554", "0.53180647", "0.53053993", "0.5299661", "0.5256091", "0.52218145", "0.5217142", "0.5212644", "0.52015454", "0.5174639", "0.5165271", "0.51533616", "0.5145138", "0.51258945", "0.51230174", "0.5122288", "0.5116076", "0.5108298", "0.5103268", "0.5081634" ]
0.7096544
0
Given a list of preconditions and invariants, separate the items into a list of preconditions and a list of invariants
def separatePreconditionsAndInvariants( self, header: List[Union[s.Precondition, s.Invariant]] ) -> Tuple[List[s.Precondition], List[s.Invariant]]: preconditions: List[s.Precondition] = [] invariants: List[s.Invariant] = [] for n in header: if isinstance(n, s.Precondition): preconditions.append(n) elif isinstance(n, s.Invariant): invariants.append(n) else: assert False, f"Unexpected node type {n.__class__.__name__}" return (preconditions, invariants)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def makeGuardCheckers(\n self,\n args: ast.arguments,\n preconditions: List[s.Precondition],\n invariants: List[s.Invariant],\n ) -> List[ast.AST]:\n\n # Statements that check preconditions are satisfied\n preconditionChecks = []\n for precondition in preconditions:\n call = ast.Call(\n ast.Name(\"PreconditionViolation\", loadCtx),\n [\n ast.Name(behaviorArgName, loadCtx),\n ast.Constant(precondition.lineno),\n ],\n [],\n )\n throw = ast.Raise(exc=call, cause=None)\n check = ast.If(\n test=ast.UnaryOp(ast.Not(), self.visit(precondition.value)),\n body=[throw],\n orelse=[],\n )\n\n chained_throw = ast.Raise(exc=call, cause=ast.Name(\"e\", loadCtx))\n\n catch = ast.ExceptHandler(\n type=ast.Name(\"RejectionException\", loadCtx),\n name=\"e\",\n body=[chained_throw],\n )\n\n wrapped_check = ast.Try(\n body=[check], handlers=[catch], orelse=[], finalbody=[]\n )\n\n preconditionChecks.append(ast.copy_location(wrapped_check, precondition))\n\n definePreconditionChecker = ast.FunctionDef(\n checkPreconditionsName, args, preconditionChecks or [ast.Pass()], [], None\n )\n\n # Statements that check invariants are satisfied\n invariantChecks = []\n for invariant in invariants:\n call = ast.Call(\n ast.Name(\"InvariantViolation\", loadCtx),\n [ast.Name(behaviorArgName, loadCtx), ast.Constant(invariant.lineno)],\n [],\n )\n throw = ast.Raise(exc=call, cause=None)\n check = ast.If(\n test=ast.UnaryOp(ast.Not(), self.visit(invariant.value)),\n body=[throw],\n orelse=[],\n )\n\n chained_throw = ast.Raise(exc=call, cause=ast.Name(\"e\", loadCtx))\n\n catch = ast.ExceptHandler(\n type=ast.Name(\"RejectionException\", loadCtx),\n name=\"e\",\n body=[chained_throw],\n )\n\n wrapped_check = ast.Try(\n body=[check], handlers=[catch], orelse=[], finalbody=[]\n )\n\n invariantChecks.append(ast.copy_location(wrapped_check, invariant))\n\n defineInvariantChecker = ast.FunctionDef(\n checkInvariantsName, args, invariantChecks or [ast.Pass()], [], None\n )\n\n # assemble function body preamble\n preamble = [\n definePreconditionChecker,\n defineInvariantChecker,\n ]\n return preamble", "def process_pred_list(pred_list: List) -> List:\n result = []\n for item in pred_list:\n item_replace_special_char = item.replace(\"\\n\", \"\")\n meta_data = item_replace_special_char.split(\":\")\n label = meta_data[0]\n confidence = float(meta_data[1].split(\"\\t\")[0].strip(\" \").strip(\"%\")) / 100\n bbox_info = meta_data[2:]\n bbox = get_bbox(bbox_info)\n result.append([label, confidence, bbox])\n return result", "def _assert_preconditions(preconditions: List[List[Contract]], resolved_kwargs: Mapping[str, Any],\n func: CallableT) -> Optional[BaseException]:\n exception = None # type: Optional[BaseException]\n\n # Assert the preconditions in groups. 
This is necessary to implement \"require else\" logic when a class\n # weakens the preconditions of its base class.\n\n for group in preconditions:\n exception = None\n\n for contract in group:\n assert exception is None, \"No exception as long as pre-condition group is satisfiable.\"\n\n condition_kwargs = select_condition_kwargs(contract=contract, resolved_kwargs=resolved_kwargs)\n\n if inspect.iscoroutinefunction(contract.condition):\n raise ValueError(\"Unexpected coroutine (async) condition {} for a sync function {}.\".format(\n contract.condition, func))\n\n check = contract.condition(**condition_kwargs)\n\n if inspect.iscoroutine(check):\n raise ValueError(\"Unexpected coroutine resulting from the condition {} for a sync function {}.\".format(\n contract.condition, func))\n\n if not_check(check=check, contract=contract):\n exception = _create_violation_error(contract=contract, resolved_kwargs=resolved_kwargs)\n break\n\n # The group of preconditions was satisfied, no need to check the other groups.\n if exception is None:\n break\n\n return exception", "def get_conditions(inputs): \n # itertools.product summarizes all combinations of ordered conditions\n # at len = 1 it wraps values in tuples (0,) that confuse the timer below\n if hasattr(inputs[0], '__iter__'):\n return list(product(*inputs))\n else:\n return [[n] if not isinstance(n,(list,tuple)) else n for n in inputs]", "def split_list(items, pred):\n\n thisresult = []\n results = [thisresult]\n for i in items:\n thisresult.append(i)\n if pred(i):\n thisresult = []\n results.append(thisresult)\n return results", "def process_constraints(constraints, params):\n\n with warnings.catch_warnings():\n warnings.filterwarnings(\n \"ignore\", message=\"indexing past lexsort depth may impact performance.\"\n )\n constraints = _apply_consraint_killers(constraints)\n constraints = _process_selectors(constraints, params)\n fixed = apply_fixes_to_external_params(\n params, [c for c in constraints if c[\"type\"] == \"fixed\"]\n )\n constraints = _replace_pairwise_equality_by_equality(constraints, params)\n constraints = _consolidate_equality_constraints(constraints, params)\n\n processed_constraints = []\n for constr in constraints:\n if constr[\"type\"] == \"covariance\":\n processed_constraints.append(\n _process_cov_constraint(constr, params, fixed)\n )\n elif constr[\"type\"] == \"sdcorr\":\n processed_constraints.append(\n _process_sdcorr_constraint(constr, params, fixed)\n )\n elif constr[\"type\"] in [\n \"fixed\",\n \"sum\",\n \"probability\",\n \"increasing\",\n \"equality\",\n ]:\n processed_constraints.append(constr)\n else:\n raise ValueError(\"Invalid constraint type {}\".format(constr[\"type\"]))\n\n check_compatibility_of_constraints(processed_constraints, params, fixed)\n\n return processed_constraints", "def _unpack_pre_snap_posts(wrapper: CallableT) -> Tuple[List[List[Contract]], List[Snapshot], List[Contract]]:\n preconditions = getattr(wrapper, \"__preconditions__\") # type: List[List[Contract]]\n snapshots = getattr(wrapper, \"__postcondition_snapshots__\") # type: List[Snapshot]\n postconditions = getattr(wrapper, \"__postconditions__\") # type: List[Contract]\n\n return preconditions, snapshots, postconditions", "def add_precondition_to_checker(checker: CallableT, contract: Contract) -> None:\n # Add the precondition to the list of preconditions stored at the checker\n assert hasattr(checker, \"__preconditions__\")\n preconditions = getattr(checker, \"__preconditions__\")\n assert isinstance(preconditions, list)\n assert 
len(preconditions) <= 1, \\\n (\"At most a single group of preconditions expected when wrapping with a contract checker. \"\n \"The preconditions are merged only in the DBC metaclass. \"\n \"The current number of precondition groups: {}\").format(len(preconditions))\n\n if len(preconditions) == 0:\n # Create the first group if there is no group so far, i.e. this is the first decorator.\n preconditions.append([])\n\n preconditions[0].append(contract)", "def preprocessConditions(conditions):\n conditions = re.sub(r'&+', ' and ', conditions)\n conditions = re.sub(r'\\|+', ' or ', conditions)\n conditions = re.sub(r'==+', '=', conditions)\n conditions = re.sub(r'(?<![<>])=', '==', conditions)\n conditions = \"lambda x, y, z=0: any([ \" + conditions + \" ])\"\n return conditions", "def _list(self, exprs):\n require(exprs, len(exprs)!=0)\n result = Pair(exprs[-1], [])\n for i in reversed(exprs[:-1]):\n result = Pair(i, result)\n return result", "def split_conjuncts(expr):\n if isinstance(expr, AndExpression):\n conjuncts = split_conjuncts(expr.first) + split_conjuncts(expr.second)\n else:\n conjuncts = [expr]\n return conjuncts", "async def _assert_preconditions_async(preconditions: List[List[Contract]],\n resolved_kwargs: Mapping[str, Any]) -> Optional[BaseException]:\n exception = None # type: Optional[BaseException]\n\n # Assert the preconditions in groups. This is necessary to implement \"require else\" logic when a class\n # weakens the preconditions of its base class.\n\n for group in preconditions:\n exception = None\n\n for contract in group:\n assert exception is None, \"No exception as long as pre-condition group is satisfiable.\"\n\n condition_kwargs = select_condition_kwargs(contract=contract, resolved_kwargs=resolved_kwargs)\n\n if inspect.iscoroutinefunction(contract.condition):\n check = await contract.condition(**condition_kwargs)\n else:\n check_or_coroutine = contract.condition(**condition_kwargs)\n if inspect.iscoroutine(check_or_coroutine):\n check = await check_or_coroutine\n else:\n check = check_or_coroutine\n\n if not_check(check=check, contract=contract):\n exception = _create_violation_error(contract=contract, resolved_kwargs=resolved_kwargs)\n break\n\n # The group of preconditions was satisfied, no need to check the other groups.\n if exception is None:\n break\n\n return exception", "def separate_ranges(ranges, varflags): # for putting each VARTRUE range in its own range list, so that coverage and \"cg_content\" are only for true regions\n newranges=[]; newvarflags=[]\n for rangelist, flaglist in zip(ranges, varflags):\n fr, fv, tr, tv = [], [], [], []\n for r, v in zip(rangelist, flaglist):\n if v=='VARFALSE':\n fr.append(r); fv.append(v) \n if v=='VARTRUE':\n tr.append(r); tv.append(v)\n if fr and fv:\n newranges.append(fr)\n newvarflags.append(fv)\n if tr and tv:\n newranges.append(tr)\n newvarflags.append(tv)\n \n return newranges, newvarflags", "def _split_invariants(\n cls,\n raw_sliced: List[RawFileSlice],\n literals: List[str],\n raw_occurrences: Dict[str, List[int]],\n templated_occurrences: Dict[str, List[int]],\n templated_str: str,\n ) -> Iterator[IntermediateFileSlice]:\n # Calculate invariants\n invariants = [\n literal\n for literal in literals\n if len(raw_occurrences[literal]) == 1\n and len(templated_occurrences[literal]) == 1\n ]\n # Work through the invariants and make sure they appear\n # in order.\n for linv in sorted(invariants, key=len, reverse=True):\n # Any invariants which have templated positions, relative\n # to source positions, which 
aren't in order, should be\n # ignored.\n\n # Is this one still relevant?\n if linv not in invariants:\n continue # pragma: no cover\n\n source_pos, templ_pos = raw_occurrences[linv], templated_occurrences[linv]\n # Copy the list before iterating because we're going to edit it.\n for tinv in invariants.copy():\n if tinv != linv:\n src_dir = source_pos > raw_occurrences[tinv]\n tmp_dir = templ_pos > templated_occurrences[tinv]\n # If it's not in the same direction in the source and template\n # remove it.\n if src_dir != tmp_dir: # pragma: no cover\n templater_logger.debug(\n \" Invariant found out of order: %r\", tinv\n )\n invariants.remove(tinv)\n\n # Set up some buffers\n buffer: List[RawFileSlice] = []\n idx: Optional[int] = None\n templ_idx = 0\n # Loop through\n for raw_file_slice in raw_sliced:\n if raw_file_slice.raw in invariants:\n if buffer:\n yield IntermediateFileSlice(\n \"compound\",\n slice(idx, raw_file_slice.source_idx),\n slice(templ_idx, templated_occurrences[raw_file_slice.raw][0]),\n buffer,\n )\n buffer = []\n idx = None\n yield IntermediateFileSlice(\n \"invariant\",\n offset_slice(\n raw_file_slice.source_idx,\n len(raw_file_slice.raw),\n ),\n offset_slice(\n templated_occurrences[raw_file_slice.raw][0],\n len(raw_file_slice.raw),\n ),\n [\n RawFileSlice(\n raw_file_slice.raw,\n raw_file_slice.slice_type,\n templated_occurrences[raw_file_slice.raw][0],\n )\n ],\n )\n templ_idx = templated_occurrences[raw_file_slice.raw][0] + len(\n raw_file_slice.raw\n )\n else:\n buffer.append(\n RawFileSlice(\n raw_file_slice.raw,\n raw_file_slice.slice_type,\n raw_file_slice.source_idx,\n )\n )\n if idx is None:\n idx = raw_file_slice.source_idx\n # If we have a final buffer, yield it\n if buffer:\n yield IntermediateFileSlice(\n \"compound\",\n slice((idx or 0), (idx or 0) + sum(len(slc.raw) for slc in buffer)),\n slice(templ_idx, len(templated_str)),\n buffer,\n )", "def process(input_list: list) -> dict:\n bag_requirements = {}\n for bag_requirement in input_list:\n outer_bag = bag_requirement.split(' bags ')[0].replace(' ', '_').lower()\n bag_requirements[outer_bag] = {}\n\n sub_bags = bag_requirement.split(' contain ')[1].split(', ')\n for bag in sub_bags:\n if bag and bag.strip(' ') != \"no other bags\":\n bag = bag.lower()\n parsed_bag = re.findall('([0-9]+) ([a-z]+ [a-z]+) ([a-z]+)', bag)\n if parsed_bag:\n parsed_bag = parsed_bag[0]\n n_bags = parsed_bag[0]\n bag_color = parsed_bag[1].replace(' ', '_')\n bag_requirements[outer_bag][bag_color] = n_bags\n return bag_requirements", "def check_if_THE_assumption(assumption: Formula, new_proof_lines: list):\n task_1_Formula = Formula(IMPLICATION_OPERATOR, assumption, assumption) # create 'A->A' formula\n assumption_inference = InferenceRule([], task_1_Formula)\n pivot_index = len(new_proof_lines)\n task_1_proof = prove_implies_self() # proves 'p->p'\n assumption_proof_with_task_1 = prove_instance(task_1_proof, assumption_inference)\n for assumption_index, assumption_line in enumerate(assumption_proof_with_task_1.lines):\n justification_list = []\n if assumption_line.justification:\n justification_list = [a + pivot_index for a in assumption_line.justification]\n\n new_proof_lines.append(\n DeductiveProof.Line(assumption_line.conclusion, assumption_line.rule, justification_list))", "def _list_assert(actual_list, expected_list):\n for actual, expected in itertools.izip_longest(actual_list, expected_list):\n _value_assert(None, actual, expected)", "def weekly_income_preprocess(input_list: list) -> list:\n output = []\n for i in 
range(0, len(input_list)):\n output.append((i, i, input_list[i]))\n return output", "def _consolidate_equality_constraints(constraints, params):\n equality_constraints = [c for c in constraints if c[\"type\"] == \"equality\"]\n other_constraints = [c for c in constraints if c[\"type\"] != \"equality\"]\n\n candidates = [constr[\"index\"] for constr in equality_constraints]\n # drop constraints that just restrict one parameter to be equal to itself\n candidates = [c for c in candidates if len(c) >= 2]\n\n merged = []\n\n while len(candidates) > 0:\n new_candidates = _unite_first_with_all_intersecting_elements(candidates)\n if len(candidates) == len(new_candidates):\n merged.append(candidates[0])\n candidates = candidates[1:]\n else:\n candidates = new_candidates\n\n ordered = []\n for m in merged:\n helper = params.copy()\n helper[\"selected\"] = False\n helper.loc[m, \"selected\"] = True\n ordered.append(helper.query(\"selected\").index)\n\n consolidated = [{\"index\": index, \"type\": \"equality\"} for index in ordered]\n return consolidated + other_constraints", "def preprocess_upgrade_list(self, upgrade_list):\n return [(ed_pkg, able_pkg, [], []) for (ed_pkg, able_pkg) in upgrade_list]", "def validate_and_split_constraints(word, ctx=None, engine=None):\n # TODO make this a node semantic\n if CONSTRAINT_S not in word._data:\n return ([], [], [], set())\n\n comps = [word.verify(ctx=ctx, engine=engine) for word in word._data[CONSTRAINT_S] if isinstance(word, QueryComponent)]\n others = set([word for word in word._data[CONSTRAINT_S] if not isinstance(word, QueryComponent)])\n alphas = []\n betas = []\n sub_binds = []\n for c in comps:\n if c.is_sub_bind_test:\n sub_binds.append(c)\n elif c.is_alpha_test:\n alphas.append(c)\n else:\n betas.append(c)\n\n return (alphas, betas, sub_binds, others)", "def separate_by(self, *criteria):\n def is_a(seq): return all(c(seq) for c in criteria)\n \n def op_separate(s):\n if s is None: return None, None\n return [s for s in s if is_a(s)], [s for s in s if not is_a(s)]\n tuple_array = self.element_wise(op_separate)\n\n return tuple_array.element_wise(lambda x: x[0]), tuple_array.element_wise(lambda x: x[1])", "def _parse_parameter_list(\n parameter_list: abc.Iterable[str],\n normalize_parameter_names: bool = False,\n normalize_parameter_values: bool = True,\n strip_interior_whitespace: bool = False) -> list[tuple[str, str]]:\n parameters = []\n for param in parameter_list:\n param = param.strip()\n if param:\n name, value = param.split('=')\n if strip_interior_whitespace:\n name, value = name.strip(), value.strip()\n if normalize_parameter_names:\n name = name.lower()\n if normalize_parameter_values:\n value = value.lower()\n parameters.append((name, _dequote(value.strip())))\n return parameters", "def parse_requirements(reqs):\n for req in reqs:\n req = req.strip()\n if \"\\n\" in req:\n for inner_req in parse_requirements(req.split(\"\\n\")):\n yield inner_req\n else:\n result = parse_requirement(req)\n if result is not None:\n yield result", "def condition(self, evidence):\n return self.condition2( [x for x in evidence], [evidence[x] for x in evidence] )", "def splitdefines(txt):\n pre = []\n c = []\n for line in txt.split(\"\\n\"):\n if line.startswith(\"#\"):\n pre.append(line)\n else:\n c.append(line)\n return pre, c", "def decompose_expressions(sv): \r\n done=False # iterate until no more change \r\n while not done:\r\n done=True # set to False whenever there is a change \r\n # build expressions from clauses\r\n for nam in list(sv.Object_list): # 
list is modified in loop: use a copy\r\n nod=sv.Object[nam]\r\n li=[]\r\n for c,v in nod.clauses: # explore clauses \r\n k,w=c,v # copy of condition and value (may change)\r\n \r\n # cache condition \r\n if k:\r\n if not (k[0] in [Always, Start]+Glitch_list): # add 'begin' except for [Begin, End, Change, Always, Start]\r\n k=(Begin, k, None) \r\n k=(k[0], create_expression(sv, k[1]), None) # skip one level\r\n if k!=c: done=False # a change has occurred\r\n \r\n #cache value \r\n if w and tree_join(w)!=nam: # do not create circular ref \r\n if w[0] in Glitch_list: # do not cache [Begin, End, Change] \r\n w=(w[0], create_expression(sv, w[1]), None) \r\n elif w[0]==Comma:\r\n w=create_expression(sv, w) # process list \r\n elif ( w[1] and ( w[1][1] or w[1][2]) ) or \\\r\n ( w[2] and ( w[2][1] or w[2][2]) ): # do not cache a single operation \r\n w=(w[0], create_expression(sv, w[1]), create_expression(sv, w[2])) \r\n if w!=v: done=False # a change has occurred\r\n # store result\r\n li+=[(k,w)] # store one clause\r\n \r\n nod.clauses=li # store list of clauses\r", "def organizeTerms_fromOriginal(self):\n self.LHS = []\n\n # LHS: [ ]\n # LHS: [ term1, term2, term3 ]\n # LHS: [ ( ), ( ), ( ) ]\n # LHS: [ ( item1, item2, item3 ), term2, term3 ]\n # _______________ _______________ __\n # LHS: [ ( (obj1, 'attr1'), (obj2, 'attr2'), 20 ), term2, term3 ]\n\n\n # Expand brackets\n def someTermsHaveListItems():\n for term in self._LHS_original:\n if any(isinstance(item, list) for item in term):\n return True\n return False\n\n while someTermsHaveListItems():\n for termIndex, Term in enumerate(self._LHS_original):\n newTerms_toAdd = []\n listItems_inTerm = [item for item in Term if isinstance(item, list)]\n hasListItems = len(listItems_inTerm) > 0\n\n for listItem_inTerm in listItems_inTerm:\n otherItems_inTerm = tuple(item for item in Term if item is not listItem_inTerm) # otherItems_inTerm are all multiplied items\n for term in listItem_inTerm:\n assert isinstance(term, tuple) # isolated term in format (const, [unknowns])\n # coeff unknown addresses unpacked from unknowns list\n newTerms_toAdd.append( otherItems_inTerm + (term[0],) + tuple(unknownAddress for unknownAddress in term[1]) )\n break # process one list item in the term at once\n\n if hasListItems:\n for newTerm_toAdd in reversed(newTerms_toAdd):\n self._LHS_original.insert(termIndex, newTerm_toAdd)\n self._LHS_original.remove(Term)\n break # process one term and break\n # Brackets expanded.\n\n for term in self._LHS_original: # each term is a tuple as all brackets expanded above\n constantFactors = []\n unknownFactors = []\n\n for item in term: # items within the term are to be multiplied\n\n if isinstance(item, tuple):\n # item is an object.attribute address, in form (object, 'attribute')\n assert isinstance(item[1], str)\n\n attribute = getattr_fromAddress(*item)\n if isNumeric(attribute):\n # If the object.attribute has a value, add it to constant factors\n constantFactors.append(attribute)\n else:\n # If the object.attribute does not have a value, it is an unknownFactor\n # TODO - the following check may be accommodated to have equation solved if the high-power term is found in another equation\n assert item not in unknownFactors, 'LinearEquationError: Same unknown appears twice in one term, i.e. a higher power of the unknown encountered - not a linear equation!'\n unknownFactors.append(item)\n\n elif any(isinstance(item, _type) for _type in [float, int]):\n # item is a number, i.e. 
a coefficient\n assert isNumeric(item)\n constantFactors.append(item)\n\n constantFactor = 1\n for factor in constantFactors:\n constantFactor *= factor\n\n if len(unknownFactors) != 0:\n # term has an unknown, e.g. term is in form of \"6*x\"\n self.LHS.append([constantFactor, unknownFactors])\n else:\n # term does not have an unknown, e.g. term is in form \"6\"\n self.RHS -= constantFactor # move constant term to the RHS\n\n self._gatherUnknowns()", "def test_pre_order_list(self):\n _expected_list = [23, 5, 13, 57, 103]\n\n _output_list = []\n\n # Call pre_order_list to test\n pre_order_list(self.root, _output_list)\n\n # We just want to test the values\n # so make a list from the list of objects\n _pre_order_output = [x.get_value() for x in _output_list]\n\n assert len(_expected_list) == len(_output_list)\n assert _expected_list == _pre_order_output", "def get_args_static_compound_EPSP_amp():\n syn_group_list = []\n syn_id_lists = []\n syn_condition_list = []\n for syn_group in context.clustered_branch_names:\n this_syn_id_group = context.syn_id_dict[syn_group]\n this_syn_id_lists = []\n for i in range(len(this_syn_id_group)):\n this_syn_id_lists.append(this_syn_id_group[:i+1])\n num_sims = len(this_syn_id_lists)\n for syn_condition in context.syn_conditions:\n syn_id_lists.extend(this_syn_id_lists)\n syn_group_list.extend([syn_group] * num_sims)\n syn_condition_list.extend([syn_condition] * num_sims)\n\n return [syn_id_lists, syn_condition_list, syn_group_list]" ]
[ "0.6050532", "0.53167266", "0.5286975", "0.52497184", "0.51461506", "0.5131494", "0.505154", "0.49972683", "0.49794406", "0.4918598", "0.48932764", "0.4883346", "0.48719564", "0.48354745", "0.480938", "0.4780705", "0.47786057", "0.47761136", "0.47605288", "0.47526938", "0.47485775", "0.47459096", "0.4744983", "0.46618247", "0.46452808", "0.4644135", "0.46371758", "0.4620208", "0.4606362", "0.45771837" ]
0.7246501
0
Generate an invocation of an action, behavior, or scenario.
def generateInvocation(self, node: ast.AST, actionlike, invoker=ast.Yield):
    invokeAction = ast.Expr(invoker(actionlike))
    checker = ast.Attribute(
        ast.Name(behaviorArgName, loadCtx), checkInvariantsName, loadCtx
    )
    args = ast.Starred(
        ast.Attribute(ast.Name(behaviorArgName, loadCtx), "_args", loadCtx), loadCtx
    )
    kwargs = ast.keyword(
        None, ast.Attribute(ast.Name(behaviorArgName, loadCtx), "_kwargs", loadCtx)
    )
    checkInvariants = ast.Expr(
        ast.Call(checker, [ast.Name("self", loadCtx), args], [kwargs])
    )
    ast.copy_location(invokeAction, node)
    ast.copy_location(checkInvariants, node)
    return [
        invokeAction,
        checkInvariants,
    ]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def take_action(self, action):\n getattr(self, action['func'])(\n *action.get('args', ()), \n **action.get('kwargs', {})\n )", "def call_action(self, action):\n pass", "def KB_AgentProgram(KB):\n steps = itertools.count()\n\n def program(percept):\n t = steps.next()\n KB.tell(make_percept_sentence(percept, t))\n action = KB.ask(make_action_query(t))\n KB.tell(make_action_sentence(action, t))\n return action\n\n def make_percept_sentence(self, percept, t):\n return Expr(\"Percept\")(percept, t)\n\n def make_action_query(self, t):\n return expr(\"ShouldDo(action, %d)\" % t)\n\n def make_action_sentence(self, action, t):\n return Expr(\"Did\")(action[expr('action')], t)\n\n return program", "def step(self, action):\n pass", "def step(self, action):\n pass", "def generate(self, namespace: Optional[str], template: str, func: Callable, call_args: Dict) -> str:", "def sample_action(self):\n raise NotImplementedError", "def step(self, action):", "def some_operation(self) -> str:\n\n # Call the factory_method to create a Product object\n product = self.factory_method()\n\n # Now, use the product.\n result = f\"\"\"\n Creator: The same creator's code has just worked with\n {product.operation()}\n \"\"\"\n\n return result", "def _formulate_action(Action, **kwargs):\n\n return Action(**kwargs)", "def DoAction(self, a, args):\r\n return a(*args)", "def execute_action(self, a):\n return self.emulator.next(a)", "def RandomAgentProgram(actions):\n return lambda percept: random.choice(actions)", "def RandomAgentProgram(actions):\n return lambda percept: random.choice(actions)", "def do_action(self, action, a=None, b=None):\n pass", "def act(self, env: FakeEnv, s: ActorStrategy):\n action = env.action_space.sample()\n print(f\"Sampled action shape : {action.shape}\")\n env.step(action)", "def call_action(the_action, raw_kwargs):\n kwargs = {}\n args = []\n \n if \"args\" in raw_kwargs:\n args = raw_kwargs['args']\n del(raw_kwargs['args'])\n \n if \"kwargs\" in raw_kwargs:\n kwargs = raw_kwargs['kwargs']\n del(raw_kwargs['kwargs'])\n \n kwargs.update(raw_kwargs)\n \n return the_action()(*args, **kwargs)", "def call_method(self, action):\n\n\t\tif action[0] in self.methods:\n\t\t\tself.methods[action[0]](action[0:])\n\t\telse:\n\t\t\tself.no_such_method()", "def invocation(username, root_wf_id, wf_id, job_id, job_instance_id, invocation_id):\n dashboard = Dashboard(g.master_db_url, root_wf_id, wf_id)\n invocation = dashboard.get_invocation_information(wf_id, job_id, job_instance_id, invocation_id)\n\n return render_template('workflow/job/invocation/invocation_details.html', root_wf_id=root_wf_id, wf_id=wf_id,\n job_id=job_id, job_instance_id=job_instance_id, invocation_id=invocation_id,\n invocation=invocation)", "def perform_step(self, action):\n pass", "def testCreateFunctionCallAction(self):\n\t\tfca = GeneratorAction(('key',), 'c', 'd')\n\t\tself.failUnless(fca.key == ('key',))\n\t\tself.failUnless(fca.pargs == 'c')\n\t\tself.failUnless(fca.vargs == 'd')", "def _execute(self):\n\n action_name = self.action.lower()\n method_name = action_name.replace('node', 'do')\n method = getattr(self, method_name, None)\n\n if method is None:\n reason = 'Unsupported action: %s' % self.action\n EVENT.error(self, consts.PHASE_ERROR, reason)\n return self.RES_ERROR, reason\n\n return method()", "def perform_action(self, action):\n method_name = action.text().lower()\n method_name = method_name + \"_action\"\n action_method = getattr(self, method_name)\n action_method()", "def takeAction(self, action):\n return 
self.env.step(action)", "def generateAction(self):\n # make a game action\n self.gameEnv.performAction(self.gameNetwork)\n # get the game action\n x, y = self.current\n gameAction = self.game.toSinglePos(x, y)\n # make a piece action\n net = self.toNetInput()\n pieceAction = self.selectAction(self.internalNetwork, net)\n # return the actions\n return pieceAction, gameAction", "def _run_scenario(self, cls, method_name, context, args, config):", "def execute(self):\n idx, c, result_msg, op = self._choose()\n if self.guards[idx]:\n action = self.guards[idx][-1]\n\n # Executing Choice object method\n if isinstance(action, Choice):\n if op==WRITE:\n action.invoke_on_output()\n else:\n action.invoke_on_input(result_msg)\n\n # Executing callback function object\n elif isinstance(action, collections.Callable):\n # Choice function not allowed as callback\n if type(action) == types.FunctionType and action.__name__ == '__choice_fn':\n raise InfoException('@choice function is not instantiated. Please use action() and not just action')\n else:\n # Execute callback function\n if op==WRITE:\n action()\n else:\n action(channel_input=result_msg)\n\n # Compiling and executing string\n elif type(action) == str:\n # Fetch process frame and namespace\n processframe= inspect.currentframe()\n steps = self.execute_frame\n while (steps < 0):\n processframe = processframe.f_back\n steps += 1\n \n # Compile source provided in a string.\n code = compile(action,processframe.f_code.co_filename + ' line ' + str(processframe.f_lineno) + ' in string' ,'exec')\n f_globals = processframe.f_globals\n f_locals = processframe.f_locals\n if op==READ:\n f_locals.update({'channel_input':result_msg})\n\n # Execute action\n exec(code, f_globals, f_locals)\n\n elif type(action) == type(None):\n pass\n else:\n raise Exception('Failed executing action: '+str(action))\n\n return (c, result_msg)", "def do_action(self, action, **kwargs):\r\n print(action)\r\n action_method = getattr(self, action._method.__name__)\r\n if action_method:\r\n action_method(**kwargs)", "def __call__(self):\n action = self.args.get('action', None)\n if not hasattr(self, 'action_%s' % (action,)):\n action = 'plugin_root'\n\n action_method = getattr(self, 'action_%s' % (action, ))\n return action_method()", "def trig_code(self, bot, source, target, trigger, argument):\n\t\treturn \"Hello, I'm a pyirkbot based on pynik. My code https://github.com/blueCommand/pyirkbot For feature requests use https://github.com/blueCommand/pyirkbot/issues beer is good also\"" ]
[ "0.60873693", "0.59453475", "0.5931221", "0.5755276", "0.5755276", "0.5754673", "0.5716948", "0.57143855", "0.5706276", "0.56706935", "0.5670493", "0.5641106", "0.5600258", "0.5600258", "0.55860025", "0.55815315", "0.5568558", "0.5566936", "0.5542704", "0.55235267", "0.5503572", "0.5502473", "0.546618", "0.5463511", "0.5448694", "0.54399747", "0.5429256", "0.54226947", "0.54062533", "0.5400536" ]
0.6997138
0
Returns the metacontroller state. Concatenates vector representation of the largest selected primitive action with the tried constraints vector.
def get_meta_controller_state(self):
  state = np.zeros(self._num_primitive_actions)

  if len(self._selected_primitive_actions):
    selected_primitive_actions = np.array(self._selected_primitive_actions)
    max_primtive_action = np.max(selected_primitive_actions)
    state[max_primtive_action] = 1
  state = np.concatenate((state, np.copy(self._tried_constraints)), axis=0)

  return state
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_current_state(self):\n\n # One hot encoding of illegal actions\n illegal_actions_one_hot = np.ones(len(self.vehicle_data[0]))\n if len(self.possible_actions) != 0:\n illegal_actions_one_hot[self.possible_actions] = 0\n\n # Calculate mandatory vehicles left to load\n mandatory_vehicles_left = self.vehicle_data[1] - self.number_of_vehicles_loaded\n\n return np.hstack((self.end_of_lanes,\n self.lowest_destination,\n mandatory_vehicles_left[self.mandatory_cargo_mask],\n illegal_actions_one_hot,\n self.current_Lane)).astype(np.int16)", "def getPolicy(self, state):\n \"\"\"Description:\n Find all of q-values of current state, and choose the action \n with the hight q-value as optimal policy\n \"\"\"\n \"\"\" YOUR CODE HERE \"\"\"\n legalActions = self.getLegalActions(state)\n action = None\n policy = util.Counter() # use counter to store action and its q-value\n \n if len(legalActions) == 0:\n return action\n \n for a in legalActions:\n policy[a] = self.getQValue(state, a)\n action = policy.argMax()\n return action\n\n \"\"\" END CODE \"\"\"", "def getstate(self):\r\n return [self.tied_indices,\r\n self.fixed_indices,\r\n self.fixed_values,\r\n self.constrained_indices,\r\n self.constraints]", "def getAction(self, gameState):\n value = float(\"-inf\")\n alpha = float(\"-inf\")\n beta = float(\"inf\")\n \n #self.index = 0 -> pacman \n optimalList = [value, None]\n for action in gameState.getLegalActions(self.index):\n sucessorState = gameState.generateSuccessor(self.index, action)\n tempValue = self.value(sucessorState, self.depth, 1, alpha, beta)\n\n if max(tempValue,value) > alpha:\n alpha = max(tempValue,value)\n\n if tempValue > value:\n value = tempValue\n optimalList = [value, action]\n # return optimal action \n return optimalList[1]", "def getAction(self, gameState):\n value = float(\"-inf\")\n \n #self.index = 0 -> pacman \n optimalList = [value, None]\n for action in gameState.getLegalActions(self.index):\n sucessorState = gameState.generateSuccessor(self.index, action)\n tempValue = self.value(sucessorState, self.depth, 1)\n if tempValue > value:\n value = tempValue\n optimalList = [value, action]\n # return optimal action \n return optimalList[1]", "def getAction(self, gameState):\n value = float(\"-inf\")\n \n #self.index = 0 -> pacman \n optimalList = [value, None]\n for action in gameState.getLegalActions(self.index):\n sucessorState = gameState.generateSuccessor(self.index, action)\n tempValue = self.value(sucessorState, self.depth, 1)\n if tempValue > value:\n value = tempValue\n optimalList = [value, action]\n # return optimal action \n return optimalList[1]", "def bestAction(self, state):\n action = self.q_network.chooseBestAction(state)\n V = max(self.q_network.qValues(state))\n return action, V", "def get_final_action(self, state):\n\t\tstate = Variable(torch.from_numpy(state))\n\t\taction = self.target_actor.forward(state).detach()\n\t\tgenus = torch.argmax(self.genus.forward(state),dim=-1).detach()\n\t\treturn action.data.numpy(), genus.data.numpy()", "def get_action(self, state):\n key = (state.getPacmanPosition(), state.getFood()) # Key for dict\n computed = self.computed.get(key, False)\n\n if computed:\n return computed # Return already computed result\n else:\n self.visited.clear() # Clear set if previously used\n q = queue.Queue() # FIFO queue\n q.put((state, {})) # Initial state of the queue\n path = self.get_path(q)\n self.computed.update(path) # Keep computed answers\n return path.get(key) # Value associated to key (position, food)", "def 
act(self):\n\n\t\t# Figure out the action selected by each head\n\t\tQs = self.dqn.get_Qs(self.state_history)\n\t\tactions = np.argmax(Qs, axis=1)\n\n\t\t# Select the action of the control head\n\t\taction = actions[self.head_number]\n\t\tQ = Qs[self.head_number]\n\n\t\treturn action, Q", "def argmax(self, state, action):\n return copy.deepcopy(state.object_states[self._objid])", "def result(self, state, action):\n state_after_act = [[0 for i in range(self.col)] for j in range(self.row)]\n for k in action:\n x = k[1][0]\n y = k[1][1]\n if k[0] == \"vaccinate\":\n state_after_act[x][y] = ('I', 1)\n else:\n state_after_act[x][y] = ('Q', 1)\n\n for i in range(self.row):\n for j in range(self.col):\n if state_after_act[i][j] == 0:\n if state[i][j][0] == 'U' or state[i][j][0] == 'I':\n state_after_act[i][j] = state[i][j]\n\n elif state[i][j][0] == 'S':\n if state[i][j][1] == 3:\n state_after_act[i][j] = ('H', 1)\n else:\n if state[i][j][1] == 1:\n state_after_act[i][j] = ('S', 2)\n elif state[i][j][1] == 2:\n state_after_act[i][j] = ('S', 3)\n\n elif state[i][j][0] == 'Q':\n if state[i][j][1] == 2:\n state_after_act[i][j] = ('H', 1)\n else:\n state_after_act[i][j] = ('Q', 2)\n\n elif state[i][j][0] == 'H':\n state_after_act[i][j] = self.healthy(i, j, state,state_after_act)\n state_after_act[i] = tuple(state_after_act[i])\n return tuple(state_after_act)", "def getstate(self):\r\n return Parameterized.getstate(self) + \\\r\n [self.priors, self.optimization_runs,\r\n self.sampling_runs, self.preferred_optimizer]", "def _get_state(self):\n # COMPUTE CLASSIFIER_STATE\n predictions = self.model.predict_proba(self.dataset.state_data)[:,0]\n predictions = np.array(predictions)\n idx = np.argsort(predictions)\n # the state representation is the *sorted* list of scores \n classifier_state = predictions[idx]\n \n # COMPUTE ACTION_STATE\n unknown_data = self.dataset.train_data[self.indeces_unknown,:]\n # prediction (score) of classifier on each unlabelled sample\n a1 = self.model.predict_proba(unknown_data)[:,0]\n # average distance to every unlabelled datapoint\n a2 = np.mean(self.dataset.distances[self.indeces_unknown,:][:,self.indeces_unknown],axis=0)\n # average distance to every labelled datapoint\n a3 = np.mean(self.dataset.distances[self.indeces_known,:][:,self.indeces_unknown],axis=0)\n next_action_state = np.concatenate(([a1], [a2], [a3]), axis=0)\n return classifier_state, next_action_state", "def computeActionFromValues(self, state):\n \n State_actions = self.mdp.getPossibleActions(state)\n max_Action=util.Counter()\n for k in State_actions:\n max_Action[k] = self.getQValue(state,k)\n return max_Action.argMax()\n \n util.raiseNotDefined()", "def get_final_state(self):\n state = {}\n s = self.copy()\n clip = not np.isscalar(self['t'])\n if clip:\n # only get results for the last timepoint\n s.set_state(self.state[:, -1])\n \n for k in self.difeq_vars:\n state[k] = s[k]\n for k in self.dep_vars:\n state[k] = s[k]\n for k,v in self.extra.items():\n if clip:\n state[k] = v[-1]\n else:\n state[k] = v\n \n return state", "def computeActionFromValues(self, state):\n \"*** YOUR CODE HERE ***\"\n maxvalue = -100000000\n bestaction = None\n for action in self.mdp.getPossibleActions(state):\n valueforthisaction = self.getQValue(state, action) # is this right? 
\n if valueforthisaction > maxvalue:\n bestaction = action\n maxvalue = valueforthisaction\n return bestaction", "def getPolicy(self, state):\n \"*** YOUR CODE HERE ***\"\n # OUR CODE HERE\n possibleActions = self.mdp.getPossibleActions(state)\n #checking for terminal state (no possible actions)\n if len(possibleActions) is 0: \n return None\n \n #attempt at using the Counter\n eValsActions = util.Counter()\n for action in possibleActions:\n for transitionState, probability in self.mdp.getTransitionStatesAndProbs(state, action):\n eValsActions[action] += probability * (self.mdp.getReward( state, action, transitionState) + self.discount * self.values[transitionState])\n \n return eValsActions.argMax()\n \n #fail attempt using lists :(\n \"\"\"\n #list to hold the expected value of the actions\n eValsActions = []\n #iterate through all actions and their transtion states\n for action in possibleActions:\n for transitionState, probability in self.mdp.getTransitionStatesAndProbs(state, action):\n #expected value of reward with discount * the value of the transitions\n eValsActions[action] += probability * (self.mdp.getReward( state, action, transitionState) + self.discount * self.values[transitionState])\n \n #now iterate through and find the action with the best value\n #(that will be the best action)\n maxVal = -float(\"inf\")\n bestAction = None\n for action in possibleActions:\n if eValsActions[action] > maxVal:\n maxVal = eValsAction[action]\n bestAction = action\n \"\"\"\n return action\n # END OUR CODE", "def calculate_state(self):\n\t\tif self.state_type == 'Queues':\n\t\t\t#self.queue_state =\\\n\t\t\t#[0. if movement.AttValue('QLen(Current, Last)') is None else movement.AttValue('QLen(Current, Last)') for movement in self.lanes_movement]\n\n\t\t\tself.queue_state =\\\n\t\t\t[0. if queue.AttValue('QLen(Current, Last)') is None else queue.AttValue('QLen(Current, Last)') for queue in self.queues_counters]\n\n\t\t\tstate = np.array(self.queue_state)[np.newaxis,:]\n\n\t\tif self.state_type == \"QueuesSig\":\n\n\t\t\tself.queue_state =\\\n\t\t\t[0. 
if queue.AttValue('QLen(Current, Last)') is None else queue.AttValue('QLen(Current, Last)') for queue in self.queues_counters]\n\n\t\t\tstate = np.array(self.queue_state+[self.next_action_key])[np.newaxis,:]\n\t\n\t\treturn(state)", "def computeactionfromqvalues(self, state):\n legalactions = env.getlegalactions(env.state_to_array(state))\n if len(legalactions) == 0:\n return None\n tmp = Counter()\n for action in legalactions:\n tmp[action] = self.getqvalue(state, action)\n return tmp.argMax()", "def get_action(self, state):\n assert np.shape(state) == (self._state_dim,)\n\n ### PROBLEM 2\n ### YOUR CODE HERE\n # raise NotImplementedError\n best_action = self._sess.run(self._best_action,\n feed_dict={self._state_ph: np.atleast_2d(state)})\n assert np.shape(best_action) == (self._action_dim,)\n\n return best_action", "def T(self,state,action):\n result = NumMap()\n s_p = action.apply(state)\n if not self.is_legal(s_p) or s_p.__eq__(state):\n result[state] = 1\n else: \n result[s_p] = 1 - self._p_fail\n result[state] = self._p_fail\n\n return result", "def getBestActionValuePair(self, state):\n\t\tactionValuePairs = self.neuralNet.getValues(state)\n\t\tbestAction = np.argmax(actionValuePairs)\n\t\treturn (bestAction, actionValuePairs[bestAction])", "def computeActionFromQValues(self, state):\n \"*** YOUR CODE HERE ***\"\n self.Temporary_QValue = util.Counter() #initializing a temporary QValue counter\n\n temporary_QValue = self.Temporary_QValue\n\n legal_Actions = self.getLegalActions(state) #get all the legal actions like north,south,east,west,exit\n\n length_legalActions = len(legal_Actions) #find length of legal actions just to find later if we have legal actions or not\n\n if length_legalActions == 0: #to check if we have any legal action or not\n return 0.0 #Returns value 0 as we do not have any legal actions, we cannot pass 'None' as autograder in q8 expects a float value and not string value\n\n for a in legal_Actions: #loop to check for each legal action\n\n temporary_QValue[a] = self.getQValue(state,a) #Find the Qvalue of each action\n\n best_action = temporary_QValue.argMax() #find the best action to take in a state\n return best_action\n #util.raiseNotDefined()", "def getAction(self, gameState):\n result = float(\"-inf\")\n action = 1\n for agentState in gameState.getLegalActions(0):\n valorminimax = self.miniMaxDecision(1, 0, gameState.generateSuccessor(0, agentState))\n if valorminimax > result:\n result = valorminimax\n action = agentState\n return action", "def get_action(self, state):\n\n \"\"\"\n XXX: DO NOT MODIFY THAT FUNCTION !!!\n Doing so will result in a 0 grade.\n \"\"\"\n\n # XXX : You shouldn't care on what is going on below.\n # Variables are specified in constructor.\n if self.beliefGhostStates is None:\n self.beliefGhostStates = state.getGhostBeliefStates()\n if self.walls is None:\n self.walls = state.getWalls()\n return self.updateAndGetBeliefStates(\n self._computeNoisyPositions(state))", "def _compute_action(self, final_hidden: Tensor) -> Tensor:\n actions = self.activation(self.action_net.forward(final_hidden))\n return actions.view(self.batch_size, self.num_experts, self.output_size_per_expert)", "def __getstate__(self):\n return (self.layers, self.best_loss)", "def getPolicy(self, state):\n \"*** YOUR CODE HERE ***\"\n possibleActions = self.mdp.getPossibleActions(state)\n if len(possibleActions) == 0: return None\n results = []\n for action in possibleActions:\n total = 0\n for (nextState, prob) in self.mdp.getTransitionStatesAndProbs(state,action):\n 
total += (prob * self.values[nextState])\n results.append(total)\n maxIndex = max(enumerate(results), key=lambda x: x[1])[0]\n #print(\"here\")\n return possibleActions[maxIndex]", "def computeValueFromQValues(self, state):\n \treturn max([self.getQValue(state, action) for action in self.actions])" ]
[ "0.6379533", "0.6109914", "0.60754544", "0.6071877", "0.6062136", "0.6062136", "0.5973997", "0.5971481", "0.5957208", "0.5934242", "0.5891367", "0.5885219", "0.583377", "0.5827091", "0.5804313", "0.5786684", "0.57818174", "0.57702297", "0.5767344", "0.57548946", "0.5747696", "0.5720408", "0.57095486", "0.5702368", "0.57019776", "0.5696734", "0.56839293", "0.56723213", "0.566801", "0.56616116" ]
0.81656325
0
Returns an array of controller environment states.
def get_controller_environment_states(env_state):
  controller_environment_states = np.split(env_state, self._num_controllers)
  return controller_environment_states
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def envs():\n\n # update and grab the envs from the metadata keys\n metadata = _init()\n return list(metadata.keys())", "def get_states(self):\n for model_name in self.model_names:\n if model_name == \"cube\":\n data = self.get_model_state(\n model_name, \"world\") # gazebo service client\n return np.array([\n data.pose.position.x,\n data.pose.position.y,\n data.pose.position.z,\n data.pose.orientation.x,\n data.pose.orientation.y,\n data.pose.orientation.z\n ])", "def get_states():\n states = []\n for state in storage.all(\"State\").values():\n states.append(state.to_dict())\n return jsonify(states)", "def states():\n states = []\n for state in storage.all('State').values():\n states.append(state.to_dict())\n return jsonify(states)", "def get_list_of_states(self):\n return self.states", "def get_states():\n all_states = []\n states = storage.all(State).values()\n for state in states:\n all_states.append(state.to_dict())\n return jsonify(all_states)", "def envs(self) -> Optional[Sequence['outputs.CSIIsilonSpecDriverControllerEnvs']]:\n return pulumi.get(self, \"envs\")", "def states():\n states = storage.all(State).values()\n return jsonify([item.to_dict() for item in states])", "def envs(self) -> Optional[Sequence['outputs.CSIUnitySpecDriverControllerEnvs']]:\n return pulumi.get(self, \"envs\")", "def envs(self) -> Optional[Sequence['outputs.CSIPowerStoreSpecDriverControllerEnvs']]:\n return pulumi.get(self, \"envs\")", "def states():\n states = storage.all(State).values()\n states_list = []\n for state in states:\n states_list.append(state.to_dict())\n return jsonify(states_list)", "def envs(self) -> Optional[Sequence['outputs.CSIVXFlexOSSpecDriverControllerEnvs']]:\n return pulumi.get(self, \"envs\")", "def all_states():\n states = []\n states_values = storage.all(\"State\").values()\n for obj in states_values:\n states.append(obj.to_dict())\n return jsonify(states)", "def all_states(self):\n return self._states", "def envs(self) -> Optional[Sequence['outputs.CSIPowerMaxSpecDriverControllerEnvs']]:\n return pulumi.get(self, \"envs\")", "def active_states(self):\n return self.states.get_active_states()", "def get_all_states(self):\n return self._states", "def get_all_environments():\n return ENVIRONMENTS", "def all_states():\n states = storage.all(State)\n states_list = []\n for state in states.values():\n states_list.append(state.to_dict())\n return jsonify(states_list)", "def States(self) -> List[Callable]:\r\n\t\treturn self.__STATES__", "def all_states():\n dict = storage.all(State)\n list = []\n for state in dict.values():\n list.append(state.to_dict())\n return jsonify(list)", "def get_states():\n all_states = storage.all('State')\n states_list = all_states.values()\n states_json = []\n for state in states_list:\n states_json.append(state.to_dict())\n return jsonify(states_json)", "def environments(self):\n env_txt = self.config[\"tox\"][\"envlist\"]\n env_lst_raw = env_txt.strip().replace(\"\\n\",\",\").split(\",\")\n env_lst = [x.strip() for x in env_lst_raw if x != \"\"]\n return env_lst", "def get_all_states():\n states = storage.all(State).values()\n return jsonify([state.to_dict() for state in states]), 200", "def states(self) -> Type[Any]:\n return []", "def get_terminal_observing_states(self):\n pass", "def env(self): # type: () -> t.List[str]\n return self.config['Env']", "def _state(self):\n state = [] \n for _temp in self.config[\"performance_targets\"]:\n ID = _temp[0]\n attribute = _temp[1]\n state.append(self.methods[attribute](ID))\n \n for _temp in 
self.config[\"states\"]:\n ID = _temp[0]\n attribute = _temp[1]\n state.append(self.methods[attribute](ID))\n\n state = np.asarray(state)\n \n return state", "def get_states(self):\n raise NotImplementedError()", "def states(self):\n return self._x_list" ]
[ "0.6854194", "0.68268436", "0.6735878", "0.6734534", "0.67082626", "0.66963553", "0.6675871", "0.6657624", "0.66188204", "0.6608691", "0.6592775", "0.6592281", "0.65800303", "0.65366554", "0.6533935", "0.65275776", "0.65239054", "0.6520074", "0.64688414", "0.6431106", "0.6427624", "0.63944983", "0.6381198", "0.63457304", "0.63252723", "0.6320009", "0.63131803", "0.6306216", "0.63022244", "0.62960374" ]
0.8024106
0
Returns the controller state containing the controller's environment state, constraint, ordering vector, and received communication vectors.
def get_controller_state(self, env_state, constraint, ordering, comm_turn, communication_vector=None):
  controller_state = np.zeros(self._controller_state_size)
  # Apply the constraint to the environment state.
  env_state_plus_constraint = np.logical_and(env_state, constraint).astype(int)
  env_state_size = np.size(env_state_plus_constraint)
  controller_state[0:env_state_size] = env_state_plus_constraint
  controller_state[env_state_size:env_state_size_size + self._num_controllers_per_subtask] = ordering
  if comm_turn >= 1:
    controller_state[(env_state_size + self._num_controllers_per_subtask + (comm_turn - 1) * num_primitive_actions):(
        env_state_size + self._num_controllers_per_subtask + comm_turn * num_primitive_actions)] = communication_vector

  return np.copy(controller_state)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def controller_bookkeeping_vars(self):\n # Keeps track of all the controller states.\n controller_states = np.zeros(\n self._num_communication_turns + 1, self._num_controllers, self._controller_state_size)\n # Keeps track of all controllers' selected actions (communication + output).\n controller_actions = np.zeros(\n self._num_communication_turns, self._num_controllers, 1)\n # List that will contain the output actions.\n output_actions = []\n\n return controller_states, controller_actions, output_actions", "def get_state(self):\n return self.controller.get_state()", "def get_state(self):\n xml = self.env.sim.model.get_xml() # model xml file\n state = np.array(self.env.sim.get_state().flatten()) # simulator state\n return dict(model=xml, states=state)", "def get_state(self):\n return self.env.sim.get_state()", "def get_state(self):\n return self.fmu._get_continuous_states()", "def get_state(self):\n return self._env.get_state()", "def _log_model_state(self):\n\n state = {\n 'model_state': self.state_dict(),\n 'model_name': type(self).__name__,\n 'optimizer_state': self.optimizer.state_dict(),\n 'optimizer_name': type(self.optimizer).__name__,\n }\n\n return state", "def get_controller_environment_states(env_state):\n controller_environment_states = np.split(env_state, self._num_controllers)\n return controller_environment_states", "def getstate(self):\r\n return Model.getstate(self) + [self.X,\r\n self.num_data,\r\n self.input_dim,\r\n self.kern,\r\n self.likelihood,\r\n self.output_dim,\r\n self._Xoffset,\r\n self._Xscale]", "def get_input_state_matrix(self):\n\n # state matrix for control\n B = zeros((9,3))\n\n # control term impact on pendulum\n B[3:6,0:3] = matrix([\n [self.params['Km']/(self.params['Ix'])/self.params['Rm'], 0, 0],\n [0, self.params['Km']/(self.params['Iy'])/self.params['Rm'], 0],\n [0, 0, self.params['Km']/(self.params['Iz'])/self.params['Rm']]\n ])\n # control term impact on wheel\n B[6:9,0:3] = self.params['Km']/self.params['Iw']/self.params['Rm']*identity(3)\n\n return B", "def get_state(self):\n return {\n \"epoch\": self.epoch,\n \"weights\": self.model.get_weights(),\n \"optimizer_weights\": self.model.optimizer.get_weights()\n }", "def get_meta_controller_state(self):\n state = np.zeros(self._num_primitive_actions)\n\n if len(self._selected_primitive_actions):\n selected_primitive_actions = np.array(self._selected_primitive_actions)\n max_primtive_action = np.max(selected_primitive_actions)\n state[max_primtive_action] = 1\n state = np.concatenate((state, np.copy(self._tried_constraints)), axis=0)\n\n return state", "def state(self):\n return {\n 'network': self._network,\n 'target_network': self._target_network,\n 'optimizer': self._optimizer,\n 'num_steps': self._num_steps\n }", "def getstate(self):\r\n return Parameterized.getstate(self) + [self.parts,\r\n self.num_parts,\r\n self.num_params,\r\n self.input_dim,\r\n self.input_slices,\r\n self.param_slices\r\n ]", "def current_state(self):\n return {\n \"cube_pos\": self.mujoco_simulation.get_qpos(\"cube_position\"),\n \"cube_quat\": self.mujoco_simulation.get_qpos(\"cube_rotation\"),\n \"cube_face_angle\": self.mujoco_simulation.get_face_angles(\"cube\"),\n }", "def getstate(self):\r\n return [self.tied_indices,\r\n self.fixed_indices,\r\n self.fixed_values,\r\n self.constrained_indices,\r\n self.constraints]", "def sim_to_state(self):\n # As mentioned earlier, we only need z-axis variables.\n # This will keep our state nice and small.\n return [\n self.sim.pose[Z_AXIS],\n self.sim.v[Z_AXIS],\n 
self.sim.linear_accel[Z_AXIS]\n ]", "def stateVector(self):\n simulator=Aer.get_backend('statevector_simulator')\n result=execute(self.circuit,backend=simulator).result()\n statevector=result.get_statevector(decimals=4) #\"decimals=4\" doesn't work in version 0.20.0 \n return statevector.tolist()", "def get_state(self):\r\n return self.currentObservation", "def _get_current_state(self):\n\n # One hot encoding of illegal actions\n illegal_actions_one_hot = np.ones(len(self.vehicle_data[0]))\n if len(self.possible_actions) != 0:\n illegal_actions_one_hot[self.possible_actions] = 0\n\n # Calculate mandatory vehicles left to load\n mandatory_vehicles_left = self.vehicle_data[1] - self.number_of_vehicles_loaded\n\n return np.hstack((self.end_of_lanes,\n self.lowest_destination,\n mandatory_vehicles_left[self.mandatory_cargo_mask],\n illegal_actions_one_hot,\n self.current_Lane)).astype(np.int16)", "def _stateDict(self):\n\n data = {}\n # if self.currentState[4]:\n # data['action'] = 'BRAK'\n # else:\n data['action'] = 'MCTL'\n data['speed'] = float(self.speed)\n data['steerAngle'] = float(self.steering_angle)\n\n return data", "def get_states(self):\n for model_name in self.model_names:\n if model_name == \"cube\":\n data = self.get_model_state(\n model_name, \"world\") # gazebo service client\n return np.array([\n data.pose.position.x,\n data.pose.position.y,\n data.pose.position.z,\n data.pose.orientation.x,\n data.pose.orientation.y,\n data.pose.orientation.z\n ])", "def getState( self, cCtrlName ):\n return self.getControlModelProperty( cCtrlName, \"State\" )", "def __getstate__(self):\n state = {\n 'connector_keys' : self.connector_keys,\n 'metric_key' : self.metric_key,\n 'location_key' : self.location_key,\n 'parameters' : self.parameters,\n 'mrsm_instance' : self.instance_keys,\n }\n return state", "def get_state(self):\n #print(\"ComponentBase.get_state\")\n state = {\"part_name\" : self.part_name,\n \"mount\" : self.mount,\n \"silkscreen_commands\" : self.pcb_layers[\"silkscreen\"].commands,\n \"connectors\" : [c.get_state() for c in self.connectors] }\n return state", "def state(self):\n return self.coordinator.data[PVS_DEVICE_TYPE][self.base_unique_id][PVS_STATE]", "def state_dict(self):\r\n return {'ImageModel': self.image_model.state_dict(),\r\n 'QuestionModel': self.question_model.state_dict(),\r\n 'AttentionModel': self.attention.state_dict()\r\n }", "def get_state(self) -> Dict[str, Any]:\n return {\"aq_potential_num\": self.aq_potential_num, \"wq_potential_num\": self.wq_potential_num}", "def state_dict(self):\n return {\n 'XY_net': self.XY_net.state_dict(),\n 'XY_optimizer_minee': self.XY_optimizer_minee.state_dict(),\n 'X_net': self.X_net.state_dict(),\n 'X_optimizer_minee': self.X_optimizer_minee.state_dict(),\n 'Y_net': self.Y_net.state_dict(),\n 'Y_optimizer_minee': self.Y_optimizer_minee.state_dict(),\n 'X': self.X,\n 'Y': self.Y,\n 'lr': self.lr,\n 'batch_size': self.batch_size,\n 'ref_batch_factor': self.ref_batch_factor\n }", "def _get_model_state(self) -> dict:\n raise NotImplementedError" ]
[ "0.72435206", "0.6839589", "0.67760944", "0.6572749", "0.63595426", "0.63493794", "0.6297692", "0.62836903", "0.6233724", "0.61903673", "0.61850005", "0.61357987", "0.61350757", "0.6120478", "0.6109083", "0.6104067", "0.60701954", "0.6047435", "0.60060453", "0.59885216", "0.59697586", "0.5968845", "0.5957862", "0.5957227", "0.5941871", "0.5936867", "0.59295535", "0.59219515", "0.59189785", "0.59091353" ]
0.6928261
1
Intrinsically rewards a subset of controllers using the provided critic function.
def intrinsic_reward(self, env_states, constraints, orderings, selected_actions):
  return self._critic_fn(
      controller_states, constraints, orderings, selected_actions)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def critic(self, critic: CriticType) -> None:\n self._critic = critic", "def update_critic_weights(self, states, actions, new_states, dones, rewards):\n with tf.GradientTape(True) as tape:\n noise = (\n tf.random.normal(\n (self.buffers[0].batch_size * self.n_envs, self.n_actions)\n )\n * self.policy_noise_coef\n )\n noise = tf.clip_by_value(noise, -self.noise_clip, self.noise_clip)\n new_actions = tf.clip_by_value(\n self.target_actor(new_states) + noise, -1.0, 1.0\n )\n target_critic_input = tf.concat(\n [tf.cast(new_states, tf.float64), tf.cast(new_actions, tf.float64)], 1\n )\n target_value1 = self.target_critic1(target_critic_input)\n target_value2 = self.target_critic2(target_critic_input)\n target_value = tf.minimum(target_value1, target_value2)\n target_value = rewards + tf.stop_gradient(\n (1 - dones) * self.gamma * target_value\n )\n critic_input = tf.concat([states, actions], 1)\n value1 = self.critic1(critic_input)\n value2 = self.critic2(critic_input)\n critic1_loss, critic2_loss = MSE(value1, target_value), MSE(\n value2, target_value\n )\n self.critic1.optimizer.minimize(\n critic1_loss, self.critic1.trainable_variables, tape=tape\n )\n self.critic2.optimizer.minimize(\n critic2_loss, self.critic2.trainable_variables, tape=tape\n )", "def myopic(actuation_name,robot,depth_of_view_multiplier,possibility):\n goodluck = random.randint(0,100)\n \n if goodluck < possibility:\n robot.depth_of_view = robot.depth_of_view * depth_of_view_multiplier\n print(\"{0} is affected by {2}. Its depth of view multiplied by {1}!\".format(robot.name,depth_of_view_multiplier,actuation_name))\n return robot\n else:\n print(\"{1} effect is not succesful on {0}!\".format(robot.name,actuation_name))\n return robot", "def apply_action(self, physics, action, random_state):\n del random_state\n physics.bind(self.actuators).ctrl = action", "def train_actor_and_critic(self):\n\n # if not enough transitions in memory, don't train!\n if len(self.memory) < self.batch_size:\n return\n\n transitions = self.memory.sample() # sample a batch from memory\n\n # Convert experience tuples to separate arrays for each element\n # (states, actions, rewards, etc.)\n states = np.vstack([e.state for e in transitions if e is not None])\n actions = np.array([\n e.action for e in transitions if e is not None]).astype(\n np.float32).reshape(-1, self.action_size)\n rewards = np.array([\n e.reward for e in transitions if e is not None]).astype(\n np.float32).reshape(-1, 1)\n dones = np.array([\n e.done for e in transitions if e is not None]).astype(\n np.uint8).reshape(-1, 1)\n next_states = np.vstack(\n [e.next_state for e in transitions if e is not None])\n\n # Get predicted next-state actions and Q values from target models\n actions_next = self.actor_target.model.predict_on_batch(next_states) #mu_marked in algo\n Q_targets_next = self.critic_target.model.predict_on_batch([next_states, actions_next]) #Q' in algo\n\n # Compute Q targets for current states and train critic model (local)\n Q_targets = rewards + self.gamma * Q_targets_next * (1 - dones) #y_i in algo\n critic_loss = self.critic_local.model.train_on_batch(x=[states, actions], y=Q_targets)\n\n # Train actor model (local)\n action_gradients = np.reshape(self.critic_local.get_action_gradients([states, actions, 0]), (-1, self.action_size))\n # print(\"action_gradients\",action_gradients)\n # custom training function\n self.actor_local.train_fn([states, action_gradients, 1])\n\n # Soft-update target models\n # self.soft_update(self.critic_local.model, 
self.critic_target.model, self.tau)\n # self.soft_update(self.actor_local.model, self.actor_target.model, self.tau)\n self.soft_update_critic()\n self.soft_update_actor()\n\n return critic_loss", "def make_critics(self, obs=None, action=None, reuse=False,\n scope=\"values_fn\", create_vf=True, create_qf=True):\n raise NotImplementedError", "def call(self, states, actions):\n x = tf.concat([states, actions], -1)\n\n q1 = self.critic1(x)\n\n q2 = self.critic2(x)\n\n return q1, q2", "def crit_ai(crit):\n if crit['type'] == 'crawler':\n # Crawlers move at random.\n return random.choice(['left','right','up','down'])\n #if crit['type'] == 'bullet':\n # return crit['dir']\n return None", "def calculate_controller_reward(self, controller1, controller2):", "def policy(self, obs, exploit, c1_c2_games):\n\n @tf.function\n def exploratoy_policy(temp):\n q_values = self._q_target_net(obs)\n obs_list = tf.split(q_values, num_or_size_splits=c1_c2_games, axis=0, num=None, name='split')\n explore = tf.squeeze(tf.random.categorical(obs_list[0] / temp, 1, dtype=tf.int32))\n greedy = tf.squeeze(tf.argmax(obs_list[1], axis=-1, output_type=tf.int32))\n return tf.concat([explore, greedy], axis=0)\n\n @tf.function\n def greedy_policy():\n q_values = self._q_target_net(obs)\n return tf.squeeze(tf.argmax(q_values, axis=-1, output_type=tf.int32))\n\n if exploit:\n actions = greedy_policy()\n else:\n actions = exploratoy_policy(temp=tf.math.maximum(self._temp_min, self._temp_max * (1 - self._eps_count/self._max_schedule)))\n\n return actions", "def policy (self,forced_actions=None,forced_rewards=None,state_idx=None):\n\t\tif self.gamble:\n\t\t\tself.policy_gamble()\n\t\t\treturn\n\t\tif self.UCB:\n\t\t\tself.policy_UCB(forced_actions,forced_rewards,state_idx)\n\t\t\treturn\n\n\t\tidx = self.idx \t\t\t\t# internal time index of state\n\t\tprobs = self.probs\t\t\t# prob of reward for an action\n\t\tbeta = self.beta\t\t\t# inverse temp \n\n\t\t# calc Act thalamus activation\n\t\tAct = beta*self.Q[idx,:]\n\n\t\t# multioption softmax (invariant to constant offsets)\n\t\tnewAct = Act - np.max(Act)\n\t\texpAct = np.exp(newAct)\n\t\tps = expAct/np.sum(expAct)\n\t\tself.SM[idx,:] = ps\n\t\tcs_ps = np.cumsum(ps)\n\n\t\t# select action\n\t\tif forced_actions is None:\n\t\t\tsample = np.random.random_sample()\n\t\t\tselected = False\n\t\t\tcheck = 0\n\t\t\twhile not selected:\n\t\t\t\tif sample < cs_ps[check]:\n\t\t\t\t\tC = check\n\t\t\t\t\tselected = True\n\t\t\t\telse:\n\t\t\t\t\tcheck = check + 1\n\t\telse:\n\t\t\tC = forced_actions[state_idx,idx]\n\t\tself.C[idx] = C\n\t\t\t\n\t\t# decide whether a reward is delivered\n\t\tif forced_rewards is None:\n\t\t\treward = np.random.binomial(size=1, n=1, p= probs[C])[0]\n\t\telse:\n\t\t\treward = forced_rewards[state_idx,idx]\n\t\tself.R[idx] = reward # indicator that reward was received\n\t\tif reward == 0:\n\t\t\treward = self.l_mag\n\t\telse:\n\t\t\treward = self.r_mag\n\n\t\tPE = reward - self.Q[idx,C]\n\t\tself.PE[idx] = PE", "def explore_action():\n # def get_action(o, noise_scale):\n # a = ac.act(torch.as_tensor(o, dtype=torch.float32))\n # a += noise_scale * np.random.randn(act_dim)\n # return np.clip(a, -act_limit, act_limit)\n raise NotImplementedError", "def test_affect_of_strategy(self):\n self.responses_test([C, C, C], [C, C, C], [C, C, C])\n # Make sure that the retaliations are increasing\n # Retaliate once and forgive\n self.responses_test([C], [D], [D])\n self.responses_test([C, D], [D, C], [C])\n self.responses_test([C, D, C], [D, C, C], [C])\n # Retaliate twice 
and forgive\n self.responses_test([C, D, C], [D, C, D], [D, D])\n self.responses_test([C, D, C, D, D], [D, C, D, C, C], [C])\n # Opponent defection during retaliation doesn't increase retaliation period\n self.responses_test([C, D, C, D, D], [D, C, D, D, C], [C])\n # Retaliate thrice and forgive\n self.responses_test([C, D, C, D, D, C], [D, C, D, C, C, D], [D, D, D])\n history_1 = [C, D, C, D, D, C, D, D, D]\n history_2 = [D, C, D, C, C, D, C, C, C]\n self.responses_test(history_1, history_2, [C])", "def criticize(self, env: FakeEnv) -> Tensor:\n c = Critique(env.observation)", "def RandomAgentProgram(actions):\n return lambda percept: random.choice(actions)", "def RandomAgentProgram(actions):\n return lambda percept: random.choice(actions)", "def getAction(self, state):\n # Pick Action\n legalActions = self.getLegalActions(state)\n action = None\n\n \"\"\"Description:\n Use util.flipCoin, if return true then randomly choice from legalAction\n if flase, then sue getPolicy to get best policy action\n \"\"\"\n \"\"\" YOUR CODE HERE \"\"\"\n if len(legalActions) == 0:\n return action # None\n \n if util.flipCoin(self.epsilon):\n ''' exploration function (not work well)''' \n# posPol = util.Counter()\n# for a in legalActions:\n# if self.getQValue(state,a) >= 0:\n# posPol[a] = -1*self.getQValue(state, a) + (1000/(self.vitCount[(state,a)]+0.0001))\n# #print \"posPol[\", a, \"]= \",posPol[a]\n# #posPol[a] = (self.getQValue(state, a) * self.epsilon** self.vitCount[(state,a)]) + ( self.epsilon/(self.vitCount[(state,a)]+0.1) )\n# if len(posPol) == 0:\n# action = random.choice(legalActions)\n# else:\n# action = posPol.argMax() # random.choice(posPol.keys())\n ''' Random exploration '''\n action = random.choice(legalActions)\n else:\n action = self.getPolicy(state)\n \n \"\"\" END CODE \"\"\"\n\n return action", "def _Learn(self, Actor, ActorTarget, actorOpt, experiances):\n Actor.train() # Set in Train Mode\n # Get split experiances into: states, actions ...\n states, actions, rewards, nextStates, dones = experiances\n # ....................... Update Critic .......................\n QTargetsNext = self.CriticTarget(nextStates, ActorTarget(nextStates))\n QTargets = rewards + (GAMMA * QTargetsNext * (1 - dones))\n QExpected = self.Critic(states, actions)\n # Minimize Loss & Update Weights\n critic_loss = F.smooth_l1_loss(QExpected, QTargets.detach())\n self.criticOpt.zero_grad()\n critic_loss.backward()\n T.nn.utils.clip_grad_norm(self.Critic.parameters(), 1)\n self.criticOpt.step()\n # ....................... Update Actor .......................\n actor_loss = -self.Critic(states, Actor(states)).mean()\n # Update Weights\n actorOpt.zero_grad()\n actor_loss.backward()\n T.nn.utils.clip_grad_norm(Actor.parameters(), 1)\n actorOpt.step()\n # ............. 
Update Actor & Critic Target Nets .............\n self.SoftUpdate(self.Critic, self.CriticTarget, TAU)\n self.SoftUpdate(Actor, ActorTarget, TAU)", "def action(self, observations):\n observations = observations.float()\n policy_hidden = self.policy_backbone(observations)\n action = self.action_head(policy_hidden)\n return action", "def perm_escalate_helper_get_with_client(self, albumcontrol, testalbum, id, argname, func, level):\n #permexceptstr = \"Well, look at you, trying to access stuff you can't access on a privacy aware website.\"\n\n albumcontrol.set_accesstype(testalbum, ALBUM_PUBLIC)\n\n response = self.client.get(reverse(func, kwargs={argname: id}))\n\n if level >= ALBUM_PUBLIC:\n self.assertEqual(response.status_code, 200)\n else:\n self.assertEqual(response.status_code, 404)\n\n albumcontrol.set_accesstype(testalbum, ALBUM_ALLFRIENDS)\n\n response = self.client.get(reverse(func, kwargs={argname: id}))\n\n if level >= ALBUM_ALLFRIENDS:\n self.assertEqual(response.status_code, 200)\n else:\n self.assertEqual(response.status_code, 404)\n\n albumcontrol.set_accesstype(testalbum, ALBUM_GROUPS)\n\n response = self.client.get(reverse(func, kwargs={argname: id}))\n\n if level >= ALBUM_GROUPS:\n self.assertEqual(response.status_code, 200)\n else:\n self.assertEqual(response.status_code, 404)\n\n albumcontrol.set_accesstype(testalbum, ALBUM_PRIVATE)\n\n response = self.client.get(reverse(func, kwargs={argname: id}))\n\n if level >= ALBUM_PRIVATE:\n self.assertEqual(response.status_code, 200)\n else:\n self.assertEqual(response.status_code, 404)", "def _critic(self):\n nactions = np.product(self.env.action_shape)\n action_input = keras.layers.Input(shape=(nactions,), name='action_input')\n obs_input = keras.layers.Input(shape=(1,) + self.env.observation_space.shape, name='observation_input')\n flattened_obs = keras.layers.Flatten()(obs_input)\n\n out = keras.layers.Concatenate()([action_input, flattened_obs])\n out = keras.layers.Dense(16)(out)\n out = keras.layers.Activation('relu')(out)\n out = keras.layers.Dense(8)(out)\n out = keras.layers.Activation('relu')(out)\n out = keras.layers.Dense(1)(out) # Must be single output\n out = keras.layers.Activation('linear')(out)\n critic = keras.models.Model(inputs=[action_input, obs_input], outputs=out)\n return critic, action_input", "def mact(circuit, q_controls, q_target, ancilla):\n circuit.x(q_controls)\n circuit.mct(q_controls, q_target[0], ancilla)\n circuit.x(q_controls)\n circuit.barrier()", "def learn(self, experiences, gamma):\n states, actions, rewards, next_states, dones = experiences\n\n # ------------------- update critic ------------------- #\n next_actions = self.actor_target(next_states)\n # Get Q targets (for next states) from target model (on CPU)\n Q_targets_next = self.critic_target(next_states, next_actions).detach().max(1)[0].unsqueeze(1)\n # Compute Q targets for current states \n Q_targets = rewards + (gamma * Q_targets_next * (1 - dones))\n\n # Get expected Q values from local model\n Q_expected = self.critic_local(states, actions)\n\n # Compute critic loss\n critic_loss = F.mse_loss(Q_expected, Q_targets)\n # Minimize the critic loss\n self.critic_optimizer.zero_grad()\n critic_loss.backward()\n torch.nn.utils.clip_grad_norm_(self.critic_local.parameters(), 1) # As mentioned on project page\n self.critic_optimizer.step()\n\n # ------------------- update actor ------------------- #\n actions_expected = self.actor_local(states)\n # Compute actor loss based on expectation from actions_expected\n actor_loss = 
-self.critic_local(states, actions_expected).mean()\n # Minimize the actor loss\n self.actor_optimizer.zero_grad()\n actor_loss.backward()\n self.actor_optimizer.step()\n\n\n # ------------------- update target network ------------------- #\n self.soft_update(self.critic_local, self.critic_target, TAU) \n self.soft_update(self.actor_local, self.actor_target, TAU)", "def exploit(cll, all_actions: int) -> int:\n best_classifier = None\n anticipated_change_cls = [cl for cl in cll\n if cl.does_anticipate_change()]\n\n if len(anticipated_change_cls) > 0:\n random.shuffle(anticipated_change_cls)\n best_classifier = max(anticipated_change_cls,\n key=lambda cl: cl.fitness * cl.num)\n\n if best_classifier is not None:\n return best_classifier.action\n\n return choose_random_action(all_actions)", "def actor_critic_continuous(num_inputs, num_hidden, bound):\n # Input\n inputs = Input(shape=(num_inputs,), name='input_layer')\n # Shared layer\n common = Dense(units=num_hidden, activation='relu',\n kernel_initializer='he_uniform', name='common_layer_1')(inputs)\n # common = Dense(units=num_hidden, activation='relu', name='common_layer_2')(common)\n # Actor\n init = tf.random_uniform_initializer(minval=-0.003, maxval=0.003)\n action = Dense(units=1, activation='tanh',\n kernel_initializer=init, name='actor_output_layer')(common)\n action = action * bound\n # Critic\n critic = Dense(units=1, activation='linear',\n kernel_initializer='glorot_uniform', name='critic_output_layer')(common)\n # Combine\n model = Model(inputs=inputs, outputs=[action, critic])\n return model", "def _action_rewards(self, context) -> ActionRewards:\n pass", "def chooseAction(self, gameState):\n actions = gameState.getLegalActions(self.index)\n\n ''' \n You should change this in your own agent.\n '''\n\n return random.choice(actions)", "def lift(self, pool_name, *missions):\n\t\tpool = getattr(self, pool_name)\n\t\tfor mission in reversed(missions):\n\t\t\tpool.move_to_end(mission.url, last=False)\n\t\tself.bubble(\"MISSION_LIST_REARRANGED\", pool)\n\t\tself.edit = True", "def act_func_part3_pit_sto_v1(action_raw, action_raw_idx, raw_state_limits, stptLmt, ob_this_raw, logger, is_show_debug):\n CLG_DMD_IDX = 14;\n CHILLER1_CAP = 1079600 # W\n CHILLER2_CAP = 1079600 # W\n CHILLER3_CAP = 541500 # W\n\n act_choice_0 = [1,0,0,0,0];\n act_choice_1 = [0,1,0,0,0];\n act_choice_2 = [0,0,1,0,0];\n act_choice_3 = [0,0,0,1,0];\n act_choice_4 = [0,0,0,0,1];\n act_num = 5;\n act_choices = [act_choice_0, act_choice_1, act_choice_2, \n act_choice_3, act_choice_4]; \n act_0_max_cap = CHILLER3_CAP; # 1 small chiller\n act_1_max_cap = CHILLER1_CAP; # 1 big chiller\n act_2_max_cap = CHILLER1_CAP + CHILLER3_CAP; # 1 small 1 big\n act_3_max_cap = CHILLER1_CAP + CHILLER2_CAP; # 2 bigs\n act_4_max_cap = CHILLER1_CAP + CHILLER2_CAP + CHILLER3_CAP; # all chillers\n clg_demand = ob_this_raw[CLG_DMD_IDX];\n org_action_raw = copy.deepcopy(action_raw);\n org_action_raw_idx = action_raw_idx;\n # Check the current cooling demand in which range\n if clg_demand <= act_0_max_cap:\n action_ret_idx = org_action_raw_idx;\n action_ret = org_action_raw;\n elif act_1_max_cap >= clg_demand > act_0_max_cap:\n if org_action_raw_idx < 1:\n action_ret_idx = np.random.randint(1, act_num);\n action_ret = act_choices[action_ret_idx];\n else:\n action_ret_idx = org_action_raw_idx;\n action_ret = org_action_raw;\n elif act_2_max_cap >= clg_demand > act_1_max_cap:\n if org_action_raw_idx < 2:\n action_ret_idx = np.random.randint(2, act_num);\n action_ret = 
act_choices[action_ret_idx];\n else:\n action_ret_idx = org_action_raw_idx;\n action_ret = org_action_raw;\n elif act_3_max_cap >= clg_demand > act_2_max_cap:\n if org_action_raw_idx < 3:\n action_ret_idx = np.random.randint(3, act_num);\n action_ret = act_choices[action_ret_idx];\n else:\n action_ret_idx = org_action_raw_idx;\n action_ret = org_action_raw;\n elif act_4_max_cap >= clg_demand > act_3_max_cap:\n if org_action_raw_idx < 4:\n action_ret_idx = np.random.randint(4, act_num);\n action_ret = act_choices[action_ret_idx];\n else:\n action_ret_idx = org_action_raw_idx;\n action_ret = org_action_raw;\n else:\n action_ret_idx = org_action_raw_idx;\n action_ret = act_choice_4;\n\n if action_raw_idx != action_ret_idx:\n if is_show_debug:\n logger.debug('Action function: raw action %s has been changed to %s for '\n 'the demand %s W.'%(action_raw_idx, action_ret_idx, clg_demand));\n return (action_ret, action_ret_idx);", "def decide(self, map_list, bombs, powerups, bombers, explosion_list, player_index, move_number):\r\n\t\tself.action_to_take_next = Do_Nothing_Behaviour()\r\n\t\t\r\n\t\t\"\"\"Set up the DangerMap!\"\"\"\r\n\t\tdanger_map = self.map_converter.convert_to_danger_map(map_list, bombs, explosion_list)\r\n\r\n\t\t\"\"\"Check accessible squares\"\"\"\r\n\t\taccessible_squares = self.path_planner.query_accessible_squares(map_list, bombers, player_index)\r\n\r\n\t\tprint(\"******Decision Time!******\")\r\n\t\tfor behaviour in self.behaviours:\r\n\t\t\tif behaviour.check_conditions(map_list, bombs, powerups, bombers, explosion_list, player_index, move_number, danger_map, accessible_squares) == True and (behaviour.priority > self.action_to_take_next.priority):\r\n\t\t\t\tself.action_to_take_next = behaviour\r\n\r\n\t\tprint(\"******Action Time!******\")\r\n\t\tself.move = self.action_to_take_next.take_action(map_list, bombs, powerups, bombers, explosion_list, player_index, move_number, danger_map, accessible_squares)\r\n\t\tif self.move == None:\r\n\t\t\tself.move = self.random_move.take_action(map_list, bombs, powerups, bombers, explosion_list, player_index, move_number, danger_map, accessible_squares)\r\n\t\tprint self.move\r\n\t\treturn self.move" ]
[ "0.53727496", "0.52645314", "0.5157133", "0.49841082", "0.49510542", "0.4925426", "0.49011576", "0.48871118", "0.48354113", "0.48184738", "0.47840136", "0.47807923", "0.47687817", "0.4744988", "0.47337708", "0.47337708", "0.47306305", "0.47026968", "0.46788028", "0.46617597", "0.46319973", "0.46177083", "0.46157676", "0.45959032", "0.4580701", "0.45701534", "0.45567495", "0.45390245", "0.45383227", "0.4530032" ]
0.5285151
1
Returns initializations for controller states, actions, communications, and outputs.
def controller_bookkeeping_vars(self):
    # Keeps track of all the controller states.
    controller_states = np.zeros(
        self._num_communication_turns + 1,
        self._num_controllers,
        self._controller_state_size)
    # Keeps track of all controllers' selected actions (communication + output).
    controller_actions = np.zeros(
        self._num_communication_turns,
        self._num_controllers,
        1)
    # List that will contain the output actions.
    output_actions = []
    return controller_states, controller_actions, output_actions
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getInitParams(self):\n return {}", "def get_initial_state(self, **kwargs):\n return {'successes': np.ones(self.env.nA, dtype=np.int32),\n 'failures': np.ones(self.env.nA, dtype=np.int32)}", "def _get_controller_parameters(self):\n pass", "def initialise(self):\n self.sc.init.exec_action(self.variables)", "def initial_states(self):\n return self._initial_states", "def get_initial_states(self):\n raise NotImplementedError()", "def getInitParams(self):\n paramDict = Distribution.getInitParams(self)\n paramDict['mapping'] = self.mapping\n paramDict['values'] = self.values\n return paramDict", "def make_initial_state(self):\n return {\n 'h_rec':Variable(np.zeros((1, self.n_hidden), dtype=np.float32)),\n 'c_rec':Variable(np.zeros((1, self.n_hidden), dtype=np.float32)),\n 'h_gen':Variable(np.zeros((1, self.n_hidden), dtype=np.float32)),\n 'c_gen':Variable(np.zeros((1, self.n_hidden), dtype=np.float32))\n }", "def init_vars(self):\n # type: () -> None\n raise NotImplementedError", "def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['n' ] = self.n\n paramDict['p' ] = self.p\n return paramDict", "def init(self):\n self.initialised=True\n return \"%s init successful\" % self.hardwareActionName", "def get_init_list(self):\n\n return self.convert_compartments_to_list(self.init_compartments)", "def getInitParams(self):\n paramDict = super().getInitParams()\n paramDict['transition'] = self.transition\n paramDict['steadyStatePb'] = self.steadyStatePb\n return paramDict", "def initial_state():\n\treturn [[EMPTY, EMPTY, EMPTY],\n\t\t\t[EMPTY, EMPTY, EMPTY],\n\t\t\t[EMPTY, EMPTY, EMPTY]]", "def __getstate__(self):\n state = {\n 'connector_keys' : self.connector_keys,\n 'metric_key' : self.metric_key,\n 'location_key' : self.location_key,\n 'parameters' : self.parameters,\n 'mrsm_instance' : self.instance_keys,\n }\n return state", "def get_init_state(self, agent_index=0):\n return self.init_state", "def initView(self):\n return {}", "def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['lambda'] = self.lambdaVar\n paramDict['k' ] = self.k\n paramDict['low' ] = self.low\n return paramDict", "def initial_states(self):\n return list(self.iter_initial_states())", "def test_controller_initialization(self):\n for name in self.our_controllers:\n self.assertTrue(self.check_state(name, 'initialized'), \"{} is initialized correctly\".format(name))", "def initialise_acquisition_conductors(self):\n for _, acq in self._acquisition_controllers.items():\n acq.initialise()", "def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['mu' ] = self.mu\n return paramDict", "def prepareController(self):\n pass", "def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['p'] = self.p\n return paramDict", "def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['p'] = self.p\n return paramDict", "def _init_state(self):\n\n if self.init is None:\n self.init_ = self.loss_.init_estimator()\n elif isinstance(self.init, six.string_types):\n self.init_ = INIT_ESTIMATORS[self.init]()\n else:\n self.init_ = self.init\n\n self.estimators_ = np.empty((self.n_estimators, self.loss_.K),\n dtype=np.object)\n self.train_score_ = np.zeros((self.n_estimators,), dtype=np.float64)\n # do oob?\n if self.subsample < 1.0:\n self.oob_improvement_ = np.zeros((self.n_estimators),\n dtype=np.float64)", "def initialisation(self):\n self.create_variables()\n 
self.create_placeholders()\n self.build_model()\n self.reset_lr(None, True)\n self.build_loss()\n self.initialised = True", "def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['low'] = self.low\n paramDict['alpha'] = self.alpha\n paramDict['beta'] = self.beta\n return paramDict", "def initial_parameters(self):\n return self._initial_parameters", "def setup_initial_state(self):\n # collect the ids of vehicles in the network\n self.ids = self.vehicles.get_ids()\n self.controlled_ids = self.vehicles.get_controlled_ids()\n self.sumo_ids = self.vehicles.get_sumo_ids()\n self.rl_ids = self.vehicles.get_rl_ids()\n\n # dictionary of initial observations used while resetting vehicles after\n # each rollout\n self.initial_observations = dict.fromkeys(self.ids)\n\n # create the list of colors used to different between different types of\n # vehicles visually on sumo's gui\n #TODO: Get these colors working!\n # self.colors = {(255,0,0), (0,255,0),(0,0,255),(255,255,255)}\n self.colors = {}\n key_index = 1\n color_choice = np.random.choice(len(COLORS))\n for i in range(self.vehicles.num_types):\n self.colors[self.vehicles.types[i]] = \\\n COLORS[(color_choice + key_index) % len(COLORS)]\n key_index += 1\n\n for veh_id in self.ids:\n # set the colors of the vehicles based on their unique types\n veh_type = self.vehicles.get_state(veh_id, \"type\")\n self.traci_connection.vehicle.setColor(veh_id,\n self.colors[veh_type])\n\n # add the initial states to the vehicles class\n self.vehicles.set_edge(\n veh_id, self.traci_connection.vehicle.getRoadID(veh_id))\n self.vehicles.set_position(\n veh_id, self.traci_connection.vehicle.getLanePosition(veh_id))\n self.vehicles.set_lane(\n veh_id, self.traci_connection.vehicle.getLaneIndex(veh_id))\n self.vehicles.set_speed(\n veh_id, self.traci_connection.vehicle.getSpeed(veh_id))\n self.vehicles.set_route(\n veh_id, self.available_routes[self.vehicles.get_edge(veh_id)])\n self.vehicles.set_absolute_position(\n veh_id, self.get_x_by_id(veh_id))\n # the time step of the last lane change is always present in\n # the environment,but only used by sub-classes that apply lane\n # changing\n self.vehicles.set_state(veh_id, \"last_lc\",\n -1 * self.lane_change_duration)\n # some constant vehicle parameters\n self.vehicles.set_state(\n veh_id, \"length\",\n self.traci_connection.vehicle.getLength(veh_id))\n self.vehicles.set_state(veh_id, \"max_speed\", self.max_speed)\n\n # import initial state data to initial_observations dict\n self.initial_observations[veh_id] = dict()\n self.initial_observations[veh_id][\"type\"] = veh_type\n self.initial_observations[veh_id][\"edge\"] = \\\n self.traci_connection.vehicle.getRoadID(veh_id)\n self.initial_observations[veh_id][\"position\"] = \\\n self.traci_connection.vehicle.getLanePosition(veh_id)\n self.initial_observations[veh_id][\"lane\"] = \\\n self.traci_connection.vehicle.getLaneIndex(veh_id)\n self.initial_observations[veh_id][\"speed\"] = \\\n self.traci_connection.vehicle.getSpeed(veh_id)\n self.initial_observations[veh_id][\"route\"] = \\\n self.available_routes[self.initial_observations[veh_id][\"edge\"]]\n self.initial_observations[veh_id][\"absolute_position\"] = \\\n self.get_x_by_id(veh_id)\n\n # set speed mode\n self.set_speed_mode(veh_id)\n\n # set lane change mode\n self.set_lane_change_mode(veh_id)\n\n # save the initial state. 
This is used in the _reset function\n #\n route_id = \"route\" + self.initial_observations[veh_id][\"edge\"]\n pos = self.traci_connection.vehicle.getPosition(veh_id)\n\n self.initial_state[veh_id] = \\\n (self.initial_observations[veh_id][\"type\"], route_id,\n self.initial_observations[veh_id][\"lane\"],\n self.initial_observations[veh_id][\"position\"],\n self.initial_observations[veh_id][\"speed\"], pos)\n\n # collect list of sorted vehicle ids\n self.sorted_ids, self.sorted_extra_data = self.sort_by_position()\n\n # collect headway, leader id, and follower id data\n for veh_id in self.ids:\n headway = self.traci_connection.vehicle.getLeader(veh_id, 2000)\n if headway is None:\n self.vehicles.set_leader(veh_id, None)\n self.vehicles.set_headway(veh_id, 9e9)\n else:\n self.vehicles.set_leader(veh_id, headway[0])\n self.vehicles.set_headway(veh_id, headway[1])\n self.vehicles.set_follower(headway[0], veh_id)\n\n # contains the last lc before the current step\n self.prev_last_lc = dict()\n for veh_id in self.ids:\n self.prev_last_lc[veh_id] = self.vehicles.get_state(veh_id,\n \"last_lc\")\n\n # subscribe the requested states for traci-related speedups\n for veh_id in self.ids:\n self.traci_connection.vehicle.subscribe(\n veh_id, [tc.VAR_LANE_INDEX, tc.VAR_LANEPOSITION,\n tc.VAR_ROAD_ID, tc.VAR_SPEED])\n self.traci_connection.vehicle.subscribeLeader(veh_id, 2000)" ]
[ "0.6425426", "0.6151258", "0.6135855", "0.60412157", "0.59889364", "0.59743536", "0.5963586", "0.59514403", "0.5904424", "0.58903825", "0.5870927", "0.58576226", "0.5829913", "0.5769961", "0.5758137", "0.57319695", "0.5729912", "0.57269716", "0.5722204", "0.5721112", "0.5717913", "0.5713581", "0.5710071", "0.57006276", "0.57006276", "0.5697208", "0.5696632", "0.56800747", "0.5666427", "0.56571335" ]
0.7170085
0
Splits data frame into training-validation-test data frames. This also calibrates the scaling object and transforms the data for each split.
def split_data(self, df, valid_boundary=2016, test_boundary=2018):
    stock_count = len(self.sl)
    test_ratio = 0.2
    print('Stock count:%d'% stock_count)
    train_x = []
    test_x = []
    for label_, d_ in enumerate(self.sl):
        stock_train_len = int(len(d_.train_y) * (1 - test_ratio))
        train_x += list(d_.train_x[:stock_train_len])
        test_x += list(d_.train_x[stock_train_len:])
    train_g = pd.DataFrame(train_x, columns=([k[0] for k in self._column_definition]))
    test_g = pd.DataFrame(test_x, columns=([k[0] for k in self._column_definition]))
    self.set_scalers(train_g)

    def tofloat(data):
        for col in data.columns:
            if col not in {'Symbol', 'date'}:
                data[col] = data[col].astype('float32')
        return data

    train_g = tofloat(train_g)
    test_g = tofloat(test_g)
    # used test for both valid and test
    return train_g, test_g, test_g
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def train_val_test_split(df):\n from sklearn.model_selction import train_test_split\n train, test = train_test_split(df, train_size = 0.80, test_size=0.20,\n random_state = 42)\n train, val = train_test_split(train, train_size = 0.70, val_size=0.30)\n print(train.shape, val.shape, test.shape)\n\n return train, val, test", "def scale(data, test=False):\n\n if test:\n data_test = data.loc[data['Train'] == 0]\n data = data.loc[data['Train'] == 1]\n\n scaler = StandardScaler()\n scaler.fit(data)\n scaled_array = scaler.transform(data)\n data = pd.DataFrame(scaled_array,\n columns=list(data.columns))\n data['Train'] = 1\n if test:\n test_array = scaler.transform(data_test)\n data_test = pd.DataFrame(test_array, columns=list(data_test.columns))\n data_test['Train'] = 0\n data = pd.concat([data, data_test])\n return data", "def split_data(self, df, valid_boundary=1315, test_boundary=1339):\n\n print_info('Formatting train-valid-test splits.')\n\n index = df['days_from_start']\n train = df.loc[index < valid_boundary]\n valid = df.loc[(index >= valid_boundary - 7) & (index < test_boundary)]\n test = df.loc[index >= test_boundary - 7]\n\n self.set_scalers(train)\n\n return (self.transform_inputs(data) for data in [train, valid, test])", "def scale(train, validate, test):\n train, validate, test = add_scaled_columns(\n train,\n validate,\n test,\n scaler=sklearn.preprocessing.MinMaxScaler(),\n columns_to_scale=['total_lines'],\n )\n return train, validate, test", "def _divide_into_test_train(\n self, test_size: int, train_size: int\n ) -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame]:\n X_train, X_test, y_train, y_test = train_test_split(\n self.df.iloc[:, :-1],\n self.df.iloc[:, -1],\n test_size=test_size,\n train_size=train_size,\n )\n return X_train, X_test, y_train, y_test", "def Train_Test_Split_and_Scale(features, labels, scaler=None, random_state=42, test_size=0.25):\n # train_test_split from SciKit learn is applied on the feature and labels\n (X_train, X_test,\n y_train, y_test) = train_test_split(features, labels,\n random_state=random_state,\n test_size=test_size)\n # if a scaler from SciKit learn is passed then apply it to X_train\n if scaler:\n # try to scale X_train and transform X_train and X_test with passed in scaler\n try:\n scaler.fit(X_train)\n # supress warnings for SettingCopywithWarning\n with pd.option_context(\"mode.chained_assignment\", None):\n # maintain dataframe structure\n X_train.loc[:,:] = scaler.transform(X_train.values)\n X_test.loc[:,:] = scaler.transform(X_test.values)\n # return the following values\n return (X_train, X_test, y_train, y_test, scaler)\n except:\n print(\"Passed in scaler does not have .fit() and .transform() methods.\\nReturn values from train_test_split() method.\")\n return (X_train, X_test, y_train, y_test, scaler)\n else:\n # return values from train_test_split() method\n return (X_train, X_test, y_train, y_test, scaler)", "def train_test_split(df, test_size=0.3):\r\n # split df here\r\n train_size = int(df.shape[0] * (1 - test_size))\r\n test_size = df.shape[0] - train_size\r\n train = df[:train_size]\r\n test = df[train_size:]\r\n\r\n return train, test # return the train and test datasets\r", "def train_validation_test_split(col_stratify='Kind of offensive language',\n train_percent=0.6,\n validate_percent=0.2,\n test_percent=0.2,\n random_state=101):\n\n data = pd.read_csv('cleaned_data.csv', header=0)\n\n if train_percent + validate_percent + test_percent != 1.0:\n raise ValueError(f'Sum of train, validate and test is 
not 1.0')\n\n if col_stratify not in data.columns:\n raise ValueError(f'{col_stratify} is not a column in the dataframe')\n\n X = data\n y = data[[col_stratify]]\n\n # Split original dataframe into train and temp dataframes.\n data_train, data_temp, y_train, y_temp = train_test_split(X,\n y,\n stratify=y,\n test_size=(\n 1.0 - train_percent),\n random_state=random_state)\n # Split the temp dataframe into val and test dataframes.\n test_to_split = test_percent / (validate_percent + test_percent)\n data_val, data_test, y_val, y_val = train_test_split(data_temp,\n y_temp,\n stratify=y_temp,\n test_size=test_to_split,\n random_state=random_state)\n\n assert len(data) == len(data_train) + len(data_val) + len(data_test)\n\n return data_train, data_val, data_test, y_train, y_val, y_val", "def scale(self, X_train, X_test):\n\n #X_train, X_test, y_train, y_test = self.split_X_y_sets()\n self.scaler.fit(X_train)\n X_train_sc = self.scaler.transform(X_train)\n X_test_sc = self.scaler.transform(X_test)\n\n return X_train_sc, X_test_sc #, y_train, y_test", "def scale_data(self, train_data):\n\n # Fit on training data only.\n # scaler = StandardScaler().fit(train_data[self.feature_names])\n scaler = QuantileTransformer().fit(train_data[self.feature_names])\n self.scaler = scaler\n scaled_train_data = scaler.transform(train_data[self.feature_names])\n\n scaled_train_data_df = pd.DataFrame(data=scaled_train_data, columns=self.feature_names)\n scaled_train_data_df.index = train_data.index\n scaled_train_data_df[self.outcome_name] = train_data[self.outcome_name]\n\n return scaled_train_data_df", "def __split_dataset(self):\n self.train, self.valid, _, _ = train_test_split(self.data, self.data, test_size=0.2)\n self.valid, self.test, _, _ = train_test_split(self.valid, self.valid, test_size=0.5)", "def split_data(df):\n\trandom_seed = 1\n\tdf_train = df.sample(frac=0.8, random_state=random_seed)\n\tdf_rem = df.loc[~df.index.isin(df_train.index)]\n\tdf_valid = df_rem.sample(frac=0.5, random_state=random_seed)\n\tdf_test = df_rem.loc[~df_rem.index.isin(df_valid.index)]\n\tlogger.info(\"Shape of training dataframe: \" + str(df_train.shape))\n\tlogger.info(\"Shape of validation dataframe: \" + str(df_valid.shape))\n\tlogger.info(\"Sahpe of test dataframe: \" + str(df_test.shape))\n\n\treturn df_train, df_valid, df_test", "def standard_scaler(X_train, X_validate, X_test):\n\n scaler = StandardScaler().fit(X_train)\n X_train_scaled = pd.DataFrame(scaler.transform(X_train), index = X_train.index, columns = X_train.columns)\n X_validate_scaled = pd.DataFrame(scaler.transform(X_validate), index = X_validate.index, columns = X_validate.columns)\n X_test_scaled = pd.DataFrame(scaler.transform(X_test), index = X_test.index, columns = X_test.columns)\n \n return scaler, X_train_scaled, X_validate_scaled, X_test_scaled", "def split_data(df, test_size): \n\n X_train, X_test, y_train, y_test = train_test_split(df[[\"description_processed\", \"transaction_type\", \"transaction_account_type\"]],\n df['transaction_class'],\n test_size=test_size,\n shuffle=True,\n random_state=42)\n \n return X_train, X_test, y_train, y_test", "def test_split_data(self, whp_pandas):\n test_class = Slug_Forecasting(whp_pandas.copy())\n test_class.stationarity_check()\n test_class.split_data()\n\n assert hasattr(test_class, \"y_train\"), \"y_train attribute must have been create\"\n assert hasattr(test_class, \"y_pred\"), \"y_test attribute must have been create\"\n\n assert len(test_class.y_train) == 180, \"In this example, y_train should be 180 
long\"\n assert len(test_class.y_pred) == 60, \"In this example, y_pred should be 60 long\"\n\n test_class = Slug_Forecasting(whp_pandas.copy())\n\n # test train size data\n try:\n test_class.split_data(train_size=400)\n print(\"Not enough data to fulfill train_size requirement\")\n raise ValueError\n except AssertionError:\n pass", "def scale(train, test):\n # fit scaler\n scaler = MinMaxScaler(feature_range=(-1, 1))\n scaler = scaler.fit(train)\n # transform train\n train = train.reshape(train.shape[0], train.shape[1])\n train_scaled = scaler.transform(train)\n # transform test\n test = test.reshape(test.shape[0], test.shape[1])\n test_scaled = scaler.transform(test)\n return scaler, train_scaled, test_scaled", "def X_scale_train_test(self, do_data=False):\n\n X_scaled_all_vars_train = self.X_scaler.transform(self.all_vars_X_train) #returns np array so need to re-cast into pandas to get colums/variables\n X_scaled_all_vars_train = pd.DataFrame(X_scaled_all_vars_train, columns=self.low_level_vars_flat+self.high_level_vars)\n self.X_train_low_level = X_scaled_all_vars_train[self.low_level_vars_flat].values #will get changed to 2D arrays later\n self.X_train_high_level = X_scaled_all_vars_train[self.high_level_vars].values\n\n X_scaled_all_vars_test = self.X_scaler.transform(self.all_vars_X_test) #important to use scaler tuned on X train\n X_scaled_all_vars_test = pd.DataFrame(X_scaled_all_vars_test, columns=self.low_level_vars_flat+self.high_level_vars)\n self.X_test_low_level = X_scaled_all_vars_test[self.low_level_vars_flat].values #will get changed to 2D arrays later\n self.X_test_high_level = X_scaled_all_vars_test[self.high_level_vars].values\n\n if do_data: #for plotting purposes\n X_scaled_data_all_vars_train = self.X_scaler.transform(self.all_X_data_train)\n X_scaled_data_all_vars_train = pd.DataFrame(X_scaled_data_all_vars_train, columns=self.low_level_vars_flat+self.high_level_vars)\n self.X_data_train_high_level = X_scaled_data_all_vars_train[self.high_level_vars].values \n self.X_data_train_low_level = X_scaled_data_all_vars_train[self.low_level_vars_flat].values\n\n X_scaled_data_all_vars_test = self.X_scaler.transform(self.all_X_data_test)\n X_scaled_data_all_vars_test = pd.DataFrame(X_scaled_data_all_vars_test, columns=self.low_level_vars_flat+self.high_level_vars)\n self.X_data_test_high_level = X_scaled_data_all_vars_test[self.high_level_vars].values\n self.X_data_test_low_level = X_scaled_data_all_vars_test[self.low_level_vars_flat].values", "def scale_test_train(train, test=None, scale=QuantileTransformer):\n\n scaler = scale()\n train = Data(scaler.fit_transform(train.X), train.y)\n test = None if test is None else Data(scaler.transform(test.X), test.y)\n\n return train, test", "def train_test_split(df):\n training_size = int(len(df) * .67)\n test_size = int(len(df) - training_size)\n train, test = df[0:training_size], df[training_size:len(df)]\n return train, test", "def split_dataset(self, test_size=0.20):\n\t\t(self.training_data, self.test_data, self.training_labels, self.test_labels) = train_test_split(self.training_data, self.training_labels, test_size=test_size)", "def train_test_split(df, test_size=0.1):\n ntrn = int(round(len(df) * (1 - test_size)))\n\n X_train, y_train = _load_data(df.iloc[0:ntrn])\n X_test, y_test = _load_data(df.iloc[ntrn:])\n\n return (X_train, y_train), (X_test, y_test)", "def splitTrainValidate(df, perc_training = 0.8):\n train = df.sample(frac=perc_training)#, random_state=200)\n validate = df.drop(train.index)\n return (train, 
validate)", "def train_test(df, response, train_size=0.75, time_series=False, scaling=None):\n\n data = df.copy()\n X = data.drop(response, 1)\n y = data[response]\n\n logging.info('X columns')\n logging.info(list(X.columns))\n logging.info('Response')\n logging.info(response)\n\n if time_series:\n trainsize = int(train_size*len(X))\n X_train = X[:trainsize].values\n X_test = X[trainsize:].values\n y_train = y[:trainsize].values\n y_test = y[trainsize:].values\n\n else:\n X_train, X_test, y_train, y_test = train_test_split(X.values,\n y.values,\n random_state=0,\n train_size=train_size)\n if scaling == 'standard':\n scaler = preprocessing.StandardScaler()\n if scaling == 'minmax':\n scaler = preprocessing.MinMaxScaler()\n if scaling == 'maxabs':\n scaler = preprocessing.MaxAbsScaler()\n if scaling == 'robust':\n scaler = preprocessing.RobustScaler()\n if scaling == 'quantile':\n scaler = preprocessing.QuantileTransformer()\n\n if scaling != None:\n scaler.fit(X_train)\n X_train = scaler.transform(X_train)\n X_test = scaler.transform(X_test)\n\n return X_train, X_test, y_train, y_test", "def normalize_datasets(train, test):\n columns = train.columns[:-1]\n train[columns] = (train[columns] - train[columns].mean()) / (train[columns].max() - train[columns].min())\n test[columns] = (test[columns] - test[columns].mean()) / (test[columns].max() - test[columns].min())\n\n return train, test", "def prepare_scale_train_valid_test(\n data: Union[pd.DataFrame, pd.Series],\n n_input_days: int,\n n_predict_days: int,\n test_size: float,\n s_end_date: str,\n no_shuffle: bool,\n):\n\n # Pre-process data\n if PREPROCESSER == \"standardization\":\n scaler = StandardScaler()\n\n elif PREPROCESSER == \"minmax\":\n scaler = MinMaxScaler()\n\n elif PREPROCESSER == \"normalization\":\n scaler = Normalizer()\n\n elif (PREPROCESSER == \"none\") or (PREPROCESSER is None):\n scaler = None\n # Test data is used for forecasting. 
Takes the last n_input_days data points.\n # These points are not fed into training\n\n if s_end_date:\n data = data[data.index <= s_end_date]\n if n_input_days + n_predict_days > data.shape[0]:\n print(\"Cannot train enough input days to predict with loaded dataframe\\n\")\n return (\n None,\n None,\n None,\n None,\n None,\n None,\n None,\n None,\n None,\n None,\n None,\n True,\n )\n\n test_data = data.iloc[-n_input_days:]\n train_data = data.iloc[:-n_input_days]\n\n dates = data.index\n dates_test = test_data.index\n if scaler:\n train_data = scaler.fit_transform(data.values.reshape(-1, 1))\n test_data = scaler.transform(test_data.values.reshape(-1, 1))\n else:\n train_data = data.values.reshape(-1, 1)\n test_data = test_data.values.reshape(-1, 1)\n\n prices = train_data\n\n input_dates = []\n input_prices = []\n next_n_day_prices = []\n next_n_day_dates = []\n\n for idx in range(len(prices) - n_input_days - n_predict_days):\n input_prices.append(prices[idx : idx + n_input_days])\n input_dates.append(dates[idx : idx + n_input_days])\n next_n_day_prices.append(\n prices[idx + n_input_days : idx + n_input_days + n_predict_days]\n )\n next_n_day_dates.append(\n dates[idx + n_input_days : idx + n_input_days + n_predict_days]\n )\n\n input_dates = np.asarray(input_dates)\n input_prices = np.array(input_prices)\n next_n_day_prices = np.array(next_n_day_prices)\n next_n_day_dates = np.asarray(next_n_day_dates)\n\n (\n X_train,\n X_valid,\n y_train,\n y_valid,\n X_dates_train,\n X_dates_valid,\n y_dates_train,\n y_dates_valid,\n ) = train_test_split(\n input_prices,\n next_n_day_prices,\n input_dates,\n next_n_day_dates,\n test_size=test_size,\n shuffle=no_shuffle,\n )\n return (\n X_train,\n X_valid,\n y_train,\n y_valid,\n X_dates_train,\n X_dates_valid,\n y_dates_train,\n y_dates_valid,\n test_data,\n dates_test,\n scaler,\n False,\n )", "def train_val_test_split(data):\n raise NotImplementedError", "def standardize_data(X_train, X_test):\n scaler = StandardScaler()\n scaler.fit(X_train)\n X_train = scaler.transform(X_train)\n # apply same transformation to test data\n X_test = scaler.transform(X_test)\n return X_train, X_test", "def split_data(self, data):\n\n train_df, test_df = train_test_split(data, test_size=self.test_size, \n random_state=0, \n stratify=data[self.outcome_name])\n\n # print(\"Splitting data into training with \", train_df.shape, \"sampes and \",\n # test_df.shape, \"testing samples\")\n\n return train_df, test_df", "def train_val_test_split(df, target):\n X_tv, X_test, y_tv, y_test = train_test_split(df, df[target],\n test_size=0.15,\n random_state=42)\n\n X_train, X_val, y_train, y_val = train_test_split(X_tv, y_tv,\n test_size=0.2,\n random_state=42)\n\n return X_train, y_train, X_val, y_val, X_test, y_test", "def split_data(X, scaling, ids, y, split_ratio=0.2):\r\n split = int(X.shape[0] * split_ratio) # index must be int\r\n X_test = X[:split, :, :, :]\r\n scaling_test = scaling[:split, :]\r\n ids_test = ids[:split]\r\n y_test = y[:split, :]\r\n X_train = X[split:, :, :, :]\r\n scaling_train = scaling[split:, :]\r\n ids_train = y[split:]\r\n y_train = y[split:, :]\r\n\r\n return X_train, scaling_train, ids_train, y_train, X_test, scaling_test, ids_test, y_test" ]
[ "0.69305676", "0.68500185", "0.6825401", "0.6820178", "0.6819639", "0.6806733", "0.67640877", "0.67156935", "0.6681609", "0.6675585", "0.6672727", "0.6662629", "0.6619539", "0.661847", "0.6615578", "0.6607513", "0.65914935", "0.6542333", "0.65270346", "0.6515594", "0.6490784", "0.6471735", "0.64228415", "0.64097786", "0.64070934", "0.63845503", "0.6351802", "0.6330788", "0.63271445", "0.6320291" ]
0.70878667
0
Calibrates scalers using the data supplied.
def set_scalers(self, df):
    print('Setting scalers with training data...')

    column_definitions = self.get_column_definition()
    id_column = utils.get_single_col_by_input_type(InputTypes.ID, column_definitions)
    target_column = utils.get_single_col_by_input_type(InputTypes.TARGET, column_definitions)

    # Extract identifiers in case required
    self.identifiers = list(df[id_column].unique())

    # Format real scalers
    real_inputs = utils.extract_cols_from_data_type(
        DataTypes.REAL_VALUED, column_definitions,
        {InputTypes.ID, InputTypes.TIME})

    data = df[real_inputs].values
    self._real_scalers = sklearn.preprocessing.StandardScaler().fit(data)
    self._target_scaler = sklearn.preprocessing.StandardScaler().fit(
        df[[target_column]].values)  # used for predictions

    # Format categorical scalers
    categorical_inputs = utils.extract_cols_from_data_type(
        DataTypes.CATEGORICAL, column_definitions,
        {InputTypes.ID, InputTypes.TIME})

    categorical_scalers = {}
    num_classes = []
    for col in categorical_inputs:
        # Set all to str so that we don't have mixed integer/string columns
        srs = df[col].apply(str)
        categorical_scalers[col] = sklearn.preprocessing.LabelEncoder().fit(
            srs.values)
        num_classes.append(srs.nunique())

    # Set categorical scaler outputs
    self._cat_scalers = categorical_scalers
    self._num_classes_per_cat_input = num_classes
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main(light_graph,\n calibration_data,\n hw_specs,\n sw_config,\n sim_params,\n nodes_to_calibrate):\n with graph_collection.GraphCollection() as graph_coll:\n # Create calibration graph\n hist_coll = graph_coll.histogram_collection()\n convert_to_calib_graph = (convert_to_activation_scale_calibration_graph.\n ConvertToActivationScaleCalibrationGraph(\n nodes_to_calibrate,\n sw_config,\n hist_coll))\n calib_graph = convert_to_calib_graph.process_transforms(light_graph)\n\n runner = histogram_graph_runner.HistogramGraphRunner(calib_graph,\n hw_specs,\n sw_config,\n sim_params,\n graph_coll)\n runner.run(calibration_data)\n\n # Get scales data\n logging.info(\"-Computing Scales\")\n activation_scales_data = get_scales_data(hist_coll,\n nodes_to_calibrate,\n convert_to_calib_graph,\n sw_config)\n\n return activation_scales_data", "def set_scalers(self, df):\n print_info('Setting scalers with training data...')\n\n column_definitions = self.getcolumn_definition()\n id_column = get_single_col_by_input_type(InputTypes.ID, column_definitions)\n target_column = get_single_col_by_input_type(InputTypes.TARGET, column_definitions)\n\n # Format real scalers\n real_inputs = extract_cols_from_data_type(\n DataTypes.REAL_VALUED, column_definitions,\n {InputTypes.ID, InputTypes.TIME})\n\n # Initialise scaler caches\n self.real_scalers = {}\n self.target_scaler = {}\n identifiers = []\n for identifier, sliced in df.groupby(id_column):\n\n if len(sliced) >= self._time_steps:\n\n data = sliced[real_inputs].values\n targets = sliced[[target_column]].values\n self.real_scalers[identifier] = sk_preprocessing.StandardScaler().fit(data)\n self.target_scaler[identifier] = sk_preprocessing.StandardScaler().fit(targets)\n identifiers.append(identifier)\n\n # Format categorical scalers\n categorical_inputs = extract_cols_from_data_type(\n DataTypes.CATEGORICAL, column_definitions,\n {InputTypes.ID, InputTypes.TIME})\n\n categorical_scalers = {}\n num_classes = []\n for col in categorical_inputs:\n # Set all to str so that we don't have mixed integer/string columns\n srs = df[col].apply(str)\n categorical_scalers[col] = sk_preprocessing.LabelEncoder().fit(srs.values)\n num_classes.append(srs.nunique())\n\n # Set categorical scaler outputs\n self._cat_scalers = categorical_scalers\n self._num_classes_per_cat_input = num_classes\n\n # Extract identifiers in case required\n self.identifiers = identifiers", "def run (self, scalers = {'capital costs':1.0}):\n\n self.was_run = True\n self.reason = \"OK\"\n tag = self.cd['file id'].split('+')\n if len(tag) > 1 and tag[1] != 'wind':\n self.was_run = False\n self.reason = \"Not a Wind project\"\n return\n\n try:\n #~ self.generation = self.forecast.get_generation(self.start_year)\n self.calc_average_load()\n self.calc_generation_wind_proposed()\n except AttributeError:\n self.diagnostics.add_warning(self.component_name,\n \"could not be run\")\n self.was_run = False\n self.reason = (\"Could not Calculate average load or \"\n \"proposed generation\")\n return\n\n\n\n\n #~ #~ print self.comp_specs['wind class']\n # ??? 
some kind of failure message\n if self.average_load is None or \\\n (self.average_load > self.comp_specs['average load limit'] and \\\n self.load_offset_proposed > 0):\n #~ float(self.comp_specs['wind class']) > \\\n #~ self.comp_specs['minimum wind class'] and \\\n\n # if the average load is greater that the lower limit run this component\n # else skip\n\n self.calc_transmission_losses()\n self.calc_excess_energy()\n self.calc_net_generation_wind()\n self.calc_electric_diesel_reduction()\n self.calc_diesel_equiv_captured()\n self.calc_loss_heat_recovery()\n self.calc_reduction_diesel_used()\n\n\n if self.cd[\"model financial\"]:\n # AnnualSavings functions (don't need to write)\n self.get_diesel_prices()\n\n # change these below\n self.calc_capital_costs()\n self.calc_maintenance_cost()\n self.calc_annual_electric_savings()\n self.calc_annual_heating_savings()\n\n # AnnualSavings functions (don't need to write)\n self.calc_annual_total_savings()\n self.calc_annual_costs(self.cd['interest rate'],\n scalers['capital costs'])\n self.calc_annual_net_benefit()\n self.calc_npv(self.cd['discount rate'], self.cd[\"current year\"])\n #~ print self.benefit_cost_ratio\n self.calc_levelized_costs(self.maintenance_cost)\n else:\n #~ print \"wind project not feasible\"\n self.was_run = False\n if self.load_offset_proposed <= 0:\n self.reason = \"Proposed load offset less than 0\"\n else:\n self.reason = \\\n \"Average load too small for viable wind generation.\"\n self.diagnostics.add_note(self.component_name,\n \"communities average load is not large enough to consider project\")\n #~ print self.benefit_cost_ratio", "def scale_together(data, comp):\n scales = []\n guess = 1.\n s = opt.minimize(sq_residuals_in_range, guess, args = (data, comp), \n method = 'Nelder-Mead').x\n return s", "def run (self, scalers = {'capital costs':1.0}):\n self.was_run = True\n self.reason = \"OK\"\n\n tag = self.cd['file id'].split('+')\n if len(tag) > 1 and tag[1] != 'biomass_pellet':\n self.was_run = False\n self.reason = (\"Not a biomass pellet project.\")\n return\n\n if not self.cd[\"on road system\"]:\n self.diagnostics.add_warning(self.component_name,\n \"not on road system\")\n self.max_boiler_output = 0\n self.heat_displaced_sqft = 0\n self.biomass_fuel_consumed = 0\n self.fuel_price_per_unit = 0\n self.heat_diesel_displaced = 0\n self.reason = \\\n \"Not on road or marine highway system, so it is assumed that\" +\\\n \" pellets cannot be delivered cost effectively.\"\n return\n\n if np.isnan(float(self.comp_specs['peak month % of total'])):\n self.diagnostics.add_warning(self.component_name,\n \"bad config value for 'peak month % of total'\")\n self.max_boiler_output = 0\n self.heat_displaced_sqft = 0\n self.biomass_fuel_consumed = 0\n self.fuel_price_per_unit = 0\n self.heat_diesel_displaced = 0\n self.reason = \"bad config value for 'peak month % of total'\"\n return\n\n if self.cd[\"model heating fuel\"]:\n self.calc_heat_displaced_sqft()\n self.calc_energy_output()\n efficiency = self.comp_specs[\"pellet efficiency\"]\n self.calc_max_boiler_output(efficiency)\n factor = self.comp_specs['capacity factor']\n self.calc_biomass_fuel_consumed(factor)\n self.calc_diesel_displaced()\n\n\n if self.cd[\"model financial\"]:\n self.get_diesel_prices()\n\n self.calc_capital_costs()\n self.calc_maintenance_cost()\n\n\n self.fuel_price_per_unit = self.cd['pellet price']\n\n self.calc_proposed_biomass_cost(self.fuel_price_per_unit)\n self.calc_displaced_heating_oil_price()\n\n\n self.calc_annual_electric_savings()\n 
self.calc_annual_heating_savings()\n\n self.calc_annual_total_savings()\n self.calc_annual_costs(self.cd['interest rate'],\n scalers['capital costs'])\n self.calc_annual_net_benefit()\n self.calc_npv(self.cd['discount rate'], self.cd[\"current year\"])\n\n fuel_cost = self.biomass_fuel_consumed * self.fuel_price_per_unit\n self.calc_levelized_costs(self.maintenance_cost + fuel_cost)", "def __call__(self, data):\n cal = self.calc()\n assert data.shape == cal.shape, \\\n \"data shape does not match calibration ({} != {})\".format(data.shape, cal.shape)\n return data * cal", "def fit_scalers(self, df: pd.DataFrame) -> None:\n for feature, scaler in self._scalers.items():\n if feature == \"season\":\n scaler.fit(df[\"season\"].unique().reshape(-1, 1))\n elif feature in FEATURES_TO_SCALE:\n values = np.concatenate((df[f\"home_{feature}\"].values, df[f\"away_{feature}\"].values))\n scaler.fit(np.unique(values).reshape(-1, 1))\n else:\n scaler.fit(df[feature].unique().reshape(-1, 1))", "def platform_auto_calibrate(self):\n self.platform_auto_calibrate_servos()\n self.platform_auto_calibrate_imu()", "def compute_scaler(data_type):\n workspace = config.workspace\n\n if data_type == 'train':\n snr = config.Tr_SNR\n \n # Load data. \n t1 = time.time()\n hdf5_path = os.path.join(workspace, \"packed_features\", \"spectrogram\", data_type, \"%ddb\" % int(snr), \"data.h5\")\n with h5py.File(hdf5_path, 'r') as hf:\n x = hf.get('x') \n x = np.array(x) # (n_segs, n_concat, n_freq)\n \n # Compute scaler. \n (n_segs, n_concat, n_freq) = x.shape\n x2d = x.reshape((n_segs * n_concat, n_freq))\n scaler = StandardScaler(with_mean=True, with_std=True).fit(x2d)\n# print(scaler.mean_)\n# print(scaler.scale_)\n \n # Write out scaler. \n out_path = os.path.join(workspace, \"packed_features\", \"spectrogram\", data_type, \"%ddb\" % int(snr), \"scaler.p\")\n create_folder(os.path.dirname(out_path))\n pickle.dump(scaler, open(out_path, 'wb'))\n \n print(\"Save scaler to %s\" % out_path)\n print(\"Compute scaler finished! 
%s s\" % (time.time() - t1,))", "def optscl(args=None):\n\n parser = argparse.ArgumentParser(description=optscl.__doc__)\n\n # positional\n parser.add_argument('map', help='name of the input map')\n parser.add_argument('data', help='data file')\n parser.add_argument('scaled', help='scaled output map')\n\n # optional\n parser.add_argument(\n '-i', dest='iscale', action='store_true',\n help='individual scaling (else a single global scale)'\n )\n\n # OK, done with arguments.\n args = parser.parse_args()\n\n # load map and data\n dmap = doppler.Map.rfits(doppler.afits(args.map))\n data = doppler.Data.rfits(doppler.afits(args.data))\n\n nscale = 0\n for image in dmap.data:\n nscale += len(image.wave)\n\n if args.iscale and nscale > 1:\n # in this option we try to individually scale the images\n mtemp = copy.deepcopy(dmap)\n flux, ferr = retarr(data)\n wgt = np.empty_like(ferr)\n ok = ferr > 0\n wgt[ok] = 1./ferr[ok]**2\n\n # create indices to access the scale factors\n # save old scale factors\n sindices = []\n osfacs = []\n for ni, image in enumerate(dmap.data):\n for ns in range(len(image.scale)):\n sindices.append((ni,ns))\n osfacs.append(image.scale[ns])\n image.scale[ns] = 0.\n\n # compute a set of data vectors with each scale factor\n # set to 1 with all others set = 0, one by one\n dvecs = []\n for ni, ns in sindices:\n dmap.data[ni].scale[ns] = 1.0\n\n # compute data equivelent to data\n dvec = copy.deepcopy(data)\n doppler.comdat(dmap, dvec)\n dvecs.append(retarr(dvec)[0])\n\n dmap.data[ni].scale[ns] = 0.0\n\n # compile least-squares matrix & right-hand vector\n nvec = len(dvecs)\n A = np.empty((nvec,nvec))\n b = np.empty((nvec))\n for j in range(nvec):\n b[j] = (wgt[ok]*dvecs[j]*flux[ok]).sum()\n for i in range(j+1):\n A[j][i] = (wgt[ok]*dvecs[j]*dvecs[i]).sum()\n A[i][j] = A[j][i]\n\n nsfacs = linalg.solve(A,b)\n ocalc = np.zeros_like(flux)\n ncalc = np.zeros_like(flux)\n for j in range(nvec):\n ocalc += osfacs[j]*dvecs[j]\n ncalc += nsfacs[j]*dvecs[j]\n\n ndata = flux.size\n cold = (wgt*(flux-ocalc)**2).sum()/ndata\n cnew = (wgt*(flux-ncalc)**2).sum()/ndata\n print('Chi**2/ndata (before) =',cold,' (after) =',cnew)\n\n # set the new scale factors in place\n i = 0\n for ni, ns in sindices:\n dmap.data[ni].scale[ns] = nsfacs[i]\n i += 1\n\n # set the new scale factors in place\n i = 0\n for ni, ns in sindices:\n dmap.data[ni].scale[ns] = nsfacs[i]\n i += 1\n\n # set the singleton scale factors = 1 by\n # re-scaling the corresponding images instead.\n for ni, image in enumerate(dmap.data):\n if len(image.scale) == 1:\n image.data *= image.scale[0]\n image.scale[0] = 1\n\n else:\n # compute data equivalent to data\n dcalc = copy.deepcopy(data)\n doppler.comdat(dmap, dcalc)\n\n # compute optimum scale factor.\n sum0 = 0.\n sum1 = 0.\n sum2 = 0.\n ndata = 0\n for cspec, dspec in zip(dcalc.data, data.data):\n ok = dspec.ferr > 0.\n sum0 += ((dspec.flux[ok]/dspec.ferr[ok])**2).sum()\n sum1 += ((cspec.flux[ok]/dspec.ferr[ok])*(dspec.flux[ok]/dspec.ferr[ok])).sum()\n sum2 += ((cspec.flux[ok]/dspec.ferr[ok])**2).sum()\n ndata += dspec.ferr.size\n\n scale = sum1 / sum2\n cold = cnew = 0\n for cspec, dspec in zip(dcalc.data, data.data):\n ok = dspec.ferr > 0.\n cold += (((dspec.flux[ok]-cspec.flux[ok])/dspec.ferr[ok])**2).sum()\n cnew += (((dspec.flux[ok]-scale*cspec.flux[ok])/dspec.ferr[ok])**2).sum()\n\n print('ndata =',ndata)\n print('Optimum scale factor =',scale)\n print('Chi**2/ndata (before) =',cold/ndata,' (after) =',cnew/ndata)\n\n # scale images and write out\n for image in dmap.data:\n 
image.data *= scale\n\n # Write to a fits file\n dmap.wfits(doppler.afits(args.scaled))", "def scalers(self):\n sc = StandardScaler() if self.scm == 'ss' else MinMaxScaler()\n sc.fit(self.t)\n return pd.DataFrame(sc.transform(self.t), columns=self.t.columns.values), pd.DataFrame(sc.transform(self.v), columns=self.v.columns.values)", "def Calibrator(\n data_loader, cache=None, BaseClass=None, batch_size=None, quantile=None, regression_cutoff=None, algo=None\n):\n BaseClass = util.default(BaseClass, trt.IInt8EntropyCalibrator2)\n\n class CalibratorClass(BaseClass):\n \"\"\"\n Calibrator that supplies calibration data to TensorRT to calibrate the network for INT8 inference.\n \"\"\"\n\n def __init__(self):\n # Must explicitly initialize parent for any trampoline class! Will mysteriously segfault without this.\n BaseClass.__init__(self) # type: ignore\n\n self.data_loader = data_loader\n self._cache = cache\n self.device_buffers = OrderedDict()\n self.input_metadata = None\n self.reset()\n G_LOGGER.verbose(f\"Created calibrator [cache={self._cache}]\")\n\n self.batch_size = util.default(batch_size, 1)\n\n self.is_polygraphy_calibrator = True\n # The function that constructed this instance\n self.make_func = Calibrator\n\n def set_input_metadata(self, input_metadata):\n \"\"\"\n Sets the input metadata for the calibrator.\n\n This is passed along to the data loader and is also used for\n input data type and shape checks.\n\n NOTE: This generally does not need to be called manually if the calibrator is being used\n with Polygraphy's loaders, like ``CreateConfig`` or ``EngineFromNetwork``.\n\n Args:\n input_metadata (TensorMetadata):\n Mapping of input names to their data types and shapes.\n Passed along to the data loader if provided. This is required if\n using Polygraphy's included `DataLoader` to provide calibration data,\n or if data type and shape checking is desired.\n \"\"\"\n self.input_metadata = input_metadata\n if input_metadata is not None:\n with contextlib.suppress(AttributeError):\n self.data_loader.input_metadata = input_metadata\n\n def reset(self):\n \"\"\"\n Reset this calibrator for reuse.\n\n The calibrator will clear any dynamic ranges cached from previous calibration runs, and will\n attempt to rewind the data loader (note that generators cannot be rewound).\n\n Typically, this is only required if the same calibrator is used for multiple different networks.\n \"\"\"\n # Attempt to reset data loader\n self.data_loader_iter = iter(self.data_loader)\n self.num_batches = 0\n\n # Make sure calibrator will check the cache again when reset.\n self.cache_contents = None\n\n def get_batch_size(self):\n return self.batch_size\n\n def _get_batch_impl(self, names):\n try:\n buffers = next(self.data_loader_iter)\n except StopIteration:\n if not self.num_batches:\n G_LOGGER.critical(\n \"Calibrator data loader provided no data.\\nPossible reasons for this include:\\n(1) data loader \"\n \"has no data to provide\\n(2) data loader was a generator, and the calibrator is being \"\n \"used multiple times (generators cannot be rewound)\"\n )\n return None\n else:\n self.num_batches += 1\n\n util.check_sequence_contains(\n buffers.keys(),\n names,\n name=\"calibration input data provided by the data loader\",\n items_name=\"inputs\",\n )\n\n def check_buffer(name, buffer):\n if self.input_metadata is None:\n return\n\n expected_dtype, expected_shape = self.input_metadata[name]\n\n err_prefix = \"Received an unexpected input from the data loader during calibration. 
\"\n if buffer.dtype != expected_dtype:\n G_LOGGER.critical(\n err_prefix\n + f\"For input: '{name}', expected data type: {expected_dtype}, but received: {buffer.dtype}\"\n )\n\n if not util.is_valid_shape_override(buffer.shape, expected_shape):\n G_LOGGER.critical(\n err_prefix\n + f\"For input: '{name}', expected a shape compatible with: {expected_shape}, but received: {buffer.shape}\"\n )\n\n ptrs = []\n for name in names:\n buf = buffers[name]\n\n if isinstance(buf, cuda.DeviceView):\n check_buffer(name, buf)\n ptrs.append(buf.ptr)\n elif isinstance(buf, np.ndarray):\n check_buffer(name, buf)\n if name not in self.device_buffers:\n self.device_buffers[name] = cuda.DeviceArray(shape=buf.shape, dtype=buf.dtype)\n G_LOGGER.verbose(f\"Allocated: {self.device_buffers[name]}\")\n\n self.device_buffers[name].resize(buf.shape)\n buf = util.make_contiguous(buf)\n ptrs.append(self.device_buffers[name].copy_from(buf).ptr)\n elif isinstance(buf, int):\n ptrs.append(buf)\n else:\n G_LOGGER.critical(\n f\"Calibration data loader provided an unrecognized type: {type(buf).__name__} for input: {name}.\"\n \"\\nPlease provide either a NumPy array, Polygraphy DeviceView, or GPU pointer. \"\n )\n\n return ptrs\n\n def get_batch(self, names):\n ptrs = None\n try:\n ptrs = self._get_batch_impl(names)\n except PolygraphyException:\n pass\n if ptrs is None:\n self.free()\n return ptrs\n\n def read_calibration_cache(self):\n def load_from_cache():\n if self._cache is None or not util.get_file_size(self._cache):\n return None\n\n try:\n return util.load_file(self._cache, description=\"calibration cache\")\n except Exception as err:\n G_LOGGER.error(f\"Could not read from calibration cache: {self._cache}\\nNote: Error was: {err}\")\n return None\n\n if self.cache_contents is not None:\n return self.cache_contents\n\n self.cache_contents = load_from_cache()\n\n if not self.cache_contents:\n if self.cache_contents is not None:\n G_LOGGER.warning(\n \"Calibration cache was provided, but is empty. 
\"\n \"Will regenerate scales by running calibration.\",\n mode=LogMode.ONCE,\n )\n self.cache_contents = None\n\n return self.cache_contents\n\n def write_calibration_cache(self, cache):\n self.cache_contents = cache.tobytes()\n\n if self._cache is None:\n return\n\n try:\n util.save_file(contents=self.cache_contents, dest=self._cache, description=\"calibration cache\")\n except Exception as err:\n G_LOGGER.error(f\"Could not write to calibration cache: {self._cache}.\\nNote: Error was: {err}\")\n\n def free(self):\n \"\"\"\n Frees all device buffers associated with this calibrator\n \"\"\"\n for device_buffer in self.device_buffers.values():\n device_buffer.free()\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n self.free()\n\n # IInt8LegacyCalibrator methods\n if BaseClass == trt.IInt8LegacyCalibrator:\n\n def get_quantile(self):\n return util.default(quantile, 0.5)\n\n def get_regression_cutoff(self):\n return util.default(regression_cutoff, 0.5)\n\n def read_histogram_cache(self, length):\n pass\n\n def write_histogram_cache(self, ptr, length):\n pass\n\n # IInt8Calibrator methods\n if BaseClass == trt.IInt8Calibrator:\n\n def get_algorithm(self):\n return util.default(algo, trt.CalibrationAlgoType.ENTROPY_CALIBRATION_2)\n\n def __repr__(self):\n return util.make_repr(\n \"Calibrator\",\n data_loader,\n cache=cache,\n BaseClass=BaseClass,\n batch_size=batch_size,\n quantile=quantile,\n regression_cutoff=regression_cutoff,\n algo=algo,\n )[0]\n\n return CalibratorClass()", "def _doCalibration(self):\n self._cmdCalibration(2)", "def compute_scaler(args):\n workspace = args.workspace\n data_type = args.data_type\n dir_name = args.dir_name \n # Load data. \n t1 = time.time()\n hdf5_path = os.path.join(workspace, \"packed_features\", \"spectrogram\", data_type, dir_name, \"data.h5\")\n with h5py.File(hdf5_path, 'r') as hf:\n x = hf.get('x') \n x = np.array(x) # (n_segs, n_concat, n_freq)\n \n # Compute scaler. \n (n_segs, n_concat, n_freq) = x.shape\n x2d = x.reshape((n_segs * n_concat, n_freq))\n scaler = preprocessing.StandardScaler(with_mean=True, with_std=True).fit(x2d)\n print(scaler.mean_)\n print(scaler.scale_)\n \n # Write out scaler. \n out_path = os.path.join(workspace, \"packed_features\", \"spectrogram\", data_type, dir_name, \"scaler.p\")\n create_folder(os.path.dirname(out_path))\n pickle.dump(scaler, open(out_path, 'wb'))\n \n print(\"Save scaler to %s\" % out_path)\n print(\"Compute scaler finished! %s s\" % (time.time() - t1,))", "def __call__(self, data: np.ndarray) -> np.ndarray:\n gram_matrices = []\n for k, vtype in enumerate(self.vartypes):\n func = kernel_func[self.kertypes[vtype]]\n gram_matrix = func(self.bandwidths[k], data[:, k][:, None],\n self.data_ref[:, k][None, :])\n gram_matrices.append(gram_matrix)\n return np.array(gram_matrices).prod(axis=0)", "def calibrate(): \n \n # Calibrate of the run using beam data. Creates a folder cal-files/caltag \n # containing all calibration data. 
\n CalObj = Calibration(steerfiles=steerfiles, name=localcaltag + '-cal') \n\n # Set Beam energy\n CalObj.set_beam_momentum(beamenergy)\n\n # Get gearfile and set air as DUT material\n localgearfile = CalObj.get_filename('gear.xml')\n set_parameter(gearfile=localgearfile, sensorID=11, parametername='thickness', value=0.0001)\n set_parameter(gearfile=localgearfile, sensorID=11, parametername='radLength', value=304000.0)\n \n # Create list of calibration steps \n calpath = create_calibration_path(CalObj)\n \n # Run the calibration steps \n CalObj.calibrate(path=calpath,ifile=rawfile_air,caltag=localcaltag)", "def calibrate(self, calib_fns, calib_params, analytes=None, drift_correct=False):\n # can have calibration function stored in self and pass *coefs?\n if analytes is None:\n analytes = self.analytes\n\n if 'calibrated' not in self.data.keys():\n self.data['calibrated'] = {}\n\n for a in analytes:\n if drift_correct:\n P = self.drift_params(calib_params, a)\n else:\n P = calib_params[a].values[0]\n\n self.data['calibrated'][a] = \\\n calib_fns[a](P,\n self.data['ratios'][a])\n\n # coefs = calib_params[a]\n # if len(coefs) == 1:\n # self.data['calibrated'][a] = \\\n # self.data['ratios'][a] * coefs\n # else:\n # self.data['calibrated'][a] = \\\n # np.polyval(coefs, self.data['ratios'][a])\n # self.data['ratios'][a] * coefs[0] + coefs[1]\n self.setfocus('calibrated')\n return", "def calibrate(self, cal=1.0, pol_eff=1.0):\n \n if self.ncomp == 1:\n self.data *= cal\n else:\n self.data[0] *= cal\n self.data[1] *= cal * pol_eff\n self.data[2] *= cal * pol_eff\n\n return self", "def fit(self, GRFData):\n\n self.__is_valid_dict(GRFData)\n for component in self.comp_list:\n self.scaler[component].fit(np.reshape(GRFData[component], (-1, 1)))\n self.isFitted = True", "def __create_scaler(self):\n \n self.scaler = {}\n for component in self.comp_list:\n self.scaler[component] = self.__create_scaler_type()", "def start(self):\n if self.preemptableScaler != None:\n self.preemptableScaler.start()\n\n if self.scaler != None:\n self.scaler.start()", "def calibrate(self):\n\t\tLTOGRIGHT = []\n\t\tLTOGUP = []\n\t\tRTOGRIGHT = []\n\t\tRTOGUP = []\n\t\tstart = time.time()\n\t\tcalibration_time = 5.0\n\t\twhile time.time() - start < calibration_time:\n\t\t\tevents = pygame.event.get()\n\t\t\tfor event in events:\n\t\t\t\tif event.type == pygame.JOYAXISMOTION:\n\t\t\t\t\tLTOGRIGHT.append(self.joystick.get_axis(self.LTOGRIGHT))\n\t\t\t\t\tLTOGUP.append(-self.joystick.get_axis(self.LTOGUP))\n\t\t\t\t\tRTOGRIGHT.append(self.joystick.get_axis(self.RTOGRIGHT))\n\t\t\t\t\tRTOGUP.append(-self.joystick.get_axis(self.RTOGUP))\n\n\t\t# calibration sets highest value equal to 1.0\n\t\tself.calibration[0] = 1.0/max(LTOGRIGHT)\n\t\tself.calibration[1] = -1.0/min(LTOGRIGHT)\n\t\tself.calibration[2] = -1.0/min(LTOGUP)\n\t\tself.calibration[3] = 1.0/max(LTOGUP)\n\t\tself.calibration[4] = 1.0/max(RTOGRIGHT)\n\t\tself.calibration[5] = -1.0/min(RTOGRIGHT)\n\t\tself.calibration[6] = -1.0/min(RTOGUP)\n\t\tself.calibration[7] = 1.0/max(RTOGUP)", "def run (self, scalers = {'capital costs':1.0}):\n self.was_run = True\n self.reason = \"OK\"\n tag = self.cd['file id'].split('+')\n if len(tag) > 1 and tag[1] != 'transmission':\n self.was_run = False\n self.reason = \"Not a transmission project.\"\n return\n\n if not self.cd[\"model electricity\"]:\n self.was_run = False\n self.reason = \"Electricity must be modeled to analyze \"+\\\n \"transmission. 
It was not for this community.\"\n return\n if np.isnan(float(self.comp_specs['distance to community'])):\n self.was_run = False\n self.reason = (\"There are no communities within 30 miles with\"\n \" lower cost of electricity.\")\n return\n\n self.calc_average_load()\n try:\n self.get_intertie_values()\n except ValueError:\n self.was_run = False\n self.reason = (\"Could not find data on community to intertie to.\")\n return\n self.calc_pre_intertie_generation()\n self.calc_intertie_offset_generation()\n\n\n if self.cd[\"model heating fuel\"]:\n # change these below\n self.calc_lost_heat_recovery()\n # see NOTE*\n\n #~ return\n if self.cd[\"model financial\"]:\n # AnnualSavings functions (don't need to write)\n self.get_diesel_prices()\n\n # change these below\n self.calc_capital_costs()\n self.calc_annual_electric_savings()\n self.calc_annual_heating_savings()\n\n # AnnualSavings functions (don't need to write)\n self.calc_annual_total_savings()\n self.calc_annual_costs(self.cd['interest rate'],\n scalers['capital costs'])\n self.calc_annual_net_benefit()\n self.calc_npv(self.cd['discount rate'], self.cd[\"current year\"])\n #~ print self.benefit_cost_ratio\n self.calc_levelized_costs(self.proposed_generation_cost)", "def rescale_data(self):\n\n # Dividing every array of simulated data vectors by the mean of that array.\n '''# Didnt work\n for key in self.data.keys():\n self.data[key] /= np.mean(self.data[key])\n '''\n\n self.rescaled = True\n\n # Mean normalization\n \"\"\" didnt work\n for key in self.data.keys():\n self.data[key] -= np.mean(self.data[key])\n self.data[key] /= (np.max(self.data[key]) - np.min(self.data[key]))\n \"\"\"\n\n # Median normalization\n \"\"\" didnt work, still dividing by large number \n for key in self.data.keys():\n self.data[key] -= np.median(self.data[key])\n self.data[key] /= (np.max(self.data[key]) - np.min(self.data[key]))\n \"\"\"\n\n # Divide by median\n \"\"\" didnt work\n for key in self.data.keys():\n self.data[key] -= np.median(self.data[key])\n self.data[key] /= (np.median(self.data[key]))\n \"\"\"\n\n # Take logarithm of data\n \"\"\" didnt work\n for key in self.data.keys():\n self.data[key] = np.log10(self.data[key])\n \"\"\"\n\n # Scale by length of vector\n \"\"\"\n for key in self.data.keys():\n self.data[key] /= np.linalg.norm(self.Cl_noiseless)\n \"\"\"\n\n \n # Scale by negative of the natural logarithm \n for key in self.data.keys():\n self.data[key] = -1 * np.log(self.data[key]) \n \n \"\"\"\n # Scale by subtracting the mean and dividing by std\n std = np.nanstd(self.data['data'])\n mean = np.nanmean(self.data['data'])\n for key in self.data.keys():\n # self.data[key] -= np.log(self.Cl_noiseless) # -1* # scale this same way\n # self.data[key] -= self.Cl_noiseless # -1* # scale this same way\n self.data[key] -= mean \n self.data[key] /= std\n \"\"\"", "def calibrate(self, master):\n if master.polyorder == 'linear':\n self.fitfunction = \"A0 + A1 * D\"\n self.fit_fkt = self.calc_lin\n elif master.polyorder == 'quadratic':\n self.fit_fkt = self.calc_quad\n self.fitfunction = \"A0 + A1 * D + A2 * D**2\"\n elif master.polyorder == \"cubic\":\n self.fitfunction = \"A0 + A1 * D + A2 * D**2 + A3 * D**3\"\n self.fit_fkt = self.calc_cubic\n else:\n print(\"Polynomgrad nicht definiert\")\n \n self.mw = np.asarray(self.mw)\n if master.sensortype == \"Druck\":\n self.best, self.covar = curve_fit(self.fit_fkt, self.mw, master.Referencedata.caldat)\n else:\n print(\"Sensortyp noch nicht Hinterlegt\")", "def __call__(self):\n 
self.brain._update_fscale(self.factor)\n for key in self.brain.keys:\n if self.widgets[key] is not None:\n self.widgets[key].set_value(self.brain._data[key])", "def calculate_scaling_factors(blk):\n\n def cs(blk2):\n \"\"\"Recursive function for to do subblocks first\"\"\"\n for b in blk2.component_data_objects(pyo.Block, descend_into=False):\n cs(b)\n if hasattr(blk2, \"calculate_scaling_factors\"):\n blk2.calculate_scaling_factors()\n\n # Call recursive function to run calculate_scaling_factors on blocks from\n # the bottom up.\n cs(blk)\n # If a scale factor is set for an indexed component, propagate it to the\n # component data if a scale factor hasn't already been explicitly set\n propagate_indexed_component_scaling_factors(blk)\n # Use the variable scaling factors to scale the arc constraints.\n scale_arc_constraints(blk)", "def __scale(data, max_value_list, min_value_list, scale_value_list, process_cols_list):\n features = np.array(data.features, dtype=float)\n for i in process_cols_list:\n value = features[i]\n if value > max_value_list[i]:\n value = max_value_list[i]\n elif value < min_value_list[i]:\n value = min_value_list[i]\n\n features[i] = (value - min_value_list[i]) / scale_value_list[i]\n _data = copy.deepcopy(data)\n _data.features = features\n return _data", "def _process(self, data: np.ndarray) -> np.ndarray:\n return data[..., 0] * self.scale", "def BuildScales(self, par_specs):\n for par in par_specs:\n res = abs((par.hi_limit - par.lo_limit) / self.length)\n if par.initial_value is None:\n preset = (par.lo_limit + par.hi_limit) / 2.0\n else:\n preset = par.initial_value\n par.var.set(preset)\n par.scale = tk.Scale(self.frame, label=par.name, variable=par.var,\n from_=par.lo_limit, to=par.hi_limit,\n orient=self.orient, length=self.length,\n width=self.slider_width, resolution=res)\n par.scale.configure(command=lambda val, scale=par.scale:\n self.Respond(val, scale))\n self.par_dic.update({par.scale: par})\n if self.orient == tk.VERTICAL:\n par.scale.pack(side=tk.LEFT)\n else:\n par.scale.pack(side=tk.TOP)" ]
[ "0.55551463", "0.55168366", "0.5510334", "0.54566354", "0.54343325", "0.5372346", "0.52166396", "0.5209569", "0.5180968", "0.5142922", "0.5126212", "0.5119817", "0.5116321", "0.5078507", "0.5077397", "0.5049095", "0.50360376", "0.50137097", "0.500637", "0.49929506", "0.4990554", "0.49744853", "0.49685207", "0.49662542", "0.49510372", "0.49496883", "0.49340934", "0.49324673", "0.49134716", "0.49090567" ]
0.55441695
1
Returns fixed model parameters for experiments.
def get_fixed_params(self):

        fixed_params = {
            'total_time_steps': 40,
            'num_encoder_steps': 39,
            'num_epochs': 100,
            'early_stopping_patience': 10,
            'multiprocessing_workers': 2,
        }

        return fixed_params
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_fixed_params():\n fixed_params = {\n 'total_time_steps': 8 * 24,\n 'num_encoder_steps': 7 * 24,\n 'num_epochs': 100,\n 'early_stopping_patience': 5,\n 'multiprocessing_workers': 5\n }\n return fixed_params", "def params():\n return utils.Params('../experiments/base-model/params.json')", "def setup_fixed_model():\n out = {}\n out['Q'] = Q\n out['X'] = X\n out['y'] = y\n\n return out", "def get_default_model_params(self):\n\n model_params = {\n 'dropout_rate': 0.3,\n 'hidden_layer_size': 160,\n 'learning_rate': 0.01,\n 'minibatch_size': 64,\n 'max_gradient_norm': 0.01,\n 'num_heads': 1,\n 'stack_size': 1\n }\n\n return model_params", "def get_fe_params(self):\n return self._params[0:self.k_fe]", "def get_model_params(self):\n\n results = self._model.fit()\n model_params = np.expand_dims(results.params.as_matrix(), 1)\n return model_params", "def experiment_params():\n exp = {\n 'lr': [1e-3],\n 'loss_function': ['cce'],\n 'optimizer': ['nadam'],\n 'dataset': [\n # 'curv_contour_length_9',\n 'cluttered_nist_ix1',\n # 'curv_baseline',\n ]\n }\n exp['data_augmentations'] = [\n [\n 'grayscale',\n 'center_crop',\n # 'left_right',\n # 'up_down',\n 'uint8_rescale',\n 'singleton',\n 'zero_one'\n ]]\n exp['val_augmentations'] = [\n [\n 'grayscale',\n 'center_crop',\n # 'left_right',\n # 'up_down',\n 'uint8_rescale',\n 'singleton',\n 'zero_one'\n ]]\n exp['batch_size'] = 32 # Train/val batch size.\n exp['epochs'] = 4\n exp['model_name'] = 'unet'\n exp['exp_name'] = exp['model_name'] + '_' + exp['dataset'][0]\n exp['save_weights'] = True\n exp['validation_iters'] = 1000\n exp['num_validation_evals'] = 200\n exp['shuffle_val'] = True # Shuffle val data.\n exp['shuffle_train'] = True\n return exp", "def _get_params(model):\n return model.coefs.shape[0]", "def get_model_parameter_bounds():\n minf = float(\"-inf\")\n inf = float(\"inf\")\n params = dict(mu=(minf,inf), rho=(0.0 ,inf))\n return params", "def doParametersOfInterest(self):\n \n self.modelBuilder.doVar('expr::cosW(\"0.87681811112\",)')\n self.modelBuilder.doVar('expr::sinW(\"0.48082221247\",)')\n self.modelBuilder.doVar('expr::mZ(\"91.2\",)')\n self.modelBuilder.doVar('expr::Lambda1(\"100.0\",)')\n self.modelBuilder.doVar('expr::e2(\"0.0917\",)')\n self.modelBuilder.doVar('expr::gs2(\"1.533\",)')\n\n # EFT Higgs basis couplings\n\n self.modelBuilder.doVar('cZ[0,-1,1]') \n self.modelBuilder.doVar(\"cZZ[0,-2,2]\") \n self.modelBuilder.doVar(\"cZZt[0,-2,2]\") \n self.modelBuilder.doVar(\"cZB[0,-6,6]\") \n\n poi='cZ,cZZ,cZZt,cZB'\n\n # Amplitude couplings from EFT couplings \n\n self.modelBuilder.doVar('expr::a1(\"@0+1\",cZ)') # (\"2*(@0+1)\",cZ) in AN/Paper but a1 = 1 for signal model and width calculation\n self.modelBuilder.doVar('expr::a2(\"-1*@0*(@1/(2*pow(@2,2)*pow(@3,2)))\",cZZ,e2,sinW,cosW)')\n self.modelBuilder.doVar('expr::a3(\"-1*@0*(@1/(2*pow(@2,2)*pow(@3,2)))\",cZZt,e2,sinW,cosW)')\n self.modelBuilder.doVar('expr::k1(\"@0*(@1*pow(@2,2)/(pow(@3,2)*pow(@4,2)))\",cZB,e2,Lambda1,sinW,mZ)')\n self.modelBuilder.doVar('expr::k1L1(\"@0/pow(@1,2)\",k1,Lambda1)')\n\n ###### gamma_H ########\n\n # SMEFT relationships for VV couplings (Expressed using amplitude couplings)\n\n self.modelBuilder.doVar('expr::kappa(\"1.0\",)')\n self.modelBuilder.doVar('expr::kappa_tilde(\"0.0\",)') \n\n self.modelBuilder.doVar('expr::a1_WW(\"@0\",a1)')\n self.modelBuilder.doVar('expr::a2_WW(\"@0*@0*@1\",cosW,a2)')\n self.modelBuilder.doVar('expr::a3_WW(\"@0*@0*@1\",cosW,a3)')\n self.modelBuilder.doVar('expr::k1_WW(\"(@2 / (@0*@0 - @1*@1) - 2*@1*@1*@3*@4*@4 
/(@5*@5*(@0*@0 - @1*@1)))\",cosW,sinW,k1,a2,Lambda1,mZ)')\n self.modelBuilder.doVar('expr::k2_k1(\"2*@0*@1*@2/(@0*@0 - @1*@1)\",cosW,sinW,k1)')\n self.modelBuilder.doVar('expr::k2_a2(\"-2*@0*@1*@3*@4*@4/((@2*@2)*(@0*@0 - @1*@1))\",cosW,sinW,mZ,a2,Lambda1)')\n self.modelBuilder.doVar('expr::k2(\"@0 + @1\",k2_k1,k2_a2)')\n\n # Determine gamma_H from VV couplings\n\n zz_expr = '\"4*(@0*@0/4. + 0.1695*@3*@3 + 0.09076*@1*@1 + 0.03809*@2*@2 + 0.8095*@0*@3/2. + 0.5046*@0*@1/2. + 0.2092*@1*@3 + 0.1023*@4*@4 + 0.1901*@0*@4/2. + 0.07429*@3*@4 + 0.04710*@1*@4) \",a1,a2,a3,k1,k2'\n ww_expr = '\"4*(@0*@0/4. + 0.1320*@3*@3 + 0.1944*@1*@1 + 0.08075*@2*@2 + 0.7204*@0*@3/2. + 0.7437*@0*@1/2. + 0.2774*@3*@1) \",a1_WW,a2_WW,a3_WW,k1_WW'\n zgamma_expr = '\"4*(1.118600*@0*@0/4. +0.0035*@1*@1 - 0.125010*@0*@1/2. + 0.000003*@1*@1 - 0.00018*@1*@1 + 0.003100*@0*@1/2. +0.00126*@2*@2 + 0.000005*@2*@2 -0.00047*@2*@2)\",a1_WW,kappa,kappa_tilde'\n gg_expr = '\"(1.1068*@0*@0 + 0.0082*@0*@0 - 0.1150*@0*@0 + 2.5717*@1*@1 + 0.0091*@1*@1 - 0.1982*@1*@1)\",kappa,kappa_tilde'\n bb_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n cc_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n tautau_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n mumu_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n gmgm_expr = '\"4*(1.6054*@0*@0/4. + 0.07312*@1*@1 - 0.6854*@0*@1/2. + 0.00002*@1*@1 - 0.0018*@1*@1 + 0.0085*@0*@1/2. + 0.1699*@2*@2 + 0.00002*@2*@2 - 0.0031*@2*@2)\",a1_WW,kappa,kappa_tilde'\n \n self.modelBuilder.doVar('expr::R_WW('+str(ww_expr)+')')\n self.modelBuilder.doVar('expr::R_ZZ('+str(zz_expr)+')')\n self.modelBuilder.doVar('expr::R_Zgamma('+str(zgamma_expr)+')')\n self.modelBuilder.doVar('expr::R_gg('+str(gg_expr)+')')\n self.modelBuilder.doVar('expr::R_bb('+str(bb_expr)+')')\n self.modelBuilder.doVar('expr::R_cc('+str(cc_expr)+')')\n self.modelBuilder.doVar('expr::R_tautau('+str(tautau_expr)+')')\n self.modelBuilder.doVar('expr::R_mumu('+str(mumu_expr)+')')\n self.modelBuilder.doVar('expr:R_gammagamma('+str(gmgm_expr)+')')\n\n self.modelBuilder.doVar('expr::gammaH(\"(0.5824*@0 + 0.2137*@1 + 0.08187*@2 + 0.06272*@3 + 0.02891*@4 + 0.02619*@5 + 0.002270*@6 + 0.001533*@7 + 0.0002176*@8 )/0.9998\",R_bb,R_WW,R_gg,R_tautau,R_cc,R_ZZ,R_gammagamma,R_Zgamma,R_mumu)') \n\n ###########################\n\n self.g1V = GetCoupTerms(1,1,1,-0.0001,\"1V\") # Compensate for scaling of k1 templates \n self.g2V = GetCoupTerms(1,1,1,-0.0001,\"2V\") \n \n self.modelBuilder.doVar(\"expr::g2V_1(\\\"\"+str(self.g2V[0])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T1(\\\"((pow(@0,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_1)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T1_Neg(\\\"-1*((pow(@0,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_1)\") \n self.modelBuilder.doVar(\"expr::g2V_2(\\\"\"+str(self.g2V[1])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T2(\\\"((pow(@0,3)*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_2)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T2_Neg(\\\"-1*((pow(@0,3)*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_2)\") \n self.modelBuilder.doVar(\"expr::g2V_3(\\\"\"+str(self.g2V[2])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T3(\\\"((pow(@0,2)*pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_3)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T3_Neg(\\\"-1*((pow(@0,2)*pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_3)\") \n self.modelBuilder.doVar(\"expr::g2V_4(\\\"\"+str(self.g2V[3])+\"\\\",)\") \n 
self.modelBuilder.factory_(\"expr::scale_Ewk_T4(\\\"((@0*pow(@1,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_4)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T4_Neg(\\\"-1*((@0*pow(@1,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_4)\") \n self.modelBuilder.doVar(\"expr::g2V_5(\\\"\"+str(self.g2V[4])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T5(\\\"((pow(@1,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_5)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T5_Neg(\\\"-1*((pow(@1,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_5)\") \n self.modelBuilder.doVar(\"expr::g2V_6(\\\"\"+str(self.g2V[5])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T6(\\\"((pow(@0,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_6)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T6_Neg(\\\"-1*((pow(@0,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_6)\") \n self.modelBuilder.doVar(\"expr::g2V_7(\\\"\"+str(self.g2V[6])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T7(\\\"((pow(@0,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_7)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T7_Neg(\\\"-1*((pow(@0,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_7)\") \n self.modelBuilder.doVar(\"expr::g2V_8(\\\"\"+str(self.g2V[7])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T8(\\\"((@0*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_8)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T8_Neg(\\\"-1*((@0*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_8)\") \n self.modelBuilder.doVar(\"expr::g2V_9(\\\"\"+str(self.g2V[8])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T9(\\\"((pow(@2,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_9)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T9_Neg(\\\"-1*((pow(@2,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_9)\") \n self.modelBuilder.doVar(\"expr::g2V_10(\\\"\"+str(self.g2V[9])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T10(\\\"((pow(@0,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_10)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T10_Neg(\\\"-1*((pow(@0,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_10)\") \n self.modelBuilder.doVar(\"expr::g2V_11(\\\"\"+str(self.g2V[10])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T11(\\\"((pow(@0,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_11)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T11_Neg(\\\"-1*((pow(@0,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_11)\") \n self.modelBuilder.doVar(\"expr::g2V_12(\\\"\"+str(self.g2V[11])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T12(\\\"((@0*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_12)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T12_Neg(\\\"-1*((@0*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_12)\") \n self.modelBuilder.doVar(\"expr::g2V_13(\\\"\"+str(self.g2V[12])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T13(\\\"((pow(@3,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_13)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T13_Neg(\\\"-1*((pow(@3,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_13)\") \n self.modelBuilder.doVar(\"expr::g2V_14(\\\"\"+str(self.g2V[13])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T14(\\\"((pow(@1,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_14)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T14_Neg(\\\"-1*((pow(@1,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_14)\") \n 
self.modelBuilder.doVar(\"expr::g2V_15(\\\"\"+str(self.g2V[14])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T15(\\\"((pow(@1,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_15)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T15_Neg(\\\"-1*((pow(@1,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_15)\") \n self.modelBuilder.doVar(\"expr::g2V_16(\\\"\"+str(self.g2V[15])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T16(\\\"((@1*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_16)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T16_Neg(\\\"-1*((@1*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_16)\") \n self.modelBuilder.doVar(\"expr::g2V_17(\\\"\"+str(self.g2V[16])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T17(\\\"((pow(@1,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_17)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T17_Neg(\\\"-1*((pow(@1,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_17)\") \n self.modelBuilder.doVar(\"expr::g2V_18(\\\"\"+str(self.g2V[17])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T18(\\\"((pow(@1,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_18)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T18_Neg(\\\"-1*((pow(@1,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_18)\") \n self.modelBuilder.doVar(\"expr::g2V_19(\\\"\"+str(self.g2V[18])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T19(\\\"((@1*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_19)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T19_Neg(\\\"-1*((@1*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_19)\") \n self.modelBuilder.doVar(\"expr::g2V_20(\\\"\"+str(self.g2V[19])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T20(\\\"((pow(@2,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_20)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T20_Neg(\\\"-1*((pow(@2,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_20)\") \n self.modelBuilder.doVar(\"expr::g2V_21(\\\"\"+str(self.g2V[20])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T21(\\\"((pow(@2,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_21)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T21_Neg(\\\"-1*((pow(@2,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_21)\") \n self.modelBuilder.doVar(\"expr::g2V_22(\\\"\"+str(self.g2V[21])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T22(\\\"((@2*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_22)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T22_Neg(\\\"-1*((@2*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_22)\") \n self.modelBuilder.doVar(\"expr::g2V_23(\\\"\"+str(self.g2V[22])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T23(\\\"((@0*@1*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_23)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T23_Neg(\\\"-1*((@0*@1*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_23)\") \n self.modelBuilder.doVar(\"expr::g2V_24(\\\"\"+str(self.g2V[23])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T24(\\\"((@0*pow(@1,2)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_24)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T24_Neg(\\\"-1*((@0*pow(@1,2)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_24)\") \n self.modelBuilder.doVar(\"expr::g2V_25(\\\"\"+str(self.g2V[24])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T25(\\\"((pow(@0,2)*@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_25)\") \n 
self.modelBuilder.factory_(\"expr::scale_Ewk_T25_Neg(\\\"-1*((pow(@0,2)*@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_25)\") \n self.modelBuilder.doVar(\"expr::g2V_26(\\\"\"+str(self.g2V[25])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T26(\\\"((@0*@1*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_26)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T26_Neg(\\\"-1*((@0*@1*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_26)\") \n self.modelBuilder.doVar(\"expr::g2V_27(\\\"\"+str(self.g2V[26])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T27(\\\"((@0*pow(@1,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_27)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T27_Neg(\\\"-1*((@0*pow(@1,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_27)\") \n self.modelBuilder.doVar(\"expr::g2V_28(\\\"\"+str(self.g2V[27])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T28(\\\"((pow(@0,2)*@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_28)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T28_Neg(\\\"-1*((pow(@0,2)*@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_28)\") \n self.modelBuilder.doVar(\"expr::g2V_29(\\\"\"+str(self.g2V[28])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T29(\\\"((@0*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_29)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T29_Neg(\\\"-1*((@0*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_29)\") \n self.modelBuilder.doVar(\"expr::g2V_30(\\\"\"+str(self.g2V[29])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T30(\\\"((@0*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_30)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T30_Neg(\\\"-1*((@0*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_30)\") \n self.modelBuilder.doVar(\"expr::g2V_31(\\\"\"+str(self.g2V[30])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T31(\\\"((pow(@0,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_31)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T31_Neg(\\\"-1*((pow(@0,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_31)\") \n self.modelBuilder.doVar(\"expr::g2V_32(\\\"\"+str(self.g2V[31])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T32(\\\"((@1*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_32)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T32_Neg(\\\"-1*((@1*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_32)\") \n self.modelBuilder.doVar(\"expr::g2V_33(\\\"\"+str(self.g2V[32])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T33(\\\"((@1*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_33)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T33_Neg(\\\"-1*((@1*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_33)\") \n self.modelBuilder.doVar(\"expr::g2V_34(\\\"\"+str(self.g2V[33])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T34(\\\"((pow(@1,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_34)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T34_Neg(\\\"-1*((pow(@1,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_34)\") \n self.modelBuilder.doVar(\"expr::g2V_35(\\\"\"+str(self.g2V[34])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T35(\\\"((@0*@1*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_35)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T35_Neg(\\\"-1*((@0*@1*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_35)\") \n \n self.modelBuilder.doVar(\"expr::g1V_1(\\\"\"+str(self.g1V[0])+\"\\\",)\") \n 
self.modelBuilder.factory_(\"expr::scale_ggH_T1(\\\"((pow(@0,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_1)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T1_Neg(\\\"-1*((pow(@0,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_1)\") \n self.modelBuilder.doVar(\"expr::g1V_2(\\\"\"+str(self.g1V[1])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T2(\\\"((@0*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_2)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T2_Neg(\\\"-1*((@0*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_2)\") \n self.modelBuilder.doVar(\"expr::g1V_3(\\\"\"+str(self.g1V[2])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T3(\\\"((pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_3)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T3_Neg(\\\"-1*((pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_3)\") \n self.modelBuilder.doVar(\"expr::g1V_4(\\\"\"+str(self.g1V[3])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T4(\\\"((@0*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_4)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T4_Neg(\\\"-1*((@0*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_4)\") \n self.modelBuilder.doVar(\"expr::g1V_5(\\\"\"+str(self.g1V[4])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T5(\\\"((pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_5)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T5_Neg(\\\"-1*((pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_5)\") \n self.modelBuilder.doVar(\"expr::g1V_6(\\\"\"+str(self.g1V[5])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T6(\\\"((@0*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_6)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T6_Neg(\\\"-1*((@0*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_6)\") \n self.modelBuilder.doVar(\"expr::g1V_7(\\\"\"+str(self.g1V[6])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T7(\\\"((pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_7)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T7_Neg(\\\"-1*((pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_7)\") \n self.modelBuilder.doVar(\"expr::g1V_8(\\\"\"+str(self.g1V[7])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T8(\\\"((@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_8)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T8_Neg(\\\"-1*((@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_8)\") \n self.modelBuilder.doVar(\"expr::g1V_9(\\\"\"+str(self.g1V[8])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T9(\\\"((@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_9)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T9_Neg(\\\"-1*((@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_9)\") \n self.modelBuilder.doVar(\"expr::g1V_10(\\\"\"+str(self.g1V[9])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T10(\\\"((@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_10)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T10_Neg(\\\"-1*((@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_10)\") \n \n self.modelBuilder.doSet(\"POI\",poi)", "def get_pars(model_info, use_demo=False):\n # Get the default values for the parameters\n pars = dict((p.name, p.default) for p in model_info['parameters'])\n\n # Fill in default values for the polydispersity parameters\n for p in model_info['parameters']:\n if p.type in ('volume', 'orientation'):\n pars[p.name+'_pd'] = 0.0\n pars[p.name+'_pd_n'] = 0\n pars[p.name+'_pd_nsigma'] = 3.0\n pars[p.name+'_pd_type'] = \"gaussian\"\n\n # Plug in values given in demo\n if 
use_demo:\n pars.update(model_info['demo'])\n return pars", "def experiment_params():\n exp = {\n 'lr': [1e-3],\n 'loss_function': ['cce'],\n 'optimizer': ['nadam'],\n 'dataset': [\n # 'curv_contour_length_9',\n 'curv_contour_length_14',\n # 'curv_baseline',\n ]\n }\n exp['data_augmentations'] = [\n [\n 'grayscale',\n 'left_right',\n 'up_down',\n 'uint8_rescale',\n 'singleton',\n 'resize',\n # 'per_image_standardization',\n 'zero_one'\n ]]\n exp['val_augmentations'] = exp['data_augmentations']\n exp['batch_size'] = 32 # Train/val batch size.\n exp['epochs'] = 16\n exp['exp_name'] = 'hgru_bn_pathfinder_14'\n exp['model_name'] = 'hgru'\n # exp['clip_gradients'] = 7.\n exp['save_weights'] = True\n exp['validation_iters'] = 1000\n exp['num_validation_evals'] = 50\n exp['shuffle_val'] = True # Shuffle val data.\n exp['shuffle_train'] = True\n return exp", "def get_params(self):\n return {\n \"nspecies\": self.nspecies,\n \"lmax\": self.lmax,\n \"nmax\": self.nmax,\n \"rcut\": self.rcut,\n \"sigma\": self.sigma,\n \"trans_width\": self.trans_width\n }", "def default_parameters():\n prm = Parameters('windkessel_model')\n\n prm.add('total_volume', float())\n\n prm.add('venous_compliance', float())\n prm.add('arterial_compliance', float())\n\n prm.add('venous_resistance', float())\n prm.add('arterial_resistance', float())\n prm.add('peripheral_resistance', float())\n\n prm.add('venous_resting_volume', float())\n prm.add('arterial_resting_volume', float())\n\n return prm", "def getDefaultParameterValues(self):\r\n dct = {}\r\n self.initializeRoadRunnerModel()\r\n self.roadrunnerModel.reset()\r\n for parameterName in self.parametersToFit:\r\n dct[parameterName] = self.roadrunnerModel.model[parameterName]\r\n return dct", "def get_model_params(self):\n w1 = self.w1\n b1 = self.b1\n w2 = self.w2\n b2 = self.b2\n w3 = self.w3\n b3 = self.b3\n w4 = self.w4\n b4 = self.b4\n w5 = self.w5\n b5 = self.b5\n w6 = self.w6\n b6 = self.b6\n\n return w1, b1, w2, b2, w3, b3, w4, b4, w5, b5, w6, b6", "def get_prob_params():\n prob = Namespace()\n prob.study_name = STUDY_NAME\n if IS_DEBUG:\n prob.num_trials = 3\n prob.max_capital = 10\n else:\n prob.num_trials = NUM_TRIALS\n prob.max_capital = MAX_CAPITAL\n # Common\n prob.time_distro = TIME_DISTRO\n prob.num_workers = NUM_WORKERS\n _study_params = {\n 'branin': ('synthetic/branin/config_mf.json',\n branin_mf, cost_branin_mf, 0.1, 0, 1),\n 'hartmann3_2': ('synthetic/hartmann3_2/config_mf.json',\n hartmann3_2_mf, cost_hartmann3_2_mf, 0.1, 0, 1),\n 'hartmann6_4': ('synthetic/hartmann6_4/config_mf.json',\n hartmann6_4_mf, cost_hartmann6_4_mf, 0.1, 0, 1),\n 'borehole_6': ('synthetic/borehole_6/config_mf.json',\n borehole_6_mf, cost_borehole_6_mf, 1, 0, 1),\n 'park2_4': ('synthetic/park2_4/config_mf.json',\n park2_4_mf, cost_park2_4_mf, 0.3, 0, 1),\n 'park2_3': ('synthetic/park2_3/config_mf.json',\n park2_3_mf, cost_park2_3_mf, 0.1, 0, 1),\n 'park1_3': ('synthetic/park1_3/config_mf.json',\n park1_3_mf, cost_park1_3_mf, 0.5, 0, 1),\n }\n (domain_config_file_suffix, raw_func, raw_fidel_cost_func, _fc_noise_scale,\n _initial_pool_size, _) = _study_params[prob.study_name]\n domain_config_file = os.path.join(DRAGONFLY_EXPERIMENTS_DIR, domain_config_file_suffix)\n # noisy\n prob.noisy_evals = NOISY_EVALS\n if NOISY_EVALS:\n noise_type = 'gauss'\n noise_scale = _fc_noise_scale\n else:\n noise_type = 'no_noise'\n noise_scale = None\n # Create domain, function_caller and worker_manager\n config = load_config_file(domain_config_file)\n func_caller = 
get_multifunction_caller_from_config(raw_func, config,\n raw_fidel_cost_func=raw_fidel_cost_func, noise_type=noise_type,\n noise_scale=noise_scale)\n # Set max_capital\n if hasattr(func_caller, 'fidel_cost_func'):\n prob.max_capital = prob.max_capital * \\\n func_caller.fidel_cost_func(func_caller.fidel_to_opt)\n else:\n prob.max_capital = prob.max_capital\n # Store everything in prob\n prob.func_caller = func_caller\n prob.worker_manager = SyntheticWorkerManager(prob.num_workers,\n time_distro='caller_eval_cost')\n prob.save_file_prefix = prob.study_name + ('-debug' if IS_DEBUG else '')\n prob.methods = METHODS\n prob.save_results_dir = SAVE_RESULTS_DIR\n prob.reporter = get_reporter('default')\n # evaluation options\n prob.evaluation_options = Namespace(prev_eval_points='none',\n initial_pool_size=_initial_pool_size)\n return prob", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"Afb[0.6,-0.7,0.7]\");\n self.modelBuilder.doVar(\"A0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doSet(\"POI\",\"Afb,A0\")\n\n # ss templates\n self.modelBuilder.doVar(\"R_ee_os_fakes[0.6,0.0,1.0]\");\n self.modelBuilder.doVar(\"ee16_fakes_norm[1.0, 0.01, 10.]\");\n self.modelBuilder.doVar(\"ee17_fakes_norm[1.0, 0.01, 10.]\");\n self.modelBuilder.doVar(\"ee18_fakes_norm[1.0, 0.01, 10.]\");\n #Remember, cant use spaces in these formulas!\n #self.modelBuilder.options.verbose = 10\n self.modelBuilder.factory_('expr::R_ee16_qcd_os(\"@0*@1\",ee16_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee17_qcd_os(\"@0*@1\",ee17_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee18_qcd_os(\"@0*@1\",ee18_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee16_qcd_ss(\"@0*(1.0-@1)\",ee16_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee17_qcd_ss(\"@0*(1.0-@1)\",ee17_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee18_qcd_ss(\"@0*(1.0-@1)\",ee18_fakes_norm,R_ee_os_fakes)')\n \n self.modelBuilder.factory_('expr::Alph(\"2.0*@0/(2.0-@0)\",A0)')\n self.modelBuilder.factory_('expr::Norm(\"3.0/4.0/(2.0+@0)\",Alph)')\n self.modelBuilder.factory_('expr::RAlph(\"@0*@1\",Alph,Norm)')\n self.modelBuilder.factory_('expr::Rpl(\"(@0+@1)\",Norm,Afb)')\n self.modelBuilder.factory_('expr::Rmn(\"(@0-@1)\",Norm,Afb)')", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"eAfb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"eA0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doVar(\"rAfb[1.0,-5.0, 5.0]\");\n self.modelBuilder.doVar(\"rA0[1.0, -5.0, 5.0]\");\n self.modelBuilder.doSet(\"POI\",\"rAfb,rA0\")\n self.modelBuilder.factory_('expr::mAfb(\"@0*@1\",eAfb,rAfb)')\n self.modelBuilder.factory_('expr::mA0(\"(@0*@1)\",eA0,rA0)')\n\n \n self.modelBuilder.factory_('expr::eAlph(\"2.0*@0/(2.0-@0)\",eA0)')\n self.modelBuilder.factory_('expr::eNorm(\"3.0/4.0/(2.0+@0)\",eAlph)')\n self.modelBuilder.factory_('expr::eRAlph(\"@0*@1\",eAlph,eNorm)')\n self.modelBuilder.factory_('expr::eRpl(\"(@0+@1)\",eNorm,eAfb)')\n self.modelBuilder.factory_('expr::eRmn(\"(@0-@1)\",eNorm,eAfb)')\n\n self.modelBuilder.factory_('expr::mAlph(\"2.0*@0/(2.0-@0)\",mA0)')\n self.modelBuilder.factory_('expr::mNorm(\"3.0/4.0/(2.0+@0)\",mAlph)')\n self.modelBuilder.factory_('expr::mRAlph(\"@0*@1\",mAlph,mNorm)')\n self.modelBuilder.factory_('expr::mRpl(\"(@0+@1)\",mNorm,mAfb)')\n self.modelBuilder.factory_('expr::mRmn(\"(@0-@1)\",mNorm,mAfb)')", "def get_parameters(self):\n d = Algorithm.get_parameters(self)\n d.update({\n 'M': d.pop('population_size', self.population_size),\n 
'num_tests': self.num_tests,\n 'num_searches': self.num_searches,\n 'num_searches_best': self.num_searches_best,\n 'bonus1': self.bonus1,\n 'bonus2': self.bonus2,\n 'num_enabled': self.num_enabled,\n 'local_searches': self.local_searches\n })\n return d", "def parameters(self):\n res = dict()\n res[\"population_size\"] = self.population_size\n res[\"mutation_prob\"] = self.mutation_prob\n res[\"crossover\"] = self.crossover\n res[\"selection\"] = self.selection\n res[\"sigma\"] = self.sigma\n res[\"crossover_method\"] = self.crossover_method\n res[\"selection_method\"] = self.selection_method\n res[\"best_rate\"] = self.best_rate\n res[\"n_parents\"] = self.n_parents\n res[\"model_parameters\"] = self.model.total_parameters()\n res[\"IDCT_from\"] = self.IDCT_from\n res[\"elitism\"] = self.elitism\n return res", "def densenet_params(model_name):\n params_dict = {\n # Coefficients: growth_rate, num_init_features, res\n 'densenet121': (32, 64, 224),\n 'densenet161': (48, 96, 224),\n 'densenet169': (32, 64, 224),\n 'densenet201': (32, 64, 224),\n }\n return params_dict[model_name]", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"Rdy[1.,0.0,10.0]\");\n self.modelBuilder.doVar(\"Rbk[1.,0.0,10.0]\");\n self.modelBuilder.doVar(\"Rqcd_emu[1,0.0,10.0]\");\n self.modelBuilder.doSet(\"POI\",\"Rbk,Rdy,Rqcd_emu\")", "def setup_parameters(self):\n structure = self.ctx.structure_initial_primitive\n ecutwfc = []\n ecutrho = []\n\n for kind in structure.get_kind_names():\n try:\n dual = self.ctx.protocol['pseudo_data'][kind]['dual']\n cutoff = self.ctx.protocol['pseudo_data'][kind]['cutoff']\n cutrho = dual * cutoff\n ecutwfc.append(cutoff)\n ecutrho.append(cutrho)\n except KeyError as exception:\n self.abort_nowait('failed to retrieve the cutoff or dual factor for {}'.format(kind))\n\n natoms = len(structure.sites)\n conv_thr = self.ctx.protocol['convergence_threshold'] * natoms\n\n self.ctx.inputs['parameters'] = {\n 'CONTROL': {\n 'restart_mode': 'from_scratch',\n 'tstress': self.ctx.protocol['tstress'],\n },\n 'SYSTEM': {\n 'ecutwfc': max(ecutwfc),\n 'ecutrho': max(ecutrho),\n 'smearing': self.ctx.protocol['smearing'],\n 'degauss': self.ctx.protocol['degauss'],\n 'occupations': self.ctx.protocol['occupations'],\n },\n 'ELECTRONS': {\n 'conv_thr': conv_thr,\n }\n }", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"eAfb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"eA0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doVar(\"dAfb[0.,-0.75,0.75]\");\n self.modelBuilder.doVar(\"dA0[0.0, -1.0, 1.0]\");\n #self.modelBuilder.doSet(\"POI\",\"dAfb,dA0\")\n self.modelBuilder.doSet(\"POI\",\"dAfb\")\n self.modelBuilder.factory_('expr::mAfb(\"@0+@1\",eAfb,dAfb)')\n self.modelBuilder.factory_('expr::mA0(\"(@0+@1)\",eA0,dA0)')\n\n \n self.modelBuilder.factory_('expr::eAlph(\"2.0*@0/(2.0-@0)\",eA0)')\n self.modelBuilder.factory_('expr::eNorm(\"3.0/4.0/(2.0+@0)\",eAlph)')\n self.modelBuilder.factory_('expr::eRAlph(\"@0*@1\",eAlph,eNorm)')\n self.modelBuilder.factory_('expr::eRpl(\"(@0+@1)\",eNorm,eAfb)')\n self.modelBuilder.factory_('expr::eRmn(\"(@0-@1)\",eNorm,eAfb)')\n\n self.modelBuilder.factory_('expr::mAlph(\"2.0*@0/(2.0-@0)\",mA0)')\n self.modelBuilder.factory_('expr::mNorm(\"3.0/4.0/(2.0+@0)\",mAlph)')\n self.modelBuilder.factory_('expr::mRAlph(\"@0*@1\",mAlph,mNorm)')\n self.modelBuilder.factory_('expr::mRpl(\"(@0+@1)\",mNorm,mAfb)')\n self.modelBuilder.factory_('expr::mRmn(\"(@0-@1)\",mNorm,mAfb)')", "def get_model_params(model_name, override_params):\n if 
model_name.startswith('densenet'):\n g, n, s = densenet_params(model_name)\n blocks_args, global_params = densenet(\n model_name=model_name, growth_rate=g, num_init_features=n, image_size=s)\n else:\n raise NotImplementedError('model name is not pre-defined: %s' % model_name)\n if override_params:\n # ValueError will be raised here if override_params has fields not included in global_params.\n global_params = global_params._replace(**override_params)\n return list(blocks_args), global_params", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"eAfb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"eA0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doVar(\"mAfb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"mA0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doSet(\"POI\",\"eAfb,mAfb\")\n\n \n self.modelBuilder.factory_('expr::eAlph(\"2.0*@0/(2.0-@0)\",eA0)')\n self.modelBuilder.factory_('expr::eNorm(\"3.0/4.0/(2.0+@0)\",eAlph)')\n self.modelBuilder.factory_('expr::eRAlph(\"@0*@1\",eAlph,eNorm)')\n self.modelBuilder.factory_('expr::eRpl(\"(@0+@1)\",eNorm,eAfb)')\n self.modelBuilder.factory_('expr::eRmn(\"(@0-@1)\",eNorm,eAfb)')\n\n self.modelBuilder.factory_('expr::mAlph(\"2.0*@0/(2.0-@0)\",mA0)')\n self.modelBuilder.factory_('expr::mNorm(\"3.0/4.0/(2.0+@0)\",mAlph)')\n self.modelBuilder.factory_('expr::mRAlph(\"@0*@1\",mAlph,mNorm)')\n self.modelBuilder.factory_('expr::mRpl(\"(@0+@1)\",mNorm,mAfb)')\n self.modelBuilder.factory_('expr::mRmn(\"(@0-@1)\",mNorm,mAfb)')", "def get_model_params(self):\n return self._model_params", "def getParams(self):\n\n\t\tparams = {\"Nparticles\":self.__Nparticles,\"Nkicks\":self.__Nkicks,\"kappa\":self.__kappa, \"eta\":self.__eta,\"gamma\":self.__gamma, \"omega\":self.__omega,\n\t\t\"Kbt\":self.__Kbt, \"tk\":self.__tk}\n\n\t\treturn params", "def get_prunable_parameters(\r\n model: Module,\r\n only_matrices: bool = True,\r\n # TODO: Replace exclude with a filter function per model.\r\n exclude: List[str] = [\"embed\", \"decoder.\", \"pooler.\", \"shared.\"],\r\n) -> List[Parameter]:\r\n # names = [name for name, _ in model.named_parameters()]\r\n # import pdb; pdb.set_trace()\r\n return [\r\n param\r\n for name, param in model.named_parameters()\r\n if (\r\n (not only_matrices or len(param.size()) == 2)\r\n and not any(substr in name for substr in exclude)\r\n )\r\n ]" ]
[ "0.7432524", "0.680208", "0.65654784", "0.649133", "0.6464081", "0.64224565", "0.61998606", "0.6116676", "0.6029708", "0.6024388", "0.59877205", "0.59875476", "0.59871364", "0.59819573", "0.59542567", "0.5951514", "0.59463716", "0.5937351", "0.5936956", "0.59318024", "0.5910307", "0.5909747", "0.5907286", "0.5900464", "0.5884304", "0.5848801", "0.5837779", "0.5834596", "0.5815642", "0.581327" ]
0.7252054
1
Returns default optimised model parameters.
def get_default_model_params(self):

        model_params = {
            'dropout_rate': 0.3,
            'hidden_layer_size': 160,
            'learning_rate': 0.01,
            'minibatch_size': 64,
            'max_gradient_norm': 0.01,
            'num_heads': 1,
            'stack_size': 1
        }

        return model_params
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _default_params(self) -> Dict[str, Any]:\n normal_params = {\n \"temperature\": self.temperature,\n \"max_tokens\": self.max_tokens,\n \"top_p\": self.top_p,\n \"frequency_penalty\": self.frequency_penalty,\n \"presence_penalty\": self.presence_penalty,\n \"n\": self.n,\n # \"best_of\": self.best_of,\n \"request_timeout\": self.request_timeout,\n \"logit_bias\": self.logit_bias,\n }\n return {**normal_params, **self.model_kwargs}", "def _default_parameters():\n\n return {\n 'opt': 'adadelta',\n 'activation_function': 'softmax',\n 'lr': 0.0001,\n 'decay': 1e-6,\n 'loss': 'categorical_crossentropy',\n 'batch_size': 32,\n 'nb_epoch': 20,\n 'shuffle': True,\n 'momentum': 0.9,\n 'nesterov': True,\n 'rho': 0.95,\n 'epsilon': 1e-08,\n 'beta_1': 0.9,\n 'beta_2': 0.999,\n 'horizontal_flip': False,\n 'im_size': 240,#256,\n 'dense_layer': 1024,\n 'nb_classes': 10,\n 'nb_channels': 3,\n 'dropout': 0.5,\n 'metrics': ['accuracy'],\n 'volume': None,\n 'input_size': 25,\n 'temporal': False,\n 'input_dim': 512,\n 'nb_frames': 60,\n 'stride': 16,\n 'nb_hidden':512,\n 'lstm': False\n\n }", "def getDefaultParameterValues(self):\r\n dct = {}\r\n self.initializeRoadRunnerModel()\r\n self.roadrunnerModel.reset()\r\n for parameterName in self.parametersToFit:\r\n dct[parameterName] = self.roadrunnerModel.model[parameterName]\r\n return dct", "def default_optimization_hparams() -> Dict[str, Any]:\n return {\n \"optimizer\": {\n \"type\": \"Adam\",\n \"kwargs\": {\n \"lr\": 0.001\n }\n },\n \"learning_rate_decay\": {\n \"type\": \"\",\n \"kwargs\": {}\n },\n \"gradient_clip\": {\n \"type\": \"\",\n \"kwargs\": {}\n },\n \"gradient_noise_scale\": None,\n # TODO(zhiting): allow module-level control of gradient_multipliers\n \"name\": None\n }", "def default_parameters():\n prm = Parameters('windkessel_model')\n\n prm.add('total_volume', float())\n\n prm.add('venous_compliance', float())\n prm.add('arterial_compliance', float())\n\n prm.add('venous_resistance', float())\n prm.add('arterial_resistance', float())\n prm.add('peripheral_resistance', float())\n\n prm.add('venous_resting_volume', float())\n prm.add('arterial_resting_volume', float())\n\n return prm", "def get_optimization_parameters(self):\n pass", "def parameters(self):\n return self._default_params", "def _default_params(self) -> dict[str, Any]:\n return {\n \"max_tokens\": self.max_tokens,\n \"temperature\": self.temperature,\n \"top_p\": self.top_p,\n \"logprobs\": self.logprobs,\n \"echo\": self.echo,\n \"stop_sequences\": self.stop_sequences,\n \"repeat_penalty\": self.repeat_penalty,\n \"top_k\": self.top_k,\n \"n_threads\": self.n_threads,\n \"n_ctx\": self.n_ctx,\n \"n_gpu_layers\": self.n_gpu_layers,\n \"n_gqa\": self.n_gqa if self.n_gqa else None,\n \"n_parts\": self.n_parts,\n \"seed\": self.seed,\n \"f16_kv\": self.f16_kv,\n \"logits_all\": self.logits_all,\n \"vocab_only\": self.vocab_only,\n \"use_mlock\": self.use_mlock,\n \"n_batch\": self.n_batch,\n \"last_n_tokens_size\": self.last_n_tokens_size,\n \"streaming\": self.streaming,\n }", "def default_params():\n params = {}\n params['dataset'] = 'adult'\n params['engines'] = ['MD','RDA']\n params['iters'] = 10000\n params['epsilon'] = 1.0\n params['delta'] = 0.0\n params['bounded'] = True\n params['frequency'] = 1\n params['seed'] = 0\n params['save'] = None\n params['load'] = None\n params['plot'] = None\n\n return params", "def optimize_params(self, qnodes=None):\n #logger.debug(\"optimize_params of baseclass --> no optimization available!!!\")\n return {}", "def 
optimize_parameters(self):\n pass", "def optimize_parameters(self):\n pass", "def optimize_parameters(self):\n pass", "def parameters_default(cls):\n return cls._Parameters.__new__.__defaults__", "def _default_parameters(cls) -> Options:\n params = super()._default_parameters()\n params.main_axes = None\n params.i_means = None\n params.q_means = None\n params.scales = None\n\n return params", "def get_parameters(self):\n params = {}\n for p in self.DEFAULT_VALUES.keys():\n params[p] = getattr(self, p)\n return params", "def get_default_params() -> Dict:\n default_params = {\n \"n_estimators\": {\n \"default_value\": 100,\n \"description\": \"Number of gradient boosted trees. \"\n \"Equivalent to number of boosting rounds.\",\n \"type\": \"int\"\n },\n \"max_depth\": {\n \"default_value\": 6,\n \"description\": \"Maximum tree depth for base learners.\",\n \"type\": \"int\"\n },\n \"learning_rate\": {\n \"default_value\": 0.3,\n \"description\": \"Boosting learning rate (xgb's 'eta')\",\n \"type\": \"float\"\n },\n \"verbosity\": {\n \"default_value\": 1,\n \"description\": \"The degree of verbosity. Valid values are 0 (silent) - 3 (debug).\",\n \"type\": [0, 1, 2, 3]\n },\n \"booster\": {\n \"default_value\": \"gbtree\",\n \"description\": \"Specify which booster to use: gbtree, gblinear or dart.\",\n \"type\": ['gbtree', 'gblinear', 'dart']\n },\n \"tree_method\": {\n \"default_value\": \"auto\",\n \"description\":\n '''\n Specify which tree method to use. Default to auto. If this parameter\n is set to default, XGBoost will choose the most conservative option\n available. It's recommended to study this option from parameters\n document.\n ''',\n \"type\": [\"auto\", \"exact\", \"approx\", \"hist\", \"gpu_hist\"]\n },\n \"n_jobs\": {\n \"default_value\": 1,\n \"description\": '''\n Number of parallel threads used to run xgboost. When used with other Scikit-Learn\n algorithms like grid search, you may choose which algorithm to parallelize and\n balance the threads. 
Creating thread contention will significantly slow dowm both\n algorithms.\n ''',\n \"type\": \"int\"\n },\n \"gamma\": {\n \"default_value\": 0.0,\n \"description\": \"Minimum loss reduction required to make a further \"\n \"partition on a leaf node of the tree.\",\n \"type\": \"float\"\n },\n \"min_child_weight\": {\n \"default_value\": 1.0,\n \"description\": \"Minimum loss reduction required to make a further \"\n \"partition on a leaf node of the tree.\",\n \"type\": \"float\"\n },\n \"max_delta_step\": {\n \"default_value\": 0.0,\n \"description\": \"Maximum delta step we allow each tree's weight estimation to be.\",\n \"type\": \"float\"\n },\n \"subsample\": {\n \"default_value\": 1.0,\n \"description\": \"Subsample ratio of the training instance.\",\n \"type\": \"float\"\n },\n \"colsample_bytree\": {\n \"default_value\": 1.0,\n \"description\": \"Subsample ratio of columns when constructing each tree.\",\n \"type\": \"float\"\n },\n \"colsample_bylevel\": {\n \"default_value\": 1.0,\n \"description\": \"Subsample ratio of columns for each level.\",\n \"type\": \"float\"\n },\n \"colsample_bynode\": {\n \"default_value\": 1.0,\n \"description\": \"Subsample ratio of columns for each split.\",\n \"type\": \"float\"\n },\n \"reg_alpha\": {\n \"default_value\": 0.0,\n \"description\": \"L1 regularization term on weights\",\n \"type\": \"float\"\n },\n \"reg_lambda\": {\n \"default_value\": 0.0,\n \"description\": \"L2 regularization term on weights\",\n \"type\": \"float\"\n },\n \"scale_pos_weight\": {\n \"default_value\": 1.0,\n \"description\": \"Balancing of positive and negative weights.\",\n \"type\": \"float\"\n },\n \"random_state\": {\n \"default_value\": 0,\n \"description\": \"Random number seed.\",\n \"type\": \"int\"\n },\n \"base_score\": {\n \"default_value\": 0.5,\n \"description\": \"The initial prediction score of all instances, global bias.\",\n \"type\": \"float\"\n },\n # \"missing\": {\n # \"default_value\": None,\n # \"description\": \"Value in the data which needs to be present as a missing value.\",\n # \"type\": \"float\"\n # },\n \"num_parallel_tree\": {\n \"default_value\": 1,\n \"description\": \"Used for boosting random forest.\",\n \"type\": \"int\"\n },\n # \"monotone_constraints\": {\n # \"default_value\": \"(0,0)\",\n # \"description\": \" Constraint of variable monotonicity. \"\n # \"See tutorial for more information.\",\n # \"type\": \"str\"\n # },\n # \"interaction_constraints\": {\n # \"default_value\": None,\n # \"description\": '''\n # Constraints for interaction representing permitted interactions. The\n # constraints must be specified in the form of a nest list, e.g. [[0, 1],\n # [2, 3, 4]], where each inner list is a group of indices of features\n # that are allowed to interact with each other. See tutorial for more\n # information\n # ''',\n # \"type\": \"str\"\n # },\n \"importance_type\": {\n \"default_value\": \"gain\",\n \"description\": '''\n The feature importance type for the feature_importances. 
property:\n either \"gain\", \"weight\", \"cover\", \"total_gain\" or \"total_cover\".\n ''',\n \"type\": [\"gain\", \"weight\", \"cover\", \"total_gain\", \"total_cover\"]\n }\n }\n\n return default_params", "def get_default_hparams():\n hparams_map = base_model.get_default_hparams().values()\n hparams_map.update({\n 'conditional': True,\n 'dec_rnn_size': [512], # Decoder RNN: number of units per layer.\n 'dec_rnn_attn_len': 0, # Decoder RNN: length of attention vector.\n 'enc_rnn_size': [256], # Encoder RNN: number of units per layer per dir.\n 'dropout_keep_prob': 1.0, # Probability all dropout keep.\n 'sampling_schedule': 'constant', # constant, exponential, inverse_sigmoid\n 'sampling_rate': 0.0, # Interpretation is based on `sampling_schedule`.\n })\n return tf.contrib.training.HParams(**hparams_map)", "def get_model_params(model_name, override_params):\n if model_name.startswith('efficientnet'):\n w, d, s, p = efficientnet_params(model_name)\n # note: all models have drop connect rate = 0.2\n blocks_args, global_params = efficientnet(\n width_coefficient=w, depth_coefficient=d, dropout_rate=p, image_size=s)\n else:\n raise NotImplementedError('model name is not pre-defined: %s' % model_name)\n if override_params:\n # ValueError will be raised here if override_params has fields not included in global_params.\n global_params = global_params._replace(**override_params)\n return blocks_args, global_params", "def get_model_params(model_name, override_params):\n if model_name.startswith('efficientnet'):\n w, d, s, p = efficientnet_params(model_name)\n # note: all models have drop connect rate = 0.2\n blocks_args, global_params = efficientnet(\n width_coefficient=w, depth_coefficient=d, dropout_rate=p, image_size=s)\n else:\n raise NotImplementedError('model name is not pre-defined: %s' % model_name)\n if override_params:\n # ValueError will be raised here if override_params has fields not included in global_params.\n global_params = global_params._replace(**override_params)\n return blocks_args, global_params", "def __init__(self, defaults={}, data=None):\n\n super().__init__(\n defaults={**OptimizationParameters.parameters, **defaults}, data=data\n )", "def get_params(self):\n return {'threshold': self.threshold,\n 'subsample': self.subsample,\n 'estimator': self.estimator,\n 'n_folds': self.n_folds,\n 'stratify': self.stratify,\n 'random_state': self.random_state,\n 'n_jobs': self.n_jobs}", "def default_parameters(self) -> List[Parameter]:\n return self.settings.job_default_parameters", "def default_training_params():\n N_EPOCHS = 100\n BATCH_SIZE = 64\n EPSILON = 0.0001\n return N_EPOCHS, BATCH_SIZE, EPSILON", "def default_parameters(name):\n prm = Parameters(name)\n\n prm.add('venous_compliance', float())\n prm.add('arterial_compliance', float())\n\n prm.add('venous_resistance', float())\n prm.add('arterial_resistance', float())\n prm.add('peripheral_resistance', float())\n\n prm.add('venous_resting_volume', float())\n prm.add('arterial_resting_volume', float())\n\n return prm", "def default_parameters(name):\n prm = Parameters(name)\n\n prm.add('total_volume', 5000.0) # Not important for non-closed loop. 
Included for compatibility.\n\n prm.add('venous_pressure', float())\n\n prm.add('arterial_compliance', float())\n\n prm.add('venous_resistance', float())\n prm.add('arterial_resistance', float())\n prm.add('peripheral_resistance', float())\n\n return prm", "def default_parameters():\n prm = Parameters('lvad_model')\n\n prm.add('lvad_volume', 66.0)\n\n prm.add('alpha_slope', 0.0091)\n prm.add('alpha_intercept', 1.4)\n\n prm.add('beta_slope', -0.19)\n prm.add('beta_intercept', -1.9)\n\n prm.add('frequency', float())\n\n return prm", "def get_fixed_params(self):\n\n fixed_params = {\n 'total_time_steps': 40,\n 'num_encoder_steps': 39,\n 'num_epochs': 100,\n 'early_stopping_patience': 10,\n 'multiprocessing_workers': 2,\n }\n\n return fixed_params", "def get_base_parameters(cls):\n return {\n \"cutoff\": None,\n \"method\": None\n }", "def params():\n return utils.Params('../experiments/base-model/params.json')" ]
[ "0.786182", "0.7648742", "0.74824107", "0.74050725", "0.7169978", "0.7132334", "0.71250355", "0.712289", "0.7111139", "0.69280326", "0.6880837", "0.6880837", "0.6880837", "0.6879771", "0.68396217", "0.67895633", "0.6772952", "0.67317784", "0.6703785", "0.6703785", "0.6691981", "0.6664467", "0.6653366", "0.66215324", "0.66021466", "0.66005135", "0.65533924", "0.6549365", "0.6547869", "0.6543906" ]
0.81408334
0
sum of all the values from one prop of fund daily report, of course many of the props make no sense to sum
def tot(self, prop="基金现值", date=yesterdayobj()):
        res = 0
        for fund in self.fundtradeobj:
            res += fund.dailyreport(date).iloc[0][prop]
        return res
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def SumaryPresupuesto(vj):\n\n sumaUSD = sumaCUC = totalUSD = totalCUC = 0.0\n\n for row in vj.tbPresupesto.rows.values():\n cambio = row.cambio\n moneda = row.moneda\n value = row.value\n\n if moneda == MD.Usd:\n sumaUSD += value\n totalUSD += value\n totalCUC += ( value * cambio )\n else:\n sumaCUC += value\n totalCUC += value\n totalUSD += ( value / cambio )\n\n vj.PresupCuc = totalCUC\n if totalUSD>0 and totalCUC>0: \n vj.MD.SetChange( totalCUC/totalUSD, MD.Usd, MD.Cuc )", "def sum_values(self):\n raise NotImplementedError", "def _get_sum_total(\n self, cr, uid, brw, operand, number_month=None,\n one_per=False, bag=None, context=None):\n context = context and dict(context) or {}\n res = 0\n\n # If the report is two or twelve columns, will choose the field needed\n # to make the sum\n if context.get('whole_fy', False) or one_per:\n field_name = 'ytd'\n else:\n field_name = 'period_%s' % str(number_month)\n\n # It takes the sum of the total_ids & operand_ids\n for ttt in getattr(brw, operand):\n res += bag[ttt.id].get(field_name, 0.0)\n return res", "def sum (self):\n return self.values.sum ()", "def sum (self):\n return self.values.sum ()", "def sum(self):\n return sum(self.values)", "def sum(self):\n return sum(self._values.values())", "def total(evictiondata):\r\n total = 0\r\n for index, row in evictiondata.iterrows():\r\n total += row['filings_2020']", "def total(proportions):\n final = {}\n for i in proportions:\n if i in running_total:\n final[i] = proportions[i] * running_total[i]\n print(final)\n else:\n final[i] = 0\n print(final)\n\n total_sum = sum(final.values())\n return total_sum", "def _sum(self):\n s = 0\n for element, value in self.items():\n s += value\n return s", "def sum(self):\n return sum(self.items())", "def _total_d(self):\n debit = 0.0\n for l in self.data:\n debit += l['debit']\n self.t_credit += l['credit']\n self.t_balance += l['balance']\n return debit", "def patrimony_total(self):\n pass", "def total(self):\n return sum(self.d.values())", "def sum(self):\n # skipna == True\n # only_numerical == True\n # skipna == True\n return self._lift(\"sum\")", "def sum(self):\n return self._reduce_for_stat_function(F.sum, only_numeric=True)", "def total_present_value_rule(_m):\r\n\r\n return sum(m.DELTA[y] * (m.INV[y] + m.FOM[y] + m.OP[y]) for y in m.Y) + m.EOH", "def total_wc(d):\n return sum(d.values())", "def calculate_profit(self):", "def sum(self) -> float:\n return sum(self.values)", "def running_total(date_list):\n return sum(d.price for d in date_list)", "def sumAllValues(self,*toSkip):\n sum=0\n for counterKey in self.counters.keys():\n if not counterKey in toSkip: sum += self.counters[counterKey]\n # 026 #self.debug.mainLogger.debug(\"Sumation of all counters finished with result %i.\"%(sum))\n return sum", "def _amount_all(self, cr, uid, ids,field_name, arg, context={}):\n res={}\n for record in self.browse(cr, uid, ids, context=context):\n val = 0.0\n for line in record.enrich_lines:\n if line.state == 'done' :\n val += line.cost\n res[record.id] = {\n 'paid_amount':val,\n 'residual_amount':record.amount - val,\n }\n return res", "def total(self):\n total = sum(self.d.values())\n return total", "def sum(self):\n return self._summarize(lambda c: c.sum)", "def calcularTotal(self):\n subtotales=[]\n for row in range(0,self.tableFactura.rowCount()):\n subtotales.append(float(self.tableFactura.item(row,2).text()))\n importeTotal=sum(subtotales)\n return importeTotal", "def total(self, desired_period: int = 12):\n self._trigger_gather()\n result = 
Decimal(0)\n for item in self.elements:\n result += item.income.amount(desired_period)\n return(Decimal(result))", "def total(self, desired_period: int = 12):\n self._trigger_gather()\n result = Decimal(0)\n for item in self.elements:\n result += item.income.amount(desired_period)\n return(Decimal(result))", "def calculate_sum_of_all_attributes(self):\n\n sum = 0\n\n for key, val in self.__dict__.items():\n\n if isinstance(val, (int, float)):\n sum += val\n\n return sum", "def sum(self):\n return self.vsum" ]
[ "0.6597488", "0.64506793", "0.64362556", "0.62868077", "0.62868077", "0.6221355", "0.61697733", "0.615913", "0.6102456", "0.6087261", "0.6078752", "0.6066523", "0.60638386", "0.6062406", "0.6061726", "0.60488385", "0.6031776", "0.60287493", "0.59733987", "0.5963322", "0.59592813", "0.5923061", "0.5902652", "0.58878934", "0.58853173", "0.58672684", "0.5824241", "0.5824241", "0.58189267", "0.58023936" ]
0.73464024
0
merge the different cftables for different funds into one table
def _mergecftb(self):
        dtlist = []
        for fund in self.fundtradeobj:
            dtlist2 = []
            for _, row in fund.cftable.iterrows():
                dtlist2.append((row["date"], row["cash"]))
            dtlist.extend(dtlist2)

        nndtlist = set([item[0] for item in dtlist])
        nndtlist = sorted(list(nndtlist), key=lambda x: x)
        reslist = []
        for date in nndtlist:
            reslist.append(sum([item[1] for item in dtlist if item[0] == date]))
        df = pd.DataFrame(data={"date": nndtlist, "cash": reslist})
        df = df[df["cash"] != 0]
        df = df.reset_index(drop=True)
        return df
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def combine_tables():\n ranked = unicef_data()\n cpi_table = cpi_data()\n cpi_and_cl = cpi_table.join(ranked, 'Country / Territory',\n 'Countries and areas', inner=True)\n return cpi_and_cl", "def transferfunds(self):", "def merge_table(t1, t2):\r\n input1 = pd.merge(t1, t2, on=\"zip_code\", how=\"inner\")\r\n covid_zip = gpd.GeoDataFrame(input1)\r\n\r\n #change column name\r\n covid_zip.columns = [\"zip_code\", \"covid_cases\", \"time\", \"geometry\"]\r\n return covid_zip", "def transfers_dataframe(tables_list):\r\n return pd.concat([pd.DataFrame(table[1:], columns=table[0]) for table in tables_list])", "def bef_ft(self):\n # Join #\n df = self.parent.pool_indicators\n # Sum for everyone #\n cols_sum = {'sw_merch' : 'sum',\n 'sw_foliage': 'sum',\n 'sw_other' : 'sum',\n 'hw_merch' : 'sum',\n 'hw_foliage': 'sum',\n 'hw_other' : 'sum',\n 'sw_coarse' : 'sum',\n 'sw_fine' : 'sum',\n 'hw_coarse' : 'sum',\n 'hw_fine' : 'sum'}\n # Group and aggregate #\n df = df.groupby(\"forest_type\").agg(cols_sum).reset_index()\n # Make new columns #\n df['tot_merch'] = df.sw_merch + df.hw_merch\n df['tot_abg'] = df.sw_merch + df.hw_merch + \\\n df.sw_foliage + df.hw_foliage + \\\n df.hw_other + df.sw_other\n df['bg_biomass'] = df.sw_coarse + df.sw_fine + \\\n df.hw_coarse + df.hw_fine\n # Calculate the biomass expansion factor\n # Ratio of (total above and below ground) / total above ground\n df['bef_tot'] = (df.tot_abg + df.bg_biomass) / df.tot_abg\n # Return #\n return df", "def mergeData(x,recover,death):\n x = x.rename(columns = {'latest':'confirm'})\n x['recover'] = recover['latest']\n x['death'] = death['latest']\n\n return x", "def process_compustat(fund):\n # dataframe schema\n required = ['at', 'che', 'act', 'lct', 'lt', 'sale']\n defaults = ['dlc', 'dltt', 'ivao', 'ivst', 'oiadp', 'pstk'] # per sloan 2005\n non_zeros = ['at']\n keep = ['dwc', 'dnco', 'dnoa', 'dfin', 'tacc', 'tacc2', 'oa', 'dacy', 'dac1', 'dac2', 'dac3']\n\n total = fund.shape[0]\n print(f'Total rows: {total}')\n print(f'{fund.date.min()} to {fund.date.max()}')\n\n # unique permno, time idx\n fund['time_idx'] = fund.date + MonthEnd(0)\n fund = fund.sort_values(['permno', 'time_idx'])\n fund = fund.groupby(['permno', 'time_idx'], as_index=False).last() # use the latest for each time idx\n\n # handle missing value\n fund.dropna(how='any', subset=required, inplace=True)\n fund = fund.fillna({col: 0 for col in defaults})\n print(f'Handle NAs: {fund.shape[0]} ({fund.shape[0] / total:.2%})')\n\n # force non-zero on specified columns\n print('Check zeros')\n for col in non_zeros:\n zero = (fund[col] == 0).sum()\n print(f' {col} has zeros: {zero}')\n fund = fund[fund[col] != 0]\n print(f' Drop {col} zeros: {fund.shape[0]} ({fund.shape[0] / total:.2%})')\n\n fund = fund[fund.time_idx > '1970-01-01']\n print(f'After 1970: {fund.shape[0]} ({fund.shape[0] / total:.2%})')\n\n # ========== Before Join ==========\n # extended definition of accruals\n fund['coa'] = fund.act - fund.che\n fund['col'] = fund.lct - fund.dlc\n fund['wc'] = fund.coa - fund.col\n fund['ncoa'] = fund['at'] - fund.act - fund.ivao\n fund['ncol'] = fund['lt'] - fund.lct - fund.dltt\n fund['nco'] = fund.ncoa - fund.ncol\n fund['fina'] = fund.ivst + fund.ivao\n fund['finl'] = fund.dltt + fund.dlc + fund.pstk\n fund['fin'] = fund.fina - fund.finl\n\n # ========== Use sentinel ==========\n # to allow monthly record. not the most efficiency way. 
But trade time for accuracy\n start = time.time()\n sentinel = []\n whole_range = pd.date_range(fund.time_idx.min(), fund.time_idx.max(), freq='m')\n whole_range = pd.DataFrame({'date': whole_range}, index=whole_range)\n\n time_range = fund.groupby('permno').agg({'time_idx': ['min', 'max']})\n for permno, times in time_range['time_idx'].iterrows():\n dates = whole_range.loc[times['min']: times['max']].values.flatten()\n sentinel.append(pd.DataFrame({'time_idx': dates, 'permno': permno}))\n sentinel = pd.concat(sentinel, axis=0)\n print(time.time() - start, 's')\n\n fund = pd.merge(fund, sentinel, on=['time_idx', 'permno'], how='outer')\n fund = fund.set_index(['permno', 'time_idx']).sort_index()\n fund = fund.groupby(level=0).fillna(method='ffill')\n total = fund.shape[0]\n print(f'Expended rows: {total}')\n\n # ========== After Join ==========\n # operating accruals\n fund['dca'] = fund.act - lag(fund, 'act')\n fund['dcash'] = fund.che - lag(fund, 'che')\n fund['dcl'] = fund.lct - lag(fund, 'lct')\n fund['dstd'] = fund.dlc - lag(fund, 'dlc')\n fund['dtp'] = (fund.txp - lag(fund, 'txp')).fillna(0) # set to 0 if missing\n fund['oa'] = ((fund.dca - fund.dcash) - (fund.dcl - fund.dstd - fund.dtp) - fund.dp) / lag(fund, 'at')\n\n # DAC\n fund['avg_at'] = (fund['at'] + lag(fund, 'at')) / 2\n fund['dsale'] = fund.sale - lag(fund, 'sale')\n fund['drec'] = fund.rect - lag(fund, 'rect')\n fund['dacy'] = fund.oa / fund.avg_at\n fund['dac1'] = 1 / fund.avg_at\n fund['dac2'] = (fund.dsale - fund.drec) / fund.avg_at\n fund['dac3'] = fund.ppegt / fund.avg_at\n\n # extended defintion of accruals\n fund['dwc'] = (fund.wc - lag(fund, 'wc')) / fund.avg_at\n fund['dnco'] = (fund.nco - lag(fund, 'nco')) / fund.avg_at\n fund['dnoa'] = fund.dwc + fund.dnco\n fund['dfin'] = (fund.fin - lag(fund, 'fin')) / fund.avg_at\n fund['tacc'] = fund.dwc + fund.dnco + fund.dfin\n fund['tacc2'] = fund.dwc + fund.dnco - fund.dfin\n\n fund = fund[keep].dropna().reset_index()\n print(f'Final rows: {fund.shape[0]} ({fund.shape[0] / total:.2%})')\n return fund", "def combsummary(self, date=yesterdayobj()):\n date = convert_date(date)\n columns = [\n \"基金名称\",\n \"基金代码\",\n \"当日净值\",\n \"单位成本\",\n \"持有份额\",\n \"基金现值\",\n \"基金总申购\",\n \"历史最大占用\",\n \"基金持有成本\",\n \"基金分红与赎回\",\n \"换手率\",\n \"基金收益总额\",\n \"投资收益率\",\n ]\n summarydf = pd.DataFrame([], columns=columns)\n for fund in self.fundtradeobj:\n summarydf = summarydf.append(\n fund.dailyreport(date), ignore_index=True, sort=True\n )\n tname = \"总计\"\n tcode = \"total\"\n tunitvalue = float(\"NaN\")\n tunitcost = float(\"NaN\")\n tholdshare = float(\"NaN\")\n tcurrentvalue = summarydf[\"基金现值\"].sum()\n tpurchase = summarydf[\"基金总申购\"].sum()\n tbtnk = bottleneck(self.totcftable[self.totcftable[\"date\"] <= date])\n tcost = summarydf[\"基金持有成本\"].sum()\n toutput = summarydf[\"基金分红与赎回\"].sum()\n tturnover = turnoverrate(self.totcftable[self.totcftable[\"date\"] <= date], date)\n # 计算的是总系统作为整体和外界的换手率,而非系统各成分之间的换手率\n tearn = summarydf[\"基金收益总额\"].sum()\n trate = round(tearn / tbtnk * 100, 4)\n trow = pd.DataFrame(\n [\n [\n tname,\n tcode,\n tunitvalue,\n tunitcost,\n tholdshare,\n tcurrentvalue,\n tpurchase,\n tbtnk,\n tcost,\n toutput,\n tturnover,\n tearn,\n trate,\n ]\n ],\n columns=columns,\n )\n summarydf = summarydf.append(trow, ignore_index=True, sort=True)\n\n return summarydf[columns].sort_values(by=\"基金现值\", ascending=False)", "def main():\n\n bank_lookup = {\n 'Spending': bank_main,\n 'Income': bank_main,\n 'Saving': bank_savings,\n 'Credit card': bank_credit,\n }\n\n 
sheet_data = petl.fromxlsx('sample-data.xlsx', sheet='Data')\n data = sheet_data.cut(*range(5))\n early_data = data.select('Date', lambda r: r.month <= 2)\n\n for account, table in split_table(early_data, 'Account'):\n modified_table = bank_lookup[account](table)\n # modified_table.tocsv(table['Account'][0]+'.csv')\n print(modified_table)", "def merge(df):\n return (df['utterance_t-3'] + df['utterance_t-2'] + df['utterance_t-1'] \\\n + df['utterance_t'])", "def _merge_table_data(self, first_page):\n table = self._table_defs.get(first_page * self.page_size)\n parsed_header = TDEF_HEADER.parse(table)\n data = table[parsed_header.header_end:]\n while parsed_header.next_page_ptr:\n table = self._table_defs.get(parsed_header.next_page_ptr * self.page_size)\n parsed_header = TDEF_HEADER.parse(table)\n data = data + table[parsed_header.header_end:]\n return data", "def coalesce_tables(tables):\n ## For each table, we can:\n ## Process the names in the first row, as column names\n ## If we have a \"quantity\" column, convert this from euro style to float\n\n ## If the column names are the same, we can append one to the other.\n \n proc_tables = OrderedDict()\n most_recent_key = None\n for tn,t in enumerate(tables):\n for i, r in enumerate(t):\n ##print(f\"Table {tn}, Row number {i}\")\n col_accessors = [str(x) for x in range(len(r))]\n ## Get the processed row names\n if i == 0: \n cnames = {}\n for c in col_accessors:\n cnames[c] = r[c].lower().strip().replace(\" \", \"\")\n continue\n ## Now, cnames was defined from iteration i==0\n rec = {}\n for c in col_accessors:\n rec[cnames[c]] = r[c]\n\n fixweight = lambda x: float(x.replace(\",\", \".\"))\n \n \n if 'netweight' in rec.keys():\n if rec['netweight'] is not None:\n rec['netweight'] = fixweight(rec['netweight'])\n\n if rec['no.'] is not None:\n ## new record\n most_recent_key = rec['no.']\n proc_tables[most_recent_key] = rec\n else:\n ## append the description to previous\n if rec['description'] is not None:\n proc_tables[most_recent_key]['description'] = proc_tables[most_recent_key]['description'] + \" \" + rec['description']\n\n\n return(list(proc_tables.values()))", "def sub_transfer_pricing(manager, df_fields, seq_recs, seq_reservas):\n # df_hotel = manager.get_dataframe(tables[\"dwc_bok_t_canco_hotel\"])\n # df_circuit = manager.get_dataframe(tables[\"dwc_bok_t_canco_hotel_circuit\"])\n # df_other = manager.get_dataframe(tables[\"dwc_bok_t_canco_other\"])\n # df_transfer = manager.get_dataframe(tables[\"dwc_bok_t_canco_transfer\"])\n # df_endow = manager.get_dataframe(tables[\"dwc_bok_t_canco_endowments\"])\n # df_extra = manager.get_dataframe(tables[\"dwc_bok_t_canco_extra\"])\n\n df_aux = df_fields.select(\"operative_incoming\", \"booking_id\", \"invoicing_company\",\n \"creation_date\", \"booking_currency\")\n\n df_hotel = sub_transfer_pricing_aux(manager, df_hotelt, seq_recs, seq_reservas, df_aux)\n df_circuit = sub_transfer_pricing_aux(manager, df_circuitt, seq_recs, seq_reservas, df_aux)\n df_other = sub_transfer_pricing_aux(manager, df_othert, seq_recs, seq_reservas, df_aux)\n df_transfer = sub_transfer_pricing_aux(manager, df_transfert, seq_recs, seq_reservas, df_aux)\n df_endow = sub_transfer_pricing_aux(manager, df_endowt, seq_recs, seq_reservas, df_aux)\n df_extra = sub_transfer_pricing_aux_extra(manager, df_extrat, seq_recs, seq_reservas, df_aux)\n\n df_impuesto_canco = df_hotel.union(df_circuit).union(df_other).union(df_transfer).union(df_endow).union(\n df_extra)\n\n df_impuesto_canco = 
df_impuesto_canco.groupBy(\"operative_incoming\", \"booking_id\") \\\n .agg({'impuesto_canco': 'sum'}).withColumnRenamed(\"SUM(impuesto_canco)\", \"impuesto_canco\")\n\n df_impuesto_canco = df_impuesto_canco.withColumnRenamed(\"operative_incoming\", \"seq_rec\") \\\n .withColumnRenamed(\"booking_id\", \"seq_res\")\n\n # add impuesto_canco\n df_fields = df_fields.join(df_impuesto_canco, [df_fields.operative_incoming == df_impuesto_canco.seq_rec,\n df_fields.booking_id == df_impuesto_canco.seq_res],\n 'left_outer').drop(\"seq_rec\", \"seq_res\")\n\n df_addcanco = sub_transfer_pricing_aux_add_canco(manager, df_fields, seq_recs, seq_reservas)\n\n df_addcanco = df_addcanco.withColumnRenamed(\"operative_incoming\", \"seq_rec\") \\\n .withColumnRenamed(\"booking_id\", \"seq_res\")\n\n # add add_impuesto_canco\n df_fields = df_fields.join(df_addcanco, [df_fields.operative_incoming == df_addcanco.seq_rec,\n df_fields.booking_id == df_addcanco.seq_res],\n \"left_outer\").drop(df_addcanco.seq_rec).drop(df_addcanco.seq_res)\n\n df_fields = df_fields.na.fill({'impuesto_canco': 0, 'add_impuesto_canco': 0})\n\n df_fields = df_fields.withColumn(\"Transfer_pricing\", df_fields.impuesto_canco + df_fields.add_impuesto_canco) \\\n .drop(\"impuesto_canco\", \"add_impuesto_canco\")\n\n df_fields = df_fields.withColumn(\"Transfer_pricing\", udf_round_ccy(df_fields.Transfer_pricing,\n df_fields.booking_currency))\n\n del df_hotel, df_circuit, df_other, df_transfer, df_endow, df_extra, df_impuesto_canco, df_addcanco, df_aux\n\n return df_fields", "def create_fact_table(input_file, sheet_name=None):\n\n df = read_excel(input_file, sheet_name=sheet_name)\n for idx, row in df.iterrows():\n funding_source = session.query(FundingSource).filter(\n FundingSource.source == row['funding source']\n ).first()\n funding_agency = session.query(FundingAgency).filter(\n FundingAgency.name == row['funding agency']\n ).first()\n\n en_name = row['research title eng']\n th_name = row['research title thai']\n\n en_name = en_name.strip() if not isinstance(en_name, float) else None\n th_name = th_name.strip() if not isinstance(th_name, float) else None\n\n project = session.query(ResearchProject).filter(\n ResearchProject.title_th == th_name).first()\n if not project:\n project = session.query(ResearchProject).filter(\n ResearchProject.title_en == en_name).first()\n\n total_funding = row['amount fund']\n\n staff_email = row['main researcher email']\n staff_ = mis_session.query(StaffAccount).filter(StaffAccount.email == staff_email).first()\n if staff_:\n s = Staff(\n email=staff_.email,\n en_firstname=staff_.staff_personal_info.en_firstname,\n en_lastname=staff_.staff_personal_info.en_lastname,\n\n )\n session.add(s)\n session.commit()\n else:\n print('Staff not found.')\n\n\n startdate = row['start date']\n enddate = row['end date']\n\n if project and staff_:\n ft = FundingResearchFact(\n funding_source_id=funding_source.id,\n funding_agency_id=funding_agency.id,\n project_id=project.id,\n total_funding=total_funding,\n staff_id=s.id,\n startdate_id=int(startdate.strftime('%Y%m%d')),\n enddate_id=int(enddate.strftime('%Y%m%d')),\n )\n session.add(ft)\n session.commit()", "def get_usda_food_data (connection):\n\n tables = ['usda_food_access_feb2014', 'usda_food_assistance_feb2014',\n 'usda_food_health_feb2014', 'usda_food_insecurity_feb2014',\n 'usda_food_stores_feb2014']\n\n for table in tables:\n if table == tables[0]:\n sql_query = \"\"\"\nSELECT \"FIPS\",\n\"PCT_LACCESS_POP10\" AS \"low_access_food_pct10\",\n\"PCT_LACCESS_LOWI10\" 
AS \"low_access_food_low_inc_pct10\",\n\"PCT_LACCESS_SENIORS10\" AS \"low_access_food_snr_pct10\",\n\"PCT_LACCESS_HHNV10\" AS \"low_access_food_no_car_pct10\"\nFROM {0};\n \"\"\".format (table)\n # print (sql_query)\n elif table == tables[1]:\n sql_query = \"\"\"\nSELECT \"FIPS\",\n\"REDEMP_SNAPS12\" AS \"snap_redemp_per_store_2012\"\nFROM {0};\n \"\"\".format (table)\n # print (sql_query)\n elif table == tables[2]:\n sql_query = \"\"\"\nSELECT \"FIPS\",\n\"PCT_DIABETES_ADULTS10\" AS \"pct_diabetes_adults_2010\",\n\"PCT_OBESE_ADULTS13\" AS \"pct_obese_adults_2013\",\n\"RECFACPTH12\" AS \"rec_fac_2012\",\n\"NATAMEN\" AS \"ers_nat_amenity_index_1999\"\nFROM {0};\n \"\"\".format (table)\n # print (sql_query)\n elif table == tables[3]:\n sql_query = \"\"\"\nSELECT \"FIPS\",\n\"FOODINSEC_10_12\" AS \"food_insec_house_pct_10_12\",\n\"VLFOODSEC_10_12\" AS \"very_low_food_insec_house_pct_10_12\"\nFROM {0};\n \"\"\".format (table)\n # print (sql_query)\n elif table == tables[4]:\n sql_query = \"\"\"\nSELECT \"FIPS\",\n\"GROCPTH12\" AS \"grocery_pct10\"\nFROM {0};\n \"\"\".format (table)\n # print (sql_query)\n\n if table == tables[0]:\n data = pd.read_sql_query(sql_query, con)\n data.where ((pd.notnull (data)), other=np.nan, inplace=True)\n data = data.dropna (subset=['FIPS'])\n data['FIPS'] = data['FIPS'].apply (lambda x: str(x).zfill (5))\n else:\n data_tmp = pd.read_sql_query(sql_query, con)\n data_tmp.where ((pd.notnull (data_tmp)), other=np.nan, inplace=True)\n data_tmp = data_tmp.dropna (subset=['FIPS'])\n data_tmp['FIPS'] = data_tmp['FIPS'].apply (lambda x: str(x).zfill (5))\n data = pd.merge (data, data_tmp, on=\"FIPS\", how=\"left\")\n\n return (data)", "def join_data(df_order, df_stock, df_all):\n df_stock0 = df_stock.set_index('date')\n df_list = []\n for index, data in df_order.iterrows():\n df_both = df_all.query(\n '(option_code == %r | option_code == %r) & date >= %r & date <= %r' % (\n data['code0'], data['code1'], data['date0'], data['date1']\n )\n )[['date', 'option_code', 'dte', 'sell', 'buy', 'strike']]\n\n df0 = df_both.query('option_code == %r' % data['code0'])\n df1 = df_both.query('option_code == %r' % data['code1'])\n df_join = pd.merge(df0, df1, on='date', suffixes=(0, 1))\n\n if data['signal0'] == 'BUY':\n first = df_join['buy0'] - df_join['sell1']\n remain = df_join['sell0'] - df_join['buy1']\n df_join['signal'] = ['BUY'] + ['SELL'] * len(remain[1:])\n df_join['option'] = [first.iloc[0]] + list(remain[1:])\n else:\n first = -df_join['sell0'] + df_join['buy1']\n remain = -df_join['buy0'] + df_join['sell1']\n df_join['signal'] = ['SELL'] + ['BUY'] * len(remain[1:])\n df_join['option'] = [first.iloc[0]] + list(remain[1:])\n\n df_join['pos_net'] = df_join['option'] - df_join['option'].iloc[0]\n df_join['pct_chg'] = df_join['pos_net'] / np.abs(df_join['option'].iloc[0]) + 0\n\n df_join = df_join.drop([\n 'sell0', 'buy0', 'sell1', 'buy1', 'strike0', 'strike1', 'dte1'\n ], axis=1)\n df_close = df_stock0[data['date0']:data['date1']]\n df_close = df_close[['close']].reset_index()\n df_both = pd.merge(df_join, df_close, on='date')\n df_both.rename(index=str, columns={'close': 'stock', 'dte0': 'dte'}, inplace=True)\n\n # print df.to_string(line_width=1000)\n df_both = df_both.replace([np.inf, -np.inf], np.nan)\n df_both = df_both.fillna(0)\n\n df_both = df_both.round({\n 'pos_net': 2,\n 'pct_chg': 2,\n })\n\n df_list.append(df_both)\n\n return df_list", "def update_data():\n etf_prices = get_prices(start=START_DATE, end=END_DATE)\n etf_returns = compute_returns(etf_prices)\n 
merged_etf_data = etf_prices.merge(etf_returns, right_index=True, left_index=True)\n indicators = compute_indicators(merged_etf_data) # this uses the \"ta\" lib, but it does not need\n # to be imported\n merged_etf_data = merged_etf_data.merge(indicators, right_index=True, left_index=True)\n vix_data = get_vix()\n data = merged_etf_data.merge(vix_data, right_index=True, left_index=True)\n data.to_csv('Data/database.csv')\n return", "def merge_both_tables():\n old = Table.read('data/data_table_cartesian_including_tims_stars_with_bg_ols_and_component_overlaps.fits')\n wanted = Table.read('data/scocen_candidates_300k_only_spatial_cut.fits')\n additional = Table.read('data/scocen_candidates_300k_only_spatial_cut_200k_to_determine_bg_ols.fits')\n\n d_old = dict(zip(old['source_id'], old['background_log_overlap']))\n d_add = dict(zip(additional['source_id'], additional['background_log_overlap']))\n d_old.update(d_add)\n dct = d_old\n\n ln_bg_ols = [dct[source_id] for source_id in wanted['source_id']]\n print\n len(ln_bg_ols), len(wanted)\n\n wanted['background_log_overlap'] = ln_bg_ols\n print\n wanted\n\n wanted.write('data/scocen_candidates_300k_only_spatial_cut.fits', overwrite=True, format='fits')", "def merge_breached():\n # read breach and CMS data\n breachdf = pd.read_csv(updated_breach_file_name, encoding='latin1')\n breachdf.rename(columns=lambda x: x.strip(), inplace=True)\n print(breachdf.isnull().sum().sum(), \"null columns\")\n\n df = pd.read_csv(CMS_file_name, encoding='latin1')\n df.rename(columns=lambda x: x.strip(), inplace=True)\n\n print(\"dataframes read\")\n\n # merge data\n new_df = df.merge(breachdf, left_on='FAC_NAME', right_on='FAC_NAME', how='outer')\n print(\"merged\", new_df)\n\n new_df.to_csv(merged_file_name, index=False)\n print(\"Written to\", merged_file_name)", "def return_combine_df_for_graph():\n\n\tkey = '18c95216b1230de68164158aeb02e2c2'\n\t# bade login with key\n\tbase = Dashboard(key)\n\t# get csv with write vars\n\tstart =os.path.dirname(os.path.realpath(sys.argv[0])) \n\tpath = os.path.join(start, 'Fred Graphs')\n\t# this path was used for flask,anothe day to fix this one \n\t#path = '/home/mike/Documents/coding_all/ModelApp/app/Fred Graphs'\n\t#base.write_fred_data_to_csv_from_dict(fred_econ_data, path)\n\n\t# convert csv to dict to use\n\tdf_dict = base.saved_csvs_to_dict(fred_econ_data.keys(), path)\n\n\t# skipped step, drop down, whyich can be typed into bc at some point will have list of all vars\n\t# and display graph indivusal with relevent data (type, seaonailty) displayed liek fed\n\n\t# next combine wanted vars to single df\n\tcombined_df = base.get_full_group_df(df_dict, time_interval='6M', group_unit='mean')\n\t# get spreads for IR rates\n\tcols_against = ['10 YR Treasury','Moody Aaa Yield','Moody Baa Yield','30 Year Mortgage', ]\n\tbase_col = 'Federal Funds Rate'\n\tspread_dict = base.get_yield_spreads_new(combined_df, cols_against, base_col, graph='no')\n\tspread_dict['date'] = combined_df.index\n\tcombined_spread_df = pd.DataFrame.from_dict(spread_dict)\n\tcombined_spread_df = combined_spread_df.set_index('date')\n\tcombined_spread_df.index = pd.to_datetime(combined_spread_df.index)\n\treturn combined_df, combined_spread_df", "def sub_tax_transfer_pricing_eur(manager, df_fields, seq_recs, seq_reservas):\n # df_hotel = manager.get_dataframe(tables[\"dwc_bok_t_canco_hotel\"])\n # df_circuit = manager.get_dataframe(tables[\"dwc_bok_t_canco_hotel_circuit\"])\n # df_other = manager.get_dataframe(tables[\"dwc_bok_t_canco_other\"])\n # 
df_transfer = manager.get_dataframe(tables[\"dwc_bok_t_canco_transfer\"])\n # df_endow = manager.get_dataframe(tables[\"dwc_bok_t_canco_endowments\"])\n # df_extra = manager.get_dataframe(tables[\"dwc_bok_t_canco_extra\"])\n\n df_aux = df_fields.select(\"operative_incoming\", \"booking_id\", \"invoicing_company\", \"creation_date\",\n \"booking_currency\")\n\n df_hotel = sub_tax_transfer_pricing_eur_aux(manager, df_hotelt, seq_recs, seq_reservas, df_aux)\n df_circuit = sub_tax_transfer_pricing_eur_aux(manager, df_circuitt, seq_recs, seq_reservas, df_aux)\n df_other = sub_tax_transfer_pricing_eur_aux(manager, df_othert, seq_recs, seq_reservas, df_aux)\n df_transfer = sub_tax_transfer_pricing_eur_aux(manager, df_transfert, seq_recs, seq_reservas, df_aux)\n df_endow = sub_tax_transfer_pricing_eur_aux(manager, df_endowt, seq_recs, seq_reservas, df_aux)\n df_extra = sub_tax_transfer_pricing_eur_aux_extra(manager, df_extrat, seq_recs, seq_reservas, df_aux)\n\n df_impuesto_canco = df_hotel.union(df_circuit).union(df_other).union(df_transfer).union(df_endow).union(\n df_extra)\n\n df_impuesto_canco = df_impuesto_canco.groupBy(\"operative_incoming\", \"booking_id\") \\\n .agg({'impuesto_canco': 'sum'}).withColumnRenamed(\"SUM(impuesto_canco)\", \"impuesto_canco\")\n\n df_impuesto_canco = df_impuesto_canco.withColumnRenamed(\"operative_incoming\", \"seq_rec\") \\\n .withColumnRenamed(\"booking_id\", \"seq_res\")\n\n df_fields = df_fields.join(df_impuesto_canco, [df_fields.operative_incoming == df_impuesto_canco.seq_rec,\n df_fields.booking_id == df_impuesto_canco.seq_res],\n 'left_outer').drop(\"seq_rec\", \"seq_res\")\n\n df_addcanco = sub_transfer_pricing_aux_add_canco(manager, df_fields, seq_recs, seq_reservas, EUR)\n\n df_addcanco = df_addcanco.withColumnRenamed(\"operative_incoming\", \"seq_rec\") \\\n .withColumnRenamed(\"booking_id\", \"seq_res\")\n\n # add add_impuesto_canco\n df_fields = df_fields.join(df_addcanco, [df_fields.operative_incoming == df_addcanco.seq_rec,\n df_fields.booking_id == df_addcanco.seq_res],\n \"left_outer\").drop(df_addcanco.seq_rec).drop(df_addcanco.seq_res)\n\n df_fields = df_fields.na.fill({'impuesto_canco': 0, 'add_impuesto_canco': 0})\n\n df_fields = df_fields.withColumn(\"Tax_Transfer_pricing_EUR\",\n df_fields.impuesto_canco + df_fields.add_impuesto_canco) \\\n .drop(\"impuesto_canco\", \"add_impuesto_canco\")\n\n del df_hotel, df_circuit, df_other, df_transfer, df_endow, df_extra, df_impuesto_canco, df_addcanco\n\n return df_fields", "def load_capitalCharge_data(tbls):\n tbls.optionalityChargeTbl.load_csv(\n \"s3://data.atoti.io/notebooks/irrbb/OptionalityCharge.csv\"\n )\n tbls.otherAPRAAmtTbl.load_csv(\n \"s3://data.atoti.io/notebooks/irrbb/OtherAPRAAmount.csv\"\n )\n\n hist_icc_df = process_historical_icc(\n \"https://data.atoti.io/notebooks/irrbb/HistoricalICC.csv\"\n )\n tbls.historcialICCTbl.load_pandas(hist_icc_df[tbls.historcialICCTbl.columns])", "def account_df(self, typ='trades', improve=False):\n cols = ['date_open', 'date_close', 'symbol', 'style', 'volume', 'price_open', 'price_stop', 'price_limit', 'price_close', 'comment', 'magic', 'order_id_master', 'order_id_stop', 'order_id_limit', 'direction', 'price_diff', 'price_diff', 'price_diff_d', 'price_diff_rel', 'price_diff_rel_d', 'MAE', 'MFE', 'MAE_rel', 'MFE_rel', 'price_trailing_diff', 'profit']\n d = self._d_orders[typ]\n if len(d)>0:\n df = pd.DataFrame(d.values(), index=d.keys())\n df = df.rename(columns={0: 'bo'})\n df['date_created'] = df['bo'].map(lambda o: o.date_created)\n 
df['date_open'] = df['bo'].map(lambda o: o.date_open)\n df['date_close'] = df['bo'].map(lambda o: o.date_close)\n df['date_closed'] = df['bo'].map(lambda o: o.date_closed)\n df['symbol'] = df['bo'].map(lambda o: o.symbol)\n #df['style'] = df['bo'].map(lambda o: o.style)\n df['volume'] = df['bo'].map(lambda o: o.volume)\n df['price_open'] = df['bo'].map(lambda o: o.price_open)\n df['price_stop'] = df['bo'].map(lambda o: o.price_stop)\n df['price_limit'] = df['bo'].map(lambda o: o.price_limit)\n df['price_close'] = df['bo'].map(lambda o: o.price_close)\n df['comment'] = df['bo'].map(lambda o: o.comment)\n df['magic'] = df['bo'].map(lambda o: o.magic)\n #df['order_id_master'] = df['bo'].map(lambda o: o.order_id_master)\n #df['order_id_stop'] = df['bo'].map(lambda o: o.order_id_stop)\n #df['order_id_limit'] = df['bo'].map(lambda o: o.order_id_limit)\n\n df['direction'] = df['bo'].map(lambda o: o.direction)\n\n df['price_diff'] = df['bo'].map(lambda o: o.price_diff)\n df['price_diff_d'] = df['bo'].map(lambda o: o.price_diff_d)\n df['price_diff_rel'] = df['bo'].map(lambda o: o.price_diff_rel)\n df['price_diff_rel_d'] = df['bo'].map(lambda o: o.price_diff_rel_d)\n \n df['MAE'] = df['bo'].map(lambda o: o.MAE)\n df['MFE'] = df['bo'].map(lambda o: o.MFE)\n \n #df['MAE_rel'] = df['MAE'] / df['price_open']\n #df['MFE_rel'] = df['MFE'] / df['price_open']\n df['MAE_rel'] = df['bo'].map(lambda o: o.MAE_rel)\n df['MFE_rel'] = df['bo'].map(lambda o: o.MFE_rel)\n \n\n #df['profit'] = df['volume'] * df['price_diff'].fillna(0)\n df['profit'] = df['bo'].map(lambda o: o.profit)\n #df['profit_rel'] = df['bo'].map(lambda o: o.profit_rel)\n \n if improve:\n try:\n df = improve_account_df_with_additional_data(df)\n except Exception as e:\n log.error(\"Can't improve account df with additional data\")\n log.error(\"Reason: %s\" % str(e))\n \n #del df['bo'] \n \n return(df)\n else:\n return(pd.DataFrame(columns=cols))", "def merge_tables():\r\n\r\n # get sql connection\r\n conn = get_sql_conn()\r\n\r\n # get all info from materials table\r\n query_mat = 'Select * from material_procurement'\r\n df_mat = pd.read_sql_query(query_mat, con=conn)\r\n df_mat = df_mat.drop(['uid'], axis=1)\r\n df_mat = df_mat.pivot(index='ball_milling_uid',\r\n columns='material_name',\r\n values='mass_fraction')\r\n df_mat = df_mat.reset_index()\r\n df_mat = df_mat.add_prefix('MT-')\r\n\r\n # get all info from ball mill table\r\n query_ball = 'Select * from ball_milling'\r\n df_ball = pd.read_sql_query(query_ball, con=conn)\r\n\r\n # added prefix to distinctly identify a column\r\n df_ball = df_ball.add_prefix('BM-')\r\n\r\n # get all info from hot process\r\n query_hot = 'Select * from hot_press'\r\n df_hot = pd.read_sql_query(query_hot, con=conn)\r\n\r\n # added prefix to distinctly identify a column\r\n df_hot = df_hot.add_prefix('HP-')\r\n\r\n # get all info from hall measurements table\r\n query_hall = 'Select * from hall_measurement'\r\n df_hall = pd.read_sql_query(query_hall, con=conn)\r\n\r\n # get all info from icp measurements table\r\n query_icp = 'Select * from icp_measurement'\r\n df_icp = pd.read_sql_query(query_icp, con=conn)\r\n\r\n # Left merge tables in database starting from materials area to lab reports\r\n df_com = df_ball.merge(df_mat, how='left', left_on='BM-uid',\r\n right_on='MT-ball_milling_uid')\r\n df_com = df_com.merge(df_hot, how='left', left_on='BM-hot_press_uid'\r\n , right_on='HP-uid')\r\n df_com = df_com.merge(df_hall.add_prefix('BM-HA-'), how='left',\r\n left_on='BM-output_material_uid',\r\n 
right_on='BM-HA-material_uid')\r\n df_com = df_com.merge(df_icp.add_prefix('BM-ICP-'), how='left',\r\n left_on='BM-output_material_uid',\r\n right_on='BM-ICP-material_uid')\r\n df_com = df_com.merge(df_hall.add_prefix('HP-HA-'), how='left',\r\n left_on='HP-output_material_uid',\r\n right_on='HP-HA-material_uid')\r\n df_com = df_com.merge(df_icp.add_prefix('HP-ICP-'), how='left',\r\n left_on='HP-output_material_uid',\r\n right_on='HP-ICP-material_uid')\r\n\r\n # close connection\r\n conn.close()\r\n\r\n # return complete db tables\r\n return df_com", "def merge_on_fip(df, directory):\n pol_fip_data = pd.read_csv(directory, index_col=0)\n df = df.merge(pol_fip_data, left_on='GESTFIPS', right_on='fips' ,how='left')\n df.drop(columns=['fips'], inplace=True)\n \n return df", "def get_data(self, df, latest_currency):\n file_paths = list(df[\"File\"])\n df = self.extract_df(file_paths[0])\n df = self.group_df(df)\n df = self.fill_league_currency(df, latest_currency)\n for file_path in file_paths[1:]:\n league = self.extract_df(file_path)\n league_grp = self.group_df(league)\n league_grp = self.fill_league_currency(league_grp, latest_currency)\n df = df.join(league_grp)\n df = df.reset_index(drop=True)\n return df", "def sub_tax_cost_transfer_pricing_eur(manager, df_fields, seq_recs, seq_reservas):\n # df_hotel = manager.get_dataframe(tables[\"dwc_bok_t_canco_hotel\"])\n # df_circuit = manager.get_dataframe(tables[\"dwc_bok_t_canco_hotel_circuit\"])\n # df_other = manager.get_dataframe(tables[\"dwc_bok_t_canco_other\"])\n # df_transfer = manager.get_dataframe(tables[\"dwc_bok_t_canco_transfer\"])\n # df_endow = manager.get_dataframe(tables[\"dwc_bok_t_canco_endowments\"])\n # df_extra = manager.get_dataframe(tables[\"dwc_bok_t_canco_extra\"])\n\n df_aux = df_fields.select(\"operative_incoming\", \"booking_id\", \"invoicing_company\", \"creation_date\",\n \"booking_currency\")\n\n df_hotel = sub_tax_transfer_pricing_eur_aux(manager, df_hotelt, seq_recs, seq_reservas, df_aux)\n df_circuit = sub_tax_transfer_pricing_eur_aux(manager, df_circuitt, seq_recs, seq_reservas, df_aux)\n df_other = sub_tax_transfer_pricing_eur_aux(manager, df_othert, seq_recs, seq_reservas, df_aux)\n df_transfer = sub_tax_transfer_pricing_eur_aux(manager, df_transfert, seq_recs, seq_reservas, df_aux)\n df_endow = sub_tax_transfer_pricing_eur_aux(manager, df_endowt, seq_recs, seq_reservas, df_aux)\n df_extra = sub_tax_transfer_pricing_eur_aux_extra(manager, df_extrat, seq_recs, seq_reservas, df_aux)\n\n df_impuesto_canco = df_hotel.union(df_circuit).union(df_other).union(df_transfer).union(df_endow).union(\n df_extra)\n\n df_impuesto_canco = df_impuesto_canco.groupBy(\"operative_incoming\", \"booking_id\") \\\n .agg({'impuesto_canco': 'sum'}).withColumnRenamed(\"SUM(impuesto_canco)\", \"impuesto_canco\")\n\n df_impuesto_canco = df_impuesto_canco.withColumnRenamed(\"operative_incoming\", \"seq_rec\") \\\n .withColumnRenamed(\"booking_id\", \"seq_res\")\n\n df_fields = df_fields.join(df_impuesto_canco, [df_fields.operative_incoming == df_impuesto_canco.seq_rec,\n df_fields.booking_id == df_impuesto_canco.seq_res],\n 'left_outer').drop(\"seq_rec\", \"seq_res\")\n\n df_addcanco = sub_transfer_pricing_aux_add_canco(manager, df_fields, seq_recs, seq_reservas, EUR)\n\n df_addcanco = df_addcanco.withColumnRenamed(\"operative_incoming\", \"seq_rec\") \\\n .withColumnRenamed(\"booking_id\", \"seq_res\")\n\n # add add_impuesto_canco\n df_fields = df_fields.join(df_addcanco, [df_fields.operative_incoming == df_addcanco.seq_rec,\n 
df_fields.booking_id == df_addcanco.seq_res],\n \"left_outer\").drop(df_addcanco.seq_rec).drop(df_addcanco.seq_res)\n\n df_fields = df_fields.na.fill({'impuesto_canco': 0, 'add_impuesto_canco': 0})\n\n df_fields = df_fields.withColumn(\"Tax_Cost_Transfer_pricing_EUR\",\n df_fields.impuesto_canco + df_fields.add_impuesto_canco) \\\n .drop(\"impuesto_canco\", \"add_impuesto_canco\")\n\n del df_hotel, df_circuit, df_other, df_transfer, df_endow, df_extra, df_impuesto_canco, df_addcanco\n\n return df_fields", "def join_country_code_data(daily_data, country_code_data):\n #new columns: country, country_code, geometry\n return country_code_data.merge(daily_data, left_on = 'country', right_on = 'Country/Region').drop(['country'], axis=1)", "def get_funda_data(conn, items):\n comp = conn.raw_sql(f\"\"\"\n select gvkey, datadate as date, fyear, cusip, sich, seq, \n {items}\n from comp.funda\n where indfmt='INDL' \n and datafmt='STD'\n and popsrc='D'\n and consol='C'\n and datadate >= '01/01/1960'\n \"\"\")\n\n ccm = conn.raw_sql(\"\"\"\n select gvkey, lpermno as permno, linkdt, linkenddt\n from crsp.ccmxpf_linktable\n where (linktype ='LU' or linktype='LC')\n \"\"\")\n\n print(f'comp records: {comp.shape[0]}')\n print(f'ccm records: {ccm.shape[0]}')\n\n comp['date'] = pd.to_datetime(comp.date)\n ccm['linkdt'] = pd.to_datetime(ccm['linkdt'])\n ccm['linkenddt'] = pd.to_datetime(ccm['linkenddt']).fillna(pd.to_datetime('today')) # use today if missing\n ccm1 = pd.merge(comp, ccm, how='left', on=['gvkey'])\n final = ccm1[(ccm1.date >= ccm1.linkdt) & (ccm1.date <= ccm1.linkenddt)]\n print(f'funda records: {final.shape[0]}')\n return final.drop(['linkdt', 'linkenddt', 'gvkey'], axis=1)", "def __load_company_data(self):\n\n for ticker_type, ticker_list in self.tickers.items():\n # yfinance only has sector, industry and country for stocks\n if ticker_type == \"STOCK\":\n for ticker in ticker_list:\n # Only gets fields for tickers with missing data\n # TODO: Should only get field missing for tickers with missing data\n # now it's taking the 4 of them\n if (\n self.__transactions.loc[\n self.__transactions[\"Ticker\"] == ticker,\n [\"Sector\", \"Industry\", \"Country\", \"Region\"],\n ]\n .isnull()\n .values.any()\n ):\n # Get ticker info in list [\"Sector\", \"Industry\", \"Country\", \"Region\"] from isin/ticker\n info_list = get_info_from_ticker(ticker)\n\n # Replace fields in transactions\n self.__transactions.loc[\n self.__transactions.Ticker == ticker,\n [\"Sector\", \"Industry\", \"Country\", \"Region\"],\n ] = info_list\n\n elif ticker_type == \"CRYPTO\":\n for ticker in ticker_list:\n if (\n self.__transactions.loc[\n self.__transactions[\"Ticker\"] == ticker,\n [\"Sector\", \"Industry\", \"Country\", \"Region\"],\n ]\n .isnull()\n .values.any()\n ):\n # Get ticker info in list [\"Sector\", \"Industry\", \"Country\", \"Region\"]\n info_list = [\"Crypto\", \"Crypto\", \"Crypto\", \"Crypto\"]\n\n # Replace fields in transactions\n self.__transactions.loc[\n self.__transactions.Ticker == ticker,\n [\"Sector\", \"Industry\", \"Country\", \"Region\"],\n ] = info_list\n\n else:\n for ticker in ticker_list:\n if (\n self.__transactions.loc[\n self.__transactions[\"Ticker\"] == ticker,\n [\"Sector\", \"Industry\", \"Country\", \"Region\"],\n ]\n .isnull()\n .values.any()\n ):\n # Get ticker info in list [\"Sector\", \"Industry\", \"Country\", \"Region\"]\n info_list = [\"-\", \"-\", \"-\", \"-\"]\n\n # Replace fields in transactions\n self.__transactions.loc[\n self.__transactions.Ticker == ticker,\n 
[\"Sector\", \"Industry\", \"Country\", \"Region\"],\n ] = info_list" ]
[ "0.5947118", "0.57218575", "0.56129074", "0.5611306", "0.5544104", "0.5530321", "0.54318535", "0.53656286", "0.53199464", "0.52788687", "0.51809466", "0.51803374", "0.51164687", "0.50988007", "0.50969416", "0.5092525", "0.50831497", "0.50804347", "0.5064814", "0.5063479", "0.50506955", "0.5045443", "0.5034089", "0.50279146", "0.50225335", "0.5001613", "0.49885938", "0.4982412", "0.49766514", "0.49646565" ]
0.75052285
0
Return a virtual status table with an mf (cash) column based on the given totmoney and cftable
def _vcash(totmoney, totcftable, cashobj):
    cashl = []
    cashl.append(totmoney + totcftable.iloc[0].cash)
    for i in range(len(totcftable) - 1):
        date = totcftable.iloc[i + 1].date
        delta = totcftable.iloc[i + 1].cash
        if delta < 0:
            cashl.append(
                myround(
                    delta
                    / cashobj.price[cashobj.price["date"] <= date].iloc[-1].netvalue
                )
            )
        else:
            cashl.append(delta)
    datadict = {"date": totcftable.loc[:, "date"], "mf": cashl}
    return pd.DataFrame(data=datadict)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def combsummary(self, date=yesterdayobj()):\n date = convert_date(date)\n columns = [\n \"基金名称\",\n \"基金代码\",\n \"当日净值\",\n \"单位成本\",\n \"持有份额\",\n \"基金现值\",\n \"基金总申购\",\n \"历史最大占用\",\n \"基金持有成本\",\n \"基金分红与赎回\",\n \"换手率\",\n \"基金收益总额\",\n \"投资收益率\",\n ]\n summarydf = pd.DataFrame([], columns=columns)\n for fund in self.fundtradeobj:\n summarydf = summarydf.append(\n fund.dailyreport(date), ignore_index=True, sort=True\n )\n tname = \"总计\"\n tcode = \"total\"\n tunitvalue = float(\"NaN\")\n tunitcost = float(\"NaN\")\n tholdshare = float(\"NaN\")\n tcurrentvalue = summarydf[\"基金现值\"].sum()\n tpurchase = summarydf[\"基金总申购\"].sum()\n tbtnk = bottleneck(self.totcftable[self.totcftable[\"date\"] <= date])\n tcost = summarydf[\"基金持有成本\"].sum()\n toutput = summarydf[\"基金分红与赎回\"].sum()\n tturnover = turnoverrate(self.totcftable[self.totcftable[\"date\"] <= date], date)\n # 计算的是总系统作为整体和外界的换手率,而非系统各成分之间的换手率\n tearn = summarydf[\"基金收益总额\"].sum()\n trate = round(tearn / tbtnk * 100, 4)\n trow = pd.DataFrame(\n [\n [\n tname,\n tcode,\n tunitvalue,\n tunitcost,\n tholdshare,\n tcurrentvalue,\n tpurchase,\n tbtnk,\n tcost,\n toutput,\n tturnover,\n tearn,\n trate,\n ]\n ],\n columns=columns,\n )\n summarydf = summarydf.append(trow, ignore_index=True, sort=True)\n\n return summarydf[columns].sort_values(by=\"基金现值\", ascending=False)", "def cash_income(df):\n return (df.aftertax_income -\n (1 - tc.HOUSING_CASH_SHARE) * df.housing_ben -\n (1 - tc.MCAID_CASH_SHARE) * df.mcaid_ben -\n (1 - tc.MCARE_CASH_SHARE) * df.mcare_ben -\n (1 - tc.OTHER_CASH_SHARE) * df.other_ben -\n (1 - tc.SNAP_CASH_SHARE) * df.snap_ben -\n (1 - tc.SSI_CASH_SHARE) * df.ssi_ben -\n (1 - tc.TANF_CASH_SHARE) * df.tanf_ben -\n (1 - tc.VET_CASH_SHARE) * df.vet_ben -\n (1 - tc.WIC_CASH_SHARE) * df.wic_ben)", "def portfolio_table(self):\n idx = set(name.split('-')[0].split('.')[0] for name, etf in self.etfs.items() if not etf.sold())\n table = pd.DataFrame({'Invested': 0, 'Shares':0, 'Share Price':0, 'Present Value':0, 'P/L':0, 'P/L%':0},index=idx)\n for name, etf in self.etfs.items():\n if not etf.sold():\n table.loc[name.split('-')[0].split('.')[0], 'Invested'] += etf.initial_investment()\n table.loc[name.split('-')[0].split('.')[0], 'Shares'] += etf.n_shares\n table.loc[name.split('-')[0].split('.')[0], 'Share Price'] = etf.stock_price()\n table.loc[name.split('-')[0].split('.')[0], 'Present Value'] += etf.present_value()\n table.loc[name.split('-')[0].split('.')[0], 'P/L'] += etf.profit_loss()\n table.insert(1, 'PMA', round(table['Invested'] / table['Shares'], 2))\n table.insert(3, 'Initial Weight', round(table['Invested'] / table['Invested'].sum() * 100, 2))\n table.insert(4, 'Present Weight', round(table['Present Value'] / table['Present Value'].sum() * 100, 2))\n table['P/L%'] = round(table['P/L'] / table['Invested'] * 100, 2)\n table['P/L'] = round(table['P/L'], 2)\n table['Present Value'] = round(table['Present Value'], 2)\n return table.sort_values('Invested', 0, ascending=False)", "def get_customer_balance_sheet(self):\n total = 0\n taxes = 0\n balances = 0\n un_paid_count = 0\n conflicts = 0\n unresolved_conflicts = 0\n projected_before_tax = 0\n\n invoice_list = Customer_Invoice.objects.all()\n count = len(invoice_list)\n for invoice in invoice_list:\n if invoice.invoice_quote.total_price_quoted:\n total += invoice.invoice_quote.total_price_quoted\n taxes += invoice.invoice_quote.tax_on_quote\n balances += invoice.get_balance_due()\n else:\n projected = invoice.get_cost()\n projected_before_tax += projected[1]\n if not 
invoice.paid_in_full:\n un_paid_count += 1\n for conflict in invoice.conflict.all():\n conflicts += 1\n if not conflict.conflict_resolution:\n unresolved_conflicts += 1\n profit = total - taxes\n\n return total, taxes, profit, balances, count, conflicts, unresolved_conflicts, projected_before_tax", "def cash(self, qtt_100s, qtt_50s, qtt_20s):\n return (qtt_100s * 100) + (qtt_50s * 50) + (qtt_20s * 20)", "def checking_account(ctx, year=CURRENT_YEAR):\n ss = open_spreadsheet('Business Checking Account Activity')\n worksheet = ss.worksheet(year)\n\n debit = credit = revenue = Decimal(0.0)\n categories = defaultdict(Decimal)\n\n rows = worksheet.get_all_records()\n for row in rows:\n category = row['Category']\n if category == 'Revenue':\n revenue += get_decimal(row['Credit'])\n else:\n categories[category] += get_decimal(row['Debit'])\n\n debit += get_decimal(row['Debit'])\n credit += get_decimal(row['Credit'])\n\n data = [\n ('Total debit', debit),\n ('Total credit', credit),\n ('Total revenue', revenue)\n ]\n table = AsciiTable(data, 'Summary')\n table.inner_heading_row_border = False\n print(table.table)\n\n\n data = sorted(categories.items(), key=lambda x: x[1], reverse=True)\n table = AsciiTable(data, 'Debits by category')\n table.inner_heading_row_border = False\n print(table.table)", "def fee_VS_tx_value(df):\n\n total_fees = df['Tx fees (USD)']\n tx_vol_USD = df['Tx Volume (USD)']\n result = total_fees.div(tx_vol_USD)\n result.name = 'Tx Fees / Tx Volume'\n return out(SETTINGS, df, result)", "def full_table():\n #oen the the file\n list_of_current_account_objects = []\n opened_file = open('customers.txt')\n opened_file.readline()\n for line in opened_file: #get a list of all the customers accounts as objects\n line_array = line.split(\",\")\n customer = Account((line_array[0]+\" \"+line_array[1]),line_array[2],line_array[4])\n list_of_current_account_objects.append(customer)\n #update the savings & current variables for all accounts.\n for i in list_of_current_account_objects:\n i.set_sav_bal(account_bal(i,\"savings\"))\n i.set_cur_bal(account_bal(i,\"current\"))\n\n #print the answer\n print(\"customer customer account number-avings balance-current balance\")\n for i in list_of_current_account_objects:\n print(i.get_name()+\"---\"+i.get_acc_num()+\"---\"+str(i.get_sav_bal())+\"---\"+str(i.get_cur_bal()))\n print()", "def button_fac_cob_ent(self):\n invoice = self._fac_ent()\n\n # pagar la factura\n # hacer configuracion para modificar esto\n receipt_obj = self.env['account.voucher.receiptbook']\n receipt = receipt_obj.search([('name', 'like', 'Recibos')], limit=1)\n\n journal = self.journal_id\n res = invoice.invoice_pay_customer()\n context = res['context']\n\n account_voucher_obj = self.env['account.voucher']\n voucher = account_voucher_obj.create({\n 'partner_id': context['default_partner_id'],\n 'journal_id': journal.id,\n 'account_id': journal.default_debit_account_id.id,\n 'type': context['type'],\n 'amount': context['default_amount'],\n 'net_amount': context['default_amount'],\n 'receiptbook_id': receipt.id,\n 'company_id': self.env.user.company_id.id\n })\n voucher.signal_workflow('proforma_voucher')\n\n account_move_line_obj = self.env['account.move.line']\n\n # obtener un recordser vacio\n lines2rec = account_move_line_obj.browse()\n\n # obtener las lineas a conciliar de facturas\n account_move_line = account_move_line_obj.search(\n [('document_number', '=', invoice.document_number)])\n for re in account_move_line:\n if re.account_id.reconcile:\n lines2rec += re\n\n # 
obtener las lineas a conciliar de pagos\n account_move_line = account_move_line_obj.search(\n [('document_number', '=', voucher.document_number)])\n for re in account_move_line:\n if re.account_id.reconcile:\n lines2rec += re\n\n period_obj = self.env['account.period']\n period = period_obj.find()\n\n # reconciliar las lineas de factura con pagos\n lines2rec.reconcile('manual',\n journal.default_debit_account_id.id, # writeoff_acc_id\n period.id, # writeoff_period_id,\n journal.id) # writeoff_journal_id)\n\n # imprime factura\n datas = {\n 'ids': invoice.ids,\n 'model': 'account.report_invoice',\n 'form': invoice.read()\n }\n return {\n 'type': 'ir.actions.report.xml',\n 'report_name': 'aeroo_report_ar_einvoice',\n 'datas': datas,\n }", "def silver_card(subtotal):\n return subtotal * 0.02", "def get_cash(self):\r\n return self.cash", "def PV_NetCashflows(t):\n if t > last_t:\n return 0\n else:\n return (prj_incm_Premium(t)\n - prj_exps_Total(t)\n - prj_bnft_Total(t) / (1 + DiscRate(t))\n + PV_NetCashflows(t + 1) / (1 + DiscRate(t)))", "def index():\n rows = db.execute(\"SELECT Symbol, SUM(Shares) as totalShares FROM cash WHERE id=:id GROUP BY Symbol HAVING totalShares > 0\", id=session[\"user_id\"])\n transactions=[]\n grand_total = 0\n for row in rows:\n stock = lookup(row[\"Symbol\"])\n transactions.append({\n \"Symbol\": stock[\"symbol\"],\n \"Name\": stock[\"name\"],\n \"Shares\": row[\"totalShares\"],\n \"Price\": usd(stock[\"price\"]),\n \"Total\": usd(stock[\"price\"] * row[\"totalShares\"])\n })\n grand_total += stock[\"price\"] * row[\"totalShares\"]\n rows = db.execute(\"SELECT cash FROM users WHERE id=:id\", id=session[\"user_id\"])\n cash = rows[0][\"cash\"]\n return render_template(\"table.html\", transactions=transactions, cash=usd(cash), grand_total=usd(grand_total))", "def bond_cashflows(fv, c, n, m):\n #front_cashflow = [fv*c/m for i in cashflow_time if i != 1]\n return [fv*c/m for i in cashflow_times(n,m) if i != 1]+[fv*c/m+fv]", "def askCash(total):\n respuesta = float(input(\"CUANTO PAGO EL REPARTIDOR? 
\"))\n if respuesta <= total:\n result = float(total) - respuesta\n return result\n else:\n print(\"EL PAGO TIENE QUE SER MENOR O IGUAL AL TOTAL DE LA ORDEN\")\n askCash(total)", "def calculate_gain_table(info_table):\n\n\t# sort and separate\n\tinfo_table['decile'] = 10 - pd.qcut(info_table['proba'], 10, labels = False)\n\n\t# group by each decile, calculate gain score\n\ttotal_positive = sum(info_table['target'])\n\tgain_table = info_table.groupby('decile', as_index = False)['target'].sum()\n\tgain_table = gain_table.rename(columns = {'target': 'positive'})\n\tgain_table['gain_score'] = gain_table['positive'] / total_positive\n\n\t# gain = cumsum(gain score)\n\tgain_table['gain'] = gain_table['gain_score'].cumsum()\n\n\treturn gain_table", "def tablecost(self):\n subtotal_getter = operator.attrgetter(\"subtotal\")\n\n cost = 0.0\n\n cost += sum(map(subtotal_getter, self.materials))\n cost += sum(map(subtotal_getter, self.processes))\n cost += sum(map(subtotal_getter, self.fasteners))\n cost += sum(map(subtotal_getter, self.toolings))\n\n return cost", "def _mergecftb(self):\n dtlist = []\n for fund in self.fundtradeobj:\n dtlist2 = []\n for _, row in fund.cftable.iterrows():\n dtlist2.append((row[\"date\"], row[\"cash\"]))\n dtlist.extend(dtlist2)\n\n nndtlist = set([item[0] for item in dtlist])\n nndtlist = sorted(list(nndtlist), key=lambda x: x)\n reslist = []\n for date in nndtlist:\n reslist.append(sum([item[1] for item in dtlist if item[0] == date]))\n df = pd.DataFrame(data={\"date\": nndtlist, \"cash\": reslist})\n df = df[df[\"cash\"] != 0]\n df = df.reset_index(drop=True)\n return df", "def _get_prix_tarif(self,cout,pricelist):\n cr = self._cr\n product=cout.name\n prix_tarif=0\n date=time.strftime('%Y-%m-%d') # Date du jour\n if pricelist:\n #Convertion du lot_mini de US vers UA\n min_quantity = self.env['product.uom']._compute_qty(cout.name.uom_id.id, cout.name.lot_mini, cout.name.uom_po_id.id)\n #TODO : Pour contourner un bug d'arrondi (le 31/01/2017)\n min_quantity=min_quantity+0.00000000001\n #TODO en utilisant la fonction repr à la place de str, cela ne tronque pas les décimales\n SQL=\"\"\"\n select ppi.price_surcharge\n from product_pricelist_version ppv inner join product_pricelist_item ppi on ppv.id=ppi.price_version_id\n where ppv.pricelist_id=\"\"\"+str(pricelist.id)+ \"\"\" \n and min_quantity<=\"\"\"+repr(min_quantity)+\"\"\"\n and (ppv.date_start <= '\"\"\"+date+\"\"\"' or ppv.date_start is null)\n and (ppv.date_end >= '\"\"\"+date+\"\"\"' or ppv.date_end is null)\n\n and ppi.product_id=\"\"\"+str(product.id)+ \"\"\" \n and (ppi.date_start <= '\"\"\"+date+\"\"\"' or ppi.date_start is null)\n and (ppi.date_end >= '\"\"\"+date+\"\"\"' or ppi.date_end is null)\n order by ppi.sequence\n limit 1\n \"\"\"\n cr.execute(SQL)\n result = cr.fetchall()\n for row in result:\n coef=1\n if min_quantity:\n coef=cout.name.lot_mini/min_quantity\n prix_tarif=row[0]/coef\n\n\n\n return prix_tarif", "def table_total(self):\n total = 0.00\n\n for customer in self.customers:\n total = total + customer.get_total()\n\n return total", "def impvol_table(data):\r\n try:\r\n data = data.to_records()\r\n except:\r\n pass\r\n return impvol_bisection(data['moneyness'], data['maturity'],\r\n data['premium'], data['call'])", "def get_discounts(table):\n\n discounts_money_spent = {}\n discount_levels = [100, 1000, 5000]\n discount_percents = [\"2%\", \"5%\", \"10%\"]\n\n customer_id_index = 2\n amount_sold_index = 4\n id_index = 0\n product_id_index = 3\n name_index = 1\n price_index = 3\n\n 
store_table = store.get_table()\n store.check_table(store_table)\n\n crm_table = crm.get_table('model/crm/customers.csv')\n\n for record in table:\n product_id = record[product_id_index]\n amount_sold = record[amount_sold_index]\n customer_id = record[customer_id_index]\n\n for customer in crm_table:\n customer_name = customer[name_index]\n client_id = customer[id_index]\n\n if customer_id == client_id:\n\n for game in store_table:\n game_id = game[id_index]\n price = game[price_index]\n if product_id == game_id:\n money_spent = int(price) * int(amount_sold)\n \n if customer_name in discounts_money_spent:\n discounts_money_spent[customer_name] += int(money_spent)\n else:\n discounts_money_spent[customer_name] = int(money_spent)\n \n discounts = {}\n\n for customer_name in discounts_money_spent:\n money_spent = discounts_money_spent[customer_name]\n if money_spent >= discount_levels[0] and money_spent < discount_levels[1]:\n discount = discount_percents[0]\n elif money_spent >= discount_levels[1] and money_spent < discount_levels[2]:\n discount = discount_percents[1]\n elif money_spent >= discount_levels[2]:\n discount = discount_percents[2]\n else:\n discount = \"0%\"\n\n discounts[customer_name] = discount\n\n return discounts", "def __cacula_agio(table):\n from m2py.misc.vectorize import column\n\n PV = table[0][-1]\n total = sum(column(table, 1))\n premium = total/PV - 1\n return round(premium, 2)", "def account_bal(user, card):\n amount = 0\n if card == \"savings\": #savings\n file = user.get_acc_num()+\"-\"+\"savings.txt\"\n file_opened = open(file)\n for line in file_opened:\n line_array =line.split(\"\\\\t\")\n if line_array[1] == \"deposit\":\n amount += float(line_array[2]) #check this with \\t\n else:\n amount -= float(line_array[2]) #check this with \\t\n return amount\n else: #current\n file = user.get_acc_num()+\"-\"+\"current.txt\"\n file_opened = open(file)\n for line in file_opened:\n line_array =line.split(\"\\\\t\")\n if line_array[1] == \"deposit\":\n amount += float(line_array[2]) #check this with \\t\n else:\n amount -= float(line_array[2]) #check this with \\t\n return amount", "def calc_monthly_cash(self):\n # shortcut to self\n s = self\n\n # Start the DataFrames, base and w/ heat pump\n # Each starts with just an index column with the month\n # Make shortcut variables as well.\n s.df_mo_dol_base = dfb = s.df_mo_en_base[[]].copy()\n s.df_mo_dol_hp = dfh = s.df_mo_en_base[[]].copy()\n\n # Determine the base electric use by month. Approach is different \n # if there is electric heat.\n is_electric_heat = (s.exist_heat_fuel_id == constants.ELECTRIC_ID)\n if not is_electric_heat:\n # Fuel-based space heat.\n # The User supplied a January and a May kWh usage value that should\n # be used for the base case (no heat pump) total electricity use.\n # But, need to come up with a kWh value for every month. Do that by\n # adjusting the kWh pattern available for this city.\n #\n # Determine the multiplier to adjust to the pattern to the actual.\n pat_use = np.array(s.city.avg_elec_usage)\n mult = (s.elec_use_jan - s.elec_use_may) / (pat_use[0] - pat_use[4])\n pat_use = mult * pat_use\n pat_use += s.elec_use_jan - pat_use[0]\n\n # The electricity use in the base case\n dfb['elec_kwh'] = pat_use\n\n # rough estimate of a base demand: not super critical, as the demand rate \n # structure does not have blocks. Assume a load factor of 0.4\n dfb['elec_kw'] = dfb.elec_kwh / (DAYS_IN_MONTH * 24.0) / 0.4\n\n else:\n # Electric Heat Case\n # No Jan and May values are provided. 
Instead we have possibly some\n # DHW, clothes drying, and cooking. Plus, we have base lights/other appliances.\n # And finally we have the Elecric heat making up the base electric usage.\n\n # First, DHW, Clothes Drying and Cooking. Assume flat use through year.\n # This is a numpy array because DAYS_IN_MONTH is an array.\n elec_kwh = s.fuel_other_uses / 8760.0 * DAYS_IN_MONTH * 24.0\n\n # Now lights and other misc. appliances. Some monthly variation, given\n # by LIGHTS_OTHER_PAT.\n elec_kwh += s.lights_other_elec / 8760.0 * LIGHTS_OTHER_PAT * DAYS_IN_MONTH * 24.0\n\n # For the peak demand of those two categories of use, just assume 40% load factor.\n elec_kw = elec_kwh / (DAYS_IN_MONTH * 24.0) / 0.4\n\n # Now add in space heating kWh and kW\n elec_kwh += s.df_mo_en_base.total_kwh.values\n elec_kw += s.df_mo_en_base.total_kw.values\n\n # store results\n dfb['elec_kwh'] = elec_kwh\n dfb['elec_kw'] = elec_kw\n\n # Make an object to calculate electric utility costs\n elec_cost_calc = ElecCostCalc(s.utility, sales_tax=s.sales_tax, pce_limit=s.pce_limit)\n # cost function that will be applied to each row of the cost DataFrame\n cost_func = lambda r: elec_cost_calc.monthly_cost(r.elec_kwh, r.elec_kw)\n\n dfb['elec_dol'] = dfb.apply(cost_func, axis=1)\n\n if not is_electric_heat:\n # Now fuel use by month. Remember that the home heat model only looked at\n # space heating, so we need to add in the fuel use from the other end uses\n # that use this fuel.\n dfb['secondary_fuel_units'] = s.df_mo_en_base.secondary_fuel_units + \\\n s.fuel_other_uses / 12.0\n dfb['secondary_fuel_dol'] = dfb.secondary_fuel_units * s.exist_unit_fuel_cost * (1. + s.sales_tax)\n else:\n # Electric Heat, so no secondary fuel\n dfb['secondary_fuel_units'] = 0.0\n dfb['secondary_fuel_dol'] = 0.0\n\n # Total Electric + space heat\n dfb['total_dol'] = dfb.elec_dol + dfb.secondary_fuel_dol\n\n # Now with the heat pump\n # determine extra kWh used in the heat pump scenario. Note, this will\n # be negative numbers if the base case used electric heat.\n extra_kwh = (s.df_mo_en_hp.total_kwh - s.df_mo_en_base.total_kwh).values\n dfh['elec_kwh'] = dfb['elec_kwh'] + extra_kwh\n extra_kw = (s.df_mo_en_hp.total_kw - s.df_mo_en_base.total_kw).values\n dfh['elec_kw'] = dfb['elec_kw'] + extra_kw\n dfh['elec_dol'] = dfh.apply(cost_func, axis=1)\n\n # Now fuel, including other end uses using the heating fuel\n if not is_electric_heat:\n dfh['secondary_fuel_units'] = s.df_mo_en_hp.secondary_fuel_units + \\\n s.fuel_other_uses / 12.0\n dfh['secondary_fuel_dol'] = dfh.secondary_fuel_units * s.exist_unit_fuel_cost * (1. 
+ s.sales_tax)\n else:\n # Electric Heat, so no secondary fuel\n dfh['secondary_fuel_units'] = 0.0\n dfh['secondary_fuel_dol'] = 0.0\n\n # Total Electric + space heat\n dfh['total_dol'] = dfh.elec_dol + dfh.secondary_fuel_dol", "def gold_card(subtotal):\n return subtotal * 0.05", "def gamestats(self, table, curr_team):\n\n # Drop unneeded header \n tmp = table.iloc[1:,]\n # Fix the column names by reading line 0\n tmp.columns = [x.replace(\" \", \"\").replace(\"/\",\"\").replace(\".\",\"\") for x in tmp.iloc[0]]\n # Drop row zero which held the header row\n tmp = tmp.drop(tmp.index[0])\n # Forward fill the dates for defensive split later \n tmp['Date'].fillna(method='ffill', inplace = True)\n # Add in the team \n tmp['Team'] = curr_team\n # Create an offense/defense variable\n tmp['OffenseDefense'] = tmp['Opponent']\n # If it's not a defensive total then it's offense - set that in the offensedefense variable\n tmp['OffenseDefense'] = tmp['OffenseDefense'].apply(lambda x: \"Defense\" if x == \"Defensive Totals\" else \"Offense\")\n # Set the defensive totals in the opponent varaible to nullls\n tmp['Opponent'] = tmp['Opponent'].apply(lambda x: None if x == \"Defensive Totals\" else x)\n # Forward fill the opponents in for analysis later\n tmp['Opponent'].fillna(method='ffill', inplace = True)\n # Forward fill the results in for analysis later \n tmp['Result'].fillna(method='ffill', inplace = True)\n return tmp", "def __init__(self, name, cash=1000000):\n self.name = name\n self.cash = cash\n self.term = 7\n self.stock_money = 0.40 * cash\n self.margin_money = 0.40 * cash\n self.fee = 30\n self.portfolios = list()\n self.maintenance_threshold = 0.25", "def _cashflow_rule(self, meta, m):\n activity = m.Activity # dict((comp, getattr(m, f\"{comp.name}_production\")) for comp in m.Components)\n total = self._compute_cashflows(m.Components, activity, m.Times, meta)\n return total", "def index():\n userid = session[\"user_id\"]\n purchased = db.execute(\"SELECT * FROM purchase WHERE id=:uid\", uid=userid)\n current_balance = db.execute(\"SELECT cash FROM users WHERE id=:uid\",uid=userid)\n nrows = len(purchased)\n # print(purchased)\n dic = {}\n data = []\n temp_total = 0.0\n for row in purchased:\n # print(row)\n dic[\"symbol\"] = row[\"symbol\"]\n dic[\"name\"] = row[\"name\"]\n dic[\"shares\"] = row[\"shares\"]\n temp = lookup(row[\"symbol\"])\n dic[\"price\"] = usd(temp[\"price\"])\n dic[\"total\"] = usd(temp[\"price\"] * row[\"shares\"])\n print(type(temp[\"price\"] * row[\"shares\"]))\n temp_total = temp_total + float(temp[\"price\"] * row[\"shares\"])\n data.append(dic.copy())\n # print(data)\n # print(data)\n c_balance = usd(current_balance[0].get(\"cash\"))\n # print(c_balance)\n grand_total = usd(temp_total + float(current_balance[0].get(\"cash\")))\n return render_template(\"index.html\", data=data, grand_total=grand_total, current_balance=c_balance)" ]
[ "0.5609491", "0.56090033", "0.5605382", "0.54486775", "0.5409336", "0.5349029", "0.5310988", "0.5306069", "0.5277752", "0.52737856", "0.52526504", "0.5243906", "0.52153254", "0.52056533", "0.5202589", "0.51329315", "0.51262546", "0.5124966", "0.51038945", "0.51027447", "0.5072905", "0.50638586", "0.5054858", "0.50308764", "0.5030316", "0.502374", "0.5018404", "0.49769393", "0.4947184", "0.49422055" ]
0.76324147
0
Revert the associated document instance back to this revision. Return the document instance.
def revert(self):
    self.instance.save()
    return self.instance
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def restore(self):\n documentUrl = self.metaData.graveyard[0].selfLink + \"/restore\"\n response = self._adapter.putRequest(documentUrl, self._baseHeader, \"{}\")\n self.metaData.graveyard.pop()\n\n return Document(self._client, response['Headers']['location'])", "def revert(self, ref=None):\n # TODO\n raise NotImplementedError", "def revert_to_revision(self, revision, **kwargs):\n revision_id = obj_or_id(revision, \"revision\", (PageRevision,))\n response = self._requester.request(\n \"POST\",\n \"{}s/{}/pages/{}/revisions/{}\".format(\n self.parent_type, self.parent_id, self.url, revision_id\n ),\n _kwargs=combine_kwargs(**kwargs),\n )\n pagerev_json = response.json()\n pagerev_json.update({\"{self.parent_type}_id\": self.parent_id})\n\n return PageRevision(self._requester, pagerev_json)", "def revert(self):\n original = getattr(self, \"_original\", None)\n if not original:\n return\n\n if hasattr(self, \"output\"):\n output = self.output\n keep_output = True\n else:\n keep_output = False\n\n del self._original\n\n self.__dict__ = original.__dict__\n\n if keep_output:\n self.output = output", "def remove(self, document):\n return self.db.pop(document['id'], None)", "def revert(self, rec=0):\r\n if rec:\r\n result = self._svn('revert -R')\r\n else:\r\n result = self._svn('revert')\r\n return result", "def revert(self, snapshot):\n state_root, checkpoint_id = snapshot\n\n with self.state_db() as state_db:\n # first revert the database state root.\n state_db.root_hash = state_root\n # now roll the underlying database back\n\n self.chaindb.revert(checkpoint_id)", "def __delete__(self, instance):\n instance.doc.pop(self.slug, None)", "def revive(self):\n field_name = self.get_delete_flag_field_name()\n return self.update(**{field_name: None})", "def back(self):\n self.book.back()\n self.book.save()\n self.save()", "def restore(self, session, **attrs):\n body = {\"instance\": {\"restorePoint\": {\"backupRef\": self.id}}}\n body.update(attrs)\n resp = session.post('instances', service=self.service, json=body).body\n return resp['instance']", "def revert( self, version, message ):\n\n context = self.__parent__\n trusted = removeSecurityProxy(context)\n ctx_class = trusted.__class__\n \n has_wf_status = hasattr(context, 'status')\n if has_wf_status:\n wf_status = context.status\n \n # set values on version from context\n self._copyFields(version, trusted)\n\n if has_wf_status:\n context.status = wf_status\n \n msg = _(u\"Reverted to previous version $version.\",\n mapping={'version': version.version_id})\n\n event.notify(\n interfaces.VersionReverted(context, self, version, msg))\n \n self.create(message=msg)", "def restore_object(self, attrs, instance=None):\n obj = super(UnDeleteSerializer, self).restore_object(attrs, instance)\n\n obj.recipient_deleted_at = None\n obj.save()\n return obj", "def revert(self, checkpoint):\n self._validate_checkpoint(checkpoint)\n\n for key, value in self.journal.pop_checkpoint(checkpoint).items():\n if value is None:\n self.wrapped_db.delete(key)\n else:\n self.wrapped_db.set(key, value)", "def revert(self, *args, **kwargs):", "def restore(self, obj):\n return obj", "def delete(self):\n self.current_revision.delete()", "def restore(self):\n return self._restore", "def restore_object(self, attrs, instance=None):\n obj = super(ReplySerializer, self).restore_object(attrs, instance)\n return obj", "def revert(self):\n reverted = Line(l=self)\n reverted.direction *= -1.0\n return reverted", "def revert(self):\n\n if len(self.stack) == 0 or not self.revertable:\n 
return\n\n for s in self.stack:\n s[\"model\"].setPos(s[\"model\"].getPos() + Vec3(0,0,THING_REVERT_DISTANCE))\n\n state = self.stack.pop()\n\n #not sure if this helps, but it can't hurt\n self.model.detachNode()\n\n for x in self.toRevert:\n self.toRevert[x](state[x])", "def __editRevert(self):\n self.activeWindow().revertToUnmodified()", "def undelete(self):\n if self.deleted is not None:\n LOGGER.debug('Soft-undeleting object %s %s',\n self._meta.model.__name__, self.pk)\n self.deleted = None\n self.save()\n\n return self", "def rev(self):\n self.set.reverse()", "def undo(self, outer_instance):\n pass", "def revert(self, ref):\n self._git.head.commit = ref\n self._git.head.reset(index=True, working_tree=True)", "def post_revert(self):", "def declone(self):\n return self.__parent or self", "def backToSource(self, point):\n if self.revertTransformation is not None:\n return self.revertTransformation(point)\n return point", "def reverse(self):\n cls = self.__class__\n # , kind = None, transmission_reflection = None,\n # reflection_against = None,\n # are_normals_on_inc_rays_side = None, are_normals_on_out_rays_side = None\n if self.kind is None:\n rev_kind = None\n else:\n if self.transmission_reflection is None:\n raise ValueError(\"reverse path is ambiguous\")\n elif self.transmission_reflection is TransmissionReflection.transmission:\n rev_kind = self.kind.reverse()\n elif self.transmission_reflection is TransmissionReflection.reflection:\n rev_kind = self.kind\n else:\n raise RuntimeError\n\n return cls(\n self.points,\n self.orientations,\n kind=rev_kind,\n transmission_reflection=self.transmission_reflection,\n reflection_against=self.reflection_against,\n are_normals_on_inc_rays_side=self.are_normals_on_out_rays_side,\n are_normals_on_out_rays_side=self.are_normals_on_inc_rays_side,\n )" ]
[ "0.69118136", "0.62749267", "0.6198141", "0.6081206", "0.58588725", "0.58482563", "0.57816803", "0.56374973", "0.556933", "0.55578405", "0.5556515", "0.5501226", "0.5477532", "0.545381", "0.5427725", "0.5415648", "0.54093003", "0.53723705", "0.531905", "0.5276174", "0.5251659", "0.52482235", "0.5236867", "0.5208264", "0.51529557", "0.5139754", "0.51242363", "0.5058518", "0.5053323", "0.50533205" ]
0.75671864
0
Return the DCTII matrix of order n
def dctmtx(n):
    x, y = np.meshgrid(range(n), range(n))
    D = np.sqrt(2.0/n) * np.cos(np.pi * (2*x+1) * y / (2*n))
    D[0] /= np.sqrt(2)
    return D
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_dct_matrix(N):\r\n dct_m = np.eye(N)\r\n for k in np.arange(N):\r\n for i in np.arange(N):\r\n w = np.sqrt(2 / N)\r\n if k == 0:\r\n w = np.sqrt(1 / N)\r\n dct_m[k, i] = w * np.cos(np.pi * (i + 1 / 2) * k / N)\r\n idct_m = np.linalg.inv(dct_m)\r\n return dct_m, idct_m", "def direct_dctii(x):\n n = x.size\n a = np.empty((n, n), dtype = x.dtype)\n for i in xrange(n):\n for j in xrange(n):\n a[i, j] = x[j] * np.cos(np.pi * (0.5 + j) * i / n)\n\n a[0] *= np.sqrt(1. / n)\n a[1:] *= np.sqrt(2. / n)\n\n return a.sum(axis = 1)", "def direct_dctii_2(x):\n # We are a bit smarter here by computing the coefficient matrix directly,\n # but still O(N^2)\n n = x.size\n\n a = np.cos(np.pi / n * np.linspace(0, n - 1, n)[:, None]\n * np.linspace(0.5, 0.5 + n - 1, n)[None, :])\n a *= x\n a[0] *= np.sqrt(1. / n)\n a[1:] *= np.sqrt(2. / n)\n\n return a.sum(axis = 1)", "def gen_dct(size):\n\n D=tor.from_numpy(dct(np.eye(size),norm=\"ortho\",axis=0)) ## Generate DCT basis\n D=D.view(1,1,D.size(0),-1).type(tor.FloatTensor) ## Resize it and convert it to float datatype\n \n return D", "def _dgp_cov_matrix(Nt, snr2=100, clen2=1):\n f = lambda x: np.exp(-(x**2)/clen2)\n C = snr2 * (2*f(np.arange(Nt)) - f(1+np.arange(Nt))- f(-1+np.arange(Nt)))\n C[0] += 2 + 0.01 # noise, add a small number to regularize\n C[1] += -1\n return scipy.linalg.toeplitz(C)", "def dctm(N, M):\n return math.sqrt(2.0 / M) * np.cos(\n np.kron(np.ones((1, M)), np.arange(N).reshape(-1, 1))\n * np.kron(np.ones((N, 1)), np.pi * (np.arange(1, M + 1) - 0.5) / M)\n )", "def construct_M_N(n):\n n2 = n**2\n D0 = 2*np.ones(n2) # 0th diagonal\n D1 = - np.ones(n2 - 1) # -1st, 1st diagonals\n D1[n-1::n] = 0 # Setting every k*n-1 entries = 0 for k < n\n DN = - np.ones(n2 - n) # -nth, nth diagonals\n return (scipy.sparse.diags((D1, D0, D1), (-1, 0, 1), shape=(n2, n2), format=\"csr\"),\n scipy.sparse.diags((DN, D0, DN), (-n, 0, n), shape=(n2, n2), format=\"csr\"))", "def dctn(array, axes=None):\n\n # Axes along which the DCT is computed. 
The default is over all axes.\n if axes is None:\n axes = range(array.ndim)\n\n # Apply the 1D DCT to each axis in sequence.\n for axis in axes:\n array = dct(array, axis=axis)\n\n return array", "def _gp_cov_matrix(Nt, snr2, clen2):\n f = lambda x: np.exp(-(x**2)/clen2)\n C = snr2 * f(np.arange(Nt))\n C[0] += 1 # noise\n return scipy.linalg.toeplitz(C)", "def bc_outgoing_mat(n, h, k):\n \n d = [1.0, 2.0j*k*h]\n i = [n-1, n-1]\n j = [n-2, n-1]\n return scipy.sparse.coo_matrix((d, (i, j)))", "def dct(n_filters, n_input):\n\n basis = np.empty((n_filters, n_input))\n basis[0, :] = 1.0 / np.sqrt(n_input)\n\n samples = np.arange(1, 2*n_input, 2) * np.pi / (2.0 * n_input)\n\n for i in range(1, n_filters):\n basis[i, :] = np.cos(i*samples) * np.sqrt(2.0/n_input)\n\n return basis", "def dct(f, axis=-1):\n\n # Size of the input along the specified axis.\n n = f.shape[axis]\n\n # Create two vectors containing the integers from 0 to n-1.\n i = k = np.arange(n)\n\n # Compute the x-axis coordinate of the f function.\n x = (2 * i + 1) / (2 * n)\n\n # Compute the outer product of x and kπ, obtaining the nxn matrix that will\n # form the argument of the cosine.\n arg = np.multiply.outer(x, k * np.pi)\n\n # Normalization factors.\n alpha = np.where(k == 0, 1 / np.sqrt(n), np.sqrt(2 / n))\n\n # The orthonormal DCT basis.\n w = alpha * np.cos(arg)\n\n # Compute the convolution between the input array and the DCT basis.\n # The output contains the amplitude coefficient for every frequency.\n c = np.tensordot(f, w, axes=(axis, 0))\n\n # `axis` becomes the last dimension in the output of `np.tensordot`.\n # Move it back to its original position so that the output shape matches\n # the input shape.\n c = np.moveaxis(c, -1, axis)\n\n return c", "def computeInverse2DDCT(self, imge):\n \n # Assuming a square image\n N = imge.shape[0]\n finalInverse2DDCT = np.zeros([N, N], dtype=float)\n for x in xrange(N):\n for y in xrange(N):\n #Compute the DCT value for each cells/points in the resulting transformed image.\n finalInverse2DDCT[x, y] = DCT.__computeSinglePointInverse2DCT(imge, x, y, N)\n return finalInverse2DDCT", "def I(n):\n identity = Matrix(n,n)\n print identity.matrix\n index = 0 \n for i in range(identity.nrows):\n for j in range(identity.ncols):\n identity.matrix[i][index] = 1\n index += 1\n\n\n flat = []\n for i in range(identity.nrows):\n for j in range(identity.ncols):\n flat.append(identity.matrix[i][j])\n\n\n return identity", "def Problem2(n):\n diag_entries = np.empty((3,n))\n diag_entries[0] = np.ones(n)*(-1)\n diag_entries[1] = np.ones(n)*2\n diag_entries[2] = np.ones(n)*(-1)\n A = sparse.spdiags(diag_entries, [-1,0,1],n,n,format=\"csr\")\n return A", "def get_C(n_c,CV_matrix):\n C = np.zeros((n_c, n_c), dtype=np.float32)\n for i in range(3):\n C += np.asfortranarray(CV_matrix[:, :, i]) @ np.asfortranarray(CV_matrix[:, :, np.mod(i + 2, 3)].T)\n C = (C != 0).astype(np.int32)\n return C", "def decomposition_into_s_n_irreducibles(self, n):\r\n w5 = partitions_list(n)\r\n M5 = form_matrix_yt(w5)\r\n card = math.factorial(n)\r\n vec_dic = {}\r\n for k in range(self.dimension()+1):\r\n D = {}\r\n uu = []\r\n vv = []\r\n p = k \r\n A = self.matrix_simmetric_representate(p)\r\n if (p >0 and (p <= self.dimension())):\r\n null = nullspace(A)\r\n w3 = []\r\n for i in range(len(null[0])):\r\n w = []\r\n for j in range(len(null)):\r\n w.append(null[j][i])\r\n w3.append(w) \r\n null = w3\r\n M = np.matrix(w3, dtype= np.float64).transpose()\r\n Mi = np.linalg.pinv(M)\r\n else:\r\n if (p == 0):\r\n M = A\r\n null 
= []\r\n for i in range(A.shape[0]):\r\n aux = []\r\n for j in range(A.shape[1]):\r\n aux.append(M[i,j])\r\n null.append(aux)\r\n M = np.matrix(null, dtype=np.float64)\r\n Mi = M\r\n p = k + 1\r\n if (p>0 and (p <= self.dimension())):\r\n A1=self.matrix_simmetric_representate(p)\r\n col = columnspace(A1)\r\n w4 = []\r\n for i in range(len(col[0])):\r\n w = []\r\n for j in range(len(col)):\r\n w.append(col[j][i])\r\n w4.append(w)\r\n col = w4\r\n M1 = np.matrix(w4, dtype=np.float64).transpose()\r\n Mii = np.linalg.pinv(M1)\r\n for h in w5:\r\n p = k \r\n if (p >0 and (p <= self.dimension())):\r\n if (all(elem == 0 for elem in null[0])):\r\n l1 = 0\r\n else:\r\n he = self.basis_group_oriented_p_chains(p) \r\n on1 = np.ones(len(list(he.dic.keys())), dtype=np.float64) \r\n v = P_chains([],[])\r\n v = P_chains(list(he.dic.keys()),on1)\r\n v1 = permutation_in_simplex_test(v, make_permutation(h))\r\n D1={}\r\n c1 = 0\r\n for i in list(v1.dic.keys()):\r\n c2 = 1\r\n for j in list(he.dic.keys()):\r\n if (i == j):\r\n if (v1.dic[i] == he.dic[j]):\r\n D1[c1] = c2\r\n else:\r\n D1[c1] = -c2\r\n c2 = c2 + 1\r\n c1 = c1 + 1\r\n rr = M.shape[0]\r\n cc = M.shape[1]\r\n Ma = np.zeros([rr,cc],dtype=np.float64)\r\n for i in range(rr):\r\n Ma[i,:] = (M[(abs(D1[i])-1),:]*(np.sign(D1[i])))\r\n l1 = 0\r\n for j in range(cc):\r\n l1 = np.dot(Mi[j,:],Ma[:,j])[0,0] + l1\r\n else:\r\n if (p == 0):\r\n he = self.basis_group_oriented_p_chains(p) \r\n on1 = np.ones(len(list(he.dic.keys())), dtype=np.float64) \r\n v = P_chains([],[])\r\n v = P_chains(list(he.dic.keys()),on1)\r\n v1 = permutation_in_simplex_test(v, make_permutation(h))\r\n D1={}\r\n c1 = 0\r\n for i in list(v1.dic.keys()):\r\n c2 = 1\r\n for j in list(he.dic.keys()):\r\n if (i == j):\r\n if (v1.dic[i] == he.dic[j]):\r\n D1[c1] = c2\r\n else:\r\n D1[c1] = -c2\r\n c2 = c2 + 1\r\n c1 = c1 + 1\r\n rr = M.shape[0]\r\n cc = M.shape[1]\r\n Ma = np.zeros([rr,cc],dtype=np.float64)\r\n for i in range(rr):\r\n Ma[i,:] = (M[(abs(D1[i])-1),:]*(np.sign(D1[i])))\r\n l1 = 0\r\n for j in range(cc):\r\n l1 = np.dot(Mi[j,:],Ma[:,j])[0,0] + l1\r\n else:\r\n l1 = 0\r\n p = k + 1\r\n if (p>0 and (p <= self.dimension())):\r\n hi = self.basis_group_oriented_p_chains(p-1) \r\n on1i = np.ones(len(list(hi.dic.keys())), dtype=np.float64) \r\n vi = P_chains([],[])\r\n vi = P_chains(list(hi.dic.keys()),on1i)\r\n v1i = permutation_in_simplex_test(vi, make_permutation(h))\r\n D1i={}\r\n c1 = 0\r\n for i in list(v1i.dic.keys()):\r\n c2 = 1\r\n for j in list(hi.dic.keys()):\r\n if (i == j):\r\n if (v1i.dic[i] == hi.dic[j]):\r\n D1i[c1] = c2\r\n else:\r\n D1i[c1] = -c2\r\n c2 = c2 + 1\r\n c1 = c1 + 1\r\n rr = M1.shape[0]\r\n cc = M1.shape[1]\r\n Mai = np.zeros([rr,cc],dtype=np.float64)\r\n for i in range(rr):\r\n Mai[i,:] = (M1[(abs(D1i[i])-1),:]*(np.sign(D1i[i])))\r\n l2 = 0\r\n for j in range(cc):\r\n l2 = np.dot(Mii[j,:],Mai[:,j])[0,0] + l2\r\n else:\r\n l2 = 0\r\n uu.append(l1-l2) \r\n vv.append(size_conjugacy_class(h,n))\r\n for i in range(M5.shape[0]):\r\n Ip = 0\r\n for j in range(M5.shape[1]):\r\n Ip = Ip + M5[i,j]*uu[j]*vv[j]\r\n Ip = Ip/card\r\n D[tuple(w5[i])] = abs(round(Ip))\r\n '''Note that I am using round, only because the results obtained are \r\n not esthetics'''\r\n vec_dic[k] = D\r\n return vec_dic", "def hpint_perm(n):\n c_new = []\n D_new = []\n H_new = []\n for i in range(2 ** n - 1):\n c_new_i = np.zeros((n, 1))\n binStr = bin(i + 1)[2:]\n for j in range(len(binStr)):\n c_new_i[n - 1 - j][0] = int(binStr[len(binStr) - 1 - j])\n c_new.append(c_new_i)\n D_new_i = 
np.diag(np.transpose(c_new_i)[0])\n D_new.append(D_new_i)\n H_new_i = np.diag(np.transpose(c_new_i * (-2) + 1)[0])\n H_new.append(H_new_i)\n\n return c_new, D_new, H_new", "def construct_matrix_A(n):\n n2 = n**2\n D0 = 4*np.ones(n2) # 0th diagonal\n D1 = - np.ones(n2 - 1) # -1st, 1st diagonals\n D1[n-1::n] = 0 # Setting every k*n-1 entries = 0 for k < n\n DN = - np.ones(n2 - n) # -nth, nth diagonals\n return scipy.sparse.diags((DN, D1, D0, D1, DN), (-n, -1, 0, 1, n),\n shape=(n2, n2), format=\"csr\")", "def identity_matrix(n):\n data = [[1 if c == r else 0 for c in range(n)] for r in range(n)]\n return Matrix(data)", "def colony(N: int) -> np.ndarray:\n M = np.zeros((N, N))\n n = (N-1)//2\n M[n, n] = 1 # a bacteria at the center then n reproductions\n return binary_dilation(M, iterations = n).astype(int)", "def dct_matrix(rows, cols, unitary=True):\r\n rval = numpy.zeros((rows, cols))\r\n col_range = numpy.arange(cols)\r\n scale = numpy.sqrt(2.0/cols)\r\n for i in xrange(rows):\r\n rval[i] = numpy.cos(i * (col_range*2+1)/(2.0 * cols) * numpy.pi) * scale\r\n\r\n if unitary:\r\n rval[0] *= numpy.sqrt(0.5)\r\n return rval", "def discreteComplexInverseTransform(self,S,n):\n N=len(S)\n M=N/2\n return sum([S[k+M]*cmath.exp(2j*cmath.pi*k*n/N)+S[-k+M]*cmath.exp(-2j*cmath.pi*k*n/N) for k in range(M+1)])", "def hpint_perm_torch(n):\n c_new = []\n D_new = []\n H_new = []\n for i in range(2 ** n - 1):\n c_new_i = torch.zeros(n, 1).to(device)\n binStr = bin(i + 1)[2:]\n for j in range(len(binStr)):\n c_new_i[n - 1 - j][0] = int(binStr[len(binStr) - 1 - j])\n c_new.append(c_new_i)\n D_new_i = torch.diag(c_new_i.T[0]).to(device)\n D_new.append(D_new_i)\n H_new_i = torch.diag((c_new_i * (-2) + 1).T[0]).to(device)\n H_new.append(H_new_i)\n\n return c_new, D_new, H_new", "def cepstrum(input, nceps):\n return dct(input)[:,0:nceps]", "def get_matrixS(n):\n\n mat_nxn = np.zeros([n, n], dtype=int)\n for row_num in range(1, n + 1):\n i = row_num - 1\n if row_num == 1:\n mat_nxn[i][i + 1] = 1\n mat_nxn[i][i + 2] = 1\n elif row_num == 2:\n mat_nxn[i][i - 1] = 1\n mat_nxn[i][i + 2] = 1\n elif row_num == n - 1:\n mat_nxn[i][i + 1] = 1\n mat_nxn[i][i - 2] = 1\n elif row_num == n:\n mat_nxn[i][i - 1] = 1\n mat_nxn[i][i - 2] = 1\n elif row_num % 2 == 1:\n mat_nxn[i][i + 1] = 1\n mat_nxn[i][i + 2] = 1\n mat_nxn[i][i - 2] = 1\n elif row_num % 2 == 0:\n mat_nxn[i][i - 1] = 1\n mat_nxn[i][i + 2] = 1\n mat_nxn[i][i - 2] = 1\n mat_nxn = mat_nxn + np.eye(n, dtype=int)\n mat_2nx2n = np.repeat(np.repeat(mat_nxn, 2, 0), 2, 1)\n return torch.as_tensor(mat_2nx2n)", "def discreteComplexCompose(self,c,n):\n z=self.discreteComplexInverseTransform(c,n)\n return (z.real,z.imag)", "def identity(n):\r\n I = np.zeros((n, n))\r\n diag = np.ones(n)\r\n np.fill_diagonal(I, diag)\r\n return matrix(I)", "def laplacian_mat(n):\n data = [1, -2, 1]*n\n i = flatten([[k,k,k] for k in range(n)])\n j = flatten([[k-1, k, k+1] for k in range(n)])\n return scipy.sparse.coo_matrix((data[1:-1], (i[1:-1], j[1:-1])))", "def identity_matrix(self, n):\r\n IdM = self.zeros_matrix(n, n)\r\n for i in range(n):\r\n IdM[i][i] = 1.0\r\n \r\n return IdM" ]
[ "0.78467476", "0.68740726", "0.67651325", "0.65155023", "0.6344569", "0.63325715", "0.6290011", "0.6210422", "0.61951417", "0.6149881", "0.61017364", "0.6054974", "0.60508734", "0.5981218", "0.5954551", "0.58851784", "0.58829117", "0.5834347", "0.5812427", "0.58059543", "0.57797575", "0.57685894", "0.57494915", "0.5731736", "0.5722549", "0.5702873", "0.57012063", "0.56845623", "0.5641744", "0.56400216" ]
0.75993985
1
klUCB index computation for Poisson distributions.
def klucb_poisson(x, d, precision=1e-6):
    upperbound = x+d+sqrt(d*d+2*x*d)  # looks safe, to check: left (Gaussian) tail of Poisson dev
    return klucb(x, d, kl_poisson, upperbound, precision)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def kl_ucb(self, T, f):\n def index_func(x):\n return x.Sa / x.Na + np.sqrt(f(x.t)*2 / x.Na)\n return self.Index_Policy(T, index_func)", "def kernal_mus(n_kernels):\n l_mu = [1]\n if n_kernels == 1:\n return l_mu\n\n bin_size = 2.0 / (n_kernels - 1) # score range from [-1, 1]\n l_mu.append(1 - bin_size / 2) # mu: middle of the bin\n for i in range(1, n_kernels - 1):\n l_mu.append(l_mu[i] - bin_size)\n print(l_mu)\n return l_mu", "def find_new_kbl(self):\n [Ly,N] = self.b.shape\n z_u_w = self.grid_dict['z_u_w']\n z_u_r = self.grid_dict['z_u_r']\n\n #---> j loop\n for j in range(Ly):\n self.kbl[j] = N #initialize search at top\n\n # in fortran k=N-1,1,-1\n for k in range(N-1,0,-1):\n #INDEX MAP\n k_w = k\n k_r = k-1\n \n for j in range(Ly):\n if z_u_w[j,k_w] > z_u_w[j,N] - self.hbls[j]:\n self.kbl[j] = k_w", "def kull_leib_distance (obs_pval_hist,null_pval_hist):\n\tkl_on = 0\n\tkl_no = 0\n\t#calculate total number of points for normalisation\n\ttot_o=np.sum(obs_pval_hist)\n\ttot_n=np.sum(null_pval_hist)\n\t# normalise\n obs_p_norm = obs_pval_hist / tot_o\n null_p_norm = null_pval_hist / tot_n\n # use masked arrays\n # masked arrays allow to leave out undefined values such as nan and inf\n kl_on = np.sum( obs_p_norm * np.ma.log(obs_p_norm / null_p_norm))\n kl_no = np.sum( null_p_norm * np.ma.log(null_p_norm / obs_p_norm))\n\t# go two ways as not symmetric and take the mean\n\treturn (kl_on+kl_no)/2", "def kull_leib_distance_not_vect (obs_pval_hist,null_pval_hist):\n\tkl_on = 0\n\tkl_no = 0\n\ttot_o=0\n\ttot_n=0\n\t#calculate total number of points for normalisation\n\tfor i in range(len(obs_pval_hist)):\n\t\ttot_o+=obs_pval_hist[i]\n\t\ttot_n+=null_pval_hist[i]\n\t# go two ways as not symmetric and take the mean\n\tfor i in range(len(obs_pval_hist)):\n\t\t\tif (obs_pval_hist[i] !=0 and null_pval_hist[i] != 0):\n\t\t\t\tobs_p = obs_pval_hist[i]/tot_o\n\t\t\t\tnull_p = null_pval_hist[i]/tot_n\n\t\t\t\tkl_on += obs_p * np.math.log( obs_p / null_p )\n\t\t\t\tkl_no += null_p * np.math.log( null_p / obs_p )\n\treturn (kl_on+kl_no)/2", "def generate_pn2kc_weights(nb_pn, nb_kc, min_pn=10, max_pn=20, aff_pn2kc=None, nb_trials=100000, baseline=25000,\r\n rnd=np.random.RandomState(2018), dtype=np.float32):\r\n\r\n dispersion = np.zeros(nb_trials)\r\n best_pn2kc = None\r\n\r\n for trial in range(nb_trials):\r\n pn2kc = np.zeros((nb_pn, nb_kc), dtype=dtype)\r\n\r\n if aff_pn2kc is None or aff_pn2kc <= 0:\r\n vaff_pn2kc = rnd.randint(min_pn, max_pn + 1, size=nb_pn)\r\n else:\r\n vaff_pn2kc = np.ones(nb_pn) * aff_pn2kc\r\n\r\n # go through every kenyon cell and select a nb_pn PNs to make them afferent\r\n for i in range(nb_pn):\r\n pn_selector = rnd.permutation(nb_kc)\r\n pn2kc[i, pn_selector[:vaff_pn2kc[i]]] = 1\r\n\r\n # This selections mechanism can be used to restrict the distribution of random connections\r\n # compute the sum of the elements in each row giving the number of KCs each PN projects to.\r\n pn2kc_sum = pn2kc.sum(axis=0)\r\n dispersion[trial] = pn2kc_sum.max() - pn2kc_sum.min()\r\n # pn_mean = pn2kc_sum.mean()\r\n\r\n # Check if the number of projections per PN is balanced (min max less than baseline)\r\n # if the dispersion is below the baseline accept the sample\r\n if dispersion[trial] <= baseline: return pn2kc\r\n\r\n # cache the pn2kc with the least dispersion\r\n if best_pn2kc is None or dispersion[trial] < dispersion[:trial].min():\r\n best_pn2kc = pn2kc\r\n\r\n # if non of the samples have dispersion lower than the baseline,\r\n # return the less dispersed one\r\n return 
best_pn2kc", "def algo_UCB(mu, Na):\n i = 0\n while i < Na.size:\n if Na[i] < 1:\n return i\n else:\n i+= 1\n t = Na.sum()\n return np.argmax(mu + np.sqrt(2*np.log(t)/Na))", "def dctUelIndex(pdct, uelLabel):\n return _dctmcc.dctUelIndex(pdct, uelLabel)", "def _ucbpe_lcb(x):\n mu, sigma = gp.eval(x, uncert_form='std')\n return mu - beta_th * sigma", "def _ucbpe_lcb(x):\n mu, sigma = gp.eval(x, uncert_form='std')\n return mu - beta_th * sigma", "def grad_KL_mu(self):\n return kron_mvp(self.K_invs, self.q_mu - self.mu)", "def kernel(mu, N):\n\n # Check that -1 <= mu <= 1\n mu = np.clip(mu, -1, 1)\n\n # Need Legendre polynomials\n legPolys = legp(mu, N)\n \n coefs = 2*np.arange(0, N+1) + 1\n \n ker = coefs*legPolys \n\n return ker.sum() / (4.0*np.pi)", "def _compute_kl(self, lvl):\n kl = [] # kernal length\n for n in range(lvl):\n fct = self.scaling**n # up-sampling factor\n kl.append(fct*(self.nfreq-1)+1)\n kl.append(kl[-1]) # repeat the value of the coarsest scale for the approximation coefficient\n return kl[::-1]", "def ref_main(l, k):\n S = sum(binomial(k, z)*(2*z-k)**l for z in range(k+1))\n return S / 2**k", "def ref_main(l, k):\n S = sum(binomial(k, z)*(2*z-k)**l for z in range(k+1))\n return S / 2**k", "def make_k_matrix(self):\r\n K = self.uv_vol + self.Epsilon * self.guv_vol + \\\r\n (self.Epsilon / self.Beta) * self.uv_bound\r\n return K", "def QBO_indices(self, U):\n \n\t\t# raise type error if U is not an iris cube\n\t\tif not isinstance(U, iris.cube.Cube):\n\t\t\traise TypeError(\"U must be an iris cube\") \n\n\t\t# define latitude constraint between 5N and 5S\n\t\tlat_constraint=iris.Constraint(latitude=lambda cell: -5.1 < cell < 5.1)\n\t\t\n\t\t# restrcit latitudes to 5S-5N \n\t\tU_QBO = U.extract(lat_constraint)\n\n\t\t# average over latitudes using the cosine of the latitude of each\n\t\t# gridbox as weights to take into account gridbox area differences\n\t\t# across the range.\n\t\tU.coord('latitude').guess_bounds()\n\t\tcos_weights = cosine_latitude_weights(U_QBO)\n\t\tU_QBO = U_QBO.collapsed('latitude', iris.analysis.MEAN, \n weights = cos_weights) \n\t \n\t\t# assign QBO winds to instance\n\t\tself.U_QBO = U_QBO\n\n\t\t# dt in months should always be 1 for monthly data. 
\n\t\tdt = 1\n\n\t\t# compute fast fourier transform\n\t\tfft_U = np.fft.fft(U_QBO.data, axis = 0)\n \t\t\n\t\t# fetch associated frequencies\n\t\tfreq = np.fft.fftfreq(U_QBO.data[:,0].size, d=dt)\n\t\n\t\t# throw away negative frequencies\n\t\tkeep = freq>=0\n\t\tfft_U = fft_U[keep,:]\n\t\tfreq = freq[keep]\n\n\t\t# calculate the spectrum (absolute value of FFT)\n\t\t# and normalise by the standard deviation of the QBO timeseries\n\t\t# at each pressure level.\n\t\tfourier_spectrum = np.array([np.abs(fft_U[:,i])/np.std(U_QBO.data, \n axis = 0)[i] for i in range(len(fft_U[0,:]))])\n\n\t\t# assign numpy arrays for the fourier power spectrum \n\t\t# and frequency array\n\t\tself.QBO_spectrum = fourier_spectrum\n\t\tself.QBO_freqs = freq\n\t\t\n\t\treturn", "def kernel_mus(self, n_kernels: int):\n l_mu = [1.0]\n if n_kernels == 1:\n return l_mu\n\n bin_size = 2.0 / (n_kernels - 1) # score range from [-1, 1]\n l_mu.append(1 - bin_size / 2) # mu: middle of the bin\n for i in range(1, n_kernels - 1):\n l_mu.append(l_mu[i] - bin_size)\n return l_mu", "def grad_KL_mu(self):\n return np.multiply(np.exp(self.q_S),\n - kron_mvp(self.K_invs, self.mu - self.q_mu))", "def create_K_u(n: int) -> Array:\n return Array([K(i) for i in range(n ** 2 - 1)])", "def __call__(self, u):\n # get index of grid point left of u\n index = self.get_index(u)\n # get current controlpoints\n current_controlpoints = self.get_controlpoints(index)\n # setup matrix to store the values in the de Boor array:\n # deBoorvalues =\n # d[I-2, I-1, I]\n # d[I-1, I, I+1] d[u, I-1, I]\n # d[I, I+1, I+2] d[u, I, I+1] d[u, u, I]\n # d[I+1, I+2, I+3] d[u, I+1, I+2] d[u, u, I+1] d[u, u, u]\n deBoorvalues = scipy.column_stack((current_controlpoints,\n scipy.zeros((4, 6))))\n # calculate values for de Boor array\n for i in range(1, 4):\n for j in range(1, i + 1):\n leftmostknot = index + i - 3 # current leftmost knot\n rightmostknot = leftmostknot + 4 - j # current rightmost knot\n alpha = self.get_alpha(u, [leftmostknot, rightmostknot])\n deBoorvalues[i, j*2:j*2+2] = (\n alpha * deBoorvalues[i-1, (j-1)*2:(j-1)*2+2] +\n (1 - alpha) * deBoorvalues[i, (j-1)*2:(j-1)*2+2]\n )\n return deBoorvalues[3, -2:]", "def getlikeweibull(k, samples):\n \n N = len(samples)\n samplemean = sum(samples) / N\n \n # inverse of the scale parameter\n lambdainv = scipy.special.gamma(1 + 1/k) / samplemean \n \n # equation for likelihood:\n # Nlog(k/lambda) + sum{(k-1)log(x_i/lambda) - (x_i/lambda)^k}\n \n sumterm = 0\n for val in samples:\n sumterm += ((k - 1) * math.log(val * lambdainv, math.e) - (val * lambdainv) ** k)\n \n # log-likelihood\n like = N * math.log(k * lambdainv, math.e) + sumterm\n \n return like", "def nball_volume(R,k=3):\n return (np.pi**(k/2.0)/gamma(k/2.0+1.0))*R**k", "def knotvector_u(self):\n return self._knot_vector_u", "def _u_kln(self, eTs, protocol, noBeta=False):\n L = len(protocol)\n\n addMM = ('MM' in protocol[0].keys()) and (protocol[0]['MM'])\n addSite = ('site' in protocol[0].keys()) and (protocol[0]['site'])\n probe_keys = ['MM','k_angular_ext','k_spatial_ext','k_angular_int'] + \\\n scalables\n probe_key = [key for key in protocol[0].keys() if key in probe_keys][0]\n\n if isinstance(eTs, dict):\n # There is one configuration per state\n K = len(eTs[probe_key])\n N_k = np.ones(K, dtype=int)\n u_kln = []\n E_base = np.zeros(K)\n if addMM:\n E_base += eTs['MM']\n if addSite:\n E_base += eTs['site']\n for l in range(L):\n E = 1. 
* E_base\n for scalable in scalables:\n if scalable in protocol[l].keys():\n E += protocol[l][scalable] * eTs[scalable]\n for key in ['k_angular_ext', 'k_spatial_ext', 'k_angular_int']:\n if key in protocol[l].keys():\n E += protocol[l][key] * eTs[key]\n if noBeta:\n u_kln.append(E)\n else:\n u_kln.append(E / (R * protocol[l]['T']))\n elif isinstance(eTs[0], dict):\n K = len(eTs)\n N_k = np.array([len(eTs[k][probe_key]) for k in range(K)])\n u_kln = np.zeros([K, L, N_k.max()], np.float)\n\n for k in range(K):\n E_base = 0.0\n if addMM:\n E_base += eTs[k]['MM']\n if addSite:\n E_base += eTs[k]['site']\n for l in range(L):\n E = 1. * E_base\n for scalable in scalables:\n if scalable in protocol[l].keys():\n E += protocol[l][scalable] * eTs[k][scalable]\n for key in ['k_angular_ext', 'k_spatial_ext', 'k_angular_int']:\n if key in protocol[l].keys():\n E += protocol[l][key] * eTs[k][key]\n if noBeta:\n u_kln[k, l, :N_k[k]] = E\n else:\n u_kln[k, l, :N_k[k]] = E / (R * protocol[l]['T'])\n elif isinstance(eTs[0], list):\n K = len(eTs)\n N_k = np.zeros(K, dtype=int)\n\n for k in range(K):\n for c in range(len(eTs[k])):\n N_k[k] += len(eTs[k][c][probe_key])\n u_kln = np.zeros([K, L, N_k.max()], np.float)\n\n for k in range(K):\n E_base = 0.0\n C = len(eTs[k])\n if addMM:\n E_base += np.concatenate([eTs[k][c]['MM'] for c in range(C)])\n if addSite:\n E_base += np.concatenate([eTs[k][c]['site'] for c in range(C)])\n for l in range(L):\n E = 1. * E_base\n for scalable in scalables:\n if scalable in protocol[l].keys():\n E += protocol[l][scalable]*np.concatenate([eTs[k][c][scalable] \\\n for c in range(C)])\n for key in ['k_angular_ext', 'k_spatial_ext', 'k_angular_int']:\n if key in protocol[l].keys():\n E += protocol[l][key]*np.concatenate([eTs[k][c][key] \\\n for c in range(C)])\n if noBeta:\n u_kln[k, l, :N_k[k]] = E\n else:\n u_kln[k, l, :N_k[k]] = E / (R * protocol[l]['T'])\n\n if (K == 1) and (L == 1):\n return u_kln.ravel()\n else:\n return (u_kln, N_k)", "def kullback_leibler(p: np.ndarray, q: np.ndarray) -> float:\n kl = 0\n for pi, qi in zip(p, q):\n if pi > 0:\n if qi > 0:\n kl += pi * np.log(pi/qi)\n else:\n kl = np.inf\n return kl", "def nCkarray(*k_values):\n result = 1\n for i, j in enumerate((m for k in k_values for m in range(1, k+1)), 1):\n result = (result * i) // j\n return result", "def test_inu(self):\n lmax = 3\n x = np.array([5000])\n result_i, result_k = bessel_sk.lniknu(x, lmax)\n pih = np.log(0.5*np.pi)\n expP = (1+np.exp(-2*x))\n expM = (1-np.exp(-2*x))\n expected_i = np.array([\n -np.log(2*x**1) + x + np.log(expM),\n -np.log(2*x**2) + x + np.log(expM*(x+1)+x-1),\n -np.log(2*x**3) + x + np.log((3+x**2)*expM-3*x*expP),\n -np.log(2*x**4) + x + np.log((15*x+x**3)*expP-(15+6*x**2)*expM) \n ])\n expected_k = np.array([pih -x - 1*np.log(x),\n pih -x - 2*np.log(x) + np.log(x+1),\n pih -x - 3*np.log(x) + np.log(x**2+3*x+3),\n pih -x - 4*np.log(x) + np.log(x**3+6*x**2+15*x+15)\n ])\n assert_almost_equal(result_i[0]/expected_i.T, 1, decimal=4)\n assert_almost_equal(result_k[0]/expected_k.T, 1, decimal=4)", "def c_index(true_labels, predictions):\n\n true_labels = list(true_labels)\n predictions = list(predictions)\n\n n = 0\n h_sum = 0\n for i in range(len(true_labels)):\n t = true_labels[i]\n p = predictions[i]\n for j in range(i + 1, len(true_labels)):\n nt = true_labels[j]\n np = predictions[j]\n if t != nt:\n n += 1\n if (p < np and t < nt) or (p > np and t > nt):\n h_sum += 1\n elif p == np:\n h_sum += 0.5\n # To avoid 'ZeroDivisionError' exception\n if n == 0:\n return h_sum\n 
return h_sum / n", "def log_poisson(k, l):\n return k*np.log(l) -l - gammaln(k+1)" ]
[ "0.6288863", "0.58540034", "0.5780807", "0.57525176", "0.56694484", "0.5533527", "0.552716", "0.54448533", "0.54232484", "0.54232484", "0.54122174", "0.54034543", "0.53963554", "0.5395306", "0.5395306", "0.538923", "0.53744537", "0.53612316", "0.53517383", "0.5325024", "0.53160185", "0.5315928", "0.5293913", "0.52904284", "0.5284717", "0.52668184", "0.52608585", "0.52530164", "0.52440447", "0.524251" ]
0.5900849
1
Perform style transfer on a detected class in a frame
def create_stylized_detection(style_transfer_executor, style_transfer_class, frame: np.ndarray,
                              detections: list, resize_factor, labels: dict):
    for detection in detections:
        class_idx, box, confidence = [d for d in detection]
        label = labels[class_idx][0]
        if label.lower() == style_transfer_class.lower():
            # Obtain frame size and resized bounding box positions
            frame_height, frame_width = frame.shape[:2]
            x_min, y_min, x_max, y_max = [int(position * resize_factor) for position in box]

            # Ensure box stays within the frame
            x_min, y_min = max(0, x_min), max(0, y_min)
            x_max, y_max = min(frame_width, x_max), min(frame_height, y_max)

            # Crop only the detected object
            cropped_frame = cv_utils.crop_bounding_box_object(frame, x_min, y_min, x_max, y_max)

            # Run style_transfer on preprocessed_frame
            stylized_frame = style_transfer_executor.run_style_transfer(cropped_frame)

            # Paste stylized_frame on the original frame in the correct place
            frame[int(y_min)+1:int(y_max), int(x_min)+1:int(x_max)] = stylized_frame

    return frame
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_style(self):", "def CSSClasses(self):", "def update_style(self):\n pass", "def detect(self, frame, cur_count):\n self.variables['is_black'] = False\n self.process(frame, cur_count)", "def style_transfer_postprocess(preprocessed_frame: np.ndarray, image_shape: tuple):\n\n postprocessed_frame = np.squeeze(preprocessed_frame, axis=0)\n # select original height and width from image_shape\n frame_height = image_shape[0]\n frame_width = image_shape[1]\n postprocessed_frame = cv2.resize(postprocessed_frame, (frame_width, frame_height)).astype(\"float32\") * 255\n postprocessed_frame = cv2.cvtColor(postprocessed_frame, cv2.COLOR_RGB2BGR)\n\n return postprocessed_frame", "def frame(self):", "def hook_frame_selected(self):", "def forward(self, x):\n return self.style_model(x)", "def run_style_transfer(self, content_image):\n # The content image has to be preprocessed to (1, 384, 384, 3)\n preprocessed_style_image = cv_utils.preprocess(content_image, np.float32,\n self.style_transfer_executor.get_shape(), True, keep_aspect_ratio=False)\n\n # Transform content image. output[0] is the stylized image\n stylized_image = self.style_transfer_executor.run([preprocessed_style_image, self.style_bottleneck])[0]\n\n post_stylized_image = style_transfer_postprocess(stylized_image, content_image.shape)\n\n return post_stylized_image", "def process_frame(self, frame):\n\t\treturn frame", "def change_frame(self, frame):\r\n pass", "def assign_class_from_feature_annotation(feature_annotation_df):\n print \"============Start assigning and encoding classes=================\"\n print \"================thresholds=====================\"\n walkthreshold = 0.3\n ambithreshold = 0.25\n phonethreshold = 0.3\n computerthreshold = 0.3\n activity_thresholds = [0.3,0.25,0.3,0.3,0.3,0.3,0.3,0.25]\n activity_names = ['walking','eating','phone','computer','talking', 'reading','in car', 'drinking']\n class_codes = [1,2,3,4,5,6,7,8]\n activity_cols = [s_info.walkproportion_col,\n s_info.eatproportion_col,\n s_info.phoneproportion_col,\n s_info.computerproportion_col,\n s_info.talkproportion_col,\n s_info.readproportion_col,\n s_info.carproportion_col,\n s_info.drinkproportion_col]\n puff_with_puff_duration_threshold = 0.3\n puff_with_segment_duration_threshold = 0.3\n print \"walking: \" + str(walkthreshold)\n print \"drinking/eating: \" + str(ambithreshold)\n print \"phone: \" + str(phonethreshold)\n print \"computer: \" + str(computerthreshold)\n print \"puff: \" + str(puff_with_puff_duration_threshold) + \", \" + str(puff_with_segment_duration_threshold)\n class_df = feature_annotation_df.copy(deep=True)\n \n # class assignment rules\n class_df[s_info.classname_col] = 'others'\n class_df[s_info.classnum_col] = 0\n\n for code, name, threshold, col in zip(class_codes, activity_names, activity_thresholds, activity_cols):\n flag = class_df[col] >= threshold\n class_df[s_info.classname_col][flag] = name\n class_df[s_info.classnum_col][flag] = code\n # for short window size\n puff_flag1 = class_df[s_info.puff_with_segment_duration_col] >= puff_with_segment_duration_threshold\n class_df[s_info.classname_col][puff_flag1] = 'puff'\n class_df[s_info.classnum_col][puff_flag1] = len(class_codes) + 1\n # for short puffs\n puff_flag2 = class_df[s_info.puff_with_puff_duration_col] >= puff_with_puff_duration_threshold\n class_df[s_info.classname_col][puff_flag2] = 'puff'\n class_df[s_info.classnum_col][puff_flag2] = len(class_codes) + 1\n return class_df", "def switch_frame(self, frame_class):\n if frame_class in self.built_frames:\n 
new_frame = self.built_frames[frame_class]\n else:\n raise ValueError(\"Unknown frame\")\n\n if self._frame is not None:\n self._frame.grid_remove()\n self._frame = new_frame\n self._frame.grid()", "def run_style_transfer(self, content_and_style_class,\n num_iterations=3000,\n content_weight=1e-1,\n style_weight=1e2,\n ta_weight=1,\n save=False):\n # trainable to false.\n for layer in self.model.layers:\n layer.trainable = False\n\n # Get the style and content feature representations\n style_features, content_features = self._get_feature_representations(\n content_and_style_class)\n gram_style_features = [self._get_gram_matrix(style_feature)\n for style_feature in style_features]\n\n # Set initial image\n init_image = content_and_style_class.processed_content_image\n init_image = tf.Variable(init_image, dtype=tf.float32)\n # Create our optimizer\n opt = tf.train.AdamOptimizer(\n learning_rate=5, beta1=0.99, epsilon=1e-1)\n\n # Store our best result\n best_loss, best_img = float('inf'), None\n\n # Create a nice config\n loss_weights = (style_weight, content_weight, ta_weight)\n config = {\n 'loss_weights': loss_weights,\n 'init_image': init_image,\n 'gram_style_features': gram_style_features,\n 'content_features': content_features,\n }\n\n # For displaying\n global_start = time.time()\n\n norm_means = np.array([103.939, 116.779, 123.68])\n min_vals = -norm_means\n max_vals = 255 - norm_means\n\n imgs = []\n _, style_tail = os.path.split(\n content_and_style_class.path_to_style_img)\n _, content_tail = os.path.split(\n content_and_style_class.path_to_content_img)\n\n print(\n f\"Initializing Transfer of Style from image: {style_tail} upon \\\n image: {content_tail}\"\n )\n for i in tqdm(range(num_iterations)):\n grads, all_loss = self._compute_gradients(config)\n loss, _, _ = all_loss\n opt.apply_gradients([(grads, init_image)])\n clipped = tf.clip_by_value(init_image, min_vals, max_vals)\n init_image.assign(clipped)\n if loss < best_loss:\n # Update best loss and best image from total loss.\n best_loss = loss\n best_img = content_and_style_class.deprocess_image(\n init_image.numpy())\n if i % 100 == 0:\n imgs.append(content_and_style_class.deprocess_image(\n (init_image.numpy())))\n print('Finished Style Transfer; Total time: {:.4f}s'.format(\n time.time() - global_start))\n if save:\n plt.figure(figsize=(14, 4))\n fig, axes = plt.subplots(num_iterations // 100, 1)\n for i, img in enumerate(imgs):\n axes[i].imshow(img)\n fig.savefig(\"image\")\n fig_best, ax_best = plt.subplots(1, 1)\n ax_best.imshow(best_img)\n fig_best.savefig(\"image_best\")\n return best_img, best_loss", "def dispatch_frame(self, frame):", "def set_sclasses(self, w: Wrapper, classes: Any) -> None:\n w.setProperty(self.style_sclass_property, f\" {' '.join(set(classes))} \")", "def func(frame):\n nonlocal net\n\t\n prevh, prevw, _= frame.shape\n\n wscale = prevw / 480\n hscale = prevh / 320\n\n frame = cv2.resize(frame, (480, 320))\n frame = jetson.utils.cudaFromNumpy(frame)\n detections = net.Detect(frame)\n ret = [(d.ClassID, d.Top*hscale, d.Left*wscale, d.Right*wscale, d.Bottom*hscale) for d in detections]\n print(ret)\n return ret", "def highlight(self,element):\n self = element._parent\n\n def apply_style(s):\n self.execute_script(\"arguments[0].setAttribute('style', arguments[1]);\",\n element, s)\n\n original_style = element.get_attribute('style')\n apply_style(\"background: yellow; border: 2px solid red;\")\n time.sleep(.3)\n apply_style(original_style)", "def classic_transfer(content_img, style_img, args, ctx = 
None, verbose = True):\n\n assert isinstance(args, ClassicTransferArgs), 'Args should be instance of ClassicTransferArgs'\n\n content_img = PreprocessImage(content_img).copyto(ctx)\n style_img = PreprocessImage(style_img).copyto(ctx)\n\n # load pretrained vgg19\n vgg19avg = get_vgg19_avg(pretrained = True)\n # style = [relu1_1, relu2_1, relu3_1, relu4_1, relu5_1]\n # content = [relu4_2]\n input = mx.sym.var('data')\n style_content_symbols = vgg19avg.get_output_symbols(input, args.style_feature, args.content_feature),\n\n style_content_net = mx.gluon.SymbolBlock(inputs = input, outputs = style_content_symbols, params = vgg19avg.collect_params())\n style_content_net.collect_params().reset_ctx(ctx)\n\n # extract target content and style\n target = style_content_net(content_img)[0]\n content_targets = target[len(args.style_feature):]\n target = style_content_net(style_img)[0]\n style_targets = target[:len(args.style_feature)]\n\n # compute target gram matrix\n target_gram_list, gram_scale_list = __get_style_gram(style_targets)\n\n # Generate random image to do style transfer\n random_img = mx.nd.random_uniform(-0.1, 0.1, content_img.shape, ctx = ctx)\n clip_norm = np.prod(random_img.shape)\n\n # optimizer\n lr = mx.lr_scheduler.FactorScheduler(step = args.lr_sched_delay, factor = args.lr_sched_factor)\n optimizer = mx.optimizer.NAG(learning_rate = args.learning_rate, wd = 0.0001,\n momentum = 0.95, lr_scheduler = lr)\n\n # This is needed for momentum\n optim_state = optimizer.create_state(0, random_img)\n\n # Training and transfer\n random_img.attach_grad() # attach grad for update\n for epoch in tqdm(range(args.epochs)):\n with mx.autograd.record():\n style_content = style_content_net(random_img)[0]\n contents = style_content[len(args.style_feature):]\n styles = style_content[:len(args.style_feature)]\n\n gram_list, _ = __get_style_gram(styles)\n total_loss = 0\n for content, target_content in zip(contents, content_targets):\n loss = mx.nd.sum(mx.nd.square(content - target_content))\n total_loss = total_loss + loss * args.content_weight\n\n for gram, target_gram, gscale in zip(gram_list, target_gram_list, gram_scale_list):\n loss = mx.nd.sum(mx.nd.square(gram - target_gram))\n total_loss = total_loss + loss * args.style_weight / gscale\n\n total_loss.backward()\n\n gnorm = mx.nd.norm(random_img.grad).asscalar()\n if gnorm > clip_norm:\n random_img.grad[:] *= clip_norm / gnorm\n\n if verbose:\n print('Training: epoch %d, loss: %f' % (epoch, total_loss.asscalar()))\n\n old_img = random_img.copy()\n tv_grad = __get_tv_grad(random_img, ctx, args.tv_weight)\n optimizer.update(0, random_img, random_img.grad + tv_grad, optim_state)\n\n eps = (mx.nd.norm(old_img - random_img) / mx.nd.norm(random_img)).asscalar()\n if eps < args.stop_eps:\n print('eps (%f) < args.stop_eps (%f), training finished' % (eps, args.stop_eps))\n break\n\n yield PostprocessImage(random_img)\n yield PostprocessImage(random_img)", "def detect(self, frame, foreground_mask):\n pass", "def _apply_style(self):\n for actor in self.clean_actors:\n if settings.SHADER_STYLE != \"cartoon\":\n style = settings.SHADER_STYLE\n else:\n if self.backend: # notebook backend\n print(\n 'Shader style \"cartoon\" cannot be used in a notebook'\n )\n style = \"off\"\n\n try:\n actor.mesh.reverse() # flip normals\n actor.mesh.lighting(style=style)\n\n actor._mesh.reverse()\n actor._mesh.lighting(style=style)\n except AttributeError:\n pass", "def run_frame(self, ti, img):\n pass", "def postprocess(frame, outs, save_image=False):\n frameHeight = 
frame.shape[0]\n frameWidth = frame.shape[1]\n\n # Scan through all the bounding boxes output from the network and keep only the\n # ones with high confidence scores. Assign the box's class label as the class with the highest score.\n classIds = []\n confidences = []\n boxes = []\n for out in outs:\n for detection in out:\n scores = detection[5:]\n classId = np.argmax(scores)\n confidence = scores[classId]\n if confidence > confThreshold:\n center_x = int(detection[0] * frameWidth)\n center_y = int(detection[1] * frameHeight)\n width = int(detection[2] * frameWidth)\n height = int(detection[3] * frameHeight)\n left = int(center_x - width / 2)\n top = int(center_y - height / 2)\n classIds.append(classId)\n confidences.append(float(confidence))\n boxes.append([left, top, width, height])\n\n # non maximum suppression to eliminate redundant overlapping boxes with lower confidences\n indices = cv2.dnn.NMSBoxes(boxes, confidences, confThreshold, nmsThreshold)\n for i in indices:\n i = i[0]\n # Skip classes that aren't people\n if classIds[i] != 0:\n continue\n box = boxes[i]\n left = box[0]\n top = box[1]\n width = box[2]\n height = box[3]\n if save_image:\n # Save cropped image of detected object\n class_name = classes[classIds[i]]\n dimensions = (top, top + height, left, left + width)\n utils.write_image(frame, \"output/yolo\", class_name, dimensions)\n drawPred(classIds[i], confidences[i], left, top, left + width, top + height)", "def postprocess(self, frame, outs):\n frameHeight = frame.shape[0]\n frameWidth = frame.shape[1]\n classIds = []\n confidences = []\n boxes = []\n # Scan through all the bounding boxes output from the network and keep only the\n # ones with high confidence scores. Assign the box's class label as the class with the highest score.\n # your code here\n # loop over each of the layer output (I guess the outs is the number of anchor boxes)\n for output in outs:\n # loop over each of the detection\n for detection in output:\n # extract the class ID and confidence of the current object detection\n # the detection is an array of [bx, by, bw, bh, Pc, c1, c2, ..., c80]\n # Pc is the probability that there is an object\n scores = detection[5:]\n classID = np.argmax(scores)\n confidence = scores[classID]\n \n if confidence > self.confThreshold:\n center_x = int(detection[0] * frameWidth)\n center_y = int(detection[1] * frameHeight)\n width = int(detection[2] * frameWidth)\n height = int(detection[3] * frameHeight)\n left = int(center_x - width / 2)\n top = int(center_y - height / 2)\n \n classIds.append(classID)\n confidences.append(float(confidence))\n boxes.append([left, top, width, height])\n \n # Perform non maximum suppression to eliminate redundant overlapping boxes with\n # lower confidences.\n # your code here\n idxs = cv2.dnn.NMSBoxes(boxes, confidences, self.confThreshold, self.nmsThreshold)\n \n # get the bounding bxoes after performing non maximum suppression\n # your code here\n output_boxes = []\n if len(idxs) > 0:\n for i in idxs.flatten(): # idxs = [[1],[2],[5],...], idxs.flatten() = [1,2,5,...]\n output_boxes.append(boxes[i])\n left = boxes[i][0]\n top = boxes[i][1]\n width = boxes[i][2]\n height = boxes[i][3]\n right = left + width\n bottom = top + height\n frame = self.drawPred(frame, classIds[i], confidences[i], left, top, right, bottom)\n \n output_image = frame\n return output_image, output_boxes", "def stylization(stretched_image, style_image,\r\n\t\talpha = 1.0, style_size = 512, crop_size = 0):\r\n\ttf.reset_default_graph()\r\n\r\n\tassert 
stretched_image.ndim == 3\r\n\t\r\n\tcp = [\"./models/relu5_1\",\r\n\t\t\t \"./models/relu4_1\",\r\n\t\t \"./models/relu3_1\",\r\n\t\t \"./models/relu2_1\",\r\n\t\t \"./models/relu1_1\"]\r\n\trelu_targets = [\"relu5_1\", \"relu4_1\", \"relu3_1\", \"relu2_1\", \"relu1_1\"]\r\n\t\t#*****************\r\n\t\t## need to modify checkpoints, relu_targets, and vgg_path\r\n\twct_model = WCT(checkpoints=cp,\r\n\t\t relu_targets=relu_targets,\r\n\t\t vgg_path='./models/vgg_normalised.t7'\r\n\r\n\t\t )\r\n\r\n\r\n\tfor style_fullpath in style_image:\r\n\t\tstyle_prefix, style_ext = os.path.splitext(style_fullpath)\r\n\t\tstyle_prefix = os.path.basename(style_prefix) # Extract filename prefix without ext\r\n\r\n\t\tstyle_img = skimage.io.imread(style_fullpath)\r\n\r\n\t\tif style_size > 0:\r\n\t\t\tstyle_img = resize_to(style_img, style_size)\r\n\t\tif crop_size > 0:\r\n\t\t\tstyle_img = center_crop(style_img, crop_size)\r\n\r\n\t\t\"\"\"\r\n\t if keep_colors:\r\n\t style_img = preserve_colors_np(style_img, content_img)\r\n\t \"\"\"\r\n\t # Run the frame through the style network\r\n\r\n\t\tstylized_rgb = wct_model.predict(stretched_image, style_img, alpha).astype(\"uint8\")\r\n\r\n\r\n\t ## the stylized_rgb size may not be equal to the original content image size\r\n\t\tstylized_rgb = image_align(stretched_image, stylized_rgb)\r\n\r\n\r\n\treturn stylized_rgb", "def extended_frame_annotation(self, original_frame):\n self.frame = self.annotated_frame(original_frame)\n text = \"\"\n if self.is_right():\n text = \"Looking right\"\n elif self.is_left():\n text = \"Looking left\"\n elif self.is_center():\n text = \"Looking center\"\n\n h_ratio = \"HR: \" + str(self.horizontal_ratio())[:4]\n v_ratio = \"VR: \" + str(self.vertical_ratio())[:4]\n\n width = int(0.9 * self.frame.shape[1])\n height = int(0.9 * self.frame.shape[0])\n\n # cv2.putText(self.frame, text, (60, 60), cv2.FONT_HERSHEY_DUPLEX, 2, (255, 0, 0), 2)\n # cv2.putText(self.frame, h_ratio, (60, height), cv2.FONT_HERSHEY_DUPLEX, 2, (255, 0, 0), 2)\n # cv2.putText(self.frame, v_ratio, (int(0.8 * width), height), cv2.FONT_HERSHEY_DUPLEX, 2, (255, 0, 0), 2)\n return self.frame", "def handle(self, frame, cur_count):\n self.variables['is_black'] = True\n if DEBUG_LEVEL > 1:\n log(\"[%s]: Handled\" % (self.name()))", "def switchFrame(self, frameClass):\r\n newFrame = frameClass(self)\r\n if self._frame is not None:\r\n self._frame.destroy()\r\n self._frame = newFrame\r\n self._frame.pack()", "def run_style_transfer(cnn, normalization_mean, normalization_std,\n args, content_layers_default, style_layers_default, num_steps,\n style_weight, content_weight): # default: style_weight = 1e6, content_weight = 1\n content_img = image_loader(args.content, args.img_size)\n style_img = image_loader(args.style, args.img_size)\n input_img = content_img.clone()\n assert style_img.size() == content_img.size(), \\\n \"we need to import style and content images of the same size\"\n \n logprint('Building the style transfer model..')\n model, style_losses, content_losses = get_style_model_and_losses(cnn,\n normalization_mean, normalization_std, style_img, content_img, \n args, content_layers_default, style_layers_default)\n \n if args.fft:\n input_img = fft_image(input_img.shape).to(device, torch.float) # convert to fft parameterization\n optimizer = get_input_optimizer(input_img)\n \n logprint('Optimizing..')\n run = [0]\n while run[0] <= num_steps:\n def closure():\n input_img.data.clamp_(0, 1) # correct the values of updated input image\n optimizer.zero_grad()\n 
model(input_img)\n style_score = 0\n content_score = 0\n\n for layer_name, sl in style_losses.items():\n style_score += sl.loss\n if args.plot_feature and run[0] == num_steps: # visualize feature maps at the last iter\n analyze_gram(sl.gram, layer_name) # analyze the gram matrix, like SVD analysis\n visualize_feature_map(sl.feat, layer_id=layer_name, save_dir=logger.gen_img_path, prefix=prefix, ext=args.ext)\n\n for layer_name, cl in style_losses.items():\n content_score += cl.loss\n\n style_score *= style_weight\n content_score *= content_weight\n loss = style_score + content_score\n loss.backward()\n\n run[0] += 1\n if run[0] % 50 == 0:\n logprint(\"run {}:\".format(run))\n logprint('Style Loss : {:4f} Content Loss: {:4f}'.format(style_score.item(), content_score.item()))\n return style_score + content_score\n\n optimizer.step(closure)\n if run[0] % 100 == 0:\n input_img.data.clamp_(0, 1)\n content_name = os.path.split(args.content)[1].split('.')[0] \n style_name = os.path.split(args.style)[1].split('.')[0]\n out_path = \"%s/%s__%s__%s_iter%d.jpg\" % (logger.gen_img_path, content_name, style_name, args.net, run[0])\n vutils.save_image(input_img, out_path)", "def run_style_transfer(cnn, normalization, content_img, style_img, input_img, mask_img, num_steps = 3000,\n style_weight = 100, content_weight = 5):\n print('Building the style transfer model..')\n model, style_losses, content_losses = get_style_model_and_losses(cnn, normalization, style_img, content_img, mask_img)\n optimizer = LBFGS([input_img.requires_grad_()], max_iter=num_steps,lr = 1)\n\n print('Optimizing..')\n run = [0]\n def closure():\n optimizer.zero_grad()\n model(input_img)\n style_score = 0\n content_score = 0\n\n for sl in style_losses:\n style_score += sl.loss\n for cl in content_losses:\n content_score += cl.loss\n\n style_score *= style_weight\n content_score *= content_weight\n\n loss = style_score + content_score\n loss.backward()\n\n if run[0] % 100 == 0:\n print(\"run {}:\".format(run))\n print('Style Loss : {} Content Loss: {}'.format(style_score.item(), content_score.item()))\n # print()\n # plt.figure(figsize = (8, 8))\n #imshow(input_img.clone())\n run[0] += 1\n\n return style_score + content_score\n\n optimizer.step(closure)\n\n # a last correction...\n input_img.data.clamp_(0, 1)\n\n return input_img" ]
[ "0.555527", "0.55425155", "0.552141", "0.53011477", "0.5250051", "0.5232363", "0.5162412", "0.5114399", "0.5083028", "0.5069768", "0.50682425", "0.5024041", "0.5003642", "0.4973851", "0.49458343", "0.49029955", "0.48944783", "0.48923257", "0.48872736", "0.4868653", "0.48496276", "0.48371956", "0.48216224", "0.48189706", "0.47867823", "0.4779204", "0.47611025", "0.4750509", "0.47455427", "0.4732731" ]
0.6410726
0
Creates an inference executor for style predict network, style transfer network, list of backends and a style image.
def __init__(self, style_predict_model_path: str, style_transfer_model_path: str, style_image: np.ndarray,
             backends: list, delegate_path: str):
    self.style_predict_executor = network_executor_tflite.TFLiteNetworkExecutor(style_predict_model_path,
                                                                                backends, delegate_path)
    self.style_transfer_executor = network_executor_tflite.TFLiteNetworkExecutor(style_transfer_model_path,
                                                                                 backends, delegate_path)
    self.style_bottleneck = self.run_style_predict(style_image)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def construct_graph(network_class: Type[InferenceNetwork],\n config: Path, checkpoint_dir: str,\n batch_size: int,\n batches_per_step: int,\n image_filenames: Tuple[str],\n loop: bool,\n preprocess_fn: Callable,\n num_ipus: int,\n mode: str,\n save_graph_pb: bool) -> Tuple[tf.Operation, tf.Operation, tf.Operation]:\n # Model specific config\n with open(config.as_posix()) as file_stream:\n try:\n config_dict = yaml.safe_load(file_stream)\n except yaml.YAMLError as exc:\n tf.logging.error(exc)\n\n config_dict['network_name'] = config.stem\n if 'dtype' not in config_dict:\n config_dict[\"dtype\"] = 'float16'\n\n # Create inference optimized frozen graph definition\n network = network_class(input_shape=config_dict[\"input_shape\"],\n num_outputs=1000, batch_size=batch_size,\n data_type=config_dict['dtype'],\n config=config_dict,\n checkpoint_dir=checkpoint_dir)\n\n # Export frozen graph to event file to view in Tensorboard\"\n if save_graph_pb:\n log_dir = Path(f\"{config_dict['network_name']}_graph\")\n graph_filename = f\"{log_dir}/{config_dict['network_name']}_graph.pb\"\n if not log_dir.exists():\n log_dir.mkdir()\n with tf.io.gfile.GFile(graph_filename, \"wb\") as f:\n f.write(network.optimized_graph.SerializeToString())\n logging.info(\"%d ops in the final graph.\" % len(network.optimized_graph.node))\n import_to_tensorboard(graph_filename, log_dir=log_dir.as_posix())\n\n # Reset graph before creating one on the IPU\n tf.reset_default_graph()\n\n # Create dataset\n dataset = get_dataset(image_filenames, batch_size, loop=loop, preprocess_fn=preprocess_fn,\n img_width=config_dict[\"input_shape\"][1],\n img_height=config_dict[\"input_shape\"][0], dtype=config_dict['dtype'])\n\n # Set up graph on device, connect infeed and outfeed to the graph.\n num_replicas = num_ipus if mode == 'replicated' else 1\n infeed_queue = ipu_infeed_queue.IPUInfeedQueue(dataset, device_ordinal=0, feed_name=\"infeed\",\n replication_factor=num_replicas)\n outfeed_queue = ipu_outfeed_queue.IPUOutfeedQueue(device_ordinal=0, feed_name=\"outfeed\",\n outfeed_mode=ipu_outfeed_queue.IPUOutfeedMode.ALL,\n replication_factor=num_replicas)\n\n def comp_fn():\n def body(img):\n with scopes.ipu_scope('/device:IPU:0'):\n probs = tf.import_graph_def(network.optimized_graph,\n input_map={network.graph_input: img},\n name=\"optimized\",\n return_elements=[network.graph_output])[0]\n outfeed_op = outfeed_queue.enqueue(probs)\n # Note that enqueue happens on the IPU.\n return outfeed_op\n\n return loops.repeat(batches_per_step,\n body,\n [],\n infeed_queue)\n\n loop_op = ipu_compiler.compile(comp_fn, [])\n\n # The dequeue of the outfeed needs to happen on the CPU.\n with tf.device('cpu'):\n outfeed_dequeue = outfeed_queue.dequeue()\n\n ipu_utils.move_variable_initialization_to_cpu()\n return loop_op, infeed_queue.initializer, outfeed_dequeue", "def inference(config_file, image_file):\n # Get config\n FLAGS = Flags(config_file).get()\n out_charset = load_charset(FLAGS.charset)\n num_classes = len(out_charset)\n net = get_network(FLAGS, out_charset)\n\n if FLAGS.use_rgb:\n num_channel = 3\n mode = cv2.IMREAD_COLOR\n else:\n num_channel = 1\n mode = cv2.IMREAD_GRAYSCALE\n\n # Input node\n image = tf.placeholder(tf.uint8,\n shape=[None, None, num_channel],\n name='input_node')\n\n # Network\n proc_image = net.preprocess_image(image, is_train=False)\n proc_image = tf.expand_dims(proc_image, axis=0)\n proc_image.set_shape(\n [None, FLAGS.resize_hw.height, FLAGS.resize_hw.width, num_channel])\n logits, sequence_length = 
net.get_logits(proc_image,\n is_train=False,\n label=None)\n prediction, log_prob = net.get_prediction(logits, sequence_length)\n prediction = tf.sparse_to_dense(sparse_indices=prediction.indices,\n sparse_values=prediction.values,\n output_shape=prediction.dense_shape,\n default_value=num_classes,\n name='output_node')\n\n # Restore\n restore_model = get_init_trained()\n sess = tf.Session()\n restore_model(sess, FLAGS.eval.model_path)\n\n # Run\n img = cv2.imread(image_file, mode)\n img = np.reshape(img, [img.shape[0], img.shape[1], num_channel])\n predicted = sess.run(prediction, feed_dict={image: img})\n string = get_string(predicted[0], out_charset)\n string = adjust_string(string, FLAGS.eval.lowercase,\n FLAGS.eval.alphanumeric)\n print(string)\n\n return string", "def inference(images, indication, flags):\r\n # We instantiate all variables using tf.get_variable() instead of\r\n # tf.Variable() in order to share variables across multiple GPU training runs.\r\n # If we only ran this model on a single GPU, we could simplify this function\r\n # by replacing all instances of tf.get_variable() with tf.Variable().\r\n \r\n # conv1\r\n with tf.variable_scope('conv1') as scope:\r\n kernel = _variable_with_weight_decay('weights',\r\n shape=[7, 7, 3, 32],\r\n init_parameter=0.001,\r\n wd=0.0,\r\n use_fp16=flags.use_fp16)\r\n conv = tf.nn.conv2d(input=images, filter=kernel, strides=[1, 2, 2, 1], padding='SAME')\r\n biases = _variable_on_cpu('biases', [32], tf.constant_initializer(0.0), flags.use_fp16)\r\n pre_activation = tf.nn.bias_add(conv, biases)\r\n conv_1 = tf.nn.leaky_relu(pre_activation, alpha=0.5 ,name=scope.name)\r\n _activation_summary(conv_1)\r\n\r\n \r\n # pool1\r\n pool_1 = tf.nn.max_pool(conv_1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],\r\n padding='SAME', name='pool1') \r\n # conv2\r\n with tf.variable_scope('conv2') as scope:\r\n kernel = _variable_with_weight_decay('weights',\r\n shape=[3, 3, 32, 32],\r\n init_parameter=0.001,\r\n wd=0.0,\r\n use_fp16=flags.use_fp16)\r\n conv = tf.nn.conv2d(pool_1, kernel, [1, 1, 1, 1], padding='SAME')\r\n biases = _variable_on_cpu('biases', [32], tf.constant_initializer(0.0), flags.use_fp16)\r\n pre_activation = tf.nn.bias_add(conv, biases) \r\n conv_2 = tf.nn.leaky_relu(pre_activation, alpha=0.5 ,name=scope.name)\r\n _activation_summary(conv_2)\r\n \r\n # conv3\r\n with tf.variable_scope('conv3') as scope:\r\n kernel = _variable_with_weight_decay('weights',\r\n shape=[3, 3, 32, 32],\r\n init_parameter=0.001,\r\n wd=0.0,\r\n use_fp16=flags.use_fp16)\r\n conv = tf.nn.conv2d(conv_2, kernel, [1, 1, 1, 1], padding='SAME')\r\n biases = _variable_on_cpu('biases', [32], tf.constant_initializer(0.0), flags.use_fp16)\r\n pre_activation = tf.nn.bias_add(conv, biases)\r\n conv_3 = tf.nn.leaky_relu(pre_activation, alpha=0.5 ,name=scope.name)\r\n _activation_summary(conv_3) \r\n \r\n \r\n # pool2\r\n pool_2 = tf.nn.max_pool(conv_3, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],\r\n padding='SAME', name='pool2')\r\n \r\n # conv4\r\n with tf.variable_scope('conv4') as scope:\r\n kernel = _variable_with_weight_decay('weights',\r\n shape=[3, 3, 32, 64],\r\n init_parameter=0.001,\r\n wd=0.0,\r\n use_fp16=flags.use_fp16)\r\n conv = tf.nn.conv2d(pool_2, kernel, [1, 1, 1, 1], padding='SAME')\r\n biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.0), flags.use_fp16)\r\n pre_activation = tf.nn.bias_add(conv, biases) \r\n conv_4 = tf.nn.leaky_relu(pre_activation, alpha=0.5 ,name=scope.name)\r\n _activation_summary(conv_4)\r\n \r\n # conv5\r\n with 
tf.variable_scope('conv5') as scope:\r\n kernel = _variable_with_weight_decay('weights',\r\n shape=[3, 3, 64, 64],\r\n init_parameter=0.001,\r\n wd=0.0,\r\n use_fp16=flags.use_fp16)\r\n conv = tf.nn.conv2d(conv_4, kernel, [1, 1, 1, 1], padding='SAME')\r\n biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.0), flags.use_fp16)\r\n pre_activation = tf.nn.bias_add(conv, biases)\r\n conv_5 = tf.nn.leaky_relu(pre_activation, alpha=0.5 ,name=scope.name)\r\n _activation_summary(conv_5) \r\n\r\n # conv6\r\n with tf.variable_scope('conv6') as scope:\r\n kernel = _variable_with_weight_decay('weights',\r\n shape=[3, 3, 64, 64],\r\n init_parameter=0.001,\r\n wd=0.0,\r\n use_fp16=flags.use_fp16)\r\n conv = tf.nn.conv2d(conv_5, kernel, [1, 1, 1, 1], padding='SAME')\r\n biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.0), flags.use_fp16)\r\n pre_activation = tf.nn.bias_add(conv, biases)\r\n conv_6 = tf.nn.leaky_relu(pre_activation, alpha=0.5 ,name=scope.name)\r\n _activation_summary(conv_6) \r\n \r\n # pool3\r\n pool_3 = tf.nn.max_pool(conv_6, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],\r\n padding='SAME', name='pool3') \r\n # conv7\r\n with tf.variable_scope('conv7') as scope:\r\n kernel = _variable_with_weight_decay('weights',\r\n shape=[3, 3, 64, 128],\r\n init_parameter=0.001,\r\n wd=0.0,\r\n use_fp16=flags.use_fp16)\r\n conv = tf.nn.conv2d(pool_3, kernel, [1, 1, 1, 1], padding='SAME')\r\n\r\n biases = _variable_on_cpu('biases', [128], tf.constant_initializer(0.0), flags.use_fp16)\r\n pre_activation = tf.nn.bias_add(conv, biases)\r\n conv_7 = tf.nn.leaky_relu(pre_activation, alpha=0.5 ,name=scope.name) \r\n _activation_summary(conv_7)\r\n\r\n # conv8\r\n with tf.variable_scope('conv8') as scope:\r\n kernel = _variable_with_weight_decay('weights',\r\n shape=[3, 3, 128, 128],\r\n init_parameter=0.001,\r\n wd=0.0,\r\n use_fp16=flags.use_fp16)\r\n conv = tf.nn.conv2d(conv_7, kernel, [1, 1, 1, 1], padding='SAME')\r\n biases = _variable_on_cpu('biases', [128], tf.constant_initializer(0.0), flags.use_fp16)\r\n pre_activation = tf.nn.bias_add(conv, biases) \r\n conv_8 = tf.nn.leaky_relu(pre_activation, alpha=0.5 ,name=scope.name)\r\n _activation_summary(conv_8) \r\n \r\n # conv9\r\n with tf.variable_scope('conv9') as scope:\r\n kernel = _variable_with_weight_decay('weights',\r\n shape=[3, 3, 128, 128],\r\n init_parameter=0.001,\r\n wd=0.0,\r\n use_fp16=flags.use_fp16)\r\n conv = tf.nn.conv2d(conv_8, kernel, [1, 1, 1, 1], padding='SAME')\r\n biases = _variable_on_cpu('biases', [128], tf.constant_initializer(0.0), flags.use_fp16)\r\n pre_activation = tf.nn.bias_add(conv, biases)\r\n conv_9 = tf.nn.leaky_relu(pre_activation, alpha=0.5 ,name=scope.name)\r\n _activation_summary(conv_9) \r\n \r\n # conv10\r\n with tf.variable_scope('conv10') as scope:\r\n kernel = _variable_with_weight_decay('weights',\r\n shape=[3, 3, 128, 128],\r\n init_parameter=0.001,\r\n wd=0.0,\r\n use_fp16=flags.use_fp16)\r\n conv = tf.nn.conv2d(conv_9, kernel, [1, 1, 1, 1], padding='SAME')\r\n biases = _variable_on_cpu('biases', [128], tf.constant_initializer(0.0), flags.use_fp16)\r\n pre_activation = tf.nn.bias_add(conv, biases)\r\n conv_10 = tf.nn.leaky_relu(pre_activation, alpha=0.5 ,name=scope.name)\r\n _activation_summary(conv_10) \r\n \r\n # pool4\r\n pool_4 = tf.nn.max_pool(conv_10, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],\r\n padding='SAME', name='pool4') \r\n # conv11\r\n with tf.variable_scope('conv11') as scope:\r\n kernel = _variable_with_weight_decay('weights',\r\n shape=[3, 3, 128, 256],\r\n 
init_parameter=0.001,\r\n wd=0.0,\r\n use_fp16=flags.use_fp16)\r\n conv = tf.nn.conv2d(pool_4, kernel, [1, 1, 1, 1], padding='SAME')\r\n biases = _variable_on_cpu('biases', [256], tf.constant_initializer(0.0), flags.use_fp16)\r\n pre_activation = tf.nn.bias_add(conv, biases)\r\n conv_11 = tf.nn.leaky_relu(pre_activation, alpha=0.5 ,name=scope.name)\r\n _activation_summary(conv_11)\r\n \r\n # conv12\r\n with tf.variable_scope('conv12') as scope:\r\n kernel = _variable_with_weight_decay('weights',\r\n shape=[3, 3, 256, 256],\r\n init_parameter=0.001,\r\n wd=0.0,\r\n use_fp16=flags.use_fp16)\r\n conv = tf.nn.conv2d(conv_11, kernel, [1, 1, 1, 1], padding='SAME')\r\n biases = _variable_on_cpu('biases', [256], tf.constant_initializer(0.0), flags.use_fp16)\r\n pre_activation = tf.nn.bias_add(conv, biases)\r\n conv_12 = tf.nn.leaky_relu(pre_activation, alpha=0.5 ,name=scope.name) \r\n _activation_summary(conv_12) \r\n \r\n # conv13\r\n with tf.variable_scope('conv13') as scope:\r\n kernel = _variable_with_weight_decay('weights',\r\n shape=[3, 3, 256, 256],\r\n init_parameter=0.001,\r\n wd=0.0,\r\n use_fp16=flags.use_fp16)\r\n conv = tf.nn.conv2d(conv_12, kernel, [1, 1, 1, 1], padding='SAME') \r\n biases = _variable_on_cpu('biases', [256], tf.constant_initializer(0.0), flags.use_fp16)\r\n pre_activation = tf.nn.bias_add(conv, biases)\r\n conv_13 = tf.nn.leaky_relu(pre_activation, alpha=0.5 ,name=scope.name) \r\n _activation_summary(conv_13) \r\n \r\n # conv14\r\n with tf.variable_scope('conv14') as scope:\r\n kernel = _variable_with_weight_decay('weights',\r\n shape=[3, 3, 256, 256],\r\n init_parameter=0.001,\r\n wd=0.0,\r\n use_fp16=flags.use_fp16)\r\n conv = tf.nn.conv2d(conv_13, kernel, [1, 1, 1, 1], padding='SAME')\r\n biases = _variable_on_cpu('biases', [256], tf.constant_initializer(0.0), flags.use_fp16)\r\n pre_activation = tf.nn.bias_add(conv, biases)\r\n conv_14 = tf.nn.leaky_relu(pre_activation, alpha=0.5 ,name=scope.name)\r\n _activation_summary(conv_14) \r\n\r\n # pool5\r\n pool_5 = tf.nn.max_pool(conv_14, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],\r\n padding='SAME', name='pool5') \r\n # dropout1\r\n dropout_1 = tf.nn.dropout(pool_5, keep_prob=0.25, name='dropout1') \r\n \r\n # maxout1\r\n with tf.variable_scope('maxout1') as scope:\r\n # reshape from convolution\r\n reshape = tf.reshape(dropout_1, [-1, 16384])\r\n weights = _variable_with_weight_decay('weights', shape=[16384, 512, 2],\r\n init_parameter=0.001, wd=0.0, use_fp16=flags.use_fp16) \r\n biases = _variable_on_cpu('biases', [512, 2], tf.constant_initializer(0.0), flags.use_fp16)\r\n maxout = tf.tensordot(reshape, weights, axes=1) + biases\r\n maxout_1 = tf.reduce_max(maxout, axis=2, name=scope.name)\r\n _activation_summary(maxout_1) \r\n \r\n # merge two eyes\r\n with tf.variable_scope('reshape1') as scope:\r\n # concat left and right lable\r\n concat_1 = tf.concat([maxout_1,indication], axis=-1)\r\n # reshape1(merge eyes)\r\n reshape_1 = tf.reshape(concat_1, [32,-1], name=scope.name)\r\n \r\n # dropout2\r\n dropout_2 = tf.nn.dropout(reshape_1, keep_prob=0.25, name='dropout2') \r\n \r\n # maxout2\r\n with tf.variable_scope('maxout2') as scope:\r\n reshape = tf.reshape(dropout_2, [-1, 1028])\r\n weights = _variable_with_weight_decay('weights', shape=[1028, 512, 2],\r\n init_parameter=0.001, wd=0.0, use_fp16=flags.use_fp16) \r\n biases = _variable_on_cpu('biases', [512, 2], tf.constant_initializer(0.0), flags.use_fp16)\r\n maxout = tf.tensordot(reshape, weights, axes=1) + biases\r\n maxout_2 = tf.reduce_max(maxout, axis=2, 
name='maxout2')\r\n _activation_summary(maxout_2) \r\n\r\n # dropout3\r\n dropout_3 = tf.nn.dropout(maxout_2, keep_prob=0.25, name='dropout3')\r\n \r\n # linear layer(WX + b),\r\n # We don't apply softmax here because\r\n # tf.nn.sparse_softmax_cross_entropy_with_logits accepts the unscaled logits\r\n # and performs the softmax internally for efficiency.\r\n with tf.variable_scope('softmax_linear') as scope:\r\n weights = _variable_with_weight_decay('weights', [512, 2*NUM_CLASSES],\r\n init_parameter=0.001, wd=0.0, use_fp16=flags.use_fp16)\r\n biases = _variable_on_cpu('biases', [2*NUM_CLASSES], tf.constant_initializer(0.0), flags.use_fp16)\r\n softmax_linear = tf.add(tf.matmul(dropout_3, weights), biases, name=scope.name)\r\n _activation_summary(softmax_linear)\r\n \r\n # back to one eye\r\n with tf.variable_scope('reshape2') as scope:\r\n reshape_2 = tf.reshape(softmax_linear,[64,5],name=scope.name)\r\n \r\n return reshape_2", "def __init__(self, class_definitions, network, batch_size, batch_image_size,\n create_learn_epoch_fn, create_evaluate_epoch_fn, create_synthesis_fn,\n experiment, negative_labels=None, auxiliary_processors=None,\n config_proto=None, graph=None, device_string=None):\n super(LearnDiscriminatively, self).__init__(\n experiment.run_path, config_proto, graph, device_string)\n\n # the base network parameters: what to learn and on which architecture to learn it.\n self.network = network\n self.batch_size = batch_size\n self.batch_image_size = ensure_shape_3d(batch_image_size)\n self.experiment = experiment\n self.offline_file = self.experiment.file\n self.negative_labels = negative_labels\n self.auxiliary_processors = auxiliary_processors or []\n self.auxiliary_inputs = []\n\n self.class_definitions = class_definitions\n self.classes = self.create_training_classes(self.class_definitions, self.negative_labels)\n \n with tf.name_scope(LearnDiscriminatively.MAIN_SCOPE_NAME):\n self.global_step = tf.train.create_global_step(graph=self.graph)\n \n self.input = self.create_input()\n\n auxiliary_images = self.input.images\n auxiliary_labels = self.input.labels\n\n # we go through the auxiliary processors in the given order. 
this means\n # the innermost part of the composition is the first element, etc.\n for auxiliary_processor in self.auxiliary_processors:\n auxiliary_images, auxiliary_labels, auxiliary_names = auxiliary_processor(auxiliary_images, auxiliary_labels)\n\n self.auxiliary_inputs.append(\n AuxiliaryInput(images=auxiliary_images, labels=auxiliary_labels, names=auxiliary_names))\n \n self.discriminator = self.create_discriminator(self.network, auxiliary_images, auxiliary_labels)\n\n with tf.name_scope(\"input_loader\"):\n self.batch_loader = self.create_batch_loader(self.input)\n\n self.scaffold = tf.train.Scaffold( \n init_op=tf.global_variables_initializer(),\n local_init_op=tf.local_variables_initializer())\n self.saver = self.discriminator.saver()\n\n self.learn_epoch = create_learn_epoch_fn(self.discriminator, self.auxiliary_inputs)\n self.evaluate_epoch = create_evaluate_epoch_fn(self.discriminator, self.auxiliary_inputs)\n self.synthesize = create_synthesis_fn(self.discriminator, self.input)", "def main():\n \n # Create queue coordinator.\n coord = tf.train.Coordinator()\n h, w = INPUT_SIZE\n # Load reader.\n with tf.name_scope(\"create_inputs\"):\n reader = ImageReader(DATA_DIRECTORY, DATA_LIST_PATH, None, False, False, coord)\n image = reader.image\n image_rev = tf.reverse(image, tf.stack([1]))\n image_list = reader.image_list\n\n image_batch_origin = tf.stack([image, image_rev])\n image_batch = tf.image.resize_images(image_batch_origin, [int(h), int(w)])\n image_batch075 = tf.image.resize_images(image_batch_origin, [int(h * 0.75), int(w * 0.75)])\n image_batch125 = tf.image.resize_images(image_batch_origin, [int(h * 1.25), int(w * 1.25)])\n \n # Create network.\n with tf.variable_scope('', reuse=False):\n net_100 = JPPNetModel({'data': image_batch}, is_training=False, n_classes=N_CLASSES)\n with tf.variable_scope('', reuse=True):\n net_075 = JPPNetModel({'data': image_batch075}, is_training=False, n_classes=N_CLASSES)\n with tf.variable_scope('', reuse=True):\n net_125 = JPPNetModel({'data': image_batch125}, is_training=False, n_classes=N_CLASSES)\n\n \n # parsing net\n parsing_fea1_100 = net_100.layers['res5d_branch2b_parsing']\n parsing_fea1_075 = net_075.layers['res5d_branch2b_parsing']\n parsing_fea1_125 = net_125.layers['res5d_branch2b_parsing']\n\n parsing_out1_100 = net_100.layers['fc1_human']\n parsing_out1_075 = net_075.layers['fc1_human']\n parsing_out1_125 = net_125.layers['fc1_human']\n\n # pose net\n resnet_fea_100 = net_100.layers['res4b22_relu']\n resnet_fea_075 = net_075.layers['res4b22_relu']\n resnet_fea_125 = net_125.layers['res4b22_relu']\n\n with tf.variable_scope('', reuse=False):\n pose_out1_100, pose_fea1_100 = pose_net(resnet_fea_100, 'fc1_pose')\n pose_out2_100, pose_fea2_100 = pose_refine(pose_out1_100, parsing_out1_100, pose_fea1_100, name='fc2_pose')\n parsing_out2_100, parsing_fea2_100 = parsing_refine(parsing_out1_100, pose_out1_100, parsing_fea1_100, name='fc2_parsing')\n parsing_out3_100, parsing_fea3_100 = parsing_refine(parsing_out2_100, pose_out2_100, parsing_fea2_100, name='fc3_parsing')\n\n with tf.variable_scope('', reuse=True):\n pose_out1_075, pose_fea1_075 = pose_net(resnet_fea_075, 'fc1_pose')\n pose_out2_075, pose_fea2_075 = pose_refine(pose_out1_075, parsing_out1_075, pose_fea1_075, name='fc2_pose')\n parsing_out2_075, parsing_fea2_075 = parsing_refine(parsing_out1_075, pose_out1_075, parsing_fea1_075, name='fc2_parsing')\n parsing_out3_075, parsing_fea3_075 = parsing_refine(parsing_out2_075, pose_out2_075, parsing_fea2_075, 
name='fc3_parsing')\n\n with tf.variable_scope('', reuse=True):\n pose_out1_125, pose_fea1_125 = pose_net(resnet_fea_125, 'fc1_pose')\n pose_out2_125, pose_fea2_125 = pose_refine(pose_out1_125, parsing_out1_125, pose_fea1_125, name='fc2_pose')\n parsing_out2_125, parsing_fea2_125 = parsing_refine(parsing_out1_125, pose_out1_125, parsing_fea1_125, name='fc2_parsing')\n parsing_out3_125, parsing_fea3_125 = parsing_refine(parsing_out2_125, pose_out2_125, parsing_fea2_125, name='fc3_parsing')\n\n\n parsing_out1 = tf.reduce_mean(tf.stack([tf.image.resize_images(parsing_out1_100, tf.shape(image_batch_origin)[1:3,]),\n tf.image.resize_images(parsing_out1_075, tf.shape(image_batch_origin)[1:3,]),\n tf.image.resize_images(parsing_out1_125, tf.shape(image_batch_origin)[1:3,])]), axis=0)\n parsing_out2 = tf.reduce_mean(tf.stack([tf.image.resize_images(parsing_out2_100, tf.shape(image_batch_origin)[1:3,]),\n tf.image.resize_images(parsing_out2_075, tf.shape(image_batch_origin)[1:3,]),\n tf.image.resize_images(parsing_out2_125, tf.shape(image_batch_origin)[1:3,])]), axis=0)\n parsing_out3 = tf.reduce_mean(tf.stack([tf.image.resize_images(parsing_out3_100, tf.shape(image_batch_origin)[1:3,]),\n tf.image.resize_images(parsing_out3_075, tf.shape(image_batch_origin)[1:3,]),\n tf.image.resize_images(parsing_out3_125, tf.shape(image_batch_origin)[1:3,])]), axis=0)\n\n raw_output = tf.reduce_mean(tf.stack([parsing_out1, parsing_out2, parsing_out3]), axis=0)\n head_output, tail_output = tf.unstack(raw_output, num=2, axis=0)\n tail_list = tf.unstack(tail_output, num=20, axis=2)\n tail_list_rev = [None] * 20\n for xx in range(14):\n tail_list_rev[xx] = tail_list[xx]\n tail_list_rev[14] = tail_list[15]\n tail_list_rev[15] = tail_list[14]\n tail_list_rev[16] = tail_list[17]\n tail_list_rev[17] = tail_list[16]\n tail_list_rev[18] = tail_list[19]\n tail_list_rev[19] = tail_list[18]\n tail_output_rev = tf.stack(tail_list_rev, axis=2)\n tail_output_rev = tf.reverse(tail_output_rev, tf.stack([1]))\n\n \n raw_output_all = tf.reduce_mean(tf.stack([head_output, tail_output_rev]), axis=0)\n raw_output_all = tf.expand_dims(raw_output_all, dim=0)\n raw_output_all = tf.argmax(raw_output_all, dimension=3)\n pred_all = tf.expand_dims(raw_output_all, dim=3) # Create 4-d tensor.\n\n # Which variables to load.\n restore_var = tf.global_variables()\n # Set up tf session and initialize variables. \n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n sess = tf.Session(config=config)\n init = tf.global_variables_initializer()\n \n sess.run(init)\n sess.run(tf.local_variables_initializer())\n \n # Load weights.\n loader = tf.train.Saver(var_list=restore_var)\n if RESTORE_FROM is not None:\n if load(loader, sess, RESTORE_FROM):\n print(\" [*] Load SUCCESS\")\n else:\n print(\" [!] 
Load failed...\")\n \n # Start queue threads.\n threads = tf.train.start_queue_runners(coord=coord, sess=sess)\n\n rand_int = [i/100 for i in random.sample(range(0, 100), NUM_STEPS)]\n # Iterate over training steps.\n for step in range(NUM_STEPS):\n parsing_ = sess.run(pred_all)\n if step % 100 == 0:\n print('step {:d}'.format(step))\n print (image_list[step])\n img_split = image_list[step].split('/')\n img_id = img_split[-1][:-4]\n mask_top = decode_labels(parsing_, [5,6,7,10], num_classes=N_CLASSES)\n mask_bottom = decode_labels(parsing_, [9, 12], num_classes=N_CLASSES)\n im_mask_top = Image.fromarray(mask_top[0]).convert('L')\n im_mask_bottom = Image.fromarray(mask_bottom[0]).convert('L')\n _mask_top = np.array(im_mask_top)\n _mask_top[_mask_top != 0 ] = 255\n _mask_bottom = np.array(im_mask_bottom)\n _mask_bottom[_mask_bottom != 0 ] = 255\n\n real_img = Image.open(image_list[step]).convert(\"RGB\")\n top_real_img = Image.open(image_list[step]).convert(\"RGB\")\n bottom_real_img = Image.open(image_list[step]).convert(\"RGB\")\n\n imgdata_top = reload_img(top_real_img)\n for i in range(0,len(imgdata_top)):\n # hsv code start\n (h,s,v) = rgb2hsv(imgdata_top[i])\n h += float(i)/len(imgdata_top)\n v += 0.005\n imgdata_top[i] = hsv2rgb((h,s,v))\n # hsv code end\n\n # hls code\n # (h,l,s) = rgb2hls(imgdata_top[i])\n # h = rand_int[step]\n # s = 0.3\n # imgdata_top[i] = hls2rgb((h,l,s))\n # hls code end\n top_real_img.putdata(imgdata_top)\n im = Image.composite(top_real_img, real_img, Image.fromarray(_mask_top).convert('L'))\n if _mask_bottom.size > 0:\n imgdata_bottom = reload_img(bottom_real_img)\n for i in range(0,len(imgdata_bottom)):\n # bottom part only use hsv to avoid not matching color with top parts\n (h,s,v) = rgb2hsv(imgdata_bottom[i])\n h += float(i)/len(imgdata_bottom)\n v += 0.005\n imgdata_bottom[i] = hsv2rgb((h,s,v))\n bottom_real_img.putdata(imgdata_bottom)\n im = Image.composite(bottom_real_img, im, Image.fromarray(_mask_bottom).convert('L'))\n Image.fromarray(_mask_bottom).save('{}/{}_bottom.png'.format(OUTPUT_DIR, img_id))\n im.save('{}/{}_hsl.png'.format(OUTPUT_DIR, img_id))\n Image.fromarray(_mask_top).save('{}/{}_top.png'.format(OUTPUT_DIR, img_id))\n \n # parsing_im.save('{}/{}_vis.png'.format(OUTPUT_DIR, img_id))\n # cv2.imwrite('{}/{}.png'.format(OUTPUT_DIR, img_id), parsing_[0,:,:,0])\n\n coord.request_stop()\n coord.join(threads)", "def main():\n # lr_decay = 0.5\n # decay_every = 100\n args = get_arguments()\n \n h, w = map(int, args.input_size.split(','))\n input_size = (h, w)\n \n tf.set_random_seed(args.random_seed)\n \n coord = tf.train.Coordinator()\n \n with tf.name_scope(\"create_inputs\"):\n reader = ImageReader(\n args.data_list,\n input_size,\n args.random_scale,\n args.random_mirror,\n args.ignore_label,\n IMG_MEAN,\n coord)\n image_batch, label_batch = reader.dequeue(args.batch_size)\n \n # Set up tf session and initialize variables. 
\n config = tf.ConfigProto()\n # config.gpu_options.allow_growth = True\n # config.allow_soft_placement = True\n # config.intra_op_parallelism_threads = 1\n sess = tf.Session(config = config)\n net = unext(image_batch, is_train = True, reuse = False, n_out = NUM_CLASSES)\n \n # Predictions: ignoring all predictions with labels greater or equal than n_classes\n raw_output = net.outputs\n raw_prediction = tf.reshape(raw_output, [-1, args.num_classes])\n label_proc = prepare_label(label_batch, tf.stack(raw_output.get_shape()[1:3]), num_classes=args.num_classes, one_hot=False) # [batch_size, h, w]\n raw_gt = tf.reshape(label_proc, [-1,])\n indices = tf.squeeze(tf.where(tf.less_equal(raw_gt, args.num_classes - 1)), 1)\n gt = tf.cast(tf.gather(raw_gt, indices), dtype = tf.int32)\n prediction = tf.gather(raw_prediction, indices)\n \n main_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits = prediction, labels = gt)\n\n t_vars = tf.trainable_variables()\n l2_losses = [args.weight_decay * tf.nn.l2_loss(v) for v in t_vars if 'kernel' in v.name]\n #reduced_loss = 0.5 * tf.reduce_mean(main_loss) + generalised_dice_loss(prediction, gt) + tf.add_n(l2_losses)\n reduced_loss = tf.reduce_mean(main_loss) + tf.add_n(l2_losses)\n\n # Processed predictions: for visualisation.\n raw_output_up = tf.image.resize_bilinear(raw_output, tf.shape(image_batch)[1:3,])\n raw_output_up = tf.argmax(raw_output_up, dimension = 3)\n pred = tf.expand_dims(raw_output_up, dim = 3)\n \n # Image summary.\n images_summary = tf.py_func(inv_preprocess, [image_batch, args.save_num_images, IMG_MEAN], tf.uint8)\n labels_summary = tf.py_func(decode_labels, [label_batch, args.save_num_images, args.num_classes], tf.uint8)\n preds_summary = tf.py_func(decode_labels, [pred, args.save_num_images, args.num_classes], tf.uint8)\n \n total_summary = tf.summary.image('images', \n tf.concat(axis=2, values=[images_summary, labels_summary, preds_summary]), \n max_outputs=args.save_num_images) # Concatenate row-wise.\n loss_summary = tf.summary.scalar('TotalLoss', reduced_loss)\n summary_writer = tf.summary.FileWriter(args.snapshot_dir,\n graph=tf.get_default_graph())\n\n # Using Poly learning rate policy \n base_lr = tf.constant(args.learning_rate)\n step_ph = tf.placeholder(dtype=tf.float32, shape=())\n learning_rate = tf.train.exponential_decay(base_lr, step_ph, args.num_steps, args.power)\n\n lr_summary = tf.summary.scalar('LearningRate', learning_rate)\n #train_op = tf.train.MomentumOptimizer(learning_rate, args.momentum).minimize(reduced_loss, var_list = t_vars)\n train_op = tf.train.AdamOptimizer(learning_rate).minimize(reduced_loss, var_list = t_vars)\n init = tf.global_variables_initializer()\n sess.run(init)\n \n # Saver for storing checkpoints of the model.\n saver = tf.train.Saver(var_list = tf.global_variables(), max_to_keep = 10)\n\n ckpt = tf.train.get_checkpoint_state(SNAPSHOT_DIR)\n if ckpt and ckpt.model_checkpoint_path:\n #restore_vars = list([t for t in tf.global_variables() if not 'uconv1' in t.name])\n loader = tf.train.Saver(var_list = tf.global_variables())\n load_step = int(os.path.basename(ckpt.model_checkpoint_path).split('-')[1])\n load(loader, sess, ckpt.model_checkpoint_path)\n else:\n print('No checkpoint file found.')\n load_step = 0\n\n # Start queue threads.\n threads = tf.train.start_queue_runners(coord = coord, sess = sess)\n\n # Iterate over training steps.\n save_summary_every = 10\n for step in range(args.num_steps):\n start_time = time.time()\n \n feed_dict = {step_ph: step}\n if not step % 
args.save_pred_every == 0:\n loss_value, _, l_summary, lr_summ = sess.run([reduced_loss, train_op, loss_summary, lr_summary], feed_dict=feed_dict)\n duration = time.time() - start_time\n elif step % args.save_pred_every == 0:\n loss_value, _, summary, l_summary, lr_summ = sess.run([reduced_loss, train_op, total_summary, loss_summary, lr_summary], feed_dict=feed_dict)\n duration = time.time() - start_time\n save(saver, sess, args.snapshot_dir, step)\n summary_writer.add_summary(summary, step)\n\n if step % save_summary_every == 0:\n \n summary_writer.add_summary(l_summary, step)\n summary_writer.add_summary(lr_summ, step)\n \n print('step {:d} \\t loss = {:.3f}, ({:.3f} sec/step)'.format(step, loss_value, duration))\n \n coord.request_stop()\n coord.join(threads)", "def run_inference_on_image(image):\n if not tf.gfile.Exists(image):\n tf.logging.fatal('File does not exist %s', image)\n\n # Read an image\n image_data = tf.gfile.FastGFile(image, 'rb').read()\n img_data_jpg = tf.image.decode_jpeg(image_data) # Decode image\n img_data_jpg = tf.image.convert_image_dtype(img_data_jpg, dtype=tf.float32) # Convert uint8 to float32\n img_data_jpg = tf.image.resize_image_with_crop_or_pad(img_data_jpg,IMAGE_SIZE,IMAGE_SIZE)\n\n # Creates graph from saved GraphDef.\n create_graph()\n\n with tf.Session() as sess:\n image_data = img_data_jpg.eval().reshape(-1,IMAGE_SIZE,IMAGE_SIZE,CHANNEL)\n softmax_tensor = sess.graph.get_tensor_by_name('lg/InceptionV3/Predictions/Reshape_1:0')\n predictions = sess.run(softmax_tensor, {'lg/Placeholder:0': image_data})\n predictions = np.squeeze(predictions)\n print('predictions: ',predictions)\n # Read the labels from label.txt.\n label_path = os.path.join(FLAGS.model_dir, '/home/lg/projects/labels.txt')\n label = np.loadtxt(fname=label_path,dtype=str)\n\n top_k = predictions.argsort()[-FLAGS.num_top_predictions:][::-1]\n for node_id in top_k:\n label_string = label[node_id]\n score = predictions[node_id]\n print('%s (score = %.5f)' % (label_string, score))", "def fit_style_transfer(style_image, content_image, style_weight=1e-2, content_weight=1e-4, \n optimizer='adam', epochs=1, steps_per_epoch=1):\n\n images = []\n step = 0\n\n # get the style image features \n style_targets = get_style_image_features(style_image)\n \n # get the content image features\n content_targets = get_content_image_features(content_image)\n\n # initialize the generated image for updates\n generated_image = tf.cast(content_image, dtype=tf.float32)\n generated_image = tf.Variable(generated_image) \n \n # collect the image updates starting from the content image\n images.append(content_image)\n \n for n in range(epochs):\n for m in range(steps_per_epoch):\n step += 1\n \n ### START CODE HERE ###\n # Update the image with the style using the function that you defined\n \n update_image_with_style(image=generated_image,\n style_targets=style_targets,\n content_targets=content_targets,\n style_weight=style_weight,\n content_weight=content_weight,\n optimizer=optimizer)\n\n ### END CODE HERE\n\n print(\".\", end='')\n if (m + 1) % 10 == 0:\n images.append(generated_image)\n \n # display the current stylized image\n clear_output(wait=True)\n display_image = tensor_to_image(generated_image)\n display_fn(display_image)\n\n # append to the image collection for visualization later\n images.append(generated_image)\n print(\"Train step: {}\".format(step))\n \n # convert to uint8 (expected dtype for images with pixels in the range [0,255])\n generated_image = tf.cast(generated_image, dtype=tf.uint8)\n \n 
return generated_image, images", "def inference(image, keep_prob):\r\n '''\r\n print(\"setting up vgg initialized conv layers ...\")\r\n model_data = utils.get_model_data(FLAGS.model_dir, MODEL_URL)\r\n\r\n mean = model_data['normalization'][0][0][0]\r\n mean_pixel = np.mean(mean, axis=(0, 1))\r\n\r\n weights = np.squeeze(model_data['layers'])\r\n print(\"weights.shape\",weights.shape)\r\n\r\n processed_image = utils.process_image(image, mean_pixel)'''\r\n\r\n with tf.variable_scope(\"inference\"):\r\n pooling_net,conv_final_layer = inference_op(image)\r\n #conv_final_layer = image_net[\"conv5_3\"]\r\n\r\n pool5 = utils.max_pool_2x2(conv_final_layer)\r\n\r\n W6 = utils.weight_variable([7, 7, 512, 4096], name=\"W6\")\r\n b6 = utils.bias_variable([4096], name=\"b6\")\r\n conv6 = utils.conv2d_basic(pool5, W6, b6)\r\n relu6 = tf.nn.relu(conv6, name=\"relu6\")\r\n if FLAGS.debug:\r\n utils.add_activation_summary(relu6)\r\n relu_dropout6 = tf.nn.dropout(relu6, keep_prob=keep_prob)\r\n\r\n W7 = utils.weight_variable([1, 1, 4096, 4096], name=\"W7\")\r\n b7 = utils.bias_variable([4096], name=\"b7\")\r\n conv7 = utils.conv2d_basic(relu_dropout6, W7, b7)\r\n relu7 = tf.nn.relu(conv7, name=\"relu7\")\r\n if FLAGS.debug:\r\n utils.add_activation_summary(relu7)\r\n relu_dropout7 = tf.nn.dropout(relu7, keep_prob=keep_prob)\r\n\r\n W8 = utils.weight_variable([1, 1, 4096, NUM_OF_CLASSESS], name=\"W8\")\r\n b8 = utils.bias_variable([NUM_OF_CLASSESS], name=\"b8\")\r\n conv8 = utils.conv2d_basic(relu_dropout7, W8, b8)\r\n # annotation_pred1 = tf.argmax(conv8, dimension=3, name=\"prediction1\")\r\n\r\n # now to upscale to actual image size\r\n deconv_shape1 = pooling_net[\"pool4\"].get_shape()\r\n W_t1 = utils.weight_variable([4, 4, deconv_shape1[3].value, NUM_OF_CLASSESS], name=\"W_t1\")\r\n b_t1 = utils.bias_variable([deconv_shape1[3].value], name=\"b_t1\")\r\n # 对第8层的结果进行反卷积(上采样),通道数也由NUM_OF_CLASSESS变为第4层的通道数\r\n conv_t1 = utils.conv2d_transpose_strided(conv8, W_t1, b_t1, output_shape=tf.shape(pooling_net[\"pool4\"]))\r\n fuse_1 = tf.add(conv_t1, pooling_net[\"pool4\"], name=\"fuse_1\")\r\n\r\n deconv_shape2 = pooling_net[\"pool3\"].get_shape()\r\n W_t2 = utils.weight_variable([4, 4, deconv_shape2[3].value, deconv_shape1[3].value], name=\"W_t2\")\r\n b_t2 = utils.bias_variable([deconv_shape2[3].value], name=\"b_t2\")\r\n conv_t2 = utils.conv2d_transpose_strided(fuse_1, W_t2, b_t2, output_shape=tf.shape(pooling_net[\"pool3\"]))\r\n fuse_2 = tf.add(conv_t2, pooling_net[\"pool3\"], name=\"fuse_2\")\r\n\r\n shape = tf.shape(image)\r\n deconv_shape3 = tf.stack([shape[0], shape[1], shape[2], NUM_OF_CLASSESS])\r\n W_t3 = utils.weight_variable([16, 16, NUM_OF_CLASSESS, deconv_shape2[3].value], name=\"W_t3\")\r\n b_t3 = utils.bias_variable([NUM_OF_CLASSESS], name=\"b_t3\")\r\n conv_t3 = utils.conv2d_transpose_strided(fuse_2, W_t3, b_t3, output_shape=deconv_shape3, stride=8)\r\n\r\n annotation_pred = tf.argmax(conv_t3, dimension=3, name=\"prediction\")\r\n print(\"annotation_pred.shape\",annotation_pred.shape)\r\n print(\"conv_t3\",conv_t3)\r\n print(\"tf.expand_dims(annotation_pred, dim=3)\",tf.expand_dims(annotation_pred, dim=3))\r\n return tf.expand_dims(annotation_pred, dim=3), conv_t3", "def process_image(\n sess, samples, predictions, im_ind, crop_size, output_scale, model_store,\n renderer, task_type, infer_name, infer_dir, vis_dir):\n # Dictionary for run times.\n run_times = {}\n\n # Prediction.\n time_start = time.time()\n (samples, predictions) = sess.run([samples, predictions])\n run_times['prediction'] = 
time.time() - time_start\n\n # Scene and image ID's.\n scene_id = samples[common.SCENE_ID][0]\n im_id = samples[common.IM_ID][0]\n\n # Intrinsic parameters.\n K = samples[common.K][0]\n\n if task_type == common.LOCALIZATION:\n gt_poses = []\n gt_obj_ids = samples[common.GT_OBJ_IDS][0]\n for gt_id in range(len(gt_obj_ids)):\n R = transform.quaternion_matrix(\n samples[common.GT_OBJ_QUATS][0][gt_id])[:3, :3]\n t = samples[common.GT_OBJ_TRANS][0][gt_id].reshape((3, 1))\n gt_poses.append({'obj_id': gt_obj_ids[gt_id], 'R': R, 't': t})\n else:\n gt_poses = None\n\n # Establish many-to-many 2D-3D correspondences.\n time_start = time.time()\n corr = corresp.establish_many_to_many(\n obj_confs=predictions[common.PRED_OBJ_CONF][0],\n frag_confs=predictions[common.PRED_FRAG_CONF][0],\n frag_coords=predictions[common.PRED_FRAG_LOC][0],\n gt_obj_ids=[x['obj_id'] for x in gt_poses],\n model_store=model_store,\n output_scale=output_scale,\n min_obj_conf=FLAGS.corr_min_obj_conf,\n min_frag_rel_conf=FLAGS.corr_min_frag_rel_conf,\n project_to_surface=FLAGS.project_to_surface,\n only_annotated_objs=(task_type == common.LOCALIZATION))\n run_times['establish_corr'] = time.time() - time_start\n\n # PnP-RANSAC to estimate 6D object poses from the correspondences.\n time_start = time.time()\n poses = []\n for obj_id, obj_corr in corr.items():\n # tf.logging.info(\n # 'Image path: {}, obj: {}'.format(samples[common.IMAGE_PATH][0], obj_id))\n\n # Number of established correspondences.\n num_corrs = obj_corr['coord_2d'].shape[0]\n\n # Skip the fitting if there are too few correspondences.\n min_required_corrs = 6\n if num_corrs < min_required_corrs:\n continue\n\n # The correspondences need to be sorted for PROSAC.\n if FLAGS.use_prosac:\n sorted_inds = np.argsort(obj_corr['conf'])[::-1]\n for key in obj_corr.keys():\n obj_corr[key] = obj_corr[key][sorted_inds]\n\n # Select correspondences with the highest confidence.\n if FLAGS.max_correspondences is not None \\\n and num_corrs > FLAGS.max_correspondences:\n # Sort the correspondences only if they have not been sorted for PROSAC.\n if FLAGS.use_prosac:\n keep_inds = np.arange(num_corrs)\n else:\n keep_inds = np.argsort(obj_corr['conf'])[::-1]\n keep_inds = keep_inds[:FLAGS.max_correspondences]\n for key in obj_corr.keys():\n obj_corr[key] = obj_corr[key][keep_inds]\n\n # Save the established correspondences (for analysis).\n if FLAGS.save_corresp:\n obj_gt_poses = []\n if gt_poses is not None:\n obj_gt_poses = [x for x in gt_poses if x['obj_id'] == obj_id]\n pred_time = float(np.sum(list(run_times.values())))\n image_path = samples[common.IMAGE_PATH][0].decode('utf-8')\n save_correspondences(\n scene_id, im_id, im_ind, obj_id, image_path, K, obj_corr, pred_time,\n infer_name, obj_gt_poses, infer_dir)\n\n # Make sure the coordinates are saved continuously in memory.\n coord_2d = np.ascontiguousarray(obj_corr['coord_2d'].astype(np.float64))\n coord_3d = np.ascontiguousarray(obj_corr['coord_3d'].astype(np.float64))\n\n if FLAGS.fitting_method == common.PROGRESSIVE_X:\n # If num_instances == 1, then only GC-RANSAC is applied. 
If > 1, then\n # Progressive-X is applied and up to num_instances poses are returned.\n # If num_instances == -1, then Progressive-X is applied and all found\n # poses are returned.\n if task_type == common.LOCALIZATION:\n num_instances = len([x for x in gt_poses if x['obj_id'] == obj_id])\n else:\n num_instances = -1\n\n if FLAGS.max_instances_to_fit is not None:\n num_instances = min(num_instances, FLAGS.max_instances_to_fit)\n\n pose_ests, inlier_indices, pose_qualities = pyprogressivex.find6DPoses(\n x1y1=coord_2d,\n x2y2z2=coord_3d,\n K=K,\n threshold=FLAGS.inlier_thresh,\n neighborhood_ball_radius=FLAGS.neighbour_max_dist,\n spatial_coherence_weight=FLAGS.spatial_coherence_weight,\n scaling_from_millimeters=FLAGS.scaling_from_millimeters,\n max_tanimoto_similarity=FLAGS.max_tanimoto_similarity,\n max_iters=FLAGS.max_fitting_iterations,\n conf=FLAGS.required_progx_confidence,\n proposal_engine_conf=FLAGS.required_ransac_confidence,\n min_coverage=FLAGS.min_hypothesis_quality,\n min_triangle_area=FLAGS.min_triangle_area,\n min_point_number=6,\n max_model_number=num_instances,\n max_model_number_for_optimization=FLAGS.max_model_number_for_pearl,\n use_prosac=FLAGS.use_prosac,\n log=False)\n\n pose_est_success = pose_ests is not None\n if pose_est_success:\n for i in range(int(pose_ests.shape[0] / 3)):\n j = i * 3\n R_est = pose_ests[j:(j + 3), :3]\n t_est = pose_ests[j:(j + 3), 3].reshape((3, 1))\n poses.append({\n 'scene_id': scene_id,\n 'im_id': im_id,\n 'obj_id': obj_id,\n 'R': R_est,\n 't': t_est,\n 'score': pose_qualities[i],\n })\n\n elif FLAGS.fitting_method == common.OPENCV_RANSAC:\n # This integration of OpenCV-RANSAC can estimate pose of only one object\n # instance. Note that in Table 3 of the EPOS CVPR'20 paper, the scores\n # for OpenCV-RANSAC were obtained with integrating cv2.solvePnPRansac\n # in the Progressive-X scheme (as the other methods in that table).\n pose_est_success, r_est, t_est, inliers = cv2.solvePnPRansac(\n objectPoints=coord_3d,\n imagePoints=coord_2d,\n cameraMatrix=K,\n distCoeffs=None,\n iterationsCount=FLAGS.max_fitting_iterations,\n reprojectionError=FLAGS.inlier_thresh,\n confidence=0.99, # FLAGS.required_ransac_confidence\n flags=cv2.SOLVEPNP_EPNP)\n\n if pose_est_success:\n poses.append({\n 'scene_id': scene_id,\n 'im_id': im_id,\n 'obj_id': obj_id,\n 'R': cv2.Rodrigues(r_est)[0],\n 't': t_est,\n 'score': 0.0, # TODO: Define the score.\n })\n\n else:\n raise ValueError(\n 'Unknown pose fitting method ({}).'.format(FLAGS.fitting_method))\n\n run_times['fitting'] = time.time() - time_start\n run_times['total'] = np.sum(list(run_times.values()))\n\n # Add the total time to each pose.\n for pose in poses:\n pose['time'] = run_times['total']\n\n # Visualization.\n if FLAGS.vis:\n visualize(\n samples=samples,\n predictions=predictions,\n pred_poses=poses,\n im_ind=im_ind,\n crop_size=crop_size,\n output_scale=output_scale,\n model_store=model_store,\n renderer=renderer,\n vis_dir=vis_dir)\n\n return poses, run_times", "def run_inference_on_image(image):\n if not gfile.Exists(image):\n tf.logging.fatal('File does not exist %s', image)\n image_data = gfile.FastGFile(image, 'rb').read()\n\n # Creates graph from saved GraphDef.\n create_graph()\n\n with tf.Session() as sess:\n # Runs the softmax tensor by feeding the image_data as input to the graph.\n softmax_tensor = sess.graph.get_tensor_by_name('final_result:0')\n \n print(\"Running inference on image: %s\" % os.path.basename(FLAGS.image_file))\n predictions = sess.run(softmax_tensor,\n 
{'DecodeJpeg/contents:0': image_data})\n predictions = np.squeeze(predictions)\n\n # Get indicies of top x predictions\n top_x = predictions.argsort()[-FLAGS.num_top_predictions:][::-1]\n # Get list of human readable class labels\n class_labels = get_label_list()\n \n #print results\n for top_i in top_x: \n print(class_labels[top_i] + ': %.2f%%' % (predictions[top_i] * 100))\n print('\\n')", "def run_inference_on_image(image):\n if not tf.gfile.Exists(image):\n tf.logging.fatal('File does not exist %s', image)\n image_data = tf.gfile.FastGFile(image, 'rb').read()\n\n\n\n with tf.Session() as sess:\n # Some useful tensors:\n # 'softmax:0': A tensor containing the normalized prediction across\n # 1000 labels.\n # 'pool_3:0': A tensor containing the next-to-last layer containing 2048\n # float description of the image.\n # 'DecodeJpeg/contents:0': A tensor containing a string providing JPEG\n # encoding of the image.\n # Runs the softmax tensor by feeding the image_data as input to the graph.\n\n # Creates node ID --> English string lookup.\n node_lookup = NodeLookup()\n\n softmax_tensor = sess.graph.get_tensor_by_name('final_result:0')\n predictions = sess.run(softmax_tensor,\n {'DecodeJpeg/contents:0': image_data})\n predictions = np.squeeze(predictions)\n\n\n\n top_k = predictions.argsort()[-FLAGS.num_top_predictions:][::-1]\n result =[]\n for node_id in top_k:\n human_string = node_lookup.id_to_string(node_id)\n score = predictions[node_id]\n\n point = collections.namedtuple('Point', ['humanString', 'score'])\n point.humanString = human_string\n point.score = score\n result.append(point)\n return result", "def inference(image, keep_prob):\n print('setting up vgg model initialized params')\n model_data = utils.get_model_data(\"data\", MODEL_URL)\n mean = model_data['normalization'][0][0][0]\n mean_pixel = np.mean(mean, axis=(0, 1))\n weights = np.squeeze(model_data['layers'])\n\n processed_image = utils.process_image(image, mean_pixel)\n\n with tf.name_scope('inference'):\n image_net = vgg_net(weights, processed_image)\n conv_final_layer = image_net['conv5_3']\n\n pool5 = utils.max_pool_2x2(conv_final_layer)\n\n W6 = utils.weights_variable([7, 7, 512, 4096], name=\"W6\")\n b6 = utils.bias_variable([4096], name='b6')\n conv6 = utils.conv2d_basic(pool5, W6, b6)\n relu6 = tf.nn.relu(conv6, name='relu6')\n\n relu_dropout6 = tf.nn.dropout(relu6, keep_prob=keep_prob)\n\n W7 = utils.weights_variable([1, 1, 4096, 4096], name=\"W7\")\n b7 = utils.bias_variable([4096], name=\"b7\")\n conv7 = utils.conv2d_basic(relu_dropout6, W7, b7)\n relu7 = tf.nn.relu(conv7, name=\"relu7\")\n\n relu_dropout7 = tf.nn.dropout(relu7, keep_prob=keep_prob)\n\n W8 = utils.weights_variable([1, 1, 4096, NUM_OF_CLASSESS], name='W8')\n b8 = utils.bias_variable([NUM_OF_CLASSESS], name=\"b8\")\n conv8 = utils.conv2d_basic(relu_dropout7, W8, b8)\n\n #unsampling to actual image size\n deconv_shape1 = image_net['pool4'].get_shape()\n W_t1 = utils.weights_variable([4, 4, deconv_shape1[3].value, NUM_OF_CLASSESS], name='W_t1')\n b_t1 = utils.bias_variable([deconv_shape1[3].value], name=\"b_t1\")\n conv_t1 = utils.conv2d_transpose_strided(conv8, W_t1, b_t1, output_shape=tf.shape(image_net['pool4']))\n fuse_1 = tf.add(conv_t1, image_net['pool4'], name='fuse_1')\n\n deconv_shape2 = image_net['pool3'].get_shape()\n W_t2 = utils.weights_variable([4, 4, deconv_shape2[3].value, deconv_shape1[3].value], name='W_t2')\n b_t2 = utils.bias_variable([deconv_shape2[3].value], name=\"b_t2\")\n conv_t2 = utils.conv2d_transpose_strided(fuse_1, 
W_t2, b_t2, output_shape=tf.shape(image_net['pool3']))\n fuse_2 = tf.add(conv_t2, image_net[\"pool3\"], name=\"fuse_2\")\n\n shape = tf.shape(image)\n output_shape = tf.stack([shape[0], shape[1], shape[2], NUM_OF_CLASSESS])\n W_t3 = utils.weights_variable([7, 7, NUM_OF_CLASSESS, deconv_shape2[3].value], name='W_t3')\n b_t3 = utils.bias_variable([NUM_OF_CLASSESS], name=\"b_t3\")\n conv_t3 = utils.conv2d_transpose_strided(fuse_2, W_t3, b_t3, output_shape=output_shape)\n\n annotation_pre = tf.argmax(conv_t3, dimension=3, name='prediction')\n\n return tf.expand_dims(annotation_pre, dim=3), conv_t3", "def stylize(network, initial, content, styles, iterations,\n content_weight, style_weight, style_blend_weights, tv_weight,\n learning_rate, print_iterations=None, checkpoint_iterations=None):\n shape = (1,) + content.shape\n style_shapes = [(1,) + style.shape for style in styles]\n content_features = {}\n style_features = [{} for _ in styles]\n\n # compute content features in feedforward mode\n g = tf.Graph()\n with g.as_default(), g.device('/cpu:0'), tf.Session() as sess:\n image = tf.placeholder('float', shape=shape)\n net, mean_pixel = vgg.net(network, image)\n content_pre = np.array([vgg.preprocess(content, mean_pixel)])\n content_features[CONTENT_LAYER] = net[CONTENT_LAYER].eval(\n feed_dict={image: content_pre})\n\n # compute style features in feedforward mode\n for i in range(len(styles)):\n g = tf.Graph()\n with g.as_default(), g.device('/cpu:0'), tf.Session() as sess:\n image = tf.placeholder('float', shape=style_shapes[i])\n net, _ = vgg.net(network, image)\n style_pre = np.array([vgg.preprocess(styles[i], mean_pixel)])\n for layer in STYLE_LAYERS:\n features = net[layer].eval(feed_dict={image: style_pre})\n features = np.reshape(features, (-1, features.shape[3]))\n gram = np.matmul(features.T, features) / features.size\n style_features[i][layer] = gram\n\n # make stylized image using backpropogation\n with tf.Graph().as_default():\n if initial is None:\n noise = np.random.normal(size=shape, scale=np.std(content) * 0.1)\n initial = tf.random_normal(shape) * 0.256\n else:\n initial = np.array([vgg.preprocess(initial, mean_pixel)])\n initial = initial.astype('float32')\n image = tf.Variable(initial)\n net, _ = vgg.net(network, image)\n\n # content loss\n content_loss = content_weight * (2 * tf.nn.l2_loss(\n net[CONTENT_LAYER] - content_features[CONTENT_LAYER]) /\n content_features[CONTENT_LAYER].size)\n # style loss\n style_loss = 0\n for i in range(len(styles)):\n style_losses = []\n for style_layer in STYLE_LAYERS:\n layer = net[style_layer]\n _, height, width, number = map(lambda i: i.value, layer.get_shape())\n size = height * width * number\n feats = tf.reshape(layer, (-1, number))\n gram = tf.matmul(tf.transpose(feats), feats) / size\n style_gram = style_features[i][style_layer]\n style_losses.append(2 * tf.nn.l2_loss(gram - style_gram) / style_gram.size)\n style_loss += style_weight * style_blend_weights[i] * reduce(tf.add, style_losses)\n # total variation denoising\n tv_y_size = _tensor_size(image[:,1:,:,:])\n tv_x_size = _tensor_size(image[:,:,1:,:])\n tv_loss = tv_weight * 2 * (\n (tf.nn.l2_loss(image[:,1:,:,:] - image[:,:shape[1]-1,:,:]) /\n tv_y_size) +\n (tf.nn.l2_loss(image[:,:,1:,:] - image[:,:,:shape[2]-1,:]) /\n tv_x_size))\n # overall loss\n loss = content_loss + style_loss + tv_loss\n\n # optimizer setup\n train_step = tf.train.AdamOptimizer(learning_rate).minimize(loss)\n\n def print_progress(i, last=False):\n stderr.write('Iteration %d/%d\\n' % (i + 1, iterations))\n 
if last or (print_iterations and i % print_iterations == 0):\n stderr.write(' content loss: %g\\n' % content_loss.eval())\n stderr.write(' style loss: %g\\n' % style_loss.eval())\n stderr.write(' tv loss: %g\\n' % tv_loss.eval())\n stderr.write(' total loss: %g\\n' % loss.eval())\n\n # optimization\n best_loss = float('inf')\n best = None\n with tf.Session() as sess:\n sess.run(tf.initialize_all_variables())\n for i in range(iterations):\n last_step = (i == iterations - 1)\n print_progress(i, last=last_step)\n train_step.run()\n\n if (checkpoint_iterations and i % checkpoint_iterations == 0) or last_step:\n this_loss = loss.eval()\n if this_loss < best_loss:\n best_loss = this_loss\n best = image.eval()\n yield (\n (None if last_step else i),\n vgg.unprocess(best.reshape(shape[1:]), mean_pixel)\n )", "def run_inference_on_image(image):\n if not tf.gfile.Exists(image):\n tf.logging.fatal('File does not exist %s', image)\n image_data = tf.gfile.FastGFile(image, 'rb').read()\n\n # Creates graph from saved GraphDef.\n #create_graph()\n\n with tf.Session() as sess:\n # Some useful tensors:\n # 'softmax:0': A tensor containing the normalized prediction across\n # 1000 labels.\n # 'pool_3:0': A tensor containing the next-to-last layer containing 2048\n # float description of the image.\n # 'DecodeJpeg/contents:0': A tensor containing a string providing JPEG\n # encoding of the image.\n # Runs the softmax tensor by feeding the image_data as input to the graph.\n global RESULTS_ANALYSIS\n softmax_tensor = sess.graph.get_tensor_by_name('softmax:0')\n\t\n predictions = sess.run(softmax_tensor,\n {'DecodeJpeg/contents:0': image_data})\n predictions = np.squeeze(predictions)\n\n # Creates node ID --> English string lookup.\n node_lookup = NodeLookup()\n #top_k = predictions.argsort()[-FLAGS.num_top_predictions:][::-1]\n top_k = predictions.argsort()[-3:][::-1]\n RESULTS_ANALYSIS=''\n for node_id in top_k:\n human_string = node_lookup.id_to_string(node_id)\n score = predictions[node_id]\n print('%s (score = %.5f)' % (human_string, score))\n RESULTS_ANALYSIS=RESULTS_ANALYSIS+'%s (score = %.5f)' % (human_string, score)+';'", "def single_inference_process_fn(inference_initializer, inference_mode_config, in_project_meta_json, request_queue,\n result_meta_queue, progress_queue, project):\n single_image_inference = inference_initializer()\n inference_mode = InferenceModeFactory.create(\n inference_mode_config, ProjectMeta.from_json(in_project_meta_json), single_image_inference)\n\n project_meta_sent = False\n req = ''\n while req is not None:\n req = request_queue.get()\n if req is not None:\n # Send the resulting project meta to the parent project to make sure we only write the meta JSON once.\n if not project_meta_sent:\n try:\n result_meta_queue.put(inference_mode.out_meta.to_json(), block=False)\n except queue.Full:\n pass\n project_meta_sent = True\n\n in_ann = Annotation.load_json_file(req.item_paths.ann_path, inference_mode.out_meta)\n ann = inference_mode.infer_annotate_image_file(req.item_paths.img_path, in_ann)\n out_dataset = project.datasets.get(req.ds_name)\n out_dataset.add_item_file(\n req.item_name, req.item_paths.img_path, ann=ann, _validate_item=False, _use_hardlink=True)\n progress_queue.put(1)", "def inference(images):\n # We instantiate all variables using tf.get_variable() instead of\n # tf.Variable() in order to share variables across multiple GPU training runs.\n # If we only ran this model on a single GPU, we could simplify this function\n # by replacing all instances of 
tf.get_variable() with tf.Variable().\n #\n # conv1\n with tf.variable_scope('conv1') as scope:\n kernel = _variable_with_weight_decay('weights',\n shape=[5, 5, 3, 64],\n stddev=5e-2,\n wd=None)\n conv = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding='SAME')\n biases = _create_variable('biases', [64], tf.constant_initializer(0.0))\n pre_activation = tf.nn.bias_add(conv, biases)\n conv1 = tf.nn.relu(pre_activation, name=scope.name)\n\n # pool1\n pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],\n padding='SAME', name='pool1')\n # norm1\n norm1 = tf.nn.lrn(pool1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,\n name='norm1')\n\n # conv2\n with tf.variable_scope('conv2') as scope:\n kernel = _variable_with_weight_decay('weights',\n shape=[5, 5, 64, 64],\n stddev=5e-2,\n wd=None)\n conv = tf.nn.conv2d(norm1, kernel, [1, 1, 1, 1], padding='SAME')\n biases = _create_variable('biases', [64], tf.constant_initializer(0.1))\n pre_activation = tf.nn.bias_add(conv, biases)\n conv2 = tf.nn.relu(pre_activation, name=scope.name)\n\n # norm2\n norm2 = tf.nn.lrn(conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,\n name='norm2')\n # pool2\n pool2 = tf.nn.max_pool(norm2, ksize=[1, 3, 3, 1],\n strides=[1, 2, 2, 1], padding='SAME', name='pool2')\n\n # local3\n with tf.variable_scope('local3') as scope:\n # Move everything into depth so we can perform a single matrix multiply.\n reshape = tf.reshape(pool2, [images.get_shape().as_list()[0], -1])\n dim = reshape.get_shape()[1].value\n weights = _variable_with_weight_decay('weights', shape=[dim, 384],\n stddev=0.04, wd=0.004)\n biases = _create_variable('biases', [384], tf.constant_initializer(0.1))\n local3 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)\n\n # local4\n with tf.variable_scope('local4') as scope:\n weights = _variable_with_weight_decay('weights', shape=[384, 192],\n stddev=0.04, wd=0.004)\n biases = _create_variable('biases', [192], tf.constant_initializer(0.1))\n local4 = tf.nn.relu(tf.matmul(local3, weights) + biases, name=scope.name)\n\n # linear layer(WX + b),\n # We don't apply softmax here because\n # tf.nn.sparse_softmax_cross_entropy_with_logits accepts the unscaled logits\n # and performs the softmax internally for efficiency.\n with tf.variable_scope('softmax_linear') as scope:\n weights = _variable_with_weight_decay('weights', [192, NUM_CLASSES],\n stddev=1 / 192.0, wd=None)\n biases = _create_variable('biases', [NUM_CLASSES],\n tf.constant_initializer(0.0))\n softmax_linear = tf.add(tf.matmul(local4, weights), biases, name=scope.name)\n\n return softmax_linear", "def inference(images):\n # We instantiate all variables using tf.get_variable() instead of\n # tf.Variable() in order to share variables across multiple GPU training runs.\n # If we only ran this model on a single GPU, we could simplify this function\n # by replacing all instances of tf.get_variable() with tf.Variable().\n #\n # conv1\n with tf.variable_scope('conv1') as scope:\n kernel = _variable_with_weight_decay('weights',\n shape=[5, 5, 3, 64],\n stddev=5e-2,\n wd=0.0)\n conv = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding='SAME')\n biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.0))\n pre_activation = tf.nn.bias_add(conv, biases)\n conv1 = tf.nn.relu(pre_activation, name=scope.name)\n _activation_summary(conv1)\n\n # pool1\n pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],\n padding='SAME', name='pool1')\n # norm1\n norm1 = tf.nn.lrn(pool1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,\n 
name='norm1')\n\n # conv2\n with tf.variable_scope('conv2') as scope:\n kernel = _variable_with_weight_decay('weights',\n shape=[5, 5, 64, 64],\n stddev=5e-2,\n wd=0.0)\n conv = tf.nn.conv2d(norm1, kernel, [1, 1, 1, 1], padding='SAME')\n biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.1))\n pre_activation = tf.nn.bias_add(conv, biases)\n conv2 = tf.nn.relu(pre_activation, name=scope.name)\n _activation_summary(conv2)\n\n # norm2\n norm2 = tf.nn.lrn(conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,\n name='norm2')\n # pool2\n pool2 = tf.nn.max_pool(norm2, ksize=[1, 3, 3, 1],\n strides=[1, 2, 2, 1], padding='SAME', name='pool2')\n\n # local3\n with tf.variable_scope('local3') as scope:\n # Move everything into depth so we can perform a single matrix multiply.\n reshape = tf.reshape(pool2, [FLAGS.batch_size, -1])\n dim = reshape.get_shape()[1].value\n weights = _variable_with_weight_decay('weights', shape=[dim, 384],\n stddev=0.04, wd=0.004)\n biases = _variable_on_cpu('biases', [384], tf.constant_initializer(0.1))\n local3 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)\n _activation_summary(local3)\n\n # local4\n with tf.variable_scope('local4') as scope:\n weights = _variable_with_weight_decay('weights', shape=[384, 192],\n stddev=0.04, wd=0.004)\n biases = _variable_on_cpu('biases', [192], tf.constant_initializer(0.1))\n local4 = tf.nn.relu(tf.matmul(local3, weights) + biases, name=scope.name)\n _activation_summary(local4)\n\n # linear layer(WX + b),\n # We don't apply softmax here because\n # tf.nn.sparse_softmax_cross_entropy_with_logits accepts the unscaled logits\n # and performs the softmax internally for efficiency.\n with tf.variable_scope('softmax_linear') as scope:\n weights = _variable_with_weight_decay('weights', [192, NUM_CLASSES],\n stddev=1/192.0, wd=0.0)\n biases = _variable_on_cpu('biases', [NUM_CLASSES],\n tf.constant_initializer(0.0))\n softmax_linear = tf.add(tf.matmul(local4, weights), biases, name=scope.name)\n _activation_summary(softmax_linear)\n\n return softmax_linear", "def image_inference(self, model_name: str, input_data):\n exec_net, image_input, image_info_input, (n, c, h, w), postprocessor = self.model_loading.load_model(model_name)\n cap, visualizer, tracker, presenter = self.image_visualizer.visualizer(input_data,model_name)\n\n while cap.isOpened():\n ret, frame = cap.read()\n if not ret:\n break\n # Resize the image to keep the same aspect ratio and to fit it to a window of a target size.\n scale_x = scale_y = min(h / frame.shape[0], w / frame.shape[1])\n input_image = cv2.resize(frame, None, fx=scale_x, fy=scale_y)\n\n input_image_size = input_image.shape[:2]\n input_image = np.pad(input_image, ((0, h - input_image_size[0]),\n (0, w - input_image_size[1]),\n (0, 0)),\n mode='constant', constant_values=0)\n # Change data layout from HWC to CHW.\n input_image = input_image.transpose((2, 0, 1))\n input_image = input_image.reshape((n, c, h, w)).astype(np.float32)\n input_image_info = np.asarray([[input_image_size[0], input_image_size[1], 1]], dtype=np.float32)\n # Run the net.\n feed_dict = {image_input: input_image}\n if image_info_input:\n feed_dict[image_info_input] = input_image_info\n outputs = exec_net.infer(feed_dict)\n # Parse detection results of the current request\n scores, classes, boxes, masks = postprocessor(\n outputs, scale_x, scale_y, *frame.shape[:2], h, w, 0.5)\n os.remove(input_data.filename)\n class_labels = self.fetch_labels.get_labels(model_name)\n\n t = 0\n for key2 in [class_labels[i] for i in 
classes]:\n x1 = str(boxes[t][0])\n y1 = str(boxes[t][1])\n x2 = str(boxes[t][2])\n y2 = str(boxes[t][3])\n\n if key2 in self.prediction.keys():\n value_init = self.prediction.get(key2)\n self.prediction[key2] = x1, y1, x2, y2\n value = value_init, self.prediction.get(key2)\n self.prediction[key2] = value\n\n else:\n self.prediction[key2] = x1, y1, x2, y2\n\n t = t + 1\n\n with open('./final_json.json', 'w') as file:\n json.dump(self.prediction, file)\n\n with open('./final_json.json','r') as file:\n json_object = json.load(file)\n\n return json_object\n cv2.destroyAllWindows()\n cap.release()", "def run_style_predict(self, style_image):\n # The style image has to be preprocessed to (1, 256, 256, 3)\n preprocessed_style_image = cv_utils.preprocess(style_image, self.style_predict_executor.get_data_type(),\n self.style_predict_executor.get_shape(), True, keep_aspect_ratio=False)\n # output[0] is the style bottleneck tensor\n style_bottleneck = self.style_predict_executor.run([preprocessed_style_image])[0]\n\n return style_bottleneck", "def train():\n args = arguments_st_train()\n\n h, w = map(int, args.input_size.split(','))\n input_size = (h, w)\n\n \n if args.use_random_seed:\n tf.set_random_seed(args.random_seed)\n \n # Create queue coordinator.\n coord = tf.train.Coordinator()\n \n # Load reader.\n with tf.name_scope(\"create_inputs\"):\n reader = ImageReader(\n args.data_dir,\n args.data_list,\n input_size=input_size,\n random_scale=args.random_scale,\n random_mirror=args.random_mirror,\n random_crop=args.random_crop,\n ignore_label=args.ignore_label,\n img_mean=IMG_MEAN,\n coord=coord,\n task=args.task)\n image_batch, label_batch = reader.dequeue(args.batch_size)\n \n # Create network.\n with slim.arg_scope(vgg_arg_scope(weight_decay=args.weight_decay, use_batch_norm=True, is_training=True)):\n if args.network == 'vgg_16_deeplab_st':\n net, end_points = vgg_16_deeplab_st(image_batch, num_classes=args.num_classes, is_training=True, dropout_keep_prob=args.keep_prob)\n elif args.network == 'vgg_16_shortcut_deeplab_st':\n net, end_points = vgg_16_shortcut_deeplab_st(image_batch, num_classes=args.num_classes, is_training=True, dropout_keep_prob=args.keep_prob)\n else:\n raise Exception('network name is not recognized!')\n \n \n # Predictions.\n raw_output = end_points['vgg_16/fc8_voc12']\n\n # gt labels\n raw_gt = prepare_label(label_batch, tf.stack(raw_output.get_shape()[1:3]), num_classes=args.num_classes,\n one_hot=False, task=args.task) # [batch_size, h, w]\n\n # losses\n if args.task == 'normal':\n loss = get_normal_loss(raw_output, raw_gt, args.num_classes, args.ignore_label) * args.loss_scale\n elif args.task == 'seg':\n loss = get_seg_loss(raw_output, raw_gt, args.num_classes, args.ignore_label) * args.loss_scale\n\n l2_losses = [args.weight_decay * tf.nn.l2_loss(v) for v in tf.trainable_variables() if 'weights' in v.name]\n reduced_loss = tf.reduce_mean(loss) + tf.add_n(l2_losses)\n \n # Image summary for visualisation.\n raw_output_up = tf.image.resize_bilinear(raw_output, tf.shape(image_batch)[1:3,])\n raw_output_up = tf.argmax(raw_output_up, axis=3)\n pred = tf.expand_dims(raw_output_up, dim=3)\n \n images_summary = tf.py_func(inv_preprocess, [image_batch, args.save_num_images, IMG_MEAN], tf.uint8)\n labels_summary = tf.py_func(decode_labels, [label_batch, args.save_num_images, args.num_classes, args.task], tf.uint8)\n preds_summary = tf.py_func(decode_labels, [pred, args.save_num_images, args.num_classes, args.task], tf.uint8)\n \n total_summary = tf.summary.image('images', \n 
tf.concat(axis=2, values=[images_summary, labels_summary, preds_summary]), \n max_outputs=args.save_num_images) # Concatenate row-wise.\n summary_writer = tf.summary.FileWriter(args.snapshot_dir,\n graph=tf.get_default_graph())\n \n # Define loss and optimisation parameters.\n train_op, step_ph = create_train_ops_st(reduced_loss, args)\n \n # Set up tf session and initialize variables. \n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n sess = tf.Session(config=config)\n init = tf.global_variables_initializer()\n \n sess.run(init)\n\n # Load variables if the checkpoint is provided.\n if args.restore_from is not None:\n load_st(sess, args)\n \n # Saver for storing checkpoints of the model.\n save_op = tf.train.Saver(var_list=tf.global_variables(), max_to_keep=args.max_to_keep)\n \n # Start queue threads.\n threads = tf.train.start_queue_runners(coord=coord, sess=sess)\n\n # Iterate over training steps.\n for step in range(args.num_steps):\n start_time = time.time()\n feed_dict = { step_ph : step }\n \n if step % args.save_pred_every == 0:\n loss_value, images, labels, preds, summary, _ = sess.run([reduced_loss, image_batch, label_batch, pred, total_summary, train_op], feed_dict=feed_dict)\n summary_writer.add_summary(summary, step)\n save(save_op, sess, args.snapshot_dir, step)\n else:\n loss_value, _ = sess.run([reduced_loss, train_op], feed_dict=feed_dict)\n duration = time.time() - start_time\n print('step {:d} \\t loss = {:.3f}, ({:.3f} sec/step)'.format(step, loss_value, duration))\n coord.request_stop()\n coord.join(threads)", "def main():\n args = get_arguments()\n \n # Create queue coordinator.\n coord = tf.train.Coordinator()\n \n # Load reader.\n with tf.name_scope(\"create_inputs\"):\n reader = ImageReader(\n args.data_dir,\n args.data_list,\n None, # No defined input size.\n False, # No random scale.\n False, # No random mirror.\n args.ignore_label,\n IMG_MEAN,\n coord)\n image, label = reader.image, reader.label\n image_batch, label_batch = tf.expand_dims(image, dim=0), tf.expand_dims(label, dim=0) # Add one batch dimension.\n\n # Create network.\n net = DeepLabResNetModel({'data': image_batch}, is_training=False, num_classes=args.num_classes)\n\n # Which variables to load.\n restore_var = tf.global_variables()\n \n # Predictions.\n raw_output = net.layers['fc_out']\n raw_output = tf.image.resize_bilinear(raw_output, tf.shape(image_batch)[1:3,])\n raw_output = tf.argmax(raw_output, dimension=3)\n pred = tf.expand_dims(raw_output, dim=3) # Create 4-d tensor.\n\n # mIoU\n\n pred_flatten = tf.reshape(pred, [-1,])\n gt = tf.reshape(label_batch, [-1,])\n weights = tf.cast(tf.less_equal(gt, args.num_classes - 1), tf.int32) # Ignoring all labels greater than or equal to n_classes.\n mIoU, update_op = tf.contrib.metrics.streaming_mean_iou(predictions=pred_flatten, labels=gt, num_classes=args.num_classes, weights=weights)\n \n # Set up tf session and initialize variables. 
\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True \n sess = tf.Session(config=config)\n init = tf.global_variables_initializer()\n \n sess.run(init)\n sess.run(tf.local_variables_initializer())\n \n # Load weights.\n loader = tf.train.Saver(var_list=restore_var)\n\n ckpt = tf.train.get_checkpoint_state(SNAPSHOT_DIR)\n\n if ckpt and ckpt.model_checkpoint_path:\n loader = tf.train.Saver(var_list=restore_var)\n load_step = int(os.path.basename(ckpt.model_checkpoint_path).split('-')[1])\n load(loader, sess, ckpt.model_checkpoint_path)\n else:\n print('No checkpoint file found.')\n load_step = 0\n\n # Start queue threads.\n threads = tf.train.start_queue_runners(coord=coord, sess=sess)\n\n if not os.path.exists(SAVE_DIR):\n os.makedirs(SAVE_DIR)\n\n for step in range(args.num_steps):\n preds, _ = sess.run([pred, update_op])\n\n if IS_SAVE == True:\n msk = decode_labels(preds, num_classes=args.num_classes)\n im = Image.fromarray(msk[0])\n filename = 'mask' + str(step) + '.png'\n im.save(SAVE_DIR + filename)\n\n if step % 10 == 0:\n print('step {0} mIoU: {1}'.format(step, mIoU.eval(session=sess)))\n\n coord.request_stop()\n coord.join(threads)", "def make_tflite_inference(ndvi_img_array, model_interpreter):\n # Get input and output tensors.\n input_details = model_interpreter.get_input_details()\n output_details = model_interpreter.get_output_details()\n\n # Get Input shape\n input_shape = input_details[0]['shape']\n input_data = ndvi_img_array.reshape(input_shape)\n\n model_interpreter.set_tensor(input_details[0]['index'], input_data)\n model_interpreter.invoke()\n\n outputs = []\n\n for tensor in output_details:\n output_data = model_interpreter.get_tensor(tensor['index'])\n outputs.append(output_data[0][0])\n\n prediction = outputs[0]\n\n return prediction", "def create_inference_tasks(task_queue, image_layer_path, convnet_path, \n mask_layer_path, output_layer_path, output_block_start, output_block_size, \n grid_size, patch_size, patch_overlap, cropping_margin_size,\n output_key='output', num_output_channels=3, \n image_mip=1, output_mip=1, mask_mip=3):\n for z in tqdm(range(grid_size[0]), desc='z loop'):\n for y in range(grid_size[1]):\n for x in range(grid_size[2]):\n output_offset = tuple(s+x*b for (s, x, b) in \n zip(output_block_start, (z, y, x), \n output_block_size))\n task = InferenceTask(\n image_layer_path=image_layer_path,\n convnet_path=convnet_path,\n mask_layer_path=mask_layer_path,\n output_layer_path=output_layer_path,\n output_offset=output_offset,\n output_shape=output_block_size,\n patch_size=patch_size, \n patch_overlap=patch_overlap,\n cropping_margin_size=cropping_margin_size,\n output_key=output_key,\n num_output_channels=num_output_channels,\n image_mip=image_mip,\n output_mip=output_mip,\n mask_mip=mask_mip\n )\n task_queue.insert(task)\n task_queue.wait('Uploading InferenceTasks')\n\n vol = CloudVolume(output_layer_path, mip=output_mip)\n vol.provenance.processing.append({\n 'method': {\n 'task': 'InferenceTask',\n 'image_layer_path': image_layer_path,\n 'convnet_path': convnet_path,\n 'mask_layer_path': mask_layer_path,\n 'output_layer_path': output_layer_path,\n 'output_offset': output_offset,\n 'output_shape': output_block_size,\n 'patch_size': patch_size,\n 'patch_overlap': patch_overlap,\n 'cropping_margin_size': cropping_margin_size,\n 'output_key': output_key,\n 'num_output_channels': num_output_channels,\n 'image_mip': image_mip,\n 'output_mip': output_mip,\n 'mask_mip': mask_mip,\n },\n 'by': OPERATOR_CONTACT,\n 'date': strftime('%Y-%m-%d 
%H:%M %Z'),\n })\n vol.commit_provenance()", "def main():\n args = get_arguments()\n \n # Create queue coordinator.\n coord = tf.train.Coordinator()\n\n # Load reader.\n with tf.name_scope(\"create_inputs\"):\n reader = ImageReader(\n args.data_dir,\n args.data_list,\n None, # No defined input size.\n False, # No random scale.\n False, # No random mirror.\n args.ignore_label,\n IMG_MEAN,\n coord)\n image, label = reader.image, reader.label\n image_batch, label_batch = tf.expand_dims(image, dim=0), tf.expand_dims(label, dim=0) # Add one batch dimension.\n\n # Create network.\n net = DeepLabResNetModel({'data': image_batch}, is_training=False, num_classes=args.num_classes)\n\n # Which variables to load.\n restore_var = tf.global_variables()\n\n # Predictions.\n raw_output = net.layers['fc1_voc12']\n raw_output = tf.image.resize_bilinear(raw_output, tf.shape(image_batch)[1:3,])\n #raw_output = tf.argmax(raw_output, dimension=3)\n #pred = tf.expand_dims(raw_output, dim=3) # Create 4-d tensor.\n pred = raw_output\n\n # Set up TF session and initialize variables.\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n sess = tf.Session(config=config)\n init = tf.global_variables_initializer()\n\n sess.run(init)\n sess.run(tf.local_variables_initializer())\n\n # Load weights.\n loader = tf.train.Saver(var_list=restore_var)\n if args.restore_from is not None:\n load(loader, sess, args.restore_from)\n\n # Start queue threads.\n threads = tf.train.start_queue_runners(coord=coord, sess=sess)\n\n with open(args.data_list) as f:\n content = f.readlines()\n\n content = [x.strip() for x in content]\n\n\n \n for index, value in enumerate(content):\n print(\"outputting \"+str(index))\n \timg = tf.image.decode_png(tf.read_file(value.split()[0]), channels=3)\n raw_img = misc.imread(value.split()[0])\n print(type(raw_img))\n \t# Convert RGB to BGR.\n \timg_r, img_g, img_b = tf.split(axis=2, num_or_size_splits=3, value=img)\n \timg = tf.cast(tf.concat(axis=2, values=[img_b, img_g, img_r]), dtype=tf.float32)\n \t# Extract mean.\n \timg -= IMG_MEAN \n \t# Predictions.\n \traw_output = net.layers['fc1_voc12']\n\n \traw_output_up = tf.image.resize_bilinear(raw_output, tf.shape(img)[0:2,])\n \t#pred = raw_output_up\n probabilities = tf.nn.softmax(raw_output_up)\n pred = tf.argmax(raw_output_up, dimension=3)\n \tpred = tf.expand_dims(pred, dim=3)\n \t# Perform inference.\n \tpreds, probs = sess.run([pred, probabilities])\n print(preds.shape)\n print(probs.shape)\n print(\"probs\")\n print(probs)\n softmax = probs[0, :, :, :]\n print(\"softmax\")\n print(softmax)\n print(softmax.shape)\n print(type(softmax))\n processed_probabilities = softmax.transpose((2, 0, 1))\n print(processed_probabilities.shape)\n print(type(processed_probabilities))\n crf_processed = performCRF(processed_probabilities, raw_img)\n\n im_preds = Image.fromarray(np.uint8(preds[0, :, :, 0]))\n\n print(\"preds shape\", preds.shape)\n \tmsk = decode_labels(preds, num_classes=args.num_classes)\n \tim = Image.fromarray(msk[0])\n\n print(\"crf_processed shape\", crf_processed.shape)\n crf_processed = crf_processed.reshape(1, crf_processed.shape[0], crf_processed.shape[1], 1)\n msk_crf = decode_labels(crf_processed, num_classes=args.num_classes)\n im_crf = Image.fromarray(msk_crf[0])\n\n \tif not os.path.exists(args.save_dir):\n os.makedirs(args.save_dir)\n #im_preds.save(args.save_dir +str(index).zfill(8) +'_predlabels_'+args.train_set+'.png')\n \tim.save(args.save_dir +str(index).zfill(8) +'_pred_'+args.train_set+'.png')\n 
im_crf.save(args.save_dir +str(index).zfill(8) +'_predcrf_'+args.train_set+'.png')", "def inference():\n inf_dataset = dataset\n net.eval()\n frames_gen, frame_cnt, rel_props, prop_ticks, prop_scaling = inf_dataset[index]\n \n num_crop = args.test_crops\n length = 3\n if args.modality == 'Flow':\n length = 10\n elif args.modality == 'RGBDiff':\n length = 18\n \n # First get the base_out outputs\n base_output = torch.autograd.Variable(torch.zeros((num_crop, frame_cnt, base_out_dim)).cuda(),\n volatile=True)\n cnt = 0\n for frames in frames_gen:\n # frames.shape == [frame_batch_size * num_crops * 3, 224, 224]\n # frame_batch_size is 4 by default\n input_var = torch.autograd.Variable(frames.view(-1, length, frames.size(-2), frames.size(-1)).cuda(),\n volatile=True)\n base_out = net(input_var, None, None, None, None)\n bsc = base_out.view(num_crop, -1, base_out_dim)\n base_output[:, cnt:cnt+bsc.size(1), :] = bsc\n cnt += bsc.size(1)\n\n n_frames = base_output.size(1)\n assert frame_cnt == n_frames\n # GLCU\n step_features = base_output.mean(dim=0).mean(dim=0).unsqueeze(0)\n gate, glcu_task_pred = net.glcu(step_features)\n glcu_task_pred = F.softmax(glcu_task_pred.squeeze(), dim=0).data.cpu().numpy()\n gate = gate.repeat(1, num_crop * n_frames).view(num_crop, n_frames, base_out_dim)\n if net.additive_glcu:\n base_output = base_output + gate\n else:\n base_output = base_output * gate\n\n # output.shape == [num_frames, 7791]\n output = torch.zeros((frame_cnt, output_dim)).cuda()\n cnt = 0\n for i in range(0, frame_cnt, 4):\n base_out = base_output[:, i:i+4, :].contiguous().view(-1, base_out_dim)\n rst = net.test_fc(base_out)\n sc = rst.data.view(num_crop, -1, output_dim).mean(dim=0)\n output[cnt: cnt + sc.size(0), :] = sc\n cnt += sc.size(0)\n base_output = base_output.mean(dim=0).data\n\n # act_scores.shape == [num_proposals, K+1]\n # comp_scores.shape == [num_proposals, K]\n act_scores, comp_scores, reg_scores = reorg_stpp.forward(output, prop_ticks, prop_scaling)\n act_scores = torch.autograd.Variable(act_scores, volatile=True)\n comp_scores = torch.autograd.Variable(comp_scores, volatile=True)\n\n # Task Head\n combined_scores = F.softmax(act_scores[:, 1:], dim=1) * torch.exp(comp_scores)\n combined_scores = combined_scores.mean(dim=0).unsqueeze(0)\n task_pred = F.softmax(net.task_head(combined_scores).squeeze(), dim=0).data.cpu().numpy()\n\n act_scores = act_scores.data\n comp_scores = comp_scores.data\n\n if reg_scores is not None:\n reg_scores = reg_scores.view(-1, num_class, 2)\n reg_scores[:, :, 0] = reg_scores[:, :, 0] * stats[1, 0] + stats[0, 0]\n reg_scores[:, :, 1] = reg_scores[:, :, 1] * stats[1, 1] + stats[0, 1]\n\n torch.cuda.empty_cache() # To empty the cache from previous iterations\n\n # perform stpp on scores\n return ((inf_dataset.video_list[index].id,\n (rel_props.numpy(), act_scores.cpu().numpy(), comp_scores.cpu().numpy(), reg_scores.cpu().numpy(), \n glcu_task_pred, task_pred),\n output.cpu().numpy(),\n base_output.cpu().numpy()))", "def create_training_args(self, input_dict: Dict[Text, List[types.Artifact]],\n output_dict: Dict[Text, List[types.Artifact]],\n exec_properties: Dict[Text, Any],\n executor_class_path: Text,\n training_inputs: Dict[Text, Any],\n job_id: Optional[Text]) -> Dict[Text, Any]:\n training_inputs = training_inputs.copy()\n\n json_inputs = artifact_utils.jsonify_artifact_dict(input_dict)\n logging.info('json_inputs=\\'%s\\'.', json_inputs)\n json_outputs = artifact_utils.jsonify_artifact_dict(output_dict)\n logging.info('json_outputs=\\'%s\\'.', 
json_outputs)\n json_exec_properties = json.dumps(exec_properties, sort_keys=True)\n logging.info('json_exec_properties=\\'%s\\'.', json_exec_properties)\n\n # We use custom containers to launch training on AI Platform (unified),\n # which invokes the specified image using the container's entrypoint. The\n # default entrypoint for TFX containers is to call scripts/run_executor.py.\n # The arguments below are passed to this run_executor entry to run the\n # executor specified in `executor_class_path`.\n container_command = _CONTAINER_COMMAND + [\n '--executor_class_path',\n executor_class_path,\n '--inputs',\n json_inputs,\n '--outputs',\n json_outputs,\n '--exec-properties',\n json_exec_properties,\n ]\n\n if not training_inputs.get('worker_pool_specs'):\n training_inputs['worker_pool_specs'] = [{}]\n\n for worker_pool_spec in training_inputs['worker_pool_specs']:\n if not worker_pool_spec.get('container_spec'):\n worker_pool_spec['container_spec'] = {\n 'image_uri': _TFX_IMAGE,\n }\n\n # Always use our own entrypoint instead of relying on container default.\n if 'command' in worker_pool_spec['container_spec']:\n logging.warn('Overriding custom value of container_spec.command')\n worker_pool_spec['container_spec']['command'] = container_command\n\n # Pop project_id so AIP doesn't complain about an unexpected parameter.\n # It's been a stowaway in aip_args and has finally reached its destination.\n project = training_inputs.pop('project')\n with telemetry_utils.scoped_labels(\n {telemetry_utils.LABEL_TFX_EXECUTOR: executor_class_path}):\n job_labels = telemetry_utils.get_labels_dict()\n\n # 'tfx_YYYYmmddHHMMSS' is the default job display name if not explicitly\n # specified.\n job_id = job_id or 'tfx_{}'.format(\n datetime.datetime.now().strftime('%Y%m%d%H%M%S'))\n\n training_args = {\n 'job_id': job_id,\n 'project': project,\n 'training_input': training_inputs,\n 'job_labels': job_labels\n }\n\n return training_args", "def main(model_arch: str, images: List, batch_size: int,\n batches_per_step: int, loop: bool, num_iterations: int, num_ipus: int, mode: str, data: str,\n available_memory_proportion: float, gen_report: bool, save_graph_pb: bool, use_ipu_model: bool) -> None:\n\n if (available_memory_proportion <= 0.05) or (available_memory_proportion > 1):\n raise ValueError('Invalid \"availableMemoryProportion\" value: must be a float >=0.05'\n ' and <=1 (default value is 0.6)')\n\n if \"TF_POPLAR_FLAGS\" in os.environ:\n os.environ[\"TF_POPLAR_FLAGS\"] += \" --log_cycle_count=0\"\n else:\n os.environ[\"TF_POPLAR_FLAGS\"] = \"--log_cycle_count=0\"\n\n if data == \"synthetic\":\n if \"TF_POPLAR_FLAGS\" in os.environ:\n os.environ[\"TF_POPLAR_FLAGS\"] += \" --use_synthetic_data --synthetic_data_initializer=random\"\n else:\n os.environ[\"TF_POPLAR_FLAGS\"] = \"--use_synthetic_data --synthetic_data_initializer=random\"\n else:\n os.environ[\"TF_POPLAR_FLAGS\"] = \"\"\n\n if use_ipu_model:\n if \"TF_POPLAR_FLAGS\" in os.environ:\n os.environ[\"TF_POPLAR_FLAGS\"] += \" --use_ipu_model\"\n else:\n os.environ[\"TF_POPLAR_FLAGS\"] = \"--use_ipu_model\"\n\n # Select model architecture\n model_cls = model_dict[model_arch]\n if model_arch == 'googlenet':\n model_arch = 'inceptionv1'\n config = Path(f'configs/{model_arch}.yml')\n\n # Create graph and data iterator\n loop_op, infeed_initializer, outfeed_op = construct_graph(model_cls, config,\n f\"./checkpoints/{model_arch}/\",\n batch_size, batches_per_step,\n images, loop,\n model_cls.preprocess_method(), num_ipus,\n mode, save_graph_pb)\n # Run on 
model or device\n if gen_report:\n get_report(loop_op, infeed_initializer, outfeed_op, f\"{config.stem}_report.txt\",\n available_memory_proportion=available_memory_proportion)\n else:\n ground_truth = tuple([Path(filename).stem for filename in images])\n run_inference(loop_op, infeed_initializer, outfeed_op, batch_size, batches_per_step, config.stem,\n model_cls.decode_method(), ground_truth, num_iterations, num_ipus, mode, data,\n available_memory_proportion=available_memory_proportion)", "def main():\n args = get_arguments()\n \n # Create queue coordinator.\n coord = tf.train.Coordinator()\n \n # Load reader.\n with tf.name_scope(\"create_inputs\"):\n reader = ImageReader_MultiClass_Loss(\n args.data_dir,\n args.data_list,\n None, # No defined input size.\n RANDOM_SEED,\n False, # No random scale.\n False, # No random mirror.\n coord)\n image, l2_catg, binary_catg, hinge_catg = reader.image, reader.l2_catg, reader.binary_catg, reader.hinge_catg\n image_batch = tf.expand_dims(image, dim=0)\n binary_catg_batch = tf.expand_dims(binary_catg, dim=0)\n\n # Create network.\n net = DeepLabResNetModel({'data': image_batch}, is_training=False)\n\n # Which variables to load.\n restore_var = tf.global_variables()\n \n # Predictions.\n raw_output = net.layers['fc1_voc12']\n\n # Do the global average pooling\n raw_output_bcgd_rmvd = raw_output[:,:,:,1:]\n g_avg_pool = tf.reduce_mean(tf.reduce_mean(raw_output_bcgd_rmvd, axis=1, keep_dims=True),\\\n axis=2, keep_dims=True) # Avg across the width and height dimension -> [Bx21]\n g_avg_pool_sqzd = tf.squeeze(g_avg_pool, axis=[1, 2])\n pred = tf.nn.softmax(g_avg_pool_sqzd)\n\n # Get the class activation map\n raw_output_up = tf.image.resize_bilinear(raw_output_bcgd_rmvd, tf.shape(image_batch)[1:3,])\n raw_output_up = raw_output_up - tf.reduce_min(tf.reduce_min(raw_output_up, axis=1, keep_dims=True), axis=2, keep_dims=True) + EPSILON\n raw_output_up = raw_output_up / tf.reduce_max(tf.reduce_max(raw_output_up, axis=1, keep_dims=True), axis=2, keep_dims=True)\n cam_m_1 = tf.argmax(raw_output_up, dimension=3) + 1\n raw_output_catgs_rmvd = raw_output_up * tf.expand_dims(tf.expand_dims(binary_catg_batch, 1), 2)\n cam_m_2 = tf.argmax(raw_output_catgs_rmvd, dimension=3) + 1\n cam = tf.cast(tf.equal(cam_m_1, cam_m_2), tf.int64) * cam_m_1\n\n cam_batch = tf.expand_dims(cam, dim=3)\n\n # Set up tf session and initialize variables. 
\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n sess = tf.Session(config=config)\n init = tf.global_variables_initializer()\n \n sess.run(init)\n sess.run(tf.local_variables_initializer())\n \n # Load weights.\n loader = tf.train.Saver(var_list=restore_var)\n if args.restore_from is not None:\n load(loader, sess, args.restore_from)\n \n # Start queue threads.\n threads = tf.train.start_queue_runners(coord=coord, sess=sess)\n \n # Iterate over training steps.\n for step in range(args.num_steps):\n preds, images, cams, bin_catg = sess.run([pred, image_batch, cam_batch, binary_catg])\n \"\"\"\n print(bin_catg)\n print(np.unique(np.unique(cams)))\n \"\"\"\n img = inv_preprocess(images)\n attMap = decode_labels(cams)\n output_dir = './output_maps_binary_without_norm/'\n img_name = output_dir + str(step) + '.jpg'\n map_name = output_dir + str(step) + '.png'\n misc.imsave(img_name, img[0,:,:,:])\n misc.imsave(map_name, attMap[0,:,:,:])\n coord.request_stop()\n coord.join(threads)", "def infer_on_stream(args, client):\n # Initialise the class\n infer_network = Network()\n # Set Probability threshold for detections\n prob_threshold = args.prob_threshold\n\n ### TODO: Load the model through `infer_network` ###\n infer_network.exec_network = infer_network.load_model\\\n (args.model, args.device, args.cpu_extension)\n # extract information about model input layer\n (b, c, input_height, input_width) = infer_network.get_input_shape()\n\n ### TODO: Handle the input stream ###\n # extenstion of input file\n input_extension = os.path.splitext(args.input)[1].lower()\n supported_vid_exts = ['.mp4', '.mpeg', '.avi', '.mkv']\n supported_img_exts = [\".bmp\",\".dib\", \".jpeg\", \".jp2\", \".jpg\", \".jpe\",\\\n \".png\", \".pbm\", \".pgm\", \".ppm\", \".sr\", \".ras\", \".tiff\", \".tif\"]\n single_image_mode = False\n # if input is camera\n if args.input.upper() == 'CAM':\n capture = cv2.VideoCapture(0)\n \n # if input is video\n elif input_extension in supported_vid_exts:\n capture = cv2.VideoCapture(args.input)\n \n # if input is image\n elif input_extension in supported_img_exts:\n single_image_mode = True\n capture = cv2.VideoCapture(args.input) \n capture.open(args.input)\n else:\n sys.exit(\"FATAL ERROR : The format of your input file is not supported\" \\\n \"\\nsupported extensions are : \" + \", \".join(supported_exts))\n prev_count = 0\n total_persons = 0\n ### TODO: Loop until stream is over ###\n while (capture.isOpened()):\n ### TODO: Read from the video capture ###\n ret, frame = capture.read()\n if not ret:\n break\n ### TODO: Pre-process the image as needed ###\n image = preprocessing(frame, input_width, input_height)\n ### TODO: Start asynchronous inference for specified request ###\n start_time = time.time()\n # run inference\n infer_network.exec_net(image)\n ### TODO: Wait for the result ###\n if infer_network.wait() == 0:\n infer_time = time.time() - start_time\n ### TODO: Get the results of the inference request ###\n outputs = infer_network.get_output()[0][0]\n ### Take model output and extract number of detections with confidence exceeding threshold\n ### and draw bounding boxes around detections\n out_image, current_count = apply_threshold(outputs, frame, prob_threshold)\n \n # show inference time on image\n cv2.putText(out_image, \"inference time: {:.5f} ms\".format(infer_time), (30, 30),\\\n cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 255, 0), 1)\n \n ### TODO: Extract any desired stats from the results ###\n # when any person exit\n if current_count < prev_count:\n 
### Topic \"person/duration\": key of \"duration\" ###\n # send duration to mqtt server client\n client.publish(\"person/duration\", json.dumps({\"duration\": time.time() - p_start}))\n\n # when new person enters\n if current_count > prev_count:\n total_persons += current_count - prev_count\n p_start = time.time()\n \n prev_count = current_count\n \n ### TODO: Calculate and send relevant information on ###\n ### current_count, total_count and duration to the MQTT server ###\n ### Topic \"person\": keys of \"count\" and \"total\" ###\n client.publish(\"person\", json.dumps({\"count\": current_count,\"total\": total_persons}))\n ### TODO: Send the frame to the FFMPEG server ###\n sys.stdout.buffer.write(out_image)\n sys.stdout.buffer.flush()\n ### TODO: Write an output image if `single_image_mode` ###\n if single_image_mode:\n cv2.imwrite(\"output_frame.png\", out_image)\n # release resources\n capture.release()\n cv2.destroyAllWindows()\n client.disconnect()\n del infer_network" ]
[ "0.6420253", "0.5924102", "0.580996", "0.57128006", "0.56789887", "0.5605435", "0.5600299", "0.5599023", "0.5588424", "0.5582421", "0.5561573", "0.549888", "0.5475646", "0.54755354", "0.5463997", "0.5414356", "0.5413791", "0.54131633", "0.5407875", "0.5404318", "0.5400513", "0.53894883", "0.538744", "0.5377591", "0.53697145", "0.53655887", "0.5351366", "0.5330848", "0.5321642", "0.53207666" ]
0.7680035
0
Creates bottleneck tensor for a given style image.
def run_style_predict(self, style_image): # The style image has to be preprocessed to (1, 256, 256, 3) preprocessed_style_image = cv_utils.preprocess(style_image, self.style_predict_executor.get_data_type(), self.style_predict_executor.get_shape(), True, keep_aspect_ratio=False) # output[0] is the style bottleneck tensor style_bottleneck = self.style_predict_executor.run([preprocessed_style_image])[0] return style_bottleneck
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fit_style_transfer(style_image, content_image, style_weight=1e-2, content_weight=1e-4, \n optimizer='adam', epochs=1, steps_per_epoch=1):\n\n images = []\n step = 0\n\n # get the style image features \n style_targets = get_style_image_features(style_image)\n \n # get the content image features\n content_targets = get_content_image_features(content_image)\n\n # initialize the generated image for updates\n generated_image = tf.cast(content_image, dtype=tf.float32)\n generated_image = tf.Variable(generated_image) \n \n # collect the image updates starting from the content image\n images.append(content_image)\n \n for n in range(epochs):\n for m in range(steps_per_epoch):\n step += 1\n \n ### START CODE HERE ###\n # Update the image with the style using the function that you defined\n \n update_image_with_style(image=generated_image,\n style_targets=style_targets,\n content_targets=content_targets,\n style_weight=style_weight,\n content_weight=content_weight,\n optimizer=optimizer)\n\n ### END CODE HERE\n\n print(\".\", end='')\n if (m + 1) % 10 == 0:\n images.append(generated_image)\n \n # display the current stylized image\n clear_output(wait=True)\n display_image = tensor_to_image(generated_image)\n display_fn(display_image)\n\n # append to the image collection for visualization later\n images.append(generated_image)\n print(\"Train step: {}\".format(step))\n \n # convert to uint8 (expected dtype for images with pixels in the range [0,255])\n generated_image = tf.cast(generated_image, dtype=tf.uint8)\n \n return generated_image, images", "def create_img_tensor(img):\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n img_tensor = Variable(normalize(torch.from_numpy(np.transpose(img.astype(np.float32) / 255, (2, 0, 1)))))\n img_tensor = img_tensor.cuda()\n img_tensor = img_tensor.unsqueeze_(0)\n\n return img_tensor", "def get_or_create_bottleneck(sess, image_name, image_folder, bottleneck_dir,\n jpeg_data_tensor, bottleneck_tensor):\n # label_lists = image_lists[label_name]\n # sub_dir = label_lists['dir']\n image_folder_path = os.path.join(bottleneck_dir, image_folder)\n ensure_dir_exists(image_folder_path)\n bottleneck_path = get_bottleneck_path(image_name, image_folder)\n\n if not os.path.exists(bottleneck_path):\n create_bottleneck_file(sess, bottleneck_path, image_name,\n image_folder, jpeg_data_tensor, bottleneck_tensor)\n\n with open(bottleneck_path, 'r') as bottleneck_file:\n bottleneck_string = bottleneck_file.read()\n did_hit_error = False\n\n try:\n bottleneck_values = [float(x) for x in bottleneck_string.split(',')]\n except:\n print(\"Invalid float found, recreating bottleneck\")\n did_hit_error = True\n\n if did_hit_error:\n create_bottleneck_file(sess, bottleneck_path, image_name,\n image_folder, jpeg_data_tensor, bottleneck_tensor)\n\n with open(bottleneck_path, 'r') as bottleneck_file:\n bottleneck_string = bottleneck_file.read()\n\n # Allow exceptions to propagate here, since they shouldn't happen after\n # a fresh creation\n bottleneck_values = [float(x) for x in bottleneck_string.split(',')]\n\n return bottleneck_values", "def stylization(stretched_image, style_image,\r\n\t\talpha = 1.0, style_size = 512, crop_size = 0):\r\n\ttf.reset_default_graph()\r\n\r\n\tassert stretched_image.ndim == 3\r\n\t\r\n\tcp = [\"./models/relu5_1\",\r\n\t\t\t \"./models/relu4_1\",\r\n\t\t \"./models/relu3_1\",\r\n\t\t \"./models/relu2_1\",\r\n\t\t \"./models/relu1_1\"]\r\n\trelu_targets = [\"relu5_1\", \"relu4_1\", \"relu3_1\", \"relu2_1\", 
\"relu1_1\"]\r\n\t\t#*****************\r\n\t\t## need to modify checkpoints, relu_targets, and vgg_path\r\n\twct_model = WCT(checkpoints=cp,\r\n\t\t relu_targets=relu_targets,\r\n\t\t vgg_path='./models/vgg_normalised.t7'\r\n\r\n\t\t )\r\n\r\n\r\n\tfor style_fullpath in style_image:\r\n\t\tstyle_prefix, style_ext = os.path.splitext(style_fullpath)\r\n\t\tstyle_prefix = os.path.basename(style_prefix) # Extract filename prefix without ext\r\n\r\n\t\tstyle_img = skimage.io.imread(style_fullpath)\r\n\r\n\t\tif style_size > 0:\r\n\t\t\tstyle_img = resize_to(style_img, style_size)\r\n\t\tif crop_size > 0:\r\n\t\t\tstyle_img = center_crop(style_img, crop_size)\r\n\r\n\t\t\"\"\"\r\n\t if keep_colors:\r\n\t style_img = preserve_colors_np(style_img, content_img)\r\n\t \"\"\"\r\n\t # Run the frame through the style network\r\n\r\n\t\tstylized_rgb = wct_model.predict(stretched_image, style_img, alpha).astype(\"uint8\")\r\n\r\n\r\n\t ## the stylized_rgb size may not be equal to the original content image size\r\n\t\tstylized_rgb = image_align(stretched_image, stylized_rgb)\r\n\r\n\r\n\treturn stylized_rgb", "def style_transfer(vgg_model, content_tensor, style_tensor,\n prop, device, n_iterations, learning_rate):\n # creating a random image and set requires_grad to True\n target_image = torch.randn_like(content_tensor).requires_grad_(True).to(device)\n # extract content features\n content_features = __get_features(vgg_model, content_tensor)\n # create optimizer to optimize the target image\n optimizer = torch.optim.Adam([target_image], lr=learning_rate)\n for i in range(n_iterations):\n optimizer.zero_grad()\n\n target_features = __get_features(vgg_model, target_image)\n content_loss = __calculate_content_loss(content_features, target_features, \"10\")\n style_loss = __new_style_loss(target_image, style_tensor, prop, device)\n total_loss = content_loss + style_loss\n\n total_loss.backward()\n optimizer.step()\n\n if i % 50 == 0:\n print(\n f\"Iteration {i}, Total Loss: {total_loss.item():.2f}, Content Loss: {content_loss.item():.2f}\"\n f\", Style Loss {style_loss.item():.2f}\")\n\n return target_image", "def CreateTensor(tensor):\n return CreateTensorCC(_stringify_tensor(tensor))", "def style_transfer(content_image, style_image,\n content_layer_ids, style_layer_ids,\n weight_content=1.5, weight_style=10.0,\n weight_denoise=0.3,\n num_iterations=120, step_size=10.0):\n\n # Create an instance of the VGG16-model. 
This is done\n # in each call of this function, because we will add\n # operations to the graph so it can grow very large\n # and run out of RAM if we keep using the same instance.\n model = vgg16.VGG16()\n\n # Create a TensorFlow-session.\n session = tf.InteractiveSession(graph=model.graph)\n\n # Print the names of the content-layers.\n print(\"Content layers:\")\n print(model.get_layer_names(content_layer_ids))\n print()\n\n # Print the names of the style-layers.\n print(\"Style layers:\")\n print(model.get_layer_names(style_layer_ids))\n print()\n\n # Create the loss-function for the content-layers and -image.\n loss_content = create_content_loss(session=session,\n model=model,\n content_image=content_image,\n layer_ids=content_layer_ids)\n\n # Create the loss-function for the style-layers and -image.\n loss_style = create_style_loss(session=session,\n model=model,\n style_image=style_image,\n layer_ids=style_layer_ids)\n\n # Create the loss-function for the denoising of the mixed-image.\n loss_denoise = create_denoise_loss(model)\n\n # Create TensorFlow variables for adjusting the values of\n # the loss-functions. This is explained below.\n adj_content = tf.Variable(1e-10, name='adj_content')\n adj_style = tf.Variable(1e-10, name='adj_style')\n adj_denoise = tf.Variable(1e-10, name='adj_denoise')\n\n # Initialize the adjustment values for the loss-functions.\n session.run([adj_content.initializer,\n adj_style.initializer,\n adj_denoise.initializer])\n\n # Create TensorFlow operations for updating the adjustment values.\n # These are basically just the reciprocal values of the\n # loss-functions, with a small value 1e-10 added to avoid the\n # possibility of division by zero.\n update_adj_content = adj_content.assign(1.0 / (loss_content + 1e-10))\n update_adj_style = adj_style.assign(1.0 / (loss_style + 1e-10))\n update_adj_denoise = adj_denoise.assign(1.0 / (loss_denoise + 1e-10))\n\n # This is the weighted loss-function that we will minimize\n # below in order to generate the mixed-image.\n # Because we multiply the loss-values with their reciprocal\n # adjustment values, we can use relative weights for the\n # loss-functions that are easier to select, as they are\n # independent of the exact choice of style- and content-layers.\n loss_combined = weight_content * adj_content * loss_content + \\\n weight_style * adj_style * loss_style + \\\n weight_denoise * adj_denoise * loss_denoise\n\n # Use TensorFlow to get the mathematical function for the\n # gradient of the combined loss-function with regard to\n # the input image.\n gradient = tf.gradients(loss_combined, model.input)\n\n # List of tensors that we will run in each optimization iteration.\n run_list = [gradient, update_adj_content, update_adj_style, \\\n update_adj_denoise]\n\n # The mixed-image is initialized with random noise.\n # It is the same size as the content-image.\n mixed_image = np.random.rand(*content_image.shape) + 128\n\n for i in range(num_iterations):\n # Create a feed-dict with the mixed-image.\n feed_dict = model.create_feed_dict(image=mixed_image)\n\n # Use TensorFlow to calculate the value of the\n # gradient, as well as updating the adjustment values.\n grad, adj_content_val, adj_style_val, adj_denoise_val \\\n = session.run(run_list, feed_dict=feed_dict)\n\n # Reduce the dimensionality of the gradient.\n grad = np.squeeze(grad)\n\n # Scale the step-size according to the gradient-values.\n step_size_scaled = step_size / (np.std(grad) + 1e-8)\n\n # Update the image by following the gradient.\n mixed_image -= 
grad * step_size_scaled\n\n # Ensure the image has valid pixel-values between 0 and 255.\n mixed_image = np.clip(mixed_image, 0.0, 255.0)\n\n # Print a little progress-indicator.\n print(\". \", end=\"\")\n\n # Display status once every 10 iterations, and the last.\n if (i % 10 == 0) or (i == num_iterations - 1):\n print()\n print(\"Iteration:\", i)\n\n # Print adjustment weights for loss-functions.\n msg = \"Weight Adj. for Content: {0:.2e}, Style: {1:.2e}, Denoise: {2:.2e}\"\n print(msg.format(adj_content_val, adj_style_val, adj_denoise_val))\n\n # Plot the content-, style- and mixed-images.\n # plot_images(content_image=content_image,\n # style_image=style_image,\n # mixed_image=mixed_image)\n\n print()\n # print(\"Final image:\")\n # plot_image_big(mixed_image)\n\n # Close the TensorFlow session to release its resources.\n session.close()\n\n # Return the mixed-image.\n return mixed_image", "def tent(target_size: Optional[Tuple[int, int]] = None, rgb: bool = True) -> Tensor:\n return imread(HERE+'tent.jpg', target_size=target_size, rgb=rgb)", "def style_transfer(content_image,\n content_layer_ids,\n num_iterations=120, step_size=10.0):\n\n # Create an instance of the VGG16-model. This is done\n # in each call of this function, because we will add\n # operations to the graph so it can grow very large\n # and run out of RAM if we keep using the same instance.\n model = vgg16.VGG16()\n\n # Create a TensorFlow-session.\n session = tf.InteractiveSession(graph=model.graph)\n\n # Print the names of the content-layers.\n print(\"Content layers:\")\n print(model.get_layer_names(content_layer_ids))\n print()\n\n\n\n # Create the loss-function for the content-layers and -image.\n loss_content = create_content_loss(session=session,\n model=model,\n content_image=content_image,\n layer_ids=content_layer_ids)\n\n\n\n\n\n\n\n # This is the weighted loss-function that we will minimize\n # below in order to generate the mixed-image.\n # Because we multiply the loss-values with their reciprocal\n # adjustment values, we can use relative weights for the\n # loss-functions that are easier to select, as they are\n # independent of the exact choice of style- and content-layers.\n loss_combined = loss_content \n \n\n\n # Use TensorFlow to get the mathematical function for the\n # gradient of the combined loss-function with regard to\n # the input image.\n gradient = tf.gradients(loss_combined, model.input)\n\n # List of tensors that we will run in each optimization iteration.\n run_list = [gradient]\n\n # The mixed-image is initialized with random noise.\n # It is the same size as the content-image.\n mixed_image = np.random.rand(*content_image.shape) + 128\n\n for i in range(num_iterations):\n # Create a feed-dict with the mixed-image.\n feed_dict = model.create_feed_dict(image=mixed_image)\n\n # Use TensorFlow to calculate the value of the\n # gradient, as well as updating the adjustment values.\n grad = session.run(run_list, feed_dict=feed_dict)\n\n # Reduce the dimensionality of the gradient.\n grad = np.squeeze(grad)\n\n # Scale the step-size according to the gradient-values.\n step_size_scaled = step_size / (np.std(grad) + 1e-8)\n\n # Update the image by following the gradient.\n mixed_image -= grad * step_size_scaled\n\n # Ensure the image has valid pixel-values between 0 and 255.\n mixed_image = np.clip(mixed_image, 0.0, 255.0)\n\n # Print a little progress-indicator.\n print(\". 
\", end=\"\")\n\n # Display status once every 10 iterations, and the last.\n if (i % 10 == 0) or (i == num_iterations - 1):\n print()\n print(\"Iteration:\", i)\n # Plot the content-, style- and mixed-images.\n plot_images(content_image=content_image,\n mixed_image=mixed_image)\n \n print()\n print(\"Final image:\")\n plot_image_big(mixed_image)\n\n # Close the TensorFlow session to release its resources.\n session.close()\n \n # Return the mixed-image.\n return mixed_image", "def run_bottleneck_on_image(sess, image_data, image_data_tensor, bottleneck_tensor):\n bottleneck_values = sess.run(\n bottleneck_tensor,\n {image_data_tensor: image_data})\n bottleneck_values = np.squeeze(bottleneck_values)\n return bottleneck_values", "def create_style_loss(session, model, style_image, layer_ids):\n\n # Create a feed-dict with the style-image.\n feed_dict = model.create_feed_dict(image=style_image)\n\n # Get references to the tensors for the given layers.\n layers = model.get_layer_tensors(layer_ids)\n\n # Set the model's graph as the default so we can add\n # computational nodes to it. It is not always clear\n # when this is necessary in TensorFlow, but if you\n # want to re-use this code then it may be necessary.\n with model.graph.as_default():\n # Construct the TensorFlow-operations for calculating\n # the Gram-matrices for each of the layers.\n gram_layers = [gram_matrix(layer) for layer in layers]\n\n # Calculate the values of those Gram-matrices when\n # feeding the style-image to the model.\n values = session.run(gram_layers, feed_dict=feed_dict)\n\n # Initialize an empty list of loss-functions.\n layer_losses = []\n\n # For each Gram-matrix layer and its corresponding values.\n for value, gram_layer in zip(values, gram_layers):\n # These are the Gram-matrix values that are calculated\n # for this layer in the model when inputting the\n # style-image. Wrap it to ensure it is a const,\n # although this may be done automatically by TensorFlow.\n value_const = tf.constant(value)\n\n # The loss-function for this layer is the\n # Mean Squared Error between the Gram-matrix values\n # for the content- and mixed-images.\n # Note that the mixed-image is not calculated\n # yet, we are merely creating the operations\n # for calculating the MSE between those two.\n loss = mean_squared_error(gram_layer, value_const)\n\n # Add the loss-function for this layer to the\n # list of loss-functions.\n layer_losses.append(loss)\n\n # The combined loss for all layers is just the average.\n # The loss-functions could be weighted differently for\n # each layer. 
You can try it and see what happens.\n total_loss = tf.reduce_mean(layer_losses)\n\n return total_loss", "def controller(target_size: Optional[Tuple[int, int]] = None, rgb: bool = True) -> Tensor:\n return imread(HERE+'controller.jpg', target_size=target_size, rgb=rgb)", "def run_style_transfer(cnn, normalization, content_img, style_img, input_img, mask_img, num_steps = 3000,\n style_weight = 100, content_weight = 5):\n print('Building the style transfer model..')\n model, style_losses, content_losses = get_style_model_and_losses(cnn, normalization, style_img, content_img, mask_img)\n optimizer = LBFGS([input_img.requires_grad_()], max_iter=num_steps,lr = 1)\n\n print('Optimizing..')\n run = [0]\n def closure():\n optimizer.zero_grad()\n model(input_img)\n style_score = 0\n content_score = 0\n\n for sl in style_losses:\n style_score += sl.loss\n for cl in content_losses:\n content_score += cl.loss\n\n style_score *= style_weight\n content_score *= content_weight\n\n loss = style_score + content_score\n loss.backward()\n\n if run[0] % 100 == 0:\n print(\"run {}:\".format(run))\n print('Style Loss : {} Content Loss: {}'.format(style_score.item(), content_score.item()))\n # print()\n # plt.figure(figsize = (8, 8))\n #imshow(input_img.clone())\n run[0] += 1\n\n return style_score + content_score\n\n optimizer.step(closure)\n\n # a last correction...\n input_img.data.clamp_(0, 1)\n\n return input_img", "def black_cat(target_size: Optional[Tuple[int, int]] = None, rgb: bool = True) -> Tensor:\n return imread(HERE+'black_cat.jpg', target_size=target_size, rgb=rgb)", "def _load_target(self, index: int) -> Tensor:\n path = self.files[index][\"mask\"]\n with Image.open(path) as img:\n array: \"np.typing.NDArray[np.uint8]\" = np.array(img.convert(\"RGB\"))\n array = rgb_to_mask(array, self.colormap)\n tensor = torch.from_numpy(array)\n # Convert from HxWxC to CxHxW\n tensor = tensor.to(torch.long)\n return tensor", "def preprocess(image, gt_image, height, width):\n\n # Convert the image dtypes to tf.float32 if needed\n if image.dtype != tf.float32:\n image = tf.image.convert_image_dtype(image, dtype=tf.float32)\n\n # Convert the image dtypes to tf.int32 if needed\n if gt_image.dtype != tf.int32:\n gt_image = tf.image.convert_image_dtype(gt_image, dtype=tf.int32)\n\n '''# Compute number of pixels needed to pad images\n # in order to respect FCN factor requirement\n top, bottom, left, right = get_paddings(height, width, 32)\n new_height = height + top + bottom\n new_width = width + left + right\n\n # Pad images if necessary\n image = tf.image.resize_image_with_crop_or_pad(image, new_height, new_width)\n gt_image = tf.image.resize_image_with_crop_or_pad(gt_image, new_height, new_width)\n '''\n\n # Subtract off the mean and divide by the variance of the pixels\n image = tf.image.per_image_standardization(image)\n\n # Shape TF tensors\n image.set_shape(shape=(height, width, 3))\n gt_image.set_shape(shape=(height, width, 1))\n\n # Dowscale images to save memory and time ;)\n image = tf.image.resize_images(image, size=(256, 256))\n gt_image = tf.squeeze(tf.image.resize_images(gt_image, size=(256, 256)))\n\n # Perform one-hot-encoding on the ground truth image\n label_ohe = one_hot_encode(gt_image)\n\n return image, label_ohe", "def create_style_loss(session, model, style_image, layer_ids):\n\n # Create a feed-dict with the style-image.\n feed_dict = model.create_feed_dict(image=style_image)\n\n # Get references to the tensors for the given layers.\n layers = model.get_layer_tensors(layer_ids)\n\n # Set the 
model's graph as the default so we can add\n # computational nodes to it. It is not always clear\n # when this is necessary in TensorFlow, but if you\n # want to re-use this code then it may be necessary.\n with model.graph.as_default():\n # Construct the TensorFlow-operations for calculating\n # the Gram-matrices for each of the layers.\n gram_layers = [gram_matrix(layer) for layer in layers]\n\n # Calculate the values of those Gram-matrices when\n # feeding the style-image to the model.\n values = session.run(gram_layers, feed_dict=feed_dict)\n\n # Initialize an empty list of loss-functions.\n layer_losses = []\n \n # For each Gram-matrix layer and its corresponding values.\n for value, gram_layer in zip(values, gram_layers):\n # These are the Gram-matrix values that are calculated\n # for this layer in the model when inputting the\n # style-image. Wrap it to ensure it is a const,\n # although this may be done automatically by TensorFlow.\n value_const = tf.constant(value)\n\n # The loss-function for this layer is the\n # Mean Squared Error between the Gram-matrix values\n # for the content- and mixed-images.\n # Note that the mixed-image is not calculated\n # yet, we are merely creating the operations\n # for calculating the MSE between those two.\n loss = mean_squared_error(gram_layer, value_const)\n\n # Add the loss-function for this layer to the\n # list of loss-functions.\n layer_losses.append(loss)\n\n # The combined loss for all layers is just the average.\n # The loss-functions could be weighted differently for\n # each layer. You can try it and see what happens.\n total_loss = tf.reduce_mean(layer_losses)\n \n return total_loss", "def run_style_transfer(self, input_img, num_steps=500,\r\n style_weight=100000, content_weight=1):\r\n print('Building the style transfer model..')\r\n model, style_losses, content_losses = self.get_style_model_and_losses()\r\n optimizer = self.get_input_optimizer(input_img)\r\n print('Optimizing..')\r\n run = [0]\r\n while run[0] <= num_steps:\r\n def closure():\r\n input_img.data.clamp_(0, 1)\r\n\r\n optimizer.zero_grad()\r\n\r\n model(input_img)\r\n\r\n style_score = 0\r\n content_score = 0\r\n\r\n for sl in style_losses:\r\n style_score += sl.loss\r\n for cl in content_losses:\r\n content_score += cl.loss\r\n\r\n style_score *= style_weight\r\n content_score *= content_weight\r\n\r\n loss = style_score + content_score\r\n loss.backward()\r\n\r\n run[0] += 1\r\n if run[0] % 50 == 0:\r\n print(\"run {}:\".format(run))\r\n print('Style Loss : {:4f} Content Loss: {:4f}'.format(\r\n style_score.item(), content_score.item()))\r\n print()\r\n\r\n return style_score + content_score\r\n\r\n optimizer.step(closure)\r\n\r\n input_img.data.clamp_(0, 1)\r\n\r\n return input_img", "def _bottleneck(self,\n inputT,\n vanilla_conv,\n conv_params,\n bn,\n name):\n with tf.variable_scope(name) as scope:\n in_c = inputT.get_shape().as_list()[-1]\n\n # Reduce-block\n reduce_out = vanilla_conv(\n inputT,\n name=name+'_reduce',\n **conv_params.reduce)\n\n # Center-block\n center_out = vanilla_conv(\n inputT,\n name=name+'_center',\n **conv_params.center)\n\n # Increase-block\n increase_out = vanilla_conv(\n inputT,\n name=name+'_increase',\n **conv_params.increase)\n\n # Proj-block\n if inc != h_dim:\n skip_out = vanilla_conv(\n inputT,\n name=name+'_proj',\n **conv_params.proj)\n else:\n skip_out = inputT\n\n # Merge-block\n out = increase_out + skip_out\n if bn:\n out = self._batch_norm(out, self.phase_train)\n out = self._relu(out)\n\n return out", "def 
cache_bottlenecks(sess, image_list, bottleneck_dir, jpeg_data_tensor, bottleneck_tensor):\n how_many_bottlenecks = 0\n ensure_dir_exists(bottleneck_dir)\n\n for image_folder, image, label in image_list:\n get_or_create_bottleneck(sess, image, image_folder, bottleneck_dir,\n jpeg_data_tensor, bottleneck_tensor)\n\n how_many_bottlenecks += 1\n if how_many_bottlenecks % 100 == 0:\n print(str(how_many_bottlenecks) +\n ' bottleneck files created.')", "def loss_vgg(style_images, content_image, output_images, vggfile):\n c_layers = C_WEIGHTS.keys()\n s_layers = S_WEIGHTS.keys()\n vgg16_filters = load_vgg(vggfile)\n vgg16 = nn_build.Network(\n VGG16DESC, 'vgg16', initial=vgg16_filters, process=True)\n c_net = vgg16.layers(content_image, c_layers)\n\n c_loss = 0.\n s_loss = 0.\n tv_loss = 0.\n for style in style_images:\n s_net = vgg16.layers(style_images[style], s_layers)\n o_net = vgg16.layers(output_images[style], set(c_layers+s_layers))\n for layer in c_layers:\n _, h, w, c = c_net[layer].get_shape().as_list()\n c_loss += C_WEIGHTS[layer]*tf.nn.l2_loss(\n o_net[layer]-c_net[layer])/(h*w*c)\n for layer in s_layers:\n bs, _, _, c = o_net[layer].get_shape().as_list()\n s_loss += S_WEIGHTS[layer]*tf.nn.l2_loss(\n Gram(o_net[layer], bs) - Gram(s_net[layer], bs))\n tv_loss += TV_WEIGHTS*(\n tf.nn.l2_loss(output_images[style][:,1:,:,:]\n - output_images[style][:,:-1,:,:])\n + tf.nn.l2_loss(output_images[style][:,:,1:,:]\n - output_images[style][:,:,:-1,:]))\n style_num = len(style_images)\n return c_loss/style_num, s_loss/style_num, tv_loss/style_num", "def run_style_transfer(cnn, normalization_mean, normalization_std,\n args, content_layers_default, style_layers_default, num_steps,\n style_weight, content_weight): # default: style_weight = 1e6, content_weight = 1\n content_img = image_loader(args.content, args.img_size)\n style_img = image_loader(args.style, args.img_size)\n input_img = content_img.clone()\n assert style_img.size() == content_img.size(), \\\n \"we need to import style and content images of the same size\"\n \n logprint('Building the style transfer model..')\n model, style_losses, content_losses = get_style_model_and_losses(cnn,\n normalization_mean, normalization_std, style_img, content_img, \n args, content_layers_default, style_layers_default)\n \n if args.fft:\n input_img = fft_image(input_img.shape).to(device, torch.float) # convert to fft parameterization\n optimizer = get_input_optimizer(input_img)\n \n logprint('Optimizing..')\n run = [0]\n while run[0] <= num_steps:\n def closure():\n input_img.data.clamp_(0, 1) # correct the values of updated input image\n optimizer.zero_grad()\n model(input_img)\n style_score = 0\n content_score = 0\n\n for layer_name, sl in style_losses.items():\n style_score += sl.loss\n if args.plot_feature and run[0] == num_steps: # visualize feature maps at the last iter\n analyze_gram(sl.gram, layer_name) # analyze the gram matrix, like SVD analysis\n visualize_feature_map(sl.feat, layer_id=layer_name, save_dir=logger.gen_img_path, prefix=prefix, ext=args.ext)\n\n for layer_name, cl in style_losses.items():\n content_score += cl.loss\n\n style_score *= style_weight\n content_score *= content_weight\n loss = style_score + content_score\n loss.backward()\n\n run[0] += 1\n if run[0] % 50 == 0:\n logprint(\"run {}:\".format(run))\n logprint('Style Loss : {:4f} Content Loss: {:4f}'.format(style_score.item(), content_score.item()))\n return style_score + content_score\n\n optimizer.step(closure)\n if run[0] % 100 == 0:\n input_img.data.clamp_(0, 1)\n 
content_name = os.path.split(args.content)[1].split('.')[0] \n style_name = os.path.split(args.style)[1].split('.')[0]\n out_path = \"%s/%s__%s__%s_iter%d.jpg\" % (logger.gen_img_path, content_name, style_name, args.net, run[0])\n vutils.save_image(input_img, out_path)", "def run_style_transfer(self, content_and_style_class,\n num_iterations=3000,\n content_weight=1e-1,\n style_weight=1e2,\n ta_weight=1,\n save=False):\n # trainable to false.\n for layer in self.model.layers:\n layer.trainable = False\n\n # Get the style and content feature representations\n style_features, content_features = self._get_feature_representations(\n content_and_style_class)\n gram_style_features = [self._get_gram_matrix(style_feature)\n for style_feature in style_features]\n\n # Set initial image\n init_image = content_and_style_class.processed_content_image\n init_image = tf.Variable(init_image, dtype=tf.float32)\n # Create our optimizer\n opt = tf.train.AdamOptimizer(\n learning_rate=5, beta1=0.99, epsilon=1e-1)\n\n # Store our best result\n best_loss, best_img = float('inf'), None\n\n # Create a nice config\n loss_weights = (style_weight, content_weight, ta_weight)\n config = {\n 'loss_weights': loss_weights,\n 'init_image': init_image,\n 'gram_style_features': gram_style_features,\n 'content_features': content_features,\n }\n\n # For displaying\n global_start = time.time()\n\n norm_means = np.array([103.939, 116.779, 123.68])\n min_vals = -norm_means\n max_vals = 255 - norm_means\n\n imgs = []\n _, style_tail = os.path.split(\n content_and_style_class.path_to_style_img)\n _, content_tail = os.path.split(\n content_and_style_class.path_to_content_img)\n\n print(\n f\"Initializing Transfer of Style from image: {style_tail} upon \\\n image: {content_tail}\"\n )\n for i in tqdm(range(num_iterations)):\n grads, all_loss = self._compute_gradients(config)\n loss, _, _ = all_loss\n opt.apply_gradients([(grads, init_image)])\n clipped = tf.clip_by_value(init_image, min_vals, max_vals)\n init_image.assign(clipped)\n if loss < best_loss:\n # Update best loss and best image from total loss.\n best_loss = loss\n best_img = content_and_style_class.deprocess_image(\n init_image.numpy())\n if i % 100 == 0:\n imgs.append(content_and_style_class.deprocess_image(\n (init_image.numpy())))\n print('Finished Style Transfer; Total time: {:.4f}s'.format(\n time.time() - global_start))\n if save:\n plt.figure(figsize=(14, 4))\n fig, axes = plt.subplots(num_iterations // 100, 1)\n for i, img in enumerate(imgs):\n axes[i].imshow(img)\n fig.savefig(\"image\")\n fig_best, ax_best = plt.subplots(1, 1)\n ax_best.imshow(best_img)\n fig_best.savefig(\"image_best\")\n return best_img, best_loss", "def CreateTensor(tensor):\n return _C.CreateTensor(_stringify_tensor(tensor))", "def get_style_loss(curr_style,target_style):\n height,width,channels = curr_style.get_shape().as_list()\n normalization = 4.*(channels**2)*(width*height)**2\n gram_style = gram_matrix(curr_style)\n\n return tf.reduce_mean(tf.square(gram_style - target_style))#/normalization", "def __init__(self, hparams):\n # init superclass\n super(FastNeuralStyleSystem, self).__init__()\n self.hparams = hparams\n torch.manual_seed(hparams.seed)\n np.random.seed(hparams.seed)\n\n self.batch_size = hparams.batch_size\n if hparams.model == \"hrnet\":\n self.style_model = HRNet()\n else:\n self.style_model = TransformerNet()\n self.vgg_extractor = Vgg16(requires_grad=False)\n\n self.transform = transforms.Compose([\n transforms.Resize(hparams.image_size),\n 
transforms.CenterCrop(hparams.image_size),\n transforms.ToTensor(),\n transforms.Lambda(lambda x: x.mul(255))\n ])\n\n self.style_transform = transforms.Compose([\n transforms.Resize(hparams.image_size),\n transforms.CenterCrop(hparams.image_size),\n transforms.ToTensor(),\n transforms.Lambda(lambda x: x.mul(255))\n ])\n\n content_image = utils.load_image(\n self.hparams.content_image, scale=self.hparams.content_scale)\n self.content_image = self.style_transform(content_image)\n\n style = utils.load_image(os.path.join(\n 'images', 'style-images', f'{hparams.style_image}.jpg'), scale=0.5)\n style = self.style_transform(style).requires_grad_(False)\n self.style_image = style.repeat(hparams.batch_size, 1, 1, 1)\n\n self.features_style = self.vgg_extractor(\n utils.normalize_batch(self.style_image))\n self.gram_style = [utils.gram_matrix(y) for y in self.features_style]\n\n # self.temp_dir = f\"{self.hparams.output_dir}/{self.hparams.style_image}_steps_c_{self.hparams.content_weight}_s_{self.hparams.style_weight}\"\n # os.makedirs(self.temp_dir, exist_ok=True)", "def blue_tang(target_size: Optional[Tuple[int, int]] = None, rgb: bool = True) -> Tensor:\n return imread(HERE+'blue_tang.jpg', target_size=target_size, rgb=rgb)", "def run_style_transfer(cnn, normalization_mean, normalization_std,\n content_img, style_img, input_img, num_steps=300,\n style_weight=1000000, content_weight=1):\n model, style_losses, content_losses = get_style_model_and_losses(cnn,\n normalization_mean, normalization_std, style_img, content_img)\n optimizer = get_input_optimizer(input_img)\n run = [0]\n while run[0] < num_steps:\n def closure():\n # correct the values of updated input image\n input_img.data.clamp_(0, 1)\n\n optimizer.zero_grad()\n model(input_img)\n\n style_score = 0\n content_score = 0\n\n for sl in style_losses:\n style_score += sl.loss\n for cl in content_losses:\n content_score += cl.loss\n\n style_score *= style_weight\n content_score *= content_weight\n\n loss = style_score + content_score\n if run[0] < num_steps:\n loss.backward()\n\n run[0] += 1\n if run[0] % 50 == 0:\n print(run[0],\"von\",num_steps)\n return style_score + content_score\n if run[0] < num_steps:\n optimizer.step(closure)\n return input_img.data.clamp_(0, 1)", "def get_image_tensor(img_path):\n img_tensor = path_to_tensor(img_path) / 255.0\n return img_tensor", "def _bottleneck(x: tf.Tensor, depth: int, depth_bottleneck: int, stride: int, rate: int = 1) -> tf.Tensor:\n with tf.variable_scope(None, 'bottleneck_v2', [x]):\n depth_in = slim.utils.last_dimension(x.get_shape(), min_rank=4)\n preact = slim.batch_norm(x, activation_fn=tf.nn.relu, scope='preact')\n if depth == depth_in:\n shortcut = ResNet._pooling(x, stride, 'shortcut')\n else:\n shortcut = slim.conv2d(preact, depth, [1, 1], stride=stride, normalizer_fn=None, activation_fn=None,\n scope='shortcut')\n\n res = slim.conv2d(preact, depth_bottleneck, [1, 1], stride=1, scope='conv1')\n res = ResNet._conv2d_same(res, depth_bottleneck, 3, stride, rate=rate, scope='conv2')\n res = slim.conv2d(res, depth, [1, 1], stride=1, normalizer_fn=None, activation_fn=None, scope='conv3')\n\n return shortcut + res" ]
[ "0.59696984", "0.5862865", "0.5862255", "0.581168", "0.5622455", "0.55933154", "0.559133", "0.5581609", "0.54264534", "0.5406123", "0.54025626", "0.53983897", "0.5380382", "0.5368574", "0.53159803", "0.5289746", "0.52772886", "0.52625686", "0.52574396", "0.5250272", "0.5229245", "0.5208513", "0.51856256", "0.51837605", "0.5166365", "0.5155367", "0.5155253", "0.51545054", "0.5149288", "0.51459956" ]
0.6130885
0
Append a string to the self.message string.
def appendMsg(self, msg):
    # self.message += msg
    theTime = self.logger.mytime()
    # self.message += theTime + " " + str( msg )
    self.message = str(self.message) + str(theTime) + " " + str(msg)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rawAppend(self, data):\n self.message = self.message + data", "def massage_addcontent(self) -> str:\n self.message_str += self.content if len(self.content) <= self.notification_message_max_len else self.content[\n :self.notification_message_max_len] + \"...\"", "def massage_addcontent(self) -> str:\n self.message_str += self.content if len(self.content) <= self.notification_message_max_len else self.content[\n :self.notification_message_max_len] + \"...\"", "def with_additional_message(self: _Diagnostic, message: str) -> _Diagnostic:\n if self.additional_message is None:\n self.additional_message = message\n else:\n self.additional_message = f\"{self.additional_message}\\n{message}\"\n return self", "def message(self, message: \"str\"):\n self._attrs[\"message\"] = message", "def characters(self, message):\n self._message = self._message + message", "def message(self, string):\n print (string)", "def add_message(self, msg):\n self.messages.append(msg)", "def _add_message(self, message):\r\n self.result = self.result + message", "def massage_addinfo(self) -> str:\n self.message_str= \"{}, {}\\n\".format(self.sent_by, self.time)", "def message(self, value: str):\n self._properties[\"message\"] = value", "def massage_addinfo(self) -> str:\n self.message_str = f'{self.time}\\n{self.sent_by}\\n'\n return self.message_str", "def log(self, string):\n if self.PRINT:\n print(string)\n sys.stdout.flush()\n self.message_list.append(string)", "def append_message(self, message_object):\n self.messages.append(message_object)", "def add(self, string: str) -> None:\n self._output.append(string)", "def q_send(send, in_string):\n self.message_q.append(in_string)", "def append(self, text):\n self.text += text", "def message(self, message):\n if python_utils.is_string(self._message):\n raise TypeError('self.message must be assigned to exactly once')\n if not python_utils.is_string(message):\n raise TypeError('self.message must be a string')\n if not message:\n raise ValueError('self.message must be a non-empty string')\n model_name, quoted_model_id = self._message\n self._message = '%s in %s(id=%s): %s' % (\n self.__class__.__name__, model_name, quoted_model_id, message)", "def message(self, message: str):\n\n self._message = message", "def message(self, message: \"str\"):\n if message is None:\n raise ValueError(\"Invalid value for `message`, must not be `None`\")\n self._attrs[\"message\"] = message", "def message(self, msg):\n self._message = msg", "def update_message(self, text):\n self.message = text\n if self.verbose:\n print self.message", "def add_message(self, msg):\n msg_string = json.dumps(msg)\n self.redis_client.publish(self.message_channel, msg_string)\n self.redis_client.lpush(self.message_list, msg_string)\n self.redis_client.ltrim(self.message_list, 0,\n app.config[\"MAX_MESSAGES\"]-1)", "def msg(self, msg: str):\n\n self._msg = msg", "def add_external_message(self, message: str):\n self._messages.append(message)", "def add_message(self, message):\n self.message_list.append(message)", "def set_message(self, message):\n if len(message) > globals.MAX_MESSEGE_LENGTH:\n mess = message[0:globals.MAX_MESSEGE_LENGTH-3]+\"...\"\n else:\n mess = message\n self._message.set_message(mess)", "def append(self, message, *tags):\n self._messages.append((message, time.time(), tags))", "def __addmsg(self, msg: str) -> None:\n # region Docstring\n # endregion\n self.record += msg\n self.textbox.kill()\n self.textbox = UITextBox(\n html_text=self.record,\n relative_rect=Rect((0, 0), (self.size[0], 
self.size[1] - 25)),\n container=self,\n manager=self.ui_manager,\n )", "def AddMessage(self, name, time, message):\n pass" ]
[ "0.69485325", "0.675597", "0.675597", "0.6682592", "0.66123986", "0.6595866", "0.65272105", "0.65110123", "0.65009505", "0.644948", "0.6436071", "0.6379012", "0.6360525", "0.6347536", "0.6322906", "0.62719226", "0.6262143", "0.6243334", "0.62312156", "0.6208512", "0.6207016", "0.62036824", "0.61903566", "0.619034", "0.618701", "0.6171121", "0.6168774", "0.6163711", "0.6146019", "0.6130236" ]
0.71520525
0
Get the NNNN number from the recvData.
def getMsgNumber(self):
    wData = self.recvData
    self.logIt(__name__ + ".getMsgNumber(): wData=" + str(wData) + "\n")
    msgNum = ""
    msgNum2 = None
    for i in range(0, len(str(wData))):
        if wData is None:
            break
        if str(wData[i]).isdigit():
            msgNum += str(wData[i])
        else:
            break
    if msgNum != "":
        msgNum2 = socket.ntohl(int(str(msgNum)))
    return msgNum2
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_NID():\n return NID", "def n(self) :\n\t\ttry :\n\t\t\treturn self._n\n\t\texcept Exception as e:\n\t\t\traise e", "def getN(self)->int:\n return self.n", "def get_neuron_number(self):\n return self.neuronNumber", "def recvnumber(self):\n\n data = self.recvraw()\n try:\n return int(data)\n except ValueError:\n try:\n return float(data)\n except ValueError:\n return complex(data)", "def readpacket(self, n):\n try:\n msg = self.sock.recv(n)\n except BaseException:\n msg = ''\n return msg", "def getN(self):\r\n return self.N", "def recvn(self, n):\n data = []\n while len(data) != n:\n data.append(self.recv(1))\n\n return b''.join(data)", "def getNumber():", "def _get(self, ndef_message, timeout=1.0):\n if not self.socket:\n try:\n self.connect('urn:nfc:sn:snep')\n except nfc.llcp.ConnectRefused:\n return None\n else:\n self.release_connection = True\n else:\n self.release_connection = False\n try:\n snep_request = b'\\x10\\x01'\n snep_request += struct.pack('>L', 4 + len(str(ndef_message)))\n snep_request += struct.pack('>L', self.acceptable_length)\n snep_request += str(ndef_message)\n if send_request(self.socket, snep_request, self.send_miu):\n response = recv_response(\n self.socket, self.acceptable_length, timeout)\n if response is not None:\n if response[1] != 0x81:\n raise SnepError(response[1])\n return response[6:]\n finally:\n if self.release_connection:\n self.close()", "def n(self):\n return self._n", "def n(self):\n return self._n", "def read(reader: BitStreamReader, _index: int) -> int:\n\n return reader.readVarInt()", "def sendnum(self, n):\n self.sendline(str(n))", "def _nn_read_data(self):\n\t\treaData = True\n\t\tnnIncomingData = False\n\t\tnnData = \"\"\n\t\twhile reaData and self._neuralNetwork.poll()==None:\n\t\t\tnnIncomingMsg = self._neuralNetwork.stdout.readline().rstrip(\"\\n\").split()\n\t\t\tif \"COMM_OUT\" in nnIncomingMsg: nnIncomingData = True\n\t\t\telif \"END\" in nnIncomingMsg: reaData = False\n\t\t\telif nnIncomingData: nnData += \" \".join(nnIncomingMsg)+\"\\n\"\n\t\t\tprint \"\\t\\tNeuron: \"+\" \".join(nnIncomingMsg)\n\t\treturn nnData", "def packet_read(self):\n bytes_received = 0\n \n if self.sock == NC.INVALID_SOCKET:\n return NC.ERR_NO_CONN\n \n if self.in_packet.command == 0:\n ba_data, errnum, errmsg = nyamuk_net.read(self.sock, 1)\n if errnum == 0 and len(ba_data) == 1:\n bytes_received += 1\n byte = ba_data[0]\n self.in_packet.command = byte\n \n if self.as_broker:\n if self.bridge is None and self.state == NC.CS_NEW and (byte & 0xF0) != NC.CMD_CONNECT:\n print \"RETURN ERR_PROTOCOL\"\n return NC.ERR_PROTOCOL, bytes_received\n else:\n if errnum == errno.EAGAIN or errnum == errno.EWOULDBLOCK:\n return NC.ERR_SUCCESS, bytes_received\n elif errnum == 0 and len(ba_data) == 0 or errnum == errno.ECONNRESET:\n return NC.ERR_CONN_LOST, bytes_received\n else:\n evt = event.EventNeterr(errnum, errmsg)\n self.push_event(evt)\n return NC.ERR_UNKNOWN, bytes_received\n \n if not self.in_packet.have_remaining:\n loop_flag = True\n while loop_flag:\n ba_data, errnum, errmsg = nyamuk_net.read(self.sock, 1)\n \n if errnum == 0 and len(ba_data) == 1: \n byte = ba_data[0]\n bytes_received += 1\n self.in_packet.remaining_count += 1\n if self.in_packet.remaining_count > 4:\n return NC.ERR_PROTOCOL, bytes_received\n \n self.in_packet.remaining_length += (byte & 127) * self.in_packet.remaining_mult\n self.in_packet.remaining_mult *= 128\n else:\n if errnum == errno.EAGAIN or errnum == errno.EWOULDBLOCK:\n return NC.ERR_SUCCESS, bytes_received\n elif errnum == 0 
and len(ba_data) == 0 or errnum == errno.ECONNRESET:\n return NC.ERR_CONN_LOST, bytes_received\n else:\n evt = event.EventNeterr(errnum, errmsg)\n self.push_event(evt)\n return NC.ERR_UNKNOWN, bytes_received\n \n if (byte & 128) == 0:\n loop_flag = False\n \n if self.in_packet.remaining_length > 0:\n self.in_packet.payload = bytearray(self.in_packet.remaining_length)\n if self.in_packet.payload is None:\n return NC.ERR_NO_MEM, bytes_received\n self.in_packet.to_process = self.in_packet.remaining_length\n \n self.in_packet.have_remaining = True\n \n if self.in_packet.to_process > 0:\n ba_data, errnum, errmsg = nyamuk_net.read(self.sock, self.in_packet.to_process)\n if errnum == 0 and len(ba_data) > 0:\n readlen = len(ba_data)\n bytes_received += readlen\n for idx in xrange(0, readlen):\n self.in_packet.payload[self.in_packet.pos] = ba_data[idx]\n self.in_packet.pos += 1\n self.in_packet.to_process -= 1\n else:\n if errnum == errno.EAGAIN or errnum == errno.EWOULDBLOCK:\n return NC.ERR_SUCCESS, bytes_received\n elif errnum == 0 and len(ba_data) == 0 or errnum == errno.ECONNRESET:\n return NC.ERR_CONN_LOST, bytes_received\n else:\n evt = event.EventNeterr(errnum, errmsg)\n self.push_event(evt)\n return NC.ERR_UNKNOWN, bytes_received\n\n #all data for this packet is read\n self.in_packet.pos = 0\n \n ret = self.packet_handle()\n \n self.in_packet.packet_cleanup()\n \n self.last_msg_in = time.time()\n \n return ret, bytes_received", "def ny(self, n: int) -> float:\n result = self._read_inline(f\"ny({n})\")\n return result", "def __recv__(self):\n data = self.port.read(size=1)\n v = int.from_bytes(data, byteorder=\"little\")\n if(self.verbose):\n pc.color_stdout(\"RED\")\n print(\"<< %s\\t - %s\\t - %d\"% (hex(v),bin(v),v))\n pc.color_stdout(\"RESET\")\n return data", "def read(reader: BitStreamReader, _index: int) -> int:\n\n return reader.readVarInt16()", "def read(self, address):\n self._send_check([72, self._highNib(address), self._lowNib(address)])\n data = self._receive_check(3)\n self._debug_print('read: len(data) = %d (%s)' % (len(data), data))\n value = int(data, base=16)\n self._debug_print('read: value is %d' % value)\n try:\n return value\n except Exception as e:\n print('read: reply unparsable')", "def value(self):\n return self.__n", "def getNL(self):\r\n return self.nL;", "def ny(self):\n return self._ny", "def ny(self):\n return self._ny", "def ny(self):\n return self._ny", "def read(reader: BitStreamReader, _index: int) -> int:\n\n return reader.readVarUInt()", "def read(reader: BitStreamReader, _index: int) -> int:\n\n return reader.readVarInt64()", "def n_value(self) -> int:\n return self.my_n", "def ndnext(self, n: int) -> int:\n result = self._read_inline(f\"ndnext({n})\")\n return int(result)", "def nid(x):\n return x.__array_interface__['data'][0]" ]
[ "0.6444097", "0.61840373", "0.6120752", "0.6090942", "0.6069647", "0.60404944", "0.6036849", "0.60170084", "0.5792893", "0.57422215", "0.56789714", "0.56789714", "0.56575114", "0.5606321", "0.5583389", "0.5574941", "0.55717474", "0.5563853", "0.55533504", "0.55358994", "0.55295044", "0.5519767", "0.5510049", "0.5510049", "0.5510049", "0.5478874", "0.5463591", "0.545386", "0.54511213", "0.54391843" ]
0.66387296
0
Get the file data to be returned to the client. The file is assumed to be in the current working directory. The file name will be NNNN.txt, where NNNN is the first 4 bytes of the received data. The rest of the received data is ignored.
def getFileData(self):
    # fileName = "./0000.txt"
    self.logIt(__name__ + ".getFileData(): data=" + str(self.recvData) + "\n")
    msgNum = self.getMsgNumber()
    if msgNum is None:
        return ""
    # fileName = "%-04.4d" % (msgNum ) + ".txt"
    fileName = "./files/" + str(msgNum)
    self.msgNum = msgNum
    self.logIt(__name__ + ".getFileData(): fileName=" + fileName + "\n")
    try:
        FH = open(fileName, "r")
        data = FH.read()
        FH.close()
    except IOError as inst:
        self.logIt(__name__ + ".getFileData(): Unable to open " + fileName + " for write." + " => " + str(
            inst.errno) + ":" + str(inst.strerror) + "\n")
        raise
    # Endtry
    return data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def file_request(self):\n magic_number = int.to_bytes(int(0x497E), 2, byteorder='big') \n type_bytes = int.to_bytes(1, 1, byteorder='big')\n filename_in_bytes = bytes(self.file_name, 'utf-8') #ENCODE FILENAME IN BYTES\n try:\n filename_len = int.to_bytes(len(filename_in_bytes), 2, byteorder='big')\n except OverflowError:\n self.socket.close()\n sys.exit(\"ERROR: Filename is too long, Stack Overflow\") \n \n message_to_send = bytearray(magic_number+type_bytes+filename_len+filename_in_bytes)\n self.socket.send(message_to_send)\n \n #WAIT FOR FILE RESPONSE FROM SERVER\n try:\n self.socket.settimeout(1)\n while True: #LOOP TO RECIEVE DATA IN CHUNKS <= 4096 BYTES\n data = self.socket.recv(4096)\n if data:\n self.data += data\n else:\n break\n\n self.socket.settimeout(None)\n self.socket.close()\n\n except socket.timeout:\n self.socket.close()\n sys.exit(\"ERROR: Connection timed out\")", "def get_file_data(filename):", "def get_file():\n sentence = ARGS.file\n outfile = ARGS.output\n CLIENT_SOCKET.send(\"GET||\".encode() + sentence.encode())\n\n with open(outfile, \"wb\") as file:\n data = CLIENT_SOCKET.recv(1024)\n while data:\n file.write(data)\n data = CLIENT_SOCKET.recv(1024)\n\n CLIENT_SOCKET.close()", "def returnFile(filename):\n filename = secure_filename(filename)\n return send_from_directory(receiverParameters[\"dataFolder\"], filename)", "def get_datafile(file_):\n\n if os.path.exists(file_):\n return file_\n else:\n\n # download file, then return file_ path\n\n (path_, fname) = os.path.split(file_)\n if path_ == '':\n path_ = '.' # relative to current path\n\n try:\n resp = urllib_request.urlopen(urljoin(DATA_SERVER, fname))\n except urllib_request.HTTPError as ex:\n ex.msg = (\"{0}. '{1}' not found on server or server is down\"\n .format(ex.msg, fname))\n raise ex\n\n # # progress bar\n # widgets = [fname + ': ',\n # pb.Percentage(),\n # ' ',\n # pb.Bar(),\n # ' ',\n # pb.ETA(),\n # ' ',\n # pb.FileTransferSpeed(),\n # ]\n\n # pbar = pb.ProgressBar(widgets=widgets,\n # maxval=int(resp.info().getheader('Content-Length'))\n # ).start()\n\n if not os.path.exists(path_):\n os.makedirs(path_)\n\n sz_read = 0\n with open(file_, 'wb') as fh:\n # while sz_read < resp.info().getheader('Content-Length')\n # goes into infinite recursion so break loop for len(data) == 0\n while True:\n data = resp.read(CHUNKSIZE)\n\n if len(data) == 0:\n break\n else:\n fh.write(data)\n sz_read += len(data)\n\n # if sz_read >= CHUNKSIZE:\n # pbar.update(CHUNKSIZE)\n\n # pbar.finish()\n return file_", "def file_data(self):\n return self.read(self.file)", "def get_file(self, msg_parameters):\n reg = self.get_regex_file_name(msg_parameters[0])\n # get all file parts\n result_files = [file_part for file_part in self.files.keys() if reg.search(file_part) is not None]\n # send to server - do not open thread unless there is a file to\n if len(result_files) > 0:\n data_server_command = Queue.Queue() # queue: [msg_type, msg_parameters]\n command_result_data_server = Queue.Queue() # queue: [msg_type, msg_parameters]\n send_file_parts = DataServerMainServer(data_server_command, command_result_data_server, \"\", SERVER_IP,\n msg_parameters[1])\n thread.start_new_thread(send_file_parts.main, ())\n # send all the parts that the data server has\n for file_part in result_files:\n command_result_data_server.put([4, [file_part, self.files[file_part]]])", "def cmd_get(self, msg_dict):\r\n filename = msg_dict[\"filename\"]\r\n filename_abs_path = \"%s\" % msg_dict[\"current_directory\"] + \"/\" + filename\r\n # print(\"in the 
cmd_get server:\", filename_abs_path)\r\n if os.path.isfile(filename_abs_path):\r\n # print(\"@@@@\")\r\n file_size = os.stat(filename_abs_path).st_size\r\n self.request.send(str(file_size).encode())\r\n reply = self.request.recv(1024)\r\n server_md5 = hashlib.md5()\r\n if reply:\r\n with open(filename_abs_path, 'rb') as f:\r\n for line in f:\r\n self.request.send(line)\r\n server_md5.update(line)\r\n server_md5_value = server_md5.hexdigest()\r\n replay2 = self.request.recv(1024)\r\n # print(\"in the cmd_get server:\", replay2)\r\n self.request.send(server_md5_value.encode())", "def get_file(self, path):\n file = self.get('data_request?id=file&parameters=%s' % path)\n return file", "def download_file():\n data = c.recv(BUFFER)\n \n if data == b\"terminate\":\n print(\"DOWNLOADING FAILED !!!\")\n return\n\n file = open(FILE_NAME,\"wb\")\n while True:\n if data == b\"DONE\":\n break\n \n print(\"Receiving. . . \")\n file.write(data)\n data = c.recv(BUFFER)\n \n file.close()\n print(\"Successfully received!!!\")\n \n print(\"Webpage saved as {} at {}\".format(FILE_NAME, getcwd())) \n return None", "def get_file():\n fname = get_var(request, \"fname\")\n return open(fname).read()", "def get(file: str, addr: tuple) -> bytes:\n assert type(file) == str or type(addr) == tuple, \"Invalid Parameter Types\"\n request = pickle.dumps([\"get\", file])\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect(addr)\n s.sendall(request)\n data = bdtp.new_receive_data_port((\"\", 0))\n data.recv(s)\n data = data.data\n s.close()\n return data", "async def read(self, n: int = -1) -> AnyStr:\n\n # load file\n if len(self._buffer) == 0 and \"r\" in self.mode:\n await self._download()\n\n # check size\n if n == -1:\n data = self._buffer\n self._pos = len(self._buffer) - 1\n else:\n # extract data to read\n data = self._buffer[self._pos : self._pos + n]\n self._pos += n\n\n # return data\n return data", "def request_file(sender_ip, sender_port, file_name):\n # create a new TCP socket.\n h = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n h.connect((sender_ip, sender_port))\n request = file_name + \"\\n\"\n h.send(request.encode())\n f = open(file_name, 'wb')\n l = h.recv(1024)\n while (l):\n f.write(l)\n l = h.recv(1024)\n f.close()\n h.close()", "def request_file(self, file_path_on_server):\n # Create a RRQ\n packet = bytearray()\n packet.append(0)\n packet.append(1)\n name_barr = bytearray(file_path_on_server.encode('ascii'))\n packet += name_barr\n packet.append(0)\n mode = bytearray(\"octet\".encode('ascii'))\n packet += mode\n packet.append(0)\n return packet", "def GetFileName():\r\n d = GetData()\r\n return d.filename", "def get_file(self, filename: str, directory: str = 'gcodes', binary: bool = False) -> str:\n raise NotImplementedError", "def get_file_data(file_name):\r\n try:\r\n with open(file_name, 'rb') as input_file:\r\n data = input_file.read()\r\n return data\r\n except Exception as err:\r\n return str(err).encode()", "def getFile(request,jobId,fileName):\n\tuser = request.user\n\tfileResponse = agaveRequestOutputGet(user,jobId,fileName)\n\tcontent_type = fileResponse.headers['Content-Type']\n\textension = os.path.splitext(fileName)[1]\n\tif extension not in ['png','txt']:\n\t\tcontent_type = 'text/plain'\n\tresponse = HttpResponse(fileResponse.content, content_type=content_type)\n\treturn response", "def getfile(self, name):\n try:\n datname = self.getname(name, \"dat\")\n tadname = self.getname(name, \"tad\")\n if datname and tadname:\n return Datafile(name, open(datname, 
\"rb\"), open(tadname, \"rb\"), self.kod)\n except IOError:\n return", "def read_data_nmt():\n data_dir = download_extract('fra-eng')\n with open(os.path.join(data_dir, 'fra.txt'), 'r') as f:\n return f.read()", "def read_full_file(self, filename):\n output_file = os.path.join(OUTPUT_PATH, filename)\n data_out = ''\n if os.path.exists(output_file):\n fid = open(output_file, 'rb')\n data_out = fid.read()\n fid.close()\n else:\n print('No file %s', filename)\n return data_out", "def read_file():\n with open(FILE_NAME) as f:\n data = f.read()\n return data", "def do_GET(self):\n self.send_response(200)\n self.send_header(\"Content-type\", self.mimetype)\n self.end_headers()\n\n with open(filename, \"rb\") as file_:\n self.wfile.write(file_.read())\n file_.close()", "def receive_file(self, file_name, length):\n self.client_socket.sendall(\"Ready\".encode(\"utf-8\"))\n print(\"Server ready to accept file: {} from client: {}:{}\".format(\n file_name, self.client_ip, self.client_port))\n\n save_file = open(\"{}\".format(file_name), \"wb\")\n\n amount_recieved_data = 0\n while amount_recieved_data < int(length):\n recv_data = self.client_socket.recv(1024)\n amount_recieved_data += len(recv_data)\n save_file.write(recv_data)\n\n save_file.close()\n\n self.client_socket.sendall(\"Received,{}\".format(\n amount_recieved_data).encode('utf-8'))\n print(\"Server done receiving from client {}:{}. File Saved.\".format(\n self.client_ip, self.client_port))", "def readFile(self, path):\n return self.session.request('diag/files/?q=%s'\n % (path))", "def get_file(filename):\n content = Tree.file_content(filename)\n file_name = \"attachment;filename={0}\".format(filename)\n return Response(content, mimetype=\"text/plain\",\n headers={\"Content-Disposition\": file_name})", "def get_response():\n result = ''\n line = ''\n while line != '\\n':\n result += line\n line = FROMFILE.readline()\n #print(\" I read line:[\"+line+\"]\")\n return result", "def download_file():\n\n if 'POST' == request.method:\n file_id = request.form['file_id']\n else:\n file_id = request.args.get('file_id')\n\n # 1 ==> example_1.tgz\n file_path = file_manager.get_file_path_from_id(file_id)\n print \"serving file: \" + file_path\n return send_file(file_path, as_attachment=True)", "def get_file(self, file_name: str) -> BytesIO:\n fl = BytesIO()\n self.client.download_fileobj(self.bucket, file_name, fl)\n fl.seek(0)\n return fl" ]
[ "0.69599164", "0.68891335", "0.68286395", "0.6660193", "0.65385926", "0.64575404", "0.644558", "0.6357686", "0.6305093", "0.62702876", "0.6221223", "0.61896163", "0.6177835", "0.61637133", "0.61625874", "0.6144655", "0.6137694", "0.6082975", "0.60770166", "0.60584915", "0.6022714", "0.60133624", "0.60121197", "0.59384644", "0.591261", "0.5908519", "0.5904436", "0.59039855", "0.5879855", "0.5874298" ]
0.7877845
0
Update the counts file.
def updateCounts(self):
    found = False
    fileName = "counts"
    if not os.access(fileName, os.F_OK):
        try:
            TFH = open(fileName, "w")
            TFH.close()
        except IOError as inst:  # @UnusedVariable
            self.logIt(__name__ + ".updateCounts(): Unable to open " + fileName + " for write." + " => " + str(
                inst.errno) + ":" + str(inst.strerror) + "\n")
            raise
    self.logIt(__name__ + ".updateCounts(): fileName=" + fileName + "\n")
    try:
        FH = open(fileName, "rb+")
        # FH = posixfile.open(fileName, "rb+")  # posixfile has been deprecated.
        # FH.lock('w|')
        data = None
        while 1:
            data = str(FH.readline())
            if data is None or data == "":
                break
            data = re.sub("\n", "", data)
            self.debug(__name__ + ".updateCounts(): data is " + str(data) + "\n")
            ms = str(self.msgNum) + "="
            self.debug(__name__ + ".updateCounts(): ms is" + str(ms) + "\n")
            if re.search(ms, data):
                found = True
                self.debug(__name__ + ".updateCounts(): DEBUG0.5\n")
                break
        self.debug(__name__ + ".updateCounts(): DEBUG1\n")
        if data and found:
            self.debug(__name__ + ".updateCounts(): DEBUG2\n")
            eloc = FH.tell()
            self.debug(__name__ + ".updateCounts(): eloc=" + str(eloc) + "\n")
            sloc = eloc - len(data) - 1
            self.debug(__name__ + ".updateCounts(): sloc=" + str(sloc) + "\n")
            FH.seek(sloc, os.SEEK_SET)
            cloc = FH.tell()
            self.debug(__name__ + ".updateCounts(): cloc=" + str(cloc) + "\n")
            myList = list()
            myList = data.split('=')
            icount = int(myList[1]) + 1
            FH.write(str(self.msgNum) + "=" + str(icount) + "\n")
        else:
            self.debug(__name__ + ".updateCounts(): DEBUG3\n")
            FH.write(str(self.msgNum) + "=1" + "\n")
        FH.lock('u')
        FH.close()
    except IOError as inst:  # @UnusedVariable
        pass
        # self.logIt( __name__ + ".updateCounts(): Unable to open " + fileName + " for write." + " => " + str( inst.errno ) + ":" + str( inst.strerror ) + "\n" )
    # Endtry
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_count(self):\n pass", "def update_count(self):\n count_metrics = self._fetch_count_metrics_and_clear()\n self._logger.info('update_count. count_metrics = %s',\n build_metrics_counter_data(count_metrics))", "def update_count(self):\n pass # Do nothing", "def update_frequencies():\n pass", "def _increment_file_counter(self):\n self._add_to_file_counter(1)", "def _update_counters(self, filepath, step):\n\n counters = {}\n\n # Load\n if os.path.exists(self.counters_file):\n with open(self.counters_file) as f:\n counters = json.load(f)\n\n counters[filepath] = dict(step=step)\n\n # Save\n with open(self.counters_file, \"w\") as f:\n json.dump(counters, f, indent=4)", "def write_counts(self):\n\n self.db_file.write(\n struct.pack(\"<HH\", self.number_of_entries, 0x0040))\n self.db_file.write(\n struct.pack(\"<HH\", self.number_of_genres, 0x0010))\n self.db_file.write(\n struct.pack(\"<HH\", self.number_of_performers, 0x0010))\n self.db_file.write(\n struct.pack(\"<HH\", self.number_of_albums, 0x0010))\n self.db_file.write(\n struct.pack(\"<HH\", self.number_of_playlists, 0x0010))\n self.db_file.write(\n struct.pack(\"<HH\", 0x0001, 0x0014))\n\n self.db_file.write(\n b\"\\x01\\x00\\x02\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x02\\x00\\x00\\x00\\x00\\x00\")\n self.db_file.write(\n b\"\\x00\\x00\\x06\\x00\\x04\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\")", "def _update_count(self):\n self._count = len(self._items)", "def update_usage_stats(self):\n self._usage.increment_usage_stats()", "def _update_cmd_counter(self) -> None:\n msg = '{} documents processed\\r'\n print(msg.format(self._docs_processed), end='\\r')", "def update_freq_dist(filename):\r\n pass", "def update_count(self, source, count):\n if source in self._counts:\n self._total_count -= self._counts[source]\n self._counts[source] = count\n self._total_count += count\n self.change_text()", "def _update(self, count=True, forced=False):", "def _setcounter():\n fname = os.path.basename(camera.status.lastfile)\n tname = fname.split('.')[0]\n i = len(tname)-1\n if i > -1:\n while tname[i].isdigit() and i>-1:\n i = i - 1\n nname = fname[:-4]\n bname = tname[:i+1]\n for file in glob.glob('/data/counters/'+bname+'[0-9][0-9][0-9].cntr'):\n os.remove(file)\n for file in glob.glob('/data/counters/'+bname+'[0-9][0-9][0-9][0-9].cntr'):\n os.remove(file)\n f = open('/data/counters/'+nname+'cntr','w')\n f.close()", "def update_term_count_dataframes(self):\n logging.info('Starting method that updates missing Term COUNT info on disk')\n list_records_no_termcount_data_on_disk = self.__look__missing_termcount_info()\n num_missing_tc_entries = len(list_records_no_termcount_data_on_disk)\n counter = 0\n num_vids_success = 0\n percent_tracker = PercentTracker(num_missing_tc_entries,\n int_output_every_x_percent=1, log_level='info')\n logging.info(str(num_missing_tc_entries) + \" records don't have term-COUNT data on disk.\")\n for a_vid in list_records_no_termcount_data_on_disk:\n execution_should_continue = self.var_mgr.var_retrieve(my_globals.str_execution_may_go_on)\n if not execution_should_continue:\n break\n # we'll only update the timestamp in the SimpleDS if there is an actual change\n # to the transcript. 
So here, we'll keep the existing timestamp\n timestamp_updated = self.transcripts_ds.fetch_lastupdated(a_vid)\n logging.info('Updating term COUNT for record # ' + str(counter + 1) + ' of ' + str(num_missing_tc_entries))\n transcript = Transcript(a_vid)\n transcript.set_transcript_directory(self.str_path_to_transcripts_files)\n transcript.load_transcript_object_from_dictionary(self.transcripts_ds.fetch_data(a_vid))\n transcript.get_transcript_from_disk()\n transcript.construct_terms_count()\n saved_to_disk_successfully = transcript.save_df_terms_count_2disk()\n dict_for_ds = transcript.dump_transcript_metadata_to_dictionary()\n self.transcripts_ds.update_entry(a_vid, dict_for_ds, timestamp_updated)\n logging.debug('Added (to SimpleDS) the term-COUNT data for entry: ' + a_vid)\n if saved_to_disk_successfully:\n num_vids_success += 1\n counter += 1\n percent_tracker.update_progress(counter, show_time_remaining_estimate=True,\n str_description_to_include_in_logging='Updating Term Count files.')\n logging.info(\"Successfully saved to disk term-count data for \" + str(num_vids_success) + ' records.')\n logging.info(\"Records processed: \" + str(counter))\n self.transcripts_ds.save2disk()", "def incr_counter(self, path):\n res = self.read_counter(path)\n # print 'incr_counter:', path, res, '->', res + 1\n res += 1\n self.cursor.execute('REPLACE INTO counter(fullpath, count) VALUES(?, ?)', (path, res))\n self.conn.commit()\n pass", "def incrementWriteCount(self):\n self.writeCount += 1", "def update(self):\n self.clear()\n self.score += 1\n self.write(f\"Score : {self.score}\",\n align=\"center\", font=(\"Arial Black\", 20))", "def _update_cmd_counter(self) -> None:\n if self._docs_processed == self._upper_bound:\n msg = 'Processing: document {} of {}'\n print(msg.format(self._docs_processed, self._upper_bound))\n else:\n msg = 'Processing: document {} of {}\\r'\n print(msg.format(self._docs_processed, self._upper_bound),\n end='\\r')", "def update_count(count):\n data = None\n\n with open(JSON_FILE) as json_file:\n data = json.load(json_file)\n\n if data is not None:\n data['count'] = count\n\n with open(JSON_FILE, 'w') as json_file:\n json.dump(data, json_file, sort_keys=True, indent=4)", "def updateCounter(self):\n self.counter = self.counter + 1\n self.syncDataStructure[\"+\"][str(self.instanceID)] = self.counter", "def _count_progress(self, count, done=False):\n if self.blank:\n return\n self.current_count = count\n now = time.time()\n if now - self.last_time < 1 and not done:\n return\n self.f.write('\b'*len(str(self.last_count))+str(count))\n self.f.flush()\n self.last_count = count\n self.last_time = now\n if done:\n self.f.write('\\n')\n self.f.flush()", "def update_count(self, source, geometry, count):\n if source in self._counts:\n if geometry in self._counts[source]:\n self._total_count -= self._counts[source][geometry]\n self._counts[source][geometry] = count\n else:\n self._counts[source] = {geometry: count}\n self._total_count += count\n self.change_text()", "def update_count(self):\n try:\n self._thread_pool_executor.submit(self._update_count_fn)\n except:\n self._logger.exception('Exception caught submitting count metrics update task.')", "def inc(self):\n \n self.count += 1", "def UpdateCountsHandler(self):\n\n self.response.out.write('<br/><br/>Updating counts<br/>')\n MAX_COUNT = 200\n changesets = Changeset.all().order('-created_at').fetch(MAX_COUNT)\n\n date_of_first_changeset = changesets[0].created_at.date()\n date_of_last_changeset = changesets[-1].created_at.date()\n\n # 
if the same day for first and last write MAX_COUNT, skip next steps\n if date_of_last_changeset == date_of_first_changeset:\n update_count(date_of_first_changeset, MAX_COUNT)\n self.response.out.write('MAX_COUNT (%d) in this date (%s)<br/>' %\n (MAX_COUNT, str(date_of_first_changeset)) )\n return\n\n date_last = changesets[0].created_at.date()\n count_last = 0\n\n one_day = timedelta(days=1)\n\n for c in changesets:\n date_current = c.created_at.date()\n if date_current == date_last:\n count_last += 1\n else:\n if date_last - date_current > one_day:\n self.response.out.write('need to iterate between dates<br/>')\n d = date_current + one_day\n # iterate between dates, set counts to 0\n while d < date_last:\n self.response.out.write(str(d) + '<br/>')\n update_count(d, 0)\n d += one_day\n self.response.out.write(str(date_last)+': '+str(count_last)+'<br/>')\n is_new_entry = update_count(date_last, count_last)\n if not is_new_entry:\n self.response.out.write('not new entry<br/>')\n if not date_last == date_of_first_changeset:\n self.response.out.write(\n 'count for %s is already in datastore' % \n str(date_last)\n )\n return\n\n\n date_last = c.created_at.date()\n count_last = 1\n if c.created_at.date() == date_of_last_changeset:\n break\n \n self.response.out.write(str(changesets[0].created_at)+'<br/>')\n self.response.out.write(str(changesets[-1].created_at)+'<br/>')", "def __cross_wiki_counts(self):\n\n print(\"Updating counts by merging with CrossWiki\")\n\n cnt = 0\n crosswiki_path = os.path.join(\n self.base_url, \"generic/p_e_m_data/crosswikis_p_e_m.txt\"\n )\n\n with open(crosswiki_path, \"r\", encoding=\"utf-8\") as f:\n for line in f:\n parts = line.split(\"\\t\")\n mention = unquote(parts[0])\n\n if (\"Wikipedia\" not in mention) and (\"wikipedia\" not in mention):\n if mention not in self.wiki_freq:\n self.wiki_freq[mention] = {}\n\n num_ents = len(parts)\n for i in range(2, num_ents):\n ent_str = parts[i].split(\",\")\n ent_wiki_id = int(ent_str[0])\n freq_ent = int(ent_str[1])\n\n if (\n ent_wiki_id\n not in self.wikipedia.wiki_id_name_map[\"ent_id_to_name\"]\n ):\n ent_name_re = self.wikipedia.wiki_redirect_id(ent_wiki_id)\n if (\n ent_name_re\n in self.wikipedia.wiki_id_name_map[\"ent_name_to_id\"]\n ):\n ent_wiki_id = self.wikipedia.wiki_id_name_map[\n \"ent_name_to_id\"\n ][ent_name_re]\n\n cnt += 1\n if (\n ent_wiki_id\n in self.wikipedia.wiki_id_name_map[\"ent_id_to_name\"]\n ):\n if mention not in self.mention_freq:\n self.mention_freq[mention] = 0\n self.mention_freq[mention] += freq_ent\n\n ent_name = self.wikipedia.wiki_id_name_map[\n \"ent_id_to_name\"\n ][ent_wiki_id].replace(\" \", \"_\")\n if ent_name not in self.wiki_freq[mention]:\n self.wiki_freq[mention][ent_name] = 0\n self.wiki_freq[mention][ent_name] += freq_ent", "def fileCounter(directory):", "def update_stat_file(self):\n logfile = \"../data/{}_stat.json\".format(self.ID)\n statobj = {\n 'hp': self.hp,\n 'max_hp': MAX_TANK_HP,\n 'ammo': self.ammo,\n 'score': self.score,\n 'age': self.age,\n 'alive': not self.is_dead(),\n 'color': TANK_COLORS[self.color],\n }\n if USE_SIMULATOR:\n js.globals.handle_stat(self.ID, json.dumps(statobj))\n else:\n with open(logfile, 'w') as f:\n f.write(json.dumps(statobj))", "def updateBotCounts(self, nextCard):\n nextVal = dnUtil.getValue(nextCard)\n state = self.getState()\n counts = self.getCounts(state)\n newCount = counts.copy()\n for value in dnUtil.valuesList:\n if counts[value][2] == 0:\n continue\n update = self.updateCount(value, nextVal, counts[value])\n 
newCount[value] = update\n self.setCounts(newCount)" ]
[ "0.74021804", "0.73627406", "0.7276364", "0.6980413", "0.6975578", "0.6750909", "0.66988015", "0.66070646", "0.6586439", "0.658481", "0.6576082", "0.6488782", "0.6481709", "0.6444592", "0.64294684", "0.63769114", "0.6369325", "0.63536334", "0.634653", "0.63409024", "0.6326619", "0.6266785", "0.6191584", "0.61889577", "0.6089516", "0.6084936", "0.60844857", "0.6069505", "0.60523707", "0.6048789" ]
0.8075696
0
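Illustrative note on the record above (not taken from the dataset): updateCounts() keeps a flat text file of "msgNum=count" lines and bumps the matching counter by seeking back over the matched line and overwriting it in place. A minimal standalone sketch of the same bookkeeping, assuming the counts file is small enough to simply rewrite in full, could look like this; the name update_counts and the read-modify-rewrite strategy are illustrative choices, not the reference behaviour.

import os

def update_counts(msg_num, file_name="counts"):
    # Read the existing "key=value" lines, if the file exists.
    counts = {}
    if os.path.exists(file_name):
        with open(file_name, "r") as fh:
            for line in fh:
                key, sep, value = line.strip().partition("=")
                if sep:
                    counts[key] = int(value)
    # Increment this message's counter and rewrite the whole file.
    counts[str(msg_num)] = counts.get(str(msg_num), 0) + 1
    with open(file_name, "w") as fh:
        for key, value in counts.items():
            fh.write(key + "=" + str(value) + "\n")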
Cut the input spectrum to the desired frequency range. It appends zero outside the desired frequency range.
def cut_spectrum(input_spectrum, desired_frequency_range):
    channels_ip = []
    for ip in input_spectrum.GetChannels():
        channel_ip = []
        channel_op = []
        for n, i in enumerate(ip):
            if n > desired_frequency_range[0] / input_spectrum.GetResolution() and n < desired_frequency_range[1] / \
                    input_spectrum.GetResolution():
                channel_ip.append(i)
            else:
                channel_ip.append(0.0)
                channel_op.append(0.0)
        channels_ip.append(tuple(channel_ip))
    input_spectrum_modified = sumpf.Spectrum(channels=tuple(channels_ip), resolution=input_spectrum.GetResolution(),
                                             labels=input_spectrum.GetLabels())
    return input_spectrum_modified
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def spectrum_cut(spectrum, eVrange=(0.0, 0.0)):\n if eVrange[1] == 0.0:\n return spectrum\n else:\n if spectrum[-1,0] <= eVrange[0] or spectrum[0,0] >= eVrange[1]:\n return np.array([[0, 0]], dtype=np.float)\n else:\n idx1 = np.argmax(spectrum[:,0] >= eVrange[0])\n idx2 = np.argmax(spectrum[:,0] > eVrange[1])\n if spectrum[0,0] >= eVrange[0]:\n idx1 = 0\n if spectrum[-1,0] <= eVrange[1]:\n idx2 = -1\n return spectrum[idx1:idx2]", "def filter_spectrum(spectrum):\n # avoid division by 0\n spectrum.hs[1:] /= spectrum.fs[1:]\n spectrum.hs[0] = 0", "def range_spectrum_filter(self, center, width, k=3):\n fshift = _np.ones(self.shape[0])\n fshift[1::2] = -1\n slc_filter = _np.zeros(self.shape[0] // 2 + 1) * 0\n filter_slice = slice(center - width // 2, center + width // 2)\n slc_filter[filter_slice] = _sig.kaiser(width, k)\n raw_filter = _np.hstack([0, _np.fft.irfft(slc_filter) * fshift[1:]])\n return slc_filter", "def lowpassFFT(signal, rate, cutoff):\n fft = np.fft.fft(signal)\n fftfreq = np.fft.fftfreq(len(signal), 1/rate)\n for i, freq in enumerate(fftfreq):\n if abs(freq) >= cutoff:\n fft[i] = 0\n signal = np.fft.ifft(fft)\n return signal", "def trim_spectrum(freqs, power_spectra, f_range):\n\n # Create mask to index only requested frequencies\n f_mask = np.logical_and(freqs >= f_range[0], freqs <= f_range[1])\n\n # Restrict freqs & psd to requested range. The if/else is to cover both 1d or 2d arrays\n freqs_ext = freqs[f_mask]\n power_spectra_ext = power_spectra[f_mask] if power_spectra.ndim == 1 \\\n else power_spectra[:, f_mask]\n\n return freqs_ext, power_spectra_ext", "def trim_spectrum(freqs, power_spectra, f_range):\n\n # Create mask to index only requested frequencies\n f_mask = np.logical_and(freqs >= f_range[0], freqs <= f_range[1])\n\n # Restrict freqs & psd to requested range. The if/else is to cover both 1d or 2d arrays\n freqs_ext = freqs[f_mask]\n power_spectra_ext = power_spectra[f_mask] if power_spectra.ndim == 1 \\\n else power_spectra[:, f_mask]\n\n return freqs_ext, power_spectra_ext", "def mask(self):\n\n mask = self.freqs >= self.minimum_threshold\n mask = mask.astype(int)\n self.freqs = self.freqs * mask\n self.sums = self.sums * mask", "def change_length_signal(signal, length=None):\n if length is None:\n length = len(signal)\n if len(signal) >= length:\n signal = sumpf.modules.CutSignal(signal=signal, start=0, stop=length).GetOutput()\n else:\n signal = append_zeros(signal, length)\n return signal", "def overf_power_spectrum(amp, index, f0, dt, n, cut_off=0):\n \n if cut_off < 0:\n raise ValueError(\"Low frequency cut off must not be negative.\")\n # Sometimes the fitting routines do something weird that causes\n # an overflow from a ridiculous index. Limit the index.\n index = max(index, -20)\n # Get the frequencies represented in the FFT.\n df = 1.0/dt/n\n freq = sp.arange(n, dtype=float)\n freq[n//2+1:] -= freq[-1] + 1\n freq = abs(freq)*df\n # 0th (mean) mode is meaningless has IR divergence. 
Deal with it later (in\n # the cut off.\n freq[0] = 1\n # Make the power spectrum.\n power = (freq/f0)**index\n power *= amp\n # Restore frequency of mean mode.\n freq[0] = 0\n # Find the power just above the cut off frequency.\n p_cut = power[sp.amin(sp.where(freq > cut_off)[0])]\n # Flatten off the power spectrum.\n power[freq <= cut_off] = p_cut\n return power", "def callback_freq_cut(val):\n global plot_mode\n global idx_freq\n last_plot_mode = plot_mode\n plot_mode = 'freq_cut'\n# print( 'scale_freq', scale_freq)\n idx_freq = freq_to_idx( val, scale_freq )\n val_freq = idx_freq * scale_freq\n# print( 'val idx_freq val_freq', val, idx_freq, val_freq )\n update_num_shadow(int(sld['neighbors'].val))\n #plot 121\n lcutfreq.set_ydata( [val_freq, val_freq])\n lcuttime.set_alpha( 0.0 )\n lcutfreq.set_alpha( alpha_hm )\n #plot 122\n if plot_mode == last_plot_mode:\n replot_flags = get_replot_flag( idx_freq )\n replot_shadow( replot_flags )\n update_shadow( ~replot_flags )\n update_light()\n else:\n replot_shadow( [True, True])\n replot_light()\n reform_axis()\n \n fig.canvas.draw_idle()", "def truncate_data(self, width):\n times_from_mid = self.time - self.midtime\n idxs = np.abs(times_from_mid) < 0.5 * width * self.duration\n self.time = self.time[idxs]\n self.flux = self.flux[idxs]", "def cut_audio(old_path, new_path, start, end):\r\n fs, data = wavfile.read(old_path)\r\n indx_start = int(start*fs)\r\n indx_end = int(end*fs)+1\r\n wavfile.write(new_path,fs,data[indx_start:indx_end])\r\n\r\n return True", "def filter_freq(self, low_freq=None, high_freq=None, axes=None, win_fcn='boxcar'):\n axes = self._get_axes_numbers(axes)\n fdomain = self.fft(axes=axes)\n low_freq = self._cook_args(low_freq, axes)\n high_freq = self._cook_args(high_freq, axes)\n\n if low_freq is None:\n low_freq = [0]*len(axes)\n if high_freq is None:\n high_freq = [self.ts[ax]/2. 
for ax in axes]\n\n fupper, flower = fdomain.copy(), fdomain.copy()\n for ax in axes:\n fupper = fupper.select(lambda x: x >= 0, axis=ax)\n flower = flower.select(lambda x: x < 0, axis=ax)\n\n fupper = fupper.window(index1=low_freq, index2=high_freq, axes=axes, win_fcn=win_fcn)\n flower = flower.window(index1=-np.array(high_freq), index2=-np.array(low_freq),\n axes=axes, win_fcn=win_fcn)\n fdomain.update(fupper)\n fdomain.update(flower)\n vals = fftshift(fdomain.values, axes=axes)\n ift = ifft2(vals, axes=axes, shape=np.array(self.shape)[axes])\n return Signal2D(np.real(ift), index=self.index, columns=self.columns)", "def make_band(f0, band_type, num):\n \n if band_type == '1Hz-spaced':\n hwidth = num//2\n step = 1\n f = np.arange(f0-hwidth, f0+hwidth+step, step)\n \n else:\n lwr = -(num-1)//2 # lower range bound for frequency calculation\n upr = (num+1)//2 # upper range bound for frequency calculation\n f = np.array(list(f0*2**(k/(12*num)) for k in range(lwr,upr)))\n \n return f", "def bandpass(self, min_f, max_f, out_of_bounds_ok=True):\n\n if min_f >= max_f:\n raise ValueError(\n f\"min_f must be less than max_f (got min_f {min_f}, max_f {max_f}\"\n )\n\n if not out_of_bounds_ok:\n # self.frequencies fully coveres the spec's frequency range\n if min_f < min(self.frequencies) or max_f > max(self.frequencies):\n raise ValueError(\n \"with out_of_bounds_ok=False, min_f and max_f must fall\"\n \"inside the range of self.frequencies\"\n )\n\n # find indices of the frequencies in spec_freq closest to min_f and max_f\n lowest_index = np.abs(self.frequencies - min_f).argmin()\n highest_index = np.abs(self.frequencies - max_f).argmin()\n\n # take slices of the spectrogram and spec_freq that fall within desired range\n return self.__class__(\n self.spectrogram[lowest_index : highest_index + 1, :],\n frequencies=self.frequencies[lowest_index : highest_index + 1],\n times=self.times,\n decibel_limits=self.decibel_limits,\n window_samples=self.window_samples,\n overlap_samples=self.overlap_samples,\n window_type=self.window_type,\n audio_sample_rate=self.audio_sample_rate,\n scaling=self.scaling,\n )", "def bandstop_filter(data, lowcut, highcut, fs=2000, numtaps=255):\n nyq = fs / 2\n\n # design filter\n fe1 = lowcut / nyq\n fe2 = highcut / nyq\n b = firwin(numtaps, (fe1, fe2), pass_zero=True)\n\n filtered = lfilter(b, 1, data)\n\n return filtered", "def _standardize_cutoff(cutoff):\n cutoff = np.asarray(cutoff)\n cutoff[0] = max(0., cutoff[0])\n cutoff[1] = min(1., cutoff[1])\n cutoff[0] = np.min([cutoff[0], 0.09])\n cutoff[1] = np.max([cutoff[1], 0.91])\n return cutoff", "def centre_freqs(self, low_freq=DEFAULT_LOW_FREQ, high_freq=DEFAULT_HIGH_FREQ, num=DEFAULT_FILTER_NUM):\n\t\treturn self.erb_point(low_freq, high_freq, (np.arange(1.0, num+1.0)/num)[::-1])", "def trim(self, start_time, end_time):\n\n # find indices of the times in self.times closest to min_t and max_t\n lowest_index = np.abs(self.times - start_time).argmin()\n highest_index = np.abs(self.times - end_time).argmin()\n\n # take slices of the spectrogram and spec_freq that fall within desired range\n return self.__class__(\n self.spectrogram[:, lowest_index : highest_index + 1],\n frequencies=self.frequencies,\n times=self.times[lowest_index : highest_index + 1],\n decibel_limits=self.decibel_limits,\n window_samples=self.window_samples,\n overlap_samples=self.overlap_samples,\n window_type=self.window_type,\n audio_sample_rate=self.audio_sample_rate,\n scaling=self.scaling,\n )", "def set_freq_sweep(self, lower_bound, upper_bound, 
resolution=1, units='ghz'):\n convert = {'Hz':1.0, 'hz':1.0, 'KHz':1e3, 'khz':1e3, 'MHz':1e6,\n 'mhz':1e6, 'GHz':1e9, 'ghz':1e9}\n low = lower_bound*convert[units]\n high = upper_bound*convert[units]\n samples = (high-low)/resolution\n self.freq_sweep = np.linspace(low, high, samples)\n return", "def sub_spectrum(self, start_w: float, stop_w: float):\n self.__bounds_check(*[start_w, stop_w])\n start_ind = np.where(start_w <= self.spectrum[:, 0])[0][0]\n stop_ind = np.where(self.spectrum[:, 0] <= stop_w)[0][-1] + 1\n subspec = self.spectrum[start_ind:stop_ind, :].copy()\n return subspec", "def zero_blind_range(data):\n try:\n start_i = data['first_data_bin']\n except:\n start_i = 0\n data['data'][...,:start_i] = 0.0", "def get_kcut_profile(kmax,kcut,apo_over):\n ret = np.ones(kmax)\n if kcut+apo_over == 0:\n return ret\n \n rising = sin_profile(kcut+apo_over,kcut,1)\n falling = 1.0-sin_profile(kcut+apo_over,kcut,0)[::-1]\n \n ret[:len(rising)]=rising\n if len(falling) == 0:\n print (\"fourier resolution bigger than the cut size asked\")\n pass\n else:\n ret[-len(falling):]=falling\n \n ret[ret<0] = 0\n return np.fft.fftshift(ret)", "def cut( self, i_start, i_stop ):\n # create two series of indices, combine them and remove them from the data cube\n beginning = np.arange( i_start, dtype=int )\n end = np.arange( i_stop, self.n_steps, dtype=int )\n self._remove_steps( np.concatenate([beginning,end]).tolist() )", "def low_cut_filter(x, fs, cutoff=70):\n nyquist = fs // 2\n norm_cutoff = cutoff / nyquist\n\n # low cut filter\n fil = firwin(255, norm_cutoff, pass_zero=False)\n lcf_x = lfilter(fil, 1, x)\n\n return lcf_x", "def cut_sig(self):\n c = TCut(self.cut_both)\n c += TCut(self._return_if('_cut_sig'))\n return c", "def bandpass_filtfilt(rawsong, samp_freq, freq_cutoffs=(500, 10000)):\n if freq_cutoffs[0] <= 0:\n raise ValueError('Low frequency cutoff {} is invalid, '\n 'must be greater than zero.'\n .format(freq_cutoffs[0]))\n\n Nyquist_rate = samp_freq / 2\n if freq_cutoffs[1] >= Nyquist_rate:\n raise ValueError('High frequency cutoff {} is invalid, '\n 'must be less than Nyquist rate, {}.'\n .format(freq_cutoffs[1], Nyquist_rate))\n\n if rawsong.shape[-1] < 387:\n numtaps = 64\n elif rawsong.shape[-1] < 771:\n numtaps = 128\n elif rawsong.shape[-1] < 1539:\n numtaps = 256\n else:\n numtaps = 512\n\n cutoffs = np.asarray([freq_cutoffs[0] / Nyquist_rate,\n freq_cutoffs[1] / Nyquist_rate])\n # code on which this is based, bandpass_filtfilt.m, says it uses Hann(ing)\n # window to design filter, but default for matlab's fir1\n # is actually Hamming\n # note that first parameter for scipy.signal.firwin is filter *length*\n # whereas argument to matlab's fir1 is filter *order*\n # for linear FIR, filter length is filter order + 1\n b = scipy.signal.firwin(numtaps + 1, cutoffs, pass_zero=False)\n a = np.zeros((numtaps+1,))\n a[0] = 1 # make an \"all-zero filter\"\n padlen = np.max((b.shape[-1] - 1, a.shape[-1] - 1))\n filtsong = scipy.signal.filtfilt(b, a, rawsong, padlen=padlen)\n return filtsong", "def bandpassFilter (self, lowerFreq, upperFreq):\n self.bandpassLimits = (lowerFreq, upperFreq)\n # stuff to do", "def onCut(self):\n pass", "def _lowpass(self, signal, dx, Lmax):\n W = np.fft.rfftfreq(len(signal), dx)\n f_signal = np.fft.rfft(signal)\n filtered_signal = f_signal.copy()\n filtered_signal[np.where(self._div0(1,W) > Lmax)] = 0+0j\n return np.fft.irfft(filtered_signal, n=len(signal)), f_signal" ]
[ "0.62513006", "0.62132746", "0.60707045", "0.5944", "0.59013665", "0.59013665", "0.58729315", "0.5817833", "0.5806475", "0.5607447", "0.54500735", "0.5449314", "0.54282624", "0.5399753", "0.5364459", "0.53639007", "0.536371", "0.53561896", "0.5348532", "0.53258014", "0.53167987", "0.5301568", "0.5285399", "0.52758336", "0.52670455", "0.52151644", "0.52081263", "0.52007335", "0.51927817", "0.51925373" ]
0.7253377
0
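For the record above, cut_spectrum() walks every bin of every channel and keeps a value only when its bin index corresponds to a frequency strictly inside the requested range, writing 0.0 everywhere else. The sketch below is a standalone NumPy version of that band-limiting step, assuming the usual frequency = bin_index * resolution convention; it is not part of the dataset and does not reproduce sumpf's Spectrum/labels handling.

import numpy as np

def cut_spectrum_np(channels, resolution, f_low, f_high):
    # channels: array of shape (n_channels, n_bins) with spectrum values
    channels = np.asarray(channels)
    freqs = np.arange(channels.shape[-1]) * resolution
    keep = (freqs > f_low) & (freqs < f_high)   # strict bounds, as in the record
    result = np.zeros_like(channels)
    result[..., keep] = channels[..., keep]     # everything outside the band stays zero
    return result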
Appends zeros until the signal has the given length. If no length is given, zeros will be appended until the length is a power of 2.
def append_zeros(input_signal, length=None):
    if length is None:
        length = 2 ** int(math.ceil(math.log(len(input_signal), 2)))
    zeros = length - len(input_signal)
    result = sumpf.Signal(channels=tuple([c + (0.0,) * zeros for c in input_signal.GetChannels()]),
                          samplingrate=input_signal.GetSamplingRate(), labels=input_signal.GetLabels())
    return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pad(signal, new_length, end):\n assert len(signal) > 1 and len(signal[0]) > 1\n signal = np.array(signal)\n if len(signal) < new_length:\n zero_row = np.zeros(len(signal[0]))\n zero_row = np.array([zero_row])\n count = 0\n while len(signal) < new_length:\n if end:\n signal = np.concatenate((signal, zero_row))\n else:\n if count % 2 == 0:\n signal = np.concatenate((zero_row, signal))\n else:\n signal = np.concatenate((signal, zero_row))\n count += 1\n return signal[:new_length]", "def change_length_signal(signal, length=None):\n if length is None:\n length = len(signal)\n if len(signal) >= length:\n signal = sumpf.modules.CutSignal(signal=signal, start=0, stop=length).GetOutput()\n else:\n signal = append_zeros(signal, length)\n return signal", "def pad_binary_signal(x, pad_len=10):\n n = len(x)\n one_idx = np.arange(n)[x == 1]\n\n if len(one_idx) == 0:\n return x\n\n y = np.zeros(n)\n for idx in one_idx:\n start = max(idx - pad_len, 0)\n end = min(idx + pad_len + 1, n)\n y[start:end] = 1.0\n\n return y", "def prepend_zeros(data: bytes, length: int):\n print(\"prepend \" + str(length))\n return length * b\"0\" + data", "def write_zeros(self, length, error=True, move_start=True):\n self.write_value(0, length, error=error, move_start=move_start)", "def zero_pad(data):\n N = len(data)\n pow_2 = np.ceil(np.log2(N))\n return np.pad(data,(0,int((2**pow_2)-N)),'constant')", "def fast_forward_to_length(sequences, length):\n return itertools.dropwhile(lambda seq: len(seq) != length, sequences)", "def vint_mask_for_length(length):\n\t\n\treturn 0b10000000 >> (length - 1)", "def pad_or_cut_vec(self,vec,length):\n if len(vec) >= length:\n return vec[:length]\n else:\n to_return = []\n for i in range(length):\n if (i < len(vec)):\n to_return.append(vec[i])\n else:\n to_return.append(0.)\n return to_return", "def __pad__(sequence, max_l):\n if max_l - len(sequence) < 0:\n sequence = sequence[:max_l]\n else: \n sequence = np.pad(sequence, (0, max_l - (len(sequence))), 'constant', constant_values=(0))\n return sequence", "def loop(array, length):\n if len(array) < length:\n array = np.asanyarray(array)\n if len(array) == 0:\n return np.zeros((length,) + array.shape[1:], dtype=array.dtype)\n factor = length // len(array)\n if factor > 1:\n array = np.tile(array, (factor,) + (1,) * (array.ndim - 1))\n missing = length - len(array)\n if missing:\n array = np.concatenate((array, array[:missing:]))\n return array", "def _extend(self, newlen: int) -> None:\n diff = newlen - len(self)\n if diff > 0:\n self.extend([0] * diff)", "def extend_signals(signals, length=None, samplerate=None):\n if length is None:\n return signals\n if samplerate is not None:\n length = round(samplerate * length)\n\n def extend(signal):\n padding = length - signal.shape[-1]\n if padding < 1:\n return signal.copy()\n padding = np.zeros(signal.shape[:-1] + (padding,))\n padded = np.concatenate([signal, padding], axis=-1)\n return padded\n\n return _apply_to_signals(extend, signals)", "def pad(seq, n):\n return", "def zero_pad(time_series, NFFT):\r\n\r\n n_dims = len(time_series.shape)\r\n n_time_points = time_series.shape[-1]\r\n\r\n if n_dims>1:\r\n n_channels = time_series.shape[:-1]\r\n shape_out = n_channels + (NFFT,)\r\n else:\r\n shape_out = NFFT\r\n # zero pad if time_series is too short\r\n if n_time_points < NFFT:\r\n tmp = time_series\r\n time_series = np.zeros(shape_out, time_series.dtype)\r\n time_series[..., :n_time_points] = tmp\r\n del tmp\r\n\r\n return time_series", "def seq_len(length_out: IntOrIter, base0_: bool = 
None) -> ArrayLikeType:\n base0_ = get_option(\"index.base.0\", base0_)\n if is_scalar(length_out):\n return Array(range(int(length_out))) + int(not base0_)\n if len(length_out) > 1:\n logger.warning(\n \"In seq_len(%r) : first element used of 'length_out' argument\",\n length_out,\n )\n length_out = int(list(length_out)[0])\n return Array(range(length_out)) + int(not base0_)", "def _add_slice_length(self, length):\n if length < pow(2, 7):\n self.add_int8(length << 1)\n elif length < pow(2, 14):\n self.add_int16(1 | length << 2)\n elif length < pow(2, 21):\n self.add_int24(3 | length << 3)\n elif length < pow(2, 29):\n self.add_int32(7 | length << 3)\n else:\n raise SliceLengthOutOfRange(\"slice length {} is out of range\".format(length))", "def add_zeros(i, length): # format index in photos.html\n return (\"{:0>\" + str(max(len(str(length)), 2)) + \"d}\").format(i)", "def extendSequenceLength(self, timeLength):\n timeLength = self.secToStep(timeLength)\n self._addNewSwitch(timeLength,0,0)", "def padding_zeroes(number, length_string):\n return str(number).zfill(length_string)", "def pad_sequence(sequence, max_length, pad):\n padN = max(max_length - len(sequence), 0)\n result = sequence[:max_length - padN] + [pad] * padN\n return result", "def pad(self, length):\n\n if length > self.event_roll.shape[0]:\n padding = numpy.zeros((length-self.event_roll.shape[0], self.event_roll.shape[1]))\n self.event_roll = numpy.vstack((self.event_roll, padding))\n\n elif length < self.event_roll.shape[0]:\n self.event_roll = self.event_roll[0:length, :]\n\n return self.event_roll", "def pad_from_beginning_fast(vals, maxlen):\r\n length = len(vals)\r\n matrix = np.zeros((length, maxlen))\r\n lens = [len(v) for v in vals] # only iteration\r\n mask = np.arange(maxlen)[::-1] < np.array(lens)[:, None] # key line\r\n matrix[mask] = np.concatenate(vals)\r\n return matrix", "def cyclic_index_i_plus_1(i, length):\n return i + 1 if i + 1 < length else 0", "def pad_sequence_to_length(sequence: List,\n desired_length: int,\n default_value: Callable[[], Any] = lambda: 0,\n padding_on_right: bool = True) -> List:\n # Truncates the sequence to the desired length.\n if padding_on_right:\n padded_sequence = sequence[:desired_length]\n else:\n padded_sequence = sequence[-desired_length:]\n # Continues to pad with default_value() until we reach the desired length.\n for _ in range(desired_length - len(padded_sequence)):\n if padding_on_right:\n padded_sequence.append(default_value())\n else:\n padded_sequence.insert(0, default_value())\n return padded_sequence", "def pad_sequence_to_length(sequence: List,\n desired_length: int,\n default_value: Callable[[], Any] = lambda: 0,\n padding_on_right: bool = True) -> List:\n # Truncates the sequence to the desired length.\n if padding_on_right:\n padded_sequence = sequence[:desired_length]\n else:\n padded_sequence = sequence[-desired_length:]\n # Continues to pad with default_value() until we reach the desired length.\n for _ in range(desired_length - len(padded_sequence)):\n if padding_on_right:\n padded_sequence.append(default_value())\n else:\n padded_sequence.insert(0, default_value())\n return padded_sequence", "def pad_array_index(low,high,segment_length,reverse=False):\n \n remainder = (segment_length-(high-low)%segment_length)\n if not reverse:\n return high + remainder\n else:\n return low - remainder", "def pad_with_zero(list, max_length, pad_type):\n padded_list = pad_sequences(list, maxlen=max_length, padding=pad_type, truncating='post')\n return padded_list", "def 
set_length(vec, length):\n return normalized(vec) * length", "def pad_descriptors(descriptors, length):\n \n padded = np.zeros(length, dtype='complex')\n degree = len(descriptors)\n descriptors = np.fft.fftshift(descriptors)\n \n center_index = length / 2\n left_index = center_index - degree / 2 # Left index always round down\n right_index = int(round(center_index + degree / 2.0)) # Right index rounded up \n \n padded[left_index:right_index] = descriptors\n padded = np.fft.ifftshift(padded)\n return padded" ]
[ "0.7238169", "0.6805896", "0.65239316", "0.62442094", "0.622205", "0.61135155", "0.59153557", "0.5907556", "0.5828405", "0.5755306", "0.57248324", "0.5612285", "0.56056225", "0.56051517", "0.55635965", "0.55608547", "0.5536763", "0.5488493", "0.54597217", "0.5417377", "0.5394482", "0.5366244", "0.5364351", "0.53631586", "0.53583014", "0.53583014", "0.53569525", "0.53209835", "0.53129476", "0.52919024" ]
0.75854605
0
Get the first output signal.
def GetFirstOutput(self): return self.__output_signal1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def GetSecondOutput(self):\n return self.__output_signal2", "def get_output(self):\r\n x = self.query('OUTP?')\r\n if x == None: return None\r\n return int(x)", "def first_value(self):\n return self._waveforms[0].first_value", "def output(self):\n try:\n return self.outputs[-1]\n except IndexError:\n pass\n raise ValueError(\"The sample method has not been called\")", "def first_value(self):\n return self.samples[0]", "def test_get_output_signal(network_with_devices):\n network = network_with_devices\n devices = network.devices\n names = devices.names\n\n [OR1_ID] = names.lookup([\"Or1\"])\n\n assert network.get_output_signal(OR1_ID, None) == devices.LOW\n\n # Set Or1 output to HIGH\n or1 = devices.get_device(OR1_ID)\n or1.outputs[None] = devices.HIGH\n\n assert network.get_output_signal(OR1_ID, None) == devices.HIGH", "def get_signal(self):\n\n return self._signal", "def get_output(self):\r\n x = self.query('OUTP1:STAT?')\r\n if x == None: return None\r\n return int(x)", "def get_output(self):\r\n x = self.query('OUTP1:STAT?')\r\n if x == None: return None\r\n return int(x)", "def get_first_input(self):\n return 0", "def get_next_signal(self):\n keypress = None\n\n while not keypress:\n #While no keypress received\n self.do_polling()\n if self.stream:\n keypress = self.get_stream()[0]\n time.sleep(0.01)\n\n return keypress", "def default_output(self):\r\n\r\n do = getattr(self.op, 'default_output', None)\r\n if do is None:\r\n if len(self.outputs) == 1:\r\n return self.outputs[0]\r\n else:\r\n raise AttributeError(\"%s.default_output should be an output index.\" % self.op)\r\n elif do < 0 or do >= len(self.outputs):\r\n raise AttributeError(\"%s.default_output is out of range.\" % self.op)\r\n return self.outputs[do]", "def get_next_output_packet(self):\n if self.num_packets != 0:\n return self.packet_buffer.pop(0)", "def get_first(self):\n return self.A[1][0] if self.n > 0 else None", "def get_output(self, last = 1):\n\t\tif last == -1:\n\t\t\ttmp = self.out_param[::]\n\t\t\tself.out_param = []\n\t\t\treturn tmp\n\t\treturn self.out_param[-last:]", "def FirstPwmOutput():\n devRef = YRefParam()\n neededsizeRef = YRefParam()\n serialRef = YRefParam()\n funcIdRef = YRefParam()\n funcNameRef = YRefParam()\n funcValRef = YRefParam()\n errmsgRef = YRefParam()\n size = YAPI.C_INTSIZE\n #noinspection PyTypeChecker,PyCallingNonCallable\n p = (ctypes.c_int * 1)()\n err = YAPI.apiGetFunctionsByClass(\"PwmOutput\", 0, p, size, neededsizeRef, errmsgRef)\n\n if YAPI.YISERR(err) or not neededsizeRef.value:\n return None\n\n if YAPI.YISERR(\n YAPI.yapiGetFunctionInfo(p[0], devRef, serialRef, funcIdRef, funcNameRef, funcValRef, errmsgRef)):\n return None\n\n return YPwmOutput.FindPwmOutput(serialRef.value + \".\" + funcIdRef.value)", "def get_output_dev(self):\n\t\treturn call_sdk_function('PrlVmDevSound_GetOutputDev', self.handle)", "def get_sequence_output(self):\n return self.sequence_output", "def get_next_signal(self):\n row, col = -1, -1\n while row == -1 and col == -1:\n row, col = self.do_polling()\n sleep(10/1000)\n return self.get_symbol(row, col)", "def input(self):\n connected_node = self.get_connected_node()\n if connected_node:\n #it is not possible to connect to an input\n return connected_node.output()\n return None", "def get_signal(self, signal_type):\n\n return self.signals[signal_type]", "def get_output(self):\r\n _debug('simq03b_api.get_output')\r\n \r\n x = self.query('OUTP:STAT?')\r\n if x == None: return None\r\n print('Result is ', x) # For knowing the bug that we something 
have\r\n return int(x)", "def input(self):\n try:\n return self.inputs[-1]\n except IndexError:\n pass\n raise ValueError(\"The sample method has not been called\")", "def first_input(self) -> Optional[str]:\n try:\n return next(self.iter_inputs())\n except StopIteration:\n return None", "def randoutput(self, input_meaning_p = None):\r\n signal = self.psys.randoutput(input_meaning_p)\r\n return self.rsys.randoutput(conc_p(self.n_signals(), signal))", "def get_single_output_feature(model: BaseModel) -> BaseFeatureMixin:\n return next(iter(model.output_features.values()))", "def get_output(self, name='0'):\n if name not in self._outputs:\n raise ValueError(\"Invalid port name '{0}'\".format(name))\n return self._outputs[name]", "def generateSignal(self, input):\n raise NotImplementedError(\"generateSignals() not implemented!\")", "def get_signal(self, chn):\n nsamples = self.get_samples_per_signal()\n if (chn < len(nsamples)):\n x = np.zeros(nsamples[chn], dtype=np.float64)\n\n v = x[chn * nsamples[chn]:(chn + 1) * nsamples[chn]]\n self.read_phys_signal(chn, 0, nsamples[chn], v)\n return x\n else:\n return np.array([])", "def first(self, trace):\n return trace[0]" ]
[ "0.72742707", "0.61670125", "0.61595243", "0.60887444", "0.6058285", "0.60497797", "0.6038734", "0.60080075", "0.60080075", "0.5924033", "0.58593434", "0.57831407", "0.57734317", "0.57566434", "0.57046473", "0.56800205", "0.56345075", "0.55731976", "0.5569899", "0.5553696", "0.5521735", "0.5518766", "0.5473015", "0.5471897", "0.5461816", "0.54252636", "0.54243964", "0.5417478", "0.5413258", "0.53569055" ]
0.88019943
0
Get the second output signal.
def GetSecondOutput(self): return self.__output_signal2
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def GetFirstOutput(self):\n return self.__output_signal1", "def test_get_output_signal(network_with_devices):\n network = network_with_devices\n devices = network.devices\n names = devices.names\n\n [OR1_ID] = names.lookup([\"Or1\"])\n\n assert network.get_output_signal(OR1_ID, None) == devices.LOW\n\n # Set Or1 output to HIGH\n or1 = devices.get_device(OR1_ID)\n or1.outputs[None] = devices.HIGH\n\n assert network.get_output_signal(OR1_ID, None) == devices.HIGH", "def SetSecondInput(self, input_signal2):\n self.__input_signal2 = input_signal2\n self._changelength()", "def raw_signal_even():\n signal_x = np.linspace(0, 2 * np.pi, 1000)\n signal_y = (\n np.sin(10 * signal_x)\n + np.sin(50 * signal_x)\n + np.sin(60 * signal_x)\n + np.sin(100 * signal_x)\n + 2\n )\n return signal_y", "def raw_signal_odd():\n signal_x = np.linspace(0, 2 * np.pi, 1001)\n signal_y = np.sin(10 * signal_x) + np.sin(50 * signal_x) + 2\n return signal_y", "def signal_rsi(self):\n pass", "def get_2nd_derivative(self, output_name, wrt):\n \n return self.hessian[wrt[0]][wrt[1]][output_name]", "def test_get_input_signal(network_with_devices):\n network = network_with_devices\n devices = network.devices\n names = devices.names\n\n [SW1_ID, SW2_ID, OR1_ID, I1, I2] = names.lookup([\"Sw1\", \"Sw2\", \"Or1\", \"I1\",\n \"I2\"])\n # Inputs are unconnected, get_input_signal should return None\n assert network.get_input_signal(OR1_ID, I1) is None\n assert network.get_input_signal(OR1_ID, I2) is None\n\n # Make connections\n network.make_connection(SW1_ID, None, OR1_ID, I1)\n network.make_connection(SW2_ID, None, OR1_ID, I2)\n\n # Set Sw2 output to HIGH\n switch2 = devices.get_device(SW2_ID)\n switch2.outputs[None] = devices.HIGH\n\n assert network.get_input_signal(OR1_ID, I1) == devices.LOW\n assert network.get_input_signal(OR1_ID, I2) == devices.HIGH", "def part2(input):\n sys = AmpSystem(input)\n return sys.max_thruster_signal([i for i in range(5, 10)])", "def get_output(self):\r\n x = self.query('OUTP?')\r\n if x == None: return None\r\n return int(x)", "def output(self):\n try:\n return self.outputs[-1]\n except IndexError:\n pass\n raise ValueError(\"The sample method has not been called\")", "def get_output(self):\r\n x = self.query('OUTP1:STAT?')\r\n if x == None: return None\r\n return int(x)", "def get_output(self):\r\n x = self.query('OUTP1:STAT?')\r\n if x == None: return None\r\n return int(x)", "def get_output_dev(self):\n\t\treturn call_sdk_function('PrlVmDevSound_GetOutputDev', self.handle)", "def part1(input):\n sys = AmpSystem(input)\n return sys.max_thruster_signal([i for i in range(5)])", "def d2(self):\n d1 = self.d1()\n return d1 - self.sigma * (self.t **(0.5))", "def get_second_incident_node(self):\n return self.second_incident_node # return the second incident node", "def d2(self):\r\n return self.d1() - self.sigma*self.t**0.5", "def d2out():\n\tsetState(\"D2\", \"-DI-PHDGN-02:CON\", CON_OUT)", "def second(self) -> Element:\n return typing.cast(Element, self[1])", "def get_output(self, **kwargs):\n return self.out", "def _emit(self):\n rc = self.pulldom.firstEvent[1][0]\n self.pulldom.firstEvent[1] = self.pulldom.firstEvent[1][1]\n return rc", "def get_output(self, last = 1):\n\t\tif last == -1:\n\t\t\ttmp = self.out_param[::]\n\t\t\tself.out_param = []\n\t\t\treturn tmp\n\t\treturn self.out_param[-last:]", "def sin_transition2(freq1, freq2, frames, start_idx=0, samplerate=SAMPLERATE):\n t = (start_idx + np.arange(frames)) / samplerate\n t = t.reshape(-1, 1)\n return np.sin(2 * np.pi * (freq1*(t[-1]-t) 
+ freq2*(t-t[0]))/(t[-1]-t[0]) * t).reshape(-1,1)", "def feedforward_2nd_gain(self):\n return self._read(MX_FEEDFORWARD_2ND_GAIN)", "def _s2(self):\n return (self.t2, self.q2, self.p2)", "def second(self) -> int:\r\n return self._second", "def randoutput(self, input_meaning_p = None):\r\n signal = self.psys.randoutput(input_meaning_p)\r\n return self.rsys.randoutput(conc_p(self.n_signals(), signal))", "def get_sound_output_dev(self, nIndex):\n\t\treturn handle_to_object(call_sdk_function('PrlSrvCfg_GetSoundOutputDev', self.handle, nIndex))", "def get_second(self):\n\n # First we get the 8 bits stored in the second register\n # and translate it to an integer\n second_bcd = self.__read_register(_REGISTER_SECOND)\n\n # Then we extract the digits\n tens = (second_bcd & 0b01110000) >> 4\n digit = (second_bcd & 0b00001111)\n\n return 10 * (tens) + digit" ]
[ "0.71854687", "0.63554436", "0.5898584", "0.56793916", "0.5492311", "0.5456946", "0.54538345", "0.542673", "0.5402636", "0.5399631", "0.53979933", "0.53406817", "0.53406817", "0.5319457", "0.5187725", "0.518197", "0.5170489", "0.51457274", "0.5144491", "0.51254064", "0.51203173", "0.5118295", "0.51180845", "0.51155794", "0.5098691", "0.5072749", "0.5072486", "0.50568837", "0.5052444", "0.5043236" ]
0.87862813
0
Set the first input signal.
def SetFirstInput(self, input_signal1): self.__input_signal1 = input_signal1 self._changelength()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_input(self, input):\r\n\r\n self.reset()\r\n self.input = input", "def SetSecondInput(self, input_signal2):\n self.__input_signal2 = input_signal2\n self._changelength()", "def reset(self, signal):\n if not signal.readonly:\n self[signal] = signal.initial_value", "def set_input(self, input):\n self.input = transfer_to_device(input, self.device)", "def fit(self, signal):\n self.signal = signal", "def GetFirstOutput(self):\n return self.__output_signal1", "def set_input(self, input):\n pass", "def set_input(self, input):\n pass", "def SetInput(self, , , p_float_6):\n ...", "def init(self, signal):\n if signal in self:\n raise SignalError(\"Cannot add signal twice\")\n\n x = signal.initial_value\n if signal.is_view:\n if signal.base not in self:\n self.init(signal.base)\n\n # get a view onto the base data\n view = np.ndarray(\n shape=x.shape, strides=x.strides, offset=signal.offset,\n dtype=x.dtype, buffer=self[signal.base].data)\n assert np.array_equal(view, x)\n view.setflags(write=not signal.readonly)\n dict.__setitem__(self, signal, view)\n else:\n x = x.view() if signal.readonly else x.copy()\n dict.__setitem__(self, signal, x)", "def set_input(self, idx, input_stream):\n \n raise NotImplementedError", "def __call__(self, *args):\n if not args:\n return self._get_value()\n else:\n raise RuntimeError('Can only set signal values of InputSignal objects, '\n 'which signal %r is not.' % self._name)", "def initial_point(self, initial_point: Sequence[float] | None) -> None:\n self._initial_point = initial_point", "def get_first_input(self):\n return 0", "def generateSignal(self, input):\n raise NotImplementedError(\"generateSignals() not implemented!\")", "def prep_for_start(self, now, input_value):\n self.last_time = now\n self.last_input = input_value\n self.init_input = input_value", "def set_first(self, value):\n if value not in (\"player\", \"computer\", \"random\"):\n raise SettingsError(\"Invalid choice\")\n self._parser.set(\"settings\", \"first\", value)\n self._save()", "def input(self, source) -> None:\n if source is self._source:\n return\n self._source = source\n if self._socket is not None:\n self._output.input = source", "def add_raw_signal(self, signal):\n assert int(signal[0]) == signal[0], \"Raw signal are always integers\"\n assert len(signal) == len(self.scaled_signal) and len(signal) == self.signal_length, \\\n \"Raw signal must be same size as scaled signal input:{} != scale:{}\".format(signal, self.scaled_signal)\n self.raw_signal = signal", "def set_input(self, input_value):\r\n self.sample += 1\r\n self.drift_found = self.drift_detector.set_input(input_value)\r\n if self.drift_found:\r\n self.timestamp += 1\r\n if self.buffer.is_full:\r\n result_buffer = self.buffer.add(self.timestamp)\r\n self.reservoir.add_element(result_buffer)\r\n else:\r\n self.buffer.add(self.timestamp)\r\n interval = self.timestamp\r\n self.recent_interval[self.rolling_index] = interval\r\n self.rolling_index += 1\r\n if self.rolling_index == self.reservoir.size * 2:\r\n self.rolling_index = 0\r\n self.timestamp = 0\r\n self.pre_drift_point = self.sample\r\n if self.buffer.is_full and self.reservoir.check_full():\r\n relative_var = self.buffer.get_stddev() / self.reservoir.get_stddev()\r\n if relative_var > (1.0 + self.confidence) or relative_var < (1.0 - self.confidence):\r\n self.buffer.clear()\r\n # self.severity_buffer[:] = []\r\n self.vol_drift_found = True\r\n else:\r\n self.vol_drift_found = False\r\n else:\r\n self.timestamp += 1\r\n self.vol_drift_found = False\r\n\r\n return 
self.vol_drift_found", "def input(self, input):\n\n self._input = input", "def fit(self, signal):\n if signal.ndim == 1:\n self.signal = signal.reshape(-1, 1)\n else:\n self.signal = signal\n\n return self", "def first_tower(self, first_tower):\n\n self._first_tower = first_tower", "def setInput(self, x, fadetime=0.05):\n self._input = x\n self._in_fader.setInput(x, fadetime)", "def Reset_Input(self):\r\n self.Port.reset_input_buffer()", "def updateFirstPoint(self):\n x, y = self.machine.plot.dataToPixel(*self._firstPos, check=False)\n\n offset = self.machine.getDragThreshold()\n points = [(x - offset, y - offset),\n (x - offset, y + offset),\n (x + offset, y + offset),\n (x + offset, y - offset)]\n points = [self.machine.plot.pixelToData(xpix, ypix, check=False)\n for xpix, ypix in points]\n self.machine.setSelectionArea(points, fill=None,\n color=self.machine.color,\n name='first_point')", "def set_input(vtk_object, current_input):\n if isinstance(current_input, vtk.vtkPolyData):\n if vtk.VTK_MAJOR_VERSION <= 5:\n vtk_object.SetInput(current_input)\n else:\n vtk_object.SetInputData(current_input)\n elif isinstance(input, vtk.vtkAlgorithmOutput):\n vtk_object.SetInputConnection(current_input)\n\n vtk_object.Update()\n return vtk_object", "def sample(self):\n self.dev.write(1, 'S')", "def input(self, source):\n if self._input is source:\n return\n if self._input is not None:\n self._input.remove_callback(self._input_callback)\n self._input = source\n if self._input is not None:\n if not self._started:\n self._started = True\n self._stream.start()\n self._input.add_callback(self._input_callback)\n elif self._started:\n self._started = False\n self._stream.stop()", "def setValue(self,val):\n if val:\n self.input.setValue(val)" ]
[ "0.6282879", "0.6259233", "0.60873824", "0.6017913", "0.5990884", "0.5875082", "0.58168703", "0.58168703", "0.5788905", "0.57271546", "0.570479", "0.54833364", "0.54585993", "0.5397411", "0.5380954", "0.53617465", "0.5355876", "0.5352071", "0.5284869", "0.52694386", "0.5244554", "0.5236727", "0.52290547", "0.5210025", "0.5203835", "0.5202576", "0.51853704", "0.518117", "0.51737237", "0.5162926" ]
0.84033006
0
Set the second input signal.
def SetSecondInput(self, input_signal2): self.__input_signal2 = input_signal2 self._changelength()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def SetFirstInput(self, input_signal1):\n self.__input_signal1 = input_signal1\n self._changelength()", "def GetSecondOutput(self):\n return self.__output_signal2", "def reroute_input(ts0, ts1, op1):\n for i, t in enumerate(op1.inputs):\n if t is ts1:\n op1._update_input(i, ts0) # pylint: disable=protected-access", "def SetInput(self, , , p_float_6):\n ...", "def __rshift__(self, other):\n # softly check if the \"other\" is a Node with inputs\n if hasattr(other, \"inputs\"):\n for iname, iplug in other.inputs.items():\n if iname == self.name:\n target = iplug\n else:\n target = other\n self.connect(target)", "def set_io_types(self, other):\n other.set_input_type(self.input_type).set_output_type(self.output_type)\n self.set_input_type(other.input_type).set_output_type(other.output_type)\n return self", "def __rshift__(self, other):\n self.connect(other)", "def set_input(self, input):\r\n\r\n self.reset()\r\n self.input = input", "def setP2(self, p2):\n self.points[1] = p2", "def set_input(self, input):\n self.input = transfer_to_device(input, self.device)", "def set_measured_value(self, value_sig1, value_sig2):\n self.entry_measured_value_sig1.set(value_sig1)\n self.entry_measured_value_sig2.set(value_sig2)", "def set_second_incident_node(self, second_incident_node):\n # overwrite the existing second incident node with the input second incident Node object\n self.second_incident_node = second_incident_node", "def setVolume2(self):\n\n self.mediaplayer2.audio_set_volume(self.volumeslider2.value())", "def set_Second(self, value):\n super(GetTimestampFromDateParametersInputSet, self)._set_input('Second', value)", "def second_value_axis(self, second_value_axis):\n\n self.container['second_value_axis'] = second_value_axis", "def setup_signal_destination(cls, dest):\n cls.signal_destination = dest", "def set_H2(self):\n self.slot.H2 = self.lf_H2.value()\n self.w_out.comp_output()\n # Notify the machine GUI that the machine has changed\n self.saveNeeded.emit()", "def __call__(self, *args):\n if not args:\n return self._get_value()\n else:\n raise RuntimeError('Can only set signal values of InputSignal objects, '\n 'which signal %r is not.' % self._name)", "def set_modulation_input(self, mod_input):\n if (not isinstance(mod_input, Bus)) or (not mod_input.analog):\n raise TypeError(\n \"ERROR: Invalid modulation input. 
The modulation input must be 2 connector analog Bus.\")\n\n if (mod_input.width != self.mod_ip.width):\n raise Exception(\"ERROR: The bus must be a 2 connector Bus.\")\n\n with AutoUpdater._lock:\n AutoUpdater.remove_link(self.mod_ip)\n AutoUpdater.add_link(\n mod_input,\n self.mod_ip)", "def add_signal_to_noise(self):\n\n # noise\n noise = lal.CreateREAL8TimeSeries('blah', self.epoch, 0,\n self.td_noise.delta_t, lal.StrainUnit, \n int(self.td_noise.duration / self.td_noise.delta_t))\n noise.data.data = self.td_noise.data\n\n # signal\n signal = lal.CreateREAL8TimeSeries('blah',\n self.ext_params.geocent_peak_time, 0, self.td_signal.delta_t,\n lal.StrainUnit, int(self.td_signal.duration /\n self.td_signal.delta_t))\n signal.data.data = self.td_signal.data\n\n win = lal.CreateTukeyREAL8Window(len(signal.data.data),0.1)\n win.data.data[len(signal.data.data):] = 1.0\n #signal.data.data *= win.data.data\n\n # --- Scale to a target snr\n print '---'\n if self.target_snr is not None:\n\n tmp_sig = pycbc.types.TimeSeries(signal.data.data,\n delta_t=self.td_signal.delta_t)\n\n current_snr = pycbc.filter.sigma(tmp_sig, psd=self.psd,\n low_frequency_cutoff=self.f_low,\n high_frequency_cutoff=0.5/self.delta_t)\n\n signal.data.data *= self.target_snr / current_snr\n # ----\n\n # sum\n noise_plus_signal = lal.AddREAL8TimeSeries(noise, signal)\n\n self.td_response = \\\n pycbc.types.timeseries.TimeSeries(\\\n initial_array=np.copy(noise_plus_signal.data.data),\n delta_t=noise_plus_signal.deltaT,\n epoch=noise_plus_signal.epoch)\n\n # Finally, zero-pad the signal vector to have the same length as the actual data\n # vector\n no_noise = lal.CreateREAL8TimeSeries('blah', self.epoch, 0,\n self.td_noise.delta_t, lal.StrainUnit, \n int(self.td_noise.duration / self.td_noise.delta_t))\n\n no_noise.data.data = np.zeros(\\\n int(self.td_noise.duration / self.td_noise.delta_t))\n\n signal = lal.AddREAL8TimeSeries(no_noise, signal)\n\n self.td_signal = \\\n pycbc.types.timeseries.TimeSeries(initial_array=np.copy(signal.data.data),\n delta_t=signal.deltaT, epoch=noise_plus_signal.epoch)\n\n del noise, signal, noise_plus_signal", "def set_input(self, input):\n pass", "def set_input(self, input):\n pass", "def write_reg2(self, value: int) -> None:\n self.timer_low = value\n\n self.output.setFreq(1789773 / (((value | self.timer_high) + 1) << 5))", "def prepost_hook_two(self) -> None:\n self.poutput(\"two\")", "def write_reg2(self, value: int) -> None:\n self.timer_low = value\n\n self.output.setFreq(1789773 / (((value | self.timer_high) + 1) << 4))", "def test_get_input_signal(network_with_devices):\n network = network_with_devices\n devices = network.devices\n names = devices.names\n\n [SW1_ID, SW2_ID, OR1_ID, I1, I2] = names.lookup([\"Sw1\", \"Sw2\", \"Or1\", \"I1\",\n \"I2\"])\n # Inputs are unconnected, get_input_signal should return None\n assert network.get_input_signal(OR1_ID, I1) is None\n assert network.get_input_signal(OR1_ID, I2) is None\n\n # Make connections\n network.make_connection(SW1_ID, None, OR1_ID, I1)\n network.make_connection(SW2_ID, None, OR1_ID, I2)\n\n # Set Sw2 output to HIGH\n switch2 = devices.get_device(SW2_ID)\n switch2.outputs[None] = devices.HIGH\n\n assert network.get_input_signal(OR1_ID, I1) == devices.LOW\n assert network.get_input_signal(OR1_ID, I2) == devices.HIGH", "def setchi2(self,name,chi2):\n if (name not in KFNode.names):\n warning(' state not in node ',name)\n self.chi2[name]=chi2\n return", "def set_input(self, input):\n AtoB = self.opt.direction == 'AtoB'\n self.real_A = 
input['A' if AtoB else 'B'].to(self.device)\n self.real_B = input['B' if AtoB else 'A'].to(self.device)\n self.true_time = input['time_period'][0]\n self.image_paths = input['A_paths' if AtoB else 'B_paths']", "def setS2(self, num):\n self.space2 = num", "def fit(self, signal):\n self.signal = signal" ]
[ "0.65789753", "0.5970639", "0.54187995", "0.5303401", "0.52545244", "0.524003", "0.5232626", "0.52124405", "0.5209186", "0.5200294", "0.5187235", "0.5162863", "0.5161719", "0.51504976", "0.5139225", "0.51298094", "0.51160574", "0.5103286", "0.50904495", "0.50566167", "0.50540036", "0.50540036", "0.50197965", "0.5012416", "0.49724987", "0.4950923", "0.4948237", "0.4935982", "0.49314308", "0.4891075" ]
0.84657276
0
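The four rows above (GetFirstOutput, GetSecondOutput, SetFirstInput, SetSecondInput) all describe one two-input, two-output signal-block pattern. A minimal self-contained Python sketch of such a block is given below; the class name and the _recompute helper are assumed for illustration and do not come from the rows themselves.

class TwoPortBlock:
    # Illustrative two-input / two-output signal block (pass-through processing).
    def __init__(self, input_signal1=None, input_signal2=None):
        self.__input_signal1 = input_signal1
        self.__input_signal2 = input_signal2
        self.__output_signal1 = None
        self.__output_signal2 = None
        self._recompute()

    def SetFirstInput(self, input_signal1):
        # Store the first input signal and update both outputs.
        self.__input_signal1 = input_signal1
        self._recompute()

    def SetSecondInput(self, input_signal2):
        # Store the second input signal and update both outputs.
        self.__input_signal2 = input_signal2
        self._recompute()

    def GetFirstOutput(self):
        return self.__output_signal1

    def GetSecondOutput(self):
        return self.__output_signal2

    def _recompute(self):
        # Placeholder processing step: pass the inputs straight through.
        self.__output_signal1 = self.__input_signal1
        self.__output_signal2 = self.__input_signal2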
A function to change the length of signal. If the length of the signal is greater than the length then signal length is truncated, Else zeros are added to the signal.
def change_length_signal(signal, length=None): if length is None: length = len(signal) if len(signal) >= length: signal = sumpf.modules.CutSignal(signal=signal, start=0, stop=length).GetOutput() else: signal = append_zeros(signal, length) return signal
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pad(signal, new_length, end):\n assert len(signal) > 1 and len(signal[0]) > 1\n signal = np.array(signal)\n if len(signal) < new_length:\n zero_row = np.zeros(len(signal[0]))\n zero_row = np.array([zero_row])\n count = 0\n while len(signal) < new_length:\n if end:\n signal = np.concatenate((signal, zero_row))\n else:\n if count % 2 == 0:\n signal = np.concatenate((zero_row, signal))\n else:\n signal = np.concatenate((signal, zero_row))\n count += 1\n return signal[:new_length]", "def set_length(vec, length):\n return normalized(vec) * length", "def setLength(self, new_length):\n\n self.length = new_length", "def set_length(self, new_length):\n if(new_length == None):\n self._logger.write(\"Error! new_length cannot be a NoneType\")\n elif(type(new_length) != float):\n self._logger.write(\"Error! new_length must be of type float\")\n else:\n try:\n self._length = new_length\n except Exception as e:\n self._logger.write(\"Error! Could not set the new length:\\n %s\" % e)", "def truncate_signals(signals, length=None, samplerate=None):\n if length is None:\n return signals\n if samplerate is not None:\n length = round(samplerate * length)\n\n def truncation(signal):\n return signal[..., :length]\n\n return _apply_to_signals(truncation, signals)", "def change_tail_length(self, value):\n self.layer.tail_length = value", "def set_last_segment_length(self, length):\n prior_length = self.segments[-1].get_length()\n if prior_length != -1:\n self.end_time -= prior_length\n\n self.segments[-1].set_length(length)\n self.end_time += length", "def append_zeros(input_signal, length=None):\n if length is None:\n length = 2 ** int(math.ceil(math.log(len(input_signal), 2)))\n zeros = length - len(input_signal)\n result = sumpf.Signal(channels=tuple([c + (0.0,) * zeros for c in input_signal.GetChannels()]),\n samplingrate=input_signal.GetSamplingRate(),\n labels=input_signal.GetLabels())\n return result", "def extendSequenceLength(self, timeLength):\n timeLength = self.secToStep(timeLength)\n self._addNewSwitch(timeLength,0,0)", "def setLength(self, length):\n self.vector.norm = length", "def _on_len_change(self, event=None):\n with self.layer.events.length.blocker():\n self.lengthSpinBox.setValue(self.layer.length)", "def change_length(self, value):\n self.layer.length = value\n self.lengthSpinBox.clearFocus()\n self.setFocus()", "def _extend(self, newlen: int) -> None:\n diff = newlen - len(self)\n if diff > 0:\n self.extend([0] * diff)", "def set_length(self, ak_tpl: BKT, newLength: float): # -> None:\n ...", "def length(self, length: Union[int, float]):\n self._length = length\n self._update_length()\n self.events.length()\n\n self.refresh()", "def extend_signals(signals, length=None, samplerate=None):\n if length is None:\n return signals\n if samplerate is not None:\n length = round(samplerate * length)\n\n def extend(signal):\n padding = length - signal.shape[-1]\n if padding < 1:\n return signal.copy()\n padding = np.zeros(signal.shape[:-1] + (padding,))\n padded = np.concatenate([signal, padding], axis=-1)\n return padded\n\n return _apply_to_signals(extend, signals)", "def change_log_length(self, log_length):\n len_diff = abs(self.log_length - log_length)\n if log_length > self.log_length:\n for log_group in self.log_names.values():\n for log_array in log_group:\n tmparr = numpy.full(log_length, self.log_arrays[log_array][0]) # generate tmparr with first value from array\n tmparr[-self.log_arrays[log_array].size:] = self.log_arrays[log_array] # fill end with current array\n 
self.log_arrays[log_array] = tmparr\n tmparr = numpy.zeros(log_length)\n tmparr[:len_diff] = numpy.linspace(self.log_time[0] - len_diff/self.frequency,\n self.log_time[0], len_diff)\n tmparr[-self.log_time.size:] = self.log_time\n self.log_time = tmparr\n else:\n for log_group in self.log_names.values():\n for log_array in log_group:\n tmparr = numpy.zeros(log_length)\n tmparr[:] = self.log_arrays[log_array][-log_length:]\n self.log_arrays[log_array] = tmparr\n tmparr = numpy.zeros(log_length)\n tmparr[:] = self.log_time[-log_length:]\n self.log_time = tmparr\n self.log_length = log_length", "def length(self, length):\n\n self._length = length", "def _on_tail_length_change(self, event=None):\n with self.layer.events.tail_length.blocker():\n value = self.layer.tail_length\n value = np.clip(value, 1, MAX_TAIL_LENGTH)\n self.tail_length_slider.setValue(value)", "def ensure_length(x, length):\n x = nest.flatten(x)\n if len(x) == 1:\n x *= length\n\n return x", "def as_length(self, value):\n new_vec = self.copy()\n new_vec.length = value\n return new_vec", "def pulse_width(self) -> int:", "def length_changed(self, value):\n self.message.dlc = value\n self.validate_data_input(value)", "def _update_length(self, field, tag_id, value):\n # pylint: disable=unused-argument\n if tag_id not in {8, 9, 10}:\n self._message_length += len(field) + 1\n if self._message_length >= self._max_length:\n raise FIXLengthTooLongError(\n f'message too long: {self._message_length}')", "def __pad__(sequence, max_l):\n if max_l - len(sequence) < 0:\n sequence = sequence[:max_l]\n else: \n sequence = np.pad(sequence, (0, max_l - (len(sequence))), 'constant', constant_values=(0))\n return sequence", "def len_unpadded(self) -> int:", "def put_in_inverse_trig_range(length: float):\n if length < -1:\n return -1\n if length > 1:\n return 1\n return length", "def set_length(self, length):\n if length < 0:\n raise AttributeError('length should be positive')\n self.progress_char_length = length", "def normalize_return_length(self):\n length = self.length\n if length != 0:\n self.x /= length\n self.y /= length\n return length", "def pulse_width(self, value: int, /) -> None:" ]
[ "0.67467374", "0.65439796", "0.6456528", "0.6441888", "0.6438031", "0.6432366", "0.6301137", "0.62621", "0.6213264", "0.61647", "0.61597764", "0.6120923", "0.6111147", "0.61069745", "0.60383976", "0.60255015", "0.6018112", "0.601324", "0.5869847", "0.58192885", "0.5785484", "0.5757266", "0.5752699", "0.5737156", "0.5719443", "0.57045406", "0.5693797", "0.56932324", "0.5678397", "0.56659436" ]
0.85990524
0
Smooth the spectrum of the filter kernels, to make it suitable for curve fitting algorithm.
def smooth_filter_kernels(kernels=None, window_size=53, polynomial_order=3): kernels_smooth = [] for kernel in kernels: kernel_spec = sumpf.modules.FourierTransform(kernel).GetSpectrum() kernel_spec_channel = kernel_spec.GetChannels()[0] kernel_spec_channel_smooth = savitzky_golay(kernel_spec_channel, window_size, polynomial_order) kernel_spec_smooth = sumpf.Spectrum(channels=[kernel_spec_channel_smooth, ], resolution=kernel_spec.GetResolution(), labels=kernel_spec.GetLabels()) kernel_smooth = sumpf.modules.InverseFourierTransform(kernel_spec_smooth).GetSignal() kernels_smooth.append(kernel_smooth) return kernels_smooth
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def smooth(spectra, filter_win, window_type='flat', mode='reflect'):\n\n if window_type == 'flat':\n window = np.ones(filter_win)\n else:\n window = scipy.signal.windows.get_window(window_type, filter_win)\n window = window / np.sum(window)\n\n for column in range(spectra.shape[1]):\n spectra[:, column] = nd.convolve(spectra[:, column], window, mode=mode)\n\n return spectra", "def smooth_spectra(xarr, farr, sigma=3, nkern=20):\n xkern = np.arange(nkern)\n kern = np.exp(-(xkern - 0.5 * nkern) ** 2 / (sigma) ** 2)\n\n return gaussian_filter1d(farr, sigma)", "def _square_and_smooth(sig, center_freq, fs):\n # Frequency dependent time constant\n if center_freq <= 1000:\n tau = 2 / (3 * center_freq)\n else:\n tau = 2 / (3 * 1000)\n # Squaring\n sig = sig ** 2\n # Three smoothing low-pass filters\n a1 = np.exp(-1 / (fs * tau))\n b0 = 1 - a1\n # zi = signal.lfilter_zi([b0], [1 -a1])\n for i in range(3):\n sig = signal.lfilter([b0], [1, -a1], sig)\n return sig", "def _smooth_spectra(spectra, kernel, scale=False, decim=1):\n # define axes to use for smoothing\n axes = -1 if scale else (-2, -1)\n\n # frequency (in)dependent smoothing\n if isinstance(kernel, list):\n for n_k, kern in enumerate(kernel):\n spectra[..., n_k, :] = __smooth_spectra(\n spectra[..., n_k, :], kern, axes)\n else:\n spectra = __smooth_spectra(spectra, kernel, axes)\n\n # return decimated spectra\n return spectra[..., ::decim]", "def smoother(self):\n ok ,tchi2= True,0.\n if (self.status != 'filter'):\n warning('kfilter no smoothing as it is not filter!')\n debug(\"kfilter.smoother ok,chi2 \",(False,tchi2))\n return False,tchi2\n fstate = self.nodes[-1].getstate('filter')\n self.nodes[-1].setstate('smooth',fstate.copy())\n self.nodes[-1].setchi2('smooth',self.nodes[-1].getchi2('filter'))\n ks = range(0,len(self.nodes)-1)\n ks.reverse()\n for k in ks:\n node = self.nodes[k]\n node1 = self.nodes[k+1]\n sstate,schi2 = node.smooth(node1)\n node.setstate('smooth',sstate) \n node.setchi2('smooth',schi2) \n self.model.user_smooth(node)\n tchi2+=schi2\n self.status='smooth'\n debug(\"kfilter.smooth ok,chi2 \",(ok,tchi2))\n return ok,tchi2", "def scipy_smooth(img, sigma=5):\n return ndimage.gaussian_filter(img, sigma=sigma)", "def smooth(img, sigma):\n if sigma < 0:\n raise ValueError('smoothing kernel size is negative')\n elif sigma == 0:\n return img.get_data()\n else:\n sigma_vox = sigma / np.sqrt(np.sum(img.get_affine()[0:3, 0:3] ** 2, 0))\n return nd.gaussian_filter(img.get_data(), sigma_vox)", "def smooth_image(self, image, mask):\n \n filter_size = self.smoothing_filter_size.value\n if filter_size == 0:\n return image\n sigma = filter_size / 2.35\n #\n # We not only want to smooth using a Gaussian, but we want to limit\n # the spread of the smoothing to 2 SD, partly to make things happen\n # locally, partly to make things run faster, partly to try to match\n # the Matlab behavior.\n #\n filter_size = max(int(float(filter_size) / 2.0),1)\n f = (1/np.sqrt(2.0 * np.pi ) / sigma * \n np.exp(-0.5 * np.arange(-filter_size, filter_size+1)**2 / \n sigma ** 2))\n def fgaussian(image):\n output = scipy.ndimage.convolve1d(image, f,\n axis = 0,\n mode='constant')\n return scipy.ndimage.convolve1d(output, f,\n axis = 1,\n mode='constant')\n #\n # Use the trick where you similarly convolve an array of ones to find \n # out the edge effects, then divide to correct the edge effects\n #\n edge_array = fgaussian(mask.astype(float))\n masked_image = image.copy()\n masked_image[~mask] = 0\n smoothed_image = fgaussian(masked_image)\n 
masked_image[mask] = smoothed_image[mask] / edge_array[mask]\n return masked_image", "def smooth_scipy(self, mri_data):\n\n # image dimension\n if hasattr(mri_data.img_header, 'info'):\n dx, dy, dz = np.abs(mri_data.img_header.info['DELTA'])\n elif hasattr(mri_data.img_header, 'get_zooms'):\n dx, dy, dz = mri_data.img_header.get_zooms()[:3]\n else:\n self.errmsg(\"No voxel size information in mri_data header\")\n\n # Set gaussian sigma in image dimension\n sigma = (self.blur_fwhm / np.array((dx, dy, dz))) / 2.354820\n imgdata = mri_data.img_data.astype(np.float64)\n\n # Apply mask\n if hasattr(self, 'maskV'):\n imgdata[~self.maskV] = 0\n\n # Apply Gaussian filter\n filt_img = gaussian_filter(imgdata, sigma, mode='constant')\n\n if hasattr(self, 'maskV'):\n # Adjust voxels with out of the mask (0) convolution\n aux_img = np.ones_like(imgdata)\n aux_img[~self.maskV] = 0\n filt_aux_img = gaussian_filter(aux_img, sigma, mode='constant')\n filt_img[self.maskV] /= filt_aux_img[self.maskV]\n\n return filt_img.astype(mri_data.img_data.dtype)", "def smooth(dest, f):\n if f.is_Constant:\n # Return a scaled version of the input if it's a Constant\n dest.data[:] = .9 * f.data\n else:\n Operator(Eq(dest, f.avg(dims=f.dimensions[-1])), name='smoother').apply()", "def smooth(image):\n image = convolve(image, gaussian2d(), mode='same')\n return image", "def smooth_2d(res_array, window_len):\n\n gx, gy = np.mgrid[-window_len : window_len + 1, -window_len : window_len + 1]\n\n gauss = np.exp(-(gx ** 2 / float(window_len) + gy ** 2 / float(window_len)))\n gauss /= gauss.sum()\n\n smooth_array = sps.convolve(res_array, gauss, mode=\"same\")\n\n return smooth_array", "def smooth_spectrum(fluxes, window_width=7, passes=3):\n smoothed = numpy.array(fluxes)\n weights = numpy.ones(window_width) / window_width\n half_width = window_width // 2\n for i in range(passes):\n smoothed = numpy.concatenate((smoothed[half_width:0:-1], smoothed,\n smoothed[-2:-half_width - 2: -1]))\n smoothed = numpy.convolve(smoothed, weights, 'valid')\n return smoothed", "def focus(self, smooth=0):\n if self.image is None:\n self.load_image()\n # image = self.load_image()\n # print self.image\n if not self.bw:\n gray = rgb_2_gray(self.image)\n else:\n gray = self.image\n sx = ndimage.filters.sobel(gray, axis=0, mode='constant')\n sy = ndimage.filters.sobel(gray, axis=1, mode='constant')\n sob = np.hypot(sx, sy)\n self.image = None\n self.sob = sob\n if smooth > 0:\n sob = ndimage.filters.gaussian_filter(sob, sigma=smooth)\n return sob", "def _smooth(values, std):\n width = std * 4\n x = np.linspace(-width, width, min(2 * width + 1, len(values)))\n kernel = np.exp(-(x / 5)**2)\n\n values = np.array(values)\n weights = np.ones_like(values)\n\n smoothed_values = np.convolve(values, kernel, mode='same')\n smoothed_weights = np.convolve(weights, kernel, mode='same')\n\n return smoothed_values / smoothed_weights", "def smooth(y, box_pts):\r\n box = np.ones(box_pts)/box_pts\r\n y_smooth = np.convolve(y, box, mode='same')\r\n return y_smooth", "def smooth(array, binwidth):\n\tarray =scipy.convolve(array,scipy.ones(binwidth)/binwidth, mode='same')\n\treturn(array)", "def smoothSpectrum(f, X_f, r_oct):\n X_f_out = np.zeros(np.shape(X_f))\n for n in range(np.shape(f)[0]):\n # standard deviation\n sigma = f[n] / r_oct / np.pi\n # Gaussian window with the center frequnecy f[n] an dstandard deviation\n w = np.exp( -(f-f[n])**2 / (2*sigma**2) )\n w = w / np.sum(w, axis=0)\n X_f_out[n] = np.sum(w * X_f)\n \n return X_f_out", "def smooth(y, box_pts):\n box 
= np.ones(box_pts) / box_pts\n y_smooth = np.convolve(y, box, mode='same')\n return y_smooth", "def smooth(y, box_pts):\n box = np.ones(box_pts) / box_pts\n y_smooth = np.convolve(y, box, mode='same')\n return y_smooth", "def smooth(self, bandwidth, kernel='gaussian', normalize=False,\n periodic=False):\n if not self.bset.regular:\n raise ValueError(\"a {} instance must be defined over a {} \"\n \"instance which is regular (has bins of equal \"\n \"size and shape) for kernel filter smoothing\"\n .format(self.__class__.__name__,\n self.bset.__class__.__name__))\n\n if bandwidth == 0.0:\n return self\n\n size = [bandwidth / numpy.mean(w) for w in self.bset.binwidths]\n\n if periodic:\n options = dict(mode='wrap')\n else:\n options = dict(mode='constant', cval=0.0)\n if kernel == 'gaussian':\n def smoothfunc(arr):\n return filters.gaussian_filter(arr, size, **options)\n elif kernel == 'tophat':\n # Multiply by sqrt(12)\n size = [2.0 * _SQRT3 * s for s in size]\n # Round bin size to nearest odd integer\n size = [2 * int(0.5 * s) + 1 for s in size]\n\n def smoothfunc(arr):\n return filters.uniform_filter(arr, size, **options)\n else:\n raise ValueError(\"unknown filter {}\".format(kernel))\n\n new_data = _safe_mmap(normalize, smoothfunc, (self.data,))\n\n return type(self)(new_data, self.bset)", "def office_smooth_kernels(parser, args, params):\n parser.add_argument('-h_length', type=str,\n help='Variance of Gaussian [horizontal] (default=25)',\n metavar='', default='25')\n parser.add_argument('-v_length', type=str,\n help='Variance of Gaussian [vertical] (default=5)',\n metavar='', default='5')\n local_args = parser.parse_known_args(args)\n h_length = local_args[0].h_length\n v_length = local_args[0].v_length\n\n control.smooth_kernels(params, h_length, v_length)", "def apply_smoothing(image, kernel_size=3):\n return cv2.GaussianBlur(image, (kernel_size, kernel_size), 0)", "def smooth(f, g):\r\n chi_f = f.apply(lambda x: 0.0 if pd.isna(x) else 1.0)\r\n f_ext = pd.concat([f, chi_f], axis=1).prod(axis=1)\r\n a = convolve(f_ext, g)\r\n b = convolve(chi_f, g)\r\n return a.div(b)", "def eeg_smooth(array,window,window_len):\t\n\tarray_smooth = np.zeros(array.shape)\n\tif not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman', 'kaiser']:\n\t\traise ValueError, \"Window is one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman', 'kaiser'\"\n\t\t\n\tif window == 'flat':\n\t\tw = np.ones(window_len)\n\telif window == 'kaiser':\n\t\tw = eval('np.'+window+'(window_len,4)')\t\t\n\telse:\n\t\tw = eval('np.'+window+'(window_len)')\t\t\n\t\t\n\t\n\tif len(array.shape) == 1:\n\t\tntpts = len(array)\n\t\tarray_smooth = np.convolve(array, w/w.sum(), mode='same')\n\t\n\telif len(array.shape) == 2:\n\t\t[nchan,ntpts] = array.shape\n\t\tfor i in range(0,nchan):\n\t\t\tarray_smooth[i,:] = np.convolve(array[i,:], w/w.sum(), mode='same')\n\t\n\telif len(array.shape) > 2:\n\t\tprint 'Error: only works with 1 or 2 dimensions'\n\t\t\n\treturn array_smooth", "def smoothing(data, mask):\n smooth_data = gaussian_filter(data, [2, 2, 2, 0])\n\n Y = smooth_data[mask].T\n\n return Y", "def smooth_data(rawsong, samp_freq, freq_cutoffs=None, smooth_win=2):\n\n if freq_cutoffs is None:\n # then don't do bandpass_filtfilt\n filtsong = rawsong\n else:\n filtsong = bandpass_filtfilt(rawsong, samp_freq, freq_cutoffs)\n\n squared_song = np.power(filtsong, 2)\n len = np.round(samp_freq * smooth_win / 1000).astype(int)\n h = np.ones((len,)) / len\n smooth = np.convolve(squared_song, h)\n offset = round((smooth.shape[-1] 
- filtsong.shape[-1]) / 2)\n smooth = smooth[offset:filtsong.shape[-1] + offset]\n return smooth", "def smooth_data(rawsong, samp_freq, freq_cutoffs=None, smooth_win=2):\n\n if freq_cutoffs is None:\n # then don't do bandpass_filtfilt\n filtsong = rawsong\n else:\n filtsong = bandpass_filtfilt(rawsong, samp_freq, freq_cutoffs)\n\n squared_song = np.power(filtsong, 2)\n len = np.round(samp_freq * smooth_win / 1000).astype(int)\n h = np.ones((len,)) / len\n smooth = np.convolve(squared_song, h)\n offset = round((smooth.shape[-1] - filtsong.shape[-1]) / 2)\n smooth = smooth[offset:filtsong.shape[-1] + offset]\n return smooth", "def smooth(im, n=15):\n g = gaussKern(n)\n improc = signal.convolve2d(im, g, mode='same', boundary='symm')\n return improc", "def smooth(self, width, kernel=\"boxcar\", scale_sigma=True, inplace=False):\n\n if kernel == \"boxcar\" or kernel == \"Boxcar\":\n kernel = Box1DKernel(width)\n elif kernel == \"gaussian\" or kernel == \"Gaussian\":\n kernel = Gaussian1DKernel(width)\n\n new_flux = convolve(self.flux, kernel)\n if self.flux_err is not None:\n new_flux_err = convolve(self.flux_err, kernel)\n if scale_sigma:\n new_flux_err /= np.sqrt(width)\n else:\n new_flux_err = self.flux_err\n\n\n if inplace:\n self.flux = new_flux\n self.flux_err = new_flux_err\n else:\n spec = self.copy()\n spec.flux = new_flux\n spec.flux_err = new_flux_err\n return spec" ]
[ "0.6824938", "0.65886134", "0.6477609", "0.6445923", "0.6416036", "0.64153475", "0.6384642", "0.6355332", "0.6299696", "0.6229597", "0.62203497", "0.6217186", "0.62166554", "0.61390394", "0.61057895", "0.60896516", "0.60855585", "0.607278", "0.6045842", "0.6045842", "0.6043269", "0.60352385", "0.6027448", "0.59964156", "0.5974307", "0.59692883", "0.5941168", "0.5941168", "0.5941056", "0.58799726" ]
0.67573184
1
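The smooth_filter_kernels row above applies a Savitzky-Golay filter to each kernel's spectrum and transforms the result back to the time domain. A compact sketch of that idea using NumPy's FFT and SciPy's savgol_filter; unlike the row, which filters the raw complex bins with a custom helper, this assumed variant smooths the magnitude and keeps the original phase:

import numpy as np
from scipy.signal import savgol_filter

def smooth_kernel(kernel, window_size=53, polynomial_order=3):
    # Smooth the magnitude spectrum of a real FIR kernel, preserving phase.
    spectrum = np.fft.rfft(kernel)
    magnitude = savgol_filter(np.abs(spectrum), window_size, polynomial_order)
    smoothed_spectrum = magnitude * np.exp(1j * np.angle(spectrum))
    return np.fft.irfft(smoothed_spectrum, n=len(kernel))

rng = np.random.default_rng(0)
noisy_kernel = np.sin(np.linspace(0.0, 3.0, 1024)) + 0.05 * rng.standard_normal(1024)
smoothed = smooth_kernel(noisy_kernel)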
Compute the linearly weighted spectrum.
def linearweighting(input): if isinstance(input, (sumpf.Signal)): ip = sumpf.modules.FourierTransform(signal=input).GetSpectrum() else: ip = input dummy = 0.0001 while True: dummy = dummy + 0.0001 low = 1 * (dummy ** 1) high = 1 * (dummy ** (len(input) - 1)) if low > 1 and high > 10000: break energy_allchannels = [] for c in ip.GetChannels(): energy_singlechannel = [] c = reversed(c) for i, s in enumerate(c): energy_singlechannel.append((abs(s)) * (1 * (dummy ** i))) energy_singlechannel = numpy.asarray(energy_singlechannel)[::-1] energy_allchannels.append(energy_singlechannel) energy_allchannels = sumpf.Spectrum(channels=tuple(energy_allchannels), resolution=ip.GetResolution(), labels=ip.GetLabels()) return energy_allchannels
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def weightedpower(time, signal, weight, freq):\n\n result = np.zeros(len(freq))\n\n for i in range(len(freq)):\n if (freq[i] != 0.0):\n sine = np.sin(2.0*pi*freq[i]*time)\n cosine = np.cos(2.0*pi*freq[i]*time)\n a11= np.sum(weight*sine*sine)\n a12 = np.sum(weight*sine*cosine)\n a21 = a12\n a22 = np.sum(weight*cosine*cosine)\n b1 = np.sum(weight*signal*sine)\n b2 = np.sum(weight*signal*cosine)\n denominator = a11*a22-a12*a21\n A = (b1*a22-b2*a12)/denominator\n B = (b2*a11-b1*a21)/denominator\n result[i] = A*A+B*B\n else:\n result[i] = np.sum(signal)/len(signal)\n\n return(result)", "def spectral(w, s=1.0):\n n_in, n_out = w.size()\n n = max(n_out, n_in)\n gain = s / math.sqrt(n)\n return w.normal_(0, 1).mul_(gain)", "def spectrum(self, wl: Union[float, ndarray]) -> Union[float, ndarray]:\n wlm = wl * 1e-9 # Wavelength to meters\n return 3.74183e-16 * wlm ** -5. / (np.exp(0.014388 / (wlm * self.temp)) - 1.)", "def _update_samples_weight(self):\n m, n = 0, self.u.shape[0]\n T = self.u.shape[1]\n N = n + T\n d_0 = matrix(self.d_0.reshape(n, 1))\n\n # Linear Inequallity Constraints, Gx <= h\n G = matrix(-1 * np.eye(N))\n h = matrix(np.zeros(shape=(N, 1)))\n\n # Linear Equality Constraints, Ax = b\n A = matrix(np.concatenate((np.ones(shape=(T, 1)), np.zeros(shape=(n, 1))), axis=0).T)\n b = matrix(1.0)\n\n def F(x=None, z=None):\n if x is None: return 0, matrix(0.5, (N, 1))\n w = x[:T, :]\n phi = x[T:, :]\n reg_inv = 1 / self.reg\n\n weighted_u = np.dot(self.u, w) # n x 1\n scores = -1 * reg_inv * (weighted_u + phi) # n x 1\n\n # Numeric correction\n scores -= max(scores)\n\n # Auxilliaries\n weighted_scores_exp = np.multiply(d_0, np.exp(scores))\n sum_weighted_scores_exp = np.sum(weighted_scores_exp)\n sum_weighted_scores_exp_square = sum_weighted_scores_exp ** 2\n squared_weighted_scores_exp = np.square(weighted_scores_exp)\n weighted_scores_exp_mults = np.dot(weighted_scores_exp, weighted_scores_exp.T)\n uw_mult = np.multiply(self.u, weighted_scores_exp)\n uw_mult_sum = np.sum(np.multiply(self.u, weighted_scores_exp), axis=0)\n\n f = self.reg * np.log(sum_weighted_scores_exp) + self.kappa * np.sum(phi) # f(x)\n\n dfdw = -1 * uw_mult_sum.T / sum_weighted_scores_exp\n dfdphi = (-1 * weighted_scores_exp / sum_weighted_scores_exp) + self.kappa\n Df = np.concatenate((dfdw, dfdphi), axis=0) # Gradient\n\n mf = matrix(f)\n mDf = matrix(Df.T)\n if z is None:\n return mf, mDf\n # Assumes d_0 is uniform\n H = np.zeros(shape=(N, N)) # Hessian\n dfdwiwi = np.zeros(shape=(T, 1))\n dfdphiiphij = -1 * reg_inv * (np.tril(weighted_scores_exp_mults)) / sum_weighted_scores_exp_square\n dfdphiiphii = reg_inv * (np.multiply(weighted_scores_exp,\n sum_weighted_scores_exp - weighted_scores_exp) / sum_weighted_scores_exp_square)\n # dfdwiwj, dfwiphij are zeros\n dfdphiiwj = reg_inv * ((\n uw_mult * sum_weighted_scores_exp - weighted_scores_exp * uw_mult_sum) / sum_weighted_scores_exp_square)\n\n H[T:, T:] = dfdphiiphij\n H[T:, :T] = dfdphiiwj\n H_diagonal = np.concatenate((dfdwiwi, dfdphiiphii), axis=0)\n np.fill_diagonal(H, H_diagonal)\n\n mH = matrix(z[0] * H)\n return mf, mDf, mH\n\n prev_w = self.w\n prev_slacks = self.slacks\n try:\n wphi = solvers.cp(F, G=G, h=h, A=A, b=b)['x']\n self.w = wphi[:T, :]\n self.slacks = wphi[T:, :]\n except Exception as e: # Catch rank errors and continue to next iteration\n self.slacks = prev_slacks\n self.w = prev_w\n try:\n self.w = np.concatenate((self.w, [[1 / (len(self.w) + 1)]]), axis=0)\n except:\n self.w = np.concatenate((self.w, [1 / (len(self.w) + 1)]), axis=0)\n 
self.w /= np.sum(self.w)\n\n scores = ((-1 / self.reg) * np.squeeze(np.asarray(np.dot(self.u, self.w) + self.slacks))) + np.log(\n self.d_0) # Update according to Equation (6)\n return self.softmax(scores)", "def _linear_weights(self, x, d, p):\n ws = self._phi_int(x, d, p)\n zs = self._xphi_int(x, d, p)\n return np.concatenate(\n [\n [\n x[1] / (x[1] - x[0]) * (ws[1] - ws[0])\n - 1 / (x[1] - x[0]) * (zs[1] - zs[0])\n ],\n x[2:] / (x[2:] - x[1:-1]) * (ws[2:] - ws[1:-1])\n - x[:-2] / (x[1:-1] - x[:-2]) * (ws[1:-1] - ws[:-2])\n + 1 / (x[1:-1] - x[:-2]) * (zs[1:-1] - zs[:-2])\n - 1 / (x[2:] - x[1:-1]) * (zs[2:] - zs[1:-1]),\n [\n -x[-2] / (x[-1] - x[-2]) * (ws[-1] - ws[-2])\n + 1 / (x[-1] - x[-2]) * (zs[-1] - zs[-2])\n ],\n ]\n )", "def A_weight(signal, fs):\n\n b, a = A_weighting(fs)\n return lfilter(b, a, signal)", "def l1(weights):\n\treturn np.sum(np.abs(weights))", "def l2(weights):\n\treturn np.sqrt(np.sum(weights * weights))", "def WLS(store):\n calcweighted(store)\n store['regsampler'].update_yvec(store['yvectil'])\n store['regsampler'].update_xmat(store['xmattil'])\n return store['regsampler'].sample()", "def weights(err):\n w = np.power(err, -2)\n w/= np.sum(w)\n return w", "def weighted_rmsd(x, y, dim=None, weights=None):\n dev = (x - y) ** 2\n dev_mean = weighted_mean(dev, dim, weights)\n return np.sqrt(dev_mean)", "def weights(self):\n return self.mul(self.P, self.mul(\n self.L * self.tril_mask + self.I,\n #self.U * self.triu_mask + self.s.diag()\n self.U * self.triu_mask + (self.sign_s * self.log_abs_s.exp()).diag()\n ))", "def weights(self):\n return np.array(self.intensity[self.idx])", "def spectral_norm_parallel(self):\n weights = {}\n for l in self.all_conv_layers:\n weight = l.weight_normalized\n weight_mat = weight.view(weight.size(0), -1)\n if weight_mat.shape not in weights:\n weights[weight_mat.shape] = []\n weights[weight_mat.shape].append(weight_mat)\n loss = 0\n for i in weights:\n weights[i] = torch.stack(weights[i], dim=0)\n with torch.no_grad():\n num_iter = self.num_power_iter\n if i not in self.sr_u:\n num_w, row, col = weights[i].shape\n self.sr_u[i] = F.normalize(torch.ones(num_w, row).normal_(0, 1), dim=1, eps=0.001)\n self.sr_v[i] = F.normalize(torch.ones(num_w, col).normal_(0, 1), dim=1, eps=0.001)\n num_iter = 10 * self.num_power_iter\n for j in range(num_iter):\n self.sr_v[i] = F.normalize(torch.matmul(self.sr_u[i].unsqueeze(1), weights[i]).squeeze(1), dim=1, eps=0.001)\n self.sr_u[i] = F.normalize(torch.matmul(weights[i], self.sr_v[i].unsqueeze(2)).squeeze(2), dim=1, eps=0.001)\n sigma = torch.matmul(self.sr_u[i].unsqueeze(1), torch.matmul(weights[i], self.sr_v[i].unsqueeze(2)))\n loss += torch.sum(sigma)\n return loss", "def weights(self) -> List[float]:", "def __call__(self, w):\n l1_term = self.alpha * np.linalg.norm(w, 1)\n l2_term = self.alpha * 0.5 * np.linalg.norm(w, 2)\n\n return self.r * l1_term + (1 - self.r) * l2_term", "def spectral_slope(sign, fs):\n f, ff = plotfft(sign, fs)\n if not(list(f)):\n return 0\n else:\n if not (len(f) * np.dot(f, f) - np.sum(f) ** 2):\n return 0\n else:\n return (len(f) * np.dot(f, ff) - np.sum(f) * np.sum(ff)) / (len(f) * np.dot(f, f) - np.sum(f) ** 2)", "def spectralwhitening(st):\n \n for trace in arange(len(st)):\n data = st[trace].data\n \n n = len(data)\n nfft = nextpow2(n)\n \n spec = fft(data, nfft)\n spec_ampl = sqrt(abs(multiply(spec, conjugate(spec))))\n \n spec /= spec_ampl #Do we need to do some smoothing here?\n ret = real(ifft(spec, nfft)[:n])\n \n st[trace].data = ret\n \n return st", "def 
spectral_power(img, avg_window_size=None, log=True): #COMPLETE spectrum generator\r\n image = img.copy()\r\n # to avoid large spectral power at the 0 frequency :\r\n image -= np.mean(image)\r\n # wiener filter to reduce non physical variability in the spectral power\r\n if avg_window_size:\r\n N = avg_window_size\r\n image = wiener(image, (N, N))\r\n # compute the spectral power function. Place the 0 frequency-component in the center\r\n fshift = np.fft.fftshift(np.fft.fft2(image))\r\n spectrum = np.abs(fshift)**2\r\n if log:\r\n spectrum = 10*np.log(spectrum)\r\n return spectrum", "def integrate_spectrum(self):\n flux = sum(self.spectrum)\n return flux", "def _whiten_wls(mat, weights):\n\n if weights.shape[0] != mat.shape[0]:\n raise ValueError(\n \"The number of weights must be the same as the number of observations\"\n )\n if mat.ndim == 1:\n return mat * np.sqrt(weights)\n elif mat.ndim == 2:\n # return np.column_stack([x[:,0], np.sqrt(weights)[:, None]*x[:,1:]])\n return np.sqrt(weights)[:, None] * mat", "def weighting(wb, m, a):\n s = control.tf([1, 0], [1])\n return (s/m + wb) / (s + wb*a)", "def srwf(xi):\n\treturn np.sqrt(wienergain(xi)) # SRWF gain function.", "def forward(self, weights):\n\n return (np.sum(np.square(weights))) * (self.lambd / 2)", "def w(self):\n # w must be a CArray\n raise NotImplementedError(\"Linear normalizer should define the slope.\")", "def local_weight_regression(\r\n training_data_x: np.mat, training_data_y: np.mat, bandwidth: float\r\n) -> np.mat:\r\n m, n = np.shape(training_data_x)\r\n ypred = np.zeros(m)\r\n\r\n for i, item in enumerate(training_data_x):\r\n ypred[i] = item * local_weight(\r\n item, training_data_x, training_data_y, bandwidth\r\n )\r\n\r\n return ypred", "def local_weighted_lr(test_point, x_arr, y_arr, k=1.0):\n\n x_mat = np.mat(x_arr)\n y_mat = np.mat(y_arr).T\n m = np.shape(x_mat)[0]\n weights = np.mat(np.eye(m))\n\n for j in range(m):\n diff_mat = test_point - x_mat[j, :]\n weights[j, j] = np.exp(diff_mat * diff_mat.T / (-2.0 * (k ** 2)))\n\n x_squared = x_mat.T * (weights * x_mat)\n if np.linalg.det(x_squared) == 0.0:\n print(\"Matrix is singular, cannot do inverse.\")\n return\n\n ws = x_squared.I * (x_mat.T * (weights * y_mat))\n return test_point * ws", "def get_weight(ew1, ew2):\n dw = flu.delta_epiweeks(ew1, ew2)\n yr = 52.2\n hl1, hl2, bw = yr, 1, 4\n a = 0.05\n #b = (np.cos(2 * np.pi * (dw / yr)) + 1) / 2\n b = np.exp(-((min(dw % yr, yr - dw % yr) / bw) ** 2))\n c = 2 ** -(dw / hl1)\n d = 1 - 2 ** -(dw / hl2)\n return (a + (1 - a) * b) * c * d", "def linear_error(X, y, w):\n\n return np.where(y != np.sign(np.dot(X, w)), 1.0, 0.0).mean()", "def RSS(X,Y,w):\n v = Y[:,0]- (np.dot(X,w[1:]) + w[0])\n return np.dot(v,v)" ]
[ "0.63407737", "0.61877424", "0.60627776", "0.6056086", "0.60317564", "0.5951333", "0.5945845", "0.5931498", "0.59037983", "0.5872269", "0.5865271", "0.5779288", "0.5768665", "0.5762712", "0.57522035", "0.5742496", "0.5730954", "0.57047904", "0.5703877", "0.57035995", "0.5682064", "0.5658398", "0.56502634", "0.564692", "0.5610643", "0.5604292", "0.5604173", "0.55814785", "0.55766547", "0.5575454" ]
0.67521065
0
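The linearweighting row above searches for a growth factor whose powers span roughly 1 to 10000 across the spectrum, then scales the magnitude bins so that the low-index bins receive the largest weights. A small NumPy sketch of that weighting step in isolation; the closed-form factor replaces the incremental search used in the row, and the names are assumed:

import numpy as np

def linear_weighting(spectrum_bins, low=1.0, high=10000.0):
    # Scale |bin| by geometrically decaying weights: first bin ~high, last bin ~low.
    magnitudes = np.abs(np.asarray(spectrum_bins, dtype=complex))
    n = len(magnitudes)
    factor = (high / low) ** (1.0 / (n - 1))
    weights = low * factor ** np.arange(n)[::-1]
    return magnitudes * weights

print(linear_weighting(np.ones(8)))  # weights run from 10000 down to 1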
Split the corpus into k equally sized ranges.
def Split(self, k): n = len(self) start = range(0, n, ceil(n / k)) end = list(start[1:]) + [n] return [range(first, last) for first, last in zip(start, end)]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_chunks(indivs, k):\r\n\tpair_chunk_collection=[]\r\n\tfor i in xrange(0, len(indivs[0])-k+1, k):\r\n\t\tchunks=[]\r\n\t\tfor x in indivs:\r\n\t\t\tchunks.append(x[i:i+k])\r\n\t\tpartial_phase_pairs=tune_em(chunks, 5)[1]\r\n\t\tprint partial_phase_pairs\r\n\t\tpair_chunk_collection.append(partial_phase_pairs)\r\n\treturn pair_chunk_collection", "def get_k_fold_data(ds, k=10):\n splits = ds.split(k)\n for i in range(k):\n yield (concatenate(splits[j] for j in range(k) if j != i), splits[i])", "def divide_corpus(corpus, number_of_partitions):\n partition_length = corpus_length(corpus) / number_of_partitions\n list_of_index = []\n for i in range(number_of_partitions + 1):\n list_of_index.append(partition_length*i)\n list_of_index = [int(i) for i in list_of_index]\n ind_bigr = nltk.bigrams(list_of_index)\n corpus_parts = []\n for bigr in ind_bigr:\n corpus_parts.append(corpus[bigr[0]:bigr[1]])\n return corpus_parts", "def split_list(l, k):\n\n\tn = len(l)\n\tsublists = []\n\tnsubs = n / k\n\tnrems = n % k\n\n\t# little algo to split lists.\n\n\ti = int(0)\n\twhile i < n:\n\t\tsublists.append(l[i:i+k])\n\t\ti += k\n\n\treturn sublists", "def get_batches(self, k=5):\n indexes = [x for x in range(len(self))]\n np.random.shuffle(indexes)\n s = 0\n size = int(np.ceil(len(indexes) / k))\n batches = []\n while s < len(indexes):\n batches += [indexes[s:s + size]]\n s = s + size\n return batches", "def generate_k_folds(dataset, k):\n\n # TODO: finish this.\n folds = []\n dataset = np.concatenate((dataset[0], np.array(dataset[1]).reshape(-1,1)), axis=1)\n dataset_shape = dataset.shape\n shape_test_set = int(round(dataset_shape[0]/k,0))\n split_dataset = np.array_split(dataset,k,axis=0)\n for i in range(k):\n test_set = split_dataset[i]\n c = [k for j,k in enumerate(split_dataset) if j!=i]\n training_set = np.concatenate(c,axis=0)\n if test_set.shape[0] != shape_test_set:\n step = test_set.shape[0] - shape_test_set\n test_set = test_set[:-step,:]\n training_set = np.concatenate((training_set, test_set[-step:,:]), axis=0)\n r_test_set = (test_set[:,:-1], list(test_set[:,-1]))\n r_train_set = (training_set[:,:-1], list(training_set[:,-1]))\n folds.append((r_train_set, r_test_set))\n return folds", "def k_random_subsets(x, y, k):\n if k > len(y):\n raise Exception(\n \"Cannot split a dataset into more folds than it has rows.\")\n if k < 2:\n raise Exception(\"Cannot split a dataset into fewer than 2 fold.\")\n # Randomly shuffle dataset\n y = [[i] for i in y]\n z = np.append(x, y, axis=1)\n np.random.seed(0)\n np.random.shuffle(z)\n x = z[:, :-1]\n y = z[:, -1]\n # Create k equally sized subsets from the randomly sorted dataset\n subset_size = int(len(y) / k)\n remainder = len(y) - (subset_size * k)\n folds_x = list()\n folds_y = list()\n start = 0\n end = subset_size\n for i in range(k):\n fold_x = list(x[start:end])\n fold_y = list(y[start:end])\n folds_x.append(fold_x)\n folds_y.append(fold_y)\n start += subset_size\n end += subset_size\n\n for i in range(remainder):\n folds_x[i].append(x[-i])\n folds_y[i].append(y[-i])\n\n folds_x = np.array(folds_x).astype(np.int)\n folds_y = np.array(folds_y)\n return folds_x, folds_y", "def get_kmers(seq, k):\n\n return [seq[i:i+k] for i in range(len(seq)-k+1)]", "def split_k(approx_k:float):\n l = math.floor(approx_k)\n u = math.ceil(approx_k)\n r = approx_k - l\n return (l, 1-r ), (u, r)", "def batch(iterable, k=3):\n\n for i in range(0, len(iterable), k):\n yield iterable[i:i + k]", "def partition(self, data, labels):\n\t\treturn 
self.kfold.split(labels)", "def makeFolds(data, k):\r\n # randomize columns\r\n order = data.columns.tolist()\r\n random.shuffle(order)\r\n # split into folds (specified by k)\r\n folds = []\r\n fold = 0\r\n dist = len(order) / k\r\n while fold < k:\r\n start = int(round(fold * dist))\r\n end = int(round(start + dist))\r\n folds.append(order[start:end])\r\n fold = fold + 1\r\n return folds", "def user_games_split(list_len: int, k: int) -> Tuple[List[List[int]], List[List[int]]]:\n logging.getLogger(__name__).debug('user_games spliting...')\n data_train, data_test = [], []\n rand_idx = [j for j in range(list_len)]\n random.shuffle(rand_idx)\n for i in range(k):\n start = int(i * list_len / k)\n end = int((i + 1) * list_len / k)\n data_train.append(rand_idx[0:start] + rand_idx[end:list_len])\n data_test.append(rand_idx[start: end])\n return data_train, data_test", "def _split(self, c, n):\n\tsubsets = []\n\tstart = 0\n\tfor i in range(n):\n\t subset = c[start:start + (len(c) - start) / (n - i)]\n\t subsets.append(subset)\n\t start = start + len(subset)\n\treturn subsets", "def split_kbucket(self):\n cur_range_size = self.range_max - self.range_min\n half_point = self.range_min + cur_range_size // 2\n\n # Ensure no empty range is created.\n assert self.range_min < half_point < self.range_max\n\n # Make the instantiation dependent on the actual class,\n # for easy inheritance.\n new_kbucket = self.__class__(half_point, self.range_max)\n\n # Halve the ID space of the split KBucket.\n self.range_max = half_point\n\n # Split the contact list into two, according to the new ranges.\n self._contacts, new_kbucket._contacts = util.partition(\n self._contacts,\n self.contact_in_range\n )\n\n return new_kbucket", "def get_kmers_from_sequence(sequence, kmin, kmax):\n limits = range(kmin, kmax + 1)\n seq_range = len(sequence) - kmax + 1\n for i in range(0, seq_range):\n for j in limits:\n yield sequence[i:i + j]", "def get_k_fold(examples, labels, k=10):\n example_fold = []\n label_fold = []\n interval = int(len(examples)/k)\n for i in range(k):\n \t#f_examples = [examples[j] for j in range(len(examples)) if j%k == i]\n #f_labels = [labels[j] for j in range(len(labels)) if j%k == i]\n f_examples = [examples[j] for j in range(interval*i,interval*(i+1))]\n f_labels = [labels[j] for j in range(interval*i,interval*(i+1))]\n example_fold.append(f_examples)\n label_fold.append(f_labels)\n return example_fold, label_fold", "def n_split(text1: Iterable, n: int) -> list:\n\n return [text1[k:k + n] for k in range(0, len(text1), n)]", "def split_data(num_samples, num_splits):\n\n kf = sklearn.model_selection.KFold(n_splits=num_splits, random_state=0);\n return kf.split(range(num_samples))", "def _tokens_partitions(tokens, min_number_of_tokens, number_of_partitions):\n if len(tokens) < min_number_of_tokens:\n # In this case we have few token and thus we split them\n tkns_per_partition = min_number_of_tokens / number_of_partitions\n step_size = ((2 ** 64) - 1) / min_number_of_tokens\n partition = []\n for fraction, to in tokens:\n while fraction < to - step_size:\n partition.append((fraction, fraction + step_size))\n fraction += step_size\n if len(partition) >= tkns_per_partition:\n yield partition\n partition = []\n # Adding the last token\n partition.append((fraction, to))\n if len(partition) > 0:\n yield partition\n else:\n # This is the case we have more tokens than partitions,.\n splits = max(len(tokens) / number_of_partitions, 1)\n\n for i in xrange(0, len(tokens), splits):\n yield tokens[i:i + splits]\n if 
len(tokens) % splits > 0:\n yield tokens[len(tokens) / splits * splits + 1:]", "def get_subsets(l, k):\n if k == 0:\n return [[]]\n else:\n res = []\n for i in range(len(l)):\n rest_subsets = get_subsets(l[i + 1:], k - 1)\n for subset in rest_subsets:\n subset.insert(0, l[i])\n res += rest_subsets\n return res", "def batch_split(self, batch_text, threads=8):\n pass", "def split(self, split_words, min_segments=10):\n valid_exemplars, total_words = self.count_exemplar_words()\n\n # Raise error if we inputs are invalid to avoid infinite loop\n if split_words < 0 or split_words > total_words:\n raise ValueError(\n \"cannot split corpus with {} words into split with {} words\".format(\n total_words, split_words\n )\n )\n\n exemplars_in_split = []\n word_counter, seg_counter = 0, 0\n while word_counter <= split_words or seg_counter <= min_segments:\n exemplars_in_split += [\n valid_exemplars.pop(random.randrange(len(valid_exemplars)))\n ]\n word_counter += exemplars_in_split[-1].n_words\n seg_counter += len(exemplars_in_split[-1].transcript_file.segments)\n\n new_corpus = corpus(\n {\n \"location\": self.location,\n \"exemplars\": exemplars_in_split,\n }\n )\n\n remaining_corpus = self - new_corpus\n remaining_corpus.location = self.location\n\n return remaining_corpus, new_corpus", "def kmer_list(s, k):\n kmer = []\n n = len(s)\n # n-k+1 is the available range of values or probablities.\n for x in range(0, n-k+1):\n kmer.append(s[x:x+k])\n return kmer", "def split_iters(iter_ranges, n_threads = None):\n\n\n if n_threads is None:\n n_threads = cpu_count()\n \n counts = [safediv(r[1] - r[0], r[2]) for r in iter_ranges]\n # largest_dim = np.max(counts)\n total_count = float(np.sum(counts))\n split_factors = [ (c / total_count) ** 2 for c in counts ]\n if len(counts) > 2:\n # kludgy heuristic\n # if you're reading across multiple dimensions\n # assume there might be reuse of data read in \n # and try to split up work so it fits into cache \n expected_bytes = 8 \n for dim in counts:\n expected_bytes *= dim\n expected_kb = expected_bytes / 1024\n l2_cache_size = 8192\n n_pieces = max(n_threads, expected_kb / l2_cache_size)\n else: \n n_pieces = 2*n_threads \n \n # initialize work_items with an empty single range \n work_items = [[]]\n for (dim_idx,dim_count) in enumerate(counts):\n\n dim_start, _, dim_step = iter_ranges[dim_idx]\n n_dim_pieces = int(math.ceil(split_factors[dim_idx] * n_pieces))\n dim_factor = float(dim_count) / n_dim_pieces\n \n old_work_items = [p for p in work_items]\n work_items = []\n for i in xrange(n_dim_pieces):\n # copy all the var ranges, after which we'll modifying \n # the biggest dimension \n\n start = dim_start + int(math.floor(dim_step * dim_factor * i))\n stop = dim_start + int(math.floor(dim_step * dim_factor * (i+1)))\n \n dim_work_item = (start,stop,dim_step)\n for old_work_item in old_work_items:\n new_work_item = [r for r in old_work_item]\n new_work_item.append(dim_work_item) \n work_items.append(new_work_item)\n\n return work_items", "def train_dev_split(docs, dev_size):\n pass", "def get_kmers(seq,k=2):\n pair_list = []\n for i in range(0,len(seq),k):\n pair_list.append(str(seq)[i:i+k])\n return pair_list", "def create_kmers(seq,kmer_size):\n\n return [seq[i:(i+kmer_size)] for i in range(len(seq)-kmer_size+1)]", "def get_folds(X, y, k):\n # temporarily change the 1/-1 nature of y to 1/0\n _y = (y + 1) / 2\n # partition the examples into postive and negative sets\n positive_indices = np.where(_y)[0]\n negative_indices = np.where(_y - 1)[0]\n assert 
len(positive_indices) + len(negative_indices) == len(y)\n\n # shuffle both lists\n np.random.shuffle(positive_indices)\n np.random.shuffle(negative_indices)\n\n # create k buckets of indices of (approximately) equal size\n positive_folds_indices = \\\n np.array(np.array_split(positive_indices, k))\n negative_folds_indices = \\\n np.array(np.array_split(negative_indices, k))\n\n train_X, train_y, test_X, test_y = [], [], [], []\n for i in range(k):\n train_folds = np.concatenate((np.arange(0, i), np.arange(i+1, k)))\n pos_train_indices = np.concatenate(positive_folds_indices[train_folds])\n neg_train_indices = np.concatenate(negative_folds_indices[train_folds])\n pos_test_indices = positive_folds_indices[i]\n neg_test_indices = negative_folds_indices[i]\n\n train_X.append(\n np.concatenate((X[pos_train_indices], X[neg_train_indices]))\n )\n train_y.append(\n np.concatenate((y[pos_train_indices], y[neg_train_indices]))\n )\n test_X.append(\n np.concatenate((X[pos_test_indices], X[neg_test_indices]))\n )\n test_y.append(\n np.concatenate((y[pos_test_indices], y[neg_test_indices]))\n )\n\n return zip(train_X, train_y, test_X, test_y)", "def split_data(y, num_folds=10):\r\n print(f\"Creating splits...\", end=\"\")\r\n\r\n fold_dict = dict()\r\n start_index = 0\r\n # if the number of proteins is not evenly divisible by the number of folds, the last samples are distributed\r\n # evenly across folds\r\n fold_size = math.floor(len(y) / num_folds)\r\n for fold in range(num_folds):\r\n fold_dict[fold] = list(range(start_index, start_index + fold_size))\r\n start_index += fold_size\r\n\r\n # distributing samples which are left over (due to the number of samples not being divisible by the number of folds)\r\n # evenly across folds\r\n fold = 0\r\n while start_index < len(y):\r\n fold_dict[fold] += [start_index]\r\n start_index += 1\r\n fold += 1\r\n\r\n # sanity check that we did not loose any samples while splitting\r\n assert sum([len(fold) for fold in fold_dict.values()]) == len(y), \"Number of samples after splitting does not \" \\\r\n \"match number of samples before splitting.\"\r\n\r\n additional_text = \"\" if len(y) % num_folds == 0 else f\" with {len(y) % num_folds} left over samples \" \\\r\n f\"being distributed evenly among folds\"\r\n print(f\"done! Created {num_folds} splits of size {fold_size}{additional_text}.\")\r\n\r\n # TODO: use the results of this to determine if we should proceed with the current folds\r\n test_stratification(fold_dict, y)\r\n\r\n return fold_dict" ]
[ "0.68880534", "0.66759235", "0.6476079", "0.64087355", "0.638673", "0.6291475", "0.6241639", "0.6200349", "0.6170776", "0.6123056", "0.6095629", "0.60687095", "0.60145557", "0.60063714", "0.5977845", "0.59738827", "0.59505767", "0.5923953", "0.58815414", "0.58560884", "0.5847014", "0.5828205", "0.58100563", "0.58020943", "0.57891417", "0.5774933", "0.5763682", "0.5760316", "0.573788", "0.5734069" ]
0.7745997
0
clean dataframe df_airport_code and return a dataframe
def clean_airport_code(spark, input_data): try: #read file df_airport_code = spark.read.option("header","true").option("recursiveFileLookup","true").parquet(input_data+'airport-codes_csv') # drop columns # filter closed , heliport and seaplace base airport, small_airport # keep us airport drop_cols = ["elevation_ft","continent", "gps_code", "coordinates"] drop_airport = ['closed', 'heliport', 'seaplane_base', 'small_airport', 'balloonport'] keep_us = ['US'] newdf =df_airport_code.drop(*drop_cols) \ .filter(~df_airport_code.type.isin(drop_airport)) \ .filter(df_airport_code.iso_country.isin(keep_us)) #airport_code.groupBy('iso_country', 'iso_region').agg(count("*")).show() #l = ['US'] newdf = newdf.withColumn("myisocountry", split(col("iso_region"), "-").getItem(0)) \ .withColumn("myisoregion", split(col("iso_region"), "-").getItem(1)) newdf = newdf.withColumn("myisocountry",coalesce(newdf.myisocountry,newdf.iso_country)) drop_cols = ['myisocountry', 'iso_region', 'local_code'] newdf = newdf.drop(*drop_cols) airport_code = newdf.filter(~newdf.iata_code.isNull()).dropDuplicates() df_clean_airport_code = (airport_code.withColumnRenamed("ident", "ident") \ .withColumnRenamed("type", "airport_type") \ .withColumnRenamed("name", "airport_name") \ .withColumnRenamed("iso_country", "country_iso2") \ .withColumnRenamed("municipality", "city_name" ) \ .withColumnRenamed("iata_code", "iata_code") \ .withColumnRenamed("myisoregion", "state_id")) print('***** Make df_clean_airport_code processing ') df_clean_airport_code.printSchema() #df_clean_airport_code.show(2) except Exception as e: print("Unexpected error: %s" % e) else: return(df_clean_airport_code)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clean(df):", "def clean_data(df, start = 1995, stop = 2018):\n country_iso3_code = pd.read_html('https://unstats.un.org/unsd/methodology/m49/')\n country_iso3_code = country_iso3_code[0]['ISO-alpha3 code']\n df = df.loc[df.country_iso3_code.isin(country_iso3_code)]\n df = df.set_index(['indicator', 'country_iso3_code', 'country', 'year']).unstack(level = 0)\n df.columns = df.columns.get_level_values(1)\n df = df.rename(columns = {'NY.GDP.PCAP.KD.ZG': 'pc_GDP_growth',\n 'NY.GDP.PCAP.PP.CD': 'pc_GDP_PPP'})\n df = df.reset_index()\n df = df.loc[(df.year >= (start - 1)) & (df.year <= stop)]\n df = df.dropna()\n return df", "def clean_global_airports(spark, input_data):\n try:\n #read file\n df_global_airports = spark.read.option(\"header\",\"true\").csv(input_data+'airports-extended.csv')\n drop_cols = [\"icao\",\"type\", \"latitude\", \"longitude\", \"altitude\", \"timezone\", \"dst\", \"tz_timezone\", \"data_source\"]\n newdf = df_global_airports.filter(df_global_airports.type.isin('airport', 'unknown')) \\\n .drop(*drop_cols)\n\n df_clean_global_airports = newdf.select(F.col(\"airport_ID\").alias(\"airport_id\").cast(\"int\"), \\\n F.col(\"name\").alias(\"airport_name\"), \\\n F.col(\"city\").alias(\"city_name\"), \\\n F.col(\"country\").alias(\"country_name\"), \\\n F.col(\"iata\").alias(\"iata_code\")) \\\n .dropDuplicates() \\\n .fillna(\"unknown\", subset=['city_name',\"iata_code\"]) \n print('***** Make df_clean_global_airports processing ')\n df_clean_global_airports.printSchema()\n #df_clean_global_airports.show(2)\n except Exception as e:\n print(\"Unexpected error: %s\" % e)\n else:\n return(df_clean_global_airports)", "def clean_data(df):\n \n any_location_id_missing = (df.PULocationID > 263) | (df.DOLocationID > 263)\n df = df.drop(df.index[any_location_id_missing])\n \n df = df[df.tpep_dropoff_datetime > df.tpep_pickup_datetime]\n\n df.PULocationID.replace([104, 105], 103)\n \n return df", "def clean_dataframe(self, df_in , what = ''): \n \n if what == 'era5fb': # cleaning the era5 feedback only \n df = df_in[np.isfinite(df_in['obsvalue@body'])]\n try: \n df = df.loc[ df['vertco_type@body'] != 2 ] \n except:\n pass\n df = df.reindex()\n df = df[np.isfinite(df['vertco_reference_1@body'])]\n #print('check lengths: ' , len(df_in) , len(df) )\n new_ind = np.array ( range(len(df))) \n df['index'] =new_ind\n df = df.set_index('index')\n \n else: \n ### check if can be optimized ???\n df = df_in.loc[ df_in['z_coordinate_type'] != 2 ] # case where the levels are given in terms of geopotential only (pressure not available)\n \n df = df.loc[ (df['observation_value'] != -99999.0) \n & (df['observation_value'] != -999.0) \n & (df['observation_value'] != -9999) \n & (df['observation_value'] != -9999.0) \n & (df['observation_value'] != -999.9) \n & (df['observation_value'] != -8888 )\n & (df['observation_value'] != -8888.0 )\n \n #& (df['z_coordinate_type'] != 2) \n & (df['z_coordinate'] != -99999.0) \n & (df['z_coordinate'] != -9999.0 )\n & (df['z_coordinate'] != 999 )\n & (df['z_coordinate'] != 999.0 )\n \n \n ] #cleaning the values \n #clean = clean.loc[ (clean['z_coordinate_type'] != 2)] #cleaning the values\n #clean = clean.loc[ (clean['z_coordinate'] != -99999.0 )] #cleaning the values\n \n df = df[np.isfinite(df['observation_value'])] # excluding nan values \n df = df[np.isfinite(df['z_coordinate'])]\n \n return df", "def clean_data(df):\n\n # Extract the malware name\n df['malware'] = df['malware'].str.extract('Master Indicator Feed for ([a-z]+) non-sinkholed domains',\n 
expand=True).fillna(0)\n\n # Parse the IPs of the ISPs\n split_fn = lambda x: pd.Series([i for i in x.split('|')])\n domain_reg_df = df['domain_registrar_ip'].apply(split_fn)\n column_names = list(domain_reg_df.columns)\n domain_reg_df.columns = ['domain_registrar_ip_' + str(column_names[x]) for x in range(len(column_names))]\n\n final_osint_df = df.join(domain_reg_df)\n return final_osint_df", "def i94_airports(spark, df):\n df.createOrReplaceTempView('i94_airports')\n airports = spark.sql(\"\"\"\n SELECT\n DISTINCT\n STRING(ident) AS airport_id,\n type AS airport_type,\n name AS airpot_name,\n elevation_ft,\n continent,\n iso_country,\n iso_region,\n CASE WHEN iso_region LIKE 'US-%' THEN SPLIT(iso_region, '-')[1] ELSE NULL END AS us_cities,\n municipality,\n gps_code,\n iata_code,\n local_code,\n CAST(SPLIT(coordinates, ',')[0] AS DOUBLE) AS latitude,\n CAST(SPLIT(coordinates, ',')[1] AS DOUBLE) AS longitude\n FROM\n i94_airports\n \"\"\")\n return airports", "def clean_weather_df(weather_df):\n col = weather_df.columns\n drop_col = list(col[7::2])\n clean_num = weather_df[weather_df['LATITUDE'].str.contains(\n \"LATITUDE\") == False]\n num_weather = clean_num.drop(drop_col, axis=1)\n just_num = num_weather.drop(['NAME', 'STATION'], axis=1)\n all_weatherdf = just_num.apply(pd.to_numeric)\n all_weatherdf['name'] = num_weather['NAME']\n return all_weatherdf", "def get_madrid_codes(df):\n df.dropna(inplace=True)\n return df", "def clean_df(dataframe: pd.DataFrame) -> pd.DataFrame:\n dataframe[\"Close Date\"] = pd.to_datetime(dataframe['Close Date']).dt.strftime('%Y-%m-%d')\n dataframe[\"Min_salary\"] = dataframe[\"Min_salary\"].astype(int)\n dataframe[\"Max_salary\"] = dataframe[\"Max_salary\"].astype(int)\n dataframe['HiringPath'] = dataframe['HiringPath'].astype(str)\n return dataframe", "def clean(dataframe):\n # replace 'unknown' in Pop. density with np.nan\n dataframe = dataframe.replace('unknown', np.nan)\n\n # remove spaces from column names and content\n dataframe.columns = dataframe.columns.str.strip()\n\n # change YYYYMMDD to days of the year\n date_debug = []\n for i in range(1, 366):\n date_debug.append(i)\n\n dataframe2 = {'YYYYMMDD': date_debug}\n dataframe['YYYYMMDD'] = dataframe2['YYYYMMDD']\n\n return dataframe", "def _clean_dataset(df: pd.DataFrame) -> pd.DataFrame:\n df = df.loc[:, ~df.columns.str.contains(\"^Unnamed\")]\n df = df.dropna()\n return df", "def clean_data(df):\n\t# create a dataframe of the 36 individual category columns\n\tcategories = df['categories'].str.split(';', expand=True)\n\t# select the first row of the categories dataframe\n\trow = categories.loc[0]\n\t# extract a list of new column names for categories. 
Remove unnecessary chars.\n\tcategory_colnames = row.str.replace(r'-\\w','')\n\t# rename the columns of `categories`\n\tcategories.columns = category_colnames\n\t# Convert category values to just numbers 0 or 1.\n\tcategories = categories.applymap(lambda x: int(x.split('-')[1]))\n\t# drop the original categories column from `df`\n\tdf.drop(['categories'],axis=1, inplace=True)\n\t# concatenate the original dataframe with the new `categories` dataframe\n\tdf = pd.concat([df,categories],axis=1)\n\t# find duplicates\n\tdups = df.duplicated(subset=None, keep='first')\n\t# drop duplicates\n\tdf = df[~(dups)]\n\treturn df", "def clean_data(df): \n # Now we first determen the column names\n columns = df['categories'].str.replace(\"-0\",\"\").str.replace(\"-1\",\"\").str.replace(\"-2\",\"\").drop_duplicates().str.split(';')[0]\n\n # And split the 'categories' column\n df[columns] = df['categories'].str.split(';',expand=True)\n\n # The columns are still an object/string. Now make it a number\n for column in columns :\n df[column]=df[column].str.replace(column+\"-\",\"\").astype(int)\n\n # We don't need the column 'categories' anymore, so delete it\n df.drop('categories',1,inplace=True)\n \n # There are some duplicate records, so drop them\n df.drop_duplicates(inplace=True)\n \n return df", "def data_cleaning(df):\n df = df.dropna(how='all')\n \n return df", "def clean_up_raw(df_raw):\n # exclude_subset = ['well', 'tile', 'cell', 'intensity', 'blob'] # causes issues with later joins, maybe a pandas bug\n import lasagna.utils\n df_raw[CYCLE] = df_raw[CYCLE].astype(int)\n df_raw = df_raw.sort_values([WELL, TILE, CELL, BLOB, CYCLE, CHANNEL])\n return df_raw", "def preprocess_dataframe(self, dataframe):\n return dataframe", "def clean_data(df_name):\n\n wines = df_name\n wines = wines.rename(columns={'Vintage': 'Year'})\n wines['Location'] = wines['Appellation'].apply(lambda x: x['Region']['Name'])\n wines['Region'] = wines['Appellation'].apply(lambda x: x['Name'])\n wines['Type'] = wines['Varietal'].apply(lambda x: x['WineType']['Name'])\n wines['Grape'] = wines['Varietal'].apply(lambda x: x['Name'])\n wines['Reviews'] = wines['Community'].apply(lambda x: x['Reviews']['Url'])\n drop_columns = ['Appellation', 'Community', 'Description', 'GeoLocation', 'Labels', 'ProductAttributes','Ratings','Retail', 'Url', 'Varietal', 'Vineyard', 'Vintages']\n wines.drop(drop_columns, axis=1, inplace=True)\n wines = wines[['Id', 'Name', 'Year', 'Type', 'Grape', 'Location', 'Region', 'PriceRetail', 'PriceMin', 'PriceMax', 'Reviews']]\n wines['CurrentReviews'] = '' #wines['CurrentReviews'].apply(lambda x: [\"\"])\n wines['PriorReviews'] = '' #wines['PriorReviews'].apply(lambda x: [''])\n\n return wines", "def clean_data(df):\n # Clean Country and Region column.\n df['Country'] = df['Country'].str.strip()\n df['Region'] = df['Region'].str.strip()\n\n # Clean Pop. Density column.\n df['Pop. Density (per sq. mi.)'] = df['Pop. Density (per sq. mi.)'].str.replace(',', '.')\n df['Pop. Density (per sq. mi.)'] = pd.to_numeric(df['Pop. Density (per sq. 
mi.)'], errors='coerce')\n\n # Clean Infant mortality column.\n df['Infant mortality (per 1000 births)'] = df['Infant mortality (per 1000 births)'].str.replace(',', '.')\n df['Infant mortality (per 1000 births)'] = pd.to_numeric(df['Infant mortality (per 1000 births)'], errors='coerce')\n\n # Clean GDP column.\n df['GDP ($ per capita) dollars'] = df['GDP ($ per capita) dollars'].str.strip(' dollars')\n df['GDP ($ per capita) dollars'] = pd.to_numeric(df['GDP ($ per capita) dollars'], errors='coerce')\n\n return df", "def create_dataframe():\n df = pd.read_csv(\"data/311-calls.csv\", parse_dates=[\"created\"])\n df[\"created\"] = df[\"created\"].dt.date\n df.drop(columns=[\"incident_zip\"], inplace=True)\n num_complaints = df[\"complaint_type\"].value_counts()\n to_remove = num_complaints[num_complaints <= 30].index\n df.replace(to_remove, np.nan, inplace=True)\n return df", "def clean_iso_country(spark, input_data):\n try:\n #read file\n df_iso_country = spark.read.option(\"header\",\"true\").csv(input_data+'wikipedia-iso-country-codes.csv')\n df = (df_iso_country.withColumnRenamed('English short name lower case','country_name') \\\n .withColumnRenamed('Alpha_2', 'country_iso2') \\\n .withColumnRenamed('Alpha_3', 'country_iso3') \\\n .withColumnRenamed('Num_code','country_num'))\n\n df_clean_iso_country = df_iso_country.drop(\"ISO_3166-2\") \\\n .select(F.col(\"Country\").alias(\"country_name\"), \\\n F.col(\"Alpha_2\").alias(\"country_iso2\"), \\\n F.col(\"Alpha_3\").alias(\"country_iso3\"), \\\n F.col(\"Num_code\").alias(\"country_num\") \\\n .cast(\"int\")) \\\n .dropDuplicates()\n print('***** Make df_clean_iso_country processing ')\n df_clean_iso_country.printSchema()\n #df_clean_iso_country.show(2)\n except Exception as e:\n print(\"Unexpected error: %s\" % e)\n else:\n return(df_clean_iso_country)", "def get_China_exhubei(df) -> pandas.core.frame.DataFrame:\n return df[(df['countryCode']=='CN') & (df['province']!='Hubei Province') & ~(df['province'].isnull()) \\\n & ~(df['city'].isnull())]", "def clean_postcodes(postcodes):\n postcode_df = pd.DataFrame({'Postcode':postcodes})\n postcode_df['Postcode'] = postcode_df['Postcode'].str.upper()\n\n # If length is not 7 get rid of spaces. This fixes e.g. \"SW19 2AZ\" -> \"SW192AZ\"\n postcode_df['Postcode'] = postcode_df['Postcode'].where(\n postcode_df['Postcode'].str.len() == 7, postcode_df['Postcode'].str.replace(\" \", \"\"))\n\n # If length is 5 (e.g. \"W67HZ\") add two spaces in the middle (-> \"W6 7HZ\")\n postcode_df['Postcode'] = postcode_df['Postcode'].where(\n postcode_df['Postcode'].str.len() != 5,\n postcode_df['Postcode'].str[:2]+ \" \" + postcode_df['Postcode'].str[2:])\n\n # If length is 6 (e.g. 
\"SW72AZ\") add a space in the middle and end(-> \"SW7 2AZ\")\n postcode_df['Postcode'] = postcode_df['Postcode'].where(\n postcode_df['Postcode'].str.len() != 6,\n postcode_df['Postcode'].str[:3]+ \" \" + postcode_df['Postcode'].str[3:])\n\n return postcode_df['Postcode'].to_numpy()", "def trimDf(df):\n cols = set(df.columns)\n\n cols.remove('exclamationCount') # bug in our feature extraction code\n cols.remove('price') # considered only free apps\n cols.remove('appName') # removing appNames\n\n # return df[list(cols)]\n\n\n\n return df[list(('revSent', 'appLabel'))]", "def mopub_dataframe_cleaner(dataframe):\r\n print(\"Cleaning Mopub data...\")\r\n df = dataframe\r\n df['App'] = df.App.map({\"IMVU iOS - #1 3D Avatar Social App\":'IMVU iOS', \r\n \"IMVU Android - #1 3D Avatar Social App\":\"IMVU Android\"})\r\n \r\n df['Total_Code_Served'] = df['Requests']\r\n df['Partner'] = 'MoPub'\r\n \r\n df = df.rename(columns={'App ID':\"App_ID\", \"AdUnit ID\":\"AdUnit_ID\", \r\n 'AdUnit Format':\"AdUnit_Format\"})\r\n\r\n df_pivot = df.pivot_table(index=['Day', 'App', 'AdUnit', 'AdUnit_Format', \r\n 'Country', 'Partner'], \r\n values=['Total_Code_Served', 'Requests', \r\n 'Impressions', 'Clicks', 'Revenue'], \r\n aggfunc='sum')\r\n\r\n df_pivot.to_csv(\"mopub-pivot.csv\")\r\n\r\n df = pd.read_csv(\"mopub-pivot.csv\")\r\n\r\n df = df.rename(columns={\r\n 'AdUnit_Format':'UnitType'})\r\n\r\n df['UnitType'] = df.UnitType.map({'Banner':'banner',\r\n 'Native':'native', 'Rewarded video': 'video'})\r\n\r\n df = df[['Day', 'App', 'AdUnit', 'UnitType', 'Country', 'Total_Code_Served',\r\n 'Requests', 'Impressions', 'Clicks', 'Revenue', 'Partner']]\r\n \r\n os.remove(\"mopub-pivot.csv\")\r\n\r\n return df", "def get_clean_data(path = 'ucr_offenses_known_monthly_1960_2016_dta/', \n identifier_variables = ['fips_state_county_code', 'state', 'date', 'year', 'zip_code', 'month'], \n crime_category = ['act_aggravated_assault', 'act_simple_assault', 'act_murder', 'act_robbery_total', \n 'act_manslaughter', 'act_theft_total', 'act_mtr_vhc_theft_total', 'act_burglary_total', 'act_rape_total'], \n start_year = 1980, end_year = 2009, selected_area = 'all'):\n all_df = []\n for i in get_filenames(start_year, end_year):\n file = path + i\n print(file)\n each_df = pd.read_stata(file)\n each_df = each_df[identifier_variables + crime_category]\n each_df = each_df[each_df['fips_state_county_code'] == '06001']\n each_df['zipcode'] = each_df['zip_code'].apply(lambda x: str(x)[0:5])\n #split Alameda into West and East Alameda according to zip code\n if selected_area == 'east':\n each_df = each_df[(each_df['zipcode'] == '94550') | (each_df['zipcode'] == '94566') | \n (each_df['zipcode'] == '94586') | (each_df['zipcode'] == '94568') | \n (each_df['zipcode'] == '94588') | (each_df['zipcode'] == '94551')]\n elif selected_area == 'west':\n each_df = each_df[(each_df['zipcode'] != '94550') & (each_df['zipcode'] != '94566') & \n (each_df['zipcode'] != '94586') & (each_df['zipcode'] != '94568') & \n (each_df['zipcode'] != '94588') & (each_df['zipcode'] != '94551') &\n (each_df['zipcode'] != '0') & (each_df['zipcode'] != '0.0') & \n (each_df['zipcode'] != 'not r') & (each_df['zipcode'] != 'missi')]\n each_df.loc[:, 'YearMonth'] = [int(re.sub('-', '', date)[0:6]) for date in each_df.loc[:, 'date']]\n #sum up amount of crimes taken place in each category for each month\n each_df = each_df.groupby(['YearMonth'])[crime_category].sum()\n each_df['crime_sum'] = each_df.sum(axis = 1)\n each_df = each_df['crime_sum'].reset_index()\n 
all_df.append(each_df)\n df = pd.concat(all_df).fillna(0)\n df = df.sort_values('YearMonth').reset_index()\n #split variable 'YearMonth\" into two variables 'year' and \"month' for Poission regression\n del df['index']\n df['year'] = df['YearMonth'].apply(lambda x: str(x)[:4])\n df['month'] = df['YearMonth'].apply(lambda x: str(x)[4:])\n if selected_area == 'east':\n df.to_csv('east_alameda_crime.csv')\n elif selected_area == 'west':\n df.to_csv('west_alameda_crime.csv')\n else:\n df.to_csv('all_alameda_crime.csv')\n return(df)", "def clean_up(df: pd.DataFrame) -> pd.DataFrame:\n\n # Hereafter df is sorted by date, which is helpful as it allows using .iloc[-1]\n # to get current (or most recent known) situation per location\n # (Otherwise we'd have to groupby agg -> min date, and then filter)\n df = df.sort_values(\n [Columns.LOCATION_NAME, Columns.DATE, Columns.CASE_TYPE], ascending=True\n )\n\n return df", "def prepare_input_df(df: DataFrame) -> DataFrame:\r\n df = df.fillna('') # Fill np.nan values with blanks (\"\").\r\n df = to_upper(df) # Force case to UPPER for all columns.\r\n df = strip_columns(df) # Remove trailing whitespace.\r\n return df", "def clean_location(df):\n \n local = df['location'].astype(str)\n \n #geocoders read X St at Y St better than X & Y or X/Y\n local = local.str.replace(\"&\", \"at\")\n local = local.str.replace(\"/\", \"at\")\n \n #OpenAddress dataset has addresses in title case\n local = local.str.title()\n\n return df.assign(location=local.values)", "def _flip_wdi(df: pd.DataFrame) -> pd.DataFrame:\n\n log.info(\"Flipping WDI\")\n\n df = df.rename(columns=lambda x: x.replace(\" \", \"\"))\n df = df.rename(columns=lambda x: x.lower())\n\n # Headache-magic, tbh I don't remember how it works.\n df = df.drop([\"countryname\", \"indicatorname\"], axis=1)\n df = df.set_index([\"countrycode\", \"indicatorcode\"])\n df.columns.name = \"year\"\n df = df.stack().unstack(\"indicatorcode\")\n df = df.reset_index()\n df[\"year\"] = df[\"year\"].astype(\"int32\")\n df = df.set_index([\"year\", \"countrycode\"]).sort_index()\n\n df = df.rename(columns=lambda x: x.replace(\".\", \"_\"))\n df = df.rename(columns=lambda x: x.lower())\n\n log.info(\"Done flipping WDI\")\n\n return df" ]
[ "0.6836619", "0.6605874", "0.6549241", "0.64679146", "0.6422063", "0.6272814", "0.6042544", "0.60288787", "0.60243547", "0.5989035", "0.597916", "0.5965574", "0.5909759", "0.58966345", "0.5879114", "0.58763796", "0.58461595", "0.58415276", "0.5823047", "0.58043087", "0.57851946", "0.5774138", "0.5759947", "0.57574254", "0.57517993", "0.5737151", "0.57264423", "0.5699673", "0.56801766", "0.5663001" ]
0.7142323
0
clean dataframe df_global_airports and return a dataframe
def clean_global_airports(spark, input_data): try: #read file df_global_airports = spark.read.option("header","true").csv(input_data+'airports-extended.csv') drop_cols = ["icao","type", "latitude", "longitude", "altitude", "timezone", "dst", "tz_timezone", "data_source"] newdf = df_global_airports.filter(df_global_airports.type.isin('airport', 'unknown')) \ .drop(*drop_cols) df_clean_global_airports = newdf.select(F.col("airport_ID").alias("airport_id").cast("int"), \ F.col("name").alias("airport_name"), \ F.col("city").alias("city_name"), \ F.col("country").alias("country_name"), \ F.col("iata").alias("iata_code")) \ .dropDuplicates() \ .fillna("unknown", subset=['city_name',"iata_code"]) print('***** Make df_clean_global_airports processing ') df_clean_global_airports.printSchema() #df_clean_global_airports.show(2) except Exception as e: print("Unexpected error: %s" % e) else: return(df_clean_global_airports)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clean(df):", "def i94_airports(spark, df):\n df.createOrReplaceTempView('i94_airports')\n airports = spark.sql(\"\"\"\n SELECT\n DISTINCT\n STRING(ident) AS airport_id,\n type AS airport_type,\n name AS airpot_name,\n elevation_ft,\n continent,\n iso_country,\n iso_region,\n CASE WHEN iso_region LIKE 'US-%' THEN SPLIT(iso_region, '-')[1] ELSE NULL END AS us_cities,\n municipality,\n gps_code,\n iata_code,\n local_code,\n CAST(SPLIT(coordinates, ',')[0] AS DOUBLE) AS latitude,\n CAST(SPLIT(coordinates, ',')[1] AS DOUBLE) AS longitude\n FROM\n i94_airports\n \"\"\")\n return airports", "def clean_dataframe(self, df_in , what = ''): \n \n if what == 'era5fb': # cleaning the era5 feedback only \n df = df_in[np.isfinite(df_in['obsvalue@body'])]\n try: \n df = df.loc[ df['vertco_type@body'] != 2 ] \n except:\n pass\n df = df.reindex()\n df = df[np.isfinite(df['vertco_reference_1@body'])]\n #print('check lengths: ' , len(df_in) , len(df) )\n new_ind = np.array ( range(len(df))) \n df['index'] =new_ind\n df = df.set_index('index')\n \n else: \n ### check if can be optimized ???\n df = df_in.loc[ df_in['z_coordinate_type'] != 2 ] # case where the levels are given in terms of geopotential only (pressure not available)\n \n df = df.loc[ (df['observation_value'] != -99999.0) \n & (df['observation_value'] != -999.0) \n & (df['observation_value'] != -9999) \n & (df['observation_value'] != -9999.0) \n & (df['observation_value'] != -999.9) \n & (df['observation_value'] != -8888 )\n & (df['observation_value'] != -8888.0 )\n \n #& (df['z_coordinate_type'] != 2) \n & (df['z_coordinate'] != -99999.0) \n & (df['z_coordinate'] != -9999.0 )\n & (df['z_coordinate'] != 999 )\n & (df['z_coordinate'] != 999.0 )\n \n \n ] #cleaning the values \n #clean = clean.loc[ (clean['z_coordinate_type'] != 2)] #cleaning the values\n #clean = clean.loc[ (clean['z_coordinate'] != -99999.0 )] #cleaning the values\n \n df = df[np.isfinite(df['observation_value'])] # excluding nan values \n df = df[np.isfinite(df['z_coordinate'])]\n \n return df", "def trimDf(df):\n cols = set(df.columns)\n\n cols.remove('exclamationCount') # bug in our feature extraction code\n cols.remove('price') # considered only free apps\n cols.remove('appName') # removing appNames\n\n # return df[list(cols)]\n\n\n\n return df[list(('revSent', 'appLabel'))]", "def clean_weather_df(weather_df):\n col = weather_df.columns\n drop_col = list(col[7::2])\n clean_num = weather_df[weather_df['LATITUDE'].str.contains(\n \"LATITUDE\") == False]\n num_weather = clean_num.drop(drop_col, axis=1)\n just_num = num_weather.drop(['NAME', 'STATION'], axis=1)\n all_weatherdf = just_num.apply(pd.to_numeric)\n all_weatherdf['name'] = num_weather['NAME']\n return all_weatherdf", "def clean_data(df, start = 1995, stop = 2018):\n country_iso3_code = pd.read_html('https://unstats.un.org/unsd/methodology/m49/')\n country_iso3_code = country_iso3_code[0]['ISO-alpha3 code']\n df = df.loc[df.country_iso3_code.isin(country_iso3_code)]\n df = df.set_index(['indicator', 'country_iso3_code', 'country', 'year']).unstack(level = 0)\n df.columns = df.columns.get_level_values(1)\n df = df.rename(columns = {'NY.GDP.PCAP.KD.ZG': 'pc_GDP_growth',\n 'NY.GDP.PCAP.PP.CD': 'pc_GDP_PPP'})\n df = df.reset_index()\n df = df.loc[(df.year >= (start - 1)) & (df.year <= stop)]\n df = df.dropna()\n return df", "def clean_airport_code(spark, input_data):\n try:\n #read file\n df_airport_code = 
spark.read.option(\"header\",\"true\").option(\"recursiveFileLookup\",\"true\").parquet(input_data+'airport-codes_csv')\n # drop columns\n # filter closed , heliport and seaplace base airport, small_airport\n # keep us airport\n drop_cols = [\"elevation_ft\",\"continent\", \"gps_code\", \"coordinates\"]\n drop_airport = ['closed', 'heliport', 'seaplane_base', 'small_airport', 'balloonport']\n keep_us = ['US']\n newdf =df_airport_code.drop(*drop_cols) \\\n .filter(~df_airport_code.type.isin(drop_airport)) \\\n .filter(df_airport_code.iso_country.isin(keep_us))\n #airport_code.groupBy('iso_country', 'iso_region').agg(count(\"*\")).show()\n #l = ['US']\n newdf = newdf.withColumn(\"myisocountry\", split(col(\"iso_region\"), \"-\").getItem(0)) \\\n .withColumn(\"myisoregion\", split(col(\"iso_region\"), \"-\").getItem(1))\n newdf = newdf.withColumn(\"myisocountry\",coalesce(newdf.myisocountry,newdf.iso_country))\n drop_cols = ['myisocountry', 'iso_region', 'local_code']\n newdf = newdf.drop(*drop_cols)\n airport_code = newdf.filter(~newdf.iata_code.isNull()).dropDuplicates()\n df_clean_airport_code = (airport_code.withColumnRenamed(\"ident\", \"ident\") \\\n .withColumnRenamed(\"type\", \"airport_type\") \\\n .withColumnRenamed(\"name\", \"airport_name\") \\\n .withColumnRenamed(\"iso_country\", \"country_iso2\") \\\n .withColumnRenamed(\"municipality\", \"city_name\" ) \\\n .withColumnRenamed(\"iata_code\", \"iata_code\") \\\n .withColumnRenamed(\"myisoregion\", \"state_id\"))\n print('***** Make df_clean_airport_code processing ')\n df_clean_airport_code.printSchema()\n #df_clean_airport_code.show(2)\n except Exception as e:\n print(\"Unexpected error: %s\" % e)\n else:\n return(df_clean_airport_code)", "def filter(df: pd.DataFrame = pd.DataFrame()):\n if df.empty:\n df = read()\n print('Filtering data...')\n df = df.dropna()\n df2 = pd.DataFrame()\n df2['Longitude'] = df['Longitude']\n df2['Latitude'] = df['Latitude']\n df2['Month'] = df['Date'].dt.strftime('%m').astype(int)\n df2['Day'] = df['Date'].dt.strftime('%d').astype(int)\n df2['Day_of_Week'] = df['Day_of_Week']\n df2['Time'] = np.array([t.timestamp() for t in df['Time']]) - df['Time'].min().timestamp()\n df2['Weather_Conditions'] = df['Weather_Conditions']\n return pd.get_dummies(df2)", "def clean_data(df):\n \n any_location_id_missing = (df.PULocationID > 263) | (df.DOLocationID > 263)\n df = df.drop(df.index[any_location_id_missing])\n \n df = df[df.tpep_dropoff_datetime > df.tpep_pickup_datetime]\n\n df.PULocationID.replace([104, 105], 103)\n \n return df", "def filterAndTransform(self, df):\n\n # removing as is stated in the task along with the 'Year' and 'DepTime'\n col_to_drop = ['ArrTime',\n 'ActualElapsedTime',\n 'AirTime',\n 'TaxiIn',\n 'Diverted',\n 'CarrierDelay',\n 'WeatherDelay',\n 'NASDelay',\n 'SecurityDelay',\n 'LateAircraftDelay',\n 'Year',\n 'TailNum',\n 'CancellationCode'] # Only those 3 I added up to delay, others\n # are delayed as is stated in the task\n df = df.drop(*col_to_drop)\n\n df = df.filter(\"Cancelled == 0\") # select only those flights that happened\n df = df.drop(\"Cancelled\")\n\n df = df.drop(*[\"UniqueCarrier\",\n \"DayofMonth\",\n \"FlightNum\"]) # Droping unimportant categorical variables\n\n df = df.na.drop(\"any\")\n\n df = df.withColumn('OrigDest',\n sf.concat(sf.col('Origin'), sf.lit('_'), sf.col('Dest')))\n df = df.drop(*[\"Origin\", \"Dest\"])\n df = df.withColumn(\"Speed\", sf.round(col(\"Distance\") / col(\"CRSElapsedTime\"), 2).cast(DoubleType()))\n\n return df", "def 
_clean_dataset(df: pd.DataFrame) -> pd.DataFrame:\n df = df.loc[:, ~df.columns.str.contains(\"^Unnamed\")]\n df = df.dropna()\n return df", "def mopub_dataframe_cleaner(dataframe):\r\n print(\"Cleaning Mopub data...\")\r\n df = dataframe\r\n df['App'] = df.App.map({\"IMVU iOS - #1 3D Avatar Social App\":'IMVU iOS', \r\n \"IMVU Android - #1 3D Avatar Social App\":\"IMVU Android\"})\r\n \r\n df['Total_Code_Served'] = df['Requests']\r\n df['Partner'] = 'MoPub'\r\n \r\n df = df.rename(columns={'App ID':\"App_ID\", \"AdUnit ID\":\"AdUnit_ID\", \r\n 'AdUnit Format':\"AdUnit_Format\"})\r\n\r\n df_pivot = df.pivot_table(index=['Day', 'App', 'AdUnit', 'AdUnit_Format', \r\n 'Country', 'Partner'], \r\n values=['Total_Code_Served', 'Requests', \r\n 'Impressions', 'Clicks', 'Revenue'], \r\n aggfunc='sum')\r\n\r\n df_pivot.to_csv(\"mopub-pivot.csv\")\r\n\r\n df = pd.read_csv(\"mopub-pivot.csv\")\r\n\r\n df = df.rename(columns={\r\n 'AdUnit_Format':'UnitType'})\r\n\r\n df['UnitType'] = df.UnitType.map({'Banner':'banner',\r\n 'Native':'native', 'Rewarded video': 'video'})\r\n\r\n df = df[['Day', 'App', 'AdUnit', 'UnitType', 'Country', 'Total_Code_Served',\r\n 'Requests', 'Impressions', 'Clicks', 'Revenue', 'Partner']]\r\n \r\n os.remove(\"mopub-pivot.csv\")\r\n\r\n return df", "def clean_weather_csv(df: pd.DataFrame) -> pd.DataFrame:\n if 'Min_VisibilitykM' in df.columns:\n df.rename(columns={'Min_VisibilitykM': 'Min_VisibilityKm'},\n inplace=True)\n if 'Min_DewpointC' in df.columns:\n df.rename(columns={'Min_DewpointC': 'MinDew_pointC'}, inplace=True)\n\n cols = map(convert_to_snake_case, df.columns)\n df.columns = cols\n\n for col in ['max_visibility_km', 'min_visibility_km', 'mean_visibility_km',\n 'max_gust_speed_km_h', 'cloud_cover']:\n df[col] = df[col].fillna(df[col].mean())\n\n df['events'] = df.events.fillna('No Events')\n return df", "def clean_ferc714_hourly_demand_matrix(df: pd.DataFrame) -> pd.DataFrame:\n ts = pudl.analysis.timeseries_cleaning.Timeseries(df)\n ts.flag_ruggles()\n return ts.to_dataframe(copy=False)", "def clean_up(df: pd.DataFrame) -> pd.DataFrame:\n\n # Hereafter df is sorted by date, which is helpful as it allows using .iloc[-1]\n # to get current (or most recent known) situation per location\n # (Otherwise we'd have to groupby agg -> min date, and then filter)\n df = df.sort_values(\n [Columns.LOCATION_NAME, Columns.DATE, Columns.CASE_TYPE], ascending=True\n )\n\n return df", "def clean_up_raw(df_raw):\n # exclude_subset = ['well', 'tile', 'cell', 'intensity', 'blob'] # causes issues with later joins, maybe a pandas bug\n import lasagna.utils\n df_raw[CYCLE] = df_raw[CYCLE].astype(int)\n df_raw = df_raw.sort_values([WELL, TILE, CELL, BLOB, CYCLE, CHANNEL])\n return df_raw", "def fyber_video_dataframe_cleaner(dataframe):\r\n print(\"Cleaning Fyber Video...\")\r\n df = dataframe\r\n df = df.fillna(0)\r\n \r\n delete_list = ['application_id', 'completions', 'ecpm_eur', 'ecpm_usd', \r\n 'fills', 'revenue_eur', 'unique_impressions']\r\n \r\n for entry in delete_list:\r\n del df[entry]\r\n \r\n df['Ad Type'] = 'video'\r\n df['Partner'] = 'Fyber_Video'\r\n df['Total_Code_Served'] = 0\r\n df['Clicks'] = 0\r\n \r\n df = df.rename(columns={'date':'Day', 'application_name':'App',\r\n \"ad_format\":\"AdUnit\", \"Ad Type\":\"UnitType\", \r\n \"country\":\"Country\", \"requests\":\"Requests\", \r\n \"impressions\":\"Impressions\", \"clicks\":\"Clicks\", \r\n \"revenue_usd\":\"Revenue\"})\r\n \r\n df['App'] = df['App'].replace({\"IMVU iOS Primary Wall\":\"IMVU iOS\", \r\n \"IMVU iOS 
External Offer Wall\":\"IMVU iOS\", \r\n \"IMVU Google Play\":\"IMVU Android\"}) \r\n\r\n df['Impressions'] = df['Impressions'].apply(lambda x:int(x))\r\n df['Requests'] = df['Requests'].apply(lambda x:int(x))\r\n\r\n df = df[[\"Day\", \"App\", \"AdUnit\", \"UnitType\", \"Country\", \r\n \"Total_Code_Served\", \"Requests\", \"Impressions\", \r\n \"Clicks\", \"Revenue\", \"Partner\"]]\r\n\r\n drop_index_list = []\r\n for num in list(df.index):\r\n if df.loc[num, 'App'] == 'Blue Bar Bundle ' or df.loc[num, 'App'] == 'NEXT Featured Offers':\r\n drop_index_list.append(num)\r\n\r\n df = df.drop(drop_index_list, axis=0)\r\n\r\n return df", "def clean_data(df):\n\n # Extract the malware name\n df['malware'] = df['malware'].str.extract('Master Indicator Feed for ([a-z]+) non-sinkholed domains',\n expand=True).fillna(0)\n\n # Parse the IPs of the ISPs\n split_fn = lambda x: pd.Series([i for i in x.split('|')])\n domain_reg_df = df['domain_registrar_ip'].apply(split_fn)\n column_names = list(domain_reg_df.columns)\n domain_reg_df.columns = ['domain_registrar_ip_' + str(column_names[x]) for x in range(len(column_names))]\n\n final_osint_df = df.join(domain_reg_df)\n return final_osint_df", "def macd_fix():\n df = macd_test_data()\n return df", "def clean_station_data(station_df):\n # TODO implement data preparation here\n # Fix the datetime field\n\n # Cast to numeric fields where necessary\n\n # Interpolate missing data", "def clean_store_csv(df: pd.DataFrame) -> pd.DataFrame:\n cols = map(convert_to_snake_case, df.columns)\n df.columns = cols\n\n for col in ['promo2_since_week', 'promo2_since_year',\n 'competition_distance', 'competition_open_since_month',\n 'competition_open_since_year']:\n df[col] = df[col].fillna(df[col].mean())\n\n df['promo_interval'] = df.promo_interval.fillna('None')\n return df", "def prepare_output_df(df: DataFrame, kind: str) -> DataFrame:\r\n columns = get_export_columns(kind)\r\n to_drop = list(filter(lambda x: x not in columns.keys(), df.columns.to_list())) # For any columns not in the get_export_columns()\r\n df = df.drop(columns=to_drop) # mapping, drop them from the DataFrame.\r\n df = df.rename(columns=columns)\r\n return df", "def data_cleaning(df):\n df = df.dropna(how='all')\n \n return df", "def clean_data(df_name):\n\n wines = df_name\n wines = wines.rename(columns={'Vintage': 'Year'})\n wines['Location'] = wines['Appellation'].apply(lambda x: x['Region']['Name'])\n wines['Region'] = wines['Appellation'].apply(lambda x: x['Name'])\n wines['Type'] = wines['Varietal'].apply(lambda x: x['WineType']['Name'])\n wines['Grape'] = wines['Varietal'].apply(lambda x: x['Name'])\n wines['Reviews'] = wines['Community'].apply(lambda x: x['Reviews']['Url'])\n drop_columns = ['Appellation', 'Community', 'Description', 'GeoLocation', 'Labels', 'ProductAttributes','Ratings','Retail', 'Url', 'Varietal', 'Vineyard', 'Vintages']\n wines.drop(drop_columns, axis=1, inplace=True)\n wines = wines[['Id', 'Name', 'Year', 'Type', 'Grape', 'Location', 'Region', 'PriceRetail', 'PriceMin', 'PriceMax', 'Reviews']]\n wines['CurrentReviews'] = '' #wines['CurrentReviews'].apply(lambda x: [\"\"])\n wines['PriorReviews'] = '' #wines['PriorReviews'].apply(lambda x: [''])\n\n return wines", "def preprocess(df):\n drop_cols = ['duration_ms', 'key', 'mode', 'time_signature', 'popularity', 'tempo']\n drop_cols += ['track_id', 'track_name', 'artist_name']\n for col in drop_cols:\n if col in list(df.columns):\n df = df.drop(columns=col)\n return df", "def clean(dataframe):\n # replace 'unknown' in Pop. 
density with np.nan\n dataframe = dataframe.replace('unknown', np.nan)\n\n # remove spaces from column names and content\n dataframe.columns = dataframe.columns.str.strip()\n\n # change YYYYMMDD to days of the year\n date_debug = []\n for i in range(1, 366):\n date_debug.append(i)\n\n dataframe2 = {'YYYYMMDD': date_debug}\n dataframe['YYYYMMDD'] = dataframe2['YYYYMMDD']\n\n return dataframe", "def reduceUniverse(self):\r\n self.bondList = list(set([bond for grid in self.parent.gridList for bond in grid.bondList]))#set removes duplicates\r\n self.df = self.df.reindex(self.bondList)\r\n self.df = self.df[pandas.notnull(self.df['ISIN'])]\r\n self.rfbonds = list(self.df.loc[self.df['TICKER'].isin(self.riskFreeIssuers)].index)\r\n self.embondsisins = self.df.loc[~self.df['TICKER'].isin(self.riskFreeIssuers), 'ISIN']\r\n self.rfbondsisins = self.df.loc[self.df['TICKER'].isin(self.riskFreeIssuers), 'ISIN']", "def _cleanProcessDf(self):\n # Se applica la funcion a todo el DataFrame exepto la columna \"State\"\n self._df.iloc[:, 1:] = self._df.iloc[:, 1:].applymap(\n lambda x: self._extractTuples(x))", "def _dataframe_preprocess(self):\n # 1. add baisc feature like date, time in day, ....\n if self.data_type != 'porto':\n self.df['TIMESTAMP'] = self.df.apply(lambda df: df['TIMESTAMPS'][0], axis=1)\n self.df['TIME'] = pd.to_datetime(self.df['TIMESTAMP'], unit='s', utc=True)\n \n self.df.TIME = self.df.TIME.dt.tz_convert(self.timezone)\n # 2. group df for specific driver analysis\n self.grouped_df = self.df.groupby('LABEL')\n if self.count_od_info:\n if 'SD' not in self.df.columns:\n self._add_OD_info()\n self.grouped_od = self.df.groupby('SD')", "def clean_pokemon_database(\n df: pd.DataFrame,\n selected_generations: list,\n remove_legendary: bool = True\n):\n\n # Filter only selected generations\n df = df[df.generation.isin(selected_generations)]\n\n # Remove legendary\n if remove_legendary:\n df = df[df.is_legendary == 0]\n\n # Rename type columns\n df = df.rename(columns={\"type1\": \"primary_type\", \"type2\": \"secondary_type\"})\n\n # Drop NA values\n df = df.dropna()\n\n # When returning many variables, it is a good practice to give them names:\n return df" ]
[ "0.6587388", "0.6141118", "0.5938045", "0.5910094", "0.5879471", "0.58774984", "0.58656204", "0.5842229", "0.5795698", "0.57706887", "0.57476074", "0.574605", "0.57356423", "0.56972283", "0.5619846", "0.5593961", "0.5565028", "0.55453414", "0.553814", "0.5526195", "0.55125594", "0.54994726", "0.5481897", "0.5475846", "0.5458268", "0.545346", "0.54496366", "0.5430649", "0.5430266", "0.542994" ]
0.7597343
0
clean dataframe df_iso_country and return a dataframe
def clean_iso_country(spark, input_data): try: #read file df_iso_country = spark.read.option("header","true").csv(input_data+'wikipedia-iso-country-codes.csv') df = (df_iso_country.withColumnRenamed('English short name lower case','country_name') \ .withColumnRenamed('Alpha_2', 'country_iso2') \ .withColumnRenamed('Alpha_3', 'country_iso3') \ .withColumnRenamed('Num_code','country_num')) df_clean_iso_country = df_iso_country.drop("ISO_3166-2") \ .select(F.col("Country").alias("country_name"), \ F.col("Alpha_2").alias("country_iso2"), \ F.col("Alpha_3").alias("country_iso3"), \ F.col("Num_code").alias("country_num") \ .cast("int")) \ .dropDuplicates() print('***** Make df_clean_iso_country processing ') df_clean_iso_country.printSchema() #df_clean_iso_country.show(2) except Exception as e: print("Unexpected error: %s" % e) else: return(df_clean_iso_country)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clean_data(df, start = 1995, stop = 2018):\n country_iso3_code = pd.read_html('https://unstats.un.org/unsd/methodology/m49/')\n country_iso3_code = country_iso3_code[0]['ISO-alpha3 code']\n df = df.loc[df.country_iso3_code.isin(country_iso3_code)]\n df = df.set_index(['indicator', 'country_iso3_code', 'country', 'year']).unstack(level = 0)\n df.columns = df.columns.get_level_values(1)\n df = df.rename(columns = {'NY.GDP.PCAP.KD.ZG': 'pc_GDP_growth',\n 'NY.GDP.PCAP.PP.CD': 'pc_GDP_PPP'})\n df = df.reset_index()\n df = df.loc[(df.year >= (start - 1)) & (df.year <= stop)]\n df = df.dropna()\n return df", "def clean(df):", "def get_China_exhubei(df) -> pandas.core.frame.DataFrame:\n return df[(df['countryCode']=='CN') & (df['province']!='Hubei Province') & ~(df['province'].isnull()) \\\n & ~(df['city'].isnull())]", "def clean_data(df):\n # Clean Country and Region column.\n df['Country'] = df['Country'].str.strip()\n df['Region'] = df['Region'].str.strip()\n\n # Clean Pop. Density column.\n df['Pop. Density (per sq. mi.)'] = df['Pop. Density (per sq. mi.)'].str.replace(',', '.')\n df['Pop. Density (per sq. mi.)'] = pd.to_numeric(df['Pop. Density (per sq. mi.)'], errors='coerce')\n\n # Clean Infant mortality column.\n df['Infant mortality (per 1000 births)'] = df['Infant mortality (per 1000 births)'].str.replace(',', '.')\n df['Infant mortality (per 1000 births)'] = pd.to_numeric(df['Infant mortality (per 1000 births)'], errors='coerce')\n\n # Clean GDP column.\n df['GDP ($ per capita) dollars'] = df['GDP ($ per capita) dollars'].str.strip(' dollars')\n df['GDP ($ per capita) dollars'] = pd.to_numeric(df['GDP ($ per capita) dollars'], errors='coerce')\n\n return df", "def clean_and_save_country(country_name, df):\n drop_columns = ['Lat', \n 'Long', \n 'Province/State']\n\n df.drop(columns=drop_columns, inplace = True)\n df_group = df.groupby(['Country/Region'])\n\n country = df_group.get_group(country_name)\n country.drop(columns = ['Country/Region'], inplace=True)\n country = country.agg(['sum'])\n country = country.T\n country.reset_index(level=0, inplace=True)\n country['index'] = pd.to_datetime(country['index'])\n country.rename(columns={'index': 'date'}, inplace=True)\n\n country.to_csv('../data/' + country_name + '_timeseries.csv', index=False)", "def clean_dataframe(self, df_in , what = ''): \n \n if what == 'era5fb': # cleaning the era5 feedback only \n df = df_in[np.isfinite(df_in['obsvalue@body'])]\n try: \n df = df.loc[ df['vertco_type@body'] != 2 ] \n except:\n pass\n df = df.reindex()\n df = df[np.isfinite(df['vertco_reference_1@body'])]\n #print('check lengths: ' , len(df_in) , len(df) )\n new_ind = np.array ( range(len(df))) \n df['index'] =new_ind\n df = df.set_index('index')\n \n else: \n ### check if can be optimized ???\n df = df_in.loc[ df_in['z_coordinate_type'] != 2 ] # case where the levels are given in terms of geopotential only (pressure not available)\n \n df = df.loc[ (df['observation_value'] != -99999.0) \n & (df['observation_value'] != -999.0) \n & (df['observation_value'] != -9999) \n & (df['observation_value'] != -9999.0) \n & (df['observation_value'] != -999.9) \n & (df['observation_value'] != -8888 )\n & (df['observation_value'] != -8888.0 )\n \n #& (df['z_coordinate_type'] != 2) \n & (df['z_coordinate'] != -99999.0) \n & (df['z_coordinate'] != -9999.0 )\n & (df['z_coordinate'] != 999 )\n & (df['z_coordinate'] != 999.0 )\n \n \n ] #cleaning the values \n #clean = clean.loc[ (clean['z_coordinate_type'] != 2)] #cleaning the values\n #clean = clean.loc[ 
(clean['z_coordinate'] != -99999.0 )] #cleaning the values\n \n df = df[np.isfinite(df['observation_value'])] # excluding nan values \n df = df[np.isfinite(df['z_coordinate'])]\n \n return df", "def _clean_dataset(df: pd.DataFrame) -> pd.DataFrame:\n df = df.loc[:, ~df.columns.str.contains(\"^Unnamed\")]\n df = df.dropna()\n return df", "def country_code_update(df):\n from pycountry import countries as ct\n new_df = country_grouping(df)\n # country names in the data set that are not fit ISO standard\n completion = pd.DataFrame(np.array([['Bolivia', 'BO'],\n ['Brunei', 'BN'],\n ['Congo (Brazzaville)', 'CG'],\n ['Congo (Kinshasa)', 'CD'],\n ['Cote d\\'Ivoire', 'CI'],\n ['Holy See', 'VA'],\n ['Iran', 'IR'],\n ['Korea, South', 'KR'],\n ['Moldova', 'MD'],\n ['Russia', 'RU'],\n ['Taiwan*', 'TW'],\n ['Tanzania', 'TZ'],\n ['US', 'US'],\n ['Venezuela', 'VE'],\n ['Vietnam', 'VN'],\n ['Syria', 'SY'],\n ['Laos', 'LA'],\n ['West Bank and Gaza', 'PS'],\n ['Kosovo', 'XK'],\n ['Burma', 'MM']\n ]),\n columns=['c_name', 'c_code']\n )\n country_code_list = []\n for country_name in new_df['Country/Region']:\n try:\n if country_name in completion['c_name'].tolist():\n # print('exception covered: ', country_name)\n country_code = completion['c_code'].loc[completion['c_name'] == country_name].item()\n # identifies the cruise ships in the data set considered as a 'country'\n elif country_name == 'Diamond Princess' or country_name == 'MS Zaandam':\n country_code = 'Cruise Ship'\n else:\n country_code = ct.get(name=country_name).alpha_2\n except KeyError:\n print('no result: ', country_name)\n country_code = 'None'\n pass\n country_code_list.append(country_code)\n # print(country_code_list)\n new_df.insert(0, \"country_code\", country_code_list, True)\n new_df = new_df.drop(columns='Country/Region')\n unknown_index = new_df[new_df['country_code'] == 'Cruise Ship'].index\n new_df.drop(unknown_index, inplace=True) # drop when country_code = 'None', most likely are Cruise ships\n # new_df.set_index(new_df['country_code'])\n return new_df", "def _feature_country_process(self):\n if 'Country' not in self._df_invoice_line.columns:\n return\n\n list_countries_keep = ['United Kingdom']\n rows_before = self._df_invoice_line.shape[0]\n \n df_invoice_line_new = pd.DataFrame()\n for country in list_countries_keep : \n df_invoice_line_new = df_invoice_line_new.append(\\\n self._df_invoice_line[self._df_invoice_line['Country']==country]\\\n , ignore_index=True)\n\n self.df_invoice_line = df_invoice_line_new\n del(df_invoice_line_new)\n \n rows_after = self._df_invoice_line.shape[0] \n _print_stat_rows(\"Countries filtering : \",rows_before, rows_after)\n\n \n #-------------------------------------------------------------------------\n # Due to the fact only one country is used, then this feature is dropped\n #-------------------------------------------------------------------------\n list_col_to_keep = [col for col in self._df_invoice_line.columns \\\n if col not in 'Country']\n \n self._df_invoice_line = self._df_invoice_line[list_col_to_keep] \n\n return", "def clean_countries(event_db):\n event_db[\"country_edb\"] = event_db[\"country_edb\"].apply(_clean_country_str)\n event_db = my_utils.split_strings_at_comma_and_distribute_to_new_rows(event_db, 'country_edb')\n return event_db", "def get_countries_geo_df() -> geopandas.GeoDataFrame:\n\n geo_df: geopandas.GeoDataFrame = geopandas.read_file(\n GEO_DATA_DIR / \"ne_110m_admin_0_map_units\" / \"ne_110m_admin_0_map_units.shp\"\n )\n\n geo_df = geo_df.rename(columns={\"ADMIN\": 
CODE}, errors=\"raise\")\n\n # Keys are what's in the geo df, values are what we want to rename them to\n # Values must match the names in the original data source. If you don't like those\n # names, change them there and then come back and change the values here.\n geo_df[CODE] = (\n geo_df[CODE]\n .map(\n {\n \"Central African Republic\": \"Central African Rep.\",\n \"Democratic Republic of the Congo\": \"Dem. Rep. Congo\",\n \"Equatorial Guinea\": \"Eq. Guinea\",\n \"eSwatini\": \"Eswatini\",\n \"Georgia (Country)\": \"Georgia\",\n \"Republic of Serbia\": \"Serbia\",\n \"United Arab Emirates\": \"UAE\",\n \"United Kingdom\": \"Britain\",\n \"United Republic of Tanzania\": \"Tanzania\",\n \"Western Sahara\": \"W. Sahara\",\n \"United States of America\": \"United States\",\n }\n )\n .fillna(geo_df[CODE])\n )\n geo_df = geo_df[geo_df[CODE] != \"Antarctica\"]\n\n colonial_power_main_countries = {\n \"Britain\": \"England\",\n \"France\": \"France, Metropolitan\",\n \"Norway\": \"Norway\",\n \"Papua New Guinea\": \"Papua New Guinea\",\n }\n\n is_main_country_idx = geo_df[CODE].map(colonial_power_main_countries).isna() | (\n geo_df[\"NAME_SORT\"] == geo_df[CODE].map(colonial_power_main_countries)\n )\n\n geo_df[CODE] = geo_df[CODE].where(\n is_main_country_idx, geo_df[CODE].str.cat(geo_df[\"NAME_SORT\"], sep=\" - \"),\n )\n geo_df[\"name\"] = geo_df[CODE]\n\n geo_df = geo_df[\n [\n \"featurecla\",\n \"scalerank\",\n \"LABELRANK\",\n # \"SOVEREIGNT\",\n # \"SOV_A3\",\n # \"ADM0_DIF\",\n \"LEVEL\",\n # \"TYPE\",\n CODE,\n \"name\",\n # \"ADM0_A3\",\n # \"GEOU_DIF\",\n # \"GEOUNIT\",\n # \"GU_A3\",\n # \"SU_DIF\",\n # \"SUBUNIT\",\n # \"SU_A3\",\n # \"BRK_DIFF\",\n # \"NAME\",\n # \"NAME_LONG\",\n # \"BRK_A3\",\n # \"BRK_NAME\",\n # \"BRK_GROUP\",\n \"ABBREV\",\n # \"POSTAL\",\n # \"FORMAL_EN\",\n # \"FORMAL_FR\",\n # \"NAME_CIAWF\",\n # \"NOTE_ADM0\",\n # \"NOTE_BRK\",\n \"NAME_SORT\",\n # \"NAME_ALT\",\n # \"MAPCOLOR7\",\n # \"MAPCOLOR8\",\n # \"MAPCOLOR9\",\n # \"MAPCOLOR13\",\n # \"POP_EST\",\n # \"POP_RANK\",\n # \"GDP_MD_EST\",\n # \"POP_YEAR\",\n # \"LASTCENSUS\",\n # \"GDP_YEAR\",\n \"ECONOMY\",\n \"INCOME_GRP\",\n # \"WIKIPEDIA\",\n # \"FIPS_10_\",\n # \"ISO_A2\",\n # \"ISO_A3\",\n # \"ISO_A3_EH\",\n # \"ISO_N3\",\n # \"UN_A3\",\n # \"WB_A2\",\n # \"WB_A3\",\n # \"WOE_ID\",\n # \"WOE_ID_EH\",\n # \"WOE_NOTE\",\n # \"ADM0_A3_IS\",\n # \"ADM0_A3_US\",\n # \"ADM0_A3_UN\",\n # \"ADM0_A3_WB\",\n \"CONTINENT\",\n \"REGION_UN\",\n \"SUBREGION\",\n \"REGION_WB\",\n # \"NAME_LEN\",\n # \"LONG_LEN\",\n # \"ABBREV_LEN\",\n # \"TINY\",\n # \"HOMEPART\",\n # \"MIN_ZOOM\",\n # \"MIN_LABEL\",\n # \"MAX_LABEL\",\n # \"NE_ID\",\n # \"WIKIDATAID\",\n # \"NAME_AR\",\n # \"NAME_BN\",\n # \"NAME_DE\",\n # \"NAME_EN\",\n # \"NAME_ES\",\n # \"NAME_FR\",\n # \"NAME_EL\",\n # \"NAME_HI\",\n # \"NAME_HU\",\n # \"NAME_ID\",\n # \"NAME_IT\",\n # \"NAME_JA\",\n # \"NAME_KO\",\n # \"NAME_NL\",\n # \"NAME_PL\",\n # \"NAME_PT\",\n # \"NAME_RU\",\n # \"NAME_SV\",\n # \"NAME_TR\",\n # \"NAME_VI\",\n # \"NAME_ZH\",\n \"geometry\",\n ]\n ]\n\n return geo_df", "def _flip_wdi(df: pd.DataFrame) -> pd.DataFrame:\n\n log.info(\"Flipping WDI\")\n\n df = df.rename(columns=lambda x: x.replace(\" \", \"\"))\n df = df.rename(columns=lambda x: x.lower())\n\n # Headache-magic, tbh I don't remember how it works.\n df = df.drop([\"countryname\", \"indicatorname\"], axis=1)\n df = df.set_index([\"countrycode\", \"indicatorcode\"])\n df.columns.name = \"year\"\n df = df.stack().unstack(\"indicatorcode\")\n df = df.reset_index()\n 
df[\"year\"] = df[\"year\"].astype(\"int32\")\n df = df.set_index([\"year\", \"countrycode\"]).sort_index()\n\n df = df.rename(columns=lambda x: x.replace(\".\", \"_\"))\n df = df.rename(columns=lambda x: x.lower())\n\n log.info(\"Done flipping WDI\")\n\n return df", "def clean(dataframe):\n # replace 'unknown' in Pop. density with np.nan\n dataframe = dataframe.replace('unknown', np.nan)\n\n # remove spaces from column names and content\n dataframe.columns = dataframe.columns.str.strip()\n\n # change YYYYMMDD to days of the year\n date_debug = []\n for i in range(1, 366):\n date_debug.append(i)\n\n dataframe2 = {'YYYYMMDD': date_debug}\n dataframe['YYYYMMDD'] = dataframe2['YYYYMMDD']\n\n return dataframe", "def usa_geo_filter(\n df: pd.DataFrame, state_col: str, country_col: str = None, usa_val: str = None\n) -> pd.DataFrame:\n # Read in names columns = [state, state_initial]\n us_state_abbrev = pd.read_csv(\n get_project_root() / \"data/external/other/state_names.csv\"\n )\n\n # initial passed\n if df[state_col].str.len().mode()[0] == 2:\n state_col_new = \"state_initial\"\n else:\n state_col_new = \"state\"\n\n # Naming Convention\n df.rename(columns={state_col: state_col_new}, inplace=True)\n # Filter USA\n if country_col is not None:\n df = df[df[country_col] == usa_val]\n\n # Filter State\n df = df[df[state_col_new].isin(us_state_abbrev[state_col_new])]\n # Add other col\n df = df.merge(us_state_abbrev, how=\"left\", on=state_col_new)\n\n # move state and state_initial cols to leftmost column indices\n cols = list(df)\n for col in [\"state\", \"state_initial\"]:\n cols.insert(0, cols.pop(cols.index(col)))\n df = df.reindex(columns=cols)\n\n # Validate Expected Output\n validate_usa_geo_filter(df, us_state_abbrev, state_col_new, country_col, usa_val)\n\n return df", "def clean_location(df):\n \n local = df['location'].astype(str)\n \n #geocoders read X St at Y St better than X & Y or X/Y\n local = local.str.replace(\"&\", \"at\")\n local = local.str.replace(\"/\", \"at\")\n \n #OpenAddress dataset has addresses in title case\n local = local.str.title()\n\n return df.assign(location=local.values)", "def clean_data():\n datapath = Path(os.getcwd()) / \"data\"\n files = [str(file) for file in datapath.glob(\"*.csv\")]\n for file in files:\n if file.endswith(\"confirmed.csv\"):\n Confirmed = pd.read_csv(file)\n elif file.endswith(\"deaths.csv\"):\n Deaths = pd.read_csv(file)\n elif file.endswith(\"recovered.csv\"):\n Recovered = pd.read_csv(file)\n\n dataFrames = [Confirmed, Deaths, Recovered]\n countryList = list(dataFrames[0][\"Country/Region\"]) #list of valid countries\n countryList = list(dict.fromkeys(countryList))\n\n #create country population dictionary and align values with those in countryList\n countriesPop = {}\n countriesPop[\"US\"] = CountryInfo(\"USA\").population()\n countriesPop[\"Czechia\"] = CountryInfo(\"Czech Republic\").population()\n countriesPop[\"Taiwan*\"] = CountryInfo(\"Taiwan\").population()\n countriesPop[\"Korea, South\"] = CountryInfo(\"South Korea\").population()\n countriesPop[\"Eswatini\"] = CountryInfo(\"Swaziland\").population()\n countriesPop[\"Cote d'Ivoire\"] = CountryInfo(\"Ivory Coast\").population()\n\n for country in countryList:\n try:\n countriesPop[country] = CountryInfo(country).population()\n except KeyError:\n pass\n\n #remove unnecessary information from dataframes\n for count in range(len(dataFrames)):\n dataFrames[count] = dataFrames[count].drop(\"Province/State\",axis=1)\n dataFrames[count] = dataFrames[count].drop(\"Lat\",axis=1)\n 
dataFrames[count] = dataFrames[count].drop(\"Long\",axis=1)\n dataFrames[count] = dataFrames[count].rename(columns={\"Country/Region\": \"Country\"})\n dataFrames[count][\"Country\"] = dataFrames[count][\"Country\"].replace({\"Korea, South\": \"South Korea\"})\n dataFrames[count] = dataFrames[count].groupby(\"Country\").sum()\n\n # create per 100k capita values by dividing country data by population\n ConfirmedPC = dataFrames[0].copy()\n DeathsPC = dataFrames[1].copy()\n RecoveredPC = dataFrames[2].copy()\n countryList.append(\"South Korea\")\n\n for country in countryList:\n try:\n ConfirmedPC.loc[country] = ConfirmedPC.loc[country].divide(countriesPop[country]).multiply(100000) #confirmed cases per 100k inhabitants\n DeathsPC.loc[country] = DeathsPC.loc[country].divide(countriesPop[country]).multiply(100000) #deaths per 100k inhabitants\n RecoveredPC.loc[country] = RecoveredPC.loc[country].divide(countriesPop[country]).multiply(100000) #recovered cases per 100k inhabitants\n except KeyError:\n pass\n\n dataFrames.extend([ConfirmedPC, DeathsPC, RecoveredPC])\n\n return dataFrames, countryList", "def clean_data(df):\n \n any_location_id_missing = (df.PULocationID > 263) | (df.DOLocationID > 263)\n df = df.drop(df.index[any_location_id_missing])\n \n df = df[df.tpep_dropoff_datetime > df.tpep_pickup_datetime]\n\n df.PULocationID.replace([104, 105], 103)\n \n return df", "def clean_weather_df(weather_df):\n col = weather_df.columns\n drop_col = list(col[7::2])\n clean_num = weather_df[weather_df['LATITUDE'].str.contains(\n \"LATITUDE\") == False]\n num_weather = clean_num.drop(drop_col, axis=1)\n just_num = num_weather.drop(['NAME', 'STATION'], axis=1)\n all_weatherdf = just_num.apply(pd.to_numeric)\n all_weatherdf['name'] = num_weather['NAME']\n return all_weatherdf", "def clean_store_csv(df: pd.DataFrame) -> pd.DataFrame:\n cols = map(convert_to_snake_case, df.columns)\n df.columns = cols\n\n for col in ['promo2_since_week', 'promo2_since_year',\n 'competition_distance', 'competition_open_since_month',\n 'competition_open_since_year']:\n df[col] = df[col].fillna(df[col].mean())\n\n df['promo_interval'] = df.promo_interval.fillna('None')\n return df", "def filter(df: pd.DataFrame = pd.DataFrame()):\n if df.empty:\n df = read()\n print('Filtering data...')\n df = df.dropna()\n df2 = pd.DataFrame()\n df2['Longitude'] = df['Longitude']\n df2['Latitude'] = df['Latitude']\n df2['Month'] = df['Date'].dt.strftime('%m').astype(int)\n df2['Day'] = df['Date'].dt.strftime('%d').astype(int)\n df2['Day_of_Week'] = df['Day_of_Week']\n df2['Time'] = np.array([t.timestamp() for t in df['Time']]) - df['Time'].min().timestamp()\n df2['Weather_Conditions'] = df['Weather_Conditions']\n return pd.get_dummies(df2)", "def cleandata(filename, keepcolumns=['Country Name', '1990', '2015'], value_variables=['1990', '2015']):\n df = pd.read_csv(filename, skiprows=4)\n\n # Keep only the columns of interest (years and country name)\n df = df[keepcolumns]\n\n top10country = ['United States', 'China', 'Japan', 'Germany', 'United Kingdom', 'India', 'France', 'Brazil',\n 'Italy', 'Canada']\n df = df[df['Country Name'].isin(top10country)]\n\n # melt year columns and convert year to date time\n df_melt = df.melt(id_vars='Country Name', value_vars=value_variables)\n df_melt.columns = ['country', 'year', 'variable']\n df_melt['year'] = df_melt['year'].astype('datetime64[ns]').dt.year\n\n # output clean csv file\n return df_melt", "def data_cleaning(df):\n df = df.dropna(how='all')\n \n return df", "def 
clean_and_save_worldwide(df):\n drop_columns = ['FIPS',\n 'Lat', \n 'Long_', \n 'Combined_Key', \n 'Admin2', \n 'Province_State']\n\n df.drop(columns=drop_columns, inplace=True)\n\n df_cases = df.groupby(['Country_Region'], as_index=False).sum()\n df_cases.to_csv('../data/Total_cases_worldwide.csv', index=False)", "def preprocess_dataframe(self, dataframe):\n return dataframe", "def country_codes():\n\n iso_sel = [\n Freedom_short.iso_code,\n Freedom_short.year,\n Freedom_short.country,\n Freedom_short.region,\n Freedom_short.hf_score,\n Freedom_short.hf_rank,\n Freedom_short.hf_quartile,\n ]\n\n # Use Pandas to perform the sql query\n #Grab 2017 Data Only for Dropdown\n codes_stmt = db.session.query(*iso_sel).filter(Freedom_short.year == 2017).order_by(Freedom_short.iso_code).statement\n codes_df = pd.read_sql_query(codes_stmt, db.session.bind)\n\n # Return a list of the column names (sample names)\n return jsonify(list(codes_df[\"iso_code\"]))", "def remove_rows_with_non_english_movies(df):\n df = df[df['original_language'] == 'en']\n return df", "def clean_up_raw(df_raw):\n # exclude_subset = ['well', 'tile', 'cell', 'intensity', 'blob'] # causes issues with later joins, maybe a pandas bug\n import lasagna.utils\n df_raw[CYCLE] = df_raw[CYCLE].astype(int)\n df_raw = df_raw.sort_values([WELL, TILE, CELL, BLOB, CYCLE, CHANNEL])\n return df_raw", "def clean_df(df):\n # TODO: get birthdays for people missing birthdays\n df = df[~df.birthday.isnull()]\n df[\"birthday\"] = pd.to_datetime(df[\"birthday\"])\n return df", "def clean_data(df):\n\n # Extract the malware name\n df['malware'] = df['malware'].str.extract('Master Indicator Feed for ([a-z]+) non-sinkholed domains',\n expand=True).fillna(0)\n\n # Parse the IPs of the ISPs\n split_fn = lambda x: pd.Series([i for i in x.split('|')])\n domain_reg_df = df['domain_registrar_ip'].apply(split_fn)\n column_names = list(domain_reg_df.columns)\n domain_reg_df.columns = ['domain_registrar_ip_' + str(column_names[x]) for x in range(len(column_names))]\n\n final_osint_df = df.join(domain_reg_df)\n return final_osint_df", "def clean_up(df: pd.DataFrame) -> pd.DataFrame:\n\n # Hereafter df is sorted by date, which is helpful as it allows using .iloc[-1]\n # to get current (or most recent known) situation per location\n # (Otherwise we'd have to groupby agg -> min date, and then filter)\n df = df.sort_values(\n [Columns.LOCATION_NAME, Columns.DATE, Columns.CASE_TYPE], ascending=True\n )\n\n return df" ]
[ "0.7635082", "0.68735754", "0.6863959", "0.6713602", "0.65476346", "0.6416419", "0.62426144", "0.622032", "0.61710155", "0.61320084", "0.6127082", "0.6116876", "0.6109993", "0.6106656", "0.61057997", "0.60770345", "0.60273945", "0.595269", "0.5889864", "0.5830102", "0.5820315", "0.5815816", "0.58117", "0.58064044", "0.5788559", "0.5782848", "0.5777202", "0.575268", "0.5724369", "0.5722421" ]
0.73624325
1
clean dataframe df_demograph and return a dataframe
def clean_demograph(spark, input_data): try: #read file df_demograph = spark.read.option("header","true").option("recursiveFileLookup","true").parquet(input_data+'us-cities-demographics') drop_cols = ["Number_of_Veterans"] newdf = df_demograph.drop(*drop_cols) \ .select(F.col("city").alias("city_name"), \ F.col("state").alias("state_name"), \ F.col("median_age"), \ F.col("male_population"), \ F.col("female_population"), \ F.col("total_population"), \ F.col("foreign-born"), \ F.col("state_code").alias("state_id"), \ F.col("race").alias("ethnic"), \ F.col("count")) df_clean_demograph = newdf.groupBy("state_name", "state_id", "city_name", "median_age", "male_population", "female_population", "ethnic") \ .agg(F.avg("count").cast('int').alias("ethnic_count")) \ .orderBy("state_name", "city_name", "ethnic") \ .dropDuplicates() \ .fillna(-1, subset=['male_population','female_population']) print('***** Make df_clean_demograph processing ') df_clean_demograph.printSchema() #df_clean_demograph.show(2) except Exception as e: print("Unexpected error: %s" % e) else: return(df_clean_demograph)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clean(df):", "def clean_data():\n pd.set_option('display.max_columns', None)\n try:\n df = pd.read_csv('test1/movie.csv')\n except FileNotFoundError:\n df = pd.read_csv('movie.csv')\n\n df.drop(labels=[\"actor_3_facebook_likes\", \"actor_2_name\",\n \"actor_1_facebook_likes\", \"actor_1_name\",\n \"num_voted_users\",\n \"cast_total_facebook_likes\", \"actor_3_name\",\n \"facenumber_in_poster\", \"movie_imdb_link\",\n \"num_user_for_reviews\", \"actor_2_facebook_likes\",\n \"aspect_ratio\", \"color\", \"num_critic_for_reviews\",\n \"director_facebook_likes\"], axis=1, inplace=True)\n df.dropna(subset=[\"gross\"], axis=0, inplace=True)\n return df", "def data_cleaning(df):\n df = df.dropna(how='all')\n \n return df", "def _clean_dataset(df: pd.DataFrame) -> pd.DataFrame:\n df = df.loc[:, ~df.columns.str.contains(\"^Unnamed\")]\n df = df.dropna()\n return df", "def clean_data(df):\r\n \r\n # list of categories to use as column names \r\n categories_cols = [names.split('-')[0] for names in df['categories'][0].split(';')]\r\n \r\n # creating 36 individual category columns\r\n for i in range(len(categories_cols)):\r\n df[categories_cols[i]] = [int(row.split(';')[i].split('-')[1]) for row in df['categories']]\r\n \r\n # labels 0 and 2 in 'related' class are similar (refer to notebook)\r\n # change 2s into 0s to make it more simple\r\n df['related'] = df['related'].map({0:0,1:1,2:0})\r\n \r\n # drop 'categories' column\r\n df.drop('categories', axis=1, inplace=True)\r\n \r\n # drop duplicates\r\n df.drop_duplicates(inplace=True)\r\n \r\n return df", "def df_cleaner(df):\n df = df.dropna()\n return df", "def clean_data(df):\n # Copy dataframe to local dataframe\n df_clean = df\n # Split category into subcategories\n categories = df.categories.str.split(\";\", expand=True)\n # Label columns according to new label\n categories.columns = categories.iloc[0].str[:-2]\n # Make columns numeric, i.e. remove the label substring from the content\n for label, content in categories.iteritems():\n categories[label] = pd.to_numeric(content.str.replace(f\"{label}-\", \"\"))\n # Clean related category to 0/1 - there are outliers with 2s\n categories[\"related\"] = categories[\"related\"].map(lambda x: 1 if x == 2 else x)\n # Drop original category column\n df_clean = df_clean.drop(labels=\"categories\", axis=1)\n # Add categories to dataframe\n df_clean = df_clean.join(categories)\n\n return df_clean", "def clean_data(df):\n # expand categories as new data frame\n new_columns = [re.sub('[^a-zA-Z]', ' ', i).strip() for i in df['categories'][0].split(';')]\n cat_df = df['categories'].str.split(';', expand=True)\n cat_df.columns = new_columns\n \n # remove anything except numerical value\n # change new feature's type\n for column in cat_df:\n cat_df[column] = cat_df[column].apply(lambda x: re.sub('[^0-9]', '', x)).astype('str')\n \n # concatenate old dataframe and new features dataframe\n # remove olf categories column\n new_df = pd.concat([df, cat_df], axis=1)\n new_df = new_df.drop('categories', axis=1).drop_duplicates()\n binary_df = new_df[new_df['related']!='2']\n \n return binary_df", "def clean_data(df):\n\t# create a dataframe of the 36 individual category columns\n\tcategories = df['categories'].str.split(';', expand=True)\n\t# select the first row of the categories dataframe\n\trow = categories.loc[0]\n\t# extract a list of new column names for categories. 
Remove unnecessary chars.\n\tcategory_colnames = row.str.replace(r'-\\w','')\n\t# rename the columns of `categories`\n\tcategories.columns = category_colnames\n\t# Convert category values to just numbers 0 or 1.\n\tcategories = categories.applymap(lambda x: int(x.split('-')[1]))\n\t# drop the original categories column from `df`\n\tdf.drop(['categories'],axis=1, inplace=True)\n\t# concatenate the original dataframe with the new `categories` dataframe\n\tdf = pd.concat([df,categories],axis=1)\n\t# find duplicates\n\tdups = df.duplicated(subset=None, keep='first')\n\t# drop duplicates\n\tdf = df[~(dups)]\n\treturn df", "def fyber_display_dataframe_cleaner(dataframe):\r\n print(\"Cleaning Fyber Display...\")\r\n df = dataframe\r\n \r\n delete_list = ['contentCategories', 'contentId', 'contentName', 'publisherId', \r\n 'distributorName', 'ecpm', 'ctr', 'fillRate']\r\n \r\n for entry in delete_list:\r\n del df[entry]\r\n\r\n df['App'] = 'IMVU iOS'\r\n df['Partner'] = 'Fyber'\r\n df['Total_Code_Served'] = 0\r\n df['UnitType'] = 'banner'\r\n\r\n df = df.rename(columns={'adRequests':'Requests', 'applicationName':'AdUnit', \r\n \"clicks\":\"Clicks\", \"country\":\"Country\", 'date':'Day', \r\n \"revenue\":\"Revenue\", \"impressions\":\"Impressions\"})\r\n\r\n df = df[['Day', 'App', 'AdUnit', 'UnitType', 'Country', 'Total_Code_Served',\r\n 'Requests', 'Impressions', 'Clicks', 'Revenue', 'Partner']]\r\n \r\n df['Day'] = pd.to_datetime(df['Day'], unit='s')\r\n df['Day'] = df['Day'].apply(lambda x: x.date())\r\n \r\n return df", "def preprocess_dataframe(self, dataframe):\n return dataframe", "def fetch_training_df(df):\n\n gen_df = df.copy()\n gen_df.drop(['artist_name', 'title', 'release'], axis=1, inplace=True)\n return gen_df", "def clean(dataframe):\n # replace 'unknown' in Pop. density with np.nan\n dataframe = dataframe.replace('unknown', np.nan)\n\n # remove spaces from column names and content\n dataframe.columns = dataframe.columns.str.strip()\n\n # change YYYYMMDD to days of the year\n date_debug = []\n for i in range(1, 366):\n date_debug.append(i)\n\n dataframe2 = {'YYYYMMDD': date_debug}\n dataframe['YYYYMMDD'] = dataframe2['YYYYMMDD']\n\n return dataframe", "def df_cleaner(df):\n return df.dropna()", "def cleaning_Dataset(dataset):\n cols = dataset.select_dtypes([np.number]).columns\n diff = dataset[cols].diff().sum()\n\n dataset = dataset.drop([diff==0].index, axis=1)\n dataset = dataset.drop('adj close', 1)\n dataset = dataset.fillna(method='bfill')\n dataset = dataset[1:-1]\n return dataset", "def clean_data(df): \n # Now we first determen the column names\n columns = df['categories'].str.replace(\"-0\",\"\").str.replace(\"-1\",\"\").str.replace(\"-2\",\"\").drop_duplicates().str.split(';')[0]\n\n # And split the 'categories' column\n df[columns] = df['categories'].str.split(';',expand=True)\n\n # The columns are still an object/string. 
Now make it a number\n for column in columns :\n df[column]=df[column].str.replace(column+\"-\",\"\").astype(int)\n\n # We don't need the column 'categories' anymore, so delete it\n df.drop('categories',1,inplace=True)\n \n # There are some duplicate records, so drop them\n df.drop_duplicates(inplace=True)\n \n return df", "def mopub_dataframe_cleaner(dataframe):\r\n print(\"Cleaning Mopub data...\")\r\n df = dataframe\r\n df['App'] = df.App.map({\"IMVU iOS - #1 3D Avatar Social App\":'IMVU iOS', \r\n \"IMVU Android - #1 3D Avatar Social App\":\"IMVU Android\"})\r\n \r\n df['Total_Code_Served'] = df['Requests']\r\n df['Partner'] = 'MoPub'\r\n \r\n df = df.rename(columns={'App ID':\"App_ID\", \"AdUnit ID\":\"AdUnit_ID\", \r\n 'AdUnit Format':\"AdUnit_Format\"})\r\n\r\n df_pivot = df.pivot_table(index=['Day', 'App', 'AdUnit', 'AdUnit_Format', \r\n 'Country', 'Partner'], \r\n values=['Total_Code_Served', 'Requests', \r\n 'Impressions', 'Clicks', 'Revenue'], \r\n aggfunc='sum')\r\n\r\n df_pivot.to_csv(\"mopub-pivot.csv\")\r\n\r\n df = pd.read_csv(\"mopub-pivot.csv\")\r\n\r\n df = df.rename(columns={\r\n 'AdUnit_Format':'UnitType'})\r\n\r\n df['UnitType'] = df.UnitType.map({'Banner':'banner',\r\n 'Native':'native', 'Rewarded video': 'video'})\r\n\r\n df = df[['Day', 'App', 'AdUnit', 'UnitType', 'Country', 'Total_Code_Served',\r\n 'Requests', 'Impressions', 'Clicks', 'Revenue', 'Partner']]\r\n \r\n os.remove(\"mopub-pivot.csv\")\r\n\r\n return df", "def preprocess(df):\n drop_cols = ['duration_ms', 'key', 'mode', 'time_signature', 'popularity', 'tempo']\n drop_cols += ['track_id', 'track_name', 'artist_name']\n for col in drop_cols:\n if col in list(df.columns):\n df = df.drop(columns=col)\n return df", "def fetchAndCleanDataframe(self):\n\n df = pd.read_csv('/Users/apple4u/Desktop/goksel tez/results_with_scenarios.csv')\n df.insider_label.fillna(0, inplace=True) # replaces null fields with 0\n df = df.drop(columns=['employee_name', 'scenario', 'role'])\n df = df.rename(columns={'insider_label':'label'})\n #df['label'] = df['insider_label'].astype('int64')\n #df.drop(columns='insider_label', inplace=True)\n df.set_index('user_id', inplace=True)\n X = df.iloc[:, :5].values #fetch all records first 5 columns\n y = df.label.values\n print(df.head())\n return X, y", "def prepare_data(df):\n X = df.drop(\"y\",axis=1)\n\n y=df[\"y\"]\n\n return X, y", "def clean_data(df_name):\n\n wines = df_name\n wines = wines.rename(columns={'Vintage': 'Year'})\n wines['Location'] = wines['Appellation'].apply(lambda x: x['Region']['Name'])\n wines['Region'] = wines['Appellation'].apply(lambda x: x['Name'])\n wines['Type'] = wines['Varietal'].apply(lambda x: x['WineType']['Name'])\n wines['Grape'] = wines['Varietal'].apply(lambda x: x['Name'])\n wines['Reviews'] = wines['Community'].apply(lambda x: x['Reviews']['Url'])\n drop_columns = ['Appellation', 'Community', 'Description', 'GeoLocation', 'Labels', 'ProductAttributes','Ratings','Retail', 'Url', 'Varietal', 'Vineyard', 'Vintages']\n wines.drop(drop_columns, axis=1, inplace=True)\n wines = wines[['Id', 'Name', 'Year', 'Type', 'Grape', 'Location', 'Region', 'PriceRetail', 'PriceMin', 'PriceMax', 'Reviews']]\n wines['CurrentReviews'] = '' #wines['CurrentReviews'].apply(lambda x: [\"\"])\n wines['PriorReviews'] = '' #wines['PriorReviews'].apply(lambda x: [''])\n\n return wines", "def trimDf(df):\n cols = set(df.columns)\n\n cols.remove('exclamationCount') # bug in our feature extraction code\n cols.remove('price') # considered only free apps\n cols.remove('appName') 
# removing appNames\n\n # return df[list(cols)]\n\n\n\n return df[list(('revSent', 'appLabel'))]", "def clean_dataframe(self, df_in , what = ''): \n \n if what == 'era5fb': # cleaning the era5 feedback only \n df = df_in[np.isfinite(df_in['obsvalue@body'])]\n try: \n df = df.loc[ df['vertco_type@body'] != 2 ] \n except:\n pass\n df = df.reindex()\n df = df[np.isfinite(df['vertco_reference_1@body'])]\n #print('check lengths: ' , len(df_in) , len(df) )\n new_ind = np.array ( range(len(df))) \n df['index'] =new_ind\n df = df.set_index('index')\n \n else: \n ### check if can be optimized ???\n df = df_in.loc[ df_in['z_coordinate_type'] != 2 ] # case where the levels are given in terms of geopotential only (pressure not available)\n \n df = df.loc[ (df['observation_value'] != -99999.0) \n & (df['observation_value'] != -999.0) \n & (df['observation_value'] != -9999) \n & (df['observation_value'] != -9999.0) \n & (df['observation_value'] != -999.9) \n & (df['observation_value'] != -8888 )\n & (df['observation_value'] != -8888.0 )\n \n #& (df['z_coordinate_type'] != 2) \n & (df['z_coordinate'] != -99999.0) \n & (df['z_coordinate'] != -9999.0 )\n & (df['z_coordinate'] != 999 )\n & (df['z_coordinate'] != 999.0 )\n \n \n ] #cleaning the values \n #clean = clean.loc[ (clean['z_coordinate_type'] != 2)] #cleaning the values\n #clean = clean.loc[ (clean['z_coordinate'] != -99999.0 )] #cleaning the values\n \n df = df[np.isfinite(df['observation_value'])] # excluding nan values \n df = df[np.isfinite(df['z_coordinate'])]\n \n return df", "def fyber_video_dataframe_cleaner(dataframe):\r\n print(\"Cleaning Fyber Video...\")\r\n df = dataframe\r\n df = df.fillna(0)\r\n \r\n delete_list = ['application_id', 'completions', 'ecpm_eur', 'ecpm_usd', \r\n 'fills', 'revenue_eur', 'unique_impressions']\r\n \r\n for entry in delete_list:\r\n del df[entry]\r\n \r\n df['Ad Type'] = 'video'\r\n df['Partner'] = 'Fyber_Video'\r\n df['Total_Code_Served'] = 0\r\n df['Clicks'] = 0\r\n \r\n df = df.rename(columns={'date':'Day', 'application_name':'App',\r\n \"ad_format\":\"AdUnit\", \"Ad Type\":\"UnitType\", \r\n \"country\":\"Country\", \"requests\":\"Requests\", \r\n \"impressions\":\"Impressions\", \"clicks\":\"Clicks\", \r\n \"revenue_usd\":\"Revenue\"})\r\n \r\n df['App'] = df['App'].replace({\"IMVU iOS Primary Wall\":\"IMVU iOS\", \r\n \"IMVU iOS External Offer Wall\":\"IMVU iOS\", \r\n \"IMVU Google Play\":\"IMVU Android\"}) \r\n\r\n df['Impressions'] = df['Impressions'].apply(lambda x:int(x))\r\n df['Requests'] = df['Requests'].apply(lambda x:int(x))\r\n\r\n df = df[[\"Day\", \"App\", \"AdUnit\", \"UnitType\", \"Country\", \r\n \"Total_Code_Served\", \"Requests\", \"Impressions\", \r\n \"Clicks\", \"Revenue\", \"Partner\"]]\r\n\r\n drop_index_list = []\r\n for num in list(df.index):\r\n if df.loc[num, 'App'] == 'Blue Bar Bundle ' or df.loc[num, 'App'] == 'NEXT Featured Offers':\r\n drop_index_list.append(num)\r\n\r\n df = df.drop(drop_index_list, axis=0)\r\n\r\n return df", "def _filter_df(adjmat, df, verbose=3):\n remcols = df.columns[~np.isin(df.columns.values, adjmat.columns.values)].values\n if len(remcols)>0:\n if verbose>=3: print('[bnlearn] >Removing columns from dataframe to make consistent with DAG [%s]' %(remcols))\n df.drop(labels=remcols, axis=1, inplace=True)\n return df", "def _filter_df(adjmat, df, verbose=3):\n remcols = df.columns[~np.isin(df.columns.values, adjmat.columns.values)].values\n if len(remcols)>0:\n if verbose>=3: print('[bnlearn] >Removing columns from dataframe to make consistent with 
DAG [%s]' %(remcols))\n df.drop(labels=remcols, axis=1, inplace=True)\n return df", "def clean_up_raw(df_raw):\n # exclude_subset = ['well', 'tile', 'cell', 'intensity', 'blob'] # causes issues with later joins, maybe a pandas bug\n import lasagna.utils\n df_raw[CYCLE] = df_raw[CYCLE].astype(int)\n df_raw = df_raw.sort_values([WELL, TILE, CELL, BLOB, CYCLE, CHANNEL])\n return df_raw", "def __clean_df(self):\n self.__convert_min()", "def clean_data(dataframe):\n # split categories into seperate\n categories = dataframe.categories.str.split(';', expand=True)\n \n # select the first row&col of the categories dataframe\n row&col = categories.iloc[0]\n cate_col = row&col.apply(lambda x: x[:-2])\n cate.columns = cate_colnames\n \n #convert categories values to numeric instead of strings\n for column in categories:\n categories[column] = categories[column].str[-1]\n categories[column] = categories[column].astype(int)\n \n # replace categories column in dataframe \n dataframe.drop(columns = ['categories'], inplace=True)\n # concatenate the original dataframe with the new `categories` dataframe\n dataframe = dataframe.join(categories)\n \n #drop duplicates\n dataframe.drop_duplicates(inplace=True)\n \n return dataframe", "def normalize_data(self, df: pd.DataFrame, leak_id: int = None) -> pd.DataFrame:\n # replace NaN with None\n return df.where(pd.notnull(df), None)" ]
[ "0.7641442", "0.6639029", "0.6506264", "0.65000397", "0.64830804", "0.64705735", "0.6425508", "0.64208907", "0.6354982", "0.63121957", "0.6298637", "0.6252741", "0.6214534", "0.62129265", "0.6210216", "0.6193213", "0.61802834", "0.61434525", "0.6137183", "0.61009353", "0.6073495", "0.6061282", "0.60444933", "0.60252833", "0.6020017", "0.6020017", "0.6016792", "0.60130364", "0.6012015", "0.6011587" ]
0.69470125
1
Computes ``log(exp(x) + exp(y))`` in a numerically stable way.
def logaddexp(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor: return torch.max(x, y) + torch.log(1 + torch.exp(-torch.abs(y - x)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _log_add(logx: float, logy: float) -> float:\n a, b = min(logx, logy), max(logx, logy)\n if a == -np.inf: # adding 0\n return b\n # Use exp(a) + exp(b) = (exp(a - b) + 1) * exp(b)\n return math.log1p(math.exp(a - b)) + b # log1p(x) = log(x + 1)", "def log_sum_exp(x):\n # TF ordering\n axis = len(x.shape) - 1\n m = paddle.max(x, axis=axis)\n m2 = paddle.max(x, axis=axis, keepdim=True)\n return m + paddle.log(paddle.sum(paddle.exp(x - m2), axis=axis))", "def logaddexp(X, Y):\n XY_max = T.maximum(X, Y)\n XY_min = T.minimum(X, Y)\n return XY_max + T.log1p(T.exp(XY_min - XY_max))", "def logadd(logx, logy):\n\n if logy > logx:\n logx, logy = logy, logx\n\n if logx == -float(\"inf\"):\n return logx\n\n diff = logy - logx\n if diff < -53: # does not make a difference at least in python 2.7.6\n return logx\n\n return logx + log2(1.0 + 2**diff)", "def log_sum_exp(x):\n x_max = x.max()\n return torch.log(torch.sum(torch.exp(x - x_max), 1, keepdim=True)) + x_max", "def log_sum_exp(x):\n x_max = x.data.max()\n return torch.log(torch.sum(torch.exp(x - x_max), 1, keepdim=True)) + x_max", "def log_sum_exp(x):\n x_max = x.data.max()\n return torch.log(torch.sum(torch.exp(x - x_max), 1, keepdim=True)) + x_max", "def log_sum_exp(self, x):\n b = numpy.max(x[(x<sys.maxsize)]) # ignore inf values\n\n s = b + numpy.log(numpy.sum(numpy.exp(x-b)))\n\n return s", "def addlogs(a,b):\n \n if a>b:\n return a + np.log(1+np.exp(b-a))\n else:\n return b + np.log(1+np.exp(a-b))", "def log_sum_exp(x):\n log_reduce_sum = P.ReduceSum()\n log = P.Log()\n exp = P.Exp()\n x_max = max(x.data)\n return log(log_reduce_sum(exp(x - x_max), 1)) + x_max", "def log_add(x, y):\n maximum = np.maximum(x,y)\n minimum = np.minimum(x,y)\n if(np.abs(maximum - minimum) > 30):\n # the difference is too small, return the just the maximum\n return maximum\n return maximum + np.log1p(np.exp(minimum - maximum))", "def logits_or(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:\n f = -(x + y) / 2\n t = logaddexp(logaddexp((x - y) / 2, (y - x) / 2), -f)\n return t - f", "def _log_sum_exp(x):\n axis = len(x.get_shape())-1\n m = tf.reduce_max(x, axis)\n m2 = tf.reduce_max(x, axis, keep_dims=True)\n return m + tf.log(tf.reduce_sum(tf.exp(x-m2), axis))", "def ln(x):\n return log(x, const.e)", "def log_sum_exp(x):\n axis = len(x.get_shape()) - 1\n m = tf.reduce_max(x, axis)\n m2 = tf.reduce_max(x, axis, keep_dims=True)\n return m + tf.log(tf.reduce_sum(tf.exp(x - m2), axis))", "def log_sum_exp(x, axis=None):\n xmax = x.max(axis=axis, keepdims=True)\n xmax_ = x.max(axis=axis)\n return xmax_ + T.log(T.exp(x - xmax).sum(axis=axis))", "def log_sum_exp(x, dim=0):\n max_x = torch.max(x, dim)[0]\n new_x = x - max_x.unsqueeze(dim).expand_as(x)\n return max_x + (new_x.exp().sum(dim)).log()", "def __call__(self, y1: torch.Tensor, y2: torch.Tensor) -> torch.Tensor:\n return torch.log(1 + torch.exp(-y1 * y2))", "def log_mean_exp(y):\n # init\n batch_size = y.size(0)\n sample_size = y.size(1)\n\n # log_mean_exp\n max_sample = torch.max(y, dim=1)[0]\n reshaped_max_sample = max_sample.view(batch_size, -1)\n log_mean_exp = torch.log(torch.mean(torch.exp(y - reshaped_max_sample), dim=1)) + max_sample\n return log_mean_exp", "def log2(x):\n raise NotImplementedError", "def log2(x):\n return math.log(x) / math.log(2)", "def logits_and(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:\n t = (x + y) / 2\n f = logaddexp(logaddexp((x - y) / 2, (y - x) / 2), -t)\n return t - f", "def logistic_loss(x, y):\n N = x.shape[0]\n x = np.squeeze(x)\n y_prime = (y + 1)/2\n h = 1 
/(1 + np.exp(-x))\n loss = np.sum(-np.log( (h**y_prime) * ((1-h)**(1-y_prime)) ))/N\n dx = np.exp(-y*x)*(-y)/(1+np.exp(-y*x))/N\n return loss, dx", "def logSumExp(ns):\n mx = np.max(ns)\n ds = ns - mx\n sumOfExp = np.exp(ds).sum()\n return mx + np.log(sumOfExp)", "def logistic_loss(x, y):\n N = x.shape[0]\n x_flat = np.squeeze(x)\n ex = np.exp(x_flat)\n loss = np.sum(-y*x_flat+np.log(1+ex))/N\n dx = (-y+ex/(1+ex))/N\n # dx = np.reshape(dx,(len(dx),1))\n return loss, dx", "def logarithmic():\n return Equivalency(\n [(dimensionless_unscaled, function_units.dex, np.log10, lambda x: 10.0**x)],\n \"logarithmic\",\n )", "def logsum_pair(logx, logy):\n if logx == logzero():\n return logy\n elif logx > logy:\n return logx + np.log1p(np.exp(logy-logx))\n else:\n return logy + np.log1p(np.exp(logx-logy))", "def log_sum_exp(v):\n\tm = max(v)\n\tx = m * np.ones(np.size(v))\n\treturn m + np.log(sum(np.exp(v - x)))", "def logistic(x):\n try:\n denom = (1 + math.e ** -x)\n except OverflowError:\n return 0.0\n return 1.0 / denom", "def _logsumexp(x):\n # Search maximum.\n max_x = None\n length = len(x)\n for i in range(length):\n if max_x is None or x[i] > max_x:\n max_x = x[i]\n\n # Calculate sum of exponential differences.\n sum_exp = 0\n for i in range(length):\n diff = x[i] - max_x\n sum_exp += np.exp(diff)\n\n log_sum_exp = max_x + np.log(sum_exp)\n\n return log_sum_exp" ]
[ "0.7809119", "0.7479544", "0.74780834", "0.744767", "0.74172765", "0.73048776", "0.73048776", "0.7300466", "0.7290193", "0.72830254", "0.72562283", "0.71559936", "0.7111937", "0.7094438", "0.7093719", "0.7079512", "0.70648754", "0.7059741", "0.705626", "0.7040542", "0.702694", "0.6931855", "0.6913358", "0.6901445", "0.6886081", "0.6876139", "0.68657094", "0.6859496", "0.68590873", "0.6858728" ]
0.7994726
0
Computes ``tensor.exp().sum(dim, keepdim).log()`` in a numerically stable way.
def logsumexp(tensor: torch.Tensor, dim: Optional[int] = None, keepdim: bool = False) -> torch.Tensor: if dim is None: tensor = tensor.reshape(-1) dim = -1 inputs_max = tensor.max(dim=dim, keepdim=True)[0] tensor = tensor - inputs_max if not keepdim: inputs_max = inputs_max.squeeze(dim) out = _safe_log(tensor.exp().sum(dim=dim, keepdim=keepdim)) + inputs_max return out
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def log_sum_exp(value, dim=None, keepdim=False):\n if dim is not None:\n m, _ = torch.max(value, dim=dim, keepdim=True)\n value0 = value - m\n if keepdim is False:\n m = m.squeeze(dim)\n return m + torch.log(torch.sum(torch.exp(value0), dim=dim, keepdim=keepdim))\n else:\n m = torch.max(value)\n sum_exp = torch.sum(torch.exp(value - m))\n return m + torch.log(sum_exp)", "def log_sum_exp(tensor, dim=-1, sum_op=torch.sum):\n max, _ = torch.max(tensor, dim=dim, keepdim=True)\n return torch.log(sum_op(torch.exp(tensor - max), dim=dim, keepdim=True) + 1e-8) + max", "def log_sum_exp(value, dim=None, keepdim=False):\n # TODO: torch.max(value, dim=None) threw an error at time of writing\n if dim is not None:\n m, _ = torch.max(value, dim=dim, keepdim=True)\n value0 = value - m\n if keepdim is False:\n m = m.squeeze(dim)\n return m + torch.log(torch.sum(torch.exp(value0),\n dim=dim, keepdim=keepdim))\n else:\n m = torch.max(value)\n sum_exp = torch.sum(torch.exp(value - m))\n if isinstance(sum_exp, Number):\n return m + math.log(sum_exp)\n else:\n return m + torch.log(sum_exp)", "def log_sum_exp(value, dim=None, keepdim=False):\n # TODO: torch.max(value, dim=None) threw an error at time of writing\n if dim is not None:\n m, _ = torch.max(value, dim=dim, keepdim=True)\n value0 = value - m\n if keepdim is False:\n m = m.squeeze(dim)\n return m + torch.log(torch.sum(torch.exp(value0),\n dim=dim, keepdim=keepdim))\n else:\n m = torch.max(value)\n sum_exp = torch.sum(torch.exp(value - m))\n if isinstance(sum_exp, Number):\n return m + math.log(sum_exp)\n else:\n return m + torch.log(sum_exp)", "def log_sum_exp(x, dim=0):\n max_x = torch.max(x, dim)[0]\n new_x = x - max_x.unsqueeze(dim).expand_as(x)\n return max_x + (new_x.exp().sum(dim)).log()", "def logsumexp(value, dim=None, keepdim=False):\n if dim is not None:\n m, _ = torch.max(value, dim=dim, keepdim=True)\n value0 = value - m\n if keepdim is False:\n m = m.squeeze(dim)\n return m + torch.log(torch.sum(torch.exp(value0),\n dim=dim, keepdim=keepdim))\n else:\n m = torch.max(value)\n sum_exp = torch.sum(torch.exp(value - m))\n if isinstance(sum_exp, Number):\n return m + math.log(sum_exp)\n else:\n return m + torch.log(sum_exp)", "def logsumexp(value, dim=None, keepdim=False):\n if dim is not None:\n m, _ = torch.max(value, dim=dim, keepdim=True)\n value0 = value - m\n if keepdim is False:\n m = m.squeeze(dim)\n return m + torch.log(torch.sum(torch.exp(value0),\n dim=dim, keepdim=keepdim))\n else:\n m = torch.max(value)\n sum_exp = torch.sum(torch.exp(value - m))\n if isinstance(sum_exp, Number):\n return m + math.log(sum_exp)\n else:\n return m + torch.log(sum_exp)", "def _log_sum_exp(x):\n axis = len(x.get_shape())-1\n m = tf.reduce_max(x, axis)\n m2 = tf.reduce_max(x, axis, keep_dims=True)\n return m + tf.log(tf.reduce_sum(tf.exp(x-m2), axis))", "def log_sum_exp(x):\n # TF ordering\n axis = len(x.shape) - 1\n m = paddle.max(x, axis=axis)\n m2 = paddle.max(x, axis=axis, keepdim=True)\n return m + paddle.log(paddle.sum(paddle.exp(x - m2), axis=axis))", "def log_sum_exp(x):\n axis = len(x.get_shape()) - 1\n m = tf.reduce_max(x, axis)\n m2 = tf.reduce_max(x, axis, keep_dims=True)\n return m + tf.log(tf.reduce_sum(tf.exp(x - m2), axis))", "def log_mean_exp(x, dim):\n return log_sum_exp(x, dim) - np.log(x.size(dim))", "def log_sum_exp(x, axis=None):\n xmax = x.max(axis=axis, keepdims=True)\n xmax_ = x.max(axis=axis)\n return xmax_ + T.log(T.exp(x - xmax).sum(axis=axis))", "def log_sum_exp(x):\n x_max = x.max()\n return 
torch.log(torch.sum(torch.exp(x - x_max), 1, keepdim=True)) + x_max", "def log_sum_exp(x):\n x_max = x.data.max()\n return torch.log(torch.sum(torch.exp(x - x_max), 1, keepdim=True)) + x_max", "def log_sum_exp(x):\n x_max = x.data.max()\n return torch.log(torch.sum(torch.exp(x - x_max), 1, keepdim=True)) + x_max", "def logsumexp(x, axis=None):\n xmax = K.max(x, axis=axis, keepdims=True)\n xmax_ = K.max(x, axis=axis)\n return xmax_ + K.log(K.sum(K.exp(x - xmax), axis=axis))", "def logSumExp(ns):\n mx = np.max(ns)\n ds = ns - mx\n sumOfExp = np.exp(ds).sum()\n return mx + np.log(sumOfExp)", "def log_sum_exp(vec):\r\n\r\n\r\n max_score, idx = torch.max(vec, -1, keepdim = True) # ( B, to_target, 1)\r\n # max_score = torch.gather(vec, 1, idx.view(-1, 1, m_size)).view(-1, 1, m_size) # B * M\r\n # max_score.expand_as(vec)\r\n # to_target = vec.size(1)\r\n\r\n return max_score.squeeze(-1) + torch.log(torch.sum(torch.exp(vec - max_score.expand_as(vec)), -1)) # B * to_target\r", "def log_sum_exp(v):\n\tm = max(v)\n\tx = m * np.ones(np.size(v))\n\treturn m + np.log(sum(np.exp(v - x)))", "def log_sum_exp(ns):\n ns = np.array(ns)\n max_num = max(ns)\n ds = ns - max_num\n sumOfExp = np.exp(ds).sum()\n return max_num + np.log(sumOfExp)", "def logsumexp_trick(sum_term):\n max_term = np.max(sum_term)\n return max_term + np.log(np.sum(np.exp(sum_term-max_term)))", "def log_softmax(x: jnp.DeviceArray, *, axis: int = 0) -> jnp.DeviceArray:\n return x - jnp.expand_dims(jnp.log(jnp.sum(jnp.exp(x), axis=axis)), axis)", "def log_sum_exp(Z):\n return np.max(Z) + np.log(np.sum(np.exp(Z - np.max(Z))))", "def log_sum_exp_pytorch(vec: torch.Tensor) -> torch.Tensor:\n maxScores, idx = torch.max(vec, 1)\n maxScores[maxScores == -float(\"Inf\")] = 0\n maxScoresExpanded = maxScores.view(vec.shape[0] ,1 , vec.shape[2]).expand(vec.shape[0], vec.shape[1], vec.shape[2])\n return maxScores + torch.log(torch.sum(torch.exp(vec - maxScoresExpanded), 1))", "def log_sum_exp(x):\n log_reduce_sum = P.ReduceSum()\n log = P.Log()\n exp = P.Exp()\n x_max = max(x.data)\n return log(log_reduce_sum(exp(x - x_max), 1)) + x_max", "def log_prob_from_logits(x):\n axis = len(x.get_shape()) - 1\n m = tf.reduce_max(x, axis, keep_dims=True)\n return x - m - tf.log(tf.reduce_sum(tf.exp(x - m), axis, keep_dims=True))", "def logsumexp(a, axis=None, b=None, keepdims=False, return_sign=False):\n if b is not None:\n a, b = np.broadcast_arrays(a, b)\n if np.any(b == 0):\n a = a + 0. 
# promote to at least float\n a[b == 0] = -np.inf\n\n a_max = np.amax(a, axis=axis, keepdims=True)\n\n if a_max.ndim > 0:\n a_max[~np.isfinite(a_max)] = 0\n elif not np.isfinite(a_max):\n a_max = 0\n\n if b is not None:\n b = np.asarray(b)\n tmp = b * np.exp(a - a_max)\n else:\n tmp = np.exp(a - a_max)\n\n # suppress warnings about log of zero\n with np.errstate(divide='ignore'):\n s = np.sum(tmp, axis=axis, keepdims=keepdims)\n if return_sign:\n sgn = np.sign(s)\n s *= sgn # /= makes more sense but we need zero -> zero\n out = np.log(s)\n\n if not keepdims:\n a_max = np.squeeze(a_max, axis=axis)\n out += a_max\n\n if return_sign:\n return out, sgn\n else:\n return out", "def logsumexp(input_matrix, reduction_indices=1, keep_dims=False):\r\n\r\n max_input_matrix1 = input_matrix.max(reduction_indices, keepdims=keep_dims)\r\n max_input_matrix2 = max_input_matrix1\r\n if not keep_dims:\r\n max_input_matrix2 = np.expand_dims(max_input_matrix2, reduction_indices)\r\n return np.log(\r\n np.sum(\r\n np.exp(input_matrix - max_input_matrix2),\r\n reduction_indices,\r\n keepdims=keep_dims)) + max_input_matrix1", "def log_prob_from_logits(x):\n axis = len(x.get_shape())-1\n m = tf.reduce_max(x, axis, keep_dims=True)\n return x - m - tf.log(tf.reduce_sum(tf.exp(x-m), axis, keep_dims=True))", "def log_prob_from_logits(x):\n axis = len(x.shape) - 1\n m = x.max(dim=axis, keepdim=True)[0]\n return x - m - torch.log(torch.exp(x - m).sum(dim=axis, keepdim=True))" ]
[ "0.817568", "0.81515825", "0.81142867", "0.81142867", "0.8017166", "0.80124635", "0.80124635", "0.793551", "0.7929734", "0.7846384", "0.7771402", "0.7649828", "0.7639434", "0.75482917", "0.75482917", "0.72301334", "0.7201139", "0.71290755", "0.71036375", "0.7100838", "0.70914286", "0.6955224", "0.6924537", "0.69094276", "0.6835218", "0.6812634", "0.6799375", "0.67986846", "0.6794213", "0.6769389" ]
0.8387753
0
Computes ``torch.matmul(mat1.exp(), mat2.exp()).log()`` in a numerically stable way.
def logmatmulexp(mat1: torch.Tensor, mat2: torch.Tensor, use_mm: bool = False) -> torch.Tensor: mat1_shape = mat1.size() mat2_shape = mat2.size() mat1 = mat1.contiguous().view(-1, mat1_shape[-1]) mat2 = move_dim(mat2, 0, -1) mat2 = mat2.contiguous().view(-1, mat2_shape[0]) if use_mm: mat1_max = mat1.max(dim=-1, keepdim=True)[0] mat2_max = mat2.max(dim=-1, keepdim=True)[0] mat1 = mat1 - mat1_max mat2 = mat2 - mat2_max out = _safe_log(torch.matmul(mat1.exp(), mat2.exp().t())) out = out + mat1_max + mat2_max.t() else: out_sum = mat1.unsqueeze(1) + mat2.unsqueeze(0) out = logsumexp(out_sum, dim=-1) return out.view(concat_shape(mat1_shape[:-1], mat2_shape[1:]))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def batch_logmatmulexp(mat1: torch.Tensor, mat2: torch.Tensor, use_mm: bool = False) -> torch.Tensor:\n mat1_shape = mat1.size()\n mat2_shape = mat2.size()\n mat1 = mat1.contiguous().view(mat1_shape[0], -1, mat1_shape[-1])\n mat2 = move_dim(mat2, 1, -1)\n mat2 = mat2.contiguous().view(mat2_shape[0], -1, mat2_shape[1])\n\n if use_mm:\n mat1_max = mat1.max(dim=-1, keepdim=True)[0]\n mat2_max = mat2.max(dim=-1, keepdim=True)[0]\n mat1 = mat1 - mat1_max\n mat2 = mat2 - mat2_max\n\n out = _safe_log(torch.bmm(mat1.exp(), mat2.exp().permute(0, 2, 1)))\n out = out + mat1_max + mat2_max.permute(0, 2, 1)\n else:\n out_sum = mat1.unsqueeze(2) + mat2.unsqueeze(1)\n out = logsumexp(out_sum, dim=-1)\n\n return out.view(concat_shape(mat1_shape[:-1], mat2_shape[2:]))", "def Log(A, B):\n return logm(inv(A).dot(B))", "def Exp(A, B):\n return A.dot(expm(B))", "def logaddexp(a, b):\n\n return torch.logsumexp(torch.cat([a.unsqueeze(0), b.unsqueeze(0)]), dim=0)", "def __call__(self, y1: torch.Tensor, y2: torch.Tensor) -> torch.Tensor:\n return torch.log(1 + torch.exp(-y1 * y2))", "def logaddexp(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:\n return torch.max(x, y) + torch.log(1 + torch.exp(-torch.abs(y - x)))", "def forward(log_emlik, log_startprob, log_transmat):\n logPi=log_startprob[:-1]\n logB=log_emlik\n logA=log_transmat[:-1,:-1]\n alpha = np.zeros_like(logB)\n alpha[0]=logB[0]+logPi\n for i in range(1,logB.shape[0]):\n for j in range(logA.shape[0]):\n alpha[i][j]=logsumexp(alpha[i-1]+logA[:,j]+logB[i][j])\n return alpha", "def calculate_matmul(mat_a, mat_b):\n assert mat_a.shape[-2] == 1 and mat_b.shape[-1] == 1\n return tf.reduce_sum(tf.squeeze(mat_a, -2) * tf.squeeze(mat_b, -1), axis=2, keepdims=True)", "def log1mexp(x: torch.Tensor) -> torch.Tensor:\n mask = (x < _log05).to(x.dtype)\n impl1 = torch.log1p(-torch.exp(x))\n impl2 = torch.log(-torch.expm1(x))\n return impl1 * mask + impl2 * (1 - mask)", "def power(base, exp):\n base_v, base_d = Tensor.get_value_and_deriv(base)\n exp_v, exp_d = Tensor.get_value_and_deriv(exp)\n\n result = base_v ** exp_v\n a = base_d.mul(exp_v * base_v ** (exp_v - 1.0))\n b = exp_d.mul(result * np.log(base_v))\n return Tensor(result, a + b)", "def log_sum_exp(x):\n # TF ordering\n axis = len(x.shape) - 1\n m = paddle.max(x, axis=axis)\n m2 = paddle.max(x, axis=axis, keepdim=True)\n return m + paddle.log(paddle.sum(paddle.exp(x - m2), axis=axis))", "def gmmloglik(log_emlik, weights):\n N,_ = log_emlik.shape;\n ll = 0;\n for i in range(N):\n ll += logsumexp(log_emlik[i, :] + np.log(weights));\n return ll", "def np_matmul(mat1, mat2):\n return np.matmul(mat1, mat2)", "def log_sum_exp(vec):\r\n\r\n\r\n max_score, idx = torch.max(vec, -1, keepdim = True) # ( B, to_target, 1)\r\n # max_score = torch.gather(vec, 1, idx.view(-1, 1, m_size)).view(-1, 1, m_size) # B * M\r\n # max_score.expand_as(vec)\r\n # to_target = vec.size(1)\r\n\r\n return max_score.squeeze(-1) + torch.log(torch.sum(torch.exp(vec - max_score.expand_as(vec)), -1)) # B * to_target\r", "def forward(log_emlik, log_startprob, log_transmat):\n \n alpha = np.zeros(np.shape(log_emlik))\n N=len(alpha)\n # recursiveAlpha(N-1, alpha, log_emlik, log_startprob, log_transmat)\n alpha[0][:] = log_startprob.T + log_emlik[0]\n\n for n in range(1,len(alpha)):\n for i in range(alpha.shape[1]):\n alpha[n, i] = logsumexp(alpha[n - 1] + log_transmat[:,i]) + log_emlik[n,i]\n return alpha, logsumexp(alpha[N-1])", "def logits_and(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:\n t = (x + y) / 2\n f = logaddexp(logaddexp((x - 
y) / 2, (y - x) / 2), -t)\n return t - f", "def test_perform_sigm_times_exp(self):\r\n x, y, z, t = tensor.vectors('x', 'y', 'z', 't')\r\n exp = tensor.exp\r\n\r\n def ok(expr1, expr2):\r\n trees = [parse_mul_tree(e) for e in (expr1, expr2)]\r\n perform_sigm_times_exp(trees[0])\r\n trees[0] = simplify_mul(trees[0])\r\n good = theano.gof.graph.is_same_graph(\r\n compute_mul(trees[0]),\r\n compute_mul(trees[1]))\r\n if not good:\r\n print trees[0]\r\n print trees[1]\r\n print '***'\r\n theano.printing.debugprint(compute_mul(trees[0]))\r\n print '***'\r\n theano.printing.debugprint(compute_mul(trees[1]))\r\n assert good\r\n ok(sigmoid(x) * exp(-x), sigmoid(-x))\r\n ok(-x * sigmoid(x) * (y * (-1 * z) * exp(-x)),\r\n -x * sigmoid(-x) * (y * (-1 * z)))\r\n ok(-sigmoid(-x) *\r\n (exp(y) * (-exp(-z) * 3 * -exp(x)) *\r\n (y * 2 * (-sigmoid(-y) * (z + t) * exp(z)) * sigmoid(z))) *\r\n -sigmoid(x),\r\n sigmoid(x) *\r\n (-sigmoid(y) * (-sigmoid(-z) * 3) * (y * 2 * ((z + t) * exp(z)))) *\r\n -sigmoid(x))\r\n ok(exp(-x) * -exp(-x) * (-sigmoid(x) * -sigmoid(x)),\r\n -sigmoid(-x) * sigmoid(-x))\r\n ok(-exp(x) * -sigmoid(-x) * -exp(-x),\r\n -sigmoid(-x))", "def log_sum_exp(tensor, dim=-1, sum_op=torch.sum):\n max, _ = torch.max(tensor, dim=dim, keepdim=True)\n return torch.log(sum_op(torch.exp(tensor - max), dim=dim, keepdim=True) + 1e-8) + max", "def __matmul__(self, tensor):\n return self.matmul(tensor)", "def logsumexp(tensor: torch.Tensor, dim: Optional[int] = None, keepdim: bool = False) -> torch.Tensor:\n if dim is None:\n tensor = tensor.reshape(-1)\n dim = -1\n\n inputs_max = tensor.max(dim=dim, keepdim=True)[0]\n tensor = tensor - inputs_max\n if not keepdim:\n inputs_max = inputs_max.squeeze(dim)\n\n out = _safe_log(tensor.exp().sum(dim=dim, keepdim=keepdim)) + inputs_max\n return out", "def forward(log_emlik, log_startprob, log_transmat):\n N = log_emlik.shape[0]\n M = log_emlik.shape[1]\n forward_prob = np.zeros((N,M))\n for i in range(N):\n if i == 0:\n forward_prob[0, :] = log_startprob[0, :M] + log_emlik[0, :M]\n else:\n for j in range(M):\n forward_prob[i, j] = logsumexp(forward_prob[i-1] + log_transmat[:M, j]) + log_emlik[i, j]\n\n return forward_prob", "def log_sum_exp(x):\n x_max = x.max()\n return torch.log(torch.sum(torch.exp(x - x_max), 1, keepdim=True)) + x_max", "def np_matmul(mat1, mat2):\n return mat1.dot(mat2)", "def log_hchg(x, a1, a2, mu1, mu2):\n assert np.alltrue(mu1 > 0) and np.alltrue(mu2 > 0)\n assert np.alltrue(a1 > 0) and np.alltrue(a2 > 0)\n \n out_shp = np.broadcast(x, a1, a2, mu1, mu2).shape\n if out_shp == ():\n out_shp = (1,)\n \n x = np.broadcast_to(x, out_shp).ravel()[:, np.newaxis]\n a1 = np.broadcast_to(a1, out_shp).ravel()[:, np.newaxis]\n a2 = np.broadcast_to(a2, out_shp).ravel()[:, np.newaxis]\n mu1 = np.broadcast_to(mu1, out_shp).ravel()[:, np.newaxis]\n mu2 = np.broadcast_to(mu2, out_shp).ravel()[:, np.newaxis]\n \n j = np.arange(250)\n \n out = j * np.log(mu1 * x) - sp.gammaln(j+1)\n out += log_poch(a1+a2, j) - log_poch(a1, j)\n out += np.log(sp.hyp1f1(a1+a2+j, a2, mu2*(1-x)))\n out = sp.logsumexp(out, axis=1)\n return out.reshape(out_shp) if out.size > 1 else float(out)", "def test_log_exp_bijective(Group: Type[jaxlie.MatrixLieGroup]):\n transform = sample_transform(Group)\n\n tangent = transform.log()\n assert tangent.shape == (Group.tangent_dim,)\n\n exp_transform = Group.exp(tangent)\n assert_transforms_close(transform, exp_transform)\n assert_arrays_close(tangent, exp_transform.log())", "def logsumexp(value, dim=None, keepdim=False):\n if dim is not 
None:\n m, _ = torch.max(value, dim=dim, keepdim=True)\n value0 = value - m\n if keepdim is False:\n m = m.squeeze(dim)\n return m + torch.log(torch.sum(torch.exp(value0),\n dim=dim, keepdim=keepdim))\n else:\n m = torch.max(value)\n sum_exp = torch.sum(torch.exp(value - m))\n if isinstance(sum_exp, Number):\n return m + math.log(sum_exp)\n else:\n return m + torch.log(sum_exp)", "def logsumexp(value, dim=None, keepdim=False):\n if dim is not None:\n m, _ = torch.max(value, dim=dim, keepdim=True)\n value0 = value - m\n if keepdim is False:\n m = m.squeeze(dim)\n return m + torch.log(torch.sum(torch.exp(value0),\n dim=dim, keepdim=keepdim))\n else:\n m = torch.max(value)\n sum_exp = torch.sum(torch.exp(value - m))\n if isinstance(sum_exp, Number):\n return m + math.log(sum_exp)\n else:\n return m + torch.log(sum_exp)", "def log_sum_exp_pytorch(vec: torch.Tensor) -> torch.Tensor:\n maxScores, idx = torch.max(vec, 1)\n maxScores[maxScores == -float(\"Inf\")] = 0\n maxScoresExpanded = maxScores.view(vec.shape[0] ,1 , vec.shape[2]).expand(vec.shape[0], vec.shape[1], vec.shape[2])\n return maxScores + torch.log(torch.sum(torch.exp(vec - maxScoresExpanded), 1))", "def log_sum_exp(x):\n x_max = x.data.max()\n return torch.log(torch.sum(torch.exp(x - x_max), 1, keepdim=True)) + x_max", "def log_sum_exp(x):\n x_max = x.data.max()\n return torch.log(torch.sum(torch.exp(x - x_max), 1, keepdim=True)) + x_max" ]
[ "0.7710715", "0.69934046", "0.6830569", "0.668147", "0.6496658", "0.6243134", "0.61634815", "0.6114593", "0.598911", "0.5959243", "0.59460145", "0.591172", "0.59081787", "0.5908115", "0.5863151", "0.58574504", "0.58257955", "0.5823142", "0.57903874", "0.57836646", "0.5780225", "0.5779136", "0.57653433", "0.57442325", "0.5743858", "0.5731994", "0.5731994", "0.57318515", "0.57117474", "0.57117474" ]
0.8188148
0
Computes ``torch.bmm(mat1.exp(), mat2.exp()).log()`` in a numerically stable way.
def batch_logmatmulexp(mat1: torch.Tensor, mat2: torch.Tensor, use_mm: bool = False) -> torch.Tensor: mat1_shape = mat1.size() mat2_shape = mat2.size() mat1 = mat1.contiguous().view(mat1_shape[0], -1, mat1_shape[-1]) mat2 = move_dim(mat2, 1, -1) mat2 = mat2.contiguous().view(mat2_shape[0], -1, mat2_shape[1]) if use_mm: mat1_max = mat1.max(dim=-1, keepdim=True)[0] mat2_max = mat2.max(dim=-1, keepdim=True)[0] mat1 = mat1 - mat1_max mat2 = mat2 - mat2_max out = _safe_log(torch.bmm(mat1.exp(), mat2.exp().permute(0, 2, 1))) out = out + mat1_max + mat2_max.permute(0, 2, 1) else: out_sum = mat1.unsqueeze(2) + mat2.unsqueeze(1) out = logsumexp(out_sum, dim=-1) return out.view(concat_shape(mat1_shape[:-1], mat2_shape[2:]))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def logmatmulexp(mat1: torch.Tensor, mat2: torch.Tensor, use_mm: bool = False) -> torch.Tensor:\n mat1_shape = mat1.size()\n mat2_shape = mat2.size()\n mat1 = mat1.contiguous().view(-1, mat1_shape[-1])\n mat2 = move_dim(mat2, 0, -1)\n mat2 = mat2.contiguous().view(-1, mat2_shape[0])\n\n if use_mm:\n mat1_max = mat1.max(dim=-1, keepdim=True)[0]\n mat2_max = mat2.max(dim=-1, keepdim=True)[0]\n mat1 = mat1 - mat1_max\n mat2 = mat2 - mat2_max\n\n out = _safe_log(torch.matmul(mat1.exp(), mat2.exp().t()))\n out = out + mat1_max + mat2_max.t()\n else:\n out_sum = mat1.unsqueeze(1) + mat2.unsqueeze(0)\n out = logsumexp(out_sum, dim=-1)\n\n return out.view(concat_shape(mat1_shape[:-1], mat2_shape[1:]))", "def Log(A, B):\n return logm(inv(A).dot(B))", "def Exp(A, B):\n return A.dot(expm(B))", "def gmmloglik(log_emlik, weights):", "def gmmloglik(log_emlik, weights):", "def logaddexp(a, b):\n\n return torch.logsumexp(torch.cat([a.unsqueeze(0), b.unsqueeze(0)]), dim=0)", "def gmmloglik(log_emlik, weights):\n N,_ = log_emlik.shape;\n ll = 0;\n for i in range(N):\n ll += logsumexp(log_emlik[i, :] + np.log(weights));\n return ll", "def backward(log_emlik, log_startprob, log_transmat):\n N, M = log_emlik.shape\n logPi=log_startprob[:-1]\n logB=log_emlik\n logA=log_transmat[:-1,:-1]\n beta = np.zeros_like(logB)\n for t in range(N-2,-1,-1):\n for j in range(M):\n beta[t][j]=logsumexp(beta[t+1]+logA[j]+logB[t+1])\n\n return beta", "def forward(log_emlik, log_startprob, log_transmat):\n logPi=log_startprob[:-1]\n logB=log_emlik\n logA=log_transmat[:-1,:-1]\n alpha = np.zeros_like(logB)\n alpha[0]=logB[0]+logPi\n for i in range(1,logB.shape[0]):\n for j in range(logA.shape[0]):\n alpha[i][j]=logsumexp(alpha[i-1]+logA[:,j]+logB[i][j])\n return alpha", "def gmmloglik(log_emlik, weights):\n gmm_loglik = np.mean(log_emlik)\n\n\n return gmm_loglik", "def θ(a, b, dimA=2, dimB=2, normBy=2):\n a_norm = torch.norm(a, normBy, dimA, keepdim=True).expand_as(a) + δ\n b_norm = torch.norm(b, normBy, dimB, keepdim=True).expand_as(b) + δ\n\n x = torch.bmm(a, b.transpose(1, 2)).transpose(1, 2) / (\n torch.bmm(a_norm, b_norm.transpose(1, 2)).transpose(1, 2) + δ)\n # apply_dict(locals())\n return x", "def __call__(self, y1: torch.Tensor, y2: torch.Tensor) -> torch.Tensor:\n return torch.log(1 + torch.exp(-y1 * y2))", "def calculate_matmul(mat_a, mat_b):\n assert mat_a.shape[-2] == 1 and mat_b.shape[-1] == 1\n return tf.reduce_sum(tf.squeeze(mat_a, -2) * tf.squeeze(mat_b, -1), axis=2, keepdims=True)", "def expectation(N,K,log_M):\n\tg0 = log_M[0,0]\n\tg = log_M[1:]\n\ta = forward(g0,g,N,K)\n\tb = backward(g,N,K)\n\tprint \"Forward:\"\n\tprint a\n\tprint \"Backward:\"\n\tprint b\n\t# log-normalizing constant\n\tlogZ = misc.logsumexp(a[N-1,:])\n\n\tE = defaultdict(float)\n\n\t# The first factor needs to be special case'd\n\t# E[ f( y_0 ) ] = p(y_0 | y_[1:N], x) * f(y_0)\n\tc = exp(g0 + b[0,:] - logZ).clip(0.0, 1.0)\n\tfor y in xrange(K):\n\t\tp = c[y]\n\t\tif p < 1e-40: continue # skip really small updates.\n\t\tfor k in f[0, None, y]:\n\t\t\tE[k] += p\n\n\tfor t in xrange(1,N):\n\t\t# vectorized computation of the marginal for this transition factor\n\t\tc = exp((add.outer(a[t-1,:], b[t,:]) + g[t-1,:,:] - logZ)).clip(0.0, 1.0)\n\n\t\tfor yp in xrange(K):\n\t\t\tfor y in xrange(K):\n\t\t\t\t# we can also use the following to compute ``p`` but its quite\n\t\t\t\t# a bit slower than the computation of vectorized quantity ``c``.\n\t\t\t\t#p = exp(a[t-1,yp] + g[t-1,yp,y] + b[t,y] - logZ).clip(0.0, 1.0)\n\t\t\t\tp = c[yp, 
y]\n\t\t\t\tif p < 1e-40: continue # skip really small updates.\n\t\t\t\t# expectation of this factor is p*f(t, yp, y)\n\t\t\t\tfor k in f[t, yp, y]:\n\t\t\t\t\tE[k] += p\n\n\treturn E", "def backward(log_emlik, log_startprob, log_transmat):\n N = log_emlik.shape[0]\n M = log_emlik.shape[1]\n backward_prob = np.zeros((N, M))\n for n in reversed(range(N-1)):\n for i in range(M):\n backward_prob[n, i] = logsumexp(backward_prob[n + 1, :M] + log_transmat[i, :M] + log_emlik[n + 1, :M])\n\n return backward_prob", "def mmd_loss_exact(distribution_a, distribution_b, num_samples, gamma=1.):\n assert distribution_a.event_shape == distribution_b.event_shape\n assert distribution_a.batch_shape[1:] == distribution_b.batch_shape[1:]\n\n # shape (num_samples * batch_size_a, dim_x)\n samples_a = get_samples(distribution_a, num_samples)\n # shape (num_samples * batch_size_b, dim_x)\n samples_b = get_samples(distribution_b, num_samples)\n\n # Make matrices of shape\n # (size_b, size_a, dim_x)\n # where:\n # size_a = num_samples * batch_size_a\n # size_b = num_samples * batch_size_b\n size_a = samples_a.shape[0]\n size_b = samples_b.shape[0]\n x_a = jnp.expand_dims(samples_a, axis=0)\n x_a = jnp.tile(x_a, (size_b, 1, 1))\n x_b = jnp.expand_dims(samples_b, axis=1)\n x_b = jnp.tile(x_b, (1, size_a, 1))\n\n def kernel_mean(x, y):\n \"\"\"Gaussian kernel mean.\"\"\"\n\n diff = x - y\n\n # Contract over dim_x.\n exponent = - jnp.einsum('ijk,ijk->ij', diff, diff) / gamma\n\n # This has shape (size_b, size_a).\n kernel_matrix = jnp.exp(exponent)\n\n # Shape ().\n return jnp.mean(kernel_matrix)\n\n # Equation 7 from arxiv 1511.00830\n return (\n kernel_mean(x_a, x_a)\n + kernel_mean(x_b, x_b)\n - 2 * kernel_mean(x_a, x_b))", "def logaddexp(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:\n return torch.max(x, y) + torch.log(1 + torch.exp(-torch.abs(y - x)))", "def _dot(self, s1, s2, tf_embs):\n mat1 = tf.gather(tf_embs, s1)\n mat2 = tf.gather(tf_embs, s2)\n return tf.matmul(mat1, tf.transpose(mat2))", "def forward(log_emlik, log_startprob, log_transmat):\n N = log_emlik.shape[0]\n M = log_emlik.shape[1]\n forward_prob = np.zeros((N,M))\n for i in range(N):\n if i == 0:\n forward_prob[0, :] = log_startprob[0, :M] + log_emlik[0, :M]\n else:\n for j in range(M):\n forward_prob[i, j] = logsumexp(forward_prob[i-1] + log_transmat[:M, j]) + log_emlik[i, j]\n\n return forward_prob", "def backward(log_emlik, log_startprob, log_transmat):\n #print(log_transmat)\n beta = np.zeros(log_emlik.shape)\n n = len(log_emlik)-2\n log_transmat = log_transmat[0:-1,0:-1];\n\n\n\n while n >= 0:\n\n for j in range(0,len(log_emlik[0])):\n\n beta[n,j] = logsumexp(log_transmat[j,:] + log_emlik[n+1,:] + beta[n+1,:])\n\n n = n -1\n #print(beta[n,:])\n #print(beta)\n return beta", "def logits_and(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:\n t = (x + y) / 2\n f = logaddexp(logaddexp((x - y) / 2, (y - x) / 2), -t)\n return t - f", "def lazy_matrix_mul(m_a, m_b):\n return np.dot(m_a, m_b)", "def beta_log_likelihood(x, shape1, shape2):\n logbeta = loggamma(shape1) + loggamma(shape2) - loggamma(shape1+shape2)\n return (1.0-shape1)*np.sum(np.log(x)) + (1.0-shape2)*np.sum(np.log(1.0-x)) + len(x)*logbeta", "def log_loss(m_true, alpha, alpha0, m_probs, lambd=1.0):\n \n m_probs = tf.clip_by_value(m_probs, 1e-15, 1 - 1e-15)\n loss = -tf.reduce_mean(input_tensor=tf.reduce_sum(input_tensor=m_true * tf.math.log(m_probs), axis=1))\n if lambd > 0:\n kl = kullback_leibler_dirichlet(m_true, alpha)\n loss = loss + lambd * kl\n return loss", "def 
forward(log_emlik, log_startprob, log_transmat):\n \n alpha = np.zeros(np.shape(log_emlik))\n N=len(alpha)\n # recursiveAlpha(N-1, alpha, log_emlik, log_startprob, log_transmat)\n alpha[0][:] = log_startprob.T + log_emlik[0]\n\n for n in range(1,len(alpha)):\n for i in range(alpha.shape[1]):\n alpha[n, i] = logsumexp(alpha[n - 1] + log_transmat[:,i]) + log_emlik[n,i]\n return alpha, logsumexp(alpha[N-1])", "def addlogs(a,b):\n \n if a>b:\n return a + np.log(1+np.exp(b-a))\n else:\n return b + np.log(1+np.exp(a-b))", "def np_matmul(mat1, mat2):\n return np.matmul(mat1, mat2)", "def logm(self, x):\n\n if K.backend() == 'theano':\n # construct theano tensor operation\n from theano.tensor.nlinalg import svd, diag\n from theano.tensor.elemwise import Elemwise\n from theano.scalar import log\n import theano.tensor as T\n # This implementation would be extremely slow. but efficient?\n u, d, v = svd(x)\n d += self.eps\n inner = diag(T.log(d))\n res = T.dot(u, T.dot(inner, v))\n return res\n else:\n from kyu.tensorflow.ops.svd_gradients import batch_matrix_log\n return batch_matrix_log(x, self.eps)", "def log_hchg(x, a1, a2, mu1, mu2):\n assert np.alltrue(mu1 > 0) and np.alltrue(mu2 > 0)\n assert np.alltrue(a1 > 0) and np.alltrue(a2 > 0)\n \n out_shp = np.broadcast(x, a1, a2, mu1, mu2).shape\n if out_shp == ():\n out_shp = (1,)\n \n x = np.broadcast_to(x, out_shp).ravel()[:, np.newaxis]\n a1 = np.broadcast_to(a1, out_shp).ravel()[:, np.newaxis]\n a2 = np.broadcast_to(a2, out_shp).ravel()[:, np.newaxis]\n mu1 = np.broadcast_to(mu1, out_shp).ravel()[:, np.newaxis]\n mu2 = np.broadcast_to(mu2, out_shp).ravel()[:, np.newaxis]\n \n j = np.arange(250)\n \n out = j * np.log(mu1 * x) - sp.gammaln(j+1)\n out += log_poch(a1+a2, j) - log_poch(a1, j)\n out += np.log(sp.hyp1f1(a1+a2+j, a2, mu2*(1-x)))\n out = sp.logsumexp(out, axis=1)\n return out.reshape(out_shp) if out.size > 1 else float(out)", "def _logaddexp(a, b, mask):\n output = torch.zeros_like(a)\n # find the mask to output b when a contain -inf values\n out_put_b_mask = torch.isinf(a) & (a < 0)\n\n # find the mask to output a when b contain -inf values\n out_put_a_mask = torch.isinf(b) & (b < 0)\n # in order not to take the padded number into account\n # stop do accumulating when iteration gets in padded data\n out_put_a_mask = out_put_a_mask | ~ mask[:, None, None]\n\n # if no singularity cases happen, set the masks for logsumexp computations\n rest_mask = ~(out_put_a_mask | out_put_b_mask)\n\n # set value for found masks\n output[out_put_b_mask] = b[out_put_b_mask]\n output[out_put_a_mask] = a[out_put_a_mask]\n c = torch.cat((a[None,:], b[None,:]), dim=0)\n output[rest_mask] = torch.logsumexp(c, dim=0)[rest_mask]\n \n return output" ]
[ "0.8067815", "0.6765245", "0.6756691", "0.65030116", "0.65030116", "0.6466798", "0.63860583", "0.62043005", "0.6199516", "0.6040961", "0.6021661", "0.59460133", "0.5943857", "0.58040553", "0.5803235", "0.5770154", "0.57698894", "0.5767673", "0.5760093", "0.5728608", "0.56762016", "0.5636193", "0.562517", "0.56162834", "0.5615764", "0.56123", "0.5610725", "0.55971694", "0.55926174", "0.55884546" ]
0.81068563
0
Computes ``log(1 - exp(x))`` in a numerically stable way.
def log1mexp(x: torch.Tensor) -> torch.Tensor:
    mask = (x < _log05).to(x.dtype)
    impl1 = torch.log1p(-torch.exp(x))
    impl2 = torch.log(-torch.expm1(x))
    return impl1 * mask + impl2 * (1 - mask)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def log1p_exp(x):\n x_ = x * x.ge(0).to(torch.float32)\n res = x_ + torch.log1p(torch.exp(-torch.abs(x)))\n return res", "def log_sum_exp(x):\n x_max = x.max()\n return torch.log(torch.sum(torch.exp(x - x_max), 1, keepdim=True)) + x_max", "def log_sum_exp(self, x):\n b = numpy.max(x[(x<sys.maxsize)]) # ignore inf values\n\n s = b + numpy.log(numpy.sum(numpy.exp(x-b)))\n\n return s", "def logistic(x):\n try:\n denom = (1 + math.e ** -x)\n except OverflowError:\n return 0.0\n return 1.0 / denom", "def log1p(x):\n return 0.0", "def log_sum_exp(x):\n x_max = x.data.max()\n return torch.log(torch.sum(torch.exp(x - x_max), 1, keepdim=True)) + x_max", "def log_sum_exp(x):\n x_max = x.data.max()\n return torch.log(torch.sum(torch.exp(x - x_max), 1, keepdim=True)) + x_max", "def log1p(x):\n return Log1p().apply((x,))[0]", "def logit_link(x):\n\n return 1 / (1 + math.exp(-0.05 * x))\n # return 1 / (1 + math.exp(-0.01 * x))", "def log(x, base=math.e):\n return 0.0", "def log_sum_exp(x):\n # TF ordering\n axis = len(x.shape) - 1\n m = paddle.max(x, axis=axis)\n m2 = paddle.max(x, axis=axis, keepdim=True)\n return m + paddle.log(paddle.sum(paddle.exp(x - m2), axis=axis))", "def ln(x):\n return log(x, const.e)", "def log_sum_exp(x):\n log_reduce_sum = P.ReduceSum()\n log = P.Log()\n exp = P.Exp()\n x_max = max(x.data)\n return log(log_reduce_sum(exp(x - x_max), 1)) + x_max", "def apply(cls, x):\n return 1.0 / (1.0 + np.exp(-x))", "def prob_logit(x):\n try:\n if len(x.shape) != 1:\n raise ValueError(\"unexpected shape of input vector\\nexpected:\" + str(1) + \", actual: \" + str(len(x.shape)))\n except ValueError as e:\n print(e)\n print()\n raise\n\n x = 1.0 * np.exp(-x)\n\n probability = np.concatenate(\n (\n (x / (1.0 + x)).reshape(x.shape[0], 1),\n (1.0 / (1.0 + x)).reshape(x.shape[0], 1)\n ),\n axis=1\n )\n\n return probability", "def exp_integral(x):\n gamma = 0.577215665\n return (-gamma - expn(x,1) - np.log(x))", "def exp(x):\n raise NotImplementedError", "def log_sum_exp(x, dim=0):\n max_x = torch.max(x, dim)[0]\n new_x = x - max_x.unsqueeze(dim).expand_as(x)\n return max_x + (new_x.exp().sum(dim)).log()", "def log_pic50_exponential(x):\n if x < pic50_exp_lower:\n return -np.inf\n else:\n return -pic50_exp_rate*x", "def log(self, x, base=2):\n if x == 0:\n return 0\n return math.log(x, base)", "def logit(x: torch.Tensor, eps=1e-5) -> torch.Tensor:\n x = torch.clamp(x, eps, 1.0 - eps)\n return torch.log(x / (1.0 - x))", "def lognormalize(x, temp = 1):\n if type(x) is list: x = np.array(x)\n\n x = x - np.max(x)\n # anneal\n xp = np.power(np.exp(x), temp)\n return xp / xp.sum()", "def log_prob_from_logits(x):\n axis = len(x.shape) - 1\n m = x.max(dim=axis, keepdim=True)[0]\n return x - m - torch.log(torch.exp(x - m).sum(dim=axis, keepdim=True))", "def _logsumexp(x):\n # Search maximum.\n max_x = None\n length = len(x)\n for i in range(length):\n if max_x is None or x[i] > max_x:\n max_x = x[i]\n\n # Calculate sum of exponential differences.\n sum_exp = 0\n for i in range(length):\n diff = x[i] - max_x\n sum_exp += np.exp(diff)\n\n log_sum_exp = max_x + np.log(sum_exp)\n\n return log_sum_exp", "def asigmoid(x):\n return -log(1 / x - 1)", "def logarithm(x, eps=10e-5):\n if abs(x) >= 1:\n return float('Nan')\n\n pre_x = x\n tmp = x ** 2\n sign = -1\n i = 2\n res_x = pre_x + sign * tmp / i\n\n while abs(res_x - pre_x) > eps:\n sign = -sign\n i += 1\n tmp *= x\n pre_x = res_x\n res_x += sign * tmp / i\n\n return res_x", "def log_sum_exp(v):\n\tm = max(v)\n\tx = m * np.ones(np.size(v))\n\treturn m + 
np.log(sum(np.exp(v - x)))", "def _log_sum_exp(x):\n axis = len(x.get_shape())-1\n m = tf.reduce_max(x, axis)\n m2 = tf.reduce_max(x, axis, keep_dims=True)\n return m + tf.log(tf.reduce_sum(tf.exp(x-m2), axis))", "def exp(X):\n X = np.maximum(X,100)\n return np.exp(X)", "def log_sum_exp(x):\n axis = len(x.get_shape()) - 1\n m = tf.reduce_max(x, axis)\n m2 = tf.reduce_max(x, axis, keep_dims=True)\n return m + tf.log(tf.reduce_sum(tf.exp(x - m2), axis))" ]
[ "0.85669684", "0.76878935", "0.768062", "0.7642967", "0.7581363", "0.75357825", "0.75357825", "0.74347", "0.74209934", "0.73896044", "0.738145", "0.7318129", "0.72950226", "0.72420496", "0.7240786", "0.72397685", "0.71956825", "0.7173167", "0.7148945", "0.71363455", "0.7123995", "0.7121961", "0.7118467", "0.71108687", "0.70948493", "0.70467013", "0.7040198", "0.7030947", "0.7017597", "0.69948524" ]
0.8291577
1
Return the malware id
def get_malware_id(self, date, malware_hash):
    malware_id = self.get_one("""
        SELECT M.id
        FROM malwares M
        WHERE M.date = %s AND M.hash = %s;
        """, (date, malware_hash))
    if malware_id:
        return malware_id
    else:
        self.insert("""
            INSERT INTO malwares (date, hash)
            VALUES (%s, %s);
            """, (str(date), malware_hash))
        return self.get_malware_id(date, malware_hash)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_verifier_id():\n cmd = (\"rally verify list-verifiers | awk '/\" +\n getattr(config.CONF, 'tempest_verifier_name') +\n \"/ {print $2}'\")\n with subprocess.Popen(\n cmd, shell=True, stdout=subprocess.PIPE,\n stderr=subprocess.DEVNULL) as proc:\n verifier_uuid = proc.stdout.readline().rstrip()\n return verifier_uuid.decode(\"utf-8\")", "def get_verifier_id():\n cmd = (\"rally verify list-verifiers | awk '/\" +\n getattr(config.CONF, 'tempest_verifier_name') +\n \"/ {print $2}'\")\n proc = subprocess.Popen(cmd, shell=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n verifier_uuid = proc.stdout.readline().rstrip()\n return verifier_uuid", "def get_build_id(self, elf_file_path):\n try:\n output = subprocess.check_output([self.readelf_path, '-n', elf_file_path])\n output = bytes_to_str(output)\n result = re.search(r'Build ID:\\s*(\\S+)', output)\n if result:\n build_id = result.group(1)\n if len(build_id) < 40:\n build_id += '0' * (40 - len(build_id))\n else:\n build_id = build_id[:40]\n build_id = '0x' + build_id\n return build_id\n except subprocess.CalledProcessError:\n pass\n return \"\"", "def get_device_id(self) -> str:\n return hexlify(self.message)[36:42].decode()", "def get_debug_firmware_id_string(self):\n # Read the address via get_var_strict; this will fetch the value\n # from chipdata as well, but we can ignore it.\n chip_str = self.chipdata.get_var_strict('$_build_identifier_string')\n rawstr = self.debuginfo.get_dm_const(chip_str.address, chip_str.size)\n\n decoded_str = \"\"\n for chars in rawstr:\n if Arch.addr_per_word == 4:\n # The raw string is encoded with four chars per word\n string = cu.get_string_from_word(Arch.addr_per_word, chars)\n stop_decoding = False\n for char in string:\n if char != '\\0':\n decoded_str += char\n else:\n stop_decoding = True\n break\n if stop_decoding:\n break\n else:\n # The raw string is encoded with two chars per word\n upper_part = (chars & 0xFF00) >> 8\n lower_part = chars & 0x00FF\n # strip the null terminator.\n if upper_part != 0:\n decoded_str += chr(upper_part)\n else:\n break\n if lower_part != 0:\n decoded_str += chr(lower_part)\n else:\n break\n\n return decoded_str.strip() # Strip any leading/trailing whitespace", "def get_ident():\n return -1", "def get_firmware_id(self):\n build_id_addr = self._get_slt_entry(1)\n build_id_int = self.get_data(build_id_addr)\n if build_id_addr % Arch.addr_per_word != 0:\n # in a 32 bit word, the 16 bit build ID int can be in either the 2\n # MS bytes or 2 LS bytes\n build_id_int >>= 16\n return build_id_int & 0xFFFF", "def get_uniprot_id(uniprot_result):\n id_lines = [l for l in uniprot_result.split('\\n') if l.startswith('ID')]\n\n for id_line in id_lines:\n return id_line.split()[1]\n\n return None", "def extract_id(file_path):\n # An example of file path is AlkEthOH_tripos/AlkEthOH_chain_filt1/AlkEthOH_c555.crd\n return os.path.splitext(os.path.basename(file_path))[0][9:]", "def torrent_id(url, debug):\n id = url[url.find('tid=')+4:]\n\n if not debug:\n return id\n\n if debug == 'Y':\n print \"ID :\", id\n return id", "def scan_id(barcode):\n return scanner.scan(barcode)", "def honeypot_probe_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"honeypot_probe_id\")", "def getid(data):\n return int(data.split('/')[-1])", "def generate_fwan_process_id() -> str:\n return str(uuid.uuid4())", "async def steamid(message):\n s = message.content.strip()\n id = await parse_steam_id(s)\n return \"**ID64:** {}\\n**ID32:** {}\\n**URL:** {}\".format(\n id.to_64(),\n 
id.to_text(),\n id.community_url(),\n )", "def get_id(self):\n try:\n return self.inst.query('*IDN?')[:36]\n except errors.VisaIOError as e:\n logger.warning(e)\n return 'Device not connected.'", "def getApplicationProcessId(binaryString, startPos=0):\n if (len(binaryString) - startPos) < PRIMARY_HEADER_BYTE_SIZE:\n raise Error(\"packet header is too small\")\n return (((binaryString[startPos + 0] * 256) + binaryString[startPos + 1]) & 0x07FF)", "async def steamid_64(message):\n s = message.content.strip()\n return str((await parse_steam_id(s)).to_64())", "def tubeid():\n return binascii.hexlify(os.urandom(12))", "def getID():", "def getSteamid(value):\r\n value = str(value)\r\n if value.startswith( (\"STEAM_\", \"BOT_\")):\r\n return value\r\n userid = es.getuserid(value)\r\n if userid:\r\n steamid = playerlib.uniqueid( userid, True )\r\n return steamid\r\n return None", "async def steamid_32(message):\n s = message.content.strip()\n return (await parse_steam_id(s)).to_text()", "def _extract_image_short_id(scan_result: dict[str, Any]) -> str:\n\n if \"id\" not in scan_result:\n return \"sha256:unknown\"\n\n image_id: str = scan_result[\"id\"]\n\n if image_id.startswith(\"sha256:\"):\n return image_id[:17]\n return image_id[:10]", "def find_id(href):\n ID = idRE.search(href)\n if ID:\n return ID.group(1)", "def get_hardware_id():\r\n try:\r\n return utils.run('crossystem hwid').stdout.strip()\r\n except:\r\n logging.info(\"Not Found\")\r\n return -1", "def fingerprint(self):\n return self.identifier[:4]", "def _get_kid(message) -> str:\n if KID in message.phdr.keys():\n return base64.b64encode(message.phdr[KID]).decode(\"UTF-8\")\n return base64.b64encode(message.uhdr[KID]).decode(\"UTF-8\")", "def get_debug_firmware_id(self):\n # Read the address via get_var_strict; this will fetch the value\n # from chipdata as well, but we can ignore it.\n int_addr = self.debuginfo.get_var_strict(\n '$_build_identifier_integer'\n ).address\n build_id_int = self.debuginfo.get_dm_const(int_addr, 0)\n if Arch.addr_per_word == 4:\n # in a 32 bit word, the 16 bit build ID int can be in either the 2\n # MS bytes or 2 LS bytes\n if int_addr % Arch.addr_per_word != 0:\n build_id_int >>= 16\n build_id_int &= 0xFFFF\n return build_id_int", "def get_lis_id(chamber, url):\n match = re.search(lis_id_patterns[chamber], url)\n if match.groups:\n return match.group(1)", "def get_id(share_url):\n url = get_redirect_url(share_url)\n id_num = re.findall('(\\d*)\\?', url)[0]\n if id_num.isnumeric():\n return id_num\n else:\n print(\"Something wrong with id number\")" ]
[ "0.62486625", "0.6210062", "0.61946636", "0.61686426", "0.6033995", "0.60120165", "0.6008355", "0.60002166", "0.5939298", "0.59316343", "0.5922716", "0.5900999", "0.589496", "0.58657694", "0.5857072", "0.58481187", "0.5813568", "0.57995117", "0.5798", "0.5792419", "0.5778246", "0.5775658", "0.5769629", "0.57454264", "0.57023764", "0.5692642", "0.56882715", "0.56849694", "0.568488", "0.56722444" ]
0.6238994
1
Get python library based on sysconfig
def find_python_library():
    python_library = sysconfig.get_config_var('LIBRARY')
    if (not python_library or os.path.splitext(python_library)[1][-2:] == '.a'):
        candidate_lib_prefixes = ['', 'lib']
        candidate_implementations = ['python']
        if hasattr(sys, "pypy_version_info"):
            candidate_implementations = ['pypy-c', 'pypy3-c']
        candidate_extensions = ['.lib', '.so', '.a']
        if sysconfig.get_config_var('WITH_DYLD'):
            candidate_extensions.insert(0, '.dylib')
        candidate_versions = []
        candidate_versions.append('')
        candidate_versions.insert(0, str(sys.version_info.major) + "." + str(sys.version_info.minor))
        abiflags = getattr(sys, 'abiflags', '')
        candidate_abiflags = [abiflags]
        if abiflags:
            candidate_abiflags.append('')
        # Ensure the value injected by virtualenv is
        # returned on windows.
        # Because calling `sysconfig.get_config_var('multiarchsubdir')`
        # returns an empty string on Linux, `du_sysconfig` is only used to
        # get the value of `LIBDIR`.
        libdir = du_sysconfig.get_config_var('LIBDIR')
        if sysconfig.get_config_var('MULTIARCH'):
            masd = sysconfig.get_config_var('multiarchsubdir')
            if masd:
                if masd.startswith(os.sep):
                    masd = masd[len(os.sep):]
                libdir = os.path.join(libdir, masd)
        if libdir is None:
            libdir = os.path.abspath(os.path.join(
                sysconfig.get_config_var('LIBDEST'), "..", "libs"))
        no_valid_candidate = True
        for (pre, impl, ext, ver, abi) in itertools.product(
                candidate_lib_prefixes, candidate_implementations,
                candidate_extensions, candidate_versions, candidate_abiflags):
            candidate = os.path.join(libdir, ''.join((pre, impl, ver, abi, ext)))
            if os.path.exists(candidate):
                python_library = candidate
                no_valid_candidate = False
                break
        # If there is not valid candidate then set the python_library is empty
        if no_valid_candidate:
            python_library = ""
    return python_library
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_python_lib(plat_specific=0, standard_lib=0, prefix=None):\n if prefix is None:\n prefix = PREFIX\n if standard_lib:\n return os.path.join(prefix, \"lib-python\", sys.version[0])\n return os.path.join(prefix, 'site-packages')", "def library():\n finder = LibraryFinder()\n p = finder.find()\n return p if p else ''", "def python_lib_non_arch_dir(self):\n return get_python_lib()", "def findLibrary(name):\n assert is_unix, (\"Current implementation for Unix only (Linux, Solaris, \"\n \"AIX, FreeBSD)\")\n\n lib = None\n\n # Look in the LD_LIBRARY_PATH according to platform.\n if is_aix:\n lp = compat.getenv('LIBPATH', '')\n elif is_darwin:\n lp = compat.getenv('DYLD_LIBRARY_PATH', '')\n else:\n lp = compat.getenv('LD_LIBRARY_PATH', '')\n for path in lp.split(os.pathsep):\n libs = glob(os.path.join(path, name + '*'))\n if libs:\n lib = libs[0]\n break\n\n # Look in /etc/ld.so.cache\n # Solaris does not have /sbin/ldconfig. Just check if this file exists.\n if lib is None:\n utils.load_ldconfig_cache()\n lib = utils.LDCONFIG_CACHE.get(name)\n if lib:\n assert os.path.isfile(lib)\n\n # Look in the known safe paths.\n if lib is None:\n # Architecture independent locations.\n paths = ['/lib', '/usr/lib']\n # Architecture dependent locations.\n if compat.architecture == '32bit':\n paths.extend(['/lib32', '/usr/lib32', '/usr/lib/i386-linux-gnu'])\n else:\n paths.extend(['/lib64', '/usr/lib64', '/usr/lib/x86_64-linux-gnu'])\n\n\n # On Debian/Ubuntu /usr/bin/python is linked statically with libpython.\n # Newer Debian/Ubuntu with multiarch support putsh the libpythonX.Y.so\n # To paths like /usr/lib/i386-linux-gnu/.\n try:\n # Module available only in Python 2.7+\n import sysconfig\n # 'multiarchsubdir' works on Debian/Ubuntu only in Python 2.7 and 3.3+.\n arch_subdir = sysconfig.get_config_var('multiarchsubdir')\n # Ignore if None is returned.\n if arch_subdir:\n arch_subdir = os.path.basename(arch_subdir)\n paths.append(os.path.join('/usr/lib', arch_subdir))\n else:\n logger.debug('Multiarch directory not detected.')\n except ImportError:\n logger.debug('Multiarch directory not detected.')\n\n if is_aix:\n paths.append('/opt/freeware/lib')\n elif is_hpux:\n if compat.architecture == '32bit':\n paths.append('/usr/local/lib/hpux32')\n else:\n paths.append('/usr/local/lib/hpux64')\n elif is_freebsd or is_openbsd:\n paths.append('/usr/local/lib')\n for path in paths:\n libs = glob(os.path.join(path, name + '*'))\n if libs:\n lib = libs[0]\n break\n\n # give up :(\n if lib is None:\n return None\n\n # Resolve the file name into the soname\n if is_freebsd or is_aix or is_openbsd:\n # On FreeBSD objdump doesn't show SONAME,\n # and on AIX objdump does not exist,\n # so we just return the lib we've found\n return lib\n else:\n dir = os.path.dirname(lib)\n return os.path.join(dir, _get_so_name(lib))", "def get_python_library_path():\n def _find_lib_in_libdirs(*libdirs):\n for libdir in libdirs:\n for name in PYDYLIB_NAMES:\n full_path = os.path.join(libdir, name)\n if os.path.exists(full_path):\n return full_path\n return None\n\n # Try to get Python library name from the Python executable. It assumes that Python\n # library is not statically linked.\n executable = getattr(sys, '_base_executable', sys.executable)\n dlls = getImports(executable)\n for filename in dlls:\n for name in PYDYLIB_NAMES:\n if os.path.basename(filename) == name:\n # On Windows filename is just like 'python27.dll'. 
Convert it\n # to absolute path.\n if is_win and not os.path.isabs(filename):\n filename = getfullnameof(filename)\n # Python library found. Return absolute path to it.\n return filename\n\n # Python library NOT found. Resume searching using alternative methods.\n\n # Work around for python venv having VERSION.dll rather than pythonXY.dll\n if is_win and 'VERSION.dll' in dlls:\n pydll = 'python%d%d.dll' % sys.version_info[:2]\n return getfullnameof(pydll)\n\n # Applies only to non Windows platforms and conda.\n\n if is_conda:\n # Conda needs to be the first here since it overrules the operating\n # system specific paths.\n python_libname = _find_lib_in_libdirs(\n os.path.join(compat.base_prefix, 'lib'))\n if python_libname:\n return python_libname\n\n elif is_unix:\n for name in PYDYLIB_NAMES:\n python_libname = findLibrary(name)\n if python_libname:\n return python_libname\n\n elif is_darwin:\n # On MacPython, Analysis.assemble is able to find the libpython with\n # no additional help, asking for sys.executable dependencies.\n # However, this fails on system python, because the shared library\n # is not listed as a dependency of the binary (most probably it's\n # opened at runtime using some dlopen trickery).\n # This happens on Mac OS X when Python is compiled as Framework.\n\n # Python compiled as Framework contains same values in sys.prefix\n # and exec_prefix. That's why we can use just sys.prefix.\n # In virtualenv PyInstaller is not able to find Python library.\n # We need special care for this case.\n python_libname = _find_lib_in_libdirs(compat.base_prefix)\n if python_libname:\n return python_libname\n\n # Python library NOT found. Provide helpful feedback.\n msg = \"\"\"Python library not found: %s\n This would mean your Python installation doesn't come with proper library files.\n This usually happens by missing development package, or unsuitable build parameters of Python installation.\n\n * On Debian/Ubuntu, you would need to install Python development packages\n * apt-get install python3-dev\n * apt-get install python-dev\n * If you're building Python by yourself, please rebuild your Python with `--enable-shared` (or, `--enable-framework` on Darwin)\n \"\"\" % (\", \".join(PYDYLIB_NAMES),)\n raise IOError(msg)", "def python_lib_arch_dir(self):\n return get_python_lib(plat_specific=True)", "def get_lib_extension():\r\n if sys.platform == 'win32':\r\n return 'pyd'\r\n else:\r\n return 'so'", "def python_implementation():\n return _sys_version()[0]", "def initLibPath():\n libHash = {\n 'Framework': 1,\n 'UserControlleLib': 1,\n 'CaseLib': 1\n }\n\n binPath = os.path.split(os.path.realpath(__file__))[0]\n\n for key in libHash:\n sys.path.append(os.path.join(__getLibAbsPath(binPath, libHash[key]), key))", "def get_python_shared_library():\n for path in filter(is_valid, get_candidate_paths()):\n return path\n raise FileNotFoundError(\n \"Could not find Python shared library. 
Please report this bug at \"\n \"https://github.com/RomeoDespres/reapy/issues/new so that we can \"\n \"support more cases.\"\n )", "def selected_libs(args: Namespace) -> List[str]:\n return args.lib or [\"python\", \"lkt\"]", "def get_linked_libpython():\n if is_windows():\n return\n libdl = ctypes.CDLL(ctypes.util.find_library(\"dl\"))\n libdl.dladdr.argtypes = [ctypes.c_void_p, ctypes.POINTER(_Dl_info)]\n libdl.dladdr.restype = ctypes.c_int\n\n dlinfo = _Dl_info()\n retcode = libdl.dladdr(\n ctypes.cast(ctypes.pythonapi.Py_GetVersion, ctypes.c_void_p),\n ctypes.pointer(dlinfo))\n if retcode == 0: # means error\n return\n path = os.path.realpath(dlinfo.dli_fname.decode())\n if path == os.path.realpath(sys.executable):\n return\n return path", "def load_lib():\n root_dir = command.get_base_dirs(bin_dir)[0]\n _bin_dir, lib_dir = command.get_bin_lib_dirs(root_dir)\n magic_so = os.path.join(lib_dir, 'libmagic' + system.lib_ext)\n\n # add lib path to the front of the PATH env var\n new_path = os.pathsep.join([lib_dir, os.environ['PATH']])\n os.environ['PATH'] = new_path\n\n if os.path.exists(magic_so):\n lib = ctypes.CDLL(magic_so)\n if lib and lib._name:\n return lib\n raise ImportError('Failed to load libmagic from %(magic_so)r' % locals())", "def find_lib_directory(self):\n lib_directory = None\n if self.lib_micro_version in self.lib_directories:\n lib_directory = self.lib_micro_version\n elif self.lib_minor_version in self.lib_directories:\n lib_directory = self.lib_minor_version\n elif self.lib_major_version in self.lib_directories:\n lib_directory = self.lib_major_version\n else:\n for lv in [self.lib_micro_version, self.lib_minor_version, self.lib_major_version]:\n for d in self.lib_directories:\n if lv in d:\n lib_directory = d\n break\n else:\n continue\n break\n return lib_directory", "def _get_site_packages():\n paths_to_try = [\n # typically win32\n os.path.join(base, \"Lib\", \"site-packages\"),\n # standard\n os.path.join(base, \"lib\", \"python%s\" % sys.version[:3], \"site-packages\"),\n # typically pypy\n os.path.join(base, \"site-packages\"),\n ]\n for p in paths_to_try:\n if os.path.isdir(p):\n return p\n return os.path.join(base, \"lib\", \"python%s\" % sys.version[:3], \"site-packages\")", "def try_lib_load():\n # If we are building the documentation, then we abort the import\n rtd_build_environ = 'PYGORPHO_BUILD_READTHEDOCS'\n if rtd_build_environ in os.environ:\n import warnings\n warnings.warn('Environment variable {} exists - we assume '\n 'documentation is being built and are aborting the '\n 'import'.format(rtd_build_environ))\n return _DummyLib(), __file__\n\n path_candidates = []\n # If PYGORPHO_PATH was set we start looking there\n if os.getenv('PYGORPHO_PATH') is not None:\n path_candidates.append(os.path.abspath(os.getenv('PYGORPHO_PATH')))\n # Look in the dir. where this python file is placed\n path_candidates.append(os.path.dirname(__file__))\n # Look in dir. 
one level up from current file dir.\n path_candidates.append(os.path.dirname(path_candidates[-1]))\n # Start looking\n for path in path_candidates:\n try:\n if platform.system() == 'Windows':\n lib = ctl.load_library('pygorpho', path)\n else:\n lib = ctl.load_library('libpygorpho', path)\n # Load was successful, so return path and lib now\n return lib, path\n except OSError:\n # Lib was not here so move on...\n pass\n else:\n raise ImportError('could not find pygorpho dynamic library file '\n '(try setting PYGORPHO_PATH environment variable)')", "def resolve(config, interpreter, logger=print):\r\n\r\n setuptools_requirement = failsafe_parse(\r\n 'setuptools==%s' % config.get('python-setup', 'setuptools_version', default='2.2'))\r\n wheel_requirement = failsafe_parse(\r\n 'wheel==%s' % config.get('python-setup', 'wheel_version', default='0.22.0'))\r\n\r\n interpreter = resolve_interpreter(config, interpreter, setuptools_requirement, logger=logger)\r\n if interpreter:\r\n return resolve_interpreter(config, interpreter, wheel_requirement, logger=logger)", "def get_oslo_config():\n global cfg\n\n # First Call\n if not cfg:\n try:\n from oslo_config import cfg\n except ImportError:\n cfg = _NOT_FOUND\n\n if cfg is _NOT_FOUND:\n raise ImportError(\"oslo.config is not an automatic dependency of \"\n \"keystoneauth. If you wish to use oslo.config \"\n \"you need to import it into your application's \"\n \"requirements file. \")\n\n return cfg", "def __get_non_python_library_module_file(module_name, environment=sys.path):\n found = None\n\n # Use the longer paths first\n paths = reversed(sorted(environment))\n for path in paths:\n base_path = path.replace(\"\\\\\", \"/\")\n if stypy_parameters_copy.type_inference_file_directory_name in path:\n base_path = base_path.replace(\"/\" + stypy_parameters_copy.type_inference_file_directory_name, \"\")\n\n temp = base_path + \"/\" + module_name.replace('.', '/') + \".py\"\n if os.path.isfile(temp):\n found = temp\n # Module (__init__) names have precedence over file names\n temp = base_path + \"/\" + module_name.replace('.', '/') + \"/__init__.py\"\n if os.path.isfile(temp):\n found = temp\n break\n if found is None:\n pass\n\n return found", "def get_module_config(name):\n return _CONFIGS[name]", "def _find_mkl():\n mkl_lib = None\n if _blas_info() == 'INTEL MKL':\n plat = sys.platform\n python_dir = os.path.dirname(sys.executable)\n if plat in ['darwin', 'linux2', 'linux']:\n python_dir = os.path.dirname(python_dir)\n\n if plat == 'darwin':\n lib = '/libmkl_rt.dylib'\n elif plat == 'win32':\n lib = r'\\mkl_rt.dll'\n elif plat in ['linux2', 'linux']:\n lib = '/libmkl_rt.so'\n else:\n raise Exception('Unknown platfrom.')\n\n if plat in ['darwin', 'linux2', 'linux']:\n lib_dir = '/lib'\n else:\n lib_dir = r'\\Library\\bin'\n # Try in default Anaconda location first\n try:\n mkl_lib = cdll.LoadLibrary(python_dir+lib_dir+lib)\n except Exception:\n pass\n\n # Look in Intel Python distro location\n if mkl_lib is None:\n if plat in ['darwin', 'linux2', 'linux']:\n lib_dir = '/ext/lib'\n else:\n lib_dir = r'\\ext\\lib'\n try:\n mkl_lib = \\\n cdll.LoadLibrary(python_dir + lib_dir + lib)\n except Exception:\n pass\n return mkl_lib", "def _get_loaded_api() -> Optional[str]:\n for api in _API_LIST:\n if sys.modules.get(f\"{api}.QtCore\"):\n return api\n try:\n return _ENV_TO_MODULE[_QT_API_ENV]\n except KeyError:\n raise RuntimeError(\n \"The environment variable QT_API has the unrecognized value \"\n f\"{_QT_API_ENV!r}; \"\n f\"valid values are {[k for k in 
_ENV_TO_MODULE if k is not None]}\"\n ) from None", "def get_generic_unix_settings():\n if bits == 64:\n library_dirs = ['/opt/mqm/lib64']\n else:\n library_dirs = ['/opt/mqm/lib']\n\n include_dirs = ['/opt/mqm/inc']\n\n if build_server:\n libraries = ['mqm_r']\n else:\n libraries = ['mqic_r']\n\n return library_dirs, include_dirs, libraries", "def _get_installed_api() -> Optional[str]:\n # Fix [AttributeError: module 'importlib' has no attribute 'util']\n # See https://stackoverflow.com/a/39661116/13452582\n from importlib import util\n\n for api in _API_LIST:\n if util.find_spec(api) is not None:\n return api\n return None", "def get_backend():\n return sys.modules[__name__]", "def get_backend():\n return sys.modules[__name__]", "def get_library_name(name):\n suffix = get_sharedlib_suffix()\n if not is_windows() and name.startswith(\"lib\"):\n name = name[len(\"lib\"):]\n if suffix and name.endswith(suffix):\n name = name[:-len(suffix)]\n return name", "def get_library(self, name = 'default'):\n from ambry.library import new_library\n\n config = self.server_rc.library(name)\n\n l = new_library(config, reset = True)\n\n return l", "def get_library(self, name = 'default'):\n from ambry.library import new_library\n\n config = self.server_rc.library(name)\n\n l = new_library(config, reset = True)\n\n return l", "def getLibVersion():\n return \"Software Development Library for Linux 1.999.1\"" ]
[ "0.71885324", "0.66699564", "0.65992355", "0.6495816", "0.6380809", "0.6369318", "0.6262431", "0.616101", "0.61334044", "0.6068237", "0.6055533", "0.59773564", "0.59681046", "0.59629935", "0.5912528", "0.5888469", "0.5869429", "0.5857408", "0.58537686", "0.58526", "0.580894", "0.5808041", "0.5790803", "0.57842916", "0.5784005", "0.5784005", "0.57707506", "0.57668096", "0.57668096", "0.57651645" ]
0.7147325
1
Callback function for updating Peloton apps so that we can run integration tests in between updates of each app.
def update_callback(app):
    print("Update callback invoked for %s" % app.name)
    # TODO: Add integration tests here
    return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_duo_application_update(self):\n pass", "def update_app(self):\n\n param = self.chose_param_value(\"--app\")\n self._check_path_availability([\"get_project_dir\", \"get_project_dir_to\"])\n if self._check_whether_has_params(param):\n self.updater.update_files(\n self.analizer.get_project_dir(),\n self.analizer.get_project_dir_to(),\n param\n )\n return self.write_debug_message(\"App files upgrade is done!\\n\")\n return self.write_error_message(\"You haven't passed any params about application files\")", "def AppUpdateApp(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def sync_apps(self):\n pass", "def __update_application(self, apps, **extra_args):\n update_on_error = extra_args.get('update_on_error', False)\n # auto_enable_auth = extra_args.get(\n # 'auto_enable_auth', self.auto_enable_auth)\n\n for app in apps:\n state = app.execution.state\n old_state = state\n gc3libs.log.debug(\n \"About to update state of application: %s (currently: %s)\",\n app,\n state)\n try:\n if state not in [\n Run.State.NEW,\n Run.State.TERMINATING,\n Run.State.TERMINATED,\n ]:\n lrms = self.get_backend(app.execution.resource_name)\n try:\n state = lrms.update_job_state(app)\n # pylint: disable=broad-except\n except Exception as ex:\n gc3libs.log.debug(\n \"Error getting status of application '%s': %s: %s\",\n app, ex.__class__.__name__, ex, exc_info=True)\n state = Run.State.UNKNOWN\n # run error handler if defined\n ex = app.update_job_state_error(ex)\n if isinstance(ex, Exception):\n raise ex\n if state != old_state:\n app.changed = True\n # set log information accordingly\n if (app.execution.state == Run.State.TERMINATING\n and app.execution.returncode is not None\n and app.execution.returncode != 0):\n # there was some error, try to explain\n app.execution.info = (\n \"Execution failed on resource: %s\" %\n app.execution.resource_name)\n signal = app.execution.signal\n if signal in Run.Signals:\n app.execution.info = (\n \"Abnormal termination: %s\" % signal)\n else:\n if os.WIFSIGNALED(app.execution.returncode):\n app.execution.info = (\n \"Remote job terminated by signal %d\" %\n signal)\n else:\n app.execution.info = (\n \"Remote job exited with code %d\" %\n app.execution.exitcode)\n\n if state != Run.State.UNKNOWN or update_on_error:\n app.execution.state = state\n\n except (gc3libs.exceptions.InvalidArgument,\n gc3libs.exceptions.ConfigurationError,\n gc3libs.exceptions.UnrecoverableAuthError,\n gc3libs.exceptions.FatalError):\n # Unrecoverable; no sense in continuing --\n # pass immediately on to client code and let\n # it handle this...\n raise\n\n except gc3libs.exceptions.UnknownJob:\n # information about the job is lost, mark it as failed\n app.execution.returncode = (Run.Signals.Lost, -1)\n app.execution.state = Run.State.TERMINATED\n app.changed = True\n continue\n\n except gc3libs.exceptions.InvalidResourceName:\n # could be the corresponding LRMS has been removed\n # because of an unrecoverable error mark application\n # as state UNKNOWN\n gc3libs.log.warning(\n \"Cannot access computational resource '%s',\"\n \" marking task '%s' as UNKNOWN.\",\n app.execution.resource_name, app)\n app.execution.state = Run.State.TERMINATED\n app.changed = True\n continue\n\n # This catch-all clause is needed otherwise the loop stops\n # at the first erroneous iteration\n #\n # pylint: disable=broad-except\n except Exception as ex:\n if gc3libs.error_ignored(\n # context:\n # - module\n 'core',\n # - class\n 'Core',\n # - method\n 'update_job_state',\n # - 
actual error class\n ex.__class__.__name__,\n # - additional keywords\n 'update',\n ):\n gc3libs.log.warning(\n \"Ignored error in Core.update_job_state(): %s\", ex)\n # print again with traceback at a higher log level\n gc3libs.log.debug(\n \"(Original traceback follows.)\", exc_info=True)\n continue\n else:\n # propagate generic exceptions for debugging purposes\n raise", "def update_app(AppId=None, Name=None, Description=None, DataSources=None, Type=None, AppSource=None, Domains=None, EnableSsl=None, SslConfiguration=None, Attributes=None, Environment=None):\n pass", "def update_apps(env='development', upgrade_apps='n'):\n\n project_settings = get_settings()\n projects = build_projects_vars()\n project = projects[env]\n\n for app in project_settings.EXTRA_APPS:\n option = ''\n if app[env]['type'] == 'git' and upgrade_apps == 'y':\n option = '--upgrade'\n if app[env]['type'] == 'editable':\n option = '-e'\n\n run('workon %(name)s && pip install %(option)s %(source)s' % {'name': project['name'], 'option': option, 'source': app[env]['source']})", "def test_19_admin_update_app(self, Mock, Mock2):\r\n html_request = FakeRequest(json.dumps(self.pkg_json_not_found), 200,\r\n {'content-type': 'application/json'})\r\n Mock.return_value = html_request\r\n self.register()\r\n self.signout()\r\n self.register(fullname=\"Juan Jose\", name=\"juan\",\r\n email=\"[email protected]\", password=\"juan\")\r\n self.new_application()\r\n self.signout()\r\n # Sign in with the root user\r\n self.signin()\r\n res = self.app.get('/app/sampleapp/settings')\r\n err_msg = \"Admin users should be able to get the settings page for any app\"\r\n assert res.status == \"200 OK\", err_msg\r\n res = self.update_application(method=\"GET\")\r\n assert \"Update the application\" in res.data,\\\r\n \"The app should be updated by admin users\"\r\n res = self.update_application(new_name=\"Root\",\r\n new_short_name=\"rootsampleapp\")\r\n res = self.app.get('/app/rootsampleapp', follow_redirects=True)\r\n assert \"Root\" in res.data, \"The app should be updated by admin users\"\r\n\r\n app = db.session.query(App)\\\r\n .filter_by(short_name=\"rootsampleapp\").first()\r\n juan = db.session.query(User).filter_by(name=\"juan\").first()\r\n assert app.owner_id == juan.id, \"Owner_id should be: %s\" % juan.id\r\n assert app.owner_id != 1, \"The owner should be not updated\"\r\n res = self.update_application(short_name=\"rootsampleapp\",\r\n new_short_name=\"sampleapp\",\r\n new_long_description=\"New Long Desc\")\r\n res = self.app.get('/app/sampleapp', follow_redirects=True)\r\n err_msg = \"The long description should have been updated\"\r\n assert \"New Long Desc\" in res.data, err_msg", "def check_for_updates(appname, use_appimageupdate=True):\n z = Zap(appname)\n z.check_for_updates(use_appimageupdate=use_appimageupdate)", "def test_48_update_app_info(self, Mock, mock):\r\n html_request = FakeRequest(json.dumps(self.pkg_json_not_found), 200,\r\n {'content-type': 'application/json'})\r\n Mock.return_value = html_request\r\n\r\n self.register()\r\n self.new_application()\r\n app = db.session.query(App).first()\r\n err_msg = \"Task Presenter should be empty\"\r\n assert not app.info.get('task_presenter'), err_msg\r\n\r\n res = self.app.post('/app/sampleapp/tasks/taskpresentereditor',\r\n data={'editor': 'Some HTML code!'},\r\n follow_redirects=True)\r\n assert \"Sample App\" in res.data, \"Does not return to app details\"\r\n app = db.session.query(App).first()\r\n for i in range(10):\r\n key = \"key_%s\" % i\r\n app.info[key] = 
i\r\n db.session.add(app)\r\n db.session.commit()\r\n _info = app.info\r\n\r\n self.update_application()\r\n app = db.session.query(App).first()\r\n for key in _info:\r\n assert key in app.info.keys(), \\\r\n \"The key %s is lost and it should be here\" % key\r\n assert app.name == \"Sample App\", \"The app has not been updated\"\r\n error_msg = \"The app description has not been updated\"\r\n assert app.description == \"Description\", error_msg\r\n error_msg = \"The app long description has not been updated\"\r\n assert app.long_description == \"Long desc\", error_msg", "def test_05d_get_nonexistant_app_update(self):\r\n self.register()\r\n # GET\r\n res = self.app.get('/app/noapp/update', follow_redirects=True)\r\n assert res.status == '404 NOT FOUND', res.status\r\n # POST\r\n res = self.update_application(short_name=\"noapp\")\r\n assert res.status == '404 NOT FOUND', res.status", "def AppUpdateApp(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def set_apps(self, new_apps):\n self.remove_apps()\n for app_id in new_apps:\n self.add_app(Webapp.objects.get(pk=app_id))\n index_webapps.delay(new_apps)", "def upgrade():\n config = ConfigManager()\n apps = config['apps']\n for i, app in progressbar(enumerate(apps), redirect_stdout=True):\n z = Zap(app)\n if i == 0:\n z.update(show_spinner=False)\n else:\n z.update(check_appimage_update=False, show_spinner=False)", "def AppUpdateApp(self, request, timeout, metadata=None, with_call=False, protocol_options=None):\n raise NotImplementedError()", "def test_12_update_application(self, Mock, mock):\r\n with self.flask_app.app_context():\r\n html_request = FakeRequest(json.dumps(self.pkg_json_not_found), 200,\r\n {'content-type': 'application/json'})\r\n Mock.return_value = html_request\r\n\r\n self.register()\r\n self.new_application()\r\n\r\n # Get the Update App web page\r\n res = self.update_application(method=\"GET\")\r\n msg = \"Application: Sample App &middot; Update\"\r\n assert self.html_title(msg) in res.data, res\r\n msg = 'input id=\"id\" name=\"id\" type=\"hidden\" value=\"1\"'\r\n assert msg in res.data, res\r\n assert \"Save the changes\" in res.data, res\r\n\r\n # Check form validation\r\n res = self.update_application(new_name=\"\",\r\n new_short_name=\"\",\r\n new_description=\"New description\",\r\n new_long_description='New long desc',\r\n new_hidden=True)\r\n assert \"Please correct the errors\" in res.data, res.data\r\n\r\n # Update the application\r\n res = self.update_application(new_name=\"New Sample App\",\r\n new_short_name=\"newshortname\",\r\n new_description=\"New description\",\r\n new_long_description='New long desc',\r\n new_hidden=True)\r\n app = db.session.query(App).first()\r\n assert \"Application updated!\" in res.data, res\r\n err_msg = \"App name not updated %s\" % app.name\r\n assert app.name == \"New Sample App\", err_msg\r\n err_msg = \"App short name not updated %s\" % app.short_name\r\n assert app.short_name == \"newshortname\", err_msg\r\n err_msg = \"App description not updated %s\" % app.description\r\n assert app.description == \"New description\", err_msg\r\n err_msg = \"App long description not updated %s\" % app.long_description\r\n assert app.long_description == \"New long desc\", err_msg\r\n err_msg = \"App hidden not updated %s\" % app.hidden\r\n assert app.hidden == 1, err_msg\r\n\r\n\r\n # Check that the owner can access it even though is hidden\r\n\r\n user = 
db.session.query(User).filter_by(name='johndoe').first()\r\n user.admin = False\r\n db.session.add(user)\r\n db.session.commit()\r\n res = self.app.get('/app/newshortname/')\r\n err_msg = \"Owner should be able to see his hidden app\"\r\n assert app.name in res.data, err_msg\r\n self.signout()\r\n\r\n res = self.register(fullname='Paco', name='paco')\r\n url = '/app/newshortname/'\r\n res = self.app.get(url, follow_redirects=True)\r\n assert \"Forbidden\" in res.data, res.data\r\n assert res.status_code == 403\r\n\r\n tmp = db.session.query(App).first()\r\n tmp.hidden = 0\r\n db.session.add(tmp)\r\n db.session.commit()\r\n\r\n url = '/app/newshortname/update'\r\n res = self.app.get(url, follow_redirects=True)\r\n assert res.status_code == 403, res.status_code\r\n\r\n tmp.hidden = 1\r\n db.session.add(tmp)\r\n db.session.commit()\r\n\r\n\r\n user = db.session.query(User).filter_by(name='paco').first()\r\n user.admin = True\r\n db.session.add(user)\r\n db.session.commit()\r\n res = self.app.get('/app/newshortname/')\r\n err_msg = \"Root user should be able to see his hidden app\"\r\n assert app.name in res.data, err_msg", "def test_update_app_running():\n app_state_manager = ApplicationStateManager(MockDeploymentStateManager())\n app_state_manager.deploy_application(\"test_app\", {})\n app_status = app_state_manager.get_app_status(\"test_app\")\n assert app_status.status == ApplicationStatus.DEPLOYING\n app_state_manager.deployment_state_manager.set_deployment_statuses_healthy(0)\n app_state_manager.update()\n app_status = app_state_manager.get_app_status(\"test_app\")\n assert app_status.status == ApplicationStatus.DEPLOYING\n app_state_manager.deployment_state_manager.set_deployment_statuses_healthy(1)\n app_state_manager.update()\n app_status = app_state_manager.get_app_status(\"test_app\")\n assert app_status.status == ApplicationStatus.RUNNING\n\n # rerun update, application status should not make difference\n app_state_manager.update()\n app_status = app_state_manager.get_app_status(\"test_app\")\n assert app_status.status == ApplicationStatus.RUNNING", "def test_update_hyperflex_app_catalog(self):\n pass", "def test_update(app):\n\n assert False", "def sync_apps(self):\n cherrypy.server.httpserver.wsgi_app = self.get_app()", "def update(appname, use_appimageupdate=True):\n z = Zap(appname)\n z.update(use_appimageupdate=use_appimageupdate)", "def PatchApps(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def reload_apps(self, **kwargs) -> None:\n kwargs[\"namespace\"] = \"admin\"\n kwargs[\"__name\"] = self.name\n self.call_service(\"app/reload\", **kwargs)\n return None", "def test_with_ext_apps(self, mock_async_post, mock_settings):\n lang, _ = Language.objects.get_or_create(name=\"Test Language\", code=\"text-x-lang\")\n app1 = {\"url\": \"fakeUrl\", \"key\": \"fakeKey\"}\n mock_settings.configure_mock(**{\"EXT_APP_PUSH\": [app1]})\n\n status = notify_external_apps(instance=lang, action=\"UPDATE\")\n self.assertTrue(status.success)\n self.assertEqual(len(status.message), 0)\n self.assertEqual(mock_async_post.call_count, 1)\n\n mock_async_post.reset_mock()\n app2 = {\"url\": \"fakeUrl\"}\n mock_settings.configure_mock(**{\"EXT_APP_PUSH\": [app1, app2]})\n\n status = notify_external_apps(instance=lang, action=\"UPDATE\")\n self.assertTrue(status.success)\n self.assertEqual(len(status.message), 0)\n self.assertEqual(mock_async_post.call_count, 2)", "def 
test_update_deployment(self):\n pass", "def test_update_system(self):\n pass", "def update_application(self, method=\"POST\", short_name=\"sampleapp\", id=1,\r\n new_name=\"Sample App\", new_short_name=\"sampleapp\",\r\n new_description=\"Description\",\r\n new_allow_anonymous_contributors=\"False\",\r\n new_category_id=\"2\",\r\n new_long_description=\"Long desc\",\r\n new_sched=\"random\",\r\n new_hidden=False):\r\n if method == \"POST\":\r\n if new_hidden:\r\n return self.app.post(\"/app/%s/update\" % short_name,\r\n data={\r\n 'id': id,\r\n 'name': new_name,\r\n 'short_name': new_short_name,\r\n 'description': new_description,\r\n 'allow_anonymous_contributors': new_allow_anonymous_contributors,\r\n 'category_id': new_category_id,\r\n 'long_description': new_long_description,\r\n 'sched': new_sched,\r\n 'hidden': new_hidden,\r\n 'btn': 'Save'},\r\n follow_redirects=True)\r\n else:\r\n return self.app.post(\"/app/%s/update\" % short_name,\r\n data={'id': id, 'name': new_name,\r\n 'short_name': new_short_name,\r\n 'allow_anonymous_contributors': new_allow_anonymous_contributors,\r\n 'category_id': new_category_id,\r\n 'long_description': new_long_description,\r\n 'sched': new_sched,\r\n 'description': new_description,\r\n 'btn': 'Save'\r\n },\r\n follow_redirects=True)\r\n else:\r\n return self.app.get(\"/app/%s/update\" % short_name,\r\n follow_redirects=True)", "def update():\n appname = request.form['applicationname']\n appdetails = request.form['appinfo']\n obj = json.loads(appdetails)\n if appname and obj:\n db.hset('applications', appname, appdetails)\n return json.dumps({'message':'success'})", "def update_urls_file(self, app_name):\n\n logger.info(\"\\n--------------------------------------------------------\\n\\t\\tRefreshing application list in urls.py\")\n copyfile(settings.SITE_ROOT + \"/\" + settings.APPLICATION_NAME + \"/urls.py\", settings.SITE_ROOT + \"/\" + settings.APPLICATION_NAME + \"/urls.py.backup\")\n t = loader.get_template('applicationManager/applicationFileTemplates/project_urls_py.txt')\n\n apps = Application.objects.all()\n\n c = {'applist': apps}\n rendered = t.render(c)\n open(settings.SITE_ROOT + \"/\" + settings.APPLICATION_NAME + \"/urls.py\", \"w+\").write(rendered)", "def test_03_app_put(self):\r\n for i in range(300):\r\n app = App(name=str(i), short_name=str(i),\r\n description=str(i), owner_id=1)\r\n db.session.add(app)\r\n db.session.commit()\r\n\r\n url = '?api_key=%s' % (self.api_key)\r\n self.check_limit(url, 'put', 'app')" ]
[ "0.68930477", "0.68622243", "0.67786133", "0.67368746", "0.67218983", "0.6712058", "0.6680836", "0.6652264", "0.64698905", "0.6449498", "0.63019586", "0.63001305", "0.62754816", "0.6259966", "0.625121", "0.618488", "0.6163178", "0.6054064", "0.6050889", "0.60431993", "0.5977026", "0.5895361", "0.58819693", "0.58642983", "0.5844571", "0.583642", "0.57716465", "0.5750817", "0.5730453", "0.5729522" ]
0.76656497
0
Load the cluster config from a yaml file
def load(cfg_file):
    with open(cfg_file, "r") as f:
        try:
            cfg = yaml.load(f)
        except yaml.YAMLError as ex:
            print("Failed to unmarshal cluster config %s" % cfg_file)
            raise ex
    return Cluster(cfg_file, **cfg)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_yaml(yaml_name):\n print('training network configuration file is {0}'.format(yaml_name))\n util.check_file_exist(yaml_name)\n config = util.load_yaml_file(yaml_name)\n return config", "def load_cluster_config(path):\n if path:\n path = os.path.join(os.path.dirname(__file__), os.path.expandvars(path))\n default_cluster_config = io.load_configfile(path)\n else:\n default_cluster_config = {}\n if \"__default__\" not in default_cluster_config:\n default_cluster_config[\"__default__\"] = {}\n return default_cluster_config", "def load_config(filename):\n AS[\"config\"] = load_yaml_file(filename)", "def load_cfg(filepath=\"./config.yaml\"):\n with open(filepath, \"r\") as f:\n return yaml.load(f, Loader=yaml.FullLoader)", "def load_yaml(cls, file=None):\n if file is None: file = f'{rcp.base_path}cfg.yml'\n try:\n with open(file, 'r') as f:\n config = yaml.load(f, Loader=yaml.FullLoader)\n cfg.__dict__ = config\n return cfg\n except FileNotFoundError:\n print(\"Config file doesn't exist.\")", "def load_config(config_file):\n with open(config_file) as f:\n return yaml.load(f)", "def load_config(path):\n return yaml.load(open('config.yaml', 'r'), Loader=yaml.SafeLoader)", "def load_config(filename):\n with open(filename, \"r\") as stream:\n try:\n global CONFIG\n CONFIG = yaml.load(stream)\n except yaml.YAMLError as ex:\n print(ex)", "def get_cfg_from_yaml(self):\n try:\n with open(self.parsed_cfg_path, 'r') as cfg_yaml:\n self.from_yaml_cfg_dict = yaml.load(cfg_yaml)\n except Exception as exc:\n print(exc)\n traceback.print_exc()\n self.from_yaml_cfg_dict = {}", "def _cfg_from_file(filename):\n import yaml\n with open(filename, 'r') as f:\n cfg = yaml.load(f)\n return cfg", "def load_config(config_file_path):\n global config\n try:\n config_file_path = os.path.abspath(config_file_path)\n assert config_file_path\n with open(file=config_file_path) as yaml_data:\n loaded_config = yaml.safe_load(yaml_data)\n for k in config:\n if k in loaded_config:\n config[k] = loaded_config[k]\n except AssertionError:\n print(f\"Config file {config_file_path} not found or unreadable ! Exiting..\")\n quit(1)", "def load_config_file(path):\n with open(path) as file:\n return yaml.load(file, Loader=yaml.FullLoader)", "def config_from_yaml(self, filename):\n with open(filename, 'r') as f:\n config = yaml.load(f)\n config = self._process_config_imports(config)\n self._config.update(config)", "def load_config(path):\n return yaml.load(open(path, 'r'), Loader=yaml.SafeLoader)", "def load_cluster_config_json(cluster_config):\n\n if \".json\" not in cluster_config:\n cluster_config = \"{}.json\".format(cluster_config)\n\n with open(cluster_config) as f:\n cluster = json.loads(f.read())\n\n return cluster", "def load_cfg(yaml_filepath):\n # Read YAML experiment definition file\n with open(yaml_filepath, 'r') as stream:\n cfg = yaml.load(stream)\n cfg = make_paths_absolute(os.path.dirname(yaml_filepath), cfg)\n return cfg", "def load_config():\n global config\n with open('config.yml', 'r') as file:\n config = yaml.load(file)", "def load_yaml_file(yaml_file):\n try:\n # Get the configuration parameters which contain the region, vpc name, template filename, VPC CIDR blocks\n s = open(yaml_file).read()\n config = list(yaml.load_all(s))[0]\n\n except Exception as e:\n # We're expecting the user parameters to be encoded as YAML\n # so we can pass multiple values. 
If the YAML can't be decoded\n # then return failure with a helpful message.\n print(e)\n raise Exception('Input configuration parameters could not be decoded as YAML')\n\n return config", "def load_config(path):\n with open(path, 'r') as stream:\n return yaml.load(stream)", "def load(file):\n _config.load(file)", "def load_config():\n proj_dir = os.path.dirname(os.path.abspath(__file__))\n config_path = os.path.join(proj_dir, \"config.yml\")\n conf = yaml.safe_load(open(config_path))\n return conf", "def read_config(self,confile):\n\n\n print(\"reading:\",confile)\n with open(confile) as parf:\n data=yaml.load(parf)\n\n\n return data", "def load_yaml_config(config_path):\n\n with open(config_path, 'r') as user_config_file:\n return yaml.load(user_config_file)", "def testLoadConfigs(self):\n config_path = GetTestFilePath('valid/config.yaml')\n pool = lab_config.LabConfigPool(\n lab_config.LocalFileEnumerator(config_path, lab_config.IsYaml))\n pool.LoadConfigs()\n self.assertIsNotNone(pool.GetLabConfig())\n self.assertIsNotNone(pool.GetHostConfigs('cluster1'))\n self.assertIsNotNone(pool.GetHostConfigs('cluster2'))", "def load_config(filepath=None):\n if filepath is None:\n raise ValueError(\"The filepath is None, please check the config file is exist\")\n\n with open(filepath, \"r\") as stream:\n output = dict()\n try:\n content = yaml.load(stream)\n output.update(content)\n return output\n except yaml.YAMLError as e:\n print(e)", "def load_config(fpath):\n assert os.path.isfile(fpath), 'File does not exist'\n\n with open(fpath, 'r') as file:\n cfg = yaml.load(file, Loader=yaml.FullLoader)\n\n return cfg", "def _load_yaml_config(config_file):\n if type(config_file) is file:\n Config.CONFIG.update(yaml.load(config_file) or {})\n return Config.CONFIG\n else:\n try:\n with open(config_file, 'r') as f:\n return yaml.load(f)\n except IOError as e:\n e.message = \"Could not open configuration file \\\"{}\\\".\".format(config_file)\n raise e", "def load_config(file_path):\n _, ext = os.path.splitext(file_path)\n assert ext in ['.yml', '.yaml'], \"only support yaml files for now\"\n config = yaml.load(open(file_path, 'rb'), Loader=yaml.Loader)\n return config", "def load_config(file_path):\n _, ext = os.path.splitext(file_path)\n assert ext in [\".yml\", \".yaml\"], \"only support yaml files for now\"\n config = yaml.load(open(file_path, \"rb\"), Loader=yaml.Loader)\n return config", "def load_config(logdir):\n with open(os.path.join(logdir, 'config.yml'), 'r') as f:\n return Namespace(**yaml.load(f))" ]
[ "0.7430753", "0.7339824", "0.7308093", "0.7155157", "0.70941925", "0.7040105", "0.6956988", "0.68890375", "0.68878645", "0.6852141", "0.68291074", "0.68223083", "0.6813968", "0.6801743", "0.6793971", "0.6786656", "0.6752283", "0.67177564", "0.6716593", "0.6700956", "0.6686671", "0.6676107", "0.6639133", "0.66248316", "0.6624194", "0.6614809", "0.6612239", "0.6589353", "0.6584853", "0.6559534" ]
0.78930426
0
Print the diff between current and desired job config
def diff_config(self, app, verbose=False):
    print(">>>>>>>> Job config diff for %s <<<<<<<<" % app.name)
    cfg_dicts = []
    factory = TSimpleJSONProtocolFactory()
    for cfg in app.current_job_config, app.desired_job_config:
        if cfg:
            cfg_json = TSerialization.serialize(
                cfg, protocol_factory=factory
            )
            cfg_dict = json.loads(cfg_json)
            # Unset task resources to avoid confusing the job config differ
            cfg_dict["taskConfig"]["resources"] = None
        else:
            cfg_dict = {}
        cfg_dicts.append(cfg_dict)
    if verbose:
        for cfg_dict in cfg_dicts:
            print(json.dumps(cfg_dict, indent=4, sort_keys=True))
    for line in json_delta.udiff(cfg_dicts[0], cfg_dicts[1]):
        print(line)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compare(self, delay_factor=1):\n delay_factor = self.select_delay_factor(delay_factor)\n compare_command = \"show candidate diff all\"\n no_changes = '% No configuration changes found.'\n self.config_mode()\n output = self.send_command(compare_command, delay_factor=delay_factor)\n if no_changes in output:\n return \"\"\n return output", "def test_get_job_config(self):\n test_app = self._create_app()\n class_path = \"spark.jobserver.VeryShortDoubleJob\"\n config = {\"test_config\": \"test_config_value\"}\n job = self.client.jobs.create(test_app, class_path,\n ctx=self._get_functional_context(),\n conf=config)\n time.sleep(3)\n self._wait_till_job_is_done(job)\n job = self.client.jobs.get(job.jobId)\n job_config = job.get_config()\n self.assertEqual(\"FINISHED\", job.status)\n self.assertEqual(config[\"test_config\"], job_config[\"test_config\"])", "def job_changes(self):\n cols = \"{:25}{:12.1f}\"\n cols2 = \"{:25}{:12.1f}{:12.1f}\"\n\n lines = [\"Benefit from job creation: \" + self.plant.name + \"\\n\"]\n\n row7 = self.farmer.labor()[1]\n row1 = self.farmer.labor_cost()[1]\n row8 = self.reseller.driving_work()[1]\n row2 = self.reseller.driving_wages()[1]\n row11 = self.reseller.loading_work()[1]\n row12 = self.reseller.loading_wages()[1]\n row9 = self.cofiring_plant.cofuel_om_work()[1]\n row3 = self.cofiring_plant.cofuel_om_wages()[1]\n row6 = -self.coal_work_lost[1]\n row5 = -self.coal_wages_lost[1]\n row10 = self.labor[1]\n row4 = self.wages[1]\n\n display_as(row6, \"FTE\")\n display_as(row7, \"FTE\")\n display_as(row8, \"FTE\")\n display_as(row9, \"FTE\")\n display_as(row10, \"FTE\")\n display_as(row11, \"FTE\")\n\n lines.append(cols2.format(\"Biomass collection\", row7, row1))\n lines.append(cols2.format(\"Biomass transportation\", row8, row2))\n lines.append(cols2.format(\"Biomass loading\", row11, row12))\n lines.append(cols2.format(\"O&M\", row9, row3))\n lines.append(cols2.format(\"Mining\", row6, row5))\n lines.append(cols2.format(\"Total\", row10, row4))\n lines.append(\"\")\n lines.append(cols.format(\"Area collected\", self.supply_chain.area()))\n lines.append(\n cols.format(\"Collection radius\", self.supply_chain.collection_radius())\n )\n lines.append(\n cols.format(\"Maximum transport time\", self.reseller.max_trip_time())\n )\n lines.append(cols.format(\"Number of truck trips\", self.reseller.truck_trips[1]))\n lines.append(\"\")\n lines.append(\"Mining job lost from co-firing at \" + self.plant.name + \"\\n\")\n lines.append(cols.format(\"Coal saved\", self.coal_saved[1]))\n lines.append(\n cols.format(\"Productivity\", self.mining_parameter.productivity_underground)\n )\n lines.append(cols.format(\"Job lost\", self.coal_work_lost[1]))\n lines.append(cols.format(\"Job lost\", display_as(self.coal_work_lost[1], \"FTE\")))\n lines.append(\n cols.format(\"Wage\", display_as(self.mining_parameter.wage_mining, \"USD/hr\"))\n )\n lines.append(cols.format(\"Wage lost\", self.coal_wages_lost[1]))\n return \"\\n\".join(lines)", "def test_configurator(self):\n runner = Runner(YamlManifest(manifest))\n run1 = runner.run(JobOptions(resource=\"test1\"))\n assert not run1.unexpectedAbort, run1.unexpectedAbort.getStackTrace()\n assert len(run1.workDone) == 1, run1.workDone\n result = list(run1.workDone.values())[0].result\n self.assertEqual(result.outputs, {\"fact1\": \"test1\", \"fact2\": \"test\"})\n self.assertEqual(result.result.get(\"stdout\"), sys.executable)\n assert run1.status == Status.ok, run1.summary()", "def test_job_config_history(self):\n 
self.config_page.visit()\n self.config_page.expand_advanced()\n assert self.job_config_history['HISTORY_ROOT_DIR'] == self.config_page.get_history_root_dir()\n assert self.job_config_history['MAX_HISTORY_ENTRIES'] == self.config_page.get_max_history_entries()\n assert str(self.job_config_history['SKIP_DUPLICATE_HISTORY']).lower() == self.config_page.get_skip_duplicate_history()\n assert self.job_config_history['SHOW_BUILD_BADGES'] == self.config_page.get_show_build_badges()", "def print_config(_run):\n final_config = _run.config\n config_mods = _run.config_modifications\n print(_format_config(final_config, config_mods))", "def get_config_diff(context, target, file1, file2):\n\n result = context.get_operation('get_config_diff')\n return result", "def _get_job_defaults():\n\n lines = []\n lines += '[Job]\\n'\n j = Job()\n for cj in j._config_names:\n v = getattr(j, cj)\n lines += '%s = %s\\n' % (cj, v)\n lines += '\\n'\n return lines", "def print_configuration():\n configlog.info(\"-\" * 50)\n configlog.info(\"Initializing with the following configuration\")\n configlog.info(\"Check constants.py to change any of the following\")\n configlog.info(\"-\" * 50)\n configlog.info(\"COMPANY_NAME: {}\".format(COMPANY_NAME))\n configlog.info(\"ACTIVITY_TYPE_FILTER: {}\".format(ACTIVITY_TYPE_FILTER))\n configlog.info(\"APPLY_ACTIVITY_FILTER: {}\".format(APPLY_ACTIVITY_FILTER))\n configlog.info(\"-\" * 50)\n configlog.info(\"Assuming an input dataset with the following features\")\n configlog.info(\"-\" * 50)\n configlog.info(\"BUDGET_COLUMN_NAME: {}\".format(BUDGET_COLUMN_NAME))\n configlog.info(\"COMPANY_COLUMN_NAME: {}\".format(COMPANY_COLUMN_NAME))\n configlog.info(\"ACTIVITY_COLUMN_NAME: {}\".format(ACTIVITY_COLUMN_NAME))\n configlog.info(\"COUNTRY_COLUMN_NAME: {}\".format(COUNTRY_COLUMN_NAME))\n configlog.info(\"-\" * 50)\n configlog.info(\"Fallback data sources\")\n configlog.info(\"-\" * 50)\n configlog.info(\"DEFAULT_URL: {}\".format(DEFAULT_URL))\n configlog.info(\"DEFAULT_LOCAL_DATA_PATH: {}\".format(DEFAULT_LOCAL_DATA_PATH))\n configlog.info(\"-\" * 50)", "def test_print_config(self) -> None:\n out = io.StringIO()\n with contextlib.redirect_stdout(out):\n self.config.print()\n self.assertEqual(\n out.getvalue().rstrip(),\n \"{}: {}\\n{}\".format(\"q2\", \"abcdefghij\", \"^\".rjust(7)),\n )", "def running_config_diff(self, **kwargs):\n\n return self.api_request(self._get_method_fullname(\"running_config_diff\"), kwargs)", "def pretty_jobs_status(jobs):\n print(tabulate.tabulate(jobs, headers=\"keys\"))", "def test_job_result_with_conf(self):\n test_app = self._create_app()\n conf = \"stress.test.longpijob.duration = 1\"\n class_path = \"spark.jobserver.LongPiJob\"\n job = self._create_job(test_app, class_path,\n conf=conf,\n ctx=self._get_functional_context())\n time.sleep(3)\n created_job = self.client.jobs.get(job.jobId)\n self.assertEqual(job.jobId, created_job.jobId)\n status = created_job.status\n self.assertTrue(status == \"RUNNING\" or status == \"FINISHED\")\n self._wait_till_job_is_done(created_job)\n job = self.client.jobs.get(job.jobId)\n self.assertEqual(\"FINISHED\", job.status)\n sys.stderr.write(\"duration %s\" % job.duration)\n self.assertTrue(\"1.\" in job.duration)", "def print_config(self):\n for pod in self.pods:\n for lb in pod.limbs:\n print '%s limb %s ' % (pod.name, lb.name)\n for br in lb.branches:\n br.printInfo()\n sys.stdout.flush()", "def print_configuration_info():\n print(\"Selected dataset:\", DATASET) \n print(\"Dataset base directory:\", BASE_INPUT_DIR) \n 
print(\"Daytime option:\", DAYTIME) \n print(\"Nones option:\", NONES) \n print(\"Selected action/activity representation:\", OP)\n print(\"Number of epochs: \", EPOCHS)\n print(\"Number of folds for cross-validation: \", FOLDS)\n print(\"Input directory for data files:\", INPUT_DIR) \n print(\"Embedding matrix file:\", EMBEDDING_WEIGHTS)\n print(\"Action sequences (X) file:\", X_FILE) \n print(\"Word embedding file for activities:\", ACTIVITY_EMBEDDINGS) \n print(\"Activity to int mappings:\", ACTIVITY_TO_INT)\n print(\"Int to activity mappings:\", INT_TO_ACTIVITY) \n print(\"Experiment ID:\", EXPERIMENT_ID)\n print(\"Treat imbalance data:\", TREAT_IMBALANCE)\n print(\"Save intermediate plots:\", SAVE)\n print(\"Batch size:\", BATCH_SIZE)\n print(\"Dropout:\", DROPOUT)\n print(\"Loss:\", LOSS)", "def running_config(self):\n return self.show(\"show running-config\")", "def print_cfg(self, out=stdout):\n print(self.cmaboss_sim.str_cfg(), file=out)", "def get_config():\r\n config = argparser()\r\n\r\n # use the configurations to reproduce the experiments\r\n if not config.reproduct_mode == \"free\":\r\n config = reproduct_config(config)\r\n \r\n print(config)\r\n\r\n with open(config.out_dir + \"/test_results.txt\", \"w\") as txt_f:\r\n txt_f.write(str(config) + \"\\n\")\r\n\r\n return config", "def perform_diff_config_result_page():\n #  Get all fields from form\n module = request.forms.getall('module')\n client = request.forms.getall('client')\n version1 = request.forms.getall('version1')\n version2 = request.forms.getall('version2')\n\n # Build html\n modif = do_ck5050_ini_diff_request(module, client, version1, version2)\n\n # Build template page\n with open(\"./header.html\") as header, open('./config.tpl') as config, open('./footer.html') as footer:\n template_html = header.read() + config.read() + footer.read()\n\n if not modif:\n modif = {}\n\n output = template(template_html, module=module, client=client, version1=version1,\n version2=version2, modif=modif)\n\n return output", "def statusJob(self, job):\n with self.thread_lock:\n name = job.name\n job_container = self.shared_dags[job]\n job_dag = job_container.getDAG()\n\n # If there is no timing, then the job is not finished\n if job_container.getTime():\n job_container.addCaveat('time: ' + job_container.getTime())\n if job.getResult() == False:\n self.active.remove(job)\n self.killJobs()\n return\n else:\n self.job_queue_count -= 1\n job_dag.delete_node(job)\n self.active.remove(job)\n if self.args.download_only:\n result = ' -Downloaded | '\n else:\n result = ' --Finished | '\n\n else:\n result = ' Launching | '\n\n # Format job name length field\n name_cnt = (self.term_width - len(job.name)) + 2 # 2 character buffer\n result = strftime(\"%H:%M\") + result + job.name + ' '*name_cnt\n\n # Format caveat length\n caveats = job_container.getCaveats()\n caveat_cnt = self.max_caveat_length - len(caveats)\n\n if caveats:\n result = result + caveats + ' '*caveat_cnt\n else:\n result = result + ' '*caveat_cnt\n\n remaining = job_dag.size()\n print(result, \"remaining: %-3d active: %-2d\" % (remaining, len(self.active)), [x.name for x in self.active])", "def print_JobProperties(self,mode='minimal'):\n print_view=''\n if self._context_name.count('.')==0:\n additionalinfo=''\n if (mode=='tree&valuenondefault'):\n additionalinfo=\"(Only non default values)\"\n if mode != \"minimal\":\n additionalinfo+=\"(X indicates locked properties)\"\n \n self._log.info(\"### Printing the job properties container %s %s ###\",\n self.__name__ 
,additionalinfo)\n\n if(mode=='tree' or mode.startswith('tree&value')):\n print (' [-]'+self.__name__)\n print (' | ')\n elif(mode=='print_v'):\n print_view+=' [-]'+self.__name__+'\\n'+' | '+'\\n' \n else:\n self._log.info(' [-]'+self.__name__)\n \n for k in sorted(self.__dict__.keys()):\n m=self.__dict__.get(k)\n if hasattr(m,'print_JobProperty'):\n m.print_JobProperty(mode)\n if mode=='print_v': \n print_view+=str(m)+'\\n'\n elif hasattr(m,'print_JobProperties'):\n indent='-'\n for i in range(m._context_name.count('.')-1):\n indent+='-'\n if(mode=='tree' or mode.startswith('tree&value')): \n print (' /'+indent+'> ## '+m.__name__+' ## ')\n elif(mode=='print_v'): \n print_view+=' /'+indent+'> ## '+m.__name__+' ## '+'\\n' \n else:\n self._log.info(' /'+indent+'> ## '+m.__name__+' ## ')\n\n if(mode=='print_v'):\n print_view+=m.print_JobProperties(mode)\n else:\n m.print_JobProperties(mode)\n \n if mode=='print_v':\n return print_view \n if self._context_name.count('.')==0:\n self._log.info(\"### Ends the job properties container %s ###\",\n self.__name__ )", "def print_test_comparison(test_name, expected, result):\n line = \"\\n\"\n line += \"-\" * 60 + \"\\n\"\n line += \"{}\\n\".format(test_name)\n line += \"-\" * 60 + \"\\n\"\n line += \"-\" * 26 + \"EXPECTED\" + \"-\" * 26 + \"\\n\"\n line += \"{}\\n\".format(expected)\n line += \"-\" * 28 + \"END\" + \"-\" * 29 + \"\\n\"\n line += \"-\" * 27 + \"RESULT\" + \"-\" * 27 + \"\\n\"\n line += \"{}\\n\".format(result)\n line += \"-\" * 28 + \"END\" + \"-\" * 29 + \"\\n\"\n line += \"\\n\"\n return line", "def show_game_status(self, game, diff, step):\n if self.verbose:\n print('========== Step {} =========='.format(step))\n print('Time cost ===> {:.3f}s'.format(diff))\n game.print_game()", "def print_JobProperty(self,mode='minimal'):\n indent=''\n obj_p=object.__getattribute__(self, 'StoredValue')\n if self.statusOn: \n obj_ps=obj_p \n else: \n obj_ps=None \n for i in range(self._context_name.count('.')-2):\n indent+='-'\n if self.is_locked():\n indent+='x'\n else:\n indent+='-'\n \n if mode=='minimal': \n self._log.info(\" %s-> %s = %s \",indent,\n self._context_name,pprint.pformat(obj_ps) )\n \n elif(mode=='full'):\n if len(self.allowedTypes)>0:\n at=self.allowedTypes\n else:\n at='- - -'\n if len(self.allowedValues)>0:\n av=pprint.pformat(self.allowedValues)\n else:\n av='- - -' \n self._log.info(\"%s-> %s = %s\\n %40s %s\\n %40s \\\n %s\\n %40s %s\\n %40s %s\\n %40s %s\\n %40s %s\",\n indent,\n self._context_name,\n self.__getattribute__('StoredValue'),\n 'allowedTypes :',at,\n 'allowedValues :',av,\n 'default value :', self.__class__.StoredValue,\n 'statusOn :',self.statusOn,\n 'locked :',self.is_locked(),\n 'StoredValue :',pprint.pformat(obj_p))\n elif(mode=='tree'):\n print (' |'+indent+' '+self.__name__)\n elif(mode.startswith('tree&value')): \n if mode=='tree&value':\n printit=True\n elif mode=='tree&valuenondefault': \n if self.isDefault():\n printit=False\n else:\n printit=True\n else:\n raise RuntimeError(\"This is a non valid print mode %s \" % (mode,))\n if printit: \n print (' |'+indent+' '+self.__name__+\" = \"+\\\n self.toBePrinted())\n# fnToBePrinted(self)\n\n elif(mode=='print_v'):\n return ' |'+indent+' '+self.__name__+\" = \"+\\\n self.toBePrinted()\n # fnToBePrinted(self)\n\n else:\n raise ValueError('Unknow mode, possible modes are: '\n 'minimal, full, tree, tree&value ')", "def config_changes(cli):\n result = []\n in_config = False\n for line in cli.splitlines():\n if not in_config and line == 'Building 
configuration...':\n in_config = True\n elif in_config:\n result.append(line)\n\n return '\\n'.join(result)", "def config(ctx):\n if not ctx.invoked_subcommand:\n cfg = ctx.obj['cfg']\n for section in cfg.sections():\n print(\"[\", section, \"]\")\n for option in cfg[section]:\n print(option, \" = \", cfg[section][option])", "def test_run_pretty_print():\n date = datetime.now()\n date2 = date + timedelta(days=2)\n duration_in_minutes = 65\n run1 = Run(date, duration_in_minutes/60)\n run2 = Run(date2, duration_in_minutes/60)\n\n assert [run1.pretty_print(), run2.pretty_print() ] == [\n \"{} - {}\".format(date.strftime(\"%H:%M\"), (date + timedelta(minutes=65)).strftime(\"%H:%M\")),\n \"{} - {}\".format(date2.strftime(\"%a, %H:%M\"), (date2 + timedelta(minutes=65)).strftime(\"%H:%M\")),\n ]", "def bisect_status():\n return \"Bisecting: {} revisions left to test after this (roughly {} steps).\".format(\n ceil((bisect_revisions() - 1) / 2), bisect_steps_remaining() - 1,\n )", "def display_config_info():\n print(\"Merlin Configuration\")\n print(\"-\" * 25)\n print(\"\")\n\n conf = default_config_info()\n sconf = {}\n excpts = {}\n try:\n conf[\"broker server\"] = broker.get_connection_string(include_password=False)\n sconf[\"broker server\"] = broker.get_connection_string()\n conf[\"broker ssl\"] = broker.get_ssl_config()\n except Exception as e:\n conf[\"broker server\"] = \"Broker server error.\"\n excpts[\"broker server\"] = e\n\n try:\n conf[\"results server\"] = results_backend.get_connection_string(include_password=False)\n sconf[\"results server\"] = results_backend.get_connection_string()\n conf[\"results ssl\"] = results_backend.get_ssl_config()\n except Exception as e:\n conf[\"results server\"] = \"No results server configured or error.\"\n excpts[\"results server\"] = e\n\n print(tabulate(conf.items(), tablefmt=\"presto\"))\n\n if excpts:\n print(\"\\nExceptions:\")\n for k, v in excpts.items():\n print(f\"{k}: {v}\")\n\n check_server_access(sconf)", "def updater_job_status(self,request):\n\t\t# ----------- DEBUG -----------------\n\t\tMODULE.info(\"updater/installer/status invoked with:\")\n\t\tpp = pprint.PrettyPrinter(indent=4)\n\t\tst = pp.pformat(request.options).split(\"\\n\")\n\t\tfor s in st:\n\t\t\t\tMODULE.info(\" << %s\" % s)\n\t\t# -----------------------------------\n\n\t\t# First check if a job is running. 
This will update the\n\t\t# internal field self._current_job, or if the job is finished,\n\t\t# it would return an empty string.\n\t\tinst = self.__which_job_is_running()\n\n\t\tjob = request.options.get('job','')\n\t\tresult = {}\n\t\tif job in INSTALLERS:\n\t\t\t# make a copy, not a reference!\n#\t\t\tresult = {}\n#\t\t\tfor arg in INSTALLERS[job]:\n#\t\t\t\tresult[arg] = INSTALLERS[job][arg]\n\t\t\tresult = deepcopy(INSTALLERS[job])\n\n\t\t\tif 'statusfile' in INSTALLERS[job]:\n\t\t\t\ttry:\n\t\t\t\t\tfor line in open(INSTALLERS[job]['statusfile']):\n\t\t\t\t\t\tfields = line.strip().split('=')\n\t\t\t\t\t\tif len(fields) == 2:\n\t\t\t\t\t\t\tresult['_%s_' % fields[0]] = fields[1]\n\t\t\t\texcept:\n\t\t\t\t\tpass\n\t\t\t# if we encounter that the frontend asks about the last job we\n\t\t\t# have executed -> include its properties too.\n\t\t\tif self._current_job:\n\t\t\t\tif self._current_job['job'] == job:\n\t\t\t\t\tfor f in self._current_job:\n\t\t\t\t\t\tresult[f] = self._current_job[f]\n\t\t\t\t\t\tif isinstance(result[f],str) and result[f].isdigit():\n\t\t\t\t\t\t\tresult[f] = int(result[f])\n\t\t\t\tif inst == '':\n\t\t\t\t\tresult['running'] = False\n\t\t\telse:\n\t\t\t\t# no job running but status for release was asked? \n\t\t\t\t# maybe the server restarted after job finished\n\t\t\t\t# and the frontend did not get that information\n\t\t\t\t# Bug #26318\n\t\t\t\tif job == 'release':\n\t\t\t\t\tresult['detail'] = '%s-%s' % (self.ucr.get('version/version'), self.ucr.get('version/patchlevel'))\n\t\t\t\telse:\n\t\t\t\t\tresult['detail'] = _('Unknown')\n\n\t\t\t# -------------- additional fields -----------------\n\n\t\t\t# elapsed time, ready to be displayed. (not seconds, but rather\n\t\t\t# the formatted string)\n\t\t\tif 'time' in result and 'started' in result:\n\t\t\t\telapsed = result['time'] - result['started']\n\t\t\t\tif elapsed < 60:\n\t\t\t\t\tresult['elapsed'] = '%ds' % elapsed\n\t\t\t\telse:\n\t\t\t\t\tmins = int(elapsed/60)\n\t\t\t\t\tsecs = elapsed - (60 * mins)\n\t\t\t\t\tif mins < 60:\n\t\t\t\t\t\tresult['elapsed'] = '%d:%02dm' % (mins,secs)\n\t\t\t\t\telse:\n\t\t\t\t\t\thrs = int(mins/60)\n\t\t\t\t\t\tmins = mins - (60*hrs)\n\t\t\t\t\t\tresult['elapsed'] = '%d:%02d:%02dh' % (hrs,mins,secs)\n\t\t\t# Purpose is now formatted in the language of the client (now that\n\t\t\t# this LANG is properly propagated to us)\n\t\t\tif 'purpose' in result:\n\t\t\t\tif result['purpose'].find('%') != -1:\n\t\t\t\t\t# make sure to not explode (Bug #26318), better show nothing\n\t\t\t\t\tif 'detail' in result:\n\t\t\t\t\t\tresult['label'] = result['purpose'] % result['detail']\n\t\t\t\telse:\n\t\t\t\t\tresult['label'] = result['purpose']\n\t\t\t# Affordance to reboot... hopefully this gets set before\n\t\t\t# we stop polling on this job status\n\t\t\tself.ucr.load()\t# make it as current as possible\n\t\t\tresult['reboot'] = self.ucr.is_true('update/reboot/required',False)\n\n\t\t# ----------- DEBUG -----------------\n\t\tMODULE.info(\"updater/installer/status returns:\")\n\t\tpp = pprint.PrettyPrinter(indent=4)\n\t\tst = pp.pformat(result).split(\"\\n\")\n\t\tfor s in st:\n\t\t\t\tMODULE.info(\" >> %s\" % s)\n\t\t# -----------------------------------\n\n\t\tself.finished(request.id,result)" ]
[ "0.6342718", "0.60944885", "0.5940871", "0.5862255", "0.5860077", "0.5827088", "0.5746669", "0.5744944", "0.56751513", "0.56675583", "0.5650532", "0.5633678", "0.5613628", "0.5562561", "0.550574", "0.54969937", "0.54956335", "0.5475676", "0.5475087", "0.5468679", "0.5431841", "0.54151195", "0.54123646", "0.53846407", "0.5375134", "0.53652877", "0.5363582", "0.5356866", "0.5350987", "0.5350704" ]
0.6959558
0
Rolling update the Peloton apps in the cluster
def update(self, force, verbose): # Print the job config diffs print('Update Peloton cluster "%s" to new config: ' % self.name) for app in self.apps: self.diff_config(app, verbose) if not force and not yesno("Proceed with the update ?"): return updated_apps = [] for app in self.apps: updated_apps.append(app) if not app.update_or_create_job(update_callback): # Rollback the updates for all apps that have been updated self.rollback(updated_apps) return False return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def upgrade():\n config = ConfigManager()\n apps = config['apps']\n for i, app in progressbar(enumerate(apps), redirect_stdout=True):\n z = Zap(app)\n if i == 0:\n z.update(show_spinner=False)\n else:\n z.update(check_appimage_update=False, show_spinner=False)", "def __update_application(self, apps, **extra_args):\n update_on_error = extra_args.get('update_on_error', False)\n # auto_enable_auth = extra_args.get(\n # 'auto_enable_auth', self.auto_enable_auth)\n\n for app in apps:\n state = app.execution.state\n old_state = state\n gc3libs.log.debug(\n \"About to update state of application: %s (currently: %s)\",\n app,\n state)\n try:\n if state not in [\n Run.State.NEW,\n Run.State.TERMINATING,\n Run.State.TERMINATED,\n ]:\n lrms = self.get_backend(app.execution.resource_name)\n try:\n state = lrms.update_job_state(app)\n # pylint: disable=broad-except\n except Exception as ex:\n gc3libs.log.debug(\n \"Error getting status of application '%s': %s: %s\",\n app, ex.__class__.__name__, ex, exc_info=True)\n state = Run.State.UNKNOWN\n # run error handler if defined\n ex = app.update_job_state_error(ex)\n if isinstance(ex, Exception):\n raise ex\n if state != old_state:\n app.changed = True\n # set log information accordingly\n if (app.execution.state == Run.State.TERMINATING\n and app.execution.returncode is not None\n and app.execution.returncode != 0):\n # there was some error, try to explain\n app.execution.info = (\n \"Execution failed on resource: %s\" %\n app.execution.resource_name)\n signal = app.execution.signal\n if signal in Run.Signals:\n app.execution.info = (\n \"Abnormal termination: %s\" % signal)\n else:\n if os.WIFSIGNALED(app.execution.returncode):\n app.execution.info = (\n \"Remote job terminated by signal %d\" %\n signal)\n else:\n app.execution.info = (\n \"Remote job exited with code %d\" %\n app.execution.exitcode)\n\n if state != Run.State.UNKNOWN or update_on_error:\n app.execution.state = state\n\n except (gc3libs.exceptions.InvalidArgument,\n gc3libs.exceptions.ConfigurationError,\n gc3libs.exceptions.UnrecoverableAuthError,\n gc3libs.exceptions.FatalError):\n # Unrecoverable; no sense in continuing --\n # pass immediately on to client code and let\n # it handle this...\n raise\n\n except gc3libs.exceptions.UnknownJob:\n # information about the job is lost, mark it as failed\n app.execution.returncode = (Run.Signals.Lost, -1)\n app.execution.state = Run.State.TERMINATED\n app.changed = True\n continue\n\n except gc3libs.exceptions.InvalidResourceName:\n # could be the corresponding LRMS has been removed\n # because of an unrecoverable error mark application\n # as state UNKNOWN\n gc3libs.log.warning(\n \"Cannot access computational resource '%s',\"\n \" marking task '%s' as UNKNOWN.\",\n app.execution.resource_name, app)\n app.execution.state = Run.State.TERMINATED\n app.changed = True\n continue\n\n # This catch-all clause is needed otherwise the loop stops\n # at the first erroneous iteration\n #\n # pylint: disable=broad-except\n except Exception as ex:\n if gc3libs.error_ignored(\n # context:\n # - module\n 'core',\n # - class\n 'Core',\n # - method\n 'update_job_state',\n # - actual error class\n ex.__class__.__name__,\n # - additional keywords\n 'update',\n ):\n gc3libs.log.warning(\n \"Ignored error in Core.update_job_state(): %s\", ex)\n # print again with traceback at a higher log level\n gc3libs.log.debug(\n \"(Original traceback follows.)\", exc_info=True)\n continue\n else:\n # propagate generic exceptions for debugging purposes\n raise", "def 
do_update(self, node_role_map, node_roles, first_run=False):\n require('use_rds')\n require('pstat_instance')\n require('pstat_url')\n require('project_root')\n require('config_folder')\n require('ssl_prefix')\n require('backup')\n require('aws_access_key_id')\n require('aws_secret_access_key')\n require('sphinx_counter')\n require('key_filename')\n require('calabar_conf_context')\n require('loggly_inputs')\n require('sphinx_counter')\n require('ipsec_confs')\n require('hostname')\n require('enable_periodic_tasks')\n\n logger.info(\"Starting to provision %s\", env.host_string)\n\n for ipsec_name, _ in env.ipsec_confs.items():\n # Require all of the pre-shared key configs\n require('ipsec_psk_%s' % ipsec_name)\n\n if first_run:\n self.do_first_launch_config()\n\n self._stop_celery()\n\n self._update_cache_settings(node_role_map['memcached']['all'])\n self._update_sphinx_settings(\n node_role_map['celery_backend']['same_az'],\n node_roles,\n )\n self._update_celery_backend_settings(\n node_role_map['sphinx_search_indexer']['same_az'],\n )\n ldap_api_nodes = node_role_map['has_ldap_access']\n self._update_ldap_api_endpoint_settings(\n all_ldap_api_nodes=ldap_api_nodes['all'],\n same_az_ldap_api_nodes=ldap_api_nodes['same_az'],\n node_roles=node_roles,\n )\n self._update_celery_ldap_settings(node_roles)\n\n # Package and push the app to the new instance\n env.project_root_src = '/opt/pstat/versions/%(timestamp)s' % env\n source_dir = env.project_root_src\n current_source_dir = None\n if not first_run:\n current_source_dir = env.project_root\n with hide(*fab_output_hides):\n push_source(\n new_source_dir=source_dir,\n current_source_dir=current_source_dir,\n chown=F_CHOWN,\n chmod=\"u+rw,g+rw,o-rw\",\n )\n self._make_media_readable(source_dir)\n self._configure_settings_local(\n source_dir,\n env.pstat_settings,\n chown=F_CHOWN,\n )\n self._configure_settings_target(\n source_dir,\n env.settings_target,\n chown=F_CHOWN,\n )\n self.configure_terrarium(source_dir=source_dir, user=FILE_OWNER)\n self._activate_new_source(\n source_dir,\n [ACTIVE_SOURCE_SYMLINK, env.project_root],\n )\n self._run_db_migrations(user=FILE_OWNER)\n\n # Link up the attachments and upload directories from /mnt/\n self._link_storage_dirs()\n\n self._configure_webservers(node_roles)\n building_search_index = self._build_search_index()\n\n self._create_media_folder()\n self._collect_static_media()\n\n self._create_500_page()\n self._restart_webservers()\n\n # Services managed via supervisord\n self._configure_celery(node_roles)\n self._update_supervisord()\n self._configure_calabar()\n self._configure_ipsec()\n self._start_celery()\n\n self._configure_loggly()\n self._configure_pstat_cron_jobs()\n self._configure_email_sending()\n\n if first_run:\n self._sync_s3_media()\n\n if building_search_index:\n self._wait_for_search_indexing()\n self._ensure_sphinx_running()\n self._configure_sphinx_cron()\n\n logger.info(\"Provisioner completed successfully\")", "def in_cluster_app_config(app_config):\n app_config['INDEX_MIGRATOR_RECIPES'] = dict(\n my_recipe=dict(\n cls='invenio_index_migrator.api.Migration',\n params=dict(\n strategy=Migration.IN_CLUSTER_STRATEGY,\n src_es_client=dict(\n prefix='old-',\n version=7,\n params=dict(\n host='localhost',\n port=9200,\n use_ssl=False,\n ),\n ),\n jobs=dict(\n reindex_author_job=dict(\n cls='invenio_index_migrator.api.job.ReindexAndSyncJob',\n pid_type='authid',\n index='authors-author-v1.0.0',\n rollover_threshold=10,\n refresh_interval=None,\n wait_for_completion=True\n ),\n 
reindex_record_job=dict(\n cls='invenio_index_migrator.api.job.ReindexAndSyncJob',\n pid_type='recid',\n index='records-record-v1.0.0',\n rollover_threshold=10,\n refresh_interval=None,\n wait_for_completion=True\n )\n )\n )\n )\n )\n app_config['SEARCH_ELASTIC_HOSTS'] = [\n dict(host='localhost', port=9200)\n ]\n app_config['SEARCH_INDEX_PREFIX'] = 'old-'\n app_config['PIDSTORE_RECID_FIELD'] = 'recid'\n return app_config", "def main():\n # Wait for dependency services (ES and RE) to be live\n wait_for_dependencies(timeout=180)\n logging.info('Services started! Now starting the app..')\n # Initialize worker group of ESIndexer\n es_indexers = WorkerGroup(ESIndexer, (), count=config()['workers']['num_es_indexers'])\n # Initialize a worker group of RelengImporter\n releng_importers = WorkerGroup(RelengImporter, (), count=config()['workers']['num_re_importers'])\n # All worker groups to send kafka messages to\n receivers = [es_indexers, releng_importers]\n\n # used to check update every minute\n last_updated_minute = int(time.time()/60)\n _CONFIG_TAG = _query_for_config_tag()\n\n # Initialize and run the Kafka consumer\n consumer = _set_consumer()\n\n while True:\n msg = consumer.poll(timeout=0.5)\n if msg is None:\n continue\n curr_min = int(time.time()/60)\n if curr_min > last_updated_minute:\n config_tag = _query_for_config_tag()\n # update minute here\n last_updated_minute = curr_min\n if config_tag is not None and config_tag != _CONFIG_TAG:\n _CONFIG_TAG = config_tag\n # send message to es_indexers to update config.\n es_indexers.queue.put(('ws_event', {\n 'evtype': \"RELOAD_ELASTIC_ALIASES\",\n \"msg\": f\"updating to tag {_CONFIG_TAG}\"\n }))\n if msg.error():\n if msg.error().code() == KafkaError._PARTITION_EOF:\n logging.info('End of stream.')\n else:\n logging.error(f\"Kafka message error: {msg.error()}\")\n continue\n val = msg.value().decode('utf-8')\n try:\n data = json.loads(val)\n except ValueError as err:\n logging.error(f'JSON parsing error: {err}')\n logging.error(f'Message content: {val}')\n for receiver in receivers:\n receiver.queue.put(('ws_event', data))", "def deploy():\n update_treesheets()\n restart_treesheets()", "def apply_maintenance_update(self):\n logger.info(\"Applying maintenance updates on master node\")\n self.env.admin_install_updates()\n\n logger.info(\"Applying maintenance updates on slaves\")\n slaves_mu_script_url = (\n \"https://github.com/Mirantis/tools-sustaining/\"\n \"raw/master/scripts/mos_apply_mu.py\")\n\n path_to_mu_script = \"/tmp/mos_apply_mu.py\"\n\n with self.env.d_env.get_admin_remote() as remote:\n remote.check_call(\"wget {uri} -O {path}\".format(\n uri=slaves_mu_script_url,\n path=path_to_mu_script)\n )\n\n remote.check_call(\n \"python {path} \"\n \"--env-id={identifier} \"\n \"--user={username} \"\n \"--pass={password} \"\n \"--tenant={tenant_name} --update\".format(\n path=path_to_mu_script,\n identifier=self.cluster_id,\n **conf.KEYSTONE_CREDS\n )\n )\n\n controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles(\n self.cluster_id, roles=['controller', ])\n\n computes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(\n self.cluster_id, roles=['compute', ])\n\n logger.info(\"Restarting all OpenStack services\")\n\n logger.info(\"Restarting services on controllers\")\n ha_services = (\n \"p_heat-engine\",\n \"p_neutron-plugin-openvswitch-agent\",\n \"p_neutron-dhcp-agent\",\n \"p_neutron-metadata-agent\",\n \"p_neutron-l3-agent\")\n non_ha_services = (\n \"heat-api-cloudwatch\",\n \"heat-api-cfn\",\n \"heat-api\",\n 
\"cinder-api\",\n \"cinder-scheduler\",\n \"nova-objectstore\",\n \"nova-cert\",\n \"nova-api\",\n \"nova-consoleauth\",\n \"nova-conductor\",\n \"nova-scheduler\",\n \"nova-novncproxy\",\n \"neutron-server\",\n )\n for controller in controllers:\n with self.fuel_web.get_ssh_for_nailgun_node(\n controller) as remote:\n for service in ha_services:\n remote_ops.manage_pacemaker_service(remote, service)\n for service in non_ha_services:\n remote_ops.manage_service(remote, service)\n\n logger.info(\"Restarting services on computes\")\n compute_services = (\n \"neutron-plugin-openvswitch-agent\",\n \"nova-compute\",\n )\n for compute in computes:\n with self.fuel_web.get_ssh_for_nailgun_node(compute) as remote:\n for service in compute_services:\n remote_ops.manage_service(remote, service)", "def cluster_reboot(cluster):\n map(reboot, cluster)", "def sync_apps(self):\n pass", "def update(appname, use_appimageupdate=True):\n z = Zap(appname)\n z.update(use_appimageupdate=use_appimageupdate)", "def update_all(self):\n self.update_head_node_ip()\n self.get_database_info()\n self.update_users()", "def reload_apps(self, **kwargs) -> None:\n kwargs[\"namespace\"] = \"admin\"\n kwargs[\"__name\"] = self.name\n self.call_service(\"app/reload\", **kwargs)\n return None", "def sync_apps(self):\n cherrypy.server.httpserver.wsgi_app = self.get_app()", "def AppUpdateApp(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def scale_app(self, name, replicas):\n raise NotImplementedError", "def sync_marathon_app():\n # Identify the hosts and ports of executing tasks\n try:\n c = MarathonClient(MARATHON_ROOT_URL)\n\n app = c.get_app(MARATHON_APP)\n\n container_port = MARATHON_APP_PORT\n\n port_index = None\n if app and app.container and app.container.docker and app.container.docker.port_mappings:\n for i in range(len(app.container.docker.port_mappings)):\n if container_port == app.container.docker.port_mappings[i].container_port:\n # Set port index to use for identifying the exposed port\n # that maps to internal container port\n port_index = i\n break\n\n if port_index is None:\n raise Exception('Unable to correlate container to host port.')\n\n instances = []\n for task in app.tasks:\n logging.info('Queuing configuration refresh of %s at %s:%s' %\n (task.id, task.host, task.ports[port_index]))\n instances.append('%s:%s' % (task.host, task.ports[port_index]))\n\n reload_config(instances)\n\n except MarathonError, ex:\n print 'Error making Marathon API call: %s' % ex.message", "def test_upgrade_apply_all_fine(setup, platform, skuba):\n\n setup_kubernetes_version(skuba)\n\n # node upgrade apply\n outs = {}\n for (r, n) in [(\"master\", 0), (\"worker\", 0)]:\n node = \"my-{}-{}\".format(r, n)\n outs[node] = skuba.node_upgrade(\"apply\", r, n)\n\n master = outs[\"my-master-0\"]\n assert master.find(\n \"Node my-master-0 is up to date\"\n ) != -1\n\n worker = outs[\"my-worker-0\"]\n assert worker.find(\n \"Node my-worker-0 is up to date\"\n ) != -1", "def scale_app(marathon_url, app_id, instances = 1):\n\n api_endpoint = '/v2/apps/'\n headers = {'Content-Type': 'application/json'}\n payload = {'instances': instances}\n url = marathon_url + api_endpoint + app_id\n print(url)\n r = requests.put(url, data=json.dumps(payload), headers=headers)\n #TODO : Add some control based on status code\n print(r.status_code)\n return", "def update_data(update_method):\n log.debug('Starting update')\n cmd = ['/usr/bin/python', wf.workflowfile('update.py')]\n if update_method == 'force':\n 
cmd.append('--update')\n cmd.append('force')\n\n # Update projects data\n log.debug('Run update command : {}'.format(cmd))\n run_in_background('update', cmd)\n\n return 0", "def AppUpdateApp(self, request, timeout, metadata=None, with_call=False, protocol_options=None):\n raise NotImplementedError()", "def test_upgrade_apply_from_previous(setup, platform, skuba):\n\n setup_kubernetes_version(skuba, PREVIOUS_VERSION)\n\n outs = {}\n for (r, n) in [(\"master\", 0), (\"worker\", 0)]:\n node = \"my-{}-{}\".format(r, n)\n outs[node] = skuba.node_upgrade(\"apply\", r, n)\n\n master = outs[\"my-master-0\"]\n assert master.find(\"successfully upgraded\") != -1\n\n worker = outs[\"my-worker-0\"]\n assert worker.find(\"successfully upgraded\") != -1", "def set_apps(self, new_apps):\n self.remove_apps()\n for app_id in new_apps:\n self.add_app(Webapp.objects.get(pk=app_id))\n index_webapps.delay(new_apps)", "def update_apps(env='development', upgrade_apps='n'):\n\n project_settings = get_settings()\n projects = build_projects_vars()\n project = projects[env]\n\n for app in project_settings.EXTRA_APPS:\n option = ''\n if app[env]['type'] == 'git' and upgrade_apps == 'y':\n option = '--upgrade'\n if app[env]['type'] == 'editable':\n option = '-e'\n\n run('workon %(name)s && pip install %(option)s %(source)s' % {'name': project['name'], 'option': option, 'source': app[env]['source']})", "def publish_updates():\n run_subprocess(['osg-batch-update'])", "def update_worker():\n from test import get_remote_runner\n runner = get_remote_runner()\n runner.run(\"python2.7 /vagrant/bootstrap_lxc_manager.py --update_only=True\")", "def upgrade(self):", "def upgrade(self):", "def test_update_hyperflex_cluster(self):\n pass", "def test_multiple_mon_pod_stays_on_same_node(self):\n ocs_version = config.ENV_DATA[\"ocs_version\"]\n # Check that we have LSO cluster and OCS version is 4.8 and below\n # This is a workaround due to issue https://github.com/red-hat-storage/ocs-ci/issues/4937\n if not (\n is_lso_cluster() and Version.coerce(ocs_version) <= Version.coerce(\"4.8\")\n ):\n pytest.skip(\n \"Skip the test because mons are not node assignment from Rook, if cluster is not \"\n \"LSO based. And also currently, we want to run the test only with OCS 4.8 and \"\n \"below. 
This is a workaround due to issue \"\n \"https://github.com/red-hat-storage/ocs-ci/issues/4937\"\n )\n # Initialize\n rook_ceph_mon = \"rook-ceph-mon\"\n\n # Get mons running on pod\n mon_pods = get_mon_pods()\n mon_name_to_del = mon_pods[0].get().get(\"metadata\").get(\"labels\").get(\"mon\")\n mon_name_to_edit = mon_pods[1].get().get(\"metadata\").get(\"labels\").get(\"mon\")\n mon_node = get_pod_node(mon_pods[1])\n\n # Edit the rook-ceph-mon-endpoints\n log.info(f\"Edit the configmap {ROOK_CEPH_MON_ENDPOINTS}\")\n configmap_obj = OCP(kind=CONFIGMAP, namespace=OPENSHIFT_STORAGE_NAMESPACE)\n rook_ceph_mon_configmap = configmap_obj.get(\n resource_name=ROOK_CEPH_MON_ENDPOINTS\n )\n json_val = json.loads(rook_ceph_mon_configmap[\"data\"][\"mapping\"])\n json_val[\"node\"][mon_name_to_del].update(json_val[\"node\"][mon_name_to_edit])\n rook_ceph_mon_configmap[\"data\"][\"mapping\"] = json.dumps(json_val)\n new_data = rook_ceph_mon_configmap[\"data\"]\n params = f'{{\"data\": {json.dumps(new_data)}}}'\n configmap_obj.patch(\n resource_name=ROOK_CEPH_MON_ENDPOINTS,\n params=params,\n format_type=\"strategic\",\n )\n log.info(f\"Configmap {ROOK_CEPH_MON_ENDPOINTS} edited successfully\")\n log.info(\n f\"Rook-ceph-mon-endpoints updated configmap: {rook_ceph_mon_configmap}\"\n )\n\n # Delete one mon deployment which had been edited\n dep_obj = OCP(kind=DEPLOYMENT, namespace=OPENSHIFT_STORAGE_NAMESPACE)\n mon_deployment_name_to_del = f\"{rook_ceph_mon}-{mon_name_to_del}\"\n log.info(f\"Deleting mon {mon_deployment_name_to_del} deployments\")\n dep_obj.delete(resource_name=mon_deployment_name_to_del)\n\n # Edit other mon deployment to remove mon anti-affinity\n mon_deployment_name_to_edit = f\"{rook_ceph_mon}-{mon_name_to_edit}\"\n log.info(\n f\"Edit mon {mon_deployment_name_to_edit} deployment \"\n \"to remove the required mon anti-affinity\"\n )\n params = '[{\"op\": \"remove\", \"path\": \"/spec/template/spec/affinity\"}]'\n dep_obj.patch(\n resource_name=mon_deployment_name_to_edit, params=params, format_type=\"json\"\n )\n log.info(\n f\"Successfully removed defined mon anti-affinity {mon_deployment_name_to_edit}\"\n )\n\n # Restart operator\n operator_pod_obj = get_operator_pods()\n delete_pods(pod_objs=operator_pod_obj)\n POD_OBJ.wait_for_resource(condition=STATUS_RUNNING, selector=OPERATOR_LABEL)\n\n # Validate deleted deployment mon came up and in pending state\n # Initially mon stucks in pending state, remove defined anti-affinity\n POD_OBJ.wait_for_resource(\n condition=STATUS_PENDING,\n resource_count=1,\n selector=MON_APP_LABEL,\n timeout=1200,\n )\n # Edit mon deployment to remove mon anti-affinity\n log.info(\n f\"Edit mon {mon_deployment_name_to_del} deployment \"\n \"to remove the required mon anti-affinity\"\n )\n params = '[{\"op\": \"remove\", \"path\": \"/spec/template/spec/affinity\"}]'\n dep_obj.patch(\n resource_name=mon_deployment_name_to_del, params=params, format_type=\"json\"\n )\n log.info(\n f\"Successfully removed defined mon anti-affinity {mon_deployment_name_to_del}\"\n )\n\n # Validate mon pod moved to another node such that 2 mons are running on same node\n log.info(\"Waiting for 5 seconds for mon recovery\")\n time.sleep(5)\n new_mon_pods = get_mon_pods()\n new_node = [\n get_pod_node(mon)\n for mon in new_mon_pods\n if mon.get().get(\"metadata\").get(\"labels\").get(\"mon\") == mon_name_to_del\n ]\n assert (\n new_node[0].name == mon_node.name\n ), f\"Mon moved to node {mon_node} such that 2 mons are running on same node\"\n\n # Verify rook deletes one of the 
mon and move to another node\n timeout = 60\n log.info(f\"Waiting for {timeout} seconds for mon recovery\")\n time.sleep(timeout)\n\n POD_OBJ.wait_for_resource(\n condition=STATUS_RUNNING,\n resource_count=len(mon_pods),\n selector=MON_APP_LABEL,\n timeout=3600,\n sleep=5,\n )\n log.info(\n \"Mons are up and running state and validate are running on different nodes\"\n )\n mon_pods_running_on_same_node()", "def update_app(AppId=None, Name=None, Description=None, DataSources=None, Type=None, AppSource=None, Domains=None, EnableSsl=None, SslConfiguration=None, Attributes=None, Environment=None):\n pass" ]
[ "0.58729935", "0.5843566", "0.58405876", "0.5839117", "0.57458085", "0.56953675", "0.56308955", "0.55375355", "0.5398979", "0.5395155", "0.537317", "0.53511757", "0.5333919", "0.53233975", "0.53211194", "0.5281121", "0.52565503", "0.52545494", "0.52454", "0.5234684", "0.52275306", "0.5198869", "0.5195976", "0.51567256", "0.5149543", "0.51113015", "0.51113015", "0.51034296", "0.5100409", "0.50857717" ]
0.6399314
0
Rollback the updates to the list of apps in the cluster
def rollback(self, apps): while len(apps) > 0: app = apps.pop() print("Rolling back app %s ..." % app.name) app.rollback_job()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rollback(self):\n for db in self.values():\n db.rollback()", "def _do_rollback(self):\n self.backend.rollback()", "def rollback(self):\n pass", "def rollback(self):\r\n self.db.rollback()", "def __update_application(self, apps, **extra_args):\n update_on_error = extra_args.get('update_on_error', False)\n # auto_enable_auth = extra_args.get(\n # 'auto_enable_auth', self.auto_enable_auth)\n\n for app in apps:\n state = app.execution.state\n old_state = state\n gc3libs.log.debug(\n \"About to update state of application: %s (currently: %s)\",\n app,\n state)\n try:\n if state not in [\n Run.State.NEW,\n Run.State.TERMINATING,\n Run.State.TERMINATED,\n ]:\n lrms = self.get_backend(app.execution.resource_name)\n try:\n state = lrms.update_job_state(app)\n # pylint: disable=broad-except\n except Exception as ex:\n gc3libs.log.debug(\n \"Error getting status of application '%s': %s: %s\",\n app, ex.__class__.__name__, ex, exc_info=True)\n state = Run.State.UNKNOWN\n # run error handler if defined\n ex = app.update_job_state_error(ex)\n if isinstance(ex, Exception):\n raise ex\n if state != old_state:\n app.changed = True\n # set log information accordingly\n if (app.execution.state == Run.State.TERMINATING\n and app.execution.returncode is not None\n and app.execution.returncode != 0):\n # there was some error, try to explain\n app.execution.info = (\n \"Execution failed on resource: %s\" %\n app.execution.resource_name)\n signal = app.execution.signal\n if signal in Run.Signals:\n app.execution.info = (\n \"Abnormal termination: %s\" % signal)\n else:\n if os.WIFSIGNALED(app.execution.returncode):\n app.execution.info = (\n \"Remote job terminated by signal %d\" %\n signal)\n else:\n app.execution.info = (\n \"Remote job exited with code %d\" %\n app.execution.exitcode)\n\n if state != Run.State.UNKNOWN or update_on_error:\n app.execution.state = state\n\n except (gc3libs.exceptions.InvalidArgument,\n gc3libs.exceptions.ConfigurationError,\n gc3libs.exceptions.UnrecoverableAuthError,\n gc3libs.exceptions.FatalError):\n # Unrecoverable; no sense in continuing --\n # pass immediately on to client code and let\n # it handle this...\n raise\n\n except gc3libs.exceptions.UnknownJob:\n # information about the job is lost, mark it as failed\n app.execution.returncode = (Run.Signals.Lost, -1)\n app.execution.state = Run.State.TERMINATED\n app.changed = True\n continue\n\n except gc3libs.exceptions.InvalidResourceName:\n # could be the corresponding LRMS has been removed\n # because of an unrecoverable error mark application\n # as state UNKNOWN\n gc3libs.log.warning(\n \"Cannot access computational resource '%s',\"\n \" marking task '%s' as UNKNOWN.\",\n app.execution.resource_name, app)\n app.execution.state = Run.State.TERMINATED\n app.changed = True\n continue\n\n # This catch-all clause is needed otherwise the loop stops\n # at the first erroneous iteration\n #\n # pylint: disable=broad-except\n except Exception as ex:\n if gc3libs.error_ignored(\n # context:\n # - module\n 'core',\n # - class\n 'Core',\n # - method\n 'update_job_state',\n # - actual error class\n ex.__class__.__name__,\n # - additional keywords\n 'update',\n ):\n gc3libs.log.warning(\n \"Ignored error in Core.update_job_state(): %s\", ex)\n # print again with traceback at a higher log level\n gc3libs.log.debug(\n \"(Original traceback follows.)\", exc_info=True)\n continue\n else:\n # propagate generic exceptions for debugging purposes\n raise", "def rollback(self):\n self.db.rollback()", "def rollback(self, stage, enodes, 
exception):", "def test_redeploy_same_app():\n\n app_state_manager = ApplicationStateManager(MockDeploymentStateManager())\n app_state_manager.deploy_application(\"test_app\", [{\"name\": \"d1\"}, {\"name\": \"d2\"}])\n app_status = app_state_manager.get_app_status(\"test_app\")\n assert app_status.status == ApplicationStatus.DEPLOYING\n\n # Deploy the same app with different deployments\n unused_deployments = app_state_manager.deploy_application(\n \"test_app\", [{\"name\": \"d2\"}, {\"name\": \"d3\"}]\n )\n assert unused_deployments == [\"d1\"]\n\n app_state_manager.deployment_state_manager.add_deployment_status(\n DeploymentStatusInfo(\"d3\", DeploymentStatus.UPDATING)\n )\n assert app_state_manager._application_states[\"test_app\"].deployments_to_delete == {\n \"d1\"\n }\n\n # After updating, the deployment should be deleted successfully, and\n # deployments_to_delete should be empty\n app_state_manager.deployment_state_manager.delete_deployment(\"d1\")\n app_state_manager.update()\n assert (\n app_state_manager._application_states[\"test_app\"].deployments_to_delete == set()\n )", "def rollback():\n with cd(env.basepath):\n run('mv current/rollback rollback')\n run('mv current undeployed')\n run('mv rollback current')\n version = run('readlink current')\n previous = run('readlink undeployed')\n puts(green('>>> Rolled back from %(previous)s to %(version)s' % { 'previous': previous, 'version': version }))\n run('rm -fr %s' % previous)\n run('rm undeployed')\n sudo('service nginx reload')\n with cd(env.nodejs):\n for n in [1, 2]:\n with settings(warn_only=True):\n sudo('stop nodejs N=%s' % n)\n run('mv instance%s/rollback rollback%s' % (n, n))\n run('mv instance%s undeployed' % n)\n run('mv rollback%s instance%s' % (n, n))\n version = run('readlink instance%s' % n)\n previous = run('readlink undeployed')\n puts(green('>>> Rolled back nodejs %(n)s from %(previous)s to %(version)s' % { 'n': n, 'previous': previous, 'version': version }))\n run('rm -fr %s' % previous)\n run('rm undeployed')\n sudo('start nodejs N=%s' % n)", "def rollback(self):\n # PEP 249\n raise impala.error.NotSupportedError()", "def rollback(self):\n self.conn.rollback()", "def rollback(self):\n self.remove_repo_config()\n self.remove_repos()\n\n self.supervisor.start_all_services()", "def rollback(self):\n self._rollback = True", "def RollBack(self):\r\n self.conn.rollback()", "def test_create_deployment_config_rollback_for_all_namespaces(self):\n pass", "def update(self, force, verbose):\n\n # Print the job config diffs\n print('Update Peloton cluster \"%s\" to new config: ' % self.name)\n for app in self.apps:\n self.diff_config(app, verbose)\n\n if not force and not yesno(\"Proceed with the update ?\"):\n return\n\n updated_apps = []\n for app in self.apps:\n updated_apps.append(app)\n if not app.update_or_create_job(update_callback):\n # Rollback the updates for all apps that have been updated\n self.rollback(updated_apps)\n return False\n\n return True", "def rollback(self):\n raise NotImplementedError", "def rollback(self):\n try:\n if self._cur_batch:\n self._cur_batch.rollback()\n except ValueError:\n # ignore \"Batch must be in progress to rollback\" error\n pass\n self._cur_batch = None\n self._num_mutations = 0", "def test_update_app_deploy_failed():\n app_state_manager = ApplicationStateManager(MockDeploymentStateManager())\n app_state_manager.deploy_application(\"test_app\", {})\n app_status = app_state_manager.get_app_status(\"test_app\")\n assert app_status.status == ApplicationStatus.DEPLOYING\n 
app_state_manager.deployment_state_manager.set_deployment_statuses_unhealthy(0)\n app_state_manager.update()\n app_status = app_state_manager.get_app_status(\"test_app\")\n assert app_status.status == ApplicationStatus.DEPLOY_FAILED\n # rerun update, application status should not make difference\n app_state_manager.update()\n assert app_status.status == ApplicationStatus.DEPLOY_FAILED", "def rollback(self):\n self._connection.execute_nonquery(\"sql\", \"ROLLBACK\", True)", "def sync_apps(self):\n pass", "def roll_back_demo():\n # return harvey rupp to belmont hill team\n bh = Team.query.get(161)\n print(f'retrieved {bh}')\n hr = Runner.query.get(1700)\n print(f'retrieved {hr}')\n if bh not in hr.teams:\n bh.runners.append(hr)\n db.session.commit()\n\n # set primary_key values below which will be untouched\n first_deleted_race = 19\n first_deleted_runner = 3712\n first_deleted_result = 4750\n first_deleted_school = 68\n first_deleted_team = 315\n first_deleted_location = 8\n first_deleted_course = 9\n first_deleted_league = 4\n\n # do not allow unless user is administrator\n if not current_user.is_administrator():\n return redirect(url_for('races.results', race_id=race.id))\n\n # delete races and associated results for races in delete range\n races = Race.query.all()\n for race in races:\n if race.id >= first_deleted_race:\n delete_race_by_id(race.id)\n\n # disassociate runners from teams and delete\n teams = Team.query.all()\n for team in teams:\n if team.id >= first_deleted_team:\n team.runners.clear()\n db.session.commit()\n\n runners = Runner.query.all()\n for runner in runners:\n if runner.id >= first_deleted_runner:\n db.session.delete(runner)\n db.session.commit()\n\n # delete teams\n for team in teams:\n if team.id >= first_deleted_team:\n db.session.delete(team)\n db.session.commit()\n\n # delete courses\n courses = Course.query.all()\n for course in courses:\n if course.id >= first_deleted_course:\n db.session.delete(course)\n db.session.commit()\n\n # disassociate locaions from schools and delete\n schools = School.query.all()\n for school in schools:\n if school.id >= first_deleted_school:\n school.locations.clear()\n db.session.commit()\n\n locations = Location.query.all()\n for location in locations:\n if location.id >= first_deleted_location:\n db.session.delete(location)\n db.session.commit()\n\n # disassociate schools from leagues and delete\n leagues = League.query.all()\n for league in leagues:\n if league.id >= first_deleted_league:\n league.schools.clear()\n db.session.commit()\n\n for school in schools:\n if school.id >= first_deleted_school:\n db.session.delete(school)\n db.session.commit()\n\n # delete leagues\n for league in leagues:\n if league.id >= first_deleted_league:\n db.session.delete(league)\n db.session.commit()\n\n # recalculate all runners seed times\n async_update_all_seed_times.delay()\n\n # update league standings via background task\n for league_id in [1, 2]:\n async_update_league_standings.delay(league_id=league_id)\n return redirect(url_for('core.index'))", "def migrate(*apps):\n # First sync db\n print(apps)\n\n if len(apps) > 0:\n for app in apps:\n try:\n _manage('migrate %s' % app)\n except Exception as e:\n print(red('Failed to migrate {} app! 
{}'.format(app, str(e))))\n else:\n _manage('migrate')", "def upgrade():\n\n conn = op.get_bind()\n invalid_acr = get_invalid_acrs(conn, models_names)\n\n if invalid_acr:\n invalid_acr_ids = [x.id for x in invalid_acr]\n add_to_objects_without_revisions_bulk(conn,\n invalid_acr_ids,\n acr,\n \"deleted\")\n delete_invalid_acr(conn, models_names)", "def rollback_images(self):\n for deployment in self.all_deployments():\n if deployment.get(\"updated_image\", False) is False:\n continue\n step = \"Rolling Back Deployment Image:\\ndeployment={}\\nattempted_image={}\\nrollback_image={}\".format(\n deployment[\"name\"],\n self.get_new_image(deployment[\"image\"]),\n deployment[\"image\"],\n )\n try:\n self.slacker.send_thread_reply(step)\n self.kuber.set_deployment_image(\n deployment[\"name\"], deployment[\"image\"], verify_update=True\n )\n deployment[\"updated_image\"] = False\n except Exception as e:\n self.raise_step_error(step=step, error=e)", "def rollback(isamAppliance, check_mode=False, force=False):\n if force is True or _changes_available(isamAppliance) is True:\n if check_mode is True:\n return isamAppliance.create_return_object(changed=True)\n else:\n return isamAppliance.invoke_delete(\"Rollback the changes\",\n \"/isam/pending_changes\")\n\n return isamAppliance.create_return_object()", "def rollback():\n current_timestamp = current()\n previous_timestamp = previous()\n\n if previous_timestamp:\n execute(symlink, *(previous_timestamp, ))\n run('rm -rf %s' % os.path.join(env.releases_dir, current_timestamp))", "def _revert(self):\n self.release_from_output(\"data\")\n # delete ONA submissions on ONA", "def rollback(migrator, database, fake=False, **kwargs):\n\n migrator.remove_model('tea_teas_types')\n migrator.remove_model('tea_types')\n migrator.remove_model('tea_lists_items')\n migrator.remove_model('tea_lists')\n migrator.remove_model('tea_teas')\n migrator.remove_model('tea_vendors')", "def rollback(checkpoints, config):\n # Misconfigurations are only a slight problems... allow the user to rollback\n installer = determine_installer(config)\n\n # No Errors occurred during init... proceed normally\n # If installer is None... couldn't find an installer... there shouldn't be\n # anything to rollback\n if installer is not None:\n installer.rollback_checkpoints(checkpoints)\n installer.restart()" ]
[ "0.6322182", "0.62385505", "0.6148077", "0.61378366", "0.6104422", "0.607502", "0.5983937", "0.5958292", "0.5950454", "0.5916126", "0.5878117", "0.58364695", "0.5808297", "0.5802136", "0.57791936", "0.57087785", "0.5707182", "0.5705441", "0.5698692", "0.56929046", "0.557553", "0.55456513", "0.5530202", "0.55105287", "0.548663", "0.54850817", "0.5475125", "0.5452098", "0.5446209", "0.5438944" ]
0.8059082
0
Update docstring for constructed method.
def update_docstring(instance): try: docstring = instance.api_map['doc'] except (KeyError, TypeError): docstring = 'No docstring provided.' instance.__class__.__doc__ = docstring instance.__class__.__call__.__signature__ = construct_signature(instance) return docstring
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init_doc(self):\n raise NotImplementedError()", "def __init__(self):\n super(MethodInfo, self).__init__()\n self.DocString = None", "def __doc__(self, ???):", "def docstring_hack():\n pass", "def __init__ (self, isInternal, docstring, name, args, isConst):\n\n self.name = name\n self.isConst = isConst\n self.isInternal = isInternal\n\n if isInternal:\n if language == 'java':\n # We have a special Javadoc doclet that understands a non-standard\n # Javadoc tag, @internal. When present in the documentation string\n # of a method, it causes it to be excluded from the final\n # documentation output. @internal is something doxygen offers.\n #\n p = re.compile('(\\s+?)\\*/', re.MULTILINE)\n self.docstring = p.sub(r'\\1* @internal\\1*/', docstring)\n elif language == 'csharp':\n # We mark internal methods in a different way for C#.\n self.docstring = docstring\n else:\n self.docstring = \" @internal\\n\" + docstring\n else:\n self.docstring = docstring\n\n # In Java and C#, if a method is const and swig has to translate the type,\n # then for some reason swig cannot match up the resulting doc strings\n # that we put into %javamethodmodifiers. The result is that the java\n # documentation for the methods are empty. I can't figure out why, but\n # have figured out that if we omit the argument list in the doc string\n # that is put on %javamethodmodifiers for such case, swig does generate \n # the comments for those methods. This approach is potentially dangerous\n # because swig might attach the doc string to the wrong method if a\n # methods has multiple versions with varying argument types, but the\n # combination doesn't seem to arise in antimony currently, and anyway,\n # this fixes a real problem in the Java documentation for antimony.\n\n if language == 'java' or language == 'csharp':\n if isConst and (args.find('unsigned int') >= 0):\n self.args = ''\n elif not args.strip() == '()':\n if isConst:\n self.args = args + ' const'\n else:\n self.args = args\n else:\n if isConst:\n self.args = '() const'\n else:\n self.args = ''\n else:\n self.args = args", "def _add_doc(func, doc):\r\n func.__doc__ = doc", "def _add_doc(func, doc):\r\n func.__doc__ = doc", "def _add_doc(func, doc):\r\n func.__doc__ = doc", "def _add_doc(func, doc):\n func.__doc__ = doc", "def __call__(self, func):\n func.__doc__ = self.doc\n return func", "def __init__(self, func):\n self.doc = func.__doc__", "def main_docstring():", "def DocString():\n return", "def inherit_docstring_from(cls):\n def _doc(func):\n cls_docstring = getattr(cls, func.__name__).__doc__\n func_docstring = func.__doc__\n if func_docstring is None:\n func.__doc__ = cls_docstring\n else:\n new_docstring = func_docstring % dict(super=cls_docstring)\n func.__doc__ = new_docstring\n return func\n return _doc", "def public_fn_with_googley_docstring(self, name, another, state=None):\n return 0", "def __init__ (self, docstring, name, isInternal):\n\n # Take out excess leading blank lines.\n docstring = re.sub('/\\*\\*(\\s+\\*)+', r'/** \\n *', docstring)\n\n self.docstring = docstring\n self.name = name\n self.isInternal = isInternal", "def format_method(cls, **kwargs): \n _doc_formatter = cls._format_obj(**kwargs) \n ## using functools.wraps: this will work but the method type of any bounded\n ## function (static, instance or class method) is also altered\n #def _func_decorator(func):\n # new_func = functools.wraps(func)(func)\n # new_func.__doc__ = _doc_formatter(func)\n # return new_func\n try:\n assert USE_WRAPT_OR_NOT and wrapt\n except: \n class 
_func_decorator(__MethodDecorator):\n def __init__(self, func, obj=None, cls=None, method_type='function'):\n #super(_func_decorator,self).__init__(func, obj=obj, cls=cls, method_type=method_type)\n __MethodDecorator.__init__(self, func, obj=obj, cls=cls, method_type=method_type)\n # we had one attribute wrt. a standard method_decorator instance\n setattr(self,'__doc__',_doc_formatter(self.func))\n def __getattribute__(self, attr_name): \n # we ensure that the docstring which is the __doc__ attribute of the\n # decorator, not that of the function itself\n if attr_name in ('__doc__',):\n return object.__getattribute__(self, attr_name) \n # otherwise behaves like the superclass class\n #return super(_func_decorator,self).__getattribute__(attr_name)\n return __MethodDecorator.__getattribute__(self, attr_name)\n else:\n def _func_decorator(func):\n #@my_wrapper\n #def new_func(*_args, **_kwargs):\n # return func(*_args, **_kwargs)\n new_func = method_decorator(func)\n #new_func = method_wrapper(func)\n # now we update the '__doc__' by recycling the doc already commited in \n # the FunctionWrapper object new_func: this enables avoiding issues when\n # dealing with classmethod or staticmethod methods:\n # \"AttributeError: 'classmethod' object attribute '__doc__' is read-only\"\n try: # write on the wrapper...\n new_func.__doc__ = _doc_formatter(new_func)\n except: \n # still, we allow this type of error, as it may occur in the case the\n # order of closures was not well set, e.g. by implementing:\n # @classmethod\n # @Docstring.format_class(**kwargs)\n # instead of:\n # @Docstring.format_class(**kwargs)\n # @classmethod\n pass\n return new_func\n return _func_decorator", "def __init__(self, cls_method) -> None:\n self.method = cls_method\n self.__doc__ = self.method.__doc__", "def docstring(self, docstring): # type: (str) -> None\n self._tmp_docstring = inspect.cleandoc(docstring)", "def brief_documentation(method: object) -> str:\n doc = method.__doc__\n if doc is not None:\n lines = doc.splitlines()\n if len(lines) > 0:\n return lines[0]\n return ''", "def func_doc():", "def get_documentation(self, *args, **dargs):\n pass", "def doc_apply(doc):\n\n def wrapper(func):\n func.__doc__ = doc\n return func\n\n return wrapper", "def method_description(self):\n pass", "def test_docstring(self):\n self.assertIsNotNone(Base.__doc__)", "def wrapper(func):\n docstring = func.__doc__\n helpdict = parse_docstring(\n docstring, key_symbol=key_symbol,\n description_symbol=description_symbol)\n func.helpdict = helpdict\n # remove markers\n docstring = docstring.replace(key_symbol, '')\n func.__doc__ = docstring.replace(description_symbol, '')\n return func", "def _generate_autodoc(\n self, entry: _MemberDocumenterEntry,\n summary=False) -> Tuple[sphinx.addnodes.desc, Optional[str]]:\n\n rst_strings = docutils.statemachine.StringList()\n entry.documenter.directive.result = rst_strings\n\n if entry.overload and entry.overload.overload_id is not None:\n # Force autodoc to use the overload-specific signature. 
autodoc already\n # has an internal mechanism for overriding the docstrings based on the\n # `_new_docstrings` member.\n entry.documenter._new_docstrings = [ # pylint: disable=protected-access\n sphinx.util.docstrings.prepare_docstring(\n entry.overload.doc or '',\n tabsize=self.state.document.settings.tab_width)\n ]\n # Workaround for https://github.com/sphinx-doc/sphinx/pull/9518\n orig_get_doc = entry.documenter.get_doc\n\n def get_doc(ignore: Optional[int] = None) -> List[List[str]]:\n if entry.documenter._new_docstrings is not None: # pylint: disable=protected-access\n return entry.documenter._new_docstrings # pylint: disable=protected-access\n return orig_get_doc(ignore) # type: ignore\n\n entry.documenter.get_doc = get_doc\n\n else:\n # Force autodoc to obtain the docstring through its normal mechanism,\n # which includes the \"ModuleAnalyzer\" for reading docstrings of\n # variables/attributes that are only contained in the source code.\n entry.documenter._new_docstrings = None # pylint: disable=protected-access\n orig_get_doc = None\n\n if summary and entry.is_inherited:\n overridename = entry.name\n else:\n overridename = _get_python_object_name_for_signature(entry)\n entry.documenter.format_name = lambda: overridename\n\n # Record the documenter for use by _process_docstring in `autodoc.py`.\n current_documenter_map = self.env.temp_data.setdefault(\n 'tensorstore_autodoc_current_documenter', {})\n current_documenter_map[entry.documenter.fullname] = entry.documenter\n entry.documenter.generate()\n if orig_get_doc is not None:\n del entry.documenter.get_doc\n del current_documenter_map[entry.documenter.fullname]\n\n group_name = _postprocess_autodoc_rst_output(rst_strings, summary=summary)\n\n entry.documenter.titles_allowed = True\n nodes = [\n x for x in sphinx.ext.autodoc.directive.parse_generated_content(\n self.state, entry.documenter.directive.result, entry.documenter)\n if isinstance(x, sphinx.addnodes.desc)\n ]\n assert len(nodes) == 1\n node = nodes[0]\n\n if entry.subscript:\n _mark_subscript_parameterlist(node)\n if entry.full_name.endswith(_INIT_SUFFIX):\n _clean_init_signature(node)\n if entry.full_name.endswith(_CLASS_GETITEM_SUFFIX):\n _clean_class_getitem_signature(node)\n\n return node, group_name", "def inherits_doc():\n pass", "def docstring_parameter(*args, **kwargs):\n\n def dec(obj):\n obj.__doc__ = obj.__doc__.format(*args, **kwargs)\n return obj\n\n return dec", "def add_documentation(cls, documentation):\n cls.__doc__ = documentation.CBAMLibrary\n methods = list(filter(lambda x: not x.startswith(\"_\"), dir(cls)))\n for method_name in methods:\n method = getattr(cls, method_name)\n if callable(method):\n name = method.__name__\n if hasattr(documentation, name):\n getattr(cls, name).__doc__ = getattr(documentation, name)" ]
[ "0.77076185", "0.75691295", "0.7252381", "0.7235469", "0.7157194", "0.7153221", "0.7153221", "0.7153221", "0.70771676", "0.70201385", "0.7007644", "0.6961431", "0.6957143", "0.6916432", "0.68973947", "0.689267", "0.6856933", "0.68178123", "0.67774934", "0.6775329", "0.6735545", "0.67350274", "0.6708701", "0.6592206", "0.6580099", "0.65420324", "0.65196353", "0.6508239", "0.6506877", "0.6483334" ]
0.7789328
0
Make a list of valid parameters. This accumulates all known parameters from any keys embedded in _path_, _default_params_, and _valid_params_.
def get_all_valid_params(instance):
    params = {}
    path_params = instance.find_path_keys(instance.api_map.get('path', ''))
    for param in path_params:
        params[param] = ''
    # Always make a list of valid parameters from endpoint mapping
    valid_params = instance.api_map.get('valid_params', [])
    if isinstance(valid_params, str):
        valid_params = [valid_params]
    for param in valid_params:
        params[param] = ''
    params.update(instance.api_map.get('default_params', {}))
    LOG.debug('Full list of params: %s', params)
    return params
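A minimal usage sketch for the function above. Everything beyond the function itself is an assumption for illustration: the `Api` stub, its `api_map` contents, the regex-based `find_path_keys`, and the `logging` setup are not part of the source record, which only requires that `instance` expose `api_map` and `find_path_keys` and that a module-level `LOG` exist.

import logging
import re

LOG = logging.getLogger(__name__)  # the snippet expects a module-level LOG

class Api:
    # Hypothetical endpoint description; api_map mirrors the keys the function reads.
    api_map = {
        'path': '/devices/{device_id}/interfaces/{interface_id}',
        'valid_params': 'verbose',          # a bare string gets normalized to a list
        'default_params': {'limit': 25},
    }

    def find_path_keys(self, path):
        # Pull the '{name}' placeholders out of the path template.
        return re.findall(r'{(\w+)}', path)

print(get_all_valid_params(Api()))
# -> {'device_id': '', 'interface_id': '', 'verbose': '', 'limit': 25}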
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_path_parameters(self):\n url_params = URL_PARAMS_PATTERN.findall(self.path)\n params = []\n\n for param in url_params:\n params.append({\n 'name': param,\n 'type': 'string',\n 'in': 'path',\n 'required': True\n })\n\n return params", "def _validate_params(self):\n assert set(self.required_params) - set(self._params) == set()\n for par, val in self.optional_params.items():\n if par not in self._params:\n self._params[par] = val", "def _GetParameters(\n self,\n required_path_params: Iterable[FieldDescriptor],\n optional_path_params: Iterable[FieldDescriptor],\n query_params: Iterable[FieldDescriptor],\n ) -> List[Dict[str, Union[str, bool, SchemaReference, ArraySchema,\n DescribedSchema]]]:\n parameters = []\n\n req_path_params_set = set(required_path_params)\n opt_path_params_set = set(optional_path_params)\n query_params_set = set(query_params)\n for field_d in req_path_params_set | opt_path_params_set | query_params_set:\n parameter_obj = {\"name\": casing.SnakeToCamel(field_d.name)}\n if field_d in req_path_params_set:\n parameter_obj[\"in\"] = \"path\"\n parameter_obj[\"required\"] = True\n elif field_d in opt_path_params_set:\n parameter_obj[\"in\"] = \"path\"\n else:\n parameter_obj[\"in\"] = \"query\"\n\n parameter_obj[\"schema\"] = self._GetDescribedSchema(field_d)\n\n parameters.append(parameter_obj)\n\n return parameters", "def create_parameter_list(path_params):\n param_list = []\n for param in path_params:\n parameter = {}\n parameter['in'] = 'path'\n parameter['name'] = str(param)\n parameter['description'] = 'ID of ' + str(param)[:-2]\n parameter['required'] = True\n parameter['type'] = 'string'\n param_list.append(parameter)\n return param_list", "def get_required_parameters(self) -> list:\n results = []\n if self.no_params or self.params_optional:\n return []\n else:\n for parameter, parameter_details in self.parameters.items():\n # Fixing issue #92\n # if parameter == \"effect\":\n # continue\n if not parameter_details.default_value:\n results.append(parameter_details.name)\n return results", "def build_parameters(self) -> List[str]:\n param_bits = []\n for name in self.parameters:\n param_bits.extend(self.build_parameter_by_name(name) or [])\n return param_bits", "def _validate_params(self):\n raise NotImplementedError('Must be implemented in subclasses.')", "def _buildImageParams(self, items):\n params = {}\n # Empty items returns empty params\n if not items:\n return params\n\n for item in items:\n if item.find('=') != -1:\n param, value = item.split('=', 1)\n params[param] = value\n else:\n params[item] = True\n\n if 'page' in params and params['page'] is not True:\n params['link'] = self._getWikiLink(params['page'])\n\n # Validate params with limited # of values\n for param_allowed in IMAGE_PARAMS:\n if (param_allowed in params and\n not (params[param_allowed] in IMAGE_PARAMS[param_allowed])):\n del params[param_allowed]\n\n return params", "def params(self):\n params = []\n\n for v in vars(self).values():\n params.extend(self.__computeParams(v))\n\n if isinstance(v, list):\n for p in v:\n params.extend(self.__computeParams(p))\n\n return params", "def get_params_iter(self):\n return []", "def _required_parameters(self) -> RequiredParameters:\n return RequiredParameters([])", "def _required_parameters(self) -> RequiredParameters:\n return RequiredParameters([])", "def _filter_params(self):\n default_params = self.get_default_params()\n complete_params = dict(self.get_default_params())\n complete_params.update(self.params)\n\n return 
utils.format_dictionary(complete_params)", "def _prepare_params(self, params):\n for key, value in params.items():\n if type(value) is list:\n params[key] = [(6, 0, value)]\n\n return params", "def _build_param_request(self):\n search_params = []\n for param in self.params:\n # print(param)\n if self.params[param] is not None:\n search_params.append(param + '={}'.format(self.params[param]))\n search_params = '&' + '&'.join(search_params)\n return search_params", "def _validate_parameter_combinations(self):\n parameters = [\"type\", \"path\", \"mode\", \"default\", \"min\", \"max\"]\n parameters = {key: getattr(self, key, None) for key in parameters}\n type = parameters.pop(\"type\")\n\n # validate parameter combination\n if type in self._TYPE_COMBINATION_MAPPING:\n valid_parameters = self._TYPE_COMBINATION_MAPPING[type]\n for key, value in parameters.items():\n if key not in valid_parameters and value is not None:\n msg = \"Invalid parameter for '{}' Input, parameter '{}' should be None but got '{}'\"\n raise ValidationException(\n message=msg.format(type, key, value),\n no_personal_data_message=msg.format(\"[type]\", \"[parameter]\", \"[parameter_value]\"),\n error_category=ErrorCategory.USER_ERROR,\n target=ErrorTarget.PIPELINE,\n )", "def _validate_parameters(self):\n errors = []\n for key in self.PARAMETERS.keys():\n if key not in self.request_obj.data_params:\n errors.append(key)\n\n if errors:\n raise DataParsingError('Following data items are missing: {}'.format(', '.join(errors)))\n\n for key, params in self.PARAMETERS.items():\n params[0].validate_type(key, self.request_obj.data_params.get(key), params[1])", "def get_required_params():\n return {}", "def get_params(self):\n return []", "def check_parameters_valid(self) :\n for check_parameter in self.parameters :\n if (not self.parameters[check_parameter]['set']) :\n error_message = \"Missing key -> '\" + check_parameter + \"'\"\n if (Config.logger) :\n dtpo_log('fatal', error_message)\n raise ParseError(error_message)\n\n if self.parameters[check_parameter]['type'] == 'dir' :\n value = self.parameters[check_parameter]['value']\n return_string = check_directory_permissions(value)\n if return_string :\n error_message = \"{0} not accessible \" \\\n \"-> {1}\".format(\n check_parameter,\n return_string)\n raise ParseError(error_message)\n elif self.parameters[check_parameter]['type'] == 'file' :\n value = self.parameters[check_parameter]['value']\n try :\n file_pointer = open(value)\n file_pointer.close()\n except IOError as io_error :\n error_message = \"File {0} not accessible -> {2}\" \\\n .format(\n check_parameter,\n self.parameters[check_parameter]['value'],\n str(io_error))\n raise ParseError(error_message)", "def validate_parameters(self):\n\n # env and fixed_env\n self._validate_envs()\n # checking optional data and scripts\n self._validate_download_data()\n self.data_path = self.params[\"data\"][\"location\"]\n self._validate_scripts()\n # checking optional data_ref (if not data_ref provided, path is the same as data path)\n if \"data_ref\" in self.params:\n self._validate_download_data(data_nm=\"data_ref\")\n# self.data_ref_path = self.params[\"data_ref\"][\"location\"]\n# else:\n# self.data_ref_path = self.data_path\n # checking analysis\n self._validate_analysis()\n # checking tests\n self._validate_tests()\n\n self.params.setdefault(\"post_build\", None)\n # if copy in post_build part that I'm changing the build_context\n if self.params[\"post_build\"] and \"copy\" in self.params[\"post_build\"]:\n self.build_context = 
self.workflow_path\n else:\n self.build_context = self.working_dir\n\n self.params.setdefault(\"plots\", [])\n if self.params[\"plots\"]:\n if not isinstance(self.params[\"plots\"], (list, tuple)):\n raise SpecificationError(\n \"Value of key 'plots' must be a list or a tuple\"\n )\n else:\n if any(not isinstance(j, dict) for j in self.params[\"plots\"]):\n raise SpecificationError(\n \"Every item in 'plots' must be a dictionary.\"\n )", "def _check_params(self):\n pass", "def _populate_params(self):\n self.params = []\n for root, dirs, files in os.walk(os.curdir):\n for file in files:\n fullfile = str(os.path.join(root, file))\n if self.config.regex_find_params.match(fullfile):\n self.params.append(fullfile)", "def _validate_query_parameters(self, parameters=None):\n if parameters is None:\n parameters = {}\n\n validated_parameters = {} # dictionary to return\n\n for key in parameters:\n if key in self._query_parameters:\n if key in ['limit', 'offset']:\n if isinstance(parameters[key], int):\n validated_parameters[key] = \"%i\" % parameters[key]\n elif key == 'format':\n if parameters[key] in self._query_parameters[key]:\n validated_parameters[key] = parameters[key]\n elif key == 'columns':\n # replace '+' by a space and separate the commas\n column_entry = parameters[key].replace('+', ' ').split(',')\n\n set_a = set(self._query_parameters[key])\n set_b = set(column_entry)\n validated_items = set_a.intersection(set_b) # disordered set\n validated_column_entry = [] # ordered list\n for item in column_entry:\n # reorder the set in a list to keep the same order as the entry\n if item in validated_items:\n validated_column_entry.append(item)\n validated_parameters[key] = \",\".join(validated_column_entry).replace(' ', '+')\n elif key in ['include', 'compress', 'sort']:\n if parameters[key] in self._query_parameters[key]:\n validated_parameters[key] = parameters[key]\n\n return validated_parameters", "def _get_param_combs(self):\n\n search_keys = [k for k, v in self.params.items() if type(v) is list]\n\n layer_combs = [list(product(*self.params[k]))\n if type(self.params[k][0]) is list\n else self.params[k]\n for k in search_keys]\n\n combs = list(product(*layer_combs))\n\n comb_dict = [{k: v for k, v in zip(search_keys, comb)}\n for comb in combs]\n\n fixed_params = {k: v for k, v in self.params.items()\n if k not in search_keys}\n \n return comb_dict, fixed_params", "def validate_params(context):\n schema = isinstance(params_schema, ValidateViewHook) and params_schema(request) or params_schema\n try:\n data = request.json_body\n except ValueError:\n data = request.params\n\n if variable_decode is True:\n data = formencode.variabledecode.variable_decode(data,\n dict_char=variable_decode_dict_char,\n list_char=variable_decode_list_char)\n state = Dummyobj()\n state.request = request\n state.context = context\n try:\n return schema.to_python(data, state=state)\n except formencode.Invalid as exc:\n unpacked = exc.unpack_errors()\n request.set_property(lambda ctx: unpacked,\n invalid_params_attr, reify=True)\n\n def cursedict(d1, d2):\n for k, v in d1.items():\n if isinstance(v, dict):\n if k in d2:\n yield k, dict(cursedict(v, d2[k]))\n else:\n yield k, v\n else:\n if k not in d2:\n yield k, v\n\n if raise_exc is True:\n _raise(invalid_params_exc, unpacked)\n else:\n return dict(curseschema(schema, data, unpacked, state=state))", "def build_param_bindings(self, params: list_of(str)) -> list:\n constraints = []\n \n for var_name in params:\n \n def param_binding(vn):\n return lambda vd = { vn : 
vn } : \"%s == self.%s\" % (vd[vn], vn)\n \n constraints.append( ({var_name}, param_binding(var_name)) )\n \n return constraints", "def get_params(self, deep = True, bounds = True):\n params = dict() \n for p in self._LIST_PARAMETERS:\n params[p] = self._get_one_param(p)\n if(bounds):\n params[p + '_bounds'] = self._get_one_bound(p)\n if(deep and self._FLAG_TYPE == 'collection' and p == 'list_func'):\n for n, sub_obj in enumerate(params[p]):\n sub_params = sub_obj.get_params(deep, bounds)\n params.update({'f' + str(n) + '__' + key: val for key, val in sub_params.items()})\n \n return params", "def get_parameters(self):\n params = []\n query_params = self.build_query_parameters()\n pagination_params = self.build_pagination_parameters()\n query_params.extend(self.build_query_params_from_default_backends())\n\n if django_filters is not None:\n query_params.extend(self.build_query_parameters_from_django_filters())\n\n if query_params:\n params += query_params\n\n if pagination_params:\n params += pagination_params\n\n return params", "def test_build_params( self ):\n r = Requester( self.logger )\n ( partnership_id, authorization_id, pickup_location, search_key, search_value ) = ( 'a', 'b', 'c', 'd', 'e' )\n params = r.build_params( partnership_id, authorization_id, pickup_location, search_key, search_value )\n self.assertEqual(\n ['ExactSearch', 'Notes', 'PartnershipId', 'PickupLocation'],\n sorted(params.keys()) )" ]
[ "0.7357251", "0.6719663", "0.650476", "0.6196918", "0.6181368", "0.615529", "0.6153257", "0.6026366", "0.6008636", "0.60070795", "0.59976304", "0.59976304", "0.59857446", "0.5972139", "0.59693515", "0.58892715", "0.5857361", "0.58370304", "0.5835673", "0.5816805", "0.5784921", "0.57839024", "0.5770627", "0.57365453", "0.57355535", "0.57287693", "0.572265", "0.5708917", "0.5707113", "0.5706239" ]
0.74553144
0
Exports a dictionary of inputs to a string. Inputs
def write_config_string(input_dict, entry_char='>', attribution_char='=', usekeys=None):
    # Selects the desired entries of the input_dict
    if usekeys is not None:
        input_dict = {key: input_dict[key] for key in usekeys}
    result_str = ""
    for key, value in input_dict.items():
        result_str += entry_string(key, value, entry_char, attribution_char)
    return result_str
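A short sketch of how the function above might be called. It depends on an `entry_string` helper that is not shown anywhere in the record, so the stand-in below is purely an assumption to make the example runnable; the real helper's formatting may differ.

def entry_string(key, value, entry_char, attribution_char):
    # Assumed stand-in for the missing helper: "<entry_char> key <attribution_char> value".
    return "{} {} {} {}\n".format(entry_char, key, attribution_char, value)

config = {'nx': 128, 'ny': 64, 'solver': 'rk4'}
print(write_config_string(config, usekeys=['nx', 'solver']), end='')
# With the stand-in above this prints:
# > nx = 128
# > solver = rk4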
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_output_data(\n self,\n inputs: Dict[str, Any]) -> Any:\n return inputs", "def write_input_params(file_output,input_params):\r\n\r\n with open(file_output,'w+') as f:\r\n for key in input_params.keys():\r\n f.write( ''.join(key)+','+str(input_params[key])+'\\n')\r\n f.close()", "def export_str(self):\n return \"\\n\".join(\"export %s=%s\" % (k, v) for k, v in self.items())", "def format_inputs_outputs(self, values):\n return ', '.join('%s=%s' % (key, value)\n for key, value in sorted(values.iteritems()))", "def export_inputs(self):\n vocab_dict = load_vocab_dict(self.text_vocab_file_path)\n vocab_size = len(vocab_dict)\n label_vocab_dict = load_vocab_dict(self.label_vocab_file_paths[0])\n label_vocab_size = len(label_vocab_dict)\n self.config['data']['vocab_size'] = vocab_size\n self.config['data']['label_vocab_size'] = label_vocab_size\n\n input_sentence = tf.placeholder(\n shape=(None,), dtype=tf.string, name=\"input_sentence\")\n\n input_pipeline_func = self.get_input_pipeline(for_export=True)\n\n token_ids = input_pipeline_func(input_sentence)\n token_ids_len = tf.map_fn(lambda x: compute_sen_lens(x, padding_token=0),\n token_ids)\n\n export_data = {\n \"export_inputs\": {\n \"input_sentence\": input_sentence\n },\n \"model_inputs\": {\n \"input_enc_x\": token_ids,\n \"input_x_len\": token_ids_len\n }\n }\n\n return export_data", "def to_string(inputs, outputs):\n r_val = '# Column 01: frequency\\n'\n r_val += '# 02: hp - real\\n'\n r_val += '# 03: hp - imaginary\\n'\n r_val += '# 04: hc - real\\n'\n r_val += '# 05: hc - imaginary\\n'\n for f_i, hp_i, hc_i in zip(inputs.freqs, outputs.hp, outputs.hc):\n r_val += \"%8.2f %12.5e %12.5e %12.5e %12.5e\\n\" % (f_i, hp_i.real, hp_i.imag, hc_i.real, hc_i.imag)\n return r_val", "def dict_2_string(d):\n buff = io.StringIO()\n print_dictionary(d, output=buff)\n return buff.getvalue()", "def dict2argstr(d: Dict[str, Any]) -> str:\n return \",\".join(\"{!s}={!r}\".format(key, val) for (key, val) in d.items())", "def output(self, *args, **kwargs):\n bodies = []\n for name, body in self.input(*args, **kwargs):\n body = self.body_value(body, **kwargs)\n name = self.name_value(name, **kwargs)\n\n if name:\n bodies.append(\"{} = {}\".format(name, body))\n else:\n bodies.append(body)\n\n #bodies.append(\"\\n\")\n path = self.path_value(**kwargs)\n if path:\n bodies.append(path)\n bodies.append(\"\\n\")\n\n return self._printstr(bodies)", "def out(self, inputs):", "def possession_stringer(input_dict):\r\n\treturn ', '.join(' x'.join((k, str(v))) for k,v in sorted(input_dict.items())) #output formatted skill list string\r", "def to_json(self) -> str:\n return json.dumps([x.to_dict() for x in self.inputs])", "def dump_line(self, outputs: JsonDict) -> str:\n return json.dumps(outputs, ensure_ascii=False) + \"\\n\"", "def dump_line(self, outputs: JsonDict) -> str:\n return json.dumps(outputs, ensure_ascii=False) + \"\\n\"", "def export_model_config_with_inputs(self, inputs):\n structure_exporter_tensors = self.sess.run(\n self.exporter_eval.tensors, feed_dict={self.inputs: inputs})\n self.exporter_eval.populate_tensor_values(structure_exporter_tensors)\n path = self.exporter_eval.create_file_and_save_alive_counts(\n self.log_dir, self.global_step)\n return path", "def write_dict_txtfile(input_file_name, data_dict):\n \n output_file = open(input_file_name, 'w')\n output_file.write('Human Metabolome database')\n output_file.write('\\n\\n')\n\n for keys, values in data_dict.items():\n output_file.write(str(keys)+', '+str(values)+'\\n')", "def 
strings_for_writeout():\n newline = '\\n'\n divider = '-------------------------'\n spc = ' '\n colon = ':'\n equaldivider = '========================='\n outfilename_suffix = '_FD_coefficients.dat'\n number_of_lines_to_read = 'number of lines to read: '\n\n table_strings = dict(newline = newline,\n divider = divider,\n spc = spc,\n colon = colon,\n equaldivider = equaldivider,\n outfilename_suffix = outfilename_suffix,\n number_of_lines_to_read = number_of_lines_to_read\n )\n\n return table_strings", "def dummy_transform(inputs: Dict[str, str]) -> Dict[str, str]:\n outputs = inputs\n outputs[\"greeting\"] = f\"{inputs['first_name']} {inputs['last_name']} says hello\"\n del outputs[\"first_name\"]\n del outputs[\"last_name\"]\n return outputs", "def dict_to_perl_string(input_dict):\n pairs = []\n for k, v in sorted(filter(lambda k_v: k_v[1] != None, input_dict.items())):\n k = str(k)\n t = type(v).__name__\n if t == 'str':\n pairs.append(\"\\\"%s\\\" => \\\"%s\\\"\" % (k, escape_perl_string(v)))\n elif t == 'int':\n pairs.append(\"\\\"%s\\\" => %d\" % (k, v))\n elif t == 'float':\n pairs.append(\"\\\"%s\\\" => %f\" % (k, v))\n elif t == 'list':\n pairs.append(\"\\\"%s\\\" => %s\" % (k, list_to_perl_string(v)))\n elif t == 'dict':\n pairs.append(\"\\\"%s\\\" => %s\" % (k, dict_to_perl_string(v)))\n elif t == 'bool':\n if str(v) == \"True\":\n pairs.append(\"\\\"%s\\\" => %d\" % (k, 1))\n else:\n raise Exception(\"Unsupported type \" + str(t))\n return \"{%s}\" % \", \".join(pairs)", "def export_file(self):\n if self.args.keyfilter:\n self.filter_keys()\n if self.args.datafilter:\n self.filter_values()\n json.dump(self.outputdata, self.outfile, indent=self.args.indent)\n self.outfile.write('\\n')", "def facts_to_str(user_data: Dict[str, str]) -> str:\n arg = list()\n\n for key, value in user_data.items():\n arg.append(f'{key} - {value}')\n\n return \"\\n\".join(arg).join(['\\n', '\\n'])", "def printdict(input_dict):\n for key in input_dict:\n print key, \":\", input_dict[key]", "def DictFunction():\r\n print \"{name} is from {city}, and he likes {cake} cake, {fruit} fruit, {salad} salad and {pasta} pasta\".format(**food_prefs)", "def input_dictionary_to_parameter(input_dict: Optional[Dict[str, Any]]) -> str:\n if not input_dict:\n return ''\n out = json.dumps(json.dumps(input_dict))\n return out[1:-1] # remove the outside quotes, e.g., \"foo\" -> foo", "def write_pecfile_dict(output_file, record_dict):\n for key in sorted(record_dict.keys()):\n output_file.write('%s\\n' % str(record_dict[key]))\n return output_file", "def write_input(eval_points, filename='input.txt'):\n util.save(eval_points, filename)", "def get_output(self, name_dict):\n return self.expand_vars(self.options.output_pattern, name_dict)", "def _reprOfStringToValueMap (stringMap : Map) -> String:\n\n entrySeparator = u\"§\"\n entryTemplate = \"%s: %s\"\n keyList = sorted(list(stringMap.keys()))\n result = \"\"\n \n for key in keyList:\n value = stringMap[key] \n result += (iif(result == \"\", \"\", entrySeparator)\n + entryTemplate % (key, value))\n \n result = \"{\" + result + \"}\";\n return result", "def write_dict(outputfilename, dictionary):\r\n # May want to modify this code to pickle the key and value and alter the read dictionary to do the same.\r\n try:\r\n with open(outputfilename, 'w', newline='', encoding='utf-8') as outfile:\r\n for key, value in dictionary.items():\r\n outfile.write('%s,%s\\n' % (key, value))\r\n except:\r\n input(\"File still open! 
Please close and press enter to continue\")\r\n with open(outputfilename, 'w', newline='', encoding='utf-8') as outfile:\r\n for key, value in dictionary.items():\r\n outfile.write('%s,%s\\n' % (key, value))", "def dict_print(self, output_file = \"dict.csv\"):\t\n\n\t\twith codecs.open(output_file,\"w\",encoding='utf-8') as f:\n\t\t\tfor (v,k) in self.token_key.items(): f.write(\"%s,%d\\n\" % (v,k))" ]
[ "0.6439612", "0.6250485", "0.60094106", "0.5903446", "0.5860807", "0.58248734", "0.5657922", "0.5620445", "0.56183684", "0.56107336", "0.5566386", "0.55293655", "0.55053955", "0.55053955", "0.5470858", "0.5464469", "0.5427735", "0.5419561", "0.5416688", "0.5392507", "0.53901833", "0.53843886", "0.5378928", "0.5351163", "0.5328649", "0.53203714", "0.53165597", "0.5288419", "0.52697116", "0.5268765" ]
0.63496745
1