query (stringlengths 9-9.05k) | document (stringlengths 10-222k) | metadata (dict) | negatives (sequencelengths 30-30) | negative_scores (sequencelengths 30-30) | document_score (stringlengths 4-10) | document_rank (stringclasses 2 values) |
---|---|---|---|---|---|---|
Test conversion of html with opacity style to pdf. Opacity style in a web page causes a Segmentation Fault only if wkhtmltopdf is not connected to a graphical service like Xorg. | def testConvertHtmlWithOpacityStyleToPdf(self):
self._testBase("data/test_with_opacity_style.html") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def testConvertHtmlWithScriptToPdf(self):\n self._testBase(\"data/test_with_script.html\")",
"def testConvertHtmlWithPngDataUrlToPdf(self):\n self._testBase(\"data/test_with_png_dataurl.html\")",
"def test_html_output(self):\n pass",
"def get_raw_pdf(html_path, pdf_path, width='', height=''):\n debug = False\n if mg.EXPORT_IMAGES_DIAGNOSTIC: debug = True\n try:\n url = html_path.as_uri()\n cmd_make_pdf = 'cmd_make_pdf not successfully generated yet'\n \"\"\"\n Unless Linux, MUST be in report directory otherwise won't carry across\n internal links.\n\n Re: http://www.microsoft.com/resources/documentation/windows/xp/all/proddocs/en-us/ntcmds_shelloverview.mspx?mfr=true\n \"\"\"\n ## clear decks first so we can tell if image made or not\n try:\n os.remove(pdf_path)\n except Exception:\n pass\n rel_url = os.path.split(url)[1]\n cd_path = os.path.split(html_path)[0]\n if mg.PLATFORM == mg.WINDOWS: ## using Pyinstaller\n cmd_make_pdf = (\n f'cd \"{cd_path}\" && '\n f'\"{export_output.EXE_TMP}\\\\wkhtmltopdf.exe\" '\n f'{width} {height} \"{rel_url}\" \"{pdf_path}\"')\n elif mg.PLATFORM == mg.MAC:\n cmd_make_pdf = (\n f'cd \"{cd_path}\" && '\n f'\"{mg.MAC_FRAMEWORK_PATH}/wkhtmltopdf\" '\n f'{width} {height} \"{rel_url}\" \"{pdf_path}\"')\n elif mg.PLATFORM == mg.LINUX:\n cmd_make_pdf = f'wkhtmltopdf {width} {height} \"{url}\" \"{pdf_path}\"'\n else:\n raise Exception('Encountered an unexpected platform!')\n ## wkhtmltopdf uses stdout to actually output the PDF - a good feature but stuffs up reading stdout for message\n if debug: print(f'cmd_make_pdf: {cmd_make_pdf}')\n export_output.shellit(cmd_make_pdf)\n if not os.path.exists(pdf_path):\n raise Exception(\n f\"wkhtmltopdf didn't generate error but {pdf_path} not made \"\n f'nonetheless. cmd_make_pdf: {cmd_make_pdf}')\n if debug: print(f'Initial processing of {html_path} complete')\n except Exception as e:\n raise Exception(\n f'get_raw_pdf command failed: {cmd_make_pdf}. Orig error: {b.ue(e)}')\n return pdf_path",
"def testHTML(self):\n\n html = self.E.html()",
"def pdf2split_html(pdf, saveto, left=0, right=0, top=0, bottom=0, res=100):\n print(\"- Opening pdf file: \", pdf)\n with(wand.image.Image(filename=pdf, resolution=res)) as document:\n print(\"- getting pages\")\n pages=document.sequence\n n_pages=len(pages)\n width, height, _, _ = pages[0].page\n mid = width//2\n html = []\n\n print(\"- creating output dir\")\n if not os.path.exists(saveto):\n os.makedirs(saveto)\n\n print(\"- splitting pages\")\n for i, page in enumerate(pages):\n left_side = page[left:mid, top:height-bottom]\n right_side = page[mid:width-right, top:height-bottom]\n left_side.save(filename=os.path.join(saveto, \"{:03d}_a.jpg\".format(i)))\n right_side.save(filename=os.path.join(saveto, \"{:03d}_b.jpg\".format(i)))\n\n # Append these two images to the html page\n html.append(\"<img src='{0:03d}_a.jpg'/><br><img src='{0:03d}_b.jpg'/><br>\".format(i))\n\n print(\"- creating html page\")\n with open(os.path.join(saveto, \"index.html\"), mode = \"w\") as textFile:\n html = \"\\n\".join(html)\n textFile.write(html)\n print(\"- DONE!\")",
"def test_render_pdf(self):\n charname = \"ChupStudent\"\n bad_char_name = \"Rumplestiltskin\" # pylint: disable=unused-variable\n config.OUTDIR = \".\"\n retval = -1\n expected_out_name = GAMEBASE + \"/Charsheets/\" + charname + \"_Charsheet.pdf\"\n retval = name_pdfs.render_pdf(charname, \"COS_Student_Adopted.tex\")\n self.assertEqual(retval, 0)\n self.assertTrue(\n Path.exists(Path(expected_out_name)),\n f\"Failed for expected output name {expected_out_name}\",\n )\n retval = -1\n for badname in self.bad_filenames:\n with self.subTest(badname=badname):\n try:\n retval = name_pdfs.render_pdf(charname, badname, draft=True)\n except TypeError:\n retval = 1\n except FileNotFoundError:\n retval = 1\n except subprocess.CalledProcessError:\n retval = 1\n else:\n self.fail()\n finally:\n self.assertNotEqual(retval, 0)",
"def preview():\r\n html = create_html_report()\r\n return html",
"def test_repr_html_(curve):\n html = curve._repr_html_()\n assert html[77] == '<'",
"def test_get_html(self):\r\n _html = self.peer_grading.get_html()",
"def benchmark(extract_size=800):\n random_file = random_html_file()\n with open(join(DATA_PATH, random_file), 'r') as f:\n html_string = f.read()\n\n # GOOSE\n try:\n g = Goose({'browser_user_agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:61.0) Gecko/20100101 Firefox/61.0',\n 'enable_image_fetching': False})\n goose_article = g.extract(raw_html=html_string)\n goose_result = goose_article.cleaned_text\n except:\n goose_result = ' Goose error.'\n\n # EATIHT\n try:\n eatiht_result = eatiht.extract(html_string)\n except:\n eatiht_result = ' Eatiht error.'\n\n # DRAGNET\n\n try:\n dragnet_result = dragnet.extract_content(html_string)\n except Exception as e:\n dragnet_result = ' Dragnet error: ' + str(e)\n\n # LIBEXTRACT\n\n try:\n textnodes = list(libextract.api.extract(html_string))\n libextract_result = textnodes[0].text_content()\n except:\n libextract_result = ' Libextract error.'\n\n # BOILERPIPE (CanolaExtractor)\n\n try:\n extractor = Extractor(\n extractor='CanolaExtractor', html=html_string)\n boilerpipe_result = extractor.getText()\n except:\n boilerpipe_result = ' Boilerpipe error.'\n\n # NEWSPAPER\n\n try:\n article = Article('url')\n article.download(input_html=html_string)\n article.parse()\n print('Auteurs:', article.authors)\n print('Date de publication:', article.publish_date)\n newspaper_result = article.text\n except:\n newspaper_result = ' Newspaper error.'\n\n # JUSTEXT\n\n try:\n paragraphs = justext.justext(\n html_string, justext.get_stoplist(\"French\"))\n print('PARAGRAPHS')\n for p in paragraphs:\n if not p.is_boilerplate:\n print(p.text)\n justext_result = '\\n'.join(\n paragraph.text for paragraph in paragraphs if not paragraph.is_boilerplate)\n print('JUSTEXT_RESULT', justext_result)\n\n except Exception as e:\n justext_result = ' Justext error: ' + str(e)\n print(justext_result)\n\n # Results\n\n try:\n # finds the url associated with the file in a \"filename-url\" csv\n with open('./data/urls.csv', 'r') as csvfile:\n\n urls = dict((line['id'], line['url'])\n for line in csv.DictReader(csvfile))\n url = urls[random_file[:-5]]\n\n print('\\n\\n >>> URL n.' + random_file[:-5] + ' : ' + url)\n except:\n print('\\n\\n (URL of the html file not found. To print the associated URL, please provide a urls.csv file featuring filename & url in /data)')\n # webbrowser.open(url, autoraise=False)\n path = abspath('temp.html')\n local_url = 'file://' + path\n with open(path, 'w') as f:\n f.write(html_string)\n webbrowser.open(local_url)\n\n # print('\\n\\n /// GOOSE /// \\n')\n # print(goose_result[:extract_size])\n # print('\\n\\n /// EATIHT /// \\n')\n # print(eatiht_result[:extract_size])\n print('\\n ------ [[DRAGNET]] ------',\n len(dragnet_result), 'caractères\\n')\n print(dragnet_result[:extract_size] +\n '\\n...\\n' + dragnet_result[-extract_size:])\n print('\\n ------ [[NEWSPAPER]] ------',\n len(newspaper_result), 'caractères\\n')\n print(newspaper_result[:extract_size] +\n '\\n...\\n' + newspaper_result[-extract_size:])\n print('\\n ------ [[JUSTEXT]] ------',\n len(justext_result), 'caractères\\n')\n print(justext_result[:extract_size] +\n '\\n...\\n' + justext_result[-extract_size:])\n # print('\\n\\n /// LIBEXTRACT /// \\n')\n # print(libextract_result[:extract_size])\n # print('\\n\\n /// BOILERPIPE (CanolaExtractor) /// \\n\\n')\n # print(boilerpipe_result[:extract_size])\n # print('\\n\\n')\n return(url)",
"def test_weblogo(self):\n self.m.weblogo(os.devnull)",
"def test_weblogo(self):\n self.m.weblogo(os.devnull)",
"def test_weblogo(self):\n self.m.weblogo(os.devnull)",
"def test_page(self):\n self.write(self.ASCII_DC2, 'T')\n self.timeout_set(\n self._dot_print_time * 24 * 26 + # 26 lines w/text (ea. 24 dots high)\n self._dot_feed_time *\n (6 * 26 + 30)) # 26 text lines (feed 6 dots) + blank line",
"def create_pdf(html, options):\n\n # TODO: we will change this path, or use an other library for converting PDF!\n # TODO: otherwise just say that wkhtmltopdf needs to be pre-installed (and how) and added to windows path\n path_wkthmltopdf = \"C:/Program Files/wkhtmltopdf/bin/wkhtmltopdf.exe\"\n config = pdfkit.configuration(wkhtmltopdf=path_wkthmltopdf)\n\n pdfkit.from_file(html, html.replace(\".html\", \".pdf\"), configuration=config, options=options)\n return html + \".pdf\"",
"def handle_failed_plot(htmlfile, header, qatype):\n import sys\n import traceback\n lines = traceback.format_exception(*sys.exc_info())\n msg = f'ERROR generating {htmlfile}\\n' + ''.join(lines)\n print(msg)\n print('Proceeding with making other plots')\n pc = write_placeholder_html(\n htmlfile, header, \"PER_CAMFIBER\", message=msg)\n return pc",
"def _huge_math_page_texhtml( env_dict ):\n wiki_xml_math_output = env_dict[\"wiki\"][\"big_xml\"]\n #wiki_xml_math_output = env_dict[\"wiki\"][\"xml_math_output_big\"]\n wiki_xml_math_output = env_dict[\"wiki\"][\"xml_math_output_test\"]\n\n # load wiki dump\n #\n wiki_page_dumper = dump.pager(wiki_xml_math_output,\n env_dict[\"pager\"][\"delimiter\"],\n env_dict[\"pager\"][\"buffer\"])\n\n from HTMLParser import HTMLParser\n\n ht = HTMLParser()\n\n titles = []\n title_pattern = re.compile(env_dict[\"pager\"][\"re_title\"], re.DOTALL)\n uniq = set()\n\n # def do_texhtml( page ):\n # total = 0\n # for r in re.compile(u\"<.*?texhtml.*?>(.*?)</.*?>\").finditer(page):\n # found = True\n # total += 1\n # #html = r.group()\n # #msg = u\"%s\\n\\t%s\\n\\t%s\" % (ht.unescape(html), html, r.group(1))\n # norm = converters.latex.normalise(r.group(1).strip())\n # if not norm in uniq:\n # uniq.add(norm)\n # msg = ht.unescape(r.group(1)).replace(u\" \", \" \"). \\\n # replace(u\"<sub>\", u\"_\"). \\\n # replace(u\"<sup>\", u\"^\"). \\\n # replace(u\"<var >\", u\" \")\n # logger.info(msg)\n # return total\n\n def do_title( page ):\n try:\n title = title_pattern.search(page).group(1)\n titles.append(title)\n except:\n logger.warning(u\"Could not parse title [%s]\", page[:500])\n\n # try to load pickled mathml (ok/fail)\n # <span class="texhtml">?</span>\n total = 0\n total_pages = 0\n pages_done = 0\n for pages_done, page in enumerate(wiki_page_dumper.pages(templates.htmltemplate)):\n\n if pages_done % 100000 == 0:\n logger.info(u\"Total formulas: %s, On pages: %s, Unique: %s, Done [%s]\" %\n ( total, total_pages, len(uniq), pages_done ))\n do_title(page)\n # found = do_texhtml( page )\n # if found > 0:\n # total_pages += 1\n\n if len(titles) > 0:\n with codecs.open(\"all.titles\", mode=\"w+\", encoding=\"utf-8\") as fout:\n for title in titles:\n fout.write(title + \"\\n\")\n print \"Pages done: %s, Total formulas: %s, On pages: %s, Unique: %s\" % \\\n ( pages_done, total, total_pages, len(uniq) )",
"def html2pdf(html_filename, output_filename=None, **options):\n\n if not output_filename:\n output_filename = newTempfile(suffix='.pdf')\n\n if not pdfreactor_available:\n raise RuntimeError(\"The external 'pdfreactor' converter isn't available\")\n\n cmd = '%s \"pdfreactor\" \"%s\" \"%s\"' % \\\n (execution_shell, html_filename, output_filename)\n \n status, output = runcmd(cmd)\n if status != 0:\n raise ConversionError('Error executing: %s' % cmd, output)\n return dict(output_filename=output_filename,\n status=status,\n output=output)",
"def test_html_export_roundtrip(self):\r\n module_store = modulestore('direct')\r\n content_store = contentstore()\r\n\r\n import_from_xml(module_store, 'common/test/data/', ['toy'])\r\n\r\n course_id = SlashSeparatedCourseKey('edX', 'toy', '2012_Fall')\r\n\r\n # Export the course\r\n root_dir = path(mkdtemp_clean())\r\n export_to_xml(module_store, content_store, course_id, root_dir, 'test_roundtrip')\r\n\r\n # Reimport and get the video back\r\n import_from_xml(module_store, root_dir)\r\n\r\n # get the sample HTML with styling information\r\n html_module = module_store.get_item(course_id.make_usage_key('html', 'with_styling'))\r\n self.assertIn('<p style=\"font:italic bold 72px/30px Georgia, serif; color: red; \">', html_module.data)\r\n\r\n # get the sample HTML with just a simple <img> tag information\r\n html_module = module_store.get_item(course_id.make_usage_key('html', 'just_img'))\r\n self.assertIn('<img src=\"/static/foo_bar.jpg\" />', html_module.data)",
"def to_pdf(self, wkhtmltopdf: str, f, output_file: Optional[str] = None):\n if output_file is None:\n output_file = \"-\"\n html = self(f)\n with tempfile.NamedTemporaryFile(\"wb\", suffix=\".html\") as fd:\n html.write(fd)\n fd.flush()\n res = subprocess.run([wkhtmltopdf, fd.name, output_file], stdin=subprocess.DEVNULL, capture_output=True)\n if res.returncode != 0:\n raise RuntimeError(\"%s exited with error %d: stderr: %s\", self.wkhtmltopdf, res.returncode, res.stderr)\n if output_file == \"-\":\n return res.stdout",
"def test_write_rgba(tmp_path):\n\n temp_file = str(tmp_path / \"temp.webp\")\n\n pil_image = Image.new(\"RGBA\", (10, 10), (255, 0, 0, 20))\n pil_image.save(temp_file)\n\n if _webp.WebPDecoderBuggyAlpha():\n return\n\n with Image.open(temp_file) as image:\n image.load()\n\n assert image.mode == \"RGBA\"\n assert image.size == (10, 10)\n assert image.format == \"WEBP\"\n image.load()\n image.getdata()\n\n # Early versions of WebP are known to produce higher deviations:\n # deal with it\n if _webp.WebPDecoderVersion() <= 0x201:\n assert_image_similar(image, pil_image, 3.0)\n else:\n assert_image_similar(image, pil_image, 1.0)",
"def test_no_css_option(self):\n f1 = self.write_file(\"foobar\")\n f2 = self.write_file(\"foobarbaz\")\n out = io.BytesIO()\n ghdiff.main([f1, f2, \"--no-css\"], stdout=out)\n output = out.getvalue()\n self.assertFalse(b\"<style\" in output)",
"def exportTable(self):\n\t\tself.pdf = \tself.dir + \"/application.pdf\"\n\t\tpdf = pisa.CreatePDF(\n\t\t\tfile(self.html, \"r\" ),\n\t\t\tfile(self.pdf, \"wb\")\n\t\t\t)",
"def test_invalid_rule(self):\n html = '<div class=\"pink\">test</div>'\n css = '.pink { opacity: 0.8; }'\n expected = '<div class=\"pink\">test</div>'\n result = inline_css(html, css, pretty_print=False)\n self.assertEqual(expected, result)",
"def convert_pdf_to_web(input_file, quality=90, resolution=150):\n input_file = Path(input_file)\n input_file.resolve(strict=True) # Raises FileNotFound\n output_file, no_change = get_output_file(input_file, 'WEB')\n if no_change:\n return output_file\n\n rgb_profile = Path(__file__).parent / 'sRGB.icc'\n if not rgb_profile.exists():\n msg = f'Color profile \"{rgb_profile.name}\" is missing'\n raise RuntimeError(msg)\n args = [\n GHOSTSCRIPT,\n '-q',\n '-dColorConversionStrategy=/DeviceRGB',\n '-dColorConversionStrategyForImages=/DeviceRGB',\n '-dBATCH',\n '-dNOPAUSE',\n '-sDEVICE=pdfwrite',\n '-dConvertCMYKImagesToRGB=true',\n '-dDownsampleColorImages=true',\n '-dDownsampleGrayImages=true',\n '-dDownsampleMonoImages=true',\n f'-sDefaultRGBProfile={rgb_profile}',\n f'-dJPEGQ={quality}',\n f'-dColorImageResolution={resolution}',\n f'-dGrayImageResolution={resolution}',\n f'-dMonoImageResolution={resolution}',\n '-o',\n output_file,\n input_file,\n ]\n subprocess.run(map(str, args))\n logger.debug(\n f'{input_file} ({input_file.stat().st_size}) -> '\n f'{output_file} ({output_file.stat().st_size})'\n )\n return output_file",
"def render(self, output_name, wide=False):\n\n html = self.html(wide)\n print 'You can ignore the GLib-Gobject errors (if they occur)'\n if wide:\n HTML(string=html).write_pdf(\n output_name, stylesheets=[CSS(string=esCSS.replace(\"8.8cm\", \"18cm\"))])\n else:\n HTML(string=html).write_pdf(\n output_name, stylesheets=[CSS(string=esCSS)])",
"def test_failing_rendering(self):\n with self.assertRaisesMessage(\n LatexConversionException, \"Couldn't compile LaTeX document\"\n ):\n render_latex_to_image(r\"invalid $ LaTeX\")",
"def convert_html():\n return",
"def test_alpha_trace():\n # Ensure the functions are not run eagerly\n run_eager(False)\n setname = LIST_PDF[0]\n # Do it for one single replica\n pdfset = f\"{setname}/0\"\n pex = pdf.mkPDF(pdfset, f\"{DIRNAME}/\")\n pex.alphas_trace()\n # Do it for many replicas\n pex2 = pdf.mkPDFs(setname, [0, 1, 2])\n pex2.alphas_trace()"
] | [
"0.5745884",
"0.57315564",
"0.5591224",
"0.5549338",
"0.5359335",
"0.5307319",
"0.5298692",
"0.52929",
"0.52359253",
"0.52262336",
"0.5177624",
"0.517652",
"0.517652",
"0.517652",
"0.51568556",
"0.5088497",
"0.50467056",
"0.5041735",
"0.502852",
"0.5027255",
"0.4982086",
"0.4949871",
"0.49393764",
"0.49231285",
"0.49191383",
"0.4916028",
"0.4875366",
"0.4864283",
"0.485091",
"0.48402455"
] | 0.8207992 | 0 |
Test conversion of html with an additional table of contents | def testConvertHtmlWithTableOfContent(self):
self._testBase(
"data/test_with_toc.html",
toc=True,
xsl_style_sheet_data=b64encode(open("data/test_toc.xsl").read()),
)
# XXX how to check for table of content presence ? | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_html_output(self):\n pass",
"def test_table_html():\n in_json = {\n \"pandoc-api-version\": [1, 17, 5, 1],\n \"meta\": {\n \"$$references\": {\n \"t\": \"MetaMap\",\n \"c\": {\n \"tbl:id\": {\n \"t\": \"MetaMap\",\n \"c\": {\n \"type\": {\"t\": \"MetaString\", \"c\": \"Table\"},\n \"number\": {\"t\": \"MetaString\", \"c\": \"1\"},\n },\n }\n },\n }\n },\n \"blocks\": [\n {\n \"t\": \"Para\",\n \"c\": [\n {\"t\": \"Str\", \"c\": \"Some\"},\n {\"t\": \"Space\"},\n {\"t\": \"Str\", \"c\": \"text\"},\n ],\n },\n {\n \"t\": \"Div\",\n \"c\": [\n [\"tbl:id\", [\"labelled-Table\"], []],\n [\n {\n \"t\": \"Table\",\n \"c\": [\n [{\"t\": \"Str\", \"c\": \"Caption.\"}, {\"t\": \"Space\"}],\n [{\"t\": \"AlignDefault\"}, {\"t\": \"AlignDefault\"}],\n [0, 0],\n [\n [{\"t\": \"Plain\", \"c\": [{\"t\": \"Str\", \"c\": \"a\"}]}],\n [{\"t\": \"Plain\", \"c\": [{\"t\": \"Str\", \"c\": \"b\"}]}],\n ],\n [\n [\n [{\"t\": \"Plain\", \"c\": [{\"t\": \"Str\", \"c\": \"1\"}]}],\n [{\"t\": \"Plain\", \"c\": [{\"t\": \"Str\", \"c\": \"2\"}]}],\n ],\n [\n [{\"t\": \"Plain\", \"c\": [{\"t\": \"Str\", \"c\": \"4\"}]}],\n [{\"t\": \"Plain\", \"c\": [{\"t\": \"Str\", \"c\": \"5\"}]}],\n ],\n ],\n ],\n }\n ],\n ],\n },\n ],\n }\n out_string = apply_filter(\n in_json, format_label_elements.main, \"html\", in_format=\"json\"\n )\n\n assert out_string.strip() == \"\\n\".join(\n [\n \"<p>Some text</p>\",\n '<a id=\"tbl:id\" class=\"anchor-link\" name=\"#tbl:id\">',\n \"<table>\",\n \"<caption>Caption. </caption>\",\n \"<thead>\",\n '<tr class=\"header\">',\n \"<th>a</th>\",\n \"<th>b</th>\",\n \"</tr>\",\n \"</thead>\",\n \"<tbody>\",\n '<tr class=\"odd\">',\n \"<td>1</td>\",\n \"<td>2</td>\",\n \"</tr>\",\n '<tr class=\"even\">',\n \"<td>4</td>\",\n \"<td>5</td>\",\n \"</tr>\",\n \"</tbody>\",\n \"</table>\",\n \"</a>\",\n ]\n )",
"def testHTML(self):\n\n html = self.E.html()",
"def convert_html():\n return",
"def testBeautifulSoup(self):\n\n textractor = Textractor(tika=False)\n text = textractor(Utils.PATH + \"/tabular.csv\")\n self.assertEqual(len(text), 125)",
"def test_format_html2text(self):\n html = (\n \"<div>Lorem Ipsum</div>\"\n \"Lorem ipsum dolor sit amet, consectetur adipiscing elit.\"\n \"Morbi eleifend magna sit amet sem gravida sollicitudin.\"\n \"<br/>Vestibulum metus ipsum, varius in ultricies eget, vulputate eu felis.\"\n )\n text = format_html2text(self.report, html)\n self.assertEqual(\n text,\n (\n \"Lorem Ipsum\"\n \"\\n\\n\"\n \"Lorem ipsum dolor sit amet, consectetur adipiscing elit.\"\n \"Morbi eleifend magna sit amet sem gravida sollicitudin. \\n\"\n \"Vestibulum metus ipsum, varius in ultricies eget, vulputate eu felis.\\n\"\n ),\n )",
"def test_as_table(self, output=None, form=None):\n setup = {'start_tag': '<tr><th>', 'label_end': '</th><td>', 'input_end': '<br>', 'end_tag': '</td></tr>'}\n setup['as_type'] = as_type = 'as_table'\n setup['form'] = form or self.form\n output = output or setup['form'].as_table().strip()\n expected = self.get_expected_format(setup)\n errors = []\n if output != expected:\n errors = self.log_html_diff(expected, output, as_type=as_type, full=False)\n message = \"Suite {}, had {} lines of HTML errors for {} \".format(self.__class__.__name__, len(errors), as_type)\n self.assertNotEqual('', output)\n self.assertEqual(expected, output, message)",
"def test_html(self):\n \n tags = (('<form',1),\n ('<input',6),\n ('type=\"text\"',3),\n ('type=\"email\"',1),\n ('type=\"submit\"',1))\n \n for text, count in tags:\n with self.subTest():\n self.assertContains(self.resp, text, count)",
"def test_gen_diff_html(mock_diff):\n from_title = \"from_title_content\"\n from_lines = \"left content here\"\n to_title = \"to_title_content\"\n to_lines = \"different content on the right here\"\n mock_diff.return_value.make_table.return_value = \"<t>{} {}</t>\".format(\n from_lines, to_lines\n )\n\n html = cmds._gen_diff_html(from_title, [from_lines], to_title, [to_lines])\n\n assert html.count(from_title) == 2\n assert html.count(from_lines) == 1\n assert html.count(to_title) == 2\n assert html.count(to_lines) == 1",
"def test_html(self):\n tags = (('<input', 3),\n ('<span', 1),\n ('<button', 1))\n\n for text, count in tags:\n with self.subTest():\n self.assertContains(self.resp, text, count)",
"def parse_tables_from_html(html, md_file):\n soup = BeautifulSoup(html, features=\"lxml\")\n table_contents = \"\"\n for table in soup.select('table'):\n try:\n table_content = process_table(table)\n table_contents += table_content\n except:\n continue\n\n if not table_contents:\n print(\"NO VALID TABLE\")\n return\n\n # write to the file\n with codecs.open(md_file, mode='w', encoding='utf-8') as file:\n file.write(table_contents)\n print(\"The Table is saved in\" + md_file)",
"def test_content():\n # PREPARE\n expected_f = open(\n 'tests/pages/expected/stepanenkoartem-github-io.html',\n 'rb',\n )\n expected_dom = BeautifulSoup(\n expected_f.read(),\n 'html.parser',\n )\n\n actual_f = open(\n os.path.join(TEMP_DIR, path.for_page(URL)),\n )\n actual_dom = BeautifulSoup(actual_f, 'html.parser')\n\n # CHECK\n assert actual_dom.decode() == expected_dom.decode()",
"def test_HTMLChunker(self):\n text = \"\"\"hello<html><head><title>my title</title></head><body>this is a\n <b>simple</b> HTML document for <p> test<i>ing</i> purposes</p>.\n It < contains > various <-- special characters.\n \"\"\"\n tkns = get_tokenizer(\"en_US\", chunkers=(HTMLChunker,))(text)\n out = [t for t in tkns]\n exp = [(\"hello\", 0), (\"my\", 24), (\"title\", 27), (\"this\", 53), (\"is\", 58),\n (\"a\", 61), (\"simple\", 82), (\"HTML\", 93), (\"document\", 98), (\"for\", 107),\n (\"test\", 115), (\"ing\", 122), (\"purposes\", 130), (\"It\", 160),\n (\"contains\", 165), (\"various\", 176), (\"special\", 188),\n (\"characters\", 196)]\n self.assertEqual(out, exp)\n for (word, pos) in out:\n self.assertEqual(text[pos:pos + len(word)], word)",
"def test_si_sample_html_partial(self):\n sample = load_sample('si-game.sample.html')\n doc = Document('http://sportsillustrated.cnn.com/baseball/mlb/gameflash/2012/04/16/40630_preview.html',\n sample)\n res = doc.get_clean_article()\n self.assertEqual('<div><div class=\"', res[0:17])",
"def test_gettesttools_html(self):\n pass",
"def _test_html_content(self, xblock, expected_section_tag, expected_breadcrumbs):\n html = self.get_page_html(xblock)\n self.assertIn(expected_section_tag, html)\n self.assertRegex(html, re.compile(expected_breadcrumbs, re.DOTALL))",
"def test_export_html(self):\r\n resp = self.client.get_html(self.url)\r\n self.assertEquals(resp.status_code, 200)\r\n self.assertContains(resp, \"Export My Course Content\")",
"def test_prep_fields_called_html_output(self):\n pass",
"def _get_markup(self):\n return make_soup(self.driver.find_element_by_id(\"contestDetailTable\").get_attribute(\"innerHTML\"))",
"def from_html(self, content):\r\n pass",
"def test_mocked_get_simpleHtml(self):\n c = Client()\n response = c.get(\"/apimock/mocked/mocked_get\")\n self.assertEqual(response.status_code, 200)\n self.assertIn(\n '<table border=\"1\"><tr><th>value</th><td>testValue</td></tr></table>', response.content)",
"def test_html_export_roundtrip(self):\r\n module_store = modulestore('direct')\r\n content_store = contentstore()\r\n\r\n import_from_xml(module_store, 'common/test/data/', ['toy'])\r\n\r\n course_id = SlashSeparatedCourseKey('edX', 'toy', '2012_Fall')\r\n\r\n # Export the course\r\n root_dir = path(mkdtemp_clean())\r\n export_to_xml(module_store, content_store, course_id, root_dir, 'test_roundtrip')\r\n\r\n # Reimport and get the video back\r\n import_from_xml(module_store, root_dir)\r\n\r\n # get the sample HTML with styling information\r\n html_module = module_store.get_item(course_id.make_usage_key('html', 'with_styling'))\r\n self.assertIn('<p style=\"font:italic bold 72px/30px Georgia, serif; color: red; \">', html_module.data)\r\n\r\n # get the sample HTML with just a simple <img> tag information\r\n html_module = module_store.get_item(course_id.make_usage_key('html', 'just_img'))\r\n self.assertIn('<img src=\"/static/foo_bar.jpg\" />', html_module.data)",
"def html_content(data, title):\r\n # add html header into text\r\n html_text = \"\"\"\r\n <!DOCTYPE html>\r\n <html>\r\n <head>\r\n <style>\r\n table {\r\n width: 25%;\r\n font-family: arial, sans-serif;\r\n border-collapse: collapse;\r\n }\r\n\r\n tr:nth-child(odd) {\r\n background-color: #dddddd;\r\n }\r\n\r\n td, th {\r\n border: 1px solid #dddddd;\r\n text-align: left;\r\n padding: 8px;\r\n }\r\n </style>\r\n </head>\r\n \"\"\"\r\n\r\n # Starting body of html\r\n html_text += \"<body>\"\r\n\r\n # adding title\r\n html_text += \"<h2> {} </h2>\".format(title)\r\n\r\n # Adding table content\r\n html_text += \"<table>\"\r\n # Each row in table\r\n for index, row in enumerate(data):\r\n #Each collumn in table\r\n html_text += \"<tr>\"\r\n if index == 0:\r\n # Table Head\r\n for name in row:\r\n html_text += \"<th>{}</th>\".format(name)\r\n else:\r\n # Table body\r\n for element in row:\r\n html_text += \"<td>{}</td>\".format(element)\r\n # Exit collumn\r\n html_text += \"</tr>\"\r\n # Exit row\r\n html_text += \"</table>\"\r\n # End of html\r\n html_text += \"\"\"</body>\r\n </html>\"\"\"\r\n # Return\r\n return html_text",
"def test_drop_html():\n cleaner = TextCleaner()\n assert cleaner.transform([[\"<table>test</table>\"]])[\"corpus\"][0] == \"test\"\n assert not cleaner.drops[\"html\"].dropna().empty",
"def to_html(content):\n headers = content[0].keys()\n rows = (r.values() for r in content)\n return html_table(headers, rows)",
"def test_clean_storytranslation_html(self):\n unclean_summary = (\"<p>This is the first paragraph</p>\"\n \"<script src=\\\"/static/js/fake.js\\\"></script>\")\n unclean_call = (\"<blockquote>Quote in call</blockquote>\"\n \"<iframe src=\\\"/fake/\\\"></iframe>\")\n story = create_story(title=\"Test Story\", summary=unclean_summary,\n call_to_action=unclean_call, status='draft')\n story.save()\n story = Story.objects.get(story_id=story.story_id)\n # Test that the invalid tags aren't in the saved field values.\n # Right now, we don't care whether the tags were escaped or removed\n self.assertNotIn(\"<script\", story.summary)\n self.assertNotIn(\"<iframe\", story.call_to_action)",
"def testConvertHtmlWithScriptToPdf(self):\n self._testBase(\"data/test_with_script.html\")",
"def test_html_structure(self):\n self.assertContains(self.response, '<form', 1)\n self.assertContains(self.response, '<input', 3)\n #3 pois são 2 filefield mais o csrf\n self.assertContains(self.response, 'type=\"file\"', 1)\n self.assertContains(self.response, 'type=\"submit\"', 1)",
"def test_make_HTML_table(self):\r\n\r\n # test pie charts\r\n fracs_labels_other, fracs_labels, all_counts, other_cat, red, other_frac = \\\r\n get_fracs(self.counts1, 5, 10, 'pie')\r\n\r\n img_data = make_HTML_table(\"Phylum\", other_frac, 10, red, other_cat,\r\n fracs_labels_other, fracs_labels,\r\n self.dir_path, all_counts, 1, self.prefs,\r\n self.color_prefs, 'black', 'white', 'pie',\r\n 'Test1',\r\n self.generate_image_type, self.plot_width,\r\n self.plot_height, self.bar_width, self.dpi, 0,\r\n 'categorical', False, False)\r\n\r\n self.assertEqual(len(img_data), 2)\r\n\r\n # test area charts\r\n fracs_labels_other, fracs_labels, all_counts, other_cat, red, other_frac = \\\r\n get_fracs(self.counts1, 5, 10, 'area')\r\n\r\n img_data = make_HTML_table(\"Phylum\", other_frac, 10, red, other_cat,\r\n fracs_labels_other, fracs_labels,\r\n self.dir_path, all_counts, 1, self.prefs,\r\n self.color_prefs, 'black', 'white', 'pie',\r\n 'Test1',\r\n self.generate_image_type, self.plot_width,\r\n self.plot_height, self.bar_width, self.dpi, 0,\r\n 'categorical', False, False)\r\n\r\n self.assertEqual(len(img_data), 2)\r\n\r\n # test bar charts\r\n fracs_labels_other, fracs_labels, all_counts, other_cat, red, other_frac = \\\r\n get_fracs(self.counts1, 5, 10, 'bar')\r\n\r\n img_data = make_HTML_table(\"Phylum\", other_frac, 10, red, other_cat,\r\n fracs_labels_other, fracs_labels,\r\n self.dir_path, all_counts, 1, self.prefs,\r\n self.color_prefs, 'black', 'white', 'pie',\r\n 'Test1',\r\n self.generate_image_type, self.plot_width,\r\n self.plot_height, self.bar_width, self.dpi, 0,\r\n 'categorical', False, False)\r\n\r\n self.assertEqual(len(img_data), 2)\r\n self._paths_to_clean_up = [\"/tmp/qiimewebfiles/charts/\" + f\r\n for f in listdir(\"/tmp/qiimewebfiles/charts\")]",
"def assert_studio_view_valid_html(block, html):\r\n pass"
] | [
"0.7307433",
"0.70207417",
"0.692731",
"0.6697066",
"0.6632389",
"0.66201836",
"0.647758",
"0.64540756",
"0.63525",
"0.6324208",
"0.62797827",
"0.62352824",
"0.6216845",
"0.61915964",
"0.61899865",
"0.6164262",
"0.61589146",
"0.61560684",
"0.61545765",
"0.6143673",
"0.6142696",
"0.6141647",
"0.61110747",
"0.60936517",
"0.60890543",
"0.60862184",
"0.60817605",
"0.6072297",
"0.604435",
"0.60430926"
] | 0.8216785 | 0 |
The worst performance score. | def worst_score(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def personal_best(scores):\n return max(scores)",
"def get_high_score(self) -> float:\n return max(self._scores)",
"def personal_best(scores: list) -> int:\n return max(scores)",
"def getHighScore(self):\n return max(self.scores)",
"def personal_best(scores):\n# return sorted(scores, reverse=True)[0]\n return max(scores)",
"def max_score(self):\r\n return self.lcp.get_max_score()",
"def max_score(self):\n return self.raw_possible",
"def max_score(self):\r\n max_score = None\r\n if self.check_if_done_and_scored():\r\n max_score = self._max_score\r\n return max_score",
"def max_score(self):\n return self.points",
"def max_score_test(self):\n max_score_tuple = self.results.max_score(molecules=[\"DDSPDLPK\"])\n assert max_score_tuple[0] == 1 # score\n assert max_score_tuple[3].scaling_factor == 100 # intensity\n\n assert self.results.max_score(molecules=[\"_DDSPDLPK_\"]) == [0, None, None, None]\n return",
"def max_score(self):\n return max(self._extract_set('score') or [0])",
"def get_max_score(self):\r\n return sum(self.maxpoints.values())",
"def __get_best_score(scores):\n best = max(scores.items(), key=operator.itemgetter(1))[0]\n print(\"The best classification for this corpus is: \" + str(best))\n return best",
"def test_get_max_score(self):\r\n self.combinedoe.update_task_states()\r\n self.combinedoe.state = \"done\"\r\n self.combinedoe.is_scored = True\r\n max_score = self.combinedoe.max_score()\r\n self.assertEqual(max_score, 1)",
"def get_worst_fitness(self):\n f = min(self.characters, key=operator.attrgetter('fitness'))\n self.worst_fitness = round(f.fitness, 3)",
"def get_best( self ):\n if len(self.listScore) < 1:\n if self.bMinimumIsBest: return 9999,\"Unknown\"\n else: return -1,\"Unknown\"\n return self.listScore[0]",
"def negamax(self):\n if self.check_winner():\n return 1\n elif self.full():\n return 0\n else:\n bestScore = -10\n for r, c in self.empty_cells():\n self.grid[r][c] = self.player\n self.next_player() \n score = -self.negamax()\n if score > bestScore:\n bestScore = score\n self.grid[r][c] = GameModel.EMPTY\n self.next_player()\n return bestScore",
"def get_highscore(self, score):\n scores = list(self.history_score.values())\n \n # Compare current score with the last placing in leaderboard.\n if score > max(scores):\n return 0\n else:\n if score < min(scores):\n return 2\n else:\n return 1",
"def get_best_score_and_time(self):\n\n best_time = 10000\n best_score = 0\n\n for game in self.games:\n if game.status == \"won\":\n if best_time > game.timing:\n best_time = game.timing\n if best_score < game.score:\n best_score = game.score\n\n if best_time == 10000:\n best_time = 0\n\n return (best_score, best_time)",
"def _get_lip_best(self) -> float:\n pass",
"def best(self):\n alpha = -1\n beta = +1\n move = self.__negamax(alpha, beta, tt=DictTT())\n return move[1]",
"def max_e_score(self, entity):\n return float(entity['es_bb'][1])",
"def get_score(self):\n return np.max(self._scores) if self._scores is not None else self._score_history[-1]",
"def scoring(self):\n return -100 if self.loss_condition() else 0",
"def pwm_max_score(self):\n if self.max_score is None:\n score = 0\n for row in self.pwm:\n score += log(max(row) / 0.25 + 0.01)\n self.max_score = score\n \n return self.max_score",
"def best_value(self):\r\n return self._best_value",
"def get_max_score(self):\r\n maxscore = 0\r\n for responder in self.responders.values():\r\n maxscore += responder.get_max_score()\r\n return maxscore",
"def get_bad_score(self):\n logger.debug('Function get_bad_score start')\n\n cur = self.conn.cursor()\n\n # Selecting worst score\n cur.execute(\n \"SELECT id, img_name, img_score FROM sensor_data ORDER BY img_score ASC LIMIT 1\")\n\n # Getting worst score\n row = cur.fetchone()\n\n logger.debug('Function get_bad_score end')\n return row",
"def best_score(scores):\n idx, score = sorted(\n enumerate(scores), key=lambda e: e[1], reverse=scores[0].higher_better\n )[0]\n return (idx + 1, score)",
"def worst(self) -> float:\n return float(self.tsdf.pct_change().min())"
] | [
"0.74220115",
"0.7369095",
"0.73529595",
"0.73461914",
"0.73064435",
"0.7243378",
"0.7224825",
"0.7153096",
"0.71314156",
"0.70326936",
"0.7029879",
"0.69835705",
"0.69788337",
"0.683283",
"0.68274",
"0.68151915",
"0.681463",
"0.6754643",
"0.6749474",
"0.67342293",
"0.6727097",
"0.6707055",
"0.6704922",
"0.6700227",
"0.66372585",
"0.6626587",
"0.66239095",
"0.6607669",
"0.6595707",
"0.65788627"
] | 0.8989782 | 0 |
Function to return whether current performance score is better than current best. This should be implemented. | def is_better(self, curr, best, **kwargs):
score_threshold = kwargs.pop('score_threshold', 1e-3)
relative_eps = 1.0 + score_threshold
return curr >= best*relative_eps | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_best(self, metric: float) -> bool:",
"def is_greater_better(scoring_function):\n if scoring_function in [\n 'accuracy', 'adjusted_rand_score', 'average_precision',\n 'balanced_accuracy','f1', 'f1_macro', 'f1_micro', 'f1_samples',\n 'f1_weighted', 'precision', 'precision_macro', 'precision_micro',\n 'precision_samples','precision_weighted', 'recall',\n 'recall_macro', 'recall_micro','recall_samples',\n 'recall_weighted', 'roc_auc'] + \\\n ['r2', 'neg_median_absolute_error', 'neg_mean_absolute_error',\n 'neg_mean_squared_error']:\n return True\n elif scoring_function in ['median_absolute_error',\n 'mean_absolute_error',\n 'mean_squared_error']:\n return False\n else:\n warnings.warn('The scoring_function: \"{}\" not found; continuing assuming'\n ' greater score is better'.format(scoring_function))\n return True",
"def is_better(self, a: float, best: float) -> bool:\n if self.mode == \"min\" and self.threshold_mode == \"rel\":\n rel_epsilon = 1.0 - self.threshold\n comp = best * rel_epsilon if best >= 0 else best * (1 + self.threshold)\n return a < comp\n\n elif self.mode == \"min\" and self.threshold_mode == \"abs\":\n return a < best - self.threshold\n\n elif self.mode == \"max\" and self.threshold_mode == \"rel\":\n rel_epsilon = self.threshold + 1.0\n return a > best * rel_epsilon\n\n else: # mode == 'max' and epsilon_mode == 'abs':\n return a > best + self.threshold",
"def is_best(self, val) -> bool:\n if self.val is None or (val > self.val):\n self.val = val\n print(\"Updating Best\")\n return True\n else:\n return False",
"def isHighscore(self, score):\r\n score = float(score)\r\n if len(self.scores) < 10:\r\n return True\r\n\r\n lowest = float('inf')\r\n for s in self.scores:\r\n if s.score < lowest:\r\n lowest = s.score\r\n if score > lowest:\r\n return True\r\n else:\r\n return False",
"def best(self):\n\n last = self.timer.times[-1]\n return last != 'DNF' and all(last <= t for t in self.timer.times if t != 'DNF')",
"def is_better_than(self, other):\n return better_candidate(self, other) is self",
"def important_features_(self):\n return self.scores_ > self.score_cutoff_",
"def hasScore(self) -> bool:\n return bool(self.getMatchScore())",
"def _is_fitter(actual_fitness: float, best_fitness: float) -> bool:\n return actual_fitness < best_fitness # important: the general problem is considered\n # to be a minimization problem, thus a lower fitness is better",
"def get_validation_performance(self) -> float:\n return self.best_performance",
"def is_improvement(\n best_value: float,\n current_value: float,\n larger_is_better: bool,\n relative_delta: float = 0.0,\n) -> bool:\n if larger_is_better:\n return current_value > (1.0 + relative_delta) * best_value\n\n # now: smaller is better\n return current_value < (1.0 - relative_delta) * best_value",
"def likely_to_be_offered(self):\n if self.score >= 5:\n return True\n return False",
"def it_got_better(new_metric_val, old_metric_val, metric):\n to_maximize = is_metric_to_maximize(metric)\n if to_maximize:\n got_better = new_metric_val > old_metric_val\n else:\n got_better = new_metric_val < old_metric_val\n return got_better",
"def needsScores(self):\n return self.opt.needsScores()",
"def apply_test_if_better(self):\n\n if self.__genes_test is None or self.__fitness_test is None:\n raise ValueError(\"Test values should not be None.\")\n\n # if test is better\n if self.__fitness_test < self.__fitness:\n self.genes = self.__genes_test\n self.__fitness = self.__fitness_test\n\n self.__genes_test = None\n self.__fitness_test = None\n\n return True\n\n # if original is better\n else:\n self.__genes_test = None\n self.__fitness_test = None\n\n return False",
"def worst_score(self):\r\n pass",
"def _loss_smaller(best_eval_result, current_eval_result):\n default_key = metric_keys.MetricKeys.LOSS\n if not best_eval_result or default_key not in best_eval_result:\n raise ValueError(\n 'best_eval_result cannot be empty or no loss is found in it.')\n\n if not current_eval_result or default_key not in current_eval_result:\n raise ValueError(\n 'current_eval_result cannot be empty or no loss is found in it.')\n\n return best_eval_result[default_key] > current_eval_result[default_key]",
"def __decide(\n self,\n test_loss: float,\n overtrain_epochs: int) -> bool:\n\n if self.min_test_loss is None:\n self.min_test_loss = test_loss\n self.best_model_state_dict = self.model.state_dict()\n return False\n\n overtrained = test_loss > self.min_test_loss\n\n if overtrained:\n self.overtrained_losses.append(test_loss)\n else:\n self.min_test_loss = test_loss\n self.best_model_state_dict = self.model.state_dict()\n self.overtrained_losses = [] # reset overtrained losses to empty\n\n if len(self.overtrained_losses) == overtrain_epochs:\n return True\n else:\n return False",
"def check_performance(self):\n self.lg.debug('Checking performance.')\n avg_up = (sum(self.results_up)) / len(self.results_up)\n avg_down = (sum(self.results_down)) / len(self.results_down)\n if (\n avg_up < self.tolerance * self.up or\n avg_down < self.tolerance * self.down\n ):\n self.bad_performance = True\n else:\n self.bad_performance = False",
"def game_is_tied(self):\n tie_score = False\n if self.my_score == self.opponent_score:\n tie_score = True\n my_moves = self.steps_available(self.loc)\n opponent_moves = self.steps_available(self.opponent_loc)\n if my_moves == 0 and opponent_moves == 0 and tie_score:\n return True\n else:\n penalty = self.penalty_score\n if my_moves == 0 and opponent_moves != 0:\n return (self.my_score - penalty) == self.opponent_score\n elif my_moves != 0 and opponent_moves == 0:\n return self.my_score == (self.opponent_score - penalty)\n else:\n return False",
"def get_best_thres(self, data, label, score_func = f1_score):\n pred_prob = self.model.predict(data)\n best_score = 0\n for i_thres in range(0, 100):\n pred_label = [int(i > (i_thres / 100.0)) for i in pred_prob]\n fs = score_func(label, pred_label)\n if best_score < fs:\n best_score = fs\n self.config.thres = i_thres / 100.0\n print ('best score: %0.2f best_thres: %0.2f' % (best_score, self.config.thres))",
"def is_close_eval(pss_score, actual_rating)-> bool:\n return math.isclose(pss_score, actual_rating, abs_tol=0.05)",
"def is_correct(self):\r\n score_dict = self.get_score()\r\n return score_dict['score'] == score_dict['total']",
"def compare_performance(self):\n\n if self.label_type == \"categorical\":\n self._eval_classifier()\n\n elif self.label_type == \"numerical\":\n self._eval_regressor()\n\n return self.performance_comparison",
"def __gt__(self, other):\n return self.eval_score < other.eval_score",
"def test_if_improving(self, metrics: torch.Tensor) -> bool:\n # convert `metrics` to float, in case it's a zero-dim Tensor\n current = float(metrics)\n\n epoch = self.last_epoch + 1\n self.last_epoch = epoch\n\n if self.is_better(current, self.best):\n self.best = current\n self.num_bad_epochs = 0\n else:\n self.num_bad_epochs += 1\n\n if self.in_cooldown:\n self.cooldown_counter -= 1\n self.num_bad_epochs = 0 # ignore any bad epochs in cooldown\n\n if self.num_bad_epochs > self.patience:\n self.cooldown_counter = self.cooldown\n self.num_bad_epochs = 0\n return True\n return False",
"def check_high_score(self):\r\n if self.stats.score > self.stats.high_score:\r\n self.stats.high_score = self.stats.score\r\n self.prep_placar_score()",
"def personal_best(scores):\n return max(scores)",
"def __get_best_score(scores):\n best = max(scores.items(), key=operator.itemgetter(1))[0]\n print(\"The best classification for this corpus is: \" + str(best))\n return best"
] | [
"0.747881",
"0.69427603",
"0.68955714",
"0.6772527",
"0.67018735",
"0.6515344",
"0.64800745",
"0.6424358",
"0.637624",
"0.6329296",
"0.6233212",
"0.6227178",
"0.62026936",
"0.61884624",
"0.6162716",
"0.6149905",
"0.6149203",
"0.60921496",
"0.6070137",
"0.6038295",
"0.6034611",
"0.59961045",
"0.59882957",
"0.5954604",
"0.59176546",
"0.5917344",
"0.5908308",
"0.5907755",
"0.58996403",
"0.5897191"
] | 0.7845648 | 0 |
prettify the name for pysc2 map lookup | def standardizeMapName(mapName):
newName = os.path.basename(mapName)
newName = newName.split(".")[0]
newName = newName.split("(")[0]
newName = re.sub("[LTE]+$", "", newName)
return re.sub(' ', '', newName, flags=re.UNICODE) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __str__(self):\n tmp = {str(c)+\" (id=\"+str(id(c))+\")\":v for c,v in self.items()}\n return \"ComponentMap(\"+str(tmp)+\")\"",
"def __str__(self) -> str:\n mapping_str = ', '.join(\n f'{variable!s} -> {term!s}'\n for (variable, term) in self.mapping.items()\n )\n return f'{{{mapping_str}}}'",
"def GetPrettyName(self):\r\n return \"Softimage 2011\"",
"def _get_pretty_name(name):\n pretty = ''\n if name.countryName:\n pretty += '/C=' + name.countryName\n if name.stateOrProvinceName:\n pretty += '/ST=' + name.stateOrProvinceName\n if name.localityName:\n pretty += '/L=' + name.localityName\n if name.organizationName:\n pretty += '/O=' + name.organizationName\n if name.organizationalUnitName:\n pretty += '/OU=' + name.organizationalUnitName\n if name.commonName:\n pretty += '/CN=' + name.commonName\n if name.emailAddress:\n pretty += '/email=' + name.emailAddress\n return pretty",
"def diff_name(self):\n return self.__class__.__name__ # .replace(\"Context\",\"\").replace(\"Mapping\",\"\")",
"def get_pretty_name(name):\n return pretty_names.get(name, name)",
"def displayName(self):\r\n return self.tr(\"PDOK Reverse Geocoder\")",
"def get_name():",
"def _GetMapEntryTypeName(field_name: str) -> str:\n capitalized_name_components = map(str.capitalize, field_name.split(\"_\"))\n\n return f\"{''.join(capitalized_name_components)}Entry\"",
"def simplifyOutName(name):\n return \"HLTNav_\" + name.replace(\"HLTNav_\", \"\").replace(\"Trig\", \"\").replace(\"Alg\", \"\")",
"def transform_name_mapping(self) -> pulumi.Output[Mapping[str, str]]:\n return pulumi.get(self, \"transform_name_mapping\")",
"def _pretty_print(self,s):\n if not self.pretty_parameters:\n return s\n else:\n n = s.replace(\"_\",\" \")\n n = n.capitalize()\n return n",
"def name(self):\r\n return \"pdok-reverse-geocoder\"",
"def printname(bruce):",
"def nice_name():\n\n pass",
"def conclusion_title_map(self):\n pass",
"def name(self) -> str:",
"def name(self) -> str:",
"def name(self) -> str:",
"def name(self) -> str:",
"def name(self) -> str:",
"def print_current_mappings(self):\n for mapped_name in self.__mapped_names:\n func = getattr(self, mapped_name)\n name = f'{func.__module__}.{func.__name__}'\n print(f'* {mapped_name} -- {name}')",
"def getName():",
"def getName():",
"def getName():",
"def getName():",
"def getName():",
"def getName():",
"def pretty_name(self) -> str:\n try:\n return self._names_from_attrs('pretty_name')\n except AttributeError: # todo: what exception\n warnings.warn('pretty name not found in metadata, fallback to globals.py')\n if self.__short_name in globals._dataset_pretty_names.keys():\n return globals._dataset_pretty_names[self.__short_name]\n else:\n warnings.warn('pretty name also not found in globals.py, use short name')\n return self.__short_name",
"def s(x):\n return x.name.lower().replace('_', '-')"
] | [
"0.6393425",
"0.60165125",
"0.58980197",
"0.5882158",
"0.58780026",
"0.58731437",
"0.58240795",
"0.58021384",
"0.57964367",
"0.5765265",
"0.57649297",
"0.57419497",
"0.57268083",
"0.57015693",
"0.56951237",
"0.5690049",
"0.5683217",
"0.5683217",
"0.5683217",
"0.5683217",
"0.5683217",
"0.5680206",
"0.56703275",
"0.56703275",
"0.56703275",
"0.56703275",
"0.56703275",
"0.56703275",
"0.56466895",
"0.56451714"
] | 0.62320554 | 1 |
Return the default class hierarchy dictionary. | def default_class_hierarchy_dict():
return {} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def derive_class_hierarchy():\n logger.info('Deriving class hierarchy ...')\n data = statistics.get_json_data('classes')\n\n hierarchy = defaultdict(dict)\n keys = ['i', 's', 'ai', 'as', 'sc', 'sb', 'r']\n\n for cid in data:\n for key in keys:\n if key in data[cid] and data[cid][key]:\n hierarchy[cid][key] = data[cid][key]\n\n statistics.update_json_data('classes/hierarchy', hierarchy)\n statistics.update_split_json_data('classes/hierarchy', hierarchy, 1000)",
"def _get_inherited_dict(self):\n\t\tsubclass_dict = {}\n\t\tfor term in self.terms():\n\t\t\tsubclass_dict[term.id] = [t.id for t in self[term.id].superclasses(with_self=True)]\n\t\treturn(subclass_dict)",
"def default_file_hierarchy_dict():\n return {\n directory(\"include\"): {\n directory(\"with spaces\"): {\n file(\"with spaces.hpp\"): {\n namespace(\"with_spaces\"): {\n function(\"int\", \"value\"): parameters()\n }\n }\n }\n }\n }",
"def _tree():\n return collections.defaultdict(_tree)",
"def node_dictionary():\r\n\r\n classes = node_subclasses(Node)\r\n dictionary = {}\r\n\r\n for c in classes:\r\n try:\r\n name = c.identifier()\r\n dictionary[name] = c\r\n except AttributeError:\r\n # If node does not provide identifier, we consider it to be\r\n # private or abstract class\r\n pass\r\n\r\n return dictionary",
"def get_subclasses(cls) -> dict:\n return dict(cls._subclasses)",
"def _fill_class_dicts():\n global _taxonomy_classes\n global _data_classes\n if not _taxonomy_classes:\n _taxonomy_classes = get_taxonomies()\n if not _data_classes:\n stack = []\n next_module = data\n while next_module is not None:\n stack += _inspect_module(next_module)\n if stack:\n next_module = stack.pop()\n else:\n next_module = None",
"def _getDefaultGroupDict(self, container):\n ddict = dict(container._dict_)\n ddict.update({\n \"_def_for_repos\": container.for_repos,\n \"_def_for_paths\": container.for_paths,\n })\n\n return ddict",
"def config_mapping(self) -> typing.Dict[str, type]:\n return self._subclasses",
"def _default_config(cls):\n return dict()",
"def _deco_class_settings_dict(self) -> OrderedDict:\n return self._classname2SettingsData_dict[self.deco_class.__name__]",
"def tree():\n return defaultdict(tree)",
"def all_roots(cls):\r\n return dict(cls._TYPES_BY_ROOT)",
"def whoAreYou(self):\n tempDict = {}\n tempDict['Class'] = '{0:15}'.format(self.__class__.__name__) +' from '+' '.join([str(base) for base in self.__class__.__bases__])\n tempDict['Type' ] = self.type\n tempDict['Name' ] = self.name\n return tempDict",
"def get_tree(self) -> dict:\n return dict(self._nodes)",
"def get_cls_dict(config_path):\n return {i: n for i, n in enumerate(get_names(config_path))}",
"def get_deco_class_settings_dict(cls, clsname) -> OrderedDict:\n return cls._classname2SettingsData_dict[clsname]",
"def default(self, obj):\n return {'__{}__'.format(obj.__class__.__name__): obj.__dict__}",
"def classes(class_name):\r\n\td = {}\r\n\tfor k, v in class_name.__dict__.items():\r\n\t\tif not (k.startswith('__') and k.endswith('__')):\r\n\t\t\td[k] = v\r\n\treturn d",
"def get_all_object_classes(cls) -> Dict[str, Type[objects.BaseObject]]:\n cls._refresh_registry()\n return copy.deepcopy(cls.objects_dict)",
"def getHierarchies():",
"def getHierarchies():",
"def parent_classes(self):\n response = check_defined(self, inspect.stack()[0][3])\n if not response:\n return response\n root_node = list(nx.topological_sort(self.se.full_class_only_graph))\n # When a schema is not a tree with only one root node\n # Set \"Thing\" as the root node by default\n if 'http://schema.org/Thing' in root_node:\n root_node = 'http://schema.org/Thing'\n else:\n root_node = root_node[0]\n paths = nx.all_simple_paths(self.se.full_class_only_graph,\n source=root_node,\n target=self.uri)\n paths = [_path[:-1] for _path in paths]\n result = restructure_output(self,\n paths,\n inspect.stack()[0][3],\n self.output_type)\n return result",
"def get_default_paths(self):\n return {key: value.default_path for key, value in self}",
"def defaults():\n return {}",
"def child_to_parent_dict(self,):\n\n self.ParentMap = dict((c, p) for p in self.tree.iter() for c in p)\n self.ParentMap[self.tree.getroot()] = None\n return self.ParentMap",
"def _to_class(self):\n from . import _user, _group, _page\n\n return {\n ThreadType.USER: _user.User,\n ThreadType.GROUP: _group.Group,\n ThreadType.PAGE: _page.Page,\n }[self]",
"def _get_all_loaded_classes(self):\n classes = {}\n for module in self.modules.values():\n for k,v in module.__dict__.items():\n # skip anything that's not a game class\n if not type(v) is type:\n continue\n base_classes = (game_object.GameObject, game_hud.GameHUD, game_room.GameRoom)\n # TODO: find out why above works but below doesn't!! O___O\n #base_classes = self.builtin_base_classes\n if issubclass(v, base_classes):\n classes[k] = v\n return classes",
"def defaults() -> dict:\n pass",
"def assign_defaults(self):\n\n def module_default_sort_key(module):\n sort_key = (\n 1 if module.marked_as_default else -1,\n module.version,\n module.variant,\n -self.index(module.modulepath),\n )\n return sort_key\n\n self.defaults = {}\n grouped = groupby(\n [module for path in self.path for module in path.modules], lambda x: x.name\n )\n for (_, modules) in grouped:\n for module in modules:\n module.is_default = False\n if len(modules) > 1:\n modules = sorted(modules, key=module_default_sort_key, reverse=True)\n modules[0].is_default = True\n self.defaults[modules[0].name] = modules[0]"
] | [
"0.6781914",
"0.65062606",
"0.63813835",
"0.6267545",
"0.62473226",
"0.62175924",
"0.6178537",
"0.5954751",
"0.5953997",
"0.5932108",
"0.59195054",
"0.59142965",
"0.5901989",
"0.58595765",
"0.58321863",
"0.5818339",
"0.5804834",
"0.5788862",
"0.5709798",
"0.56741977",
"0.56467104",
"0.56467104",
"0.5599762",
"0.5596434",
"0.55941176",
"0.5587426",
"0.5583581",
"0.55732185",
"0.5570387",
"0.55637586"
] | 0.9351468 | 0 |
Return the default file hierarchy dictionary. | def default_file_hierarchy_dict():
return {
directory("include"): {
directory("with spaces"): {
file("with spaces.hpp"): {
namespace("with_spaces"): {
function("int", "value"): parameters()
}
}
}
}
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def default_class_hierarchy_dict():\n return {}",
"def get_default_paths(self):\n return {key: value.default_path for key, value in self}",
"def get(self):\n for path, dirs, files in os.walk(self.directory):\n folders = path[self.start:].split(os.sep)\n if self.branches:\n if self._filter(folders, 'folders'):\n files = dict.fromkeys(files)\n parent = reduce(dict.get, folders[:-1], self.tree_dict)\n parent[folders[-1]] = files\n else:\n files = dict.fromkeys(files)\n parent = reduce(dict.get, folders[:-1], self.tree_dict)\n parent[folders[-1]] = files\n return self.tree_dict",
"def _tree():\n return collections.defaultdict(_tree)",
"def create_file_dict():\n import os\n file_dict = {}\n for root, dirs, files in os.walk('.'):\n dirs[:] = [ # add any extra dirs to ignore #\n d for d in dirs\n if '.' not in d\n and 'ENV' not in d\n and '__' not in d\n and 'build' not in d\n ]\n for f in files:\n try:\n with open(f, 'r') as thing:\n res = thing.readline()\n except:\n res = ''\n file_name = os.path.join(root, f).lstrip('./')\n file_dict[file_name] = res\n return file_dict",
"def get_fs_dict (\n initial_root, create_item=None, dict_cls=dict,\n dirname_filter=None, filename_filter=None,\n include_root=False, toplevel_files=True, prune_empty=False, file_key=None,\n):\n # TODO(could-do): max_depth=N\n fsdict = dict_cls()\n get_file_key = ( lambda x: x ) if file_key is None else file_key\n\n\n for root, dict_relpath, dirnames, filenames in walk_relpath (\n initial_root, include_root=include_root, prune_empty=prune_empty,\n dirname_filter=dirname_filter, filename_filter=filename_filter\n ):\n if dict_relpath:\n dictpath = dict_relpath.split ( os.sep )\n parent = functools.reduce ( dict_cls.get, dictpath[:-1], fsdict )\n\n if create_item is None:\n parent [dictpath[-1]] = dict_cls.fromkeys (\n map ( get_file_key, filenames )\n )\n else:\n parent [dictpath[-1]] = dict_cls (\n (\n get_file_key ( fname ),\n create_item ( ( root + os.sep + fname ), fname, root )\n )\n for fname in filenames\n )\n\n elif not toplevel_files:\n pass\n\n elif create_item is None:\n for fname in filenames:\n fsdict [get_file_key(fname)] = None\n\n else:\n for fname in filenames:\n fsdict [get_file_key(fname)] = create_item (\n ( root + os.sep + fname ), fname, root\n )\n # -- end for\n\n return fsdict",
"def get_default_paths():\n DATA_ROOT = os.environ.get(\"DATA_ROOT\", \"data\")\n defaults = {\n \"TOKENIZE_DATA_DIR\": DATA_ROOT + \"/tokenize\",\n \"MWT_DATA_DIR\": DATA_ROOT + \"/mwt\",\n \"LEMMA_DATA_DIR\": DATA_ROOT + \"/lemma\",\n \"POS_DATA_DIR\": DATA_ROOT + \"/pos\",\n \"DEPPARSE_DATA_DIR\": DATA_ROOT + \"/depparse\",\n \"ETE_DATA_DIR\": DATA_ROOT + \"/ete\",\n \"NER_DATA_DIR\": DATA_ROOT + \"/ner\",\n \"CHARLM_DATA_DIR\": DATA_ROOT + \"/charlm\",\n \"SENTIMENT_DATA_DIR\": DATA_ROOT + \"/sentiment\",\n \"CONSTITUENCY_DATA_DIR\": DATA_ROOT + \"/constituency\",\n\n # Set directories to store external word vector data\n \"WORDVEC_DIR\": \"extern_data/wordvec\",\n\n # TODO: not sure what other people actually have\n # TODO: also, could make this automatically update to the latest\n \"UDBASE\": \"extern_data/ud2/ud-treebanks-v2.11\",\n \"UDBASE_GIT\": \"extern_data/ud2/git\",\n\n \"NERBASE\": \"extern_data/ner\",\n \"CONSTITUENCY_BASE\": \"extern_data/constituency\",\n \"SENTIMENT_BASE\": \"extern_data/sentiment\",\n\n # there's a stanford github, stanfordnlp/handparsed-treebank,\n # with some data for different languages\n \"HANDPARSED_DIR\": \"extern_data/handparsed-treebank\",\n\n # directory with the contents of https://nlp.stanford.edu/projects/stanza/bio/\n # on the cluster, for example, /u/nlp/software/stanza/bio_ud\n \"BIO_UD_DIR\": \"extern_data/bio\",\n\n # data root for other general input files, such as VI_VLSP\n \"EXTERN_DIR\": \"extern_data\",\n }\n\n paths = { \"DATA_ROOT\" : DATA_ROOT }\n for k, v in defaults.items():\n paths[k] = os.environ.get(k, v)\n\n return paths",
"def _build_file_tree(self):\n # Build file tree with packmode and weigth info (# of file in the packmode)\n root = {\"packmode\": None, \"weight\": None, \"children\": {}}\n for filepath, packmode in self.override_packmode_map.items():\n node = root\n for part in filepath:\n node = node[\"children\"].setdefault(\n part, {\"packmode\": None, \"weight\": None, \"children\": {}}\n )\n node[\"weight\"] = 1\n node[\"packmode\"] = packmode\n return root",
"def getDictOfRoot(tree, fromNode=None):\r\n if fromNode == None:\r\n fromNode = tree.root\r\n Dict = {fromNode.name:{\"__files__\":fromNode.files}}\r\n Dict = tree.getChildren(fromNode, Dict)\r\n return Dict",
"def tree():\n return defaultdict(tree)",
"def assign_defaults(self):\n\n def module_default_sort_key(module):\n sort_key = (\n 1 if module.marked_as_default else -1,\n module.version,\n module.variant,\n -self.index(module.modulepath),\n )\n return sort_key\n\n self.defaults = {}\n grouped = groupby(\n [module for path in self.path for module in path.modules], lambda x: x.name\n )\n for (_, modules) in grouped:\n for module in modules:\n module.is_default = False\n if len(modules) > 1:\n modules = sorted(modules, key=module_default_sort_key, reverse=True)\n modules[0].is_default = True\n self.defaults[modules[0].name] = modules[0]",
"def getHierarchy(unique_name):",
"def getHierarchy(unique_name):",
"def getHierarchy(unique_name):",
"def get_defaults():\n\n return {\n \"numberofrules\": 0,\n \"datapath\": path_join_robust(BASEDIR_PATH, \"data\"),\n \"freshen\": True,\n \"replace\": False,\n \"backup\": False,\n \"skipstatichosts\": False,\n \"keepdomaincomments\": True,\n \"extensionspath\": path_join_robust(BASEDIR_PATH, \"extensions\"),\n \"extensions\": [],\n \"compress\": False,\n \"minimise\": False,\n \"outputsubfolder\": \"\",\n \"hostfilename\": \"hosts\",\n \"targetip\": \"0.0.0.0\",\n \"sourcedatafilename\": \"update.json\",\n \"sourcesdata\": [],\n \"readmefilename\": \"readme.md\",\n \"readmetemplate\": path_join_robust(BASEDIR_PATH, \"readme_template.md\"),\n \"readmedata\": {},\n \"readmedatafilename\": path_join_robust(BASEDIR_PATH, \"readmeData.json\"),\n \"exclusionpattern\": r\"([a-zA-Z\\d-]+\\.){0,}\",\n \"exclusionregexes\": [],\n \"exclusions\": [],\n \"commonexclusions\": [\"hulu.com\"],\n \"blacklistfile\": path_join_robust(BASEDIR_PATH, \"blacklist\"),\n \"whitelistfile\": path_join_robust(BASEDIR_PATH, \"whitelist\"),\n }",
"def recursive_make_defaultdict(conf):\n if isinstance(conf, dict):\n for key in conf.keys():\n conf[key] = recursive_make_defaultdict(conf[key])\n return defaultdict(lambda: None, conf)\n return conf",
"def build_tree(path: str, ignore_dirs: Optional[Sequence[str]] = None) -> dict:\n if ignore_dirs is None:\n ignore_dirs = []\n if is_module(path):\n key = uuid.uuid4().hex\n name = os.path.splitext(os.path.basename(path))[0]\n item = {key: {\n \"name\": name,\n \"path\": os.path.abspath(path),\n \"components\": [name],\n \"type\": \"module\",\n }}\n return item\n if is_shared_object(path):\n key = uuid.uuid4().hex\n name = os.path.basename(path).partition(\".\")[0]\n return {key: {\n \"name\": name,\n \"path\": os.path.abspath(path),\n \"components\": [name],\n \"type\": \"shared_object\"\n }}\n if is_file(path):\n key = uuid.uuid4().hex\n return {key: {\n \"name\": None,\n \"path\": os.path.abspath(path),\n \"components\": [None],\n \"type\": \"file\"\n }}\n if is_directory(path):\n key = uuid.uuid4().hex\n name = os.path.basename(path)\n item = {key: {\n \"name\": name if is_package(path) else None,\n \"path\": os.path.abspath(path),\n \"components\": [name] if is_package(path) else [None],\n \"type\": \"package\" if is_package(path) else \"directory\",\n \"children\": {}\n }}\n for child in os.listdir(path):\n if child not in ignore_dirs:\n child_path = os.path.join(path, child)\n info = build_tree(child_path, ignore_dirs)\n if info:\n if \"children\" in item[key]:\n apply_tree(info, lambda x: x[\"components\"].insert(0, item[key][\"name\"]))\n item[key][\"children\"].update(info)\n return item\n return {}",
"def get_dictionary_default(path):\n if path in defaults_dict.keys():\n return defaults_dict[path]\n else:\n return ''",
"def to_dict(self):\n non_terminal = {\n \"name\": self.name,\n \"children\": [child.to_dict() for child in self.children]\n }\n\n if self.file:\n non_terminal[\"file\"] = self.file\n\n return non_terminal",
"def create_tree_hash_dict(cls, current_dir, file_path, dirs, files, ref_table):\n\n # we sort just to ensure there are no arrangement issues that could affect the hash outcome\n file_hashs = sorted([ref_table['%s/%s' % (file_path, file)]['hash'] for file in files])\n dir_hashs = sorted([ref_table['%s/%s' % (file_path, dir_name)]['hash'] for dir_name in dirs])\n\n tree_info = {}\n tree_info['path'] = file_path\n tree_info['content'], tree_info['hash'] = cls.get_tree_contents(file_path, dirs, files, ref_table)\n tree_info['type'] = 'tree'\n tree_info['name'] = current_dir\n tree_info['perm'] = stat.S_IMODE(os.lstat(file_path).st_mode)\n\n return tree_info",
"def create_path_dict(save_path):\n act_fn = [sorted(['relu', 'antirelu', 'identity', 'tanh', 'sigmoid']),\n sorted(['relu', 'antirelu', 'identity', 'sigmoid']),\n sorted(['relu', 'antirelu', 'identity', 'tanh']),\n sorted(['relu', 'antirelu', 'sigmoid', 'tanh']),\n sorted(['relu', 'identity', 'sigmoid', 'tanh']),\n sorted(['antirelu', 'identity', 'sigmoid', 'tanh']),\n ['relu'],\n ['sigmoid'],\n ['tanh'],\n ['antirelu'],\n ['None']]\n # ['identity']]\n\n act_fn = ['_'.join(act) for act in act_fn]\n path_dict = defaultdict(list)\n for (filepath, dirname, filename) in os.walk(save_path):\n if 'results.json' in filename:\n for act in act_fn:\n temp = filepath.split('/')\n if act == temp[-1] or act == temp[-2]:\n path_dict[act].append(filepath)\n print(path_dict)\n return path_dict",
"def _getDefaultGroupDict(self, container):\n ddict = dict(container._dict_)\n ddict.update({\n \"_def_for_repos\": container.for_repos,\n \"_def_for_paths\": container.for_paths,\n })\n\n return ddict",
"def _default_config(cls):\n return dict()",
"def forge_files(self) -> Dict[str, BaseForge]:\n\t\treturn self._forge_files",
"def pre_lookup(self, file):\n return {}",
"def get_pathes(self) -> Dict[str, str]:\n\n pathes: Dict[str, str] = {}\n\n for path in self.files:\n name = path.split(\"/\")[-1].split(\".\")[0]\n pathes[name] = os.path.join(self.home_folder, path)\n return pathes",
"def getDefaults():\n return {\n 'minsize': 10, # minimum size in MB\n 'pattern': [], # file name patterns\n }",
"def path_to_dict(self, someDir, level=9001, relativeFolders=True, relativeFiles=False):\n someDir = someDir.rstrip(os.path.sep)\n assert os.path.isdir(someDir)\n numSep = someDir.count(os.path.sep)\n\n outputDict = {}\n for root, dirs, files in os.walk(someDir):\n for d in dirs + files:\n path = os.path.join(root, d)[(len(someDir)):]\n path = path.rstrip(os.sep).lstrip(os.sep)\n pathSplit = paths.os_path_split_asunder(path)\n if os.path.isfile(os.path.join(root, d)) and not relativeFiles:\n pathSplit[-1] = os.path.join(root, d)\n if len(pathSplit) == 1:\n outputDict[pathSplit[0]] = {}\n else:\n nestedDict = self.list_flattened_to_dict(pathSplit)\n mergedDict = dict(mergedicts(outputDict, nestedDict))\n for key in nestedDict.keys():\n outputDict = dict(outputDict, **nestedDict)\n outputDict = dict(outputDict, **mergedDict)\n\n numSepCurrent = root.count(os.path.sep)\n if numSep + level <= numSepCurrent:\n del dirs[:]\n return outputDict",
"def defaults():\n return {}",
"def getHierarchies():"
] | [
"0.7365556",
"0.6702656",
"0.6633775",
"0.623178",
"0.6229957",
"0.61596197",
"0.6079739",
"0.5987825",
"0.59239",
"0.58614475",
"0.5858239",
"0.58376956",
"0.58376956",
"0.58376956",
"0.5832336",
"0.57800734",
"0.57799923",
"0.5694619",
"0.5684486",
"0.56160605",
"0.5611224",
"0.56102467",
"0.56060594",
"0.5604444",
"0.5568331",
"0.5567285",
"0.553413",
"0.5527589",
"0.55034643",
"0.54896975"
] | 0.7459949 | 0 |
Converts the specified memory quantity from one unit to another. Supported units are ["B", "KB", "MB", "GB", "TB", "PB"]. | def convertMem(mem, fromCode="GB", toCode="MB"):
assert mem is not None and (isinstance(mem, int) or isinstance(mem, float)) and mem >= 0, "Invalid memory: %s" % (mem)
indices = ["B", "KB", "MB", "GB", "TB", "PB"]
assert fromCode is not None and fromCode.strip().upper() in indices, "Invalid from code: %s" % (fromCode)
assert toCode is not None and toCode.strip().upper() in indices, "Invalid to code: %s" % (toCode)
fromIdx = indices.index(fromCode.strip().upper())
toIdx = indices.index(toCode.strip().upper())
return mem * 1024 ** (fromIdx - toIdx) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def convert_unit(size_in_bytes, unit):\n if unit == 'KB':\n return size_in_bytes/1024\n elif unit == 'MB':\n return size_in_bytes/(1024*1024)\n elif unit == 'GB':\n return size_in_bytes/(1024*1024*1024)\n else:\n return size_in_bytes",
"def convertFromBytes(size, unit):\n\tif (unit == 'kb'):\n\t\treturn size / 10000\n\telif (unit == 'mb'):\n\t\treturn size / 1000000\n\telif (size == 'gb'):\n\t\treturn size / 1000000000",
"def cast_value(value, unit):\n if isinstance(value, u.Quantity):\n return value.to(unit)\n return value * unit",
"def convert_from_to(from_unit, to_unit, quantity):\n if not is_valid_unit(from_unit):\n raise ValueError('Invalid from_unit: {}. Cannot convert.'.format(from_unit))\n if not is_valid_unit(to_unit):\n raise ValueError('Invalid to_unit: {}. Cannot convert.'.format(to_unit))\n return quantity * CONVERSIONS_AMERICAN[from_unit][to_unit]",
"def tranfer_unit(number):\n count = 0\n unit_name = \"\"\n if 2 ** 20 > number > 2 ** 10:\n unit_name = \"Kb\"\n count = 1\n elif 2 ** 30 > number > 2 ** 20:\n unit_name = \"Mb\"\n count = 2\n elif number > 2 ** 30:\n unit_name = \"Gb\"\n count = 3\n else:\n unit_name = \"b\"\n if count != 0:\n unit_number = round(number / ((2 ** 10) ** count), 2)\n else:\n unit_number = round(number, 2)\n unit_str = \"{num}{name}\".format(num=unit_number, name=unit_name)\n return unit_str",
"def format_memory(B, unit=\"infer\", return_units=True):\n B = float(B)\n KB = float(1024)\n MB = float(KB ** 2) # 1,048,576\n GB = float(KB ** 3) # 1,073,741,824\n TB = float(KB ** 4) # 1,099,511,627,776\n \n # Human readable\n size_in_b = int(B)\n size_in_kb = B/KB\n size_in_mb = B/MB\n size_in_gb = B/GB\n size_in_tb = B/TB\n \n if return_units:\n size_in_b = '{0} B'.format(size_in_b)\n size_in_kb = '{0:.3f} KB'.format(size_in_kb)\n size_in_mb = '{0:.3f} MB'.format(size_in_mb)\n size_in_gb = '{0:.3f} GB'.format(size_in_gb)\n size_in_tb = '{0:.3f} TB'.format(size_in_tb)\n \n unit = unit.lower()\n assert_acceptable_arguments(unit.lower(), {\"infer\", \"b\", \"kb\", \"mb\", \"gb\", \"tb\"})\n if unit != \"infer\":\n return {\"b\":size_in_b, \"kb\":size_in_kb, \"mb\":size_in_mb, \"gb\":size_in_gb, \"tb\":size_in_tb}[unit]\n else:\n if B < KB:\n return size_in_b\n elif KB <= B < MB:\n return size_in_kb\n elif MB <= B < GB:\n return size_in_mb\n elif GB <= B < TB:\n return size_in_gb\n elif TB <= B:\n return size_in_tb",
"def convert_units(self, units):\n self.unit_array = self.unit_array.to(units)",
"def unit_converter(val, from_u, to_u):\n\tconverter = {'b':0, 'k':1, 'm':2, 'g':3, 't':4}\n\tif converter[from_u] < converter[to_u]:\n\t\tval = float(val)\n\t\tfor _ in range(converter[to_u] - converter[from_u]):\n\t\t\tval = val/1024\n\telse:\n\t\tfor _ in range(converter[from_u] - converter[to_u]):\n\t\t\tval = val * 1024\n\t\t\t\n\treturn val",
"def to_units(number):\n unit = 0\n while number >= 1024.:\n unit += 1\n number = number / 1024.\n if unit == len(UNITS) - 1:\n break\n if unit:\n return '%.2f%s' % (number, UNITS[unit])\n return '%d' % number",
"def convert_to_kib(mem):\n mem_size, unit = mem\n multiplier = CONVERSION[unit]\n return mem_size * multiplier",
"def convert_qty (qty,unit,ing) :\n portion_presence = False\n try :\n div = re.search(r\"[^ \\w]\", qty).start()\n portion = float(qty[div-1]) / float(qty[div+1])\n qty_float=portion\n portion_presence = True\n qty = qty[:div-1]\n except :\n try : \n qty_float = float(qty)\n except :\n qty_float = 10\n\n if portion_presence == True :\n if len(qty) > 0 :\n qty_float += float(qty[:div-2])\n \n #use the unit to have in ml\n #qty_float*=conversion_unit[unit]\n \n #convert in grammes with the database of density\n #qty_float*=density[ing]\n \n return qty_float",
"def convertUnits(self, varname, arr):\n if varname == \"SPDQ\" or varname == \"PHQ\":\n return arr*2.5e6/1000.\n return arr",
"def ensure_unit(arg, unit):\n if not isinstance(arg, u.Quantity):\n arg = arg * unit\n return arg.to(unit)",
"def to_bytes(num, unit):\n unit_lr = unit.lower()\n if unit_lr == \"pb\":\n return num * PB\n if unit_lr == \"tb\":\n return num * TB\n if unit_lr == \"gb\":\n return num * GB\n if unit_lr == \"mb\":\n return num * MB\n if unit_lr == \"kb\":\n return num * KB\n if unit_lr == \"bytes\":\n return num\n raise \"unexpected unit %s of number %i, cannot process filter cache stats\" % (\n unit,\n num,\n )",
"def convert(self, value, units, newunits):\n return value * self._units[units] / self._units[newunits]",
"def from_units(text):\n match = re.match(r'^([0-9\\.]+)(|[' + ''.join(UNITS[1:]) + r'])$', text)\n if not match:\n return None\n\n number = float(match.group(1))\n unit = match.group(2)\n return int(number * 1024**UNITS.index(unit))",
"def convert(self, value):\n\n\t\tif self.converter is not None:\n\t\t\treturn self.converter(value)\n\t\telif self.units is not None:\n\t\t\tq = Quantity(value)\n\t\t\tq.assert_dimensions(self.units)\n\n\t\t\treturn q\n\t\telse:\n\t\t\treturn value",
"def _convert_unit(self, unit):\n if unit in self.units:\n return self.units[unit]\n elif unit in unit_map:\n return unit_map[unit]\n else:\n raise SBMLError('Unit not recognized: ' + str(unit))",
"def pressures_in_mb( pressures ):\n if not hasattr( pressures, 'units' ): return None\n if pressures.units=='mb':\n pressures.units = 'mbar' # udunits uses mb for something else\n return pressures[:]\n tmp = udunits(1.0,pressures.units)\n s,i = tmp.how('mbar')\n pressmb = s*pressures[:] + i\n return pressmb",
"def memory(value):\n if re.match(r\"[0-9]{1,9}Mi?\", str(value)):\n mem = re.sub(\"[^0-9]\", \"\", value)\n elif re.match(r\"[0-9]{1,9}Ki?\", str(value)):\n mem = re.sub(\"[^0-9]\", \"\", value)\n mem = int(mem) // 1024\n elif re.match(r\"[0-9]{1,9}Gi?\", str(value)):\n mem = re.sub(\"[^0-9]\", \"\", value)\n mem = int(mem) * 1024\n return int(mem)",
"def units(self, size=\"G\", transfer='GB/s'): # YAML",
"def convertUnit(*args, fromUnit: AnyStr=\"\", toUnit: AnyStr=\"\", **kwargs)->float:\n pass",
"def convert_units(self):\n for prod in (\"ier\", \"ier_inc_rain\"):\n self.data[prod].data[:] /= 1e6",
"def machine_type_to_memory(machine_type):\n if machine_type in MACHINE_TYPES:\n return MACHINE_TYPES[machine_type]['memory']\n m = re.match(CUSTOM_MACHINE_TYPE_RE, machine_type)\n assert m, machine_type\n return int(m.group(2)) / 1024",
"def convert(value, units: UnitLike, registry: unyt.UnitRegistry = None):\n return process_unit_input(value, units, convert=True, registry=registry).v",
"def convert_unit_size_to_num(size, unit=None):\n if unit:\n unit = MemoryUnit.validate_unit(unit)\n else:\n unit = MemoryUnit.UNIT_SIZE_DEFAULT\n log.info(_('A memory unit is not provided for size; using the '\n 'default unit %(default)s.') % {'default': 'B'})\n regex = re.compile('(\\d*)\\s*(\\w*)')\n result = regex.match(str(size)).groups()\n if result[1]:\n unit_size = MemoryUnit.validate_unit(result[1])\n converted = int(str_to_num(result[0])\n * MemoryUnit.UNIT_SIZE_DICT[unit_size]\n * math.pow(MemoryUnit.UNIT_SIZE_DICT\n [unit], -1))\n log.info(_('Given size %(size)s is converted to %(num)s '\n '%(unit)s.') % {'size': size,\n 'num': converted, 'unit': unit})\n else:\n converted = (str_to_num(result[0]))\n return converted",
"def to_unit(self, unit):\n unit = _find_unit(unit)\n self.value = _convert_value(self.value, self.unit, unit)\n self.unit = unit",
"def _fromBytes(self, size, unity):\n size_map = {'B': 1, 'KB': 1024, 'MB': 1024 ** 2, 'GB': 1024 ** 3,\n 'TB': 1024 ** 4}\n return size / size_map[unity]",
"def convert_bytes(self,num):\n for x in ['bytes', 'KB', 'MB', 'GB', 'TB']:\n if num < 1024.0:\n return \"%3.1f %s\" % (num, x)\n num /= 1024.0",
"def convert_bytes(self,num):\n for x in ['bytes', 'KB', 'MB', 'GB', 'TB']:\n if num < 1024.0:\n return \"%3.1f %s\" % (num, x)\n num /= 1024.0"
] | [
"0.7277126",
"0.6947695",
"0.68115354",
"0.67552584",
"0.67162085",
"0.66954976",
"0.65725",
"0.63952094",
"0.63424766",
"0.63257724",
"0.6288342",
"0.62289834",
"0.62005645",
"0.6162397",
"0.6162149",
"0.61009115",
"0.60763556",
"0.60662353",
"0.6020919",
"0.6008452",
"0.59890294",
"0.5957952",
"0.589946",
"0.58990514",
"0.5888739",
"0.58630913",
"0.58590496",
"0.58582765",
"0.58460426",
"0.58460426"
] | 0.7538796 | 0 |
Merges the specified output lines into a single string. | def formatOutput(output):
assert output is not None, "Output is None"
return "\n" + " ".join(output) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def output_lines(*args, **kwargs):\n return check_output(*args, **kwargs).splitlines()",
"def print_output():\n\tprint ''.join([str(x)+\"\" for x in output])",
"def return_fixed_output(output, rstrip=True):\n fixed_output = filter(_non_debug_line, output.split('\\r\\n'))\n joiner = '' if rstrip else '\\r\\n'\n return joiner.join(fixed_output)",
"def joinlines(lines: Iterable[str], newline: str = \"\\n\") -> str:\n return \"\".join(f\"{line}{newline}\" for line in lines)",
"def svn_diff_mem_string_output_merge(*args):\n return _diff.svn_diff_mem_string_output_merge(*args)",
"def concatena(*args):\n linea = ''\n for l in args:\n linea += str(l if l else '')\n return linea",
"def format_output(list_to_output):\n return \" \".join(str(item) for item in list_to_output)",
"def output(strings):\n cCommand_ScrollToEnd = 259 # cudatext_cmd.py\n\n if not isinstance(strings, list):\n strings = [strings]\n for s in strings:\n app_log(LOG_ADD, s, panel=LOG_PANEL_OUTPUT)\n\n oed = Command._output_ed\n if option_tail_log and oed:\n oed.cmd(cCommand_ScrollToEnd)",
"def output(self) -> str:\n imports = ''\n if self._import_lines:\n imports += ''.join(self._import_lines)\n imports += ''.join(self.import_tracker.import_lines())\n if imports and self._output:\n imports += '\\n'\n return imports + ''.join(self._output)",
"def output(self) -> str:\n self._contents = str(self._line) + \", \" + str(self._line)\n return(super().output(0, None))",
"def get_output_str(self):\n out_str = \"\"\n path_size = len(self.path)\n for i in range(0, path_size):\n out_str += self.path[i].flight_num\n if i == path_size - 1:\n out_str += \"\\n\"\n else:\n out_str += \",\"\n return out_str",
"def one_linestring_per_intersection(lines):\n lines_merged = shapely.ops.linemerge(lines)\n\n # intersecting multiline with its bounding box somehow triggers a first\n bounding_box = box(*lines_merged.bounds)\n\n # perform linemerge (one linestring between each crossing only)\n # if this fails, write function to perform this on a bbox-grid and then\n # merge the result\n lines_merged = lines_merged.intersection(bounding_box)\n lines_merged = shapely.ops.linemerge(lines_merged)\n return lines_merged",
"def getMergeLine(desc_line,CC3_sample,GP2_sample):\n return desc_line.strip(\"\\n\") + \"\" + CC3_sample + \"\" + GP2_sample + \"\\n\"",
"def output(self):\n return \" \".join(self.pieces)",
"def output(self):\n\t\treturn \"\".join(self.pieces)",
"def join_lines(strings):\n liness = [string.splitlines() for string in strings]\n return '\\n'.join(''.join(lines) for lines in zip(*liness))",
"def linemerge(linestrings_or_multilinestrings):\n lines = []\n for line in linestrings_or_multilinestrings:\n if isinstance(line, MultiLineString):\n # line is a multilinestring, so append its components\n lines.extend(line)\n else:\n # line is a line, so simply append it\n lines.append(line)\n \n return shapely.ops.linemerge(lines)",
"def output(self):\n\n return \"\".join(self.pieces)",
"def __call__(self, *args, **kwargs):\n kwargs.setdefault(\"print_output\", self.PRINT_OUTPUT)\n kwargs.setdefault(\"return_output\", self.RETURN_OUTPUT)\n\n s = self.output(*args, **kwargs)\n if kwargs[\"print_output\"]:\n self.writeline(s)\n\n return s.strip() if kwargs[\"return_output\"] else None",
"def _write_output(output: List[str], output_file: Optional[str]) -> None:\n if output_file:\n with open(output_file, 'w+') as file:\n file.write('\\n'.join(output))\n else:\n for line in output:\n print(line)",
"def outputs(self) -> str:\n return self.stdout + self.stderr",
"def output_result(bundlesForConfig, ofile):\n\n result = ('{0}\\n'.format('\\n'.join(str(x) for x in bundlesForConfig)))\n\n if ofile:\n try:\n with open(ofile, \"w\") as outfile:\n outfile.write(result)\n\n except IOError as err:\n logging.error(\"{0}\".format(err))\n sys.exit(1)\n\n else:\n print(result)",
"def dump_line(self, outputs: JsonDict) -> str:\n return json.dumps(outputs, ensure_ascii=False) + \"\\n\"",
"def dump_line(self, outputs: JsonDict) -> str:\n return json.dumps(outputs, ensure_ascii=False) + \"\\n\"",
"def combine_text(evt):\n global output\n output = output + evt.result.text\n print(evt.result.text)",
"def output(self) -> str:\n output = '- ' + self.output_location() + '\\n'\n output += self.INDENT + self.lines[0][2:] + '\\n'\n for line in self.lines[1:]:\n output += self.INDENT + line + '\\n'\n\n return output",
"def svn_diff_mem_string_output_merge3(*args):\n return _diff.svn_diff_mem_string_output_merge3(*args)",
"def flush_output():\n if len(buffered) == 1:\n code.add_line(\"append_result(%s)\" % buffered[0])\n elif len(buffered) > 1:\n code.add_line(\"extend_result([%s])\" % \", \".join(buffered))\n del buffered[:]",
"def output(*args):\n print(*args, end='', file=file)",
"def all(self):\n str = \"\"\n for line in self.__contents:\n str = str + line + \"\\n\"\n return str.rstrip(\"\\n\")"
] | [
"0.66480833",
"0.6211222",
"0.60794544",
"0.6075036",
"0.58680034",
"0.5830498",
"0.5799025",
"0.5767128",
"0.57403636",
"0.5715186",
"0.570294",
"0.5701719",
"0.56530124",
"0.5616031",
"0.5598391",
"0.5587614",
"0.55386454",
"0.54745513",
"0.54627925",
"0.54505545",
"0.5441662",
"0.5417682",
"0.53966576",
"0.53966576",
"0.5393489",
"0.53709835",
"0.53437454",
"0.5336545",
"0.53251576",
"0.5318752"
] | 0.65789884 | 1 |
Returns a subcollection of the specified one, whose elements are not empty strings. | def filterEmptyStrings(collection):
assert collection is not None, "Collection is None"
return filter(lambda s: str(s).strip() != "", collection) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def empty_collection(self):\n raise NotImplementedError",
"def without_empty(s):\n return {i for i in s if not i.is_empty()}",
"def _sanitize_value(self, value):\n if isinstance(value, collections.Iterable):\n newcollection = []\n for i in value:\n if len(i) == 1:\n newcollection.append((i[0], None))\n elif len(i) >= 2:\n newcollection.append((i[0], i[1]))\n return newcollection\n else:\n return value",
"def remove_empty(data):\n out = []\n for item in data:\n if item == '':\n continue\n out.append(item)\n return out",
"def filter_empty(word_list):\n new_list = []\n for x in word_list:\n if(x):\n new_list.append(x)\n return new_list",
"def empty_if_none(x):\n if x:\n return x\n else:\n return []",
"def populated_collection(self, empty_collection, plain_collection):\n empty_collection.extend(plain_collection)\n return empty_collection, plain_collection",
"def filterNull(self, result):\n\t\treturn [_ for _ in result if _]",
"def nonempty(self, r=0):\n if r != 0:\n try:\n new_plist = plist([x.nonempty(r=r - 1) for x in self if len(x)])\n except Exception:\n new_plist = self\n else:\n new_plist = self\n return plist([x for x in new_plist if len(x)],\n root=plist([self.__root__[i] for i, x in enumerate(new_plist) if len(x)]))",
"def populated_collection(self, empty_collection, plain_collection):\n raise NotImplementedError",
"def _filter_empty(lst):\n return [cell for cell in lst if cell is not Sudoku.EMPTY_CELL]",
"def compact(items):\n return filter(lambda item: item is not None and len(item) > 0, items)",
"def AllSubElements(self):\n return []",
"def c(*elems: Any) -> Collection:\n return Collection(*elems)",
"def _clean_list(self, items):\n itemlist = list(filter(None, items))\n if len(itemlist) < 3:\n itemlist.append(\"\")\n return itemlist\n\n return itemlist",
"def remove_blanks_list(src):\n return [el for el in src if el]",
"def empty(self):\n return [cell for cell in self.compact if not cell.peg]",
"def get_only_element_from_collection(one_element_collection):\n if len(one_element_collection) != 1:\n raise AssertionError(u'Expected a collection with exactly one element, but got: {}'\n .format(one_element_collection))\n return funcy.first(one_element_collection)",
"def remove_empty_string(str_list):\n return list(filter(None, str_list))",
"def making_sets(lists):\n empty_set = []\n lists =lists.split()\n for elements in lists:\n if elements == \" \":\n next\n else:\n if elements not in empty_set:\n empty_set.append(elements) \n return empty_set",
"def is_collection(var):\n return isinstance(var, Iterable) and not isinstance(var, str)",
"def get_first_non_empty(inputList, num):\n i = num\n outputList = []\n for item in inputList:\n if item.strip() == '':\n continue\n outputList.append(item.strip())\n i -= 1\n if i <= 0:\n break\n return outputList",
"def empty(self):\n return _uhd_swig.uhd_string_vector_t_empty(self)",
"def EmptyPartners(length):\n return Partners([None] * length)",
"def test_get_substrings_all(self):\n\n ans = [s.value() for s in self.sf.get_substrings(0, False)]\n\n expected_values = [(0, 1, 2, 3, 4, 5), (0, 1, 2, 3, 4), (1, 2, 3, 4, 5), (1, 2, 3, 4), \\\n (2, 3, 4, 5), (2, 3, 4), (3, 4, 5), (3, 4), (4, 5), (4,), (5,)]\n\n self.assertEqual(ans, expected_values)",
"def sub0(self, *args, **kwargs):\r\n\r\n # Resultant collection initialization\r\n coll = CollectionIp()\r\n coll = self\r\n\r\n for key, value in kwargs.iteritems():\r\n coll = coll._with_attr(key, value)\r\n\r\n for value in args:\r\n # filter usign lambda function\r\n f = value\r\n coll = CollectionIp(ip for ip in coll if f(ip))\r\n\r\n return coll",
"def test_empty(self):\n eq_([], list(collate()))",
"def empty(*_):",
"def from_first(self, value: Any) -> List:\n matches = self._slice_helper(value, multiple_matches_forbidden=False)\n return type(self.parent)() if not matches else type(self.parent)(self.parent[matches[0]:])",
"def compact_list(self):\n return [ele for ele in self if ele is not None]"
] | [
"0.54911816",
"0.5484122",
"0.5454712",
"0.5401307",
"0.53774446",
"0.53146577",
"0.5274968",
"0.5272938",
"0.5188137",
"0.51744777",
"0.51707447",
"0.51608723",
"0.5136659",
"0.5076466",
"0.5059283",
"0.5058749",
"0.5056052",
"0.5051178",
"0.5041587",
"0.50303704",
"0.4979725",
"0.49640462",
"0.4955528",
"0.49389154",
"0.49087876",
"0.48882207",
"0.48851272",
"0.48714325",
"0.48609248",
"0.48425025"
] | 0.6488247 | 0 |
Return the version prefix, if any. | def prefix(self) -> Optional[str]:
return RE_VERSION.match(str(self._version)).group(1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def verPrefix(version, versionPattern=''):\n if not versionPattern:\n versionPattern = os.environ.get('KOMBI_VERSION_PATTERN', DEFAULT_VERSION_PATTERN)\n\n patternParts = __splitVersionPattern(versionPattern)\n return str(version)[:len(patternParts['prefix'])]",
"def GetPrefix():\n m = BRANCH_REGEX.match(RCS_FILE)\n if m:\n return m.group(1)\n return DEFAULT_DEPOT",
"def get_version():\r\n return '.'.join((str(each) for each in VERSION[:3]))",
"def get_version():\n return '.'.join(map(str, VERSION))",
"def version_get(self, string, prefix):\n\n regex = r\"[/_.]{}\\d+\".format(prefix)\n matches = re.findall(regex, string, re.IGNORECASE)\n\n if not len(matches):\n msg = \"No '_{}#' found in '{}'\".format(prefix, string)\n raise ValueError(msg)\n return matches[-1:][0][1], re.search(r\"\\d+\", matches[-1:][0]).group()",
"def prefix(self) -> typing.Optional[str]:\n return self._values.get('prefix')",
"def prefix(self) -> typing.Optional[str]:\n return self._values.get('prefix')",
"def prefix(self) -> typing.Optional[str]:\n return self._values.get('prefix')",
"def getversion():\r\n\r\n global VERSION\r\n\r\n if len(VERSION) == 3:\r\n return '{}.{}.{}'.format(VERSION[0], VERSION[1], VERSION[2])\r\n else:\r\n return '{}.{}.{}-{}'.format(VERSION[0], VERSION[1], VERSION[2], VERSION[3])",
"def default_prefix(self) -> str:",
"def get_version():\n return \".\".join([str(i) for i in config[\"version\"]])",
"def get_major_version(version):\n return str(check_version(version)[0])",
"def get_major_version(version):\n parsed_version = version.split('.')\n return '.'.join(parsed_version[0:2])",
"def get_version():\n return \"0.0.1 (prerelease prototype)\"",
"def getPrefix(self):\n return( self.id.split('.')[0] )",
"def _branch_name(cls, version: Version) -> str:\n suffix = version.public[len(version.base_version) :]\n components = version.base_version.split(\".\") + [suffix]\n if suffix != \"\" and not (\n suffix.startswith(\"rc\")\n or suffix.startswith(\"a\")\n or suffix.startswith(\"b\")\n or suffix.startswith(\".dev\")\n ):\n raise ValueError(f\"Unparseable pants version number: {version}\")\n return \"{}.{}.x\".format(*components[:2])",
"def version_min():\n return VERSION_MIN",
"def get_prefix(self):\n return self.prefix",
"def versionstr():\n return \"%d.%d.%d%s\" % (version[0], version[1], version[2],\n '-' + gitstr() if gitstr() else '')",
"def get_min_build_version(version: str) -> str:\n return Version(version).replace(micro=0).get_stable().dumps()",
"def prefix(self):\n return self[\"prefix\"]",
"def prefix(self):\n return self[\"prefix\"]",
"def get_version_tag(self, version: str) -> str:\n return version",
"def _version_to_shorthand(version):\n parts = version.split('.')\n if len(parts) != 2 and len(parts) != 3:\n tmpl = 'Version string must be like X.Y or X.Y.Z, not `{}`'\n raise ValueError(tmpl.format(version))\n return parts[0] + parts[1]",
"def _get_prefix(obj):\n return obj._prefix if obj._prefix is not PREFIX_NOT_SET else DEFAULT_PREFIX",
"def get_package_version(item: str) -> Union[str, None]:\n return remove_prefix(item, PackageInfoPrefix.VERSION)",
"def get_version(version=None):\n if version is None:\n version = VERSION\n assert len(version) == 5\n assert version[3] in (\"alpha\", \"beta\", \"rc\", \"final\")\n\n parts = 2 if version[2] == 0 else 3\n main = \".\".join(str(digit) for digit in version[:parts])\n\n sub = \"\"\n if version[3] != \"final\":\n mapping = {\"alpha\": \"a\", \"beta\": \"b\", \"rc\": \"rc\"}\n sub = mapping[version[3]] + str(version[4])\n\n return main + sub",
"def get_prefix(self):\n return self._prefix",
"def get_prefix(self):\n return self._prefix",
"def _major_version(self):\n version_tuple = StrictVersion(self.plugin.version).version\n major = '.'.join(map(str, version_tuple[:2]))\n\n return major"
] | [
"0.7351603",
"0.71091646",
"0.6788119",
"0.6688422",
"0.6561797",
"0.6536458",
"0.6536458",
"0.6536458",
"0.65189993",
"0.65139455",
"0.64992476",
"0.6489926",
"0.64207184",
"0.6407056",
"0.6391019",
"0.6385414",
"0.6351009",
"0.6347237",
"0.6335778",
"0.63176703",
"0.6288917",
"0.6288917",
"0.62818205",
"0.6275864",
"0.62728024",
"0.6262725",
"0.62607163",
"0.6250515",
"0.6250515",
"0.62493664"
] | 0.86012506 | 0 |
Return a bool to indicate alpha version. | def alpha(self) -> bool:
return "a" in self.modifier if self.modifier else False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def hasAlpha(self) :\n return self.m_hasAlpha",
"def is_stable(self) -> bool:\n return not self.is_prerelease",
"def enable_kubernetes_alpha(self) -> bool:\n return pulumi.get(self, \"enable_kubernetes_alpha\")",
"def is_on(self) -> bool:\n current = self.coordinator.data.info.version\n beta = self.coordinator.data.info.version_latest_beta\n stable = self.coordinator.data.info.version_latest_stable\n\n return current is not None and (\n (stable is not None and stable > current)\n or (\n beta is not None\n and (current.alpha or current.beta or current.release_candidate)\n and beta > current\n )\n )",
"def stable(self):\n return(self.zeta > 0)",
"def is_versioning_enabled(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"is_versioning_enabled\")",
"def isalpha(self) -> bool:\n pass",
"def is_versioning_enabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"is_versioning_enabled\")",
"def enabled():\n installed = installedVersion()\n required = MIN_DCM2NIIX_VERSION\n return ((installed is not None) and\n (compareVersions(installed, required) >= 0))",
"def comp_alpha(self):\n pass",
"def alpha(self):\n return self._alpha",
"def alpha(self):\n return self._alpha",
"def alpha(self):\n return self._alpha",
"def beta(self) -> bool:\n return \"b\" in self.modifier if self.modifier else \"beta\" in self.string",
"def is_stable_version(version):\n if not isinstance(version, tuple):\n version = version.split('.')\n last_part = version[-1]\n\n if not re.search('[a-zA-Z]', last_part):\n return True\n else:\n return False",
"def is_valid_version(self):\n pass",
"def is_valid_version(self) -> bool:\n return self._is_valid_version()",
"def Alpha(self):\r\n return self._alpha",
"def _is_alpha_only(self, pvm: PermissionView) -> bool:\n\n if (\n pvm.view_menu.name in self.GAMMA_READ_ONLY_MODEL_VIEWS\n and pvm.permission.name not in self.READ_ONLY_PERMISSION\n ):\n return True\n return (\n pvm.view_menu.name in self.ALPHA_ONLY_VIEW_MENUS\n or pvm.permission.name in self.ALPHA_ONLY_PERMISSIONS\n )",
"def _is_alpha(argument):\n\n if not isinstance(argument, str):\n return False\n\n if argument.lower() == 'alpha':\n is_alpha = True\n else:\n argument, Z = _extract_charge_state(argument)\n\n if Z != 2:\n is_alpha = False\n elif argument[-2:] != '-4':\n is_alpha = False\n else:\n\n dash_position = argument.find('-')\n argument = argument[:dash_position]\n\n if argument.lower() == 'helium' or argument == 'He':\n is_alpha = True\n else:\n is_alpha = False\n\n return is_alpha",
"def checkAlpha():\n node = nuke.thisNode()\n file_type = node.knob('file_type').value()\n channels = node.knob('channels').value()\n renderFormat = node.knob('renderFormat')\n if renderFormat:\n if file_type == 'dpx':\n node.knob('channels').setValue('rgb')\n if not renderFormat.value() == 'dpx' and not file_type == 'dpx' and not channels == 'all':\n node.knob('channels').setValue('rgba')\n else:\n if file_type == 'dpx':\n node.knob('channels').setValue('rgb')\n if not file_type == 'dpx' and not channels == 'all':\n node.knob('channels').setValue('rgba')",
"def is_version_3_1_or_newer() -> bool:\n if is_apptainer_1_or_newer():\n return True # this is equivalent to singularity-ce > 3.9.5\n v = get_version()\n return v[0][0] >= 4 or (v[0][0] == 3 and v[0][1] >= 1)",
"def _test_image_alpha(self, image):\n\n # In the interest of speed, let's see if we've already done this one...\n result = self._alphatest.get(image, None)\n if result is not None:\n return result\n\n if image.channels != 4:\n result = False\n elif not image.use_alpha:\n result = False\n else:\n # Using bpy.types.Image.pixels is VERY VERY VERY slow...\n key = _Texture(image=image)\n with GLTexture(key, fast=True) as glimage:\n result = glimage.has_alpha\n\n self._alphatest[image] = result\n return result",
"def isalpha(self):\n return isalpha(self)",
"def no_afni():\n if Info.version() is None:\n return True\n return False",
"def getEnabled(self):\n if getattr(self, 'installedversion', None) != __version__ :\n return False\n return self.getField('enabled').get(self)",
"def isalpha(a):\n return _vec_string(a, bool_, 'isalpha')",
"def alpha(self) -> float:\n return self._alpha",
"def is_v1_enabled(self) -> pulumi.Input[bool]:\n return pulumi.get(self, \"is_v1_enabled\")",
"def test_beta_to_stable(self):\n self.change_version(self.version_1_2_0, '1.2beta')\n self.change_status(self.version_1_2_0, amo.STATUS_BETA)\n\n version, file = self.get('1.2beta', self.version_int,\n self.app, self.platform)\n assert version == self.version_1_2_2"
] | [
"0.71558696",
"0.68556184",
"0.66215545",
"0.6604386",
"0.62436414",
"0.6219088",
"0.6163636",
"0.6142863",
"0.6137229",
"0.6127662",
"0.6059142",
"0.6059142",
"0.6059142",
"0.6052503",
"0.6046842",
"0.6028268",
"0.59896886",
"0.5884365",
"0.58294576",
"0.57802516",
"0.5762358",
"0.5708911",
"0.56950194",
"0.56856996",
"0.56776667",
"0.56656796",
"0.56282467",
"0.56131655",
"0.5585826",
"0.55688477"
] | 0.7095586 | 1 |
Return a bool to indicate beta version. | def beta(self) -> bool:
return "b" in self.modifier if self.modifier else "beta" in self.string | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_installed_beta_no_newer_stable(self):\n self.change_version(self.version_1_2_2, '1.2beta')\n self.change_status(self.version_1_2_2, amo.STATUS_BETA)\n\n version, file = self.get('1.2beta', self.version_int,\n self.app, self.platform)\n assert version == self.version_1_2_1",
"def test_beta_to_stable(self):\n self.change_version(self.version_1_2_0, '1.2beta')\n self.change_status(self.version_1_2_0, amo.STATUS_BETA)\n\n version, file = self.get('1.2beta', self.version_int,\n self.app, self.platform)\n assert version == self.version_1_2_2",
"def is_on(self) -> bool:\n current = self.coordinator.data.info.version\n beta = self.coordinator.data.info.version_latest_beta\n stable = self.coordinator.data.info.version_latest_stable\n\n return current is not None and (\n (stable is not None and stable > current)\n or (\n beta is not None\n and (current.alpha or current.beta or current.release_candidate)\n and beta > current\n )\n )",
"def getBeta(self):\n\t\treturn self.relativistic_beta",
"def is_release():\n return VERSION[-1]",
"def is_stable(self) -> bool:\n return not self.is_prerelease",
"def test_beta_updates_to_stable(self):\n self.change_version(self.version_1_2_0, '1.2beta')\n self.change_status(self.version_1_2_0, amo.STATUS_BETA)\n self.change_status(self.version_1_2_2, amo.STATUS_BETA)\n\n version, file = self.get('1.2beta', self.version_int,\n self.app, self.platform)\n assert version == self.version_1_2_1",
"def beta(self):\n return self._beta",
"def beta(self):\n return self._beta",
"def is_versioning_enabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"is_versioning_enabled\")",
"def test_public_not_beta(self):\n self.change_status(self.version_1_2_2, amo.STATUS_PENDING)\n self.addon.reload()\n assert self.addon.status == amo.STATUS_PUBLIC\n version, file = self.get('1.2', self.version_int,\n self.app, self.platform)\n assert version == self.version_1_2_1\n\n # Make sure we don't return a beta if there is one.\n self.change_version(self.version_1_2_1, '1.2beta')\n self.change_status(self.version_1_2_1, amo.STATUS_BETA)\n self.addon.reload()\n assert self.addon.status == amo.STATUS_PUBLIC\n version, file = self.get('1.2', self.version_int,\n self.app, self.platform)\n assert version == self.version_1_2_0",
"def is_versioning_enabled(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"is_versioning_enabled\")",
"def is_release(self):\n # version string: N.N.N.N is for release.\n return bool(re.match(r'^[\\d.]+$', self.version))",
"def is_dev_version(cls):\n\n # We initiate the command we have to run in order to\n # get the branch we are currently working with.\n command = \"git branch\"\n\n # We execute and get the command output.\n command_result = PyFunceble.helpers.Command(command).execute()\n\n for branch in command_result.split(\"\\n\"):\n # We loop through each line of the command output.\n\n if branch.startswith(\"*\") and (\"dev\" in branch or \"3.x\" in branch):\n # The current branch is `dev`.\n\n # We return True.\n return True\n\n # The current branch is not `dev`.\n\n # We return False.\n return False",
"def is_valid_version(self):\n pass",
"def is_release_branch():\n diff_string_config_yml = run_command(\"git diff origin/master .circleci/config.yml\")\n if re.search(r'[+-][ ]+CONTENT_VERSION: \".*', diff_string_config_yml):\n return True\n\n return False",
"def is_valid_version(self) -> bool:\n return self._is_valid_version()",
"def auto_minor_version_upgrade(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"auto_minor_version_upgrade\")",
"def auto_minor_version_upgrade(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"auto_minor_version_upgrade\")",
"def auto_minor_version_upgrade(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"auto_minor_version_upgrade\")",
"def auto_minor_version_upgrade(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"auto_minor_version_upgrade\")",
"def beta_channel(self, value):\n self._data[ATTR_BETA_CHANNEL] = bool(value)\n self.save()",
"def is_version_2_6() -> bool:\n v = get_version()\n if v[1] != \"singularity\" and v[1] != \"singularity-ce\":\n return False\n return v[0][0] == 2 and v[0][1] == 6",
"def minor_version_auto_upgrade(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"minor_version_auto_upgrade\")",
"def is_dev(version):\n return re.match(r'^.*\\.dev\\d+$', version)",
"def is_hide_prod_version_enabled(cluster_config):\n\n cluster = load_cluster_config_json(cluster_config)\n try:\n return cluster[\"environment\"][\"hide_product_version\"]\n except KeyError:\n return False",
"def auto_minor_version_upgrade(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"auto_minor_version_upgrade\")",
"def auto_minor_version_upgrade(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"auto_minor_version_upgrade\")",
"def _beta(self):\n return _handle_ab(self.solution, self.use_const)[1]",
"def test_release_tag_for_dev_version(self) -> None:\n self.assertEqual(\"v42.12\", release_tag())"
] | [
"0.6776148",
"0.6726279",
"0.6702003",
"0.660505",
"0.65905607",
"0.6581529",
"0.6487351",
"0.64485204",
"0.64485204",
"0.628221",
"0.6265156",
"0.62314755",
"0.6192408",
"0.61612093",
"0.61580074",
"0.61532915",
"0.6114675",
"0.608962",
"0.608962",
"0.608962",
"0.608962",
"0.6068473",
"0.6059471",
"0.60501295",
"0.60456216",
"0.6027348",
"0.6011656",
"0.6011656",
"0.5976811",
"0.58893824"
] | 0.7676527 | 0 |
Return an int representation of the number of sections in the version. | def sections(self) -> int:
return len(self.string.split(".")) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def number_of_sections(self):\n #print (len(self.config.sections()))\n return len(self.config.sections())",
"def number_of_sections(self):\n sections = self.config.sections()\n return len(sections)",
"def testSectionCount(self):\n\n self.sectionCount(3640)",
"def section(self, idx: int) -> int:\n if self.sections >= (idx + 1):\n return int(RE_DIGIT.match(self.string.split(\".\")[idx]).group(1))\n return 0",
"def num_parts(self):\n return self._num_parts",
"def getSectionIndex(self) -> int:\n ...",
"def read_ram_sections_count(self):\n count = ctypes.c_uint32()\n \n result = self._lib.NRFJPROG_read_ram_sections_count(ctypes.byref(count))\n if result != NrfjprogdllErr.SUCCESS:\n raise APIError(result)\n\n return count.value",
"def voxel_count(self):\n return self.cols * self.rows * self.sections",
"def getSegmentCount(self) -> int:\n ...",
"def segment_n(self):\n return len(self.segment_lengths)",
"def Test_NumSegments(Daten):\n N_Leitungen = len(Daten.PipeSegments)\n\n return N_Leitungen",
"def get_num_chunks(self) -> int:",
"def get_num_pieces(self):\n return self.num_pieces",
"def version_number() -> int:\n return 0",
"def __len__(self):\n nlines = self.get_endline() - self.get_startline() + 1\n if nlines < 0:\n nlines = 0\n return nlines",
"def count(self):\n return len(self.read_ints())",
"def size(self):\n size = 1\n for current_slice in self.slices:\n size *= current_slice.stop - current_slice.start\n return size",
"def get_part_size(self): # -> int:\n ...",
"def numSegments(self):\n\n return self.getHierView().numSegments()",
"def sectionNofTotal (inputList, currentSection, numSections):\n currentSection -= 1 # internally, we want 0..N-1, not 1..N\n size = len (inputList)\n perSection = size // numSections\n extra = size % numSections\n start = perSection * currentSection\n num = perSection\n if currentSection < extra:\n # the early sections get an extra item\n start += currentSection\n num += 1\n else:\n start += extra\n stop = start + num\n return inputList[ start:stop ]",
"def get_length(self):\n\t\treturn len(self._blocks)",
"def get_num_of_pages(self):",
"def __get_size(self):\n\t\treturn 4*self.version + 17",
"def _len_version(v_list: list) -> int:\n l = len(v_list)\n return l - 1 if v_list[-1].startswith(\"dev\") or v_list[-1].startswith(\"post\") else l",
"def count(self):\n return int()",
"def count(self):\n return int()",
"def total_nt(self) -> int:\n return self.sequence.length",
"def calculate_number_of_segments(self):\n return sum(len(eg.transcript_file.segments) for eg in self.exemplars)",
"def count(self):\n return self.vcount",
"def get_node_size(self):\n range_start = self._node_map[self._partid - 1] if self._partid > 0 else 0\n range_end = self._node_map[self._partid]\n return range_end - range_start"
] | [
"0.7290255",
"0.71390265",
"0.7022638",
"0.6800033",
"0.6529658",
"0.6438036",
"0.63203466",
"0.629719",
"0.62520695",
"0.6229295",
"0.6214037",
"0.61139905",
"0.6018662",
"0.6015436",
"0.5944503",
"0.592994",
"0.5904244",
"0.58882594",
"0.58861005",
"0.58535624",
"0.5848062",
"0.5837166",
"0.5796502",
"0.5795574",
"0.579454",
"0.579454",
"0.5785518",
"0.57836175",
"0.577897",
"0.5771659"
] | 0.7857107 | 0 |
Return the modifier of the version if any. | def modifier(self) -> str:
match = RE_MODIFIER.match(self.string.split(".")[-1])
return match.group(1) if match else None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def modifier(self):\n return self._modifier",
"def modifier(self) -> str:\n return self._modifier",
"def last_modifier(self) -> str:\n return pulumi.get(self, \"last_modifier\")",
"def version_patch(self):\n assert self._version_patch != NotImplemented\n return self._version_patch",
"def modifiers(self):\n return self._modifiers",
"def modifiers(self):\n return self._modifiers",
"def modifiers(self):\n return self._modifiers",
"def getModifier(self, *args):\n return _libsbml.Reaction_getModifier(self, *args)",
"def get_last_modifier_name(self):\n\t\treturn call_sdk_function('PrlVmCfg_GetLastModifierName', self.handle)",
"def version(self):\n return \"%d.%d\" % (self._vmajor, self._vminor)",
"def get_modifier_state() -> Modifier:\n return Modifier(lib.SDL_GetModState())",
"def version_minor(self):\n assert self._version_patch != NotImplemented\n return self._version_patch",
"def get_modifier(self, keycode):\n for n, l in self.modmap.items():\n if keycode in l:\n return n\n return None",
"def getVersion(self,fileName):\n if not fileName in self.data or not self.data[fileName].tes3:\n return ''\n maVersion = reVersion.search(self.data[fileName].tes3.hedr.description)\n return (maVersion and maVersion.group(2)) or ''",
"def FormatVersion(self):\n return self._get_attr('FormatVersion')",
"def ACE_MODIFIER() -> int:\n return 10",
"def version(self):\n a = re.search('(?<=_V)\\d{1,2}', self.fname)\n if a is None:\n return None\n else:\n return int(a.group())",
"def get_modifier(self, *, term: str) -> Union[ModifierModel, None]:\n return next((m for m in self.modifiers if m.name == term), None)",
"def getVersion(self):\n return self.get('Version', type=\"numeric\")",
"def get_version(self):\n return 0",
"def get_version():\n return 1",
"def version(self) -> Union[int, str]:",
"def version(self) -> Optional[str]:\n return pulumi.get(self, \"version\")",
"def version(self) -> Optional[str]:\n return pulumi.get(self, \"version\")",
"def version(self) -> Optional[str]:\n return pulumi.get(self, \"version\")",
"def version(self) -> Optional[str]:\n return pulumi.get(self, \"version\")",
"def version(self) -> Optional[str]:\n return pulumi.get(self, \"version\")",
"def do_version(self):\n return \"1.0.0\", True",
"def get_version():\n return magpy.get_version()",
"def version(self) -> str:\n data = \"none yet\"\n if self.STARTED:\n data = (\n self.about.get(\"Version\")\n or self.about.get(\"Installed Version\")\n or \"DEMO\"\n )\n data = data.replace(\"_\", \".\")\n return data"
] | [
"0.7436179",
"0.74236995",
"0.6400592",
"0.63046837",
"0.62790924",
"0.62790924",
"0.62790924",
"0.617146",
"0.6152442",
"0.61241037",
"0.6110316",
"0.60718215",
"0.605894",
"0.6057274",
"0.605401",
"0.60382944",
"0.60236037",
"0.59833145",
"0.5965063",
"0.59624547",
"0.5955425",
"0.59447205",
"0.59314793",
"0.59314793",
"0.59314793",
"0.59314793",
"0.59314793",
"0.591418",
"0.59099066",
"0.59021336"
] | 0.78408784 | 0 |
Return the version strategy. | def strategy(self) -> AwesomeVersionStrategy:
return version_strategy(self.string) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def strategy(self) -> Optional[pulumi.Input['UpgradeSettingsStrategy']]:\n return pulumi.get(self, \"strategy\")",
"def get_version(self):\n pass",
"def get_algorithm_version(self):\n return self.get_attr('algorithm_version')",
"def get_version():\n return 1",
"def get_version(self):\n return self.cur_config['version']['name']",
"def get_version(self):\n return self.version",
"def _get_version(self):",
"def get_version(self):\n return 0",
"def version(self):\n return self.get_current_version()",
"def engine_version(self) -> Optional[str]:\n return pulumi.get(self, \"engine_version\")",
"def get_version(self):\n return self._version",
"def get_version(self):\n return self._version",
"def version(self):\n if \"version\" in self._prop_dict:\n return self._prop_dict[\"version\"]\n else:\n return None",
"def get_version(self):\n return version.__version__",
"def get_version(self):\n return version.__version__",
"def version(self) -> Optional[str]:\n return pulumi.get(self, \"version\")",
"def version(self) -> Optional[str]:\n return pulumi.get(self, \"version\")",
"def version(self) -> Optional[str]:\n return pulumi.get(self, \"version\")",
"def version(self) -> Optional[str]:\n return pulumi.get(self, \"version\")",
"def version(self) -> Optional[str]:\n return pulumi.get(self, \"version\")",
"def get(self):\n return self._version",
"def get_version(self):\n return self.api_version",
"def get_version_comparitor(self, requirement):\n if manage.is_inhouse_package(requirement.project_name):\n if self._prefer_final:\n log.debug(' in-house package, prefer-final')\n return easy_install._final_version\n else:\n log.debug(' in-house package, prefer-dev')\n return self.is_dev_version\n else:\n log.debug(' third-party package, always prefer-final')\n return easy_install._final_version",
"def get_version(self, params):\n return self.version",
"def engine_version(self) -> str:\n return pulumi.get(self, \"engine_version\")",
"def get_solver_version(self):\n return self.agent.version_info.get(\"SolverVersion\")",
"def getversion(self):\n return self.__version",
"def model_version(self) -> str:\n return pulumi.get(self, \"model_version\")",
"def get_version(self) -> str:\n return versioning.get_version()",
"def get_current_version(self) -> str:\n raise NotImplementedError()"
] | [
"0.7340707",
"0.7004464",
"0.6950592",
"0.68829256",
"0.6777264",
"0.6774704",
"0.6755827",
"0.6669227",
"0.65694803",
"0.6558051",
"0.6553694",
"0.6553694",
"0.65459174",
"0.65300995",
"0.65300995",
"0.6527395",
"0.6527395",
"0.6527395",
"0.6527395",
"0.6527395",
"0.6495148",
"0.6471164",
"0.64473337",
"0.64461005",
"0.64361924",
"0.64335674",
"0.6387753",
"0.63807464",
"0.637868",
"0.63710034"
] | 0.8231897 | 0 |
Return True if the version string is simple. | def simple(self) -> bool:
return is_simple(self.string) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _is_python_version(s: str) -> bool:\n\n return s.startswith(\"2\") or s.startswith(\"3\")",
"def is_simple(self): # -> bool:\n ...",
"def version_is_full_release(version_string):\n match = VERSION_REGEX.match(version_string)\n\n if match and match.groupdict()[\"modifier\"] == \"\":\n return True\n else:\n return False",
"def is_simple() -> bool:\n raise NotImplementedError()",
"def is_stable_version(version):\n if not isinstance(version, tuple):\n version = version.split('.')\n last_part = version[-1]\n\n if not re.search('[a-zA-Z]', last_part):\n return True\n else:\n return False",
"def semantic_version(value):\n try:\n semantic_version_module.Version(value)\n return True\n except ValueError:\n return False",
"def is_valid_version(self):\n pass",
"def check_version_str(version):\n if not version.startswith('v') and version != 'current':\n version = 'v%s' % version\n return version",
"def is_release(self):\n # version string: N.N.N.N is for release.\n return bool(re.match(r'^[\\d.]+$', self.version))",
"def is_string(self):\n answer = self._call('is_string')\n return answer.yes",
"def test_versionString(self):\n self.assertIn(\"%d.%d.%d\" % nevow.__version_info__, nevow.__version__)",
"def stringable(self):\n return True",
"def is_valid_version(self) -> bool:\n return self._is_valid_version()",
"def get_version():\r\n try:\r\n with open('version', 'r') as version_file:\r\n return str(version_file.readline())\r\n except:\r\n return False",
"def is_simple(self) -> bool:\n return self.data in ('int', 'bool', 'float', 'str')",
"def is_stringified(self) -> bool:\n return self._stringify",
"def is_simple(self):\n return _property_op(arctern.ST_IsSimple, self).astype(bool, copy=False)",
"def test_literal_comparison(self):\n\n given = \"1.0.0.dev (Hello, World!)\"\n expected = True\n actual = Version.literally_compare(given, given)\n\n self.assertEqual(expected, actual)",
"def is_simple_in_opt(self) -> bool:\n return self.inner_part_of_optional.is_simple",
"def test_version(self):\n v = version('/no/such/executable')\n self.assertEqual(v, '0.0.1.dev0')\n v = version('false')\n self.assertEqual(v, '0.0.1.dev0')\n v = version('echo')\n self.assertEqual(v, 'describe .devrev-list --count HEAD')",
"def is_version_2_6() -> bool:\n v = get_version()\n if v[1] != \"singularity\" and v[1] != \"singularity-ce\":\n return False\n return v[0][0] == 2 and v[0][1] == 6",
"def _pattern_is_simple(pattern):\n return bool(re.match('[\\\\w_]+$', tostring(pattern)))",
"def model_is_valid(self, model: OscalBaseModel) -> bool:\n oscal_version = model.metadata.oscal_version.__root__\n p = re.compile(OSCAL_VERSION_REGEX)\n matched = p.match(oscal_version)\n return matched is not None",
"def IsStandalone(self):\n return True",
"def test_true(self):\n result = self.flag.parseString('Y')\n self.assertEqual('Y', result[0])",
"def to_boolean(self,string):\n if self.debug:\n print('to_boolean'+lineno())\n # FIXME\n sys.exit(1)\n #string.to_s.casecmp('true').zero?",
"def test_get_short_version(self):\n pass",
"def testStratisVersion(self):\n version = Manager.Properties.Version.Get(get_object(TOP_OBJECT))\n (major, _, _) = version.split(\".\")\n self.assertEqual(major, \"0\")",
"def __bool__(self):\n return _libsbml.string___bool__(self)",
"def test_version_type(self):\n self.assertIsInstance(get_version(), str)"
] | [
"0.6629746",
"0.65817094",
"0.6550173",
"0.6395718",
"0.6303441",
"0.6069128",
"0.59459835",
"0.59038144",
"0.58978236",
"0.5895466",
"0.58661574",
"0.5808034",
"0.58048254",
"0.58044857",
"0.57614726",
"0.57409894",
"0.5707732",
"0.56662035",
"0.56323236",
"0.55764836",
"0.55670017",
"0.55624753",
"0.5562406",
"0.55569917",
"0.55366224",
"0.5529682",
"0.55111325",
"0.55086255",
"0.54966867",
"0.54933345"
] | 0.7469431 | 0 |
Request the list of groups for the account. Print out how many groups there are, then use the first group where the test property lives. | def getGroup():
print
print "Requesting the list of groups for this account"
groups_result = getResult('/papi/v0/groups')
return (groups_result) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_groups(self):\n response = self.client.get_groups()\n uri, args = response[\"uri\"].split(\"?\")\n\n self.assertEqual(response[\"method\"], \"GET\")\n self.assertEqual(uri, \"/admin/v1/groups\")\n self.assertEqual(util.params_to_dict(args), {\"account_id\": [self.client.account_id]})",
"def groups(self):\n #return self.get('{}/groups'.format(ApiVersion.A1.value))\n return self.get('{}/groups'.format(ApiVersion.CM1.value))",
"def test_get_groups(self):\n pass",
"def test_get_groups(self):\n pass",
"def test_api_v1_groups_get(self):\n pass",
"def list_groups(self, **params):\n url = 'groups'\n if params:\n url += '?%s' % urllib.urlencode(params)\n resp, body = self.get(url)\n self.expected_success(200, resp.status)\n body = json.loads(body)\n return rest_client.ResponseBody(resp, body)",
"def get_list_groups(self):\n list_response = requests.get(self.groups_url, headers=self.headers)\n return list_response.json()[\"groups\"]",
"def test_request_groups(self):\n response = requests.get(self.url + '/groups')\n\n self.assertEqual(response.status_code, 200)\n\n json = response.json()\n self.assertIsInstance(json, dict)\n self.assertEqual(len(json.keys()), 1)\n self.assertIn('groups', json.keys())\n\n groups = json.get('groups')\n self.assertIsInstance(groups, list)\n self.assertEqual(len(groups), 3)\n self.assertIn('Human', groups)\n self.assertIn('Male', groups)\n self.assertIn('Female', groups)",
"def test_getGroups(self):\n\t\turl = \"/groups/\"\n\t\tresponse = self.client.get(url, format='json')\n\t\tself.assertEqual(response.status_code, status.HTTP_200_OK)\n\t\tself.assertEqual(response.data[\"count\"], 1)",
"def get_groups():\r\n if 'username' not in flask.session:\r\n return flask.jsonify(**{'message': 'Forbidden', 'status_code': 403})\r\n\r\n context = {}\r\n context['url'] = flask.request.path\r\n context['groups'] = []\r\n\r\n # Retreive query variables\r\n query_num_groups = flask.request.args.get('size') \r\n query_page = flask.request.args.get('page') \r\n num_groups = int(query_num_groups) if query_num_groups != None else 10\r\n page_number = int(query_page) if query_page != None else 0\r\n\r\n groups = get_group_listing(flask.session['username'], \r\n num_groups, page_number)\r\n for g in groups:\r\n context['groups'].append({\r\n 'id': g[0],\r\n 'name': g[1]\r\n })\r\n\r\n if (num_groups == 10):\r\n context['next'] = '{}?page={}'.format(context['url'], page_number + 1)\r\n else:\r\n context['next'] = '{}?page={}&size={}'.format(context['url'], \r\n page_number + 1, num_groups)\r\n\r\n return flask.jsonify(**context)",
"def test_020_query_groups(self):\n\n testflow.step(\"Querying for groups\")\n assert self.query_cli.run(\n what='group'\n )[0], \"Failed to search for groups\"",
"def test_groups_get(self):\n pass",
"def test_groups_get(self):\n pass",
"def test_get_groups(self):\n group0 = self.test_save('TestGroup0')\n group1 = self.test_save('TestGroup1')\n \n group0.grant('Perm1', object0)\n group0.grant('Perm3', object1)\n group1.grant('Perm2', object1)\n \n self.assert_(group0 in get_groups(object0))\n self.assertFalse(group1 in get_groups(object0))\n self.assert_(group0 in get_groups(object1))\n self.assert_(group1 in get_groups(object1))\n self.assert_(len(get_groups(object1))==2)",
"def list_group():\n data, code, message = FIELD_SERVICE.list_group()\n return __result(data, code, message)",
"def list_groups(args):\n\n for group in get_groups(args):\n print(group)",
"def get_groups(self):\n return Client._get(self)",
"def get_RegisteredGroupsList(test_case, only_discoverable=False, override_headers=null, override_cookies=null):\n # type: (AnyMagpieTestCaseType, bool, Optional[HeadersType], Optional[CookiesType]) -> List[Str]\n app_or_url = get_app_or_url(test_case)\n path = \"/register/groups\" if only_discoverable else \"/groups\"\n resp = test_request(app_or_url, \"GET\", path,\n headers=override_headers if override_headers is not null else test_case.json_headers,\n cookies=override_cookies if override_cookies is not null else test_case.cookies)\n json_body = check_response_basic_info(resp, 200, expected_method=\"GET\")\n return json_body[\"group_names\"]",
"def list_groups(request):\n groups = models.UserGroup.all().order('name')\n return utility.respond(request, 'admin/list_groups', {'groups': groups})",
"def get_groups(self):\n response = self._get(\"groups\")\n\n return response.json()",
"def get_GroupInfo(test_case, # type: AnyMagpieTestCaseType\n override_body=None, # type: JSON\n override_group_name=null, # type: Optional[Str]\n override_version=null, # type: Optional[Str]\n override_headers=null, # type: Optional[HeadersType]\n override_cookies=null, # type: Optional[CookiesType]\n ): # type: (...) -> JSON\n version = override_version if override_version is not null else TestSetup.get_Version(test_case)\n grp_name = override_group_name if override_group_name is not null else test_case.test_group_name\n if TestVersion(version) < TestVersion(\"0.6.4\"): # route did not exist before that\n if override_body and \"group\" in override_body:\n return override_body[\"group\"]\n if override_body and \"group_name\" in override_body:\n return override_body\n return {\"group_name\": grp_name or {}}\n if override_body:\n if override_body and \"group\" in override_body:\n return override_body[\"group\"]\n if override_body and \"group_name\" in override_body:\n return override_body\n resp = test_request(test_case, \"GET\", \"/groups/{}\".format(grp_name),\n headers=override_headers if override_headers is not null else test_case.json_headers,\n cookies=override_cookies if override_cookies is not null else test_case.cookies)\n body = check_response_basic_info(resp)\n check_val_is_in(\"group\", body)\n return body[\"group\"] or {}",
"def get_groups():\n\n # FUTURE: Properly reutrn error, Mongo is giving it's own\n if current_user.groups:\n return Response(response=json.dumps([g.to_dict() for g in current_user.groups]), status=200, mimetype=\"application/json\")\n else:\n return return_json_error('No groups assigned to', 500)",
"def list_groups(self, order_by: str = None, next_link: str = None, top: int = None, filter_: str = None):\n if next_link: # pagination\n return self.ms_client.http_request(method='GET', full_url=next_link)\n # default value = 100\n params = {'$top': top}\n if order_by:\n params['$orderby'] = order_by # type: ignore\n if filter_:\n params['$filter'] = filter_ # type: ignore\n return self.ms_client.http_request(\n method='GET',\n url_suffix='groups',\n params=params)",
"def get_groups(args):\n\n args.suppress_verify_output = True\n if verify(args) != 0:\n # restore stdout\n sys.stdout = sys.__stdout__\n print(\"Config file not valid, please use the verify function to debug\")\n return []\n\n with open(args.file, \"r\") as f:\n config_json = json.load(f)\n\n groups = []\n for group in config_json[\"groups\"]:\n groups.append(group[\"name\"])\n return groups",
"def list_groups():\n return _list_tindyb_unique_values(\"group\", dbpath=__dbpath__)",
"def test_api_v1_groups_names_get(self):\n pass",
"def groups():\n access_token = session['access_token']\n return \"%s\" % list_groups(access_token)",
"def groups(self):\n return self.get_data(\"groups\")",
"def list_groups(access_token):\n request_url = OKTA_URL + \"api/v1/groups\"\n headers = {\"Authorization\": \"Bearer \" + access_token}\n group_request = requests.get(request_url, headers=headers).json()\n return group_request",
"def test_users_groups_get(self):\n pass"
] | [
"0.76318985",
"0.72561604",
"0.7251496",
"0.7251496",
"0.7171868",
"0.69862527",
"0.69726115",
"0.69587696",
"0.69490767",
"0.6907517",
"0.68522304",
"0.6844985",
"0.6844985",
"0.67527145",
"0.6748885",
"0.6688267",
"0.66860783",
"0.66860723",
"0.66638684",
"0.66486186",
"0.6637195",
"0.66163844",
"0.66129535",
"0.6591105",
"0.65879375",
"0.65810937",
"0.65740806",
"0.65573895",
"0.65056396",
"0.6501933"
] | 0.7699433 | 0 |
Get the properties for the associated group/contract combination | def getProperties(groupId, contractId):
print "Getting properties for group %s and contract %s" % (groupId, contractId)
property_parameters = { "contractId":contractId, "groupId":groupId }
property_result = getResult('/papi/v0/properties', property_parameters)
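	# The response may omit the "properties" key entirely; treat that as an empty list.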
if "properties" in property_result:
property_items = property_result['properties']['items']
else:
property_items = []
return (property_items) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_properties():",
"def getPropertiesAll():",
"def get_params(self):\n return {'physical_properties_actor': {'group': self.group}}",
"def getProperties():",
"def test_properties_stats_group_by_group_by_and_sub_group_by_get(self):\n pass",
"def get_properties(self):\n return COMMON_PROPERTIES",
"def test_properties_stats_group_by_group_by_get(self):\n pass",
"def get_properties(self):\n return self.properties",
"def getMemberProperties(self, member, exclude_props=[], include_props=None):\n if not self.is_compatible: return {}\n props = {}\n user = member.getUser()\n for sheet in user.getOrderedPropertySheets():\n for item in sheet.propertyItems():\n field = item[0]\n value = item[1]\n if type(value) is UnicodeType:\n value = value.encode('UTF8')\n if not props.has_key(field): props[field] = value\n #id property isn't stored in property sheet, we can get it from member or user object\n props['id'] = member.getProperty('id')\n return props",
"def get_properties(self):\n\n properties = {}\n for iface_name in self.all_interfaces:\n iface = getattr(self, iface_name, None)\n if iface:\n properties.update(iface.get_properties())\n return properties",
"def get_properties(self):\n return self.properties",
"def get_properties(self):\n return irmc_common.COMMON_PROPERTIES",
"def getSingleProperty(propertyId, groupId, contractId ):\n\tproperty_parameters = { \"contractId\":contractId, \"groupId\":groupId }\n\tproperty_result = getResult('/papi/v0/properties/%s/' % propertyId, \n\t\t\t\t\t\t\t\tproperty_parameters)\n\treturn (property_result)",
"def get_prop_comfort(self, name_building):\n return self._prop_comfort.loc[name_building].to_dict()",
"def propertyGroup(self, p_int): # real signature unknown; restored from __doc__\n return \"\"",
"def properties_get(self):\n return self._get('properties')",
"def getProperties(self, owner: unicode) -> List[ghidra.program.model.util.PropertyMap]:\n ...",
"def properties(self):\n return self._props",
"def _properties(self) -> dict[str, dict[str, str]]:\n schema = self.schema(by_alias=False)\n if schema.get('properties') is not None:\n return schema.get('properties', {})\n return schema.get('definitions', {}).get(self.__class__.__name__, {}).get('properties', {})",
"def get_details(self):\n owner = self.fake.random_element(elements=self.owners)\n return {\n 'jurisdiction_property_id': self.fake.numerify(text='#####'),\n 'pm_parent_property_id': self.fake.numerify(text='#####'),\n 'lot_number': self.fake.numerify(text='#####'),\n 'address_line_1': self.address_line_1(),\n 'city': 'Boring',\n 'state': 'Oregon',\n 'postal_code': \"970{}\".format(self.fake.numerify(text='##')),\n 'year_built': self.fake.random_int(min=1880, max=2015),\n 'site_eui': self.fake.random_int(min=50, max=600),\n 'gross_floor_area': self.fake.random_number(digits=6),\n 'owner': owner.name,\n 'owner_email': owner.email,\n 'owner_telephone': owner.telephone,\n 'owner_address': owner.address,\n 'owner_city_state': owner.city_state,\n 'owner_postal_code': owner.postal_code,\n }",
"def _retrieve(self):\n all_groups_settings = []\n iam_groups_settings = []\n\n model_manager = self.service_config.model_manager\n scoped_session, data_access = model_manager.get(self.model_name)\n with scoped_session as session:\n for settings in data_access.scanner_fetch_groups_settings(session,\n True):\n email = settings[0].split('group/')[1]\n iam_groups_settings.append(groups_settings.GroupsSettings\n .from_json(email, settings[1]))\n for settings in data_access.scanner_fetch_groups_settings(session,\n False):\n email = settings[0].split('group/')[1]\n all_groups_settings.append(groups_settings.GroupsSettings\n .from_json(email, settings[1]))\n\n return all_groups_settings, iam_groups_settings",
"def getProperties(self):\n return self.properties",
"def get_properties():\n properties = dict()\n properties['size'] = list()\n properties['color'] = list()\n properties['quality'] = list()\n u = models.Size.query.all()\n for i in u:\n properties['size'].append(i.size_name)\n u = models.Color.query.all()\n for i in u:\n properties['color'].append(i.color_name)\n u = models.Quality.query.all()\n for i in u:\n properties['quality'].append(i.quality_name)\n return make_response(jsonify(properties))",
"def get_details(self):\n owner = self.fake.random_element(elements=self.owners)\n return {\n 'jurisdiction_property_id': self.fake.numerify(text='#####'),\n 'pm_parent_property_id': self.fake.numerify(text='#####'),\n 'lot_number': self.fake.numerify(text='#####'),\n 'address_line_1': self.address_line_1(),\n 'city': 'Boring',\n 'state': 'Oregon',\n 'postal_code': \"970{}\".format(self.fake.numerify(text='##')),\n 'year_built': self.fake.random_int(min=1880, max=2015),\n 'site_eui': self.fake.random_int(min=50, max=600),\n 'owner': owner.name,\n 'owner_email': owner.email,\n 'owner_telephone': owner.telephone,\n 'owner_address': owner.address,\n 'owner_city_state': owner.city_state,\n 'owner_postal_code': owner.postal_code,\n }",
"def get_component_properties(self):\n\n self.get_max_mid_diameter()\n self.get_logPs()\n self.get_logSs()\n self.get_SAs()\n self.get_purchasability_class()\n self.get_bertzCTs()",
"def base_properties(self):\n return self.properties.GetAll(self.mpris_base)",
"def properties(self):\n return self.properties_with_uid[1:]",
"def properties(self) -> Optional['outputs.AzureDevOpsOrgPropertiesResponse']:\n return pulumi.get(self, \"properties\")",
"def get_provider_properties_dict(self):\n pass",
"def properties(self, filters={}):\n return self.__get_list_client(Property)(filters=filters)"
] | [
"0.595744",
"0.58036405",
"0.5743681",
"0.5725618",
"0.56826746",
"0.5637038",
"0.5632564",
"0.54945195",
"0.5491524",
"0.54664433",
"0.5392897",
"0.5355023",
"0.5322294",
"0.52961135",
"0.5288239",
"0.5287506",
"0.52869225",
"0.52772146",
"0.5272553",
"0.52705836",
"0.52164966",
"0.5198015",
"0.519095",
"0.5186552",
"0.5184338",
"0.51737726",
"0.51661897",
"0.51547515",
"0.51441306",
"0.51394886"
] | 0.69252104 | 0 |
Create a custom range instance | def create_range(range_class):
if not hasattr(range_class, 'name'):
raise exceptions.ValidationError(
"A custom range must have a name attribute")
return Range.objects.create(
name=range_class.name,
proxy_class=_class_path(range_class)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create(self, range):\n raise NotImplementedError",
"def range_maker(low, hi, step, lst=None):\n return numpy.arange(low, hi, step)",
"def new_range(r):\n if isinstance(r, list) or isinstance(r, tuple) and len(r) == 2:\n lower = r[0]\n upper = r[1]\n else:\n lower = r\n upper = r\n lower = int(lower)\n upper = int(upper)\n return range(lower, upper + 1)",
"def range100(self):\r\n return self.init(100)",
"def define_range():\n\n def_range = {'lt': [0.0, 24.0],\n 'lon': [0.0, 360.0],\n 'angle': [0.0, 2.0 * np.pi]}\n\n return def_range",
"def range(start: int, stop: int = None, step: int = None) -> ObservableBase:\n from ..operators.observable.range import from_range\n return from_range(start, stop, step)",
"def __init__(self, *args: Union[Rangelike, Iterable[Rangelike]]):\n # flatten args\n temp_list = []\n for arg in args:\n if _is_iterable_non_string(arg):\n temp_list.extend(Range(x) for x in arg)\n else:\n temp_list.append(Range(arg))\n # assign own Ranges\n self._ranges = RangeSet._merge_ranges(temp_list)",
"def build_range(identity: str, type_uri: str = SBOL_RANGE):\n start = 1\n end = 1\n obj = Range(PYSBOL3_MISSING, start, end, identity=identity, type_uri=type_uri)\n # Remove the placeholder values\n obj._properties[SBOL_SEQUENCES] = []\n obj._properties[SBOL_START] = []\n obj._properties[SBOL_END] = []\n return obj",
"def Range(self, from: int, to: int) -> BaseVector:",
"def __init__(self, start, end, max):",
"def test_get_range(self):\n pass",
"def open_range(start, stop, step):\n return np.arange(start, stop+step/2, step)",
"def __init__(self, start, stop):\n if start > stop:\n raise IndexError(f'range is invalid: start={start} > stop={stop}')\n self._start = start - 1\n self._stop = stop - 1",
"def __init__(self, rng, low, high):\n\t\tself.rng = rng\n\t\tself.low = low\n\t\tself.high = high",
"def __init__(self, low, high, step_name, variable_name):\n super().__init__(step_name, variable_name, list(range(low, high + 1)))\n self.low = min(low, high)\n self.high = max(low, high)",
"def __init__(self, start, end, value):\n self.start = start\n self.end = end\n self.value = value",
"def add_range(self, start=None, stop=None, step=None, name=None):\n input_names = []\n start = self._maybe_add_const(start, \"range_start\")\n stop = self._maybe_add_const(stop, \"range_stop\")\n step = self._maybe_add_const(step, \"range_step\")\n if start is not None:\n input_names.append(start)\n input_names = input_names + [stop, step]\n\n return self._build_op('Range', input_names, name=name)",
"def create(self, range_value):\n return product(range(2), repeat=range_value[0])",
"def set_visualization_range(self, start: int, end: int):\n self.__range = (start, end)",
"def __init__(self, ranges):\n if not ranges:\n raise Exception(\"You must supply at least one non-null sampling range\")\n if hasattr(ranges[0], \"__len__\"):\n assert all(len(x) == 2 for x in ranges)\n self.ranges = ranges\n else:\n assert len(ranges) > 1\n lows = [x for x in ranges[:-1]]\n highs = [x for x in ranges[1:]]\n myranges = []\n for i, pair in enumerate(zip(lows, highs)):\n if i % 2 == 0:\n myranges.append(pair)\n assert len(myranges) == len(ranges) // 2\n self.ranges = myranges",
"def range(self):\n\n return time_stat(self, stat=\"range\")",
"def range(self):\n return self.range_array",
"def f_get_range(self, copy=True):\n raise NotImplementedError(\"Should have implemented this.\")",
"def range1000(self):\r\n return self.init(1000)",
"def __init__(self, start, end):\n self.start = start\n self.end = end",
"def range (self):\n return self._range",
"def range (self):\n return self._range",
"def __init__(self, ranges=None, *args, **kwargs):\n self.ranges = ranges\n super(DiscreteGeneticAlgorithm, self).__init__(*args, **kwargs)",
"def __init__(self, domain, range):\n self.domain = domain.cloneSpace()\n self.range = range.cloneSpace()",
"def __init__ (self, start, end):\n\n self.start = start\n self.end = end"
] | [
"0.7898044",
"0.7348169",
"0.707552",
"0.7064517",
"0.7046968",
"0.6874818",
"0.6799003",
"0.67599416",
"0.67424244",
"0.6694247",
"0.66876024",
"0.666282",
"0.66363037",
"0.6542488",
"0.65385115",
"0.64849585",
"0.64830506",
"0.64606047",
"0.6450502",
"0.6440675",
"0.6436779",
"0.64169616",
"0.6400877",
"0.6393992",
"0.63930565",
"0.6361558",
"0.6361558",
"0.6356207",
"0.6326798",
"0.6318353"
] | 0.7600169 | 1 |
Create a custom condition instance | def create_condition(condition_class):
return Condition.objects.create(
proxy_class=_class_path(condition_class)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create( name ):\n if name == 'splunk':\n return SplunkCondition()\n\n elif name == 'base':\n return _BaseCondition()\n\n else:\n raise EncoreException('Unrecognised condition: {0}'.format( name ))",
"def __init__(self, condition: typing.Callable[..., bool]):\n super().__init__()\n self.condition = condition",
"def condition(self) -> global___Expression:",
"def condition(self) -> global___Expression:",
"def __init__(self, *args, **kwargs):\n\t\tsuper().__init__(*args, **kwargs)\n\t\tself.condition = None",
"def create_condition(lock: Optional[Lock] = None) -> Condition:\n return Condition(lock=lock)",
"def _dyncond(self, attr_name: str) -> Condition:\n\n @Condition\n def dynamic() -> bool:\n value = cast(FilterOrBool, getattr(self, attr_name))\n return to_filter(value)()\n\n return dynamic",
"def add_custom_condition(self,ns,cond,content=None):\n c=self.xmlnode.newTextChild(None,to_utf8(cond),content)\n ns=c.newNs(to_utf8(ns),None)\n c.setNs(ns)\n return c",
"def Condition(condition_type,\n status=\"Unknown\",\n severity=\"\",\n msg=\"\",\n reason=\"\",\n last_transition_time=\"\"):\n condition = run_v1_messages.GoogleCloudRunV1Condition()\n condition.type = condition_type\n condition.status = status\n condition.severity = severity\n condition.message = msg\n condition.reason = reason\n condition.lastTransitionTime = last_transition_time\n return condition",
"def AddCondition(self, name, expression):\n self.conditions[name] = expression",
"def condition(self) -> ExpressionNode:\n return self.__condition",
"def create_condition(self, url_data, service_id, service_version):\n request_dict = {k: v[0] for k, v in url_data}\n\n create_condition = {\n u\"type\": \"REQUEST\",\n u\"comment\": \"\",\n u\"name\": \"condition\",\n u\"version\": service_version,\n u\"service_id\": service_id,\n u\"statement\": request_dict['statement'],\n u\"priority\": request_dict['priority']\n }\n\n if 'condition_list' not in self.fastly_cache[service_id]:\n self.fastly_cache[service_id]['condition_list'] = []\n\n self.fastly_cache[service_id][\n 'condition_list'].append(create_condition)\n return create_condition",
"def where(self, condition):\n raise NotImplementedError(\"This should have been implemented.\")",
"def add_condition(self, name, condition, statements, comment):\n new = Block(name)\n self.data[name] = new\n\n new.condition = condition\n new.statements = statements\n new.comment = comment\n return new",
"def MembershipCondition(self) -> IMembershipCondition:",
"def conditional(self) -> global___Statement.Conditional:",
"def meta(condition):\n\n class M(metaclass=_MetaMC):\n @classmethod\n def chk(cls, sub):\n return condition(sub)\n\n return M",
"def __init__(__self__, *,\n condition: Optional[str] = None,\n error_message: Optional[str] = None,\n time: Optional[str] = None):\n if condition is not None:\n pulumi.set(__self__, \"condition\", condition)\n if error_message is not None:\n pulumi.set(__self__, \"error_message\", error_message)\n if time is not None:\n pulumi.set(__self__, \"time\", time)",
"def __init__(__self__, *,\n condition: Optional[str] = None,\n error_message: Optional[str] = None,\n time: Optional[str] = None):\n if condition is not None:\n pulumi.set(__self__, \"condition\", condition)\n if error_message is not None:\n pulumi.set(__self__, \"error_message\", error_message)\n if time is not None:\n pulumi.set(__self__, \"time\", time)",
"def __init__(__self__, *,\n condition: Optional[str] = None,\n error_message: Optional[str] = None,\n time: Optional[str] = None):\n if condition is not None:\n pulumi.set(__self__, \"condition\", condition)\n if error_message is not None:\n pulumi.set(__self__, \"error_message\", error_message)\n if time is not None:\n pulumi.set(__self__, \"time\", time)",
"def __init__(__self__, *,\n condition: Optional[str] = None,\n error_message: Optional[str] = None,\n time: Optional[str] = None):\n if condition is not None:\n pulumi.set(__self__, \"condition\", condition)\n if error_message is not None:\n pulumi.set(__self__, \"error_message\", error_message)\n if time is not None:\n pulumi.set(__self__, \"time\", time)",
"def __init__(__self__, *,\n condition: Optional[str] = None,\n error_message: Optional[str] = None,\n time: Optional[str] = None):\n if condition is not None:\n pulumi.set(__self__, \"condition\", condition)\n if error_message is not None:\n pulumi.set(__self__, \"error_message\", error_message)\n if time is not None:\n pulumi.set(__self__, \"time\", time)",
"def __init__(__self__, *,\n condition: Optional[str] = None,\n error_message: Optional[str] = None,\n time: Optional[str] = None):\n if condition is not None:\n pulumi.set(__self__, \"condition\", condition)\n if error_message is not None:\n pulumi.set(__self__, \"error_message\", error_message)\n if time is not None:\n pulumi.set(__self__, \"time\", time)",
"def _build_conditions():\n empty = Condition()\n lt = Document.id < 10\n gt = Document.id > 12\n\n path = Document.data[\"Rating\"] == 3.4\n\n # Order doesn't matter for multi conditions\n basic_and = lt & gt\n swapped_and = gt & lt\n multiple_and = lt & lt & gt\n\n basic_or = lt | gt\n swapped_or = gt | lt\n multiple_or = lt | lt | gt\n\n not_lt = ~lt\n not_gt = ~gt\n\n not_exists_data = Document.data.is_(None)\n not_exists_id = Document.id.is_(None)\n exists_id = Document.id.is_not(None)\n\n begins_hello = Document.some_string.begins_with(\"hello\")\n begins_world = Document.some_string.begins_with(\"world\")\n\n contains_hello = Document.some_string.contains(\"hello\")\n contains_world = Document.some_string.contains(\"world\")\n contains_numbers = Document.numbers.contains(9)\n\n between_small = Document.id.between(5, 6)\n between_big = Document.id.between(100, 200)\n between_strings = Document.some_string.between(\"alpha\", \"zebra\")\n\n in_small = Document.id.in_([3, 7, 11])\n in_big = Document.id.in_([123, 456])\n in_numbers = Document.numbers.in_([120, 450])\n\n conditions.update((\n empty,\n lt, gt, path,\n basic_and, swapped_and, multiple_and,\n basic_or, swapped_or, multiple_or,\n not_lt, not_gt,\n not_exists_data, not_exists_id, exists_id,\n begins_hello, begins_world, between_strings,\n contains_hello, contains_world, contains_numbers,\n between_small, between_big, between_strings,\n in_small, in_big, in_numbers\n ))",
"def testConditionBuilder(self):\n\t\ttests = {\n\t\t\tr'elem==1': [['elem'], op.EQ, 1],\n\t\t\tr'(elem==2)': [[['elem'], op.EQ, 2]],\n\t\t\tr'elem > 1 and elem2 < 5.54':\n\t\t\t\t[['elem'], op.GT, 1, op.AND, ['elem2'], op.LT, 5.54],\n\t\t\tr'not elem <= \"aaa\" and not elem2 ~ \"zzz\"':\n\t\t\t\t[op.NOT, ['elem'], op.LTE, \"aaa\", op.AND, op.NOT,\n\t\t\t\t['elem2'], op.REGEXP, \"zzz\"],\n\t\t\tr'not (elem.abc.def == 0xabcd) and (elem2 < 3)':\n\t\t\t\t[op.NOT, [['elem', 'abc', 'def'], op.EQ, b\"\\xab\\xcd\"],\n\t\t\t\top.AND, [['elem2'], op.LT, 3]],\n\t\t\tr'not (elem.2 != 4) or (not key.2.3 != 1) and not key.1 !~ \"abc\"':\n\t\t\t\t[op.NOT, [op.NOT, ['elem', 2], op.EQ, 4], op.OR,\n\t\t\t\t[['key', 2, 3], op.EQ, 1], op.AND,\n\t\t\t\t['key', 1], op.REGEXP, 'abc'],\n\t\t\tr'and == 2': [['and'], op.EQ, 2],\n\t\t\tr'': None\n\t\t}\n\n\t\tfor test in tests:\n\t\t\tself.assertEqual(tests[test], parseSearchCondition(test))",
"def convert_to_condition(self):\n if self._equivalent_condition is None:\n conditions = list()\n for sub_effect in self.get_sub_effects():\n conditions.append(sub_effect.convert_to_condition())\n if len(conditions) == 0:\n self._equivalent_condition = VoidCondition()\n elif len(conditions) == 1:\n self._equivalent_condition = conditions[0]\n elif len(self.get_output_variables()) == 1:\n self._equivalent_condition = ComplexCondition(conditions, BinaryOperator.OR)\n else:\n self._equivalent_condition = ComplexCondition(conditions, BinaryOperator.AND)\n return self._equivalent_condition",
"def condition(self, condition):\n\n self._condition = condition",
"def instantiate(cls):\n default_xml = '<condition class=\"{0}\" plugin=\"[email protected]\"/>'\n default_xml = default_xml.format(cls.get_jenkins_plugin_name())\n root_node = ElementTree.fromstring(default_xml)\n\n return cls(root_node)",
"def add_condition(self):\n m = self.get_current_measurement()\n result = PriorityDialog()\n if result.exec_():\n # Update Survey.priority based on the input\n key, val1, val2, weight = result.key, result.val1, result.val2, result.weight\n \n # If the condition is x == val1, determine whether val1 is str or int\n if result.type == 'value':\n val1 = get_type(val1)(val1)\n\n # Add the condition to Survey.priority\n arr = np.array([[val1, val2, weight]])\n if key not in m.priority:\n m.priority[key] = np.zeros(shape=(0, 3))\n m.priority[key] = np.append(m.priority[key], arr, axis=0)\n \n self.mgr.changed = True\n \n self.load_conditions()",
"def by(cls, gen_clause=\"\", params=[]):\r\n return _Conditional(lambda m: Q.of(gen_clause(m), params))"
] | [
"0.74090016",
"0.7075004",
"0.6859647",
"0.6859647",
"0.68583304",
"0.6705134",
"0.6695043",
"0.6613352",
"0.631761",
"0.6232662",
"0.61909944",
"0.6052121",
"0.6037356",
"0.6036718",
"0.6023041",
"0.59498423",
"0.5853378",
"0.5791596",
"0.5791596",
"0.5791596",
"0.5791596",
"0.5791596",
"0.5791596",
"0.5789949",
"0.5747084",
"0.5658552",
"0.5630916",
"0.56238884",
"0.56092405",
"0.55939776"
] | 0.7530871 | 0 |
Returns a date and time formatted as per HTTP RFC 2616. | def http_date_time(when):
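    # Produces the fixed-length GMT form required by RFC 2616, e.g.
    # 'Sun, 06 Nov 1994 08:49:37 GMT'.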
gmtime = time.gmtime(when)
return '%s, %02d %3s %4d %02d:%02d:%02d GMT' % (
WEEKDAY_ABR[gmtime.tm_wday], gmtime.tm_mday,
MONTH_ABR[gmtime.tm_mon - 1], gmtime.tm_year, gmtime.tm_hour,
gmtime.tm_min, gmtime.tm_sec) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def formatRFC2616Date(secs=None):\n\tif secs is None:\n\t\tsecs = time.time()\n\treturn emailutils.formatdate(secs, localtime=False, usegmt=True)",
"def rfc3999(date):\n if not date: return ''\n date = date + datetime.timedelta(seconds = -time.timezone)\n if time.daylight:\n date += datetime.timedelta(seconds = time.altzone)\n return date.strftime('%m-%d-%YT%H:%M:%SZ')",
"def http_date(d=None):\n if not d:\n d = datetime.datetime.utcnow()\n return d.strftime(\"%a, %d %b %Y %H:%M:%S GMT\")",
"def _time_str(self):\n try:\n if not self._time:\n raise ValueError\n format_ = '%a, %d %b %Y %H:%M:%S'\n return datetime.fromtimestamp(float(self._time)).strftime(format_)\n except ValueError:\n return plastic_date()",
"def datetimeToRFC2616(dt):\n\treturn dt.strftime('%a, %d %b %Y %H:%M:%S GMT')",
"def httpdate(dt):\n weekday = [\"Mon\", \"Tue\", \"Wed\", \"Thu\", \"Fri\", \"Sat\", \"Sun\"][dt.weekday()]\n month = [\"Jan\", \"Feb\", \"Mar\", \"Apr\", \"May\", \"Jun\", \"Jul\", \"Aug\", \"Sep\",\n \"Oct\", \"Nov\", \"Dec\"][dt.month - 1]\n return \"%s, %02d %s %04d %02d:%02d:%02d GMT\" % (weekday, dt.day, month,\n dt.year, dt.hour, dt.minute, dt.second)",
"def time(self):\r\n now = datetime.datetime.now()\r\n month = rfc822._monthnames[now.month - 1].capitalize()\r\n return ('[%02d/%s/%04d:%02d:%02d:%02d]' %\r\n (now.day, month, now.year, now.hour, now.minute, now.second))",
"def get_tlo_send_date():\n 'YYYY-MM-DD HH:MM:SS'\n return strftime(\"%Y-%m-%d %H:%M:%S\")",
"def httpdate(date_obj):\n return date_obj.strftime(\"%a, %d %b %Y %H:%M:%S GMT\")",
"def format_time(self, data):\r\n if self.datetime_formatting == 'rfc-2822':\r\n return format_time(data)\r\n\r\n return data.isoformat()",
"def usformat(date):\r\n return date.strftime('%m-%d-%Y %H:%M:%S')",
"def datetime_timeplotxml(self):\n if self.time:\n return self.date.strftime(\"%b %d %Y\") + \" \" + self.time.strftime(\"%H:%M:%S\") + \" GMT\"\n else:\n return self.date.strftime(\"%b %d %Y\") + \" \" + \"00:00:00\" + \" GMT\"",
"def get_time_and_date(self):\n date_str = ''\n time_str = ''\n for header in self.frd.headers:\n if header.code != 'U':\n continue\n elif header.string.startswith('DATE'):\n date_str = header.string.replace('DATE', '').strip()\n elif header.string.startswith('TIME'):\n time_str = header.string.replace('TIME', '').strip()\n return FRDParser._parse_ccx_date(date_str, time_str)",
"def time():\n return datetime.datetime.now().strftime(\"%Y%m%dT%H%M%SZ\")",
"def getdate():\n return strftime(\"%A %B %d, %I:%M %p\")",
"def datetime_timeplotxml(self):\n return self.date.strftime(\"%b %d %Y\") + \" \" + \"00:00:00\" + \" GMT\"",
"def _rfc822_datetime(self, t=None):\n\n if t is None:\n t = datetime.utcnow()\n\n return formatdate(timegm(t.timetuple()), usegmt=True)",
"def w3c_time():\n return time.strftime(\"%Y-%m-%dT%H:%M:%SZ\", time.gmtime())",
"def friendly_date(self):\n return self.created_at.strftime(\"%a %b %#d %Y, %#I:%M %p\")",
"def nersc_format_datetime(timetup=None):\n if timetup is None:\n timetup = time.localtime()\n # YYYY-MM-DD[THH:MM[:SS]]\n return time.strftime('%Y-%m-%dT%H:%M:%S', timetup)",
"def rfc3339date(date):\n if not date: return ''\n date = date + datetime.timedelta(seconds=-time.timezone)\n if time.daylight:\n date += datetime.timedelta(seconds=time.altzone)\n return date.strftime('%Y-%m-%dT%H:%M:%SZ')",
"def http_date(epoch_seconds=None):\n return formatdate(epoch_seconds, usegmt=True)",
"def getTimeString():\n\tfrom time import strftime\n\treturn strftime(\"%d-%m-%Y__%H-%M-%S\")",
"def get_datetime_str():\n return time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())",
"def time_hack(self):\n now = datetime.datetime.now()\n monthnames = ['jan', 'feb', 'mar', 'apr', 'may', 'jun',\n 'jul', 'aug', 'sep', 'oct', 'nov', 'dec']\n month = monthnames[now.month - 1].capitalize()\n return ('[%02d/%s/%04d:%02d:%02d:%02d.%06d]' %\n (now.day, month, now.year, now.hour, now.minute, now.second, now.microsecond))",
"def format_date(self, date):\n return time.strftime('%-I:%-M %p - %-d %b %Y',\n self.parse_twitter_date(date))",
"def datetime(self):\r\n if 'observation_time_rfc822' in self.data \\\r\n and self.data['observation_time_rfc822']:\r\n tstr = self.data['observation_time_rfc822']\r\n tstr = ' '.join(tstr.split(' ')[:-2])\r\n return datetime.strptime(tstr, '%a, %d %b %Y %H:%M:%S')\r\n elif 'observation_time' in self.data:\r\n return datetime.strptime(self.data['observation_time'] \\\r\n +' %s'%datetime.now().year,\r\n 'Last Updated on %b %d, %H:%M %p %Z %Y')\r\n return ''",
"def get_time_str():\n return time.strftime(time_fmt)",
"def ptime():\n return time.strftime(\"%Y-%m-%dT%H:%M:%SZ\",time.gmtime(time.time()))",
"def format_datetime(self, data):\r\n data = make_naive(data)\r\n if self.datetime_formatting == 'rfc-2822':\r\n return format_datetime(data)\r\n\r\n return data.isoformat()"
] | [
"0.73714656",
"0.7116985",
"0.69873196",
"0.6974283",
"0.68409675",
"0.6838362",
"0.68314415",
"0.681626",
"0.6791276",
"0.6748883",
"0.67287123",
"0.6700428",
"0.6695328",
"0.6688156",
"0.668528",
"0.6676679",
"0.6670514",
"0.66421837",
"0.66373765",
"0.66150707",
"0.6611124",
"0.6606479",
"0.6588093",
"0.6585339",
"0.6571443",
"0.6566973",
"0.65653384",
"0.64667386",
"0.6450395",
"0.6448656"
] | 0.72268707 | 1 |
Handles incoming WSGI requests. Requests that start with the configured path simply serve up any files under the configured location on the file system. Other requests are passed on to the next WSGI app in the chain. | def __call__(self, env, start_response):
path = os.path.normpath(env['PATH_INFO'].strip('/'))
if path == self.path:
path = '.'
elif path.startswith(self.path + '/'):
path = path[len(self.path) + 1:]
if not path:
path = '.'
elif self.path:
return self.next_app(env, start_response)
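        # Refuse anything that would escape the configured serve directory.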
if path == '..' or path.startswith('..' + os.path.sep):
return http.HTTPForbidden()(env, start_response)
path = os.path.join(self.serve_path, path)
if not os.path.exists(path):
return http.HTTPNotFound()(env, start_response)
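        # Directories: ensure a trailing slash, then serve index.html or fall
        # back to a generated listing.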
if os.path.isdir(path):
if not env['PATH_INFO'].endswith('/'):
return http.HTTPMovedPermanently(
headers={'Location': env['PATH_INFO'] + '/'})(
env, start_response)
dirpath = path
path = os.path.join(path, 'index.html')
if not os.path.exists(path):
return self.listing(dirpath, env, start_response)
content_type = mimetypes.guess_type(path)[0] or \
'application/octet-stream'
stat = os.stat(path)
        if not stat.st_size:
            # Empty files get a bodiless 204; return here so the 200 response
            # below is not also started.
            start_response(
                '204 No Content',
                [('Content-Length', '0'), ('Content-Type', content_type)])
            return ''
start_response(
'200 OK',
[('Content-Length', str(stat.st_size)),
('Content-Type', content_type),
('Last-Modified',
http_date_time(min(stat.st_mtime, time.time())))])
if env['REQUEST_METHOD'] == 'HEAD':
return ''
return _openiter(path, 65536, stat.st_size) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def handler ( self, environ, start_response ) :\n url = urlparse ( reconstruct_url ( environ ) ) \n \n if self.mount_point is not None:\n split_url = url.path.split ( self.mount_point, 1 ) \n serve_file = split_url [ 1 ] \n else:\n serve_file = url.path\n \n serve_file = serve_file.replace ( '%20', ' ' ) \n \n def do_get ( ):\n if serve_file.endswith ( '/' ) or os.path.isdir ( os.path.join ( self.path, serve_file ) ):\n if os.path.isdir ( os.path.join ( self.path, serve_file ) ):\n start_response ( '200 OK', [ ( 'Cache-Control', 'no-cache' ), ( 'Pragma','no-cache' ) ,\n ( 'Content-Type', 'text/html; charset=utf-8' ) ] )\n return [ \n '<html>' + \n '<br>'.join ( [ '<a href=\"%s/%s\">%s</a>' % ( serve_file.replace ( filename, '' ), filename, filename ) \n for filename in os.listdir ( os.path.join ( self.path, serve_file ) ) ] )\n + '</html>' \n ]\n else:\n logger.error ( 'failed to list directory %s/%s' % ( self.path, serve_file ) )\n start_response ( '404 Not found', [ ( 'Content-Type', 'text/plain') ] )\n return ['404 Not Found' ] \n \n try:\n if os.name == 'nt' or sys.platform == 'cygwin':\n f = open ( os.path.join ( self.path, serve_file ), 'rb' ) \n else:\n f = open ( os.path.join ( self.path, serve_file ), 'r' ) \n logger.debug ( 'opened file %s' % serve_file ) \n except IOError:\n logger.error ( 'failed to open file %s/%s' % ( self.path, serve_file ) )\n start_response ( '404 Not found', [ ( 'Content-Type', 'text/plain') ] )\n return [ '404 Not Found' ] \n\n start_response ( '200 OK', [ ( 'Cache-Control', 'no-cache' ), ( 'Pragma','no-cache' ),\n ( 'Content-Type', self.guess_content_type ( environ['PATH_INFO' ] ) ) ] )\n return [ f.read ( ) ] \n \n def do_put ( ):\n #Write file\n try:\n f = open ( os.path.join ( self.path, serve_file ) , 'w' ) \n logger.debug ( 'opened file for writing %s' % serve_file ) \n except:\n logger.error ( 'failed to open file for writiing %s/%s' % ( self.path, serve_file ) )\n start_response ( '403 Forbidden', [ ( 'Content-Type', 'text/plain' ) ] )\n return ['403 Forbidden' ] \n \n f.write ( environ [ 'wsgi.input' ] .read ( ) )\n \n def do_mkcollection ( ):\n pass\n \n http_method_map = { 'GET': do_get, 'PUT': do_put, 'MKCOLLECTION': do_mkcollection }\n return http_method_map[environ['REQUEST_METHOD' ] ] ( )",
"def application(environ, start_response):\n params = cgi.FieldStorage(fp=environ['wsgi.input'], environ=environ)\n path = params.getfirst('p', default='')\n callback = not_found\n for regex, cllbck in urls:\n if re.match(regex, path):\n callback = cllbck\n return callback(params, start_response)",
"def handle_request(self,host,path,data=b''):\n\t\tif data:\n\t\t\tself.response_code(4,\"Uploads are not accepted.\")\n\t\t\treturn\n\t\tif not hasattr(self,\"root\"):\n\t\t\tself.response_code(5,\"Server is unable to handle requests at this time due to misconfiguration.\")\n\t\t\treturn\n\t\tself.root = os.path.abspath(self.root)\n\t\tif not (prefix:=os.path.abspath(os.path.join(self.root,host))).startswith(self.root):\n\t\t\tself.response_code(4,\"Cowardly refusing to serve file outside of root.\")\n\t\t\treturn\n\t\tif not (filepath:=os.path.abspath(os.path.join(prefix,unquote(path.lstrip(\"/\"))))).startswith(prefix):\n\t\t\tself.response_code(4,\"Cowardly refusing to serve file outside of root.\")\n\t\t\treturn\n\t\tif not os.path.exists(filepath):\n\t\t\tself.response_code(4,\"Not Found\")\n\t\t\treturn\n\t\tif os.path.isdir(filepath):\n\t\t\tif os.path.exists(os.path.join(filepath,\"index.gmi\")):\n\t\t\t\tfilepath = os.path.join(filepath,\"index.gmi\")\n\t\t\telse:\n\t\t\t\tself.response_code(5,\"Cowardly refusing to generate folder listing.\")\n\t\t\t\treturn\n\t\text = os.path.splitext(filepath)[1]\n\t\tmimetype = mimetypes.guess_type(filepath,False)\n\t\tif ext in self.OVERRIDE_MIMETYPES:\n\t\t\tmimetype = self.OVERRIDE_MIMETYPES[ext]\n\t\tmimetype = mimetype or \"application/octet-stream\"\n\t\twith open(filepath,\"rb\") as f:\n\t\t\tself.response_code(2,mimetype)\n\t\t\tshutil.copyfileobj(f,self.wfile)",
"def application(environ, start_response):\n path = environ.get('PATH_INFO', '').lstrip('/')\n for regex, callback in urls:\n match = re.search(regex, path)\n if match is not None:\n environ['myapp.url_args'] = match.groups()\n return callback(environ, start_response)\n return not_found(environ, start_response)",
"def wsgi_app(self):\n \n def myapp(environ, start_response):\n \n path_info = environ['PATH_INFO']\n query_string = environ['QUERY_STRING']\n \n #if not hasattr(self, 'pattern_cache'):\n # self.pattern_cache = [(pth, args, pfunc) for pth, args, pfunc in self.patterns()]\n self.pattern_cache = [(re.compile('/path_xml'),[],self.path_xml),(re.compile('/transit_path'),[],self.transit_path),(re.compile('/getUrbanExplorerBlob'),[],self.getUrbanExplorerBlob)]\n \n for ppath, pargs, pfunc in self.pattern_cache:\n if ppath.match(path_info):\n \n args = cgi.parse_qs(query_string)\n args = dict( [(k,v[0]) for k,v in args.iteritems()] )\n \n try:\n #use simplejson to coerce args to native types\n #don't attempt to convert an arg 'jsoncallback'; just ignore it.\n arglist = []\n for k,v in args.iteritems():\n if k==\"jsoncallback\":\n arglist.append( (k,v) )\n elif k != \"_\":\n arglist.append( (k,json_loads(v)) )\n args = dict( arglist )\n \n if hasattr(pfunc, 'mime'):\n mime = pfunc.mime\n else:\n mime = self.DEFAULT_MIME\n \n start_response('200 OK', [('Content-type', mime)])\n #try:\n# for value in pfunc(**args):\n# rr = xstr( value )\n #except TypeError:\n # problem = \"Arguments different than %s\"%str(pargs)\n # start_response('500 Internal Error', [('Content-type', 'text/plain'),('Content-Length', str(len(problem)))])\n # return [problem]\n \n return pfunc(**args)\n except:\n problem = traceback.format_exc()\n start_response('500 Internal Error', [('Content-type', 'text/plain'),('Content-Length', str(len(problem)))])\n return [problem]\n \n # no match:\n problem = \"No method corresponds to path '%s'\"%environ['PATH_INFO']\n start_response('404 Not Found', [('Content-type', 'text/plain'),('Content-Length', str(len(problem)))])\n return [problem]\n \n return myapp",
"def __call__(self, environ, start_response):\n # TODO: Consider supporting multiple applications mounted at root URL.\n # Then, consider providing priority of mounted applications.\n # One application could explicitly override some routes of other.\n script = environ.get('PATH_INFO', '')\n path_info = ''\n while '/' in script:\n if script in self.mounts:\n app = self.mounts[script]\n break\n items = script.split('/')\n script = '/'.join(items[:-1])\n path_info = '/%s%s' % (items[-1], path_info)\n else:\n app = self.mounts.get(script, self.app)\n original_script_name = environ.get('SCRIPT_NAME', '')\n environ['SCRIPT_NAME'] = original_script_name + script\n environ['PATH_INFO'] = path_info\n return app(environ, start_response)",
"def application(self, environ, start_response):\n uri = environ['PATH_INFO'].encode('latin-1').decode()\n is_test = request_uri(environ) == BASE_NAME + uri\n\n # Guess the file type required\n if re.match(\".*\\.html\", uri):\n mime = \"html\"\n elif re.match(\".*\\.rdf\", uri):\n mime = \"pretty-xml\"\n elif re.match(\".*\\.ttl\", uri):\n mime = \"turtle\"\n elif re.match(\".*\\.nt\", uri):\n mime = \"nt\"\n elif re.match(\".*\\.json\", uri):\n mime = \"json-ld\"\n elif 'HTTP_ACCEPT' in environ:\n if (SPARQL_PATH and\n (uri == SPARQL_PATH or uri == (SPARQL_PATH+\"/\"))):\n mime = self.best_mime_type(environ['HTTP_ACCEPT'],\n \"sparql-json\")\n else:\n mime = self.best_mime_type(environ['HTTP_ACCEPT'], \"html\")\n else:\n mime = \"html\"\n\n # The welcome page\n if uri == \"/\" or uri == \"/index.html\":\n start_response('200 OK', [('Content-type',\n 'text/html; charset=utf-8')])\n if not exists(DB_FILE):\n return [self.render_html(DISPLAY_NAME, pystache.render(\n open(resolve(\"html/onboarding.mustache\")).read(),\n {'context': CONTEXT}), is_test)]\n else:\n return [self.render_html(\n DISPLAY_NAME,\n pystache.render(open(resolve(\"html/index.html\")).read(),\n {'property_facets': FACETS, 'context': CONTEXT}),\n is_test).encode('utf-8')]\n # The search page\n elif (SEARCH_PATH and\n (uri == SEARCH_PATH or uri == (SEARCH_PATH + \"/\"))):\n if 'QUERY_STRING' in environ:\n qs_parsed = parse_qs(environ['QUERY_STRING'])\n if 'query' in qs_parsed:\n query = qs_parsed['query'][0]\n if 'property' in qs_parsed:\n prop = qs_parsed['property'][0]\n else:\n prop = None\n if 'offset' in qs_parsed:\n offset = int(qs_parsed['offset'][0])\n else:\n offset = 0\n return self.search(start_response, query, prop, offset)\n else:\n return self.send400(start_response, YZ_NO_RESULTS)\n else:\n return self.send400(start_response, YZ_NO_QUERY)\n # The dump file\n elif uri == DUMP_URI:\n start_response('200 OK', [('Content-type', 'appliction/x-gzip'),\n ('Content-length',\n str(os.stat(DUMP_FILE).st_size))])\n return [open(resolve(DUMP_FILE), \"rb\").read()]\n # The favicon (i.e., the logo users see in the\n # browser next to the title)\n elif (uri.startswith(\"/favicon.ico\") and\n exists(resolve(\"assets/favicon.ico\"))):\n start_response(\n '200 OK', [('Content-type', 'image/png'),\n ('Content-length',\n str(os.stat(\n resolve(\"assets/favicon.ico\")).st_size))])\n return [open(resolve(\"assets/favicon.ico\"), \"rb\").read()]\n # Any assets requests\n elif uri.startswith(ASSETS_PATH) and exists(resolve(uri[1:])):\n start_response(\n '200 OK', [('Content-type', mimetypes.guess_type(uri)[0]),\n ('Content-length',\n str(os.stat(resolve(uri[1:])).st_size))])\n x = open(resolve(uri[1:]), \"rb\").read()\n return [x]\n # SPARQL requests\n elif SPARQL_PATH and (uri == SPARQL_PATH or uri == (SPARQL_PATH+\"/\")):\n if 'QUERY_STRING' in environ:\n qs = parse_qs(environ['QUERY_STRING'])\n if 'query' in qs:\n return self.sparql_query(\n qs['query'][0], mime,\n qs.get('default-graph-uri', [None])[0],\n start_response)\n else:\n start_response('200 OK', [('Content-type',\n 'text/html; charset=utf-8')])\n s = open(resolve(\"html/sparql.html\")).read()\n return [self.render_html(\n DISPLAY_NAME,\n s, is_test).encode('utf-8')]\n else:\n start_response('200 OK', [('Content-type',\n 'text/html; charset=utf-8')])\n s = open(resolve(\"html/sparql.html\")).read()\n return [self.render_html(DISPLAY_NAME, s,\n is_test).encode('utf-8')]\n elif LIST_PATH and (uri == LIST_PATH or uri == (LIST_PATH + \"/\")):\n offset = 0\n prop = None\n 
obj = None\n obj_offset = 0\n if 'QUERY_STRING' in environ:\n qs = parse_qs(environ['QUERY_STRING'])\n if 'offset' in qs:\n try:\n offset = int(qs['offset'][0])\n except ValueError:\n return self.send400(start_response)\n if 'prop' in qs:\n prop = \"<%s>\" % qs['prop'][0]\n if 'obj' in qs:\n obj = qs['obj'][0]\n if 'obj_offset' in qs and re.match(\"\\d+\", qs['obj_offset'][0]):\n obj_offset = int(qs['obj_offset'][0])\n\n return self.list_resources(start_response, offset,\n prop, obj, obj_offset)\n elif METADATA_PATH and (uri == METADATA_PATH or\n uri == (\"/\" + METADATA_PATH) or\n uri == (\"/\" + METADATA_PATH + \".rdf\") or\n uri == (METADATA_PATH + \".rdf\") or\n uri == (\"/\" + METADATA_PATH + \".ttl\") or\n uri == (METADATA_PATH + \".ttl\") or\n uri == (\"/\" + METADATA_PATH + \".nt\") or\n uri == (METADATA_PATH + \".nt\") or\n uri == (\"/\" + METADATA_PATH + \".json\") or\n uri == (METADATA_PATH + \".json\")):\n graph = dataid()\n if mime == \"html\":\n content = self.rdfxml_to_html(graph, BASE_NAME + METADATA_PATH,\n YZ_METADATA, is_test)\n else:\n try:\n self.add_namespaces(graph)\n if mime == \"json-ld\":\n content = yuzu.jsonld.write(\n graph, BASE_NAME + id)\n else:\n content = graph.serialize(format=mime).decode('utf-8')\n except Exception as e:\n print (e)\n return self.send501(start_response)\n start_response(\n '200 OK',\n [('Content-type', self.mime_types[mime] + \"; charset=utf-8\"),\n ('Vary', 'Accept'), ('Content-length', str(len(content)))])\n return [content.encode('utf-8')]\n elif exists(resolve(\"html/%s.html\" % re.sub(\"/$\", \"\", uri))):\n start_response('200 OK', [('Content-type',\n 'text/html; charset=utf-8')])\n s = pystache.render(open(resolve(\n \"html/%s.html\" % re.sub(\"/$\", \"\", uri))).read(),\n {'context': CONTEXT,\n 'dump_uri': DUMP_URI})\n return [self.render_html(DISPLAY_NAME, s,\n is_test).encode('utf-8')]\n # Anything else is sent to the backend\n elif re.match(\"^/(.*?)(|\\.nt|\\.html|\\.rdf|\\.ttl|\\.json)$\", uri):\n id, _ = re.findall(\n \"^/(.*?)(|\\.nt|\\.html|\\.rdf|\\.ttl|\\.json)$\", uri)[0]\n graph = self.backend.lookup(id)\n if graph is None:\n return self.send404(start_response)\n labels = sorted([str(o) for s, p, o in\n graph.triples(\n (URIRef(BASE_NAME + id), RDFS.label, None))])\n if labels:\n title = ', '.join(labels)\n else:\n title = DISPLAYER.uri_to_str(BASE_NAME + id)\n if mime == \"html\":\n content = self.rdfxml_to_html(graph, BASE_NAME + id, title,\n is_test)\n else:\n try:\n self.add_namespaces(graph)\n if mime == \"json-ld\":\n content = yuzu.jsonld.write(\n graph, BASE_NAME + id)\n else:\n content = graph.serialize(format=mime).decode('utf-8')\n except Exception as e:\n print (e)\n return self.send501(start_response)\n start_response(\n '200 OK',\n [('Content-type', self.mime_types[mime] + \"; charset=utf-8\"),\n ('Vary', 'Accept'), ('Content-length', str(len(content)))])\n return [content.encode('utf-8')]\n else:\n return self.send404(start_response)",
"def __call__(self, environ, start_response):\n path_info = environ['PATH_INFO']\n for key, redirect in self.redirects.items():\n if self.match(key, path_info):\n environ['PATH_INFO'] = redirect\n return self(environ, start_response)\n else:\n path, cut, prefix = self.first_path_segment(path_info)\n root = path[:cut]\n rest = path[cut:]\n if root in self.routes:\n environ['PATH_INFO'] = rest\n #XXX shouldn't we += to SCRIPT_NAME?\n environ['SCRIPT_NAME'] = prefix + root\n app = self.routes[root]\n else:\n app = webob.exc.HTTPNotFound()\n return app(environ, start_response)",
"def _handle_browse(self, relpath, params):\r\n abspath = os.path.normpath(os.path.join(self._root, relpath))\r\n if not abspath.startswith(self._root):\r\n raise ValueError # Prevent using .. to get files from anywhere other than root.\r\n if os.path.isdir(abspath):\r\n self._serve_dir(abspath, params)\r\n elif os.path.isfile(abspath):\r\n self._serve_file(abspath, params)",
"def __call__(self, environ, start_response):\n if environ['PATH_INFO'].startswith(self.prefix):\n environ['PATH_INFO'] = environ['PATH_INFO'][len(self.prefix):]\n environ['SCRIPT_NAME'] = self.prefix\n return self.app(environ, start_response)\n else:\n start_response(\n HTTP_CODES['NOT_FOUND'],\n [('Content-Type', 'text/plain')]\n )\n return ['This app is submounted under prefix: [{0}]'\n .format(self.prefix).encode()]",
"def application(environ, start_response):\n path = environ.get('PATH_INFO', '').lstrip('/')\n for regex, callback in urls:\n match = re.search(regex, path)\n if match is not None:\n print regex, callback\n environ['myapp.url_args'] = match.groups()\n return callback(environ, start_response)\n return not_found(environ, start_response)",
"def handle_request(request, *args, **kwargs):\n root = request.route.grit_params[\"root\"]\n logger.info(\"WSGIApplication::handle_request path: %s method: %s\", request.path_qs, request.method)\n\n reqctx = RequestCtx(request, request.response, kwargs)\n def run_pipeline(l):\n if l:\n handler_cls = l.pop(0)\n logger.debug(\"running pipeline entry %s\", handler_cls)\n with handler_cls.begin(reqctx):\n if 0 <= reqctx.response.status_int <= 299:\n run_pipeline(l)\n\n run_pipeline(list(root.pipeline))\n\n rv = request.response\n if isinstance(rv, basestring):\n rv = webapp2.Response(rv)\n elif isinstance(rv, tuple):\n rv = webapp2.Response(*rv)\n request.response = rv\n logger.debug(\"Pipeline completed with response status %s\", rv.status)",
"def do_GET(self):\n\n files = { \"/index.html\": \"index.html\",\n \"/\" : \"index.html\",\n \"/timeline-min.js\": \"timeline-min.js\",\n \"/timeline.js\": \"timeline.js\",\n \"/timeline.css\": \"timeline.css\"\n }\n if self.path in files:\n self._ServeFile(files[self.path])\n return\n\n if self.path.startswith(\"/api/data\"):\n self._ServeData()\n return\n\n self.send_error(404,'File Not Found: %s' % self.path)",
"def application( environ, start_response ):\n\n # Set the default request handler.\n handler = _handlers[ None ]\n\n # Get request path.\n path = environ.get( 'PATH_INFO', '' )\n\n # Expand request information for handlers.\n environ[ 'request.path' ] = path.lstrip( '/' ).split( '/' )\n environ[ 'request.query' ] = urllib.parse.parse_qs(\n environ[ 'QUERY_STRING' ]\n )\n\n # Handler is specified.\n if environ[ 'request.path' ] and ( environ[ 'request.path' ][ 0 ] != '' ):\n\n # See if a handler is available.\n handler = _handlers.get(\n environ[ 'request.path' ][ 0 ],\n _handlers[ '?' ]\n )\n\n # Delegate to the request handler.\n status, headers, content = handler( environ )\n\n # Define default headers.\n default_headers = {\n 'Content-Type' : 'text/html',\n 'Content-Length' : str( len( content ) ),\n }\n\n # Merge headers from handler.\n handler_headers = dict( headers )\n default_headers.update( handler_headers )\n merged_headers = [ ( k, v ) for k, v in default_headers.items() ]\n\n # Set the status string.\n status_string = '{} {}'.format( status, http_responses[ status ] )\n\n # Start the response.\n start_response( status_string, merged_headers )\n\n # Produce the content.\n if isinstance( content, bytes ):\n yield content\n else:\n yield bytes( content, 'utf-8' )",
"def serve(self):\n\t\tself.keep_running=1\n\t\tif self.debug:\n\t\t\tprint \"server started\"\n\t\ttry:\n\t\t\twhile self.keep_running:\n\t\t\t\tself.handle_request()\n\t\tfinally:\n\t\t\tif self.debug:\n\t\t\t\tprint \"server finished\"\n\t\t\tself.keep_running=0\n\t\t\tself.close()",
"def app(environ, start_response):\n headers = [('Content-type', 'text/plain; charset=utf-8')]\n\n path = environ['PATH_INFO']\n\n if path in Routes.registry:\n status = '200 OK'\n resp = bytes(Routes.registry[path](), 'utf-8')\n else:\n status = '404 Not Found'\n resp = b'Not Found'\n\n start_response(status, headers)\n return [resp + b'\\n']",
"def app(environ: t.Dict, start_response):\n # Print the request object details in environ.items()\n for k, v in environ.items():\n print(k, v)\n\n # Let's capture the request path\n path = environ.get(\"PATH_INFO\")\n\n # Handle our different routes. Render different templates.\n # Allow user to add \"/\" or not to URL string\n # NOTE: Don't use elif statement! It skips 'data' assignment!\n if path.endswith(\"/\"):\n path = path[:-1] # remove the trailing \"/\"\n if path == \"\": # the root / index\n data = home(environ)\n elif path == \"/contact\":\n data = contact_us(environ)\n elif path == \"/box-office\":\n data = read_box_office_data(environ)\n else:\n data = render_template(template_name=\"404.html\", context={\"path\": path})\n\n # Encode data to BYTE string\n data = data.encode(\"utf-8\")\n\n # Gunicorn's start_response to get a response going\n start_response(\n f\"200 OK\",\n [(\"Content-Type\", \"text/html\"), (\"Content-Length\", str(len(data)))],\n # You can remove these headers and the browser will still parse it.\n # Modern browsers are smart enough to infer how to parse the request\n )\n # Where does this print to? Server logs I bet... YES!\n # print(f\"{data=}\\n{iter([data])}\")\n return iter([data]) # <list_iterator object at 0x10f9f1340>",
"def do_GET(self):\n\n path = self.file_path\n\n if os.path.exists(path):\n # Symbolic link judgement.\n # Paths with denied symbolic links will pretend to be 404 errors.\n if args[TITLE_LOCAL_LINKS] and not (\"%s/\" % os.path.realpath(path)).startswith(os.getcwd() + \"/\"):\n return self.send_error(404, \"File not found\")\n elif args[TITLE_NO_LINKS]:\n # If all symbolic links are banned, then we must trace our\n # way down an existing path to make sure that no symbolic link exists\n curr = path\n while True:\n if os.path.islink(curr):\n return self.send_error(404, \"File not found\")\n if curr == path:\n break\n curr = os.path.dirname(path);\n\n f = None\n if os.path.isdir(path):\n\n if not getattr(self, common.ATTR_PATH, \"\").endswith(\"/\"):\n return self.send_redirect(\"%s/\" % getattr(self, common.ATTR_PATH, \"\"))\n\n for index in [\"index.html\", \"index.htm\"]:\n index = os.path.join(path, index)\n if os.path.exists(index):\n path = index\n break\n if path == self.file_path:\n return self.list_directory(path)\n\n return self.serve_file(path)",
"def _handle_get_request(self):\n docroot = self._get_config_value('main', 'staticdocumentroot')\n local_path = sanitize_path(self.path)\n path = docroot + local_path\n try:\n # actually try deliver the requested file - First we try to send\n # every static content\n requested_file = open(path)\n text = requested_file.read()\n requested_file.close()\n except IOError:\n try:\n parsed_path = urlparse(self.path)\n params = dict([p.split('=') for p in parsed_path[4].split('&')])\n if params['addurl']:\n tmp = self._insert_url_to_db(params['addurl'])\n if tmp and tmp < 0:\n self._send_database_problem()\n return\n blocked = self._db.is_hash_blocked(tmp)\n if blocked:\n self._send_blocked_page(blocked[3])\n return\n elif tmp:\n self._send_return_page(tmp)\n return\n else:\n # There was a general issue with URL\n self._send_homepage('''<p class=\"warning\">Please check your input.</p>''')\n return\n else:\n # There was a general issue with URL\n self._send_homepage('''<p class=\"warning\">Please check your input.</p>''')\n return\n except YuDatabaseError:\n self._send_database_problem()\n return\n except:\n if self.path in ('/', '/URLRequest'):\n self._send_homepage()\n return\n elif self.path.startswith('/stats') or self.path.endswith('+'):\n if self.path == '/stats':\n # Doing general statistics here\n # Let's hope this page is not getting to popular ....\n # Create a new stats objekt which is fetching data in background\n self._show_general_stats()\n return\n else:\n # Check whether we do have the + or the stats kind of URL\n if self.path.endswith('+'):\n # Well I guess this is the proof you can write\n # real ugly code in Python too.\n try:\n if self.path.startswith('/show/'):\n request_path = self.path[6:]\n elif self.path.startswith('/s/'):\n request_path = self.path[3:]\n elif self.path.startswith('/stats/'):\n request_path = self.path[7:]\n else:\n request_path = self.path[1:]\n self._show_link_stats(request_path[:request_path.rfind('+')])\n return\n except Exception, e:\n # Oopps. Something went wrong. Most likely\n # a malformed link\n # TODO raise a (yet to be written) FileNotFoundException\n self._logger.error(\n u'An exception occurred: %s' % unicode(e), exc_info=True)\n self._send_404()\n return\n else:\n # Trying to understand for which link we shall print\n # out stats.\n splitted = self.path[1:].split('/')\n try:\n self._show_link_stats(splitted[1])\n return\n except IndexError:\n # Something went wrong. 
Most likely there was a\n # malformed URL for accessing the stats.\n self._send_404()\n return\n # Any other page\n else:\n # First check, whether we want to have a real redirect\n # or just an info\n request_path = self.path\n if self.path.startswith('/show/'):\n request_path = self.path[5:]\n show = True\n elif self.path.startswith('/s/'):\n request_path = self.path[2:]\n show = True\n else:\n show = False\n # Assuming, if there is anything else than an\n # alphanumeric character after the starting /, it's\n # not a valid hash at all\n if request_path[1:].isalnum():\n try:\n result = self._db.get_link_from_db(request_path[1:])\n blocked = self._db.is_hash_blocked(request_path[1:])\n except YuDatabaseError:\n self._send_database_problem()\n return\n if result and blocked == None:\n if show == True:\n template_filename = self._get_config_template('showpage')\n url = \"/\" + request_path[1:]\n new_url = '<p><a href=\"%(url)s\">%(result)s</a></p>' % \\\n {'result': result, 'url': url}\n stats = self._db.get_statistics_for_hash(request_path[1:])\n text = read_template(\n template_filename,\n title=SERVER_NAME,\n header=SERVER_NAME,\n msg=new_url,\n stat=stats,\n statspage=\"/stats/\" + request_path[1:])\n else:\n self._db.add_logentry_to_database(request_path[1:])\n self._send_301(result)\n return\n elif blocked:\n self._send_blocked_page(blocked[3])\n return\n else:\n self._send_404()\n return\n else:\n self._send_404()\n return\n self._send_response(text, 200)",
"def do_GET(self):\n if \"mock_configurations\" in self.path:\n resource = self.path.replace(\"/mock_configurations\", \"\")\n print resource\n self.recover_request(resource)\n\n else:\n \"\"\"Otherwise, serve the previously uploaded content.\"\"\"\n self.store_request(self.path)\n self.serve_response()",
"def main():\r\n run_wsgi_app(app)",
"def application(environ, start_response):\n\n headers = [('Content-type', 'text/html')]\n\n try:\n path = environ.get('PATH_INFO', None)\n if path is None:\n raise NameError\n\n func, args = resolve_path(path)\n status = \"200 OK\"\n body = func(*args)\n except NameError:\n status = '404 Not Found'\n body = \"<h1>Not Found</h1>\"\n except Exception:\n status = \"500 Internal Server Error\"\n body = \"<h1>Internal Server Error</h1>\"\n print(traceback.format_exc())\n finally:\n headers.append(('Content-length', str(len(body))))\n start_response(status, headers)\n return [body.encode('utf8')]",
"def handle(self):\n self.raw_requestline = self.rfile.readline()\n if not self.parse_request(): # An error code has been sent, just exit\n return\n\n # next line is where we'd have expect a configuration key somehow\n handler = self.WebSocketWSGIHandler(\n self.rfile, self.wfile, self.get_stderr(), self.get_environ()\n )\n handler.request_handler = self # backpointer for logging\n handler.run(self.server.get_app())",
"def serve(self, name=None):\n # Note: this talks to the manager; it has nothing to do with the server\n if self._is_served:\n raise RuntimeError('This app (%s) is already served.' % self.name)\n if name is not None:\n self._path = name\n manager.register_app(self)\n self._is_served = True",
"async def process_request(sever_root, path, request_headers):\n\n if \"Upgrade\" in request_headers:\n return # Probably a WebSocket connection\n\n if path == '/':\n path = '/index.html'\n\n response_headers = [\n ('Server', 'asyncio websocket server'),\n ('Connection', 'close'),\n ]\n\n # Derive full system path\n full_path = os.path.realpath(os.path.join(sever_root, path[1:]))\n\n # Validate the path\n if os.path.commonpath((sever_root, full_path)) != sever_root or \\\n not os.path.exists(full_path) or not os.path.isfile(full_path):\n print(\"HTTP GET {} 404 NOT FOUND\".format(path))\n return HTTPStatus.NOT_FOUND, [], b'404 NOT FOUND'\n\n # Guess file content type\n extension = full_path.split(\".\")[-1]\n mime_type = MIME_TYPES.get(extension, \"application/octet-stream\")\n response_headers.append(('Content-Type', mime_type))\n\n # Read the whole file into memory and send it out\n body = open(full_path, 'rb').read()\n response_headers.append(('Content-Length', str(len(body))))\n print(\"HTTP GET {} 200 OK\".format(path))\n return HTTPStatus.OK, response_headers, body",
"def application(environ, start_response):\n\n headers = [(\"Content-type\", \"text/html\")]\n body = ''\n status = ''\n try:\n path = environ.get('PATH_INFO', None)\n if path is None:\n raise NameError\n func, args = resolve_path(path)\n body = func(*args)\n status = \"200 OK\"\n except NameError:\n status = \"404 Not Found\"\n body = \"<h1>Not Found</h1>\"\n except Exception:\n status = \"500 Internal Server Error\"\n body = \"<h1>Internal Server Error</h1>\"\n print(traceback.format_exc())\n finally:\n headers.append(('Content-length', str(len(body))))\n start_response(status, headers)\n return [body.encode('utf8')]",
"def main(methods=[\"GET\"]):\n validate_auth()\n ## issue with path resolution after build\n return send_from_directory(\n #todo: remove templates directory reference; index.html isn't a jinja template\n safe_join(current_app.static_folder, 'templates'),\n 'index.html',\n cache_timeout=-1\n )",
"def catch_all(path=''):\n return render_template('index.html')",
"def protected_serve(request, path, document_root=None, show_indexes=False):\n return serve(request, path, document_root, show_indexes)",
"def staticFile(path):\n logger.info('[FLASKWEB /fs] Static File Request for `%s`' % path)\n local = os.path.join(webapp.config['DIR'], path)\n if not os.path.exists(local):\n return returnError(\"File not found: %s\" % path, 404)\n if os.path.isdir(local):\n contents = sorted(os.listdir(local))\n for i, f in enumerate(contents):\n if os.path.isdir(f):\n contents[i] += '/'\n\n if request.headers['Accept'] == 'application/json':\n return jsonify(dict(cwd=local, contents=contents)), 200\n else:\n return render_template('listing.html', cwd=path, listing=contents), 200\n\n else:\n if 'stdout' in local or 'output' in local or local.split('.')[-1] in ['txt', 'yaml', 'yml', 'json', 'log']:\n with open(local, 'r') as file:\n # output = unicode(file.read(), 'utf-8')\n output = file.read()\n\n if request.headers['Accept'] == 'application/json':\n return output, 200\n else:\n return render_template(\"output.html\", output=output)\n\n return send_from_directory(webapp.config['DIR'], path)"
] | [
"0.65474683",
"0.63074946",
"0.62397385",
"0.60925007",
"0.60817075",
"0.60779226",
"0.599752",
"0.59761715",
"0.5958712",
"0.59400415",
"0.5929748",
"0.5829383",
"0.5764965",
"0.5761285",
"0.5758303",
"0.57550424",
"0.572396",
"0.5721223",
"0.56893045",
"0.5639996",
"0.55924946",
"0.55922407",
"0.5588426",
"0.55698574",
"0.556562",
"0.556468",
"0.5556168",
"0.55374455",
"0.55369896",
"0.5535771"
] | 0.7011241 | 0 |
Translates the overall server configuration. The conf is translated into an app-specific configuration dict suitable for passing as ``parsed_conf`` in the | def parse_conf(cls, name, conf):
parsed_conf = {
'path': conf.get(name, 'path', '/').strip('/'),
'serve_path': conf.get_path(name, 'serve_path').rstrip('/')}
if not parsed_conf['serve_path']:
raise Exception('[%s] serve_path must be set' % name)
return parsed_conf | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def render_synapse_config(self):\n hookenv.log(\n \"Rendering synapse configuration to {}\".format(self.synapse_config),\n hookenv.DEBUG,\n )\n if self.pgsql_configured():\n render_result = templating.render(\n \"homeserver.yaml.j2\",\n self.synapse_config,\n {\n \"conf_dir\": self.synapse_conf_dir,\n \"signing_key\": self.get_synapse_signing_key(),\n \"registration_shared_secret\": self.get_shared_secret(),\n \"pgsql_configured\": self.pgsql_configured(),\n \"pgsql_host\": self.kv.get(\"pgsql_host\"),\n \"pgsql_port\": self.kv.get(\"pgsql_port\"),\n \"pgsql_db\": self.kv.get(\"pgsql_db\"),\n \"pgsql_user\": self.kv.get(\"pgsql_user\"),\n \"pgsql_pass\": self.kv.get(\"pgsql_pass\"),\n \"server_name\": self.get_server_name(),\n \"public_baseurl\": self.get_public_baseurl(),\n \"enable_tls\": self.get_tls(),\n \"enable_search\": self.charm_config[\"enable-search\"],\n \"enable_user_directory\": self.charm_config[\"enable-user-directory\"],\n \"enable_room_list_search\": self.charm_config[\n \"enable-room-list-search\"\n ],\n \"enable_registration\": self.charm_config[\"enable-registration\"],\n \"enable_federation\": self.charm_config[\"enable-federation\"],\n \"use_presence\": self.charm_config[\"track-presence\"],\n \"require_auth_for_profile_requests\": self.charm_config[\n \"require-auth-profile-requests\"\n ],\n \"default_room_version\": self.charm_config[\"default-room-version\"],\n \"block_non_admin_invites\": not bool(\n self.charm_config[\"enable-non-admin-invites\"]\n ),\n \"report_stats\": self.charm_config[\"enable-reporting-stats\"],\n \"allow_public_rooms_without_auth\": self.charm_config[\n \"allow-public-rooms-unauthed\"\n ],\n \"allow_public_rooms_over_federation\": self.charm_config[\n \"allow-public-rooms-federated\"\n ],\n \"federation_domain_whitelist\": self.get_domain_whitelist(),\n \"federation_ip_range_blacklist\": self.get_federation_iprange_blacklist(),\n },\n )\n if render_result:\n if any_file_changed([self.synapse_config]):\n self.restart_synapse()\n return True\n return False",
"def parse_conf(conf):\n global Message, Command\n Message = conf['message']\n Command = conf['command']\n write_streak()\n Parent.Log(ScriptName, 'Load conf: {}'.format((Message, Command)))",
"def loadConfig():\n global abs_path, app_list, app_api_subs\n\n # load application details\n with open(abs_path + '/../../../../config/apim.yaml', 'r') as file:\n apim_config = yaml.load(file, Loader=yaml.FullLoader)\n apps = apim_config['apps']\n\n for app in apps:\n app_list[app['name']] = []\n app_api_subs[app['name']] = app['api_subscriptions'].split(',')",
"def _parse_conf(args):\n conf = args.config_dir\n assert os.path.isdir(conf), \\\n \"configuration directory {0} does not exist, run init\".format(conf)\n conf_file = os.path.join(conf, 'conf_{0}.json'.format(APP_NAME))\n assert os.path.isfile(conf_file), \\\n \"configuration file does not exist {0}, \\\n not properly initialized\".format(conf_file)\n with open(conf_file) as f:\n data = json.load(f)\n # validate data\n assert 'data_url' in data, \\\n \"data_url': '' not found in {0}\".format(conf_file)\n assert 'archive_paths' in data, \\\n \"'archive_paths': [] not found in {0}\".format(conf_file)\n assert 'min_loop' in data, \\\n \"'min_loop': [] not found in {0}\".format(conf_file)\n\n # build up nested named tuple to hold parsed config\n app_config = namedtuple(\n 'fixity',\n 'json_dir, conf_file, errors, ignore_re',\n )\n daemon_config = namedtuple('FixityDaemon', 'pid, log', )\n daemon_config.pid = os.path.abspath(\n os.path.join(conf, 'logs', '{0}.pid'.format(APP_NAME)))\n daemon_config.log = os.path.abspath(\n os.path.join(conf, 'logs', '{0}.log'.format(APP_NAME)))\n app_config.json_dir = os.path.abspath(os.path.join(conf, 'json_dir'))\n app_config.errors = os.path.abspath(os.path.join(conf, 'errors'))\n if 'ignore_paths' in data and data['ignore_paths'] != []:\n # http://stackoverflow.com/a/5141829/1763984\n app_config.ignore_re = r'|'.join(\n [fnmatch.translate(x) for x in data['ignore_paths']]\n ) or r'$.'\n else:\n app_config.ignore_re = False\n c = namedtuple('FixityConfig', 'app, daemon, args, data, conf_file')\n c.app = app_config\n c.daemon = daemon_config\n c.args = args\n c.data = data\n c.conf_file = os.path.abspath(conf_file)\n return c",
"def configure(self, host_cfg, app_cfg):\n self.LOG.debug(\"Midolman app configuration for [\" +\n host_cfg.name + \"]\")\n\n if 'cassandra_ips' in app_cfg.kwargs:\n for i in app_cfg.kwargs['cassandra_ips']:\n self.cassandra_ips.append(IP(i))\n\n if 'zookeeper_ips' in app_cfg.kwargs:\n for i in app_cfg.kwargs['zookeeper_ips']:\n self.zookeeper_ips.append(IP(i))\n\n if 'hypervisor' in app_cfg.kwargs:\n self.hv_active = app_cfg.kwargs['hypervisor']\n\n if 'id' in app_cfg.kwargs:\n self.num_id = str(app_cfg.kwargs['id'])\n\n self.my_ip = self.host.main_ip\n self.LOG.debug(\"Found host IP[\" + self.my_ip + \"]\")\n\n subdir = '.' + self.num_id if self.num_id != '' else ''\n self.config_dir = '/etc/midolman' + subdir\n self.lib_dir = '/var/lib/midolman' + subdir\n self.log_dir = '/var/log/midolman' + subdir\n self.runtime_dir = '/run/midolman' + subdir\n\n if self.cli.exists(self.config_dir + '/host_uuid.properties'):\n self.unique_id = self.cli.read_from_file(\n self.config_dir + \"/host_uuid.properties\").replace(\n 'host_uuid=', '').strip()\n else:\n self.unique_id = uuid.uuid4()\n\n log_dir = '/var/log/midolman' + subdir\n self.host.log_manager.add_external_log_file(\n FileLocation(log_dir + '/midolman.log'), self.num_id,\n '%Y.%m.%d %H:%M:%S.%f')",
"def get_cfg():\n cfg = {}\n base = home_link()['href']\n if '://' not in base:\n protocol = capp.config['PREFERRED_URL_SCHEME']\n print(base)\n base = '{0}://{1}'.format(protocol, base)\n\n cfg['base'] = base\n cfg['domains'] = {}\n cfg['server_name'] = capp.config['SERVER_NAME']\n cfg['api_name'] = capp.config.get('API_NAME', 'API')\n # 1. parse rules from url_map\n cfg['domains'] = parse_map(capp.url_map)\n # 2. Load schemas and paths from the config and update cfg\n domains = {}\n for domain, resource in list(capp.config['DOMAIN'].items()):\n if resource['item_methods'] or resource['resource_methods']:\n # hide the shadow collection for document versioning\n if 'VERSIONS' not in capp.config or not \\\n domain.endswith(capp.config['VERSIONS']):\n domains[domain] = endpoint_definition(domain, resource)\n\n cfg['domains'].update(domains)\n return cfg",
"def _setup_app_config():\n try:\n file = _app_config_file()\n conf = pyhocon.ConfigFactory.parse_file(file)\n except FileNotFoundError:\n conf = pyhocon.ConfigFactory.parse_file(_generate_global_config())\n # test if since_version is deprecated and regenerate a newer config\n return conf",
"def __initConfiguration(self):\n conf = configparser.ConfigParser()\n with open(self.configFile, \"r\") as f:\n conf.readfp(f)\n self.orgConf = conf\n # check additionalSection\n adSection = self.additionalSection\n if adSection in conf:\n adSection = conf[adSection]\n self.conf = {}\n for i in [self.CLIENT_ID, self.CLIENT_SECRET, self.AUTHZ_ENDPOINT,\n self.TOKEN_ENDPOINT, self.REDIRECT_URI, self.SCOPE]:\n if adSection != None and i in adSection:\n self.conf[i] = adSection[i]\n else:\n self.conf[i] = conf[\"DEFAULT\"][i]",
"def activate_nginx_config(self, app):\n conf_mode = int(app.config['nginx']['config_mode'], 8)\n\n # first, generate all domains/sites\n for domain in app.domains:\n # replace wildcards\n safe_name = domain.replace('*', '_')\n\n # generate the domain configuration\n output_fn = (Path(app.config['nginx']['sites_path'])\n / 'scotch_domain_{}'.format(safe_name))\n\n domain_path = (Path(app.config['nginx']['domains_path']) /\n '{}'.format(safe_name))\n include_path = str(domain_path) + '/*'\n\n self.output_template('domain.conf', output_fn, config=app.config,\n domain=domain, safe_name=safe_name,\n include_path=include_path,\n _mode=conf_mode)\n\n # for every domain, generate the site configuration\n output_fn = domain_path / app.name\n self.output_template('app.conf', output_fn, config=app.config,\n _mode=conf_mode)\n\n subprocess.check_call([app.config['nginx']['reload_command']],\n shell=True)",
"def configure(self, conf):\n return",
"def configure_app(app, config_dict, config_file_folder):\n # write the config dict to app config as a read-only proxy of a mutable dict\n app.config[APP_CONFIG_JSON] = MappingProxyType(config_dict)\n config_file_folder = config_file_folder\n app.config[CONFIG_FILE_FOLDER] = config_file_folder\n app.config[AVAILABLE_PAGES_DICT] = make_pages_dict(\n config_dict.get(AVAILABLE_PAGES, []), app.config[CONFIG_FILE_FOLDER]\n )\n configure_backend(app)\n return app",
"def configure(self):\n\n log.info(\"Loading configuration from the database...\")\n settings = dict(db.query(\"\"\"SELECT `key`, `value` FROM settings\"\"\"))\n \n log.info(\"Config loaded\")\n log.info(\"HoN Version: %s Chat Port: %s Protocol: %s\" % (settings['honver'], settings['chatport'], settings['chatver']))\n if 'username' in settings:\n acc_config['username'] = settings['username']\n \n if 'password' in settings:\n acc_config['password'] = settings['password']\n \n if 'invis' in settings:\n settings['invis'] = True if settings['invis'] == \"True\" else False\n \n if 'chatport' in settings:\n settings['chatport'] = int(settings['chatport'])\n \n if 'chatver' in settings:\n settings['chatver'] = int(settings['chatver'])\n \n for key in settings:\n if key in basic_config:\n basic_config[key] = settings[key]\n \n self._configure(chatport=settings['chatport'], protocol=settings['chatver'], invis=settings['invis'],\n masterserver=settings['masterserver'], basicserver=settings['basicserver'], honver=settings['honver'])",
"def parse_conf(confpath):\n \n if isinstance(confpath, dict):\n return confpath\n \n fullpath = os.path.abspath(confpath)\n root = os.path.dirname(fullpath)\n \n conf = json.loads(open(fullpath).read())\n conf['srcDir'] = os.path.join(root, conf['srcDir'])\n conf['deployDir'] = os.path.join(root, conf['deployDir'])\n \n return conf",
"def map_to_app(self, app):\n app['safe-deployment'] = {}\n app['safe-deployment']['load_balancer_type'] = self.lb_type.data\n app['safe-deployment']['wait_before_deploy'] = self.safe_deploy_wait_before.data\n app['safe-deployment']['wait_after_deploy'] = self.safe_deploy_wait_after.data\n if self.lb_type.data == \"haproxy\":\n app['safe-deployment']['app_tag_value'] = self.haproxy_app_tag.data.strip()\n app['safe-deployment']['ha_backend'] = self.haproxy_backend.data.strip()\n app['safe-deployment']['api_port'] = self.haproxy_api_port.data",
"def config_as(self, app_name):\n # Cast to target app\n # ====================================================================\n app_names = [ app_cls.__name__ for app_cls in LazyApp.AVAILABLE_APPS ]\n target_app_idx = app_names.index(app_name)\n target_app = LazyApp.AVAILABLE_APPS[target_app_idx]\n self.__class__ = target_app\n\n # Configure remote worker\n # ====================================================================\n # Tell remote worker to become target worker\n message = { 'action': 'worker', 'content': target_app.MATCHED_WORKER }\n self.send(message)\n\n # Use default worker config\n # ====================================================================\n response = self.recv()\n if response is None:\n raise Exception(\"Remote worker has been closed\")\n\n # Send changed worker config to server\n worker_config = response['content']\n message = { 'action': 'config', 'content': worker_config }\n self.send(message)\n\n # You're ready to go\n logger.info(f\"{self.__class__.__name__} has been configured properly\")",
"def _load_from_conf(self, parser, section, db, conf_dir, cloud_confs, conf_file):\n\n iaas = config_get_or_none(parser, section, \"iaas\", self.iaas)\n iaas_url = config_get_or_none(parser, section, \"iaas_url\", self.iaas_url)\n\n sshkey = config_get_or_none(parser, section, \"sshkeyname\", self.keyname)\n localssh = config_get_or_none(parser, section, \"localsshkeypath\", self.localkey)\n ssh_user = config_get_or_none(parser, section, \"ssh_username\", self.username)\n scp_user = config_get_or_none(parser, section, \"scp_username\", self.scp_username)\n bootconf = config_get_or_none(parser, section, \"bootconf\", self.bootconf)\n bootpgm = config_get_or_none(parser, section, \"bootpgm\", self.bootpgm)\n bootpgm_args = config_get_or_none(parser, section, \"bootpgm_args\", self.bootpgm_args)\n hostname = config_get_or_none(parser, section, \"hostname\", self.hostname)\n readypgm = config_get_or_none(parser, section, \"readypgm\", self.readypgm)\n readypgm_args = config_get_or_none(parser, section, \"readypgm_args\", self.readypgm_args)\n iaas_key = config_get_or_none(parser, section, \"iaas_key\", self.iaas_key)\n iaas_secret = config_get_or_none(parser, section, \"iaas_secret\", self.iaas_secret)\n securitygroups = config_get_or_none(parser, section, \"securitygroups\", self.securitygroups)\n\n terminatepgm = config_get_or_none(parser, section, \"terminatepgm\", self.terminatepgm)\n terminatepgm_args = config_get_or_none(parser, section, \"terminatepgm_args\", self.terminatepgm_args)\n\n pgm_timeout = config_get_or_none(parser, section, \"pgm_timeout\", self.pgm_timeout)\n\n local_exe = config_get_or_none_bool(parser, section, \"local_exe\", self.local_exe)\n\n\n allo = config_get_or_none(parser, section, \"allocation\", self.allocation)\n image = config_get_or_none(parser, section, \"image\", self.image)\n cloudconf = config_get_or_none(parser, section, \"cloud\")\n if cloudconf:\n try:\n conf = cloud_confs[cloudconf]\n except:\n raise APIUsageException(\"%s is not a valud cloud description in this plan\" % (cloudconf))\n\n if not iaas:\n iaas = conf.iaas\n if not iaas_url:\n iaas_url = conf.iaas_url\n if not sshkey:\n sshkey = conf.sshkey\n if not localssh:\n localssh = conf.localssh\n if not ssh_user:\n ssh_user = conf.ssh_user\n if not scp_user:\n scp_user = conf.scp_user\n if not iaas_key:\n iaas_key = conf.iaas_key\n if not iaas_secret:\n iaas_secret = conf.iaas_secret\n if not securitygroups:\n securitygroups = conf.securitygroups\n\n if not iaas:\n iaas = db.default_iaas\n if not iaas_url:\n iaas_url = db.default_iaas_url\n if not allo:\n allo = db.default_allo\n if not sshkey:\n sshkey = db.default_sshkey\n if not localssh:\n localssh = db.default_localssh\n if not ssh_user:\n ssh_user = db.default_ssh_user\n if not scp_user:\n scp_user = db.default_scp_user\n if not iaas_key:\n iaas_key = db.default_iaas_key\n if not iaas_secret:\n iaas_secret = db.default_iaas_secret\n if not securitygroups:\n securitygroups = db.default_securitygroups\n if not image:\n image = db.default_image\n if not bootconf:\n bootconf = db.default_bootconf\n if not bootpgm:\n bootpgm = db.default_bootpgm\n if not bootpgm_args:\n bootpgm_args = db.default_bootpgm_args\n if not readypgm:\n readypgm = db.default_readypgm\n if not readypgm_args:\n readypgm_args = db.default_readypgm_args\n if not terminatepgm:\n terminatepgm = db.default_terminatepgm\n if not terminatepgm_args:\n terminatepgm_args = db.default_terminatepgm_args\n if not pgm_timeout:\n pgm_timeout = db.default_pgm_timeout\n\n if not 
local_exe:\n local_exe = db.default_local_exe\n\n\n self.image = image\n self.bootconf = _resolve_file_or_none(conf_dir, bootconf, conf_file)\n self.bootpgm = _resolve_file_or_none(conf_dir, bootpgm, conf_file, has_args=True)\n self.bootpgm_args = bootpgm_args\n self.terminatepgm = _resolve_file_or_none(conf_dir, terminatepgm, conf_file, has_args=True)\n self.terminatepgm_args = terminatepgm_args\n self.pgm_timeout = pgm_timeout\n self.local_exe = local_exe\n\n self.hostname = hostname\n self.readypgm = _resolve_file_or_none(conf_dir, readypgm, conf_file, has_args=True)\n self.readypgm_args = readypgm_args\n self.username = ssh_user\n self.scp_username = scp_user\n self.localkey = _resolve_file_or_none(conf_dir, localssh, conf_file)\n self.keyname = sshkey\n self.allocation = allo\n self.iaas = iaas\n self.iaas_url = iaas_url\n\n self.iaas_secret = iaas_secret\n self.iaas_key = iaas_key\n self.securitygroups = securitygroups\n\n x = config_get_or_none(parser, section, \"iaas_launch\")\n if x:\n if x.lower() == 'true':\n self.iaas_launch = True\n else:\n self.iaas_launch = False\n else:\n if self.hostname:\n self.iaas_launch = False\n else:\n self.iaas_launch = True\n\n # allow the plan to over ride the default image if they want to use a hostname\n if self.iaas_launch is False:\n self.image = None\n\n item_list = parser.items(section)\n deps_list = []\n for (ka,val) in item_list:\n ndx = ka.find(\"deps\")\n if ndx == 0:\n deps_list.append(ka)\n deps_list.sort()\n for i in deps_list:\n deps = config_get_or_none(parser, section, i)\n deps_file = _resolve_file_or_none(conf_dir, deps, conf_file)\n if deps_file:\n parser2 = ConfigParser.ConfigParser()\n parser2.read(deps_file)\n keys_val = parser2.items(\"deps\")\n for (ka,val) in keys_val:\n val2 = config_get_or_none(parser2, \"deps\", ka)\n if val2 is not None:\n bao = BagAttrsObject(ka, val2)\n self.attrs.append(bao)",
"def parse_config(self):\n # TODO: parse config file\n pass",
"def _initConfig(self):\n from tg import config as tg_config\n\n # Set config defaults\n config = DEFAULT_CONFIG.copy()\n temp_verbose = config[\"verbose\"]\n\n # Configuration file overrides defaults\n default_config_file = os.path.abspath(DEFAULT_CONFIG_FILE)\n config_file = tg_config.get('wsgidav.config_path', default_config_file)\n fileConf = self._readConfigFile(config_file, temp_verbose)\n config.update(fileConf)\n\n if not useLxml and config[\"verbose\"] >= 1:\n print(\n \"WARNING: Could not import lxml: using xml instead (slower). Consider installing lxml from http://codespeak.net/lxml/.\")\n from wsgidav.dir_browser import WsgiDavDirBrowser\n from tracim.lib.webdav.tracim_http_authenticator import TracimHTTPAuthenticator\n from wsgidav.error_printer import ErrorPrinter\n from tracim.lib.webdav.utils import TracimWsgiDavDebugFilter\n\n config['middleware_stack'] = [\n WsgiDavDirBrowser,\n TracimHTTPAuthenticator,\n ErrorPrinter,\n TracimWsgiDavDebugFilter,\n ]\n\n config['provider_mapping'] = {\n config['root_path']: Provider(\n # TODO: Test to Re enabme archived and deleted\n show_archived=False, # config['show_archived'],\n show_deleted=False, # config['show_deleted'],\n show_history=False, # config['show_history'],\n manage_locks=config['manager_locks']\n )\n }\n\n config['domaincontroller'] = TracimDomainController(presetdomain=None, presetserver=None)\n\n return config",
"def configure(self, conf):\n\n for node in conf.children:\n key = node.key\n val = node.values[0]\n if key == 'Vcenter':\n self.vcenters = val.split()\n elif key == 'Username':\n self.username = val\n elif key == 'Password':\n self.password = val\n elif key == 'Verbose':\n self.verbose = bool(val)\n elif key == 'Sleep':\n self.sleep_time = int(val)\n else:\n self.log.warn('Unknown config key: %s' % (key,))",
"def _parse_opensslconf(self):\n# print \"parse_opensslconf\"\n _log.debug(\"__init__::parse_opensslconf\")\n if not self.config.read(self.configfile):\n# print \"could not parse config file\"\n # Empty openssl.conf file or could not successfully parse the file.\n self.new_opensslconf()\n configuration = {}\n for section in self.__class__.DEFAULT.keys():\n for option in self.__class__.DEFAULT[section].keys():\n raw = self.config.get(section, option)\n value = raw.split(\"#\")[0].strip() # Remove comments\n\n if \"$\" in value: # Manage OpenSSL variables\n variable = \"\".join(value.split(\"$\")[1:])\n variable = variable.split(\"/\")[0]\n if variable == \"calvindir\":\n varvalue = _conf.install_location()\n else:\n varvalue = self.config.get(section, variable).split(\"#\")[0].strip()\n if \"$calvindir\" in varvalue:\n varvalue = _conf.install_location() + \"/\" + \"/\".join(varvalue.split(\"/\")[1:])\n path = \"/\" + \"/\".join(value.split(\"/\")[1:])\n value = varvalue + path\n try:\n configuration[section].update({option: value})\n except KeyError:\n configuration[section] = {} # New section\n configuration[section].update({option: value})\n return configuration",
"def app_config(self):\n return self._app_conf[\"aiscalator\"]",
"def load_environment(global_conf, app_conf):\n # Pylons paths\n root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n paths = dict(root=root,\n controllers=os.path.join(root, 'controllers'),\n static_files=os.path.join(root, 'public'),\n templates=[os.path.join(root, 'templates')])\n\n # Initialize config with the basic options\n config.init_app(global_conf, app_conf, package='popserver',\n template_engine='mako', paths=paths)\n\n config['routes.map'] = make_map()\n config['pylons.g'] = app_globals.Globals()\n config['pylons.h'] = popserver.lib.helpers\n\n # easter egg :)\n config['pylons.response_options']['headers']['X-Pop'] = 'Ego!'\n\n # hacer que config esté disponible en el scope de popserver.lib.helpers\n popserver.lib.helpers.config = config\n\n # Customize templating options via this variable\n tmpl_options = config['buffet.template_options']\n\n # CONFIGURATION OPTIONS HERE (note: all config options will override\n # any Pylons config options)\n\n # Assume that templates are encoded in utf-8\n # Can override on a per-template basis using a 'magic encoding comment'\n tmpl_options['mako.input_encoding'] = 'utf-8'\n\n # Configures DB Engine\n elixir.metadata.bind = config['sqlalchemy.url']\n elixir.metadata.bind.echo = True if config['sqlalchemy.echo'] == 'True' \\\n else None\n elixir.options_defaults['autosetup'] = True\n\n if asbool(config.get('popego.cache_manager.enable', False)):\n from popserver.config import cache\n cache.enable_cache()",
"def get_ipa_conf():\n\n parser = RawConfigParser()\n parser.read(paths.IPA_DEFAULT_CONF)\n result = dict()\n for item in ['basedn', 'realm', 'domain', 'server', 'host', 'xmlrpc_uri']:\n if parser.has_option('global', item):\n value = parser.get('global', item)\n else:\n value = None\n if value:\n result[item] = value\n\n return result",
"def parse_conf(self):\n\n parser = configparser.RawConfigParser()\n parser.read(self.filename)\n\n try:\n self.id_node = parser['CONF_MACHINE']['ID_NODE']\n\n # eliminate possible white spaces between metrics\n temp = parser['CONF_MACHINE']['METRICS'].split(',')\n for itr in temp:\n self.metrics.append(itr.strip())\n\n except Exception:\n raise Exception(\"missing id or metrics\")\n\n try:\n self.interval = parser['CONF_MAHCINE']['INTERVAL']\n except Exception:\n self.interval = 1\n\n try:\n self.ampq_url = parser['ampq']['url']\n self.ampq_port = parser['ampq']['port']\n self.ampq_vhost = parser['ampq']['vhost']\n self.ampq_user = parser['ampq']['user']\n self.ampq_password = parser['ampq']['password']\n except Exception:\n raise Exception(\"missing ampq configs\")",
"def get_config(cls):\n path_home = os.path.expanduser('~')\n path_app = os.path.dirname(__file__)\n config = ConfigParser.RawConfigParser()\n paths = [Config.DEFAULT_CONFIGURATION_FILE,\n \"%s/.compta/server.cfg\" % path_home,\n \"%s/../server.cfg\" % path_app\n ]\n get_file = False\n for path in paths:\n if os.path.exists(path):\n try:\n config.read(path)\n get_file = True\n except ConfigPArser.ParsingError as error:\n print error\n sys.exit(1)\n break\n if not get_file:\n print \"No config files found\"\n sys.exit(1)\n\n dict_config = {}\n try:\n dict_config[\"database_path\"] = config.get(\"Database\", \"path\")\n dict_config[\"database_name\"] = config.get(\"Database\", \"name\")\n except ConfigParser.NoSectionError as error:\n print error\n sys.exit(1)\n except ConfigParser.NoOptionError as error:\n print error\n sys.exit(1)\n return dict_config",
"def configure(conf):\n unit_prompt = '\\nUnits:\\nUnit\\t-\\tExample\\nGB\\t-\\t10GB\\nMB\\t-\\t10MB\\nKB\\t-\\t10KB\\nB\\t-\\t10'\n units = [\n ('GB', lambda x: int(x * 1e9)),\n ('MB', lambda x: int(x * 1e6)),\n ('KB', lambda x: int(x * 1e3)),\n ('B', lambda x: int(x)),\n ]\n if conf.get('root', None) is None:\n conf['root'] = simple_response(\n 'What is the path of the directory you wish to sync?')\n conf['root'] = os.path.abspath(conf['root'])\n\n conf = configure_handshake(conf)\n conf = configure_deletes(conf)\n conf = configure_limits(conf, unit_prompt, units)\n conf = configure_logging(conf, unit_prompt, units)\n conf = configure_misc(conf)\n return conf",
"def configServer():\n try:\n config = open(r\"./server.conf\",\"r+\")\n except IOError,e:\n print e\n return 0\n configLines = []\n try:\n while True:\n configLines.append(config.next())\n except StopIteration:\n pass\n finally:\n config.close()\n configInfo = {}\n for line in configLines:\n if line[0] == \"#\" or line[0] == \"\\n\":\n continue\n configLineArgumentList = line[:-1].split(\"=\")\n key = configLineArgumentList[0]\n value = configLineArgumentList[1]\n configInfo.update({key:value})\n logging.info(\"Configuration done sucssesfully\")\n return configInfo",
"def load_server_conf(self):\n if os.path.exists(os.path.join(self.data_path,\"settings.txt\")):\n settings_file = file(os.path.join(self.data_path,\"settings.txt\"),\"rb\")\n self.server_conf = settings_file.read().split(';')\n else:\n self.server_conf = None",
"def init_config(self, conf_map):\n pass",
"def load_from_conf(self):\n raise NotImplementedError"
] | [
"0.56515485",
"0.5574661",
"0.5523583",
"0.5518153",
"0.55050313",
"0.5503376",
"0.54650676",
"0.54127175",
"0.53931737",
"0.53750205",
"0.5345372",
"0.53380966",
"0.5308977",
"0.5239369",
"0.5234539",
"0.52209145",
"0.52013206",
"0.51886344",
"0.5155737",
"0.51472735",
"0.51165193",
"0.51158524",
"0.508756",
"0.5056708",
"0.5051589",
"0.5015357",
"0.5014388",
"0.50095636",
"0.5004895",
"0.49992833"
] | 0.5918209 | 0 |
Collates the different files for each cluster into a dictionary. This will also remove any clusters we request and perform a check that all clusters have the required files. | def file_pairing(self, include=None, exclude=None):
# List the file names for both the images and the catalogs
if isinstance(self._irac_image_dir, list):
image_files = list(chain.from_iterable(glob.glob(f'{img_dir}/*.fits') for img_dir in self._irac_image_dir))
else:
image_files = glob.glob(f'{self._irac_image_dir}/*.fits')
if isinstance(self._sextractor_cat_dir, list):
cat_files = list(
chain.from_iterable(glob.glob(f'{cat_dir}/*.cat') for cat_dir in self._sextractor_cat_dir))
else:
cat_files = glob.glob(f'{self._sextractor_cat_dir}/*.cat')
# Combine and sort both file lists
cat_image_files = sorted(cat_files + image_files, key=self._keyfunct)
# Group the file names together
self._catalog_dictionary = {cluster_id: list(files)
for cluster_id, files in groupby(cat_image_files, key=self._keyfunct)}
# If we want to only run on a set of clusters we can filter for them now
if include is not None:
self._catalog_dictionary = {cluster_id: files for cluster_id, files in self._catalog_dictionary.items()
if cluster_id in include}
# If we want to exclude some clusters manually we can remove them now
if exclude is not None:
for cluster_id in exclude:
self._catalog_dictionary.pop(cluster_id, None)
# Sort the files into a dictionary according to the type of file
for cluster_id, files in self._catalog_dictionary.items():
self._catalog_dictionary[cluster_id] = {}
for f in files:
if f.endswith('.cat'):
self._catalog_dictionary[cluster_id]['se_cat_path'] = f
elif 'I1' in f and '_cov' not in f:
self._catalog_dictionary[cluster_id]['ch1_sci_path'] = f
elif 'I1' in f and '_cov' in f:
self._catalog_dictionary[cluster_id]['ch1_cov_path'] = f
elif 'I2' in f and '_cov' not in f:
self._catalog_dictionary[cluster_id]['ch2_sci_path'] = f
elif 'I2' in f and '_cov' in f:
self._catalog_dictionary[cluster_id]['ch2_cov_path'] = f
# Verify that all the clusters in our sample have all the necessary files
problem_clusters = []
for cluster_id, cluster_files in self._catalog_dictionary.items():
file_keys = {'ch1_sci_path', 'ch1_cov_path', 'ch2_sci_path', 'ch2_cov_path', 'se_cat_path'}
try:
assert file_keys == cluster_files.keys()
except AssertionError:
message = f'Cluster {cluster_id} is missing files {file_keys - cluster_files.keys()}'
warnings.warn(message)
problem_clusters.append(cluster_id)
# For now, remove the clusters missing files
for cluster_id in problem_clusters:
self._catalog_dictionary.pop(cluster_id, None) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def clustering(self): \n clusterOfFiles=self.getClusters()\n \n #group files based on the hash of their contents\n self.keyingMethod=md5Hash\n [self.addFile(afile) for acluster in clusterOfFiles for afile in acluster]\n clusterOfFiles=self.getClusters()\n self.showClusters(clusterOfFiles)",
"def load_clusters(experiment, cdrs = [\"H1\", \"H2\", \"H3\", \"L1\", \"L2\", \"L3\"]):\n # Store the information in this dictionary\n clusters = {}\n # Access the files with all of the clustering information\n files = os.listdir(experiment['Optcdr Cluster Folder'])\n # Go through each of the files\n for file in files:\n # Skip any swap files\n if file[-4:] == \".swp\":\n continue\n # Read through each file\n f = open(experiment['Optcdr Cluster Folder'] + file)\n for line in f:\n # Split the line on white space\n items = line.split()\n # If it is basic information about the cluster, store it\n if len(items) > 0 and items[0] in cdrs:\n # CDR Cluster INT Length: INT Model: NAME Members: INT\n # 0 1 2 3 4 5 6 7 8\n cdr = items[0]\n clusterNo = int(items[2])\n clusterLength = int(items[4])\n modelName = items[6]\n members = int(items[8])\n if cdr not in clusters.keys():\n clusters[cdr] = {}\n # Store the information for this cluster number\n clusters[cdr][clusterNo] = {\"Length\": clusterLength, \"Model\": \\\n modelName, \"Members\": members}\n f.close()\n # Store the clustering information\n return clusters",
"def test_store_cluster(self):\r\n\r\n self.tmpdir = mkdtemp(dir=\"./\", suffix=\"_store_clusters/\")\r\n\r\n self.files_to_remove.append(self.tmpdir + \"singletons.fasta\")\r\n self.files_to_remove.append(self.tmpdir + \"centroids.fasta\")\r\n\r\n # empty map results in empty files\r\n store_clusters({}, self.tiny_test, self.tmpdir)\r\n actual_centroids = list(\r\n parse_fasta(open(self.tmpdir + \"centroids.fasta\")))\r\n self.assertEqual(actual_centroids, [])\r\n actual_singletons = list(\r\n parse_fasta(open(self.tmpdir + \"singletons.fasta\")))\r\n self.assertEqual(actual_singletons, [])\r\n\r\n # non-empty map creates non-empty files, centroids sorted by size\r\n mapping = {'FZTHQMS01B8T1H': [],\r\n 'FZTHQMS01DE1KN': ['FZTHQMS01EHAJG'],\r\n 'FZTHQMS01EHAJG': [1, 2, 3]} # content doesn't really matter\r\n\r\n centroids = [(\r\n 'FZTHQMS01EHAJG | cluster size: 4', 'CATGCTGCCTCCCGTAGGAGTTTGGACCGTGTCTCAGTTCCAATGTGGGGGACCTTCCTCTCAGAACCCCTATCCATCGAAGGTTTGGTGAGCCGTTACCTCACCAACTGCCTAATGGAACGCATCCCCATCGATAACCGAAATTCTTTAATAACAAGACCATGCGGTCTGATTATACCATCGGGTATTAATCTTTCTTTCGAAAGGCTATCCCCGAGTTATCGGCAGGTTGGATACGTGTTACTCACCCGTGCGCCGGTCGCCA'),\r\n ('FZTHQMS01DE1KN | cluster size: 2', 'CATGCTGCCTCCCGTAGGAGTTTGGACCGTGTCTCAGTTCCAATGTGGGGGACCTTCCTCTCAGAACCCCTATCCATCGAAGGTTTGGTGAGCCGTTACCTCACCAACTGCCTAATGGAACGCATCCCCATCGATAACCGAAATTCTTTAATAACAAGACCATGCGGTCTGATTATACCATCGGGTATTAATCTTTCTTTCGAAAGGCTATCCCCGAGTTATCGGCAGGTTGGATACGTGTTACTCACCCGTGCGCCGGTCGCCA')]\r\n\r\n singletons = [(\r\n 'FZTHQMS01B8T1H',\r\n 'CATGCTGCCTCCCGTAGGAGTTTGGACCGTGTCTCAGTTCCAATGTGGGGGACCTTCCTCTCAGAACCCCTATCCATCGAAGGTTTGGTGAGCCGTTACCTCACCAACTGCCTAATGGAACGCATCCCCATCGATAACCGAAATTCTTTAATAATTAAACCATGCGGTTTTATTATACCATCGGGTATTAATCTTTCTTTCGAAAGGCTATCCCCGAGTTATCGGCAGGTTGGATACGTGTTACTCACCCGTGCGCCGGTCGCCATCACTTA')]\r\n\r\n store_clusters(mapping, self.tiny_test, self.tmpdir)\r\n actual_centroids = list(\r\n parse_fasta(open(self.tmpdir + \"centroids.fasta\")))\r\n self.assertEqual(actual_centroids, centroids)\r\n actual_singletons = list(\r\n parse_fasta(open(self.tmpdir + \"singletons.fasta\")))\r\n self.assertEqual(actual_singletons, singletons)",
"def save_people_files(self):\r\n\r\n # Check existence of clustering results\r\n if len(self.recognized_faces) == 0:\r\n\r\n # Try to load YAML files\r\n if os.path.exists(self.cluster_files_path):\r\n\r\n print 'Loading YAML files with clustering results'\r\n logger.debug('Loading YAML file with clustering results')\r\n\r\n self.recognized_faces = []\r\n for yaml_file in os.listdir(self.cluster_files_path):\r\n yaml_file_path = os.path.join(\r\n self.cluster_files_path, yaml_file)\r\n with open(yaml_file_path) as f:\r\n self.recognized_faces.append(yaml.load(f))\r\n\r\n print 'YAML file with clustering results loaded'\r\n logger.debug('YAML file with clustering results loaded')\r\n\r\n else:\r\n\r\n print 'Warning! No clustering results found!'\r\n logger.warning('No clustering results found!')\r\n\r\n return\r\n\r\n # Delete already saved files\r\n if os.path.exists(self.compl_ann_path):\r\n\r\n ann_files = os.listdir(self.compl_ann_path)\r\n\r\n for ann_file in ann_files:\r\n ann_file_path = os.path.join(self.compl_ann_path, ann_file)\r\n os.remove(ann_file_path)\r\n\r\n else:\r\n\r\n os.makedirs(self.compl_ann_path)\r\n\r\n # Delete already saved files\r\n if os.path.exists(self.simple_ann_path):\r\n\r\n ann_files = os.listdir(self.simple_ann_path)\r\n\r\n for ann_file in ann_files:\r\n ann_file_path = os.path.join(self.simple_ann_path, ann_file)\r\n os.remove(ann_file_path)\r\n\r\n else:\r\n\r\n os.makedirs(self.simple_ann_path)\r\n\r\n # Get minimum segment duration\r\n min_duration = c.MIN_SEGMENT_DURATION\r\n\r\n if ((self.params is not None) and\r\n (c.MIN_SEGMENT_DURATION_KEY in self.params)):\r\n min_duration = self.params[c.MIN_SEGMENT_DURATION_KEY]\r\n\r\n # Save unique tags\r\n tags = []\r\n\r\n for person_dict in self.recognized_faces:\r\n\r\n ann_tag = person_dict[c.ASSIGNED_TAG_KEY]\r\n\r\n if (ann_tag != c.UNDEFINED_TAG) and (ann_tag not in tags):\r\n tags.append(ann_tag)\r\n\r\n for tag in tags:\r\n\r\n # Create complete annotations\r\n person_dict = {}\r\n\r\n # Create simple annotations\r\n simple_dict = {c.ANN_TAG_KEY: tag}\r\n\r\n person_dict[c.ANN_TAG_KEY] = tag\r\n\r\n segment_list = []\r\n\r\n simple_segment_list = []\r\n\r\n tot_dur = 0\r\n\r\n # Iterate through all recognized people in video\r\n for temp_person_dict in self.recognized_faces:\r\n\r\n ann_tag = temp_person_dict[c.ASSIGNED_TAG_KEY]\r\n\r\n if ann_tag == tag:\r\n\r\n temp_segment_list = temp_person_dict[c.SEGMENTS_KEY]\r\n\r\n for segment_dict in temp_segment_list:\r\n segment_list.append(segment_dict)\r\n\r\n simple_seg_dict = {}\r\n\r\n start = segment_dict[c.SEGMENT_START_KEY]\r\n\r\n simple_seg_dict[c.SEGMENT_START_KEY] = start\r\n\r\n dur = segment_dict[c.SEGMENT_DURATION_KEY]\r\n\r\n tot_dur = tot_dur + dur\r\n\r\n simple_seg_dict[c.SEGMENT_DURATION_KEY] = dur\r\n\r\n simple_segment_list.append(simple_seg_dict)\r\n\r\n person_dict[c.SEGMENTS_KEY] = segment_list\r\n\r\n # (simple_segment_list, tot_dur) = utils.merge_consecutive_segments(\r\n # simple_segment_list, min_duration)\r\n\r\n simple_dict[c.SEGMENTS_KEY] = simple_segment_list\r\n\r\n person_dict[c.TOT_SEGMENT_DURATION_KEY] = tot_dur\r\n\r\n simple_dict[c.TOT_SEGMENT_DURATION_KEY] = tot_dur\r\n\r\n file_name = tag + '.YAML'\r\n\r\n # Save complete annotations\r\n\r\n file_path = os.path.join(self.compl_ann_path, file_name)\r\n\r\n utils.save_YAML_file(file_path, person_dict)\r\n\r\n # Save simple annotations\r\n\r\n file_path = os.path.join(self.simple_ann_path, file_name)\r\n\r\n utils.save_YAML_file(file_path, simple_dict)",
"def gen_cluster_accuracies():\n accuracies = {}\n with Parallel(n_jobs=morphs.parallel.N_JOBS) as parallel:\n for block_path in morphs.paths.blocks():\n print(block_path)\n spikes = morphs.load.ephys_data(block_path, collapse_endpoints=True)\n\n if len(spikes[\"recording\"].unique()) >= 1:\n template_spikes = spikes[spikes[\"stim_id\"].isin(list(\"abcdefgh\"))]\n assert len(template_spikes) > 0\n cluster_groups = template_spikes.groupby(\"cluster\")\n\n morph_dims = spikes.morph_dim.unique()\n morph_dims = morph_dims[~pd.isnull(morph_dims)]\n morph_dims.sort()\n\n max_num_reps = np.max(\n [\n len(stim_group.groupby(by=[\"recording\", \"stim_presentation\"]))\n for stim_id, stim_group in template_spikes.groupby(\"stim_id\")\n ]\n )\n\n accuracies_list = parallel(\n delayed(cluster_accuracy)(\n cluster, cluster_group, morph_dims, max_num_reps\n )\n for (cluster, cluster_group) in cluster_groups\n )\n\n accuracies[block_path] = pd.concat(accuracies_list)\n\n morphs.paths.PROCESSED_DIR.mkdir(parents=True, exist_ok=True)\n with open(morphs.paths.ACCURACIES_PKL.as_posix(), \"wb\") as f:\n pickle.dump(accuracies, f)",
"def _get_gene_clusters(folder_path: str) -> Dict[str, List[SeqRecord]]:\n clusters = os.listdir(folder_path)\n all_clusters = {}\n\n for cluster in clusters:\n print(f\"Gathering genes for cluster: {cluster}\")\n cluster_info = gen_utils.get_list_of_genes_from_fasta_file(os.path.join(folder_path, cluster))\n all_clusters[cluster] = cluster_info\n\n return all_clusters",
"def automerge_clusters(self):\n all_clusters = self.get_clusters().copy()\n\n if not self._single: # if not in single mode mode\n # initialize the variable to check if some change has happened \n changed = False\n for cl_1 in all_clusters: # cycle over clusters\n c_c1 = all_clusters[cl_1]\n for cl_2 in all_clusters: # inner cycle over clusters\n c_c2 = all_clusters[cl_2]\n # if two clusters have the same speaker and have different \n # cluster identifiers\n if cl_1 != cl_2 and c_c1.get_speaker() != 'unknown' and c_c1.get_speaker() == c_c2.get_speaker() and self._clusters.has_key(cl_1) and self._clusters.has_key(cl_2):\n changed = True\n # merge the clusters an record that something changed\n self._merge_clusters(cl_1, cl_2)\n if changed: # if something has changed\n # rename all the clusters starting from S0\n self._rename_clusters()\n # remove also the old waves and seg files of the old clusters\n shutil.rmtree(self.get_file_basename())\n # rebuild all seg files\n self.generate_seg_file(set_speakers=False)\n # resplit the original wave file according to the new clusters\n self._to_trim()",
"def buildClusters(self):\n oldLatFile = 'needed_files/lat.in'\n oldFile = open(oldLatFile, 'r')\n oldLines = [line for line in oldFile]\n oldFile.close()\n \n newFile = open('enum/lat.in','w')\n for i in xrange(len(oldLines)):\n if 'Number pairs' in oldLines[i-1] and i>=1: #bch use label on previous line\n for num in self.clusterNums:\n newFile.write(str(num) + \" \")\n newFile.write(\"\\n\")\n else:\n newFile.write(oldLines[i])\n newFile.close()\n \n lastDir = os.getcwd()\n os.chdir(lastDir + '/enum')\n if sum(self.clusterNums)<=1500: #the 1500 assumes you are running Main with 16G. \n subprocess.call([self.uncleExec, '10'], stdout=self.uncleOut)\n else:\n subprocess.call(['echo','Warning: BLOCKING CLUSTER JOB to save time'])\n# clustersjob = ClustersBuild.clustersjob()\n# clustersjob.clustBuild()\n# \n os.chdir(lastDir)",
"def clusters(self, *args, **kwargs):\n\n result, name = is_file(kwargs.get('value')[0])\n if result:\n jdata = load_file(name)\n dump = False\n else:\n url = self.base.format('file/clusters')\n if by_id:\n self.params['query'] = 'cluster:{0}'.format(kwargs.get('value')[0])\n else:\n self.params['date'] = name\n jdata, response = get_response(url, apikey=self.apikey, params=self.params)\n\n if kwargs.get('return_raw'):\n return jdata\n\n if _check_error(jdata):\n return\n\n simple_list = (\n 'size_top200',\n 'num_clusters',\n )\n\n self.simple_print(jdata, simple_list, indent='\\n\\t')\n for key in simple_list:\n if jdata.get(key):\n self.print_key(key, indent='\\n\\t')\n print('\\n\\t', jdata.get(key))\n\n if jdata.get('clusters'):\n plist = [[]]\n for line in jdata['clusters']:\n plist.append(\n [line['label'], line['avg_positives'], line['id'], line['size']])\n\n pretty_print_special(\n plist,\n ['Label', 'AV Detections', 'Id', 'Size'],\n [40, 15, 80, 8],\n ['l', 'c', 'l', 'c'],\n kwargs.get('email_template')\n )\n\n if dump:\n jsondump(jdata, 'clusters_{0}'.format(name))",
"def load_files(self):\n print('Saving numpy mask arrays in {0}'.format(self.ProcDir))\n\n if not os.path.isdir(self.ProcDir): os.mkdir(self.ProcDir)\n if not os.path.isdir(self.OutDir): os.mkdir(self.OutDir)\n\n self.Files = {}\n for ig in self.Set:\n phase = roipy.tools.load_half(ig,2)\n # convert wavelength to displacements\n # NOTE: make attributes of commonly used values in rsc: float(ig.Rsc['WAVELENGTH'])\n disp = phase * (ig.Wavelength / (4*np.pi))\n igram = ma.array(disp, mask=ma.nomask)\n name = self.save_ma(ig, igram) #Mask_ array is just zeros at this point..\n self.Files[ig.ID] = name\n\n print('load_files() complete: {0} interferograms'.format(self.Set.Nig))",
"def cluster_faces_in_video(self):\r\n\r\n logger.debug('Executing people clustering')\r\n\r\n rec_loaded = False\r\n\r\n # Try to load YAML files\r\n if os.path.exists(self.cluster_files_path):\r\n\r\n print 'Loading YAML files with clustering results'\r\n logger.debug('Loading YAML files with clustering results')\r\n\r\n self.recognized_faces = []\r\n for yaml_file in os.listdir(self.cluster_files_path):\r\n yaml_file_path = os.path.join(\r\n self.cluster_files_path, yaml_file)\r\n with open(yaml_file_path) as f:\r\n self.recognized_faces.append(yaml.load(f))\r\n\r\n print 'YAML files with clustering results loaded'\r\n logger.debug('YAML files with clustering results loaded')\r\n\r\n if not rec_loaded:\r\n\r\n if len(self.tracked_faces) == 0:\r\n\r\n # Try to load YAML file\r\n if os.path.exists(self.track_file_path):\r\n\r\n print 'Loading YAML file with tracking results'\r\n logger.debug('Loading YAML file with tracking results')\r\n\r\n with open(self.track_file_path) as f:\r\n\r\n self.tracked_faces = yaml.load(f)\r\n\r\n print 'YAML file with tracking results loaded'\r\n logger.debug('YAML file with tracking results loaded')\r\n\r\n else:\r\n\r\n print 'Warning! No tracking results found!'\r\n logger.warning('No tracking results found!')\r\n return\r\n\r\n # Make copy of tracked faces\r\n tracking_list = list(self.tracked_faces)\r\n\r\n if ((self.params is not None) and\r\n (ce.FACE_MODELS_DIR_PATH_KEY in self.params)):\r\n if ce.NOSE_POS_FILE_PATH_KEY in self.params:\r\n nose_pos_file_path = self.params[ce.NOSE_POS_FILE_PATH_KEY]\r\n\r\n with open(nose_pos_file_path) as f:\r\n self.nose_pos_list = pk.load(f)\r\n else:\r\n # Save face models\r\n self.save_face_models(tracking_list)\r\n\r\n use_clothing_rec = c.USE_CLOTHING_RECOGNITION\r\n\r\n if ((self.params is not None) and\r\n (c.USE_CLOTHING_RECOGNITION_KEY in self.params)):\r\n use_clothing_rec = self.params[c.USE_CLOTHING_RECOGNITION_KEY]\r\n\r\n if (use_clothing_rec and\r\n ((self.params is None)\r\n or (ce.CLOTH_MODELS_DIR_PATH_KEY not in self.params))):\r\n # Save cloth models\r\n self.save_cloth_models(tracking_list)\r\n\r\n print '\\n\\n### People clustering ###\\n'\r\n logger.debug('\\n\\n### People clustering ###\\n')\r\n\r\n # Save processing time\r\n start_time = cv2.getTickCount()\r\n\r\n self.recognized_faces = []\r\n\r\n # List of segments already analyzed and annotated\r\n ann_segments = []\r\n\r\n model = None\r\n\r\n # Iterate through tracked faces\r\n person_counter = 0\r\n segment_counter = 0\r\n tracked_faces_nr = float(len(tracking_list))\r\n\r\n for tracking_segment_dict in tracking_list:\r\n\r\n self.progress = 100 * (segment_counter / tracked_faces_nr)\r\n\r\n print('progress: ' + str(self.progress) + ' % \\r'),\r\n\r\n if segment_counter not in ann_segments:\r\n\r\n # Save all segments relative\r\n # to one person in person_dict\r\n person_dict = {c.PERSON_COUNTER_KEY: person_counter,\r\n c.ASSIGNED_LABEL_KEY: c.UNDEFINED_LABEL,\r\n c.ASSIGNED_TAG_KEY: c.UNDEFINED_TAG}\r\n\r\n segment_list = []\r\n\r\n segment_dict = {}\r\n\r\n segment_frame_list = tracking_segment_dict[c.FRAMES_KEY]\r\n\r\n segment_dict[c.FRAMES_KEY] = segment_frame_list\r\n\r\n segment_dict[c.ASSIGNED_TAG_KEY] = c.UNDEFINED_TAG\r\n\r\n segment_dict[c.CONFIDENCE_KEY] = 0\r\n\r\n segment_dict[c.SEGMENT_COUNTER_KEY] = segment_counter\r\n\r\n # Start of segment in milliseconds\r\n # of elapsed time in video\r\n\r\n start = tracking_segment_dict[c.SEGMENT_START_KEY]\r\n\r\n segment_dict[c.SEGMENT_START_KEY] = start\r\n\r\n # Duration of 
segment in milliseconds\r\n\r\n duration = tracking_segment_dict[c.SEGMENT_DURATION_KEY]\r\n\r\n segment_dict[c.SEGMENT_DURATION_KEY] = duration\r\n\r\n if c.ANN_TAG_KEY in tracking_segment_dict:\r\n segment_ann = tracking_segment_dict[c.ANN_TAG_KEY]\r\n segment_dict[c.ANN_TAG_KEY] = segment_ann\r\n\r\n segment_list.append(segment_dict)\r\n\r\n ann_segments.append(segment_counter)\r\n\r\n db_path = os.path.join(\r\n self.face_models_path, str(segment_counter))\r\n\r\n if os.path.isfile(db_path):\r\n\r\n model = cv2.createLBPHFaceRecognizer()\r\n\r\n model.load(db_path)\r\n\r\n if model:\r\n # Use model of this segment\r\n # to recognize faces of remaining segments\r\n\r\n ann_segments = self.search_face(ann_segments,\r\n segment_list, model,\r\n segment_counter)\r\n\r\n # Add segments to person dictionary\r\n\r\n person_dict[c.SEGMENTS_KEY] = segment_list\r\n\r\n # Save total duration of video in milliseconds\r\n\r\n tot_duration = (\r\n self.video_frames * 1000.0 / self.fps)\r\n\r\n person_dict[c.VIDEO_DURATION_KEY] = tot_duration\r\n\r\n self.recognized_faces.append(person_dict)\r\n\r\n person_counter += 1\r\n\r\n segment_counter += 1\r\n\r\n del model\r\n\r\n if not (os.path.exists(self.cluster_path)):\r\n # Create directory for people clustering\r\n os.makedirs(self.cluster_path)\r\n\r\n # Save clustering result in YAML files\r\n\r\n # Remove previous files\r\n if os.path.exists(self.cluster_files_path):\r\n shutil.rmtree(self.cluster_files_path)\r\n # Create directory for people clustering results\r\n os.makedirs(self.cluster_files_path)\r\n\r\n counter = 0\r\n for person_dict in self.recognized_faces:\r\n yaml_file_name = str(counter) + '.YAML'\r\n yaml_file_path = os.path.join(self.cluster_files_path, yaml_file_name)\r\n utils.save_YAML_file(yaml_file_path, person_dict)\r\n counter += 1\r\n\r\n # Save processing time\r\n time_in_clocks = cv2.getTickCount() - start_time\r\n time_in_seconds = time_in_clocks / cv2.getTickFrequency()\r\n\r\n print 'Time for people clustering:', time_in_seconds, 's\\n'\r\n logger.debug('Time for people clustering:', time_in_seconds, 's\\n')\r\n\r\n self.anal_times[c.PEOPLE_CLUSTERING_TIME_KEY] = time_in_seconds\r\n\r\n utils.save_YAML_file(self.analysis_file_path, self.anal_times)\r\n\r\n self.calculate_medoids()",
"def mergeCenters(nCenters):\r\n\tpath = os.getcwd()\r\n\tos.chdir('Centers/')\r\n\tcenter = np.zeros((0,128))\t\t#: Populator for centers\r\n\r\n\tfor i in os.listdir(os.getcwd()):\r\n\t Center = open(i,\"rb\")\t\t#: File pointer for centers file\r\n\t center = np.vstack((center, pickle.load(Center)))\t#Populate centers\r\n\t Center.close()\r\n\r\n\tcenter = np.float32(center)\r\n\tcriteria = (cv2.TERM_CRITERIA_MAX_ITER, 10,0.0001)\r\n\t#Checking version of opencv..\r\n\tif __verison__[0] == '3':\r\n\t\tret,label,center=cv2.kmeans(center,int(nCenters),None,criteria,50,cv2.KMEANS_PP_CENTERS)\r\n\telse:\r\n\t\tret,label,center=cv2.kmeans(center,int(nCenters),criteria,50,cv2.KMEANS_PP_CENTERS)\r\n\r\n\tCenterFinal = open(path+'/centerFinal.p',\"wb\")#: File pointer for final centers file\r\n\tpickle.dump(center, CenterFinal)\t#Dump centers to file\r\n\tCenterFinal.close()",
"def gatherfiles(self):\n\t\tfrom subprocess import Popen,PIPE\n\t\timport os\n\t\timport tarfile\n\t\timport glob\n\t\t\n\t\tprint \"=== \",self.nameID,\": Joining all the files in one\"\n\t\t# FIXME: Only there are 1 file, not needed the hadd\n\t\tfinalfile = os.path.join(\"Results\",self.outputfile)\n\t\t# FIXED BUG: just cp when there is only one file, otherwise\n\t\t# there are problems with the TTree\n\t\tif len(self.outputfiles) == 1:\n\t\t\t# Note that when there is only 1 file, always its #task=1\n\t\t\tcommand = [ 'cp', self.outputfiles[1], finalfile ]\n\t\telse:\n\t\t\tcommand = [ 'haddPlus', finalfile ]\n\t\t\tfor f in self.outputfiles.itervalues():\n\t\t\t\tcommand.append( f )\n\t\tp = Popen( command ,stdout=PIPE,stderr=PIPE ).communicate()\n\t\t# Checking if everything was allright\n\t\ttotalevts = self.getevents(finalfile,True)\n\t\tif totalevts != self.nevents:\n\t\t\tmessage = \"\\033[33;1mclustermanager.gatherfiles: WARNING\\033[0m the total file\"\n\t\t\tmessage += \"'\"+finalfile+\"' do not contain all the events:\\n\"\n\t\t\tmessage += \"Total events to be processed:\"+str(self.nevents)+\"\\n\"\n\t\t\tmessage += \"Total events in '\"+finalfile+\"':\"+str(totalevts)+\"\\n\"\n\t\t\tprint message\n\t\t\treturn \n\t\t# If everything was fine, deleting the files \n\t\t# and cleaning the directory\n\t\tfor f in self.outputfiles.itervalues():\n\t\t\tos.remove( f )\n\t\t# Taring and compressing\n\t\tfilestotar = glob.glob(\"./*.*\")\n\t\tfilestotar.append( \".storedmanager\")\n\t\ttar = tarfile.open(os.path.basename(self.cwd)+\".tar.gz\",\"w:gz\")\n\t\tfor f in filestotar:\n\t\t\ttar.add(f)\n\t\ttar.close()\n\t\t# if everything was fine, deleting the files\n\t\tif os.path.exists(os.path.basename(self.cwd)+\".tar.gz\"):\n\t\t\tfor f in filestotar:\n\t\t\t\tos.remove(f)\n\t\telse:\n\t\t\tmessage = \"\\033[33;1mclustermanager.gatherfiles: WARNING\\033[0m I can't manage\\n\"\n\t\t\tmessage += \"to create the backup .tar.gz file\\n\"\n\t\t\tprint message\n\n\t\tprint \"Created \"+finalfile\n\t\tprint \"========= Process Completed =========\"",
"def categorize (self):\n\n fout = defaultdict(list)\n\n # Flat lists of files to collect keyed by platform,category\n collect_files = dict()\n for platform in wanted_files:\n for category, flist in wanted_files[platform].items():\n for f in flist:\n collect_files[(platform,category,f)] = list()\n\n for a in self.artifacts:\n try:\n with zfile.ZFile(a.lpath, 'r') as zf:\n if os.path.splitext(a.lpath)[-1] == '.rpm':\n a.info['plat'] = 'rhel'\n\n platform = a.info['plat']\n if platform not in platforms:\n continue\n\n zfiles = zf.getnames()\n if len(zfiles) == 0:\n print('No files in %s?' % a)\n for category, flist in wanted_files[platform].items():\n for f in flist:\n matches = [(a,x) for x in zfiles if os.path.basename(x) == f]\n if len(matches) > 0:\n collect_files[(platform,category,f)] += matches\n fout[category] += matches\n\n except zfile.tarfile.ReadError as e:\n print('ignoring artifact: %s: %s' % (a.lpath, str(e)))\n\n # Verify that all wanted combinations were matched\n errors = 0\n for missing in [x for x in collect_files if len(collect_files[x]) == 0]:\n errors += 1\n print('ERROR: No matching artifact files for', missing)\n\n if errors > 0:\n raise Exception('Not all wanted files found in artifacts, see above.')\n return fout",
"def create_dicts(filelist, class_out, **kwargs):\n\n c = TwoLevelCountDict()\n d = TwoLevelCountDict()\n m = TwoLevelCountDict()\n\n def _merge_dicts(tup):\n new_c, new_d, new_m = tup\n _merge_tlcd(c, new_c)\n _merge_tlcd(d, new_d)\n _merge_tlcd(m, new_m)\n print(len(c), len(d), len(m))\n\n pool = Pool(4)\n\n for f in filelist:\n #p = Process(target=_process_file, args=(f))\n pool.apply_async(_process_file, args=[f], callback=_merge_dicts)\n #_process_file(f, c, d, m)\n\n # Close the pool...\n pool.close()\n pool.join()\n\n # Write out the dicitonaries...\n c_f = open(c_path, 'wb')\n d_f = open(d_path, 'wb')\n m_f = open(m_path, 'wb')\n\n pickle.dump(c, c_f)\n pickle.dump(d, d_f)\n pickle.dump(m, m_f)\n c_f.close()",
"def create_subsets(self):\n new_dict = defaultdict(dict)\n\n for found_file in self.files:\n match = re.match(self.regex_pattern,\n found_file)\n if match:\n groups = match.groupdict()\n for att in groups:\n value = groups[att]\n try:\n new_dict[att][value].add(found_file)\n except KeyError:\n new_dict[att][value] = set([found_file])\n\n return new_dict",
"def _load_cluster(self):",
"def __autorename_clusters(self, cluster_list, dictionary, n=1):\n renamed_cluster_list = {}\n for cluster, docs in cluster_list.items():\n list_of_vectors = []\n for doc_title in docs:\n for doc in self.__corpus:\n if doc.title == doc_title:\n list_of_vectors.append(doc.vector)\n \n def multiply_vector(vector):\n res = 1\n for dim in vector:\n res *= dim\n return res\n \n # Calculate intersection between vectors.\n intersect = [multiply_vector(vector) for vector in zip(*list_of_vectors)]\n \n # Find common words between all documents.\n common_words = {}\n for i in range(0, len(intersect)):\n if intersect[i] != 0:\n common_words[intersect[i]] = dictionary[i]\n \n # Sort common words.\n if (len(common_words) > 0):\n sorted_commond_words = sorted(common_words.items(), reverse=True)[:n]\n renamed_cluster_list[' '.join([str(elem[1]) for elem in sorted_commond_words])] = cluster_list[cluster]\n else:\n renamed_cluster_list[cluster] = cluster_list[cluster]\n return renamed_cluster_list",
"def build_client_snapshot(self):\n self.client_snapshot = {}\n for dirpath, dirs, files in os.walk(self.cfg['sharing_path']):\n for filename in files:\n filepath = os.path.join(dirpath, filename)\n unwanted_file = False\n for r in Daemon.IGNORED_REGEX:\n if re.match(r, filepath) is not None:\n unwanted_file = True\n print 'Ignored Path:', filepath\n break\n if not unwanted_file:\n relative_path = self.relativize_path(filepath)\n with open(filepath, 'rb') as f:\n self.client_snapshot[relative_path] = ['', hashlib.md5(f.read()).hexdigest()]",
"def getCentroids(self):\n\t\timages=[]\n\t\tcentroids_dict={}\n\t\t#Find all images of the objects\n\t\tfor file in os.listdir(objects_dir):\n\t \tif file.endswith(self.extension):\n\t \timages.append(os.path.join(self.objects_dir, file))",
"def compute_clusters(self, documents):\n ###TODO\n for d in range(0, len(documents)):\n maxi = 999999999\n for cid in range(0, len(self.means)):\n dist = self.distance(documents[d], self.means[cid], self.norms[cid])\n if dist < maxi:\n maxi = dist\n clust = cid \n self.cluster[d] = clust",
"def get_all_clusters(self) -> Dict[str, List[str]]:\n result = {}\n for c_id in set(self._clusters.values()):\n result[c_id] = self.get_cluster_by_id(c_id)\n return result",
"def read_input_files(self):\r\n\r\n for input_file in self.list_of_input_files:\r\n input_file.read_header_of_file()\r\n self.list_of_header_objects.extend(input_file.list_of_header_objects)\r\n self.list_of_header_objects_without_ID.extend(input_file.list_of_header_objects_without_ID)\r\n self.list_of_contigs.extend(input_file.list_of_contigs)\r\n\r\n self.list_of_header_objects = list(toolz.unique(self.list_of_header_objects, key=lambda x: x.tag_and_ID))\r\n self.list_of_header_objects_without_ID = list(\r\n toolz.unique(self.list_of_header_objects_without_ID, key=lambda x: x.line))\r\n self.list_of_contigs = list(toolz.unique(self.list_of_contigs, key=lambda x: x.line))\r\n self.list_of_header_objects.extend(self.list_of_header_objects_without_ID)\r\n self.list_of_header_objects.sort(key=lambda x: x.line)\r\n self.list_of_header_objects.extend(self.list_of_contigs)\r\n self.list_of_header_objects.sort(key=lambda x: x.tag, reverse=False)\r\n self.create_body_header_line_for_output()\r\n self.write_header_in_output_file()\r\n\r\n list_of_chrom = list(self.indices.keys())\r\n list_of_chrom.sort(key=lambda x: self.alphanum_key(x))\r\n for chrom in list_of_chrom:\r\n self.list_of_body_objects.clear()\r\n for input_file in self.list_of_input_files:\r\n input_file.read_specific_chrom_body_of_file(chrom)\r\n self.list_of_body_objects.extend(input_file.list_of_body_objects)\r\n\r\n self.adjust_body_records_to_samples()\r\n self.list_of_body_objects = list(toolz.unique(self.list_of_body_objects, key=lambda x: x.line))\r\n self.list_of_body_objects.sort(key=lambda x: self.alphanum_key(x.line))\r\n self.verify_and_merge_body_records()\r\n self.write_specific_chrom_in_output_file()",
"def zephir_clusters_lookup(self, ocns_list):\n zephir_cluster = {\n \"inquiry_ocns_zephir\": ocns_list,\n \"cid_ocn_list\": [],\n \"cid_ocn_clusters\": {},\n \"num_of_matched_zephir_clusters\": 0,\n \"min_cid\": None,\n }\n\n cid_ocn_list_by_ocns = self.find_zephir_clusters_by_ocns(ocns_list)\n if not cid_ocn_list_by_ocns:\n return zephir_cluster\n\n # find all OCNs in each cluster\n cids_list = [cid_ocn.get(\"cid\") for cid_ocn in cid_ocn_list_by_ocns]\n unique_cids_list = list(set(cids_list))\n cid_ocn_list = self.find_zephir_clusters_by_cids(unique_cids_list)\n if not cid_ocn_list:\n return zephir_cluster\n\n # convert to a dict with key=cid, value=list of ocns\n cid_ocn_clusters = formatting_cid_id_clusters(cid_ocn_list, \"ocn\")\n\n zephir_cluster = {\n \"inquiry_ocns_zephir\": ocns_list,\n \"cid_ocn_list\": cid_ocn_list,\n \"cid_ocn_clusters\": cid_ocn_clusters,\n \"num_of_matched_zephir_clusters\": len(cid_ocn_clusters),\n \"min_cid\": min([cid_ocn.get(\"cid\") for cid_ocn in cid_ocn_list])\n }\n return zephir_cluster",
"def map_files(key):\n \n datadir=os.path.join(os.path.dirname(__file__),'ncnr_sample_data')\n filedict={'empty_1m':os.path.join(datadir,'SILIC001.SA3_SRK_S101'),\n 'empty_4m':os.path.join(datadir,'SILIC002.SA3_SRK_S102'),\n 'empty_cell_1m':os.path.join(datadir,'SILIC003.SA3_SRK_S103'),\n 'blocked_1m':os.path.join(datadir,'SILIC004.SA3_SRK_S104'),\n 'trans_empty_cell_4m':os.path.join(datadir,'SILIC005.SA3_SRK_S105'),\n 'trans_sample_4m':os.path.join(datadir,'SILIC006.SA3_SRK_S106'),\n 'blocked_4m':os.path.join(datadir,'SILIC007.SA3_SRK_S107'),\n 'empty_cell_4m':os.path.join(datadir,'SILIC008.SA3_SRK_S108'),\n 'sample_1m':os.path.join(datadir,'SILIC009.SA3_SRK_S109'),\n 'sample_4m':os.path.join(datadir,'SILIC010.SA3_SRK_S110'),\n 'mask':os.path.join(datadir,'DEFAULT.MASK'),\n 'div':os.path.join(datadir,'PLEX_2NOV2007_NG3.DIV'),\n }\n return filedict[key]",
"def compact(self):\n\n # create a collection list for each bucket\n bucket_to_list_node_dict = {}\n # we need to ensure all buckets should have a collection list\n for bucket in self._buckets:\n bucket_to_list_node_dict[bucket] = []\n for node in self._disk_objects:\n bucket_object = node.value\n assert not bucket_object.is_in_memory()\n bucket_to_list_node_dict[bucket_object.bucket].append(node)\n # bucket by bucket processing\n for bucket, node_list in bucket_to_list_node_dict.items():\n with open(bucket.filepath, \"rb\") as source_file:\n tmp_filepath = bucket.filepath + \".tmp\"\n tmp_offset, tmp_addresses = 0, []\n with open(tmp_filepath, \"wb\") as target_file:\n # copy bytes from filepath to tmp_filepath\n for node in node_list:\n bucket_object = node.value\n source_file.seek(bucket_object.value.address)\n header = source_file.read(4)\n data_length = self._byte_array_to_integer(header)\n data = source_file.read(data_length)\n target_file.write(header + data)\n tmp_addresses.append(tmp_offset)\n tmp_offset += len(header + data)\n # swap files in physical disk\n os.rename(tmp_filepath, bucket.filepath)\n # remove all bucket_object from object_to_list_node dict\n for node in node_list:\n self._object_to_list_node.pop(node.value)\n # update disk address and object_to_list_node dict\n for node, address in zip(node_list, tmp_addresses):\n bucket_object = node.value\n bucket_object.value.address = address\n self._object_to_list_node[bucket_object] = node\n # update bucket's offset, very important in compaction\n bucket._offset = tmp_offset",
"def mapRev2Cluster(self):\n\n # For each condition, operating on the side effect matching file to reduce down into\n # the more general categories\n clusterMapping = pd.read_csv('ClusteredSideEffects.csv', sep='$', index_col=0)\n for condition in self.conditions:\n print(\"I'm working on {:s}\".format(condition))\n files = glob.glob('ReviewsMatched2SideEffects/{:s}*csv'.format(condition))\n files = np.sort(files)\n\n for i,f in enumerate(files):\n df = pd.read_csv(f, sep='$', index_col=0)\n\n for cluster in np.unique(clusterMapping['Cluster']):\n # Finding the relevant SEs for the cluster\n SEs = clusterMapping[clusterMapping['Cluster'].eq(cluster)]['Side effect']\n\n # Summing across all those SEs in the dataframe and creating a new column\n match = [SE for SE in SEs if SE in df.columns]\n df[cluster] = (df[match].sum(axis=1) > 0)\n \n if not match:\n df[cluster] = [0]*len(df)\n \n # Stacking to allow for the depression split\n if i == 0:\n master_df = df.copy()\n else:\n master_df = master_df.append(df, ignore_index=0, sort=False)\n\n\n # Dropping all columns not in clusters\n clusters = list(np.unique(clusterMapping['Cluster']))\n keepers = ['Medication','Positive polarity','Negative polarity','Medication mentions','Effectiveness']\n keepers += clusters\n master_df = master_df[keepers]\n \n # Writing the stack to a file to load on to AWS\n master_df.to_csv('FinalProcessedReviews/{:s}_processed.csv'.format(condition), sep='$')\n print(\"I've saved the clustered file\\n\")",
"def cluster_data(data_loc, num_clusters, base_destination, vectorizer):\n cluster_df = __title_cluster_df(data_loc, num_clusters, vectorizer)\n if not os.path.isdir(base_destination):\n os.mkdir(base_destination)\n vec_path = os.path.join(base_destination, 'vectorizer.pkl')\n with open(vec_path, 'wb') as f:\n pickle.dump(vectorizer, f)\n cluster_stats = {}\n for i in range(num_clusters):\n titles = cluster_df[cluster_df['cluster']==i]['title']\n cluster_stats[i] = titles.shape[0]\n cluster_data = __get_data_with_titles(data_loc, titles)\n dest = os.path.join(base_destination, 'cluster_{}.json'.format(i))\n with open(dest, 'w') as f:\n json.dump(cluster_data, f)\n stats_path = os.path.join(base_destination, 'cluster_statistics.txt')\n with open(stats_path, 'w') as f:\n for cluster in cluster_stats.keys():\n f.write('cluster {}: '.format(cluster))\n f.write(str(cluster_stats[cluster]) + '\\n')",
"def get_files_io():\n if GC.conf['general']['training']:\n files_zip = {\n 'raw': os.path.join(COOKED_DATA, 'train.txt'),\n 'new': os.path.join(COOKED_DATA, 'train_new.txt'),\n 'norm': os.path.join(COOKED_DATA, 'train_norm.txt'),\n 'manu': os.path.join(RAW_DATA, 'others', 'temp_updt_manu.txt'),\n 'labels': os.path.join(TRAIN_DATA, 'train_norm.txt_labels.pkl'),\n 'segll': os.path.join(TRAIN_DATA, 'train_norm.txt_seginf_loglab.pkl'),\n 'segdl': os.path.join(TRAIN_DATA, 'train_norm.txt_seginf_deeplog.pkl'),\n 'struct': os.path.join(TRAIN_DATA, 'train_norm.txt_structured.csv'),\n 'output': TRAIN_DATA\n }\n else:\n files_zip = {\n 'raw': os.path.join(COOKED_DATA, 'test.txt'),\n 'new': os.path.join(COOKED_DATA, 'test_new.txt'),\n 'norm': os.path.join(COOKED_DATA, 'test_norm.txt'),\n 'labels': os.path.join(TEST_DATA, 'test_norm.txt_labels.pkl'),\n 'segll': os.path.join(TEST_DATA, 'test_norm.txt_seginf_loglab.pkl'),\n 'segdl': os.path.join(TEST_DATA, 'test_norm.txt_seginf_deeplog.pkl'),\n 'map_norm_raw': os.path.join(TEST_DATA, 'map_norm_raw.pkl'),\n 'map_norm_rcv': os.path.join(TEST_DATA, 'map_norm_rcv.pkl'),\n 'norm_rcv': os.path.join(TEST_DATA, 'test_norm_rcv.txt'),\n 'struct': os.path.join(TEST_DATA, 'test_norm.txt_structured.csv'),\n 'struct_rcv': os.path.join(TEST_DATA, 'test_norm_rcv.txt_structured.csv'),\n 'top': os.path.join(TEST_DATA, 'analysis_summary_top.txt'),\n 'sum': os.path.join(TEST_DATA, 'analysis_summary.csv'),\n 'rst_llab': os.path.join(TEST_DATA, 'results_loglab.csv'),\n 'rst_dlog': os.path.join(TEST_DATA, 'results_deeplog.txt'),\n 'rst_llzr': os.path.join(TEST_DATA, 'results_loglizer.csv'),\n 'dbg': os.path.join(TEST_DATA, 'debug.csv'),\n 'output': TEST_DATA\n }\n return files_zip",
"def cluster_names(center_names):\n names = np.asarray(center_names)\n\n print(\"Clustering names.\")\n lev_similarity = -1*np.array([[distance.levenshtein(w1, w2) for w1 in names] for w2 in names])\n affprop = sklearn.cluster.AffinityPropagation(affinity=\"precomputed\", damping=0.5)\n affprop.fit(lev_similarity)\n\n print(\"Writing clusters to file.\")\n with open(output_file, \"w\") as f:\n for cluster_id in np.unique(affprop.labels_):\n cluster = np.unique(names[np.nonzero(affprop.labels_ == cluster_id)])\n cluster_dict = {\"cluster\": list(cluster)}\n f.write(json.dumps(cluster_dict, indent=2) + \"\\n\")"
] | [
"0.71143055",
"0.63735855",
"0.6250435",
"0.6203251",
"0.616796",
"0.6153523",
"0.6134056",
"0.59433764",
"0.58170253",
"0.5809386",
"0.57750106",
"0.5771018",
"0.5729723",
"0.5688608",
"0.5670634",
"0.56091005",
"0.55651087",
"0.55550224",
"0.5547735",
"0.55256015",
"0.550403",
"0.5500439",
"0.54938895",
"0.54615355",
"0.5455202",
"0.5454709",
"0.5450013",
"0.54399115",
"0.5438228",
"0.54267186"
] | 0.6880145 | 1 |
Matches the science images to the official SPT catalog. Uses the center pixel coordinate of the 3.6 um science image to match against the SZ centers of the clusters in the official SPT catalog. Clusters are kept only if their images match an SZ center within the given maximum separation. If multiple images match to the same SZ center within the maximum separation, then only the closest match is kept. | def image_to_catalog_match(self, max_image_catalog_sep):
catalog = self._spt_catalog
# Create astropy skycoord object of the SZ centers.
sz_centers = SkyCoord(catalog['RA'], catalog['DEC'], unit=u.degree)
for cluster in self._catalog_dictionary.values():
# Get the RA and Dec of the center pixel in the image.
w = WCS(cluster['ch1_sci_path'])
center_pixel = np.array(w.array_shape) // 2
            # Create astropy skycoord object for the center pixel of the image.
img_coord = SkyCoord.from_pixel(center_pixel[1], center_pixel[0], wcs=w, origin=0)
            # Match the center pixel coordinate to the SZ centers
idx, sep, _ = img_coord.match_to_catalog_sky(sz_centers)
# Add the (nearest) catalog id and separation (in arcsec) to the output array.
cluster.update({'SPT_cat_idx': idx, 'center_sep': sep})
        # Reject any match with a separation larger than the maximum allowed image-catalog separation.
large_sep_clusters = [cluster_id for cluster_id, cluster_info in self._catalog_dictionary.items()
if cluster_info['center_sep'].to(u.arcmin) > max_image_catalog_sep]
for cluster_id in large_sep_clusters:
self._catalog_dictionary.pop(cluster_id, None)
        # If there are any duplicate matches remaining in the sample, we need to remove the match that is the poorer
        # match. We will only keep the closest matches.
match_info = Table(rows=[[cluster['SPT_cat_idx'], cluster['center_sep'], cluster_id]
for cluster_id, cluster in self._catalog_dictionary.items()],
names=['SPT_cat_idx', 'center_sep', 'cluster_id'])
# Sort the table by the catalog index.
match_info.sort(['SPT_cat_idx', 'center_sep'])
        # Use Astropy's unique function to remove the duplicate rows. Because the table rows will be subsorted by the
        # separation column, we only need to keep the first occurrence of each catalog index as our best match.
match_info = unique(match_info, keys='SPT_cat_idx', keep='first')
# Remove the duplicate clusters
duplicate_clusters = set(match_info['cluster_id']).symmetric_difference(self._catalog_dictionary.keys())
for cluster_id in duplicate_clusters:
self._catalog_dictionary.pop(cluster_id, None) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def run(self, max_clusters):\n sample_dist_matrix = self.matrix_dist()\n self.link.print_link()\n first_clus = self.clusters[0] # initialize first cluster to merge into\n second_clus = self.clusters[0] # initialize second cluster to merge\n max_samples_dist = max(sample_dist_matrix.values())\n # initialize minimun distance between two samples\n min_dist = max_samples_dist\n while len(self.clusters) > max_clusters: # clustering loop\n for clus in self.clusters: # iterate over every cluster\n for other_clus in self.clusters: # iterate over other clusters\n if clus.c_id > other_clus.c_id: # avoid duplicates and make sure to pass correct key to dictionary\n # compute distance between two clusters according to current link\n clus_dist = self.link.compute(clus, other_clus, sample_dist_matrix)\n if clus_dist < min_dist: # keep the minimum distance and its clusters\n min_dist = clus_dist\n first_clus = other_clus\n second_clus = clus\n self.clusters.remove(second_clus) # remove the cluster that's getting merged from clusters list\n first_clus.merge(second_clus) # merge the cluster with higher id into the other\n min_dist = max_samples_dist # restore high distance in order to start the search again\n\n sum_sil = self.compute_summery_silhouette(sample_dist_matrix)\n # print results\n for clus in self.clusters:\n clus.print_details(sum_sil[clus.c_id])\n print(f'Whole data: silhouette = {sum_sil[0]}, RI = {self.compute_rand_index()}')",
"def fit_satellite_centers(image, spotcenters_init, window=20):\n # fit satellite centers\n scs = np.zeros((len(spotcenters_init), 2))\n for idx,xy in enumerate(spotcenters_init):\n subim = pre.subimage(image, xy, window=window)\n popt = snm.image_centroid_gaussian1(subim)\n xcenp = popt[1]\n ycenp = popt[2]\n xcen = xy[0]-round(window/2)+xcenp\n ycen = xy[1]-round(window/2)+ycenp\n scs[idx,:] = xcen, ycen \n \n return scs",
"def mergeCenters(nCenters):\r\n\tpath = os.getcwd()\r\n\tos.chdir('Centers/')\r\n\tcenter = np.zeros((0,128))\t\t#: Populator for centers\r\n\r\n\tfor i in os.listdir(os.getcwd()):\r\n\t Center = open(i,\"rb\")\t\t#: File pointer for centers file\r\n\t center = np.vstack((center, pickle.load(Center)))\t#Populate centers\r\n\t Center.close()\r\n\r\n\tcenter = np.float32(center)\r\n\tcriteria = (cv2.TERM_CRITERIA_MAX_ITER, 10,0.0001)\r\n\t#Checking version of opencv..\r\n\tif __verison__[0] == '3':\r\n\t\tret,label,center=cv2.kmeans(center,int(nCenters),None,criteria,50,cv2.KMEANS_PP_CENTERS)\r\n\telse:\r\n\t\tret,label,center=cv2.kmeans(center,int(nCenters),criteria,50,cv2.KMEANS_PP_CENTERS)\r\n\r\n\tCenterFinal = open(path+'/centerFinal.p',\"wb\")#: File pointer for final centers file\r\n\tpickle.dump(center, CenterFinal)\t#Dump centers to file\r\n\tCenterFinal.close()",
"def cluster(self):\n\n result_nominatim = self.nominatim()\n try:\n coord = [(float( i['lat'] ), float( i['lon'] )) for i in result_nominatim]\n except:\n return None\n #print( \"coord\", coord )\n kms_per_radian = 6371.0088\n # Augmenter cette valeur augmente le nombre d'éléments dans un cluster et change les résultats\n epsilon = 2 / kms_per_radian\n # Adapter le nombre de clusters (min_sample) au nombre d'entités dans array ?\n db = DBSCAN( eps=epsilon, min_samples=1, algorithm='ball_tree',\n metric='haversine' ).fit( np.radians( coord ) )\n cluster_labels = db.labels_\n #print( \"cluster\", cluster_labels )\n num_clusters = len( set( cluster_labels ) )\n #print( \"num clusters\", num_clusters )\n counts = np.bincount( cluster_labels )\n #print( \"count\", counts )\n maxi = np.argmax( counts )\n #print( \"maxi\", maxi )\n itemindex = np.where( cluster_labels == maxi )[0]\n #print( \"itemindex\", itemindex )\n\n lat: List[float] = [float( result_nominatim[index]['lat'] ) for index in itemindex]\n lon: List[float] = [float( result_nominatim[index]['lon'] ) for index in itemindex]\n\n # on récupère la moyenne des coordonnées du plus gros cluster. Cette moyenne équivaut au centroide :\n # https://gis.stackexchange.com/questions/12120/calculate-midpoint-from-a-series-of-latitude-and-longitude-coordinates\n\n average = {\"lat\": sum( lat ) / len( lat ), \"lon\": sum( lon ) / len( lon )}\n\n #print( list( zip( cluster_labels, [x['display_name'] for x in results] ) ) )\n #print( \"plus proche de moyenne\", closest( results, average ) )\n return closest( result_nominatim, average )",
"def _cluster_k_medoids_minibatch(self, num_variants, tolerance, batch_size, cache, max_cycles):\n avail_medoid_indices = [self.index[name] for name in self.tree.get_ordered_names() if name in self.available]\n chsn_indices = [self.index[n] for n in self.chosen]\n num_chsn = len(chsn_indices)\n dists = self._transform_distances(tolerance)\n # This spaces the initial centroids randomly around the tree\n seq_chunk = len(avail_medoid_indices) // (num_variants - num_chsn)\n rand_inds = []\n for i in range(num_variants - num_chsn):\n rand_inds.append(avail_medoid_indices[random.randint(i*seq_chunk, (i+1)*seq_chunk-1)])\n best_med_inds = np.array(chsn_indices + rand_inds)\n # Initial random sets\n best_clusters = self._partition_nearest(best_med_inds, dists)\n best_scores = self._sum_dist_scores(best_med_inds, best_clusters, dists)\n best_score = sum(best_scores)\n # Using a simple greedy algorithm, typically converges after 2-5 iterations.\n num_cycles = 0\n improvement = True\n while improvement == True:\n improvement = False\n med_inds = best_med_inds.copy()\n if len(avail_medoid_indices) > batch_size:\n avail_minibatch_inds = random.sample(avail_medoid_indices, batch_size)\n else:\n avail_minibatch_inds = avail_medoid_indices\n for i in range(num_chsn, num_variants):\n for ind in avail_minibatch_inds:\n if ind in med_inds: continue\n med_inds[i] = ind\n score = self._score_pattern(med_inds, dists)\n if score < best_score:\n best_score = score\n best_med_inds[i] = ind\n improvement = True\n else:\n med_inds[i] = best_med_inds[i]\n num_cycles += 1\n cache['cycles_used'] += 1\n if cache['quit_now'] or max_cycles != None and num_cycles >= max_cycles:\n break\n if cache['quit_now'] or max_cycles != None and num_cycles >= max_cycles:\n improvement = False\n break\n best_clusters = self._partition_nearest(best_med_inds, dists)\n best_scores = self._sum_dist_scores(best_med_inds, best_clusters, dists)\n return best_med_inds, best_scores",
"def _find_largest_candidate(self, reduced):\n nbr_counts = np.count_nonzero(reduced == 0, axis=0) # = [1, 1, 4, 2,...] where each value is the number of neighbours for the variant at that index.\n count_max = nbr_counts.max()\n if count_max == 0: # Indicates there are no available variants close enough\n return None, [] # to the remaining unassigned. Usually raises an error.\n max_inds = np.nonzero(nbr_counts == count_max)[0] # Array containing the indices of all variants with the max number of neighbours.\n if len(max_inds) == 1: # A single largest cluster\n best_center = max_inds[0]\n best_clstr = np.nonzero(reduced[:,best_center] == 0)[0]\n else: # A tie for largest cluster. Broken by smallest sum of full scores\n # This was tested with the below more accurate and true scoring function. Unfortunately it became hideously slow (clustered_inds and centre_inds were given as args):\n # clstr_inds = np.nonzero(reduced[:,max_ind] == 0)[0]\n # covered_inds = list(clustered_inds | set(clstr_inds))\n # centre_inds.append(max_ind)\n # score = np.sum(np.min(self.orig_dists[np.ix_(covered_inds,centre_inds)], axis=1))\n # centre_inds.pop()\n best_center, best_clstr, best_score = None, [], np.inf\n for max_ind in max_inds:\n clstr_inds = np.nonzero(reduced[:,max_ind] == 0)[0]\n score = np.sum(self.orig_dists[clstr_inds,max_ind])\n if score < best_score:\n best_center, best_clstr, best_score = max_ind, clstr_inds, score\n return best_center, best_clstr",
"def hassimilarcluster(ind, clusters):\n item = op.itemgetter\n global opt\n found = False\n tx = min(clusters[ind],key=item(0))[0]\n ty = min(clusters[ind],key=item(1))[1]\n for i, cl in enumerate(clusters):\n if i != ind:\n cx = min(cl,key=item(0))[0]\n cy = min(cl,key=item(1))[1]\n dx, dy = cx - tx, cy - ty\n specdist = Hausdorff_distance(clusters[ind],cl,None,(dx,dy))\n if specdist <= int(opt.rgsim):\n found = True\n break\n return found",
"def _qt_radius_clustering_greedy(self, min_to_cluster, reduced, cache, max_cycles):\n centre_inds, clustered_inds = [], set()\n chsn_indices = [self.index[name] for name in self.chosen]\n avail_indices = set(self.index[name] for name in self.available)\n unassigned_indices = list(self._not_ignored_inds - avail_indices - set(chsn_indices))\n if unassigned_indices:\n # Remove unassigned from centre consideration\n reduced[:,unassigned_indices] = np.inf\n for chsn_ind in chsn_indices:\n cluster_inds = np.nonzero(reduced[:,chsn_ind] == 0)[0]\n centre_inds.append(chsn_ind)\n clustered_inds.update(cluster_inds)\n # Remove chosen and their clusters from all future consideration\n reduced[:,cluster_inds] = np.inf\n reduced[cluster_inds,:] = np.inf\n # Iteratively find the largest cluster, until enough variants are clustered\n cache['cycles_used'] = 0\n while len(clustered_inds) < min_to_cluster:\n centre_ind, cluster_inds = self._find_largest_candidate(reduced)\n if centre_ind == None:\n percent_placed = len(clustered_inds)*100.0/float(len(self._not_ignored_inds))\n error_msg = 'Error: clustering finished prematurely ({:.2f}% placed). To fix this, raise the critical threshold, lower the critical percent, or add more available variants.'.format(percent_placed)\n return [], error_msg, [centre_inds, self._not_ignored_inds-clustered_inds]\n centre_inds.append(centre_ind)\n clustered_inds.update(cluster_inds)\n reduced[:,centre_ind] = np.inf\n reduced[cluster_inds,:] = np.inf\n cache['cycles_used'] += 1\n if cache['quit_now'] or max_cycles != None and cache['cycles_used'] >= max_cycles:\n break\n final_cluster_inds = self._partition_nearest(centre_inds, self.orig_dists)\n final_scores = self._sum_dist_scores(centre_inds, final_cluster_inds, self.orig_dists)\n alt_variants = []\n return centre_inds, final_scores, alt_variants",
"def _get_best_fit(self, segmented_instances, num_labels,\\\n stats, end_points, i, j, k, pos_angle=True):\n min_angle = None\n seg_section = None\n min_seg_dist = None\n\n img = np.zeros(segmented_instances.shape)\n img[segmented_instances== (i+1)]= 100\n #self.showme(img, str(i))\n img[segmented_instances== (j+1)]= 100\n #self.showme(img, str(i)+'and? '+str(j))\n cv2.circle(img, (end_points[i][k][0], end_points[i][k][1]), 3, 70, -1)\n seg_section=None\n mid_point_i=[0,0]\n for l in range(0, len(end_points[j])): #Iterate over all endpoints of segment j\n #cv2.line(img,(end_points[i][k][0], end_points[i][k][1]),(stats['centroid'][i,0], stats['centroid'][i,1]),100,4)\n seg_dist = math.sqrt((end_points[j][l][0]-end_points[i][k][0])**2.0 +\n (end_points[j][l][1]-end_points[i][k][1])**2.0 )\n #cv2.line(img,(end_points[j][l][0], end_points[j][l][1]),(stats['centroid'][i,0], stats['centroid'][i,1]),255,4)\n # Stem segments with side branches usually miss a good connection because the centroid is off\n # the end point's axis. To account for this we replace the centroind with a point that is closer to the end point.\n mid_point_i[0] = int(end_points[i][k][0]+ (stats['centroid'][i][0]-end_points[i][k][0])/4.0)\n mid_point_i[1] = int(end_points[i][k][1]+ (stats['centroid'][i][1]-end_points[i][k][1])/4.0)\n\n angle = self._ang([stats['centroid'][j],end_points[j][l]], \\\n [stats['centroid'][j], mid_point_i] )\n if angle==999: #check for a divide by zero error\n cv2.line(img,(stats['centroid'][j][0], stats['centroid'][j][1]),\n (end_points[j][l][0], end_points[j][l][1]), 150, 4 )\n cv2.line(img,(stats['centroid'][j][0], stats['centroid'][j][1]),\n (stats['centroid'][i][0], stats['centroid'][i][1]), 150, 4 )\n cv2.line(img,(end_points[j][l][0], end_points[j][l][1]),\\\n (end_points[i][k][0], end_points[i][k][1]),255,4)\n self.showme(img, str(i)+' '+str(j)+' '+str(angle))\n\n if (pos_angle and angle<=self.angle_thresh) or ( not(pos_angle) and angle>=360-self.angle_thresh):\n if min_seg_dist is None or seg_dist < min_seg_dist:\n min_seg_dist = seg_dist\n min_angle = angle\n seg_section = l\n elif seg_dist<50:\n cv2.line(img,(stats['centroid'][j][0], stats['centroid'][j][1]),\n (end_points[j][l][0], end_points[j][l][1]), 150, 4 )\n cv2.line(img,(stats['centroid'][j][0], stats['centroid'][j][1]),\n (stats['centroid'][i][0], stats['centroid'][i][1]), 150, 4 )\n cv2.line(img,(end_points[j][l][0], end_points[j][l][1]),\\\n (end_points[i][k][0], end_points[i][k][1]),255,4)\n #self.showme(img, str(i)+' '+str(j)+' '+str(angle))\n\n return min_angle, seg_section, min_seg_dist",
"def object_selection(self, ch1_bright_mag, ch2_bright_mag, selection_band_faint_mag, selection_band='I2_MAG_APER4'):\n\n clusters_to_remove = []\n for cluster_id, cluster_info in self._catalog_dictionary.items():\n # Read in the catalog\n se_catalog = Table.read(cluster_info['se_cat_path'], format='ascii')\n\n # Add the mask name to the catalog. Extracting only the system agnostic portion of the path\n se_catalog['MASK_NAME'] = re.search(r'Data_Repository/.*?\\Z', cluster_info['cov_mask_path']).group(0)\n\n # Preform SExtractor Flag cut. A value of under 4 should indicate the object was extracted well.\n se_catalog = se_catalog[se_catalog['FLAGS'] < 4]\n\n # Preform a faint-end magnitude cut in selection band.\n se_catalog = se_catalog[se_catalog[selection_band] <= selection_band_faint_mag]\n\n # Preform bright-end cuts\n # Limits from Eisenhardt+04 for ch1 = 10.0 and ch2 = 9.8\n se_catalog = se_catalog[se_catalog['I1_MAG_APER4'] > ch1_bright_mag] # [3.6] saturation limit\n se_catalog = se_catalog[se_catalog['I2_MAG_APER4'] > ch2_bright_mag] # [4.5] saturation limit\n\n # For the mask cut we need to check the pixel value for each object's centroid.\n # Read in the mask file\n mask, header = fits.getdata(cluster_info['cov_mask_path'], header=True)\n\n # Recast the mask image as a boolean array so we can use it as a check on the catalog entries\n mask = mask.astype(bool)\n\n # Read in the WCS from the mask\n w = WCS(header)\n\n # Get the objects pixel coordinates\n xy_data = np.array(w.wcs_world2pix(se_catalog['ALPHA_J2000'], se_catalog['DELTA_J2000'], 0))\n\n # Floor the values and cast as integers so we have the pixel indices into the mask\n xy_pix_idxs = np.floor(xy_data).astype(int)\n\n # Filter the catalog according to the boolean value in the mask at the objects' locations.\n se_catalog = se_catalog[mask[xy_pix_idxs[1], xy_pix_idxs[0]]]\n\n # If we have completely exhausted the cluster of any object, we should mark it for removal otherwise add it\n # to the data structure\n if se_catalog:\n cluster_info['catalog'] = se_catalog\n else:\n clusters_to_remove.append(cluster_id)\n\n # Remove any cluster that has no objects surviving our selection cuts\n for cluster_id in clusters_to_remove:\n self._catalog_dictionary.pop(cluster_id, None)",
"def refine_center(self, search_size=100, step_size=5, radius_additive=INNER_RADIUS_FULL_SIZE_PHOTO):\n iter = 0\n saves = np.zeros(((search_size*2)**2,3))\n for i_add in xrange(-search_size, search_size, step_size):\n for j_add in xrange(-search_size, search_size, step_size):\n perim_intensities = self.scan_perimeter_intensity(\n self.i_img_center+i_add, self.j_img_center+j_add, self.T, radius_additive, step_size)\n saves[iter, 0] = self.i_img_center+i_add\n saves[iter, 1] = self.j_img_center+j_add\n saves[iter, 2] = perim_intensities[:,3].sum()\n iter += 1\n\n idxs = np.argsort(saves[:,2])\n winner_ij = saves[idxs[-1],:2]\n self.i_img_center = winner_ij[0]\n self.j_img_center = winner_ij[1]\n return(winner_ij[0], winner_ij[1])",
"def get_center_ball_dist(output, x_true, y_true, num_classes=256):\n max_dist = 5\n success, fail = 0, 0\n dists = []\n Rx = 640 / 1280\n Ry = 360 / 720\n\n for i in range(len(x_true)):\n x, y = -1, -1\n # Reshape output\n cur_output = output[i].reshape((360, 640))\n\n # cv2 image must be numpy.uint8, convert numpy.int64 to numpy.uint8\n cur_output = cur_output.astype(np.uint8)\n\n # reshape the image size as original input image\n heatmap = cv2.resize(cur_output, (640, 360))\n\n # heatmap is converted into a binary image by threshold method.\n if num_classes == 256:\n ret, heatmap = cv2.threshold(heatmap, 127, 255, cv2.THRESH_BINARY)\n else:\n heatmap *= 255\n\n # find the circle in image with 2<=radius<=7\n circles = cv2.HoughCircles(heatmap, cv2.HOUGH_GRADIENT, dp=1, minDist=1, param1=50, param2=2, minRadius=2,\n maxRadius=7)\n # check if there have any tennis be detected\n if circles is not None:\n # if only one tennis be detected\n if len(circles) == 1:\n\n x = int(circles[0][0][0])\n y = int(circles[0][0][1])\n\n if x_true[i] < 0:\n if x < 0:\n success += 1\n else:\n fail += 1\n dists.append(-2)\n else:\n if x < 0:\n fail += 1\n dists.append(-1)\n else:\n dist = np.linalg.norm(((x_true[i] * Rx) - x, (y_true[i] * Ry) - y))\n dists.append(dist)\n if dist < max_dist:\n success += 1\n else:\n fail += 1\n\n return dists, success, fail",
"def assign_k_clusters(data, centers):\n clusters = []\n center_data = np.take(data, centers, axis=0)\n best_center = np.argmax(center_data, axis=0)\n for i in range(len(centers)):\n inds = [ind for ind in np.where(best_center == i)[0]]\n clusters.append(inds)\n return clusters",
"def check_central_star(all_images,x_star0,y_star0,all_titles,all_filt,Dx=100,Dy=50):\n index=0\n \n x_star = []\n y_star = []\n \n for image in all_images:\n x0=int(x_star0[index])\n y0=int(y_star0[index])\n \n old_x0=x0-(x0-Dx)\n old_y0=y0-(y0-Dy)\n \n sub_image=np.copy(image[y0-Dy:y0+Dy,x0-Dx:x0+Dx])\n NX=sub_image.shape[1]\n NY=sub_image.shape[0]\n \n profile_X=np.sum(sub_image,axis=0)\n profile_Y=np.sum(sub_image,axis=1)\n X_=np.arange(NX)\n Y_=np.arange(NY)\n \n profile_X_max=np.max(profile_X)*1.2\n profile_Y_max=np.max(profile_Y)*1.2\n \n avX,sigX=weighted_avg_and_std(X_,profile_X**4) ### better if weight squared\n avY,sigY=weighted_avg_and_std(Y_,profile_Y**4) ### really avoid plateau contribution\n #print index,'\\t',avX,avY,'\\t',sigX,sigY\n \n f, (ax1, ax2,ax3) = plt.subplots(1,3, figsize=(20,4))\n\n ax1.imshow(sub_image,origin='lower',vmin=0,vmax=10000,cmap='rainbow')\n ax1.plot([avX],[avY],'ko')\n ax1.grid(True)\n ax1.set_xlabel('X - pixel')\n ax1.set_ylabel('Y - pixel')\n \n ax2.plot(X_,profile_X,'r-',lw=2)\n ax2.plot([old_x0,old_x0],[0,profile_X_max],'y-',label='old',lw=2)\n ax2.plot([avX,avX],[0,profile_X_max],'b-',label='new',lw=2)\n \n \n ax2.grid(True)\n ax2.set_xlabel('X - pixel')\n ax2.legend(loc=1)\n \n ax3.plot(Y_,profile_Y,'r-',lw=2)\n ax3.plot([old_y0,old_y0],[0,profile_Y_max],'y-',label='old',lw=2)\n ax3.plot([avY,avY],[0,profile_Y_max],'b-',label='new',lw=2)\n \n ax3.grid(True)\n ax3.set_xlabel('Y - pixel')\n ax3.legend(loc=1)\n \n \n thetitle=\"{} : {} , {} \".format(index,all_titles[index],all_filt[index])\n f.suptitle(thetitle, fontsize=16)\n \n theX=x0-Dx+avX\n theY=y0-Dy+avY\n \n x_star.append(theX)\n y_star.append(theY)\n \n \n index+=1\n \n x_star=np.array(x_star)\n y_star=np.array(y_star)\n \n return x_star,y_star",
"def SelectClusters(image, background_prediction, result_clustering,\n n_clusters, bands_thresholds=[\"B2\", \"B3\", \"B4\"],\n region_of_interest=None,\n tileScale=PARAMS_CLOUDCLUSTERSCORE_DEFAULT['tileScale']): \n bands_norm_difference = [b + \"_difference\" for b in bands_thresholds]\n\n img_joined = image.subtract(background_prediction)\\\n .select(bands_thresholds, bands_norm_difference)\\\n .addBands(image.select(bands_thresholds))\n\n bands_and_difference_bands = bands_thresholds + bands_norm_difference\n\n multitemporal_score = None\n reflectance_score = None\n\n for i in range(n_clusters):\n img_diff_clus = img_joined.updateMask(\n result_clustering.eq(i)).select(bands_and_difference_bands)\n\n clusteri = img_diff_clus.reduceRegion(ee.Reducer.mean(),\n geometry=region_of_interest,\n bestEffort=True,\n scale=30,\n tileScale=tileScale\n )\n \n clusteri_diff = clusteri.toArray(bands_norm_difference)\n clusteri_refl = clusteri.toArray(bands_thresholds)\n \n clusteri_refl_norm = clusteri_refl.multiply(clusteri_refl).reduce(ee.Reducer.mean(),\n axes=[0]).sqrt().get([0])\n\n clusteridiff_mean = clusteri_diff.reduce(ee.Reducer.mean(), axes=[0]).get([0])\n clusteridiff_norm = clusteri_diff.multiply(clusteri_diff).reduce(ee.Reducer.mean(),\n axes=[0]).sqrt().get([0])\n\n multitemporal_score_clusteri = ee.Algorithms.If(clusteridiff_mean.gt(0),\n clusteridiff_norm,\n clusteridiff_norm.multiply(-1))\n\n multitemporal_score_clusteri = result_clustering.eq(\n i).toFloat().multiply(ee.Number(multitemporal_score_clusteri))\n reflectance_score_clusteri = result_clustering.eq(\n i).toFloat().multiply(ee.Number(clusteri_refl_norm))\n\n if multitemporal_score is None:\n multitemporal_score = multitemporal_score_clusteri\n reflectance_score = reflectance_score_clusteri\n else:\n multitemporal_score = multitemporal_score.add(\n multitemporal_score_clusteri)\n reflectance_score = reflectance_score.add(\n reflectance_score_clusteri)\n\n return multitemporal_score, reflectance_score",
"def clusterlike(self, i):\n name = self.catalog['SPT_ID'][i]\n\n ##### Do we actually want this guy? (some clusters in SPT-SZ are at field boundaries)\n if (name,self.catalog['field'][i]) in self.SPTdoubleCount: return 1.\n if not self.surveyCutSZ[0]<self.catalog['xi'][i]<self.surveyCutSZ[1] or not self.surveyCutRedshift[0]<self.catalog['redshift'][i]<self.surveyCutRedshift[1]: return 1\n\n ##### Check if follow-up is available\n nobs = 0\n obsnames = []\n if self.todo['WL'] and self.catalog['WLdata'][i] is not None:\n nobs+= 1\n if self.catalog['WLdata'][i]['datatype']=='Megacam':\n obsnames.append('WLMegacam')\n elif self.catalog['WLdata'][i]['datatype']=='HST':\n obsnames.append('WLHST')\n # bias = bSim + bMassModel + (bN(z)+bShearCal)\n self.scaling['bWL_HST'] = self.WLcalib['HSTsim'][name][0] + self.scaling['WLbias']*self.catalog['WLdata'][i]['massModelErr'] + self.scaling['HSTbias']*self.catalog['WLdata'][i]['zDistShearErr']\n # lognormal scatter\n self.scaling['DWL_HST'] = self.WLcalib['HSTsim'][name][2]+self.scaling['WLscatter']*self.WLcalib['HSTsim'][name][3]\n cov = [[self.scaling['DWL_HST']**2, self.scaling['rhoSZWL']*self.scaling['Dsz']*self.scaling['DWL_HST']],\n [self.scaling['rhoSZWL']*self.scaling['Dsz']*self.scaling['DWL_HST'], self.scaling['Dsz']**2]]\n if np.linalg.det(cov)<observablecovmat.THRESHOLD:\n return 0.\n self.covmat['WLHST'] = cov\n\n if self.todo['Yx'] and self.catalog['Mg_fid'][i]!=0:\n nobs+= 1\n obsnames.append('Yx')\n if self.todo['Mgas'] and self.catalog['Mg_fid'][i]!=0:\n nobs+= 1\n obsnames.append('Mgas')\n if nobs==0:\n return 1.\n\n ##### Set SPT field scaling factor\n self.thisSPTfieldCorrection = self.SPTfieldCorrection[self.SPTfieldNames.index(self.catalog['field'][i])]\n\n #####\n if nobs==1:\n probability = self.get_P_1obs_xi(obsnames[0], i)\n\n elif nobs==2:\n if 'WLMegacam' in obsnames: covname = 'XrayMegacam'\n elif 'WLHST' in obsnames:\n covname = 'XrayHST'\n cov = [[self.scaling['DWL_HST']**2, self.scaling['rhoWLX']*self.scaling['DWL_HST']*self.scaling['Dx'], self.scaling['rhoSZWL']*self.scaling['Dsz']*self.scaling['DWL_HST']],\n [self.scaling['rhoWLX']*self.scaling['DWL_HST']*self.scaling['Dx'], self.scaling['Dx']**2, self.scaling['rhoSZX']*self.scaling['Dsz']*self.scaling['Dx']],\n [self.scaling['rhoSZWL']*self.scaling['Dsz']*self.scaling['DWL_HST'], self.scaling['rhoSZX']*self.scaling['Dsz']*self.scaling['Dx'], self.scaling['Dsz']**2]]\n if np.linalg.det(cov)<observablecovmat.THRESHOLD:\n return 0.\n self.covmat[covname] = cov\n if self.scaling['rhoWLX']==0:\n probability = self.get_P_1obs_xi(obsnames[0], i) * self.get_P_1obs_xi(obsnames[1], i)\n else:\n probability = self.get_P_2obs_xi(obsnames[:2], i, covname)\n\n else:\n raise ValueError(name,\"has\",nobs,\"follow-up observables. I don't know what to do!\")\n\n if (probability<0) | (np.isnan(probability)):\n return 0\n # raise ValueError(\"P(obs|xi) =\", probability, name)\n\n return probability",
"def resolve_ambiguous(cluster_ar, mz, intensity, min_score=0.6,\r\n min_abundance=0.25, min_improve=0.3,\r\n verbose=False, GT=None):\r\n #%%\r\n result_dic = {}\r\n nc = 0\r\n single_cluster = 0\r\n for cluster_dic in cluster_ar:\r\n nc += 1\r\n score_dic = {}\r\n score_opt_dic = {}\r\n if verbose:\r\n #print (\"{} / {} Done.\".format(nc, n))\r\n print (\"_________________________________________________________________________________________________________________\")\r\n print (\"Resolving cluster:\", cluster_dic)\r\n print (\"MZ Range: \", np.round(mz[list(cluster_dic.values())[0]][0],2))\r\n cluster_ids = list(cluster_dic.keys())\r\n #single cluster does not need any correction, just score it and leave it be\r\n if len(cluster_dic) == 1:\r\n single_cluster += 1\r\n for idx, (z,count) in enumerate(cluster_ids):\r\n\r\n score = AM.score_cluster(mz[cluster_dic[(z,count)]], intensity[cluster_dic[(z,count)]], z)[0]\r\n score_dic[(z,count)] = score\r\n result_dic[(z, count)] = (True if score >= min_score else \\\r\n False, cluster_dic[(z,count)])\r\n\r\n\r\n if verbose:\r\n print (\"\\t Single Cluster {}. Score: {}\".format(\"Rejected\" if score < min_score else \"Accepted\",\r\n np.round(score, 2)))\r\n continue\r\n\r\n #more than one clusters\r\n else:\r\n\r\n #here the interesting part starts, resolving overlapping clusters\r\n # number of paths defines the dimensions\r\n dims = len(cluster_ids)\r\n #mapping from nodes to indices\r\n all_idx = np.sort(np.array(list({item for vals in cluster_dic.values() for item in vals})))\r\n idx_mapping = {i:j for i,j in zip(all_idx, np.arange(0, len(all_idx), 1))}\r\n #initialize X matrix for Lasso, columns will correspond to the response factor\r\n #based on the averagine model\r\n res = np.zeros(dims*len(all_idx)).reshape(dims, len(all_idx))\r\n\r\n\r\n # =============================================================\r\n # Step 1: prescoring without optimization\r\n # =============================================================\r\n for idx, z in enumerate(cluster_ids):\r\n score, peaks = AM.score_cluster(mz[cluster_dic[z]], intensity[cluster_dic[z]], z[0])\r\n score_dic[z] = score\r\n res[idx][[idx_mapping[i] for i in cluster_dic[z]]] = peaks[:, 1]\r\n\r\n # =============================================================\r\n # Step 2: Perform non-negative Lasso\r\n # =============================================================\r\n #perform, non-negative lasso\r\n X = np.transpose(res)\r\n y = intensity[list(all_idx)] #0.0001\r\n lin = Lasso(alpha=1,precompute=True,max_iter=1000, positive=True, random_state=9999,\r\n selection='random', fit_intercept=False,\r\n tol=0.001).fit(X,y)\r\n\r\n coefs = (lin.coef_ + 0.000001 )\r\n abundance_estimate = coefs / coefs.sum()\r\n\r\n # =============================================================\r\n # Step 3: Correct Intensities\r\n # =============================================================\r\n #coefs will give an estimate of the abundance of the individual\r\n #species\r\n abundant_clusters = np.where(abundance_estimate >= min_abundance)[0]\r\n if len(abundant_clusters) == 1:\r\n\r\n #do not do any correction, probably just an artefact\r\n # e.g. 
overlapping clusters from z=2 and z=1\r\n # 500, 500.5, 501, 501.5 and 500, 501\r\n id_tmp = cluster_dic[cluster_ids[abundant_clusters[0]]]\r\n score, _ = AM.score_cluster(mz[id_tmp], intensity[id_tmp],\r\n cluster_ids[abundant_clusters[0]][0])\r\n score_dic[cluster_ids[abundant_clusters[0]]] = score\r\n result_dic[cluster_ids[abundant_clusters[0]]] = (True \\\r\n if score >= min_score else False, id_tmp)\r\n\r\n if verbose:\r\n print (\"\\t Multi-Cluster (LA) {}. Score: {}. Cluster: {} Abundance: {}\".format(\r\n \"Rejected\" if score < min_score else \"Accepted\",\r\n np.round(score, 2),\r\n cluster_ids[abundant_clusters[0]],\r\n abundance_estimate))\r\n continue\r\n\r\n #okay, here we have two abundant fragments\r\n #do the correction\r\n #rescore with optimized intensities\r\n if verbose:\r\n exp_ar = []\r\n mz_ar = []\r\n z_ar = []\r\n\r\n\r\n for idx, z in enumerate(cluster_ids):\r\n idx_c = [idx_mapping[idi] for idi in cluster_dic[z]]\r\n X_tmp = np.copy(X)\r\n X_tmp[:, [i for i in np.arange(0, len(cluster_dic)) if i == idx]] = 0.0\r\n\r\n #the true intensity of this cluster is the\r\n #observed intensity - predicted intensity of the other peps\r\n exp_intensity = np.abs(intensity[all_idx] - lin.predict(X_tmp))[idx_c]\r\n score, _ = AM.score_cluster(mz[cluster_dic[z]], exp_intensity, z[0])\r\n score_opt_dic[z] = score\r\n if verbose:\r\n print (\"\\t Multi-Cluster (HA):\")\r\n print (\"\\t\\t\",\";\".join([str(i) for i in\r\n np.round(abundance_estimate, 2)]))\r\n print (\"\\t\\t ({}, {}) {} Score: {}\".format(idx, z,\r\n \"Rejected\" if score < min_score else \"Accepted\",\r\n score))\r\n\r\n if verbose:\r\n exp_ar.append(exp_intensity)\r\n mz_ar.append(mz[cluster_dic[z]])\r\n z_ar.append(z)\r\n\r\n # =================================================================\r\n # Step 4: Final selection of clusters\r\n # =================================================================\r\n #%%\r\n old_scores = np.array([score_dic[i] for i in score_dic.keys()])\r\n new_scores = np.array([score_opt_dic[i] for i in score_dic.keys()])\r\n diff = (new_scores - old_scores).sum()\r\n\r\n if diff < min_improve:\r\n if verbose:\r\n print(\"Did't improve...\")\r\n #lasso and modelling didn't improve the clusters\r\n # check\r\n for i in score_dic.keys():\r\n result_dic[i] = (True if score_dic[i] >= min_score \\\r\n else False, cluster_dic[i])\r\n\r\n else:\r\n if verbose:\r\n print(\"Improved by lasso...\")\r\n print(old_scores)\r\n print(new_scores)\r\n #jay, we improved the scores 'significantly' (#not)\r\n #lets keep all the clusters we found\r\n for i in score_opt_dic.keys():\r\n result_dic[i] = (True if score_opt_dic[i] >= min_score \\\r\n else False, cluster_dic[i])\r\n\r\n if GT:\r\n print()\r\n print(\"Optimized: \", score_dic)\r\n print(\"Unoptimized:\", score_opt_dic)\r\n #avoid the ratio to contain a zero\r\n ratio = [str(i) for i in np.round(coefs / coefs.min(),2)]\r\n offset = 0.15\r\n plt.bar(mz[all_idx], intensity[all_idx], label=\"measured\",\r\n width=.15, hatch=\"//\")\r\n for mzi, inti, zi in zip(mz_ar, exp_ar, z_ar):\r\n plt.bar(mzi+offset, inti, width=0.15, label=\"Z: {} Score: {} Score opt: {}\".format(zi[0],\r\n np.round(score_dic[zi],2),\r\n np.round(score_opt_dic[zi], 2)))\r\n offset += 0.15\r\n plt.legend()\r\n\r\n\r\n plt.title(\"\"\"Expected ratio: {} \\n\r\n Computed ratio: {} \\n\r\n Case: {}\"\"\".format(GT[\"ratio\"], \":\".join(ratio),\r\n GT[\"Case\"]))\r\n\r\n plt.savefig(\"testcase_{}_resolved.png\".format(str.zfill(GT[\"TestID\"], 2)))\r\n #%%\r\n 
return(result_dic)",
"def closest(data):\n\n images, pc_projections, pcs = data.pca.load()\n\n pc_projections_truncated = pc_projections[:, :data.analysis.config.pc_projection_count]\n\n closest_group_count = int(round(data.analysis.config.closest_group * images.shape[0], 0))\n representative_count = int(round(data.analysis.config.representative * images.shape[0], 0))\n\n closest_group = kclosest.k_closest(closest_group_count, pc_projections_truncated)\n representative = closest_group[kclosest.k_closest(representative_count, pc_projections_truncated[closest_group, :])]\n\n data.analysis.save_closest(closest_group, representative)",
"def find_center_visual_sinograms(sino_180, output, start, stop, step=1,\n zoom=1.0):\n (nrow, ncol) = sino_180.shape\n output_name = losa.make_folder_name(output, name_prefix=\"Find_center\",\n zero_prefix=3)\n output_base = output + \"/\" + output_name\n step = np.clip(step, 0.05, ncol - 1)\n start = np.clip(start, 0, ncol - 1)\n stop = np.clip(stop + step, start + step, ncol - 1)\n center_flip = (ncol - 1.0) / 2.0\n sino_flip = np.fliplr(sino_180)\n for center in np.arange(start, stop, step):\n shift_col = 2.0 * (center - center_flip)\n sino_shift = ndi.shift(sino_flip, (0, shift_col), order=3,\n prefilter=False, mode=\"nearest\")\n sino_360 = np.vstack((sino_180, sino_shift))\n sino_zoom = ndi.zoom(sino_360, zoom, mode=\"nearest\")\n file_name = \"/center_{0:6.2f}\".format(center) + \".tif\"\n losa.save_image(output_base + file_name, sino_zoom)\n return output_base",
"def _cluster_segments_all_way(self, segmented_instances, labels, \\\n end_points, stats, cluster_thresh=0.5):\n\n #self.showme(segmented_instances, 'main img')\n segment_association_list = []\n max_num_end_points= 0\n\n # for each stem segment\n for i in range(0, len(labels)):\n # each end point in the current segment i\n if max_num_end_points < len(end_points[i]):\n max_num_end_points = len(end_points[i])\n for k in range(0, len(end_points[i])):\n angle_list=[]\n # find the segment that is most likely connected to segment i at end point[i][k]\n for j in range(0, len(labels)):\n # make sure we are not trying to connect the segment to itself\n if i!= j:\n # angle calculates the angle between the line stats['centroid'][i]-end_points[i][k]\n # and stats['centroid'][i]-stats['centroid'][j]\n\n angle = self._ang([stats['centroid'][i],end_points[i][k]], \\\n [stats['centroid'][i], stats['centroid'][j]] )\n # if the angle value is within the acceptable range of +/- angle_thresh\n if angle<=self.angle_thresh or angle>=360-self.angle_thresh:\n other_angle, other_seg_section, end_point_dist = self._get_best_fit(segmented_instances, \\\n len(labels), \\\n stats, end_points,\\\n i, j, k, pos_angle=angle<=self.angle_thresh)\n # if the best fit segment also has a small angle between its\n # end point-centroid line and centroid-centroid line,\n # add it to segments connected to segment i\n if other_angle!=None and other_angle<=self.angle_thresh:\n angle_list.append((j, other_seg_section, other_angle, end_point_dist, angle))\n #Sort the list of stem segments connected to i by end_point_dist\n angle_list = sorted(angle_list, key=lambda x:x[3])\n #Sorting by the Euclidian distance of the end_point_dist and the other_angle does not change end result\n #angle_list = sorted(angle_list, key=lambda x:(math.sqrt(x[3]**2.0+x[2]**2.0)))\n # the angle value reflects how far segment k is from the straight line\n # going through the centroids\n if len(angle_list)>0:\n # (i, j, k, l, angle between i and centroid line, angle between j and centroid line, distance between closest end points k in seg i and l in seg j)\n segment_association_list.append((i,angle_list[0][0],k, angle_list[0][1], angle_list[0][4], angle_list[0][2], angle_list[0][3]))\n\n\n # sort slope differences in an increasing order\n segment_association_list = sorted(segment_association_list,key=lambda x:(x[6]))\n\n # find best match by iteretively selecting the smallest difference\n # and adding it to the ith cluster\n cluster_list = []\n cluster = np.full(len(labels),None)\n colored_clusterImg = np.zeros(segmented_instances.shape).astype(np.uint8)\n #clusterImg = np.zeros(segmented_instances.shape).astype(np.uint8)\n\n # initialize cluster list to single clusters contianing only each individual segment\n for i in range(0, len(labels)):\n cluster[i]=i\n cluster_list.append([i])\n #self.showme(clusterImg, str(i))\n\n visited=np.full((len(labels),max_num_end_points), False)\n\n #cluster=np.frompyfunc(list,1,1)(cluster) # allows us to append to only the specified list end_points[i]\n new_cluster_num=0\n color_offset=len(labels)\n\n # for each pair of segments in our list of best fit segments\n for curr_tuple in segment_association_list:\n img = np.zeros(segmented_instances.shape)\n i = curr_tuple[0] # index of first segment\n j = curr_tuple[1] # index of second segment in the tuple\n i_section = curr_tuple[2] #end point number in segment i\n j_section = curr_tuple[3] #end point number in segment j\n angle = curr_tuple[4]\n other_angle = curr_tuple[5]\n 
end_point_dist = curr_tuple[6] #distance between the connecting end points of segments i and j\n img[segmented_instances== i]= 255\n img[segmented_instances== j]= 255\n if (visited[i][i_section]==False)and(visited[j][j_section]==False):\n #cv2.line(clusterImg,(end_points[i][i_section][0],end_points[i][i_section][1]),\\\n # (end_points[j][j_section][0], end_points[j][j_section][1]),150,2)\n #self.showme(clusterImg, str(i))\n visited[i][i_section]=True\n visited[j][j_section]=True\n cluster_num = cluster[i]\n if cluster[i]!=cluster[j]:\n other_cluster_num = cluster[j]\n cluster_list[cluster_num] = list(set(cluster_list[cluster_num]+\\\n copy.deepcopy(cluster_list[other_cluster_num])))\n # update cluster numbers for all segments moved into new cluster\n for seg in cluster_list[other_cluster_num]:\n cluster[seg]=cluster_num\n # update cluster numbers for clusters larger than cluster to be removed\n for idx in range(0, len(cluster)):\n if (cluster[idx]>other_cluster_num):\n cluster[idx]= cluster[idx]-1\n del cluster_list[other_cluster_num]\n\n\n #show clustered segments\n color = 0\n cluster_num = 0\n cluster_mask=[]\n\n for c in cluster_list:\n color = color+0.1\n cluster_mask.append(np.zeros(segmented_instances.shape).astype(np.uint8))\n\n for i in c:\n cluster_mask[cluster_num][(segmented_instances == labels[i])]=1\n colored_clusterImg[(segmented_instances == labels[i])]= int(color*255)\n \"\"\"if self.key in ['../data/images/image1672', '../data/images/image1289']:\n self.showme(colored_clusterImg)\"\"\"\n cluster_num +=1\n\n return cluster_mask, colored_clusterImg",
"def catalogmatch(conn, sources, catalog, imobj, search_radius, save):\n catalog_matched = []\n\n cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)\n \n match_logger.info('Attempting to match {} sources from this image to '\n 'the {} sky catalog...'.format(len(sources), catalog))\n\n # Print results without saving to database\n if not save:\n # Dump sources into a temporary table\n sql = (\n '''\n CREATE TEMP TABLE temp_source (\n src_id INTEGER,\n ra DOUBLE PRECISION,\n dec DOUBLE PRECISION\n );\n ''')\n cur.execute(sql)\n conn.commit()\n for src in sources:\n cur.execute('''INSERT INTO temp_source (\n src_id, ra, dec) VALUES (%s, %s, %s)''', (\n src.src_id, src.ra, src.dec))\n conn.commit()\n # Find nearest neighbor within FOV & \"match\" if within half a beam\n sql = '''SELECT a.src_id, bb.id AS catalog_src_id,\n 3600*q3c_dist(a.ra, a.dec, bb.ra, bb.dec) AS sep,\n 3600*q3c_dist(a.ra, a.dec, bb.ra, bb.dec) < %s AS match\n FROM temp_source AS a, LATERAL (\n SELECT b.* FROM radcat.{} AS b\n WHERE q3c_join(a.ra, a.dec, b.ra, b.dec, %s)\n ORDER BY q3c_dist(a.ra, a.dec, b.ra, b.dec) ASC LIMIT 1) AS bb'''\n values = (0.5*imobj.bmin, 2.*imobj.radius)\n cur.execute(psycopg2.sql.SQL(sql).format(\n psycopg2.sql.Identifier(catalog)), values)\n rows = cur.fetchall()\n cur.execute('DROP TABLE temp_source')\n conn.commit()\n\n match_logger.info('-------------------------------------------------'\n '-------------')\n match_logger.info('VLITE_src_id match catalog_src_id '\n 'separation (arcsec)')\n match_logger.info('-------------------------------------------------'\n '-------------') \n for row in rows:\n if row['match']:\n catalog_matched.append(row['catalog_src_id'])\n match_logger.info('{}\\t\\t{}\\t{}\\t{}'.format(\n row['src_id'], row['match'], row['catalog_src_id'], row['sep']))\n\n # Store results for insertion into database\n else:\n # Skip the sources which already have results for this catalog\n # (from a different image)\n assoc_ids = []\n for src in sources:\n already_matched = dbio.check_catalog_match(conn, src.id, catalog)\n if already_matched:\n continue\n else:\n assoc_ids.append(src.id)\n match_logger.info(' -- found previous matching results for {} sources'.\n format(len(sources) - len(assoc_ids)))\n\n # Find nearest neighbor within half a beam\n sql = '''SELECT a.id AS assoc_id, bb.*, \n 3600*q3c_dist(a.ra, a.dec, bb.ra, bb.dec) AS sep\n FROM assoc_source AS a, LATERAL (\n SELECT b.* FROM radcat.{} AS b\n WHERE a.id IN %s AND q3c_join(a.ra, a.dec, b.ra, b.dec, %s)\n ORDER BY q3c_dist(a.ra, a.dec, b.ra, b.dec) ASC LIMIT 1) AS bb'''\n values = (tuple(assoc_ids), (0.5*(imobj.bmin/3600.)))\n cur.execute(psycopg2.sql.SQL(sql).format(\n psycopg2.sql.Identifier(catalog)), values)\n rows = cur.fetchall()\n\n matched_ids = []\n for row in rows:\n matched_ids.append(row['assoc_id'])\n csrc = catalogio.CatalogSource()\n dbclasses.dict2attr(csrc, row)\n catalog_matched.append(csrc)\n\n for src in sources:\n if src.id in matched_ids:\n # Found a match!\n try:\n src.nmatches += 1\n except TypeError:\n src.nmatches = 1\n else:\n if src.nmatches is None:\n src.nmatches = 0\n\n cur.close()\n\n match_logger.info (' -- number of matches: {}'.format(len(catalog_matched)))\n\n return sources, catalog_matched",
"def match3(img1, img2, coordinates1, coordinates2, PATCH_SIZE, threshold=0.7):\n\n\t#creating patches for all points from img1 and img2\n\tcoord1_patches = [make_patch(coordinate, PATCH_SIZE, img1) for coordinate in coordinates1]\n\tcoord2_patches = [make_patch(coordinate, PATCH_SIZE, img2) for coordinate in coordinates2]\n\n\t# creating a matrix with dissimilarity measures for all pairs\n\tall_matches = np.zeros((len(coordinates1), len(coordinates2)))\n\n\tfor (x, y), _ in np.ndenumerate(all_matches):\n\t\tall_matches[x,y] = count_difference(coord1_patches[x], coord2_patches[y])\n\n\t#looking for best left-to-right and right-to-left matches\n\tmatches = []\n\t#left-to-right\n\tfor i, coord1 in enumerate(coordinates1):\n\t\tbest_ltr_match = np.argmin(all_matches[i, :]) #best left-to-right match for coord1\n\t\tbest_rtl_match = np.argmin(all_matches[:, best_ltr_match]) #best match for a best match\n\t\tif (i == best_rtl_match): #hurray, there is a super match\n\n\t\t\tmatches.append([coord1, coordinates2[best_ltr_match], all_matches[i, best_ltr_match]])\n\t\n\treturn matches",
"def consolidate_instances_all_way(self, stats, segmented_instances):\n\n img = np.zeros(segmented_instances.shape).astype(np.uint8)\n\n #get all pixel labels in the segmented_instances mask\n segment_numbers = np.unique(segmented_instances)\n\n # remove the background label\n segment_numbers=segment_numbers[segment_numbers!=0]\n\n end_points = np.empty((len(segment_numbers),),dtype=np.object_)\n end_points.fill([])\n\n for curr_segment in segment_numbers:\n idx=[]\n i=curr_segment-1\n if curr_segment!=0:\n #Show all segments of curr_segment. Only useful to view results\n img[segmented_instances== curr_segment]= 255\n #get indeces of the segments for curr_segment\n idx = np.argwhere(segmented_instances == curr_segment)\n if len(idx>0):\n end_points[i]= self._get_end_points(segmented_instances, i, \\\n stats, idx)\n # add point markers and lines connecting each end point to centroid.\n # useful only to view results\n \"\"\"for pt_num, pt in enumerate(end_points[i]):\n cv2.circle(img, (pt[0],pt[1]), 3, 100, -1)\n cv2.line(img,(pt[0],pt[1]),\\\n (stats['centroid'][i,0], stats['centroid'][i,1]),150,2)\n cv2.circle(img, (stats['centroid'][i,0], stats['centroid'][i,1]), 3, 200, -1)\"\"\"\n #self.showme(img, 'line '+str(i))\n\n # cluster segments into stem instances\n cluster_mask, clustered_instances = self._cluster_segments_all_way(segmented_instances,\\\n segment_numbers, end_points, \\\n stats)\n\n #put all instances in one layer\n if len(cluster_mask)>0:\n single_layer_cluster_mask=np.zeros(cluster_mask[0].shape)\n for i in xrange(len(cluster_mask)):\n single_layer_cluster_mask[cluster_mask[i]>0]= i+1\n\n # self.showObjects(clustered_instances);\n return single_layer_cluster_mask, clustered_instances",
"def label_centroids_heuristically(self, centroids: np.ndarray):\n\n cluster_centroids_labels = [(\"\", {}) for c in centroids]\n\n centre_point = centroids[0]\n heuristic_centroids = np.array(\n [\n centre_point + [-30, 30],\n centre_point + [30, 30],\n centre_point + [0, -48.125],\n ]\n )\n heuristic_centroid_labels = [\n ConstJoint.LEFT_EYE,\n ConstJoint.RIGHT_EYE,\n ConstJoint.MOUTH,\n ]\n labeled = [False for c in centroids]\n used_label = [False for c in heuristic_centroids]\n while self.__are_labels_matched_with_centroids(cluster_centroids_labels, \"\"):\n min_dist_square = math.inf\n min_centroid = 0\n min_cluster = 0\n current_cluster = {}\n for i, c in enumerate(centroids):\n if labeled[i]:\n continue\n for j, cl in enumerate(heuristic_centroids):\n if used_label[j]:\n continue\n diff = c - cl\n dist_square = diff.dot(diff)\n\n if dist_square < min_dist_square:\n min_centroid = i\n current_cluster = c\n min_cluster = j\n min_dist_square = dist_square\n\n cluster_centroids_labels[min_centroid] = (\n heuristic_centroid_labels[min_cluster],\n current_cluster,\n )\n labeled[min_centroid] = True\n used_label[min_cluster] = True\n\n return cluster_centroids_labels",
"def newCenter(x, y, group, iteration, lastKSet1, lastKSet2):\n\tsumOneX = 0\n\tsumOneY = 0\n\tsumTwoX = 0\n\tsumTwoY = 0\n\tnumOne = 0\n\tnumTwo = 0\n\n\tfor i in range(len(group[iteration])):\n\t\tif (group[iteration][i] == 1):\n\t\t\tsumOneX += x[i]\n\t\t\tsumOneY += y[i]\n\t\t\tnumOne += 1\n\t\telse:\n\t\t\tsumTwoX += x[i]\n\t\t\tsumTwoY += y[i]\n\t\t\tnumTwo += 1\n\n\tif(numOne == 0):\n\t\tkSet1 = lastKSet1\n\tif(numTwo == 0):\n\t\tkSet2 = lastKSet2\n\telse:\n\t\tkSet1 = [sumOneX/numOne, sumOneY/numOne]\n\t\tkSet2 = [sumTwoX/numTwo, sumTwoY/numTwo]\n\n\treturn (kSet1, kSet2)",
"def center(self, obj):\n mn0 = self.master.xy >= obj.center\n mn1 = self.master.xy <= obj.center\n\n point_list = [self.master.xy[mn0], self.master.xy[mn1], self.master.xy[mn0[0], mn1[1]], self.master.xy[mn1[0], mn0[1]]] # 4 physical points near the center coordinate.\n dist_list = []\n idx = 0\n for point in point_list:\n dist_list.append([idx, np.linalg.norm(point - obj.center)]) # Calculate Euclidean distances.\n idx += 1\n dist_sorted = sorted(dist_list, key=lambda distance : distance[1]) # Sort distances in ascending order.\n return self.master.mn(point_list[dist_sorted[0][0]]) # Convert the closest point to abstract coordinate and then return.",
"def run_selection(self, included_clusters, excluded_clusters, max_image_catalog_sep, ch1_min_cov, ch2_min_cov,\n ch1_bright_mag, ch2_bright_mag, selection_band_faint_mag, ch1_ch2_color, spt_colnames,\n output_name, output_colnames):\n self.file_pairing(include=included_clusters, exclude=excluded_clusters)\n self.image_to_catalog_match(max_image_catalog_sep=max_image_catalog_sep)\n self.coverage_mask(ch1_min_cov=ch1_min_cov, ch2_min_cov=ch2_min_cov)\n self.object_mask()\n # self.cluster_k_correction()\n self.object_selection(ch1_bright_mag=ch1_bright_mag, ch2_bright_mag=ch2_bright_mag,\n selection_band_faint_mag=selection_band_faint_mag)\n self.purify_selection(ch1_ch2_color_cut=ch1_ch2_color)\n self.j_band_abs_mag()\n self.catalog_merge(catalog_cols=spt_colnames)\n self.object_separations()\n self.completeness_value()\n final_catalog = self.final_catalogs(filename=output_name, catalog_cols=output_colnames)\n if final_catalog is not None:\n return final_catalog",
"def distortion_of_kmeans_clustering(data_table):\n num_iritations = 5\n singleton_list = []\n for line in data_table:\n singleton_list.append(alg_cluster.Cluster(set([line[0]]), line[1], line[2], line[3], line[4]))\n distortion_list = []\n for num in range(20, 5, -1):\n cluster_list = kmeans_clustering(singleton_list,num, num_iritations)\n distortion = compute_distortion(data_table, cluster_list)\n distortion_list.append(distortion)\n return distortion_list\n\n#####################################################################\n# Code to load cancer data, compute a clustering and\n# visualize the results\n\n\n# def run_example():\n# \"\"\"\n# Load a data table, compute a list of clusters and\n# plot a list of clusters\n#\n# Set DESKTOP = True/False to use either matplotlib or simplegui\n# \"\"\"\n# data_table = load_data_table(DATA_3108_URL)\n# singleton_list = []\n# for line in data_table:\n# singleton_list.append(alg_cluster.Cluster(set([line[0]]), line[1], line[2], line[3], line[4]))\n num_clusters = 16\n # cluster_list = sequential_clustering(singleton_list, num_clusters)\n # print(\"Displaying\", len(cluster_list), \"sequential clusters\")\n #\n # cluster_list = alg_project3_solution.hierarchical_clustering(singleton_list, num_clusters)\n # print(\"Displaying\", len(cluster_list), \"hierarchical clusters\")\n #\n # cluster_list = alg_project3_solution.kmeans_clustering(singleton_list, num_clusters, 5)\n # print(\"Displaying\", len(cluster_list), \"k-means clusters\")\n\n # draw the clusters using matplotlib or simplegui\n #\n # if DESKTOP:\n # # alg_clusters_matplotlib.plot_clusters(data_table, cluster_list, False)\n # alg_clusters_matplotlib.plot_clusters(data_table, cluster_list, True) #add cluster centers\n\n # else:\n # alg_clusters_simplegui.PlotClusters(data_table, cluster_list) # use toggle in GUI to add cluster centers",
"def cluster_spatial_positioning(data):\n \n n_clusters = len(set(data['clusters'])-{-1}) # since -1 element denotes noice\n if n_clusters <2:\n #Setting cluster angluar features to default\n cdist=[Cluster_Relative_Distances()]\n cdist = pd.DataFrame([o.__dict__ for o in cdist])\n\n elif n_clusters >=2:\n # Here we implement two approaches for measuring distances between clustes:\n # (1) border-boder distances and (2) centroid-centroid distances. \n # We compute dispersion measures for the distances obtained. \n \n d = dict(tuple(data.groupby('clusters')))\n d.pop(-1, None)\n\n min_dist_between_clusters=np.row_stack([[np.amin(ss.distance_matrix(np.column_stack([d[i]['X'].array,d[i]['Y'].array]), \n np.column_stack([d[j]['X'].array,d[j]['Y'].array]))) for j in d.keys()] for i in d.keys()])\n min_dist_between_clusters=np.delete(list(set(np.frombuffer(min_dist_between_clusters))) ,0)\n\n cen_dist_between_clusters=ss.distance_matrix(np.row_stack([(np.mean(d[i]['X'].array),np.mean(d[i]['Y'].array)) for i in d.keys()]),\n np.row_stack([(np.mean(d[i]['X'].array),np.mean(d[i]['Y'].array)) for i in d.keys()]))\n cen_dist_between_clusters=np.delete(list(set(np.frombuffer(cen_dist_between_clusters))) ,0)\n\n (avg_bor_bor_dist_cluster,min_bor_bor_dist_cluster,max_bor_bor_dist_cluster,\n std_bor_bor_dist_cluster,CV_bor_bor_dist_cluster,CD_bor_bor_dist_cluster,\n IQR_bor_bor_dist_cluster,Quartile_CD_bor_bor_dist_cluster)= distribution_statistics(min_dist_between_clusters)\n\n (avg_cen_cen_dist_cluster,min_cen_cen_dist_cluster,max_cen_cen_dist_cluster,\n std_cen_cen_dist_cluster,CV_cen_cen_dist_cluster,CD_cen_cen_dist_cluster,\n IQR_cen_cen_dist_cluster,Quartile_CD_cen_cen_dist_cluster)= distribution_statistics(cen_dist_between_clusters)\n\n cdist = [Cluster_Relative_Distances([avg_bor_bor_dist_cluster,min_bor_bor_dist_cluster,max_bor_bor_dist_cluster,\n std_bor_bor_dist_cluster,CV_bor_bor_dist_cluster,CD_bor_bor_dist_cluster,\n IQR_bor_bor_dist_cluster,Quartile_CD_bor_bor_dist_cluster,\n avg_cen_cen_dist_cluster,min_cen_cen_dist_cluster,max_cen_cen_dist_cluster,\n std_cen_cen_dist_cluster,CV_cen_cen_dist_cluster,CD_cen_cen_dist_cluster,\n IQR_cen_cen_dist_cluster,Quartile_CD_cen_cen_dist_cluster])]\n \n cdist = pd.DataFrame([o.__dict__ for o in cdist])\n\n \n return cdist",
"def object_separations(self):\n\n for cluster_info in self._catalog_dictionary.values():\n catalog = cluster_info['catalog']\n\n # Create SkyCoord objects for all objects in the catalog as well as the SZ center\n object_coords = SkyCoord(catalog['ALPHA_J2000'], catalog['DELTA_J2000'], unit=u.degree)\n sz_center = SkyCoord(catalog['SZ_RA'][0], catalog['SZ_DEC'][0], unit=u.degree)\n\n # Calculate the angular separations between the objects and the SZ center in arcminutes\n separations_arcmin = object_coords.separation(sz_center).to(u.arcmin)\n\n # Compute the r500 radius for the cluster\n r500 = (3 * catalog['M500'][0] * u.Msun /\n (4 * np.pi * 500 * self._cosmo.critical_density(catalog['REDSHIFT'][0]).to(\n u.Msun / u.Mpc ** 3))) ** (1 / 3)\n\n # Convert the angular separations into physical separations relative to the cluster's r500 radius\n separations_r500 = (separations_arcmin / r500\n * self._cosmo.kpc_proper_per_arcmin(catalog['REDSHIFT'][0]).to(u.Mpc / u.arcmin))\n\n # Add our new columns to the catalog\n catalog['R500'] = r500\n catalog['RADIAL_SEP_R500'] = separations_r500\n catalog['RADIAL_SEP_ARCMIN'] = separations_arcmin\n\n # Update the catalog in the data structure\n cluster_info['catalog'] = catalog"
] | [
"0.5848657",
"0.5763144",
"0.5756294",
"0.56484246",
"0.55037874",
"0.5455271",
"0.5440703",
"0.53846854",
"0.53790927",
"0.5324826",
"0.5324202",
"0.5316579",
"0.5308012",
"0.52924293",
"0.529075",
"0.5288194",
"0.52846956",
"0.52782357",
"0.52688426",
"0.5266597",
"0.52629834",
"0.5238452",
"0.52051353",
"0.51960534",
"0.51660585",
"0.5165303",
"0.5164256",
"0.5159074",
"0.5155648",
"0.51462865"
] | 0.76026565 | 0 |
Generates a binary good pixel map using the coverage maps. Creates a new FITS image where every pixel has a value of `1` if the coverage values in both IRAC bands are above the given thresholds and `0` otherwise. | def coverage_mask(self, ch1_min_cov, ch2_min_cov):
for cluster_id, cluster_info in self._catalog_dictionary.items():
# Array element names
irac_ch1_cov_path = cluster_info['ch1_cov_path']
irac_ch2_cov_path = cluster_info['ch2_cov_path']
# Read in the two coverage maps, also grabbing the header from the Ch1 map.
irac_ch1_cover, header = fits.getdata(irac_ch1_cov_path, header=True, ignore_missing_end=True)
irac_ch2_cover = fits.getdata(irac_ch2_cov_path, ignore_missing_end=True)
# Create the mask by setting pixel value to 1 if the pixel has coverage above the minimum coverage value in
# both IRAC bands.
combined_cov = np.logical_and((irac_ch1_cover >= ch1_min_cov), (irac_ch2_cover >= ch2_min_cov)).astype(int)
# For naming, we will use the official SPT ID name for the cluster
spt_id = self._spt_catalog['SPT_ID'][cluster_info['SPT_cat_idx']]
# Write out the coverage mask.
mask_pathname = f'{self._mask_dir}/{spt_id}_cov_mask{ch1_min_cov}_{ch2_min_cov}.fits'
combined_cov_hdu = fits.PrimaryHDU(combined_cov, header=header)
combined_cov_hdu.writeto(mask_pathname, overwrite=True)
# Append the new coverage mask path name and both the catalog and the masking flag from cluster_info
# to the new output list.
cluster_info['cov_mask_path'] = mask_pathname | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def map_iou(boxes_true, boxes_pred, scores, thresholds = [0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75]):\n\n # According to the introduction, images with no ground truth bboxes will not be\n # included in the map score unless there is a false positive detection (?)\n\n # return None if both are empty, don't count the image in final evaluation (?)\n if len(boxes_true) == 0 and len(boxes_pred) == 0:\n return None\n\n assert boxes_true.shape[1] == 4 or boxes_pred.shape[1] == 4, \"boxes should be 2D arrays with shape[1]=4\"\n if len(boxes_pred):\n assert len(scores) == len(boxes_pred), \"boxes_pred and scores should be same length\"\n # sort boxes_pred by scores in decreasing order\n boxes_pred = boxes_pred[np.argsort(scores)[::-1], :]\n\n map_total = 0\n\n # loop over thresholds\n for t in thresholds:\n matched_bt = set()\n tp, fn = 0, 0\n for i, bt in enumerate(boxes_true):\n matched = False\n for j, bp in enumerate(boxes_pred):\n miou = calculate_iou(bt, bp)\n if miou >= t and not matched and j not in matched_bt:\n matched = True\n tp += 1 # bt is matched for the first time, count as TP\n matched_bt.add(j)\n if not matched:\n fn += 1 # bt has no match, count as FN\n\n fp = len(boxes_pred) - len(matched_bt) # FP is the bp that not matched to any bt\n m = tp / (tp + fn + fp)\n map_total += m\n\n return map_total / len(thresholds)",
"def apply_threshold(heatmap, threshold):\n # Zero out pixels below the threshold\n heatmap[heatmap <= threshold] = 0\n # Return thresholded map\n return heatmap",
"def apply_threshold(heatmap, threshold):\n # Zero out pixels below the threshold\n heatmap[heatmap <= threshold] = 0\n # Return thresholded map\n return heatmap",
"def apply_threshold(heatmap, threshold):\n # Zero out pixels below the threshold\n heatmap[heatmap <= threshold] = 0\n # Return thresholded map\n return heatmap",
"def CC_2Dfilter(\n h5path_labels,\n map_propnames,\n criteria,\n h5path_int='',\n slicedim=0,\n usempi=False,\n outputfile='',\n protective=False,\n ):\n\n (min_area,\n max_area,\n max_intensity_mb,\n max_eccentricity,\n min_solidity,\n min_euler_number,\n min_extent) = criteria\n\n # prepare mpi\n mpi_info = utils.get_mpi_info(usempi)\n\n # TODO: check output path\n\n # open data for reading\n h5file_mm, ds_mm, _, _ = utils.h5_load(h5path_labels, comm=mpi_info['comm'])\n if h5path_int:\n h5file_mb, ds_mb, _, _ = utils.h5_load(h5path_int, comm=mpi_info['comm'])\n else:\n ds_mb = None\n # mask used as intensity image in mean_intensity criterium\n\n # get the maximum labelvalue in the input\n root = h5path_labels.split('.h5')[0]\n maxlabel = get_maxlabel(root, ds_mm)\n\n # prepare mpi\n n_slices = ds_mm.shape[slicedim]\n series = np.array(range(0, n_slices), dtype=int)\n if mpi_info['enabled']:\n series = utils.scatter_series(mpi_info, series)[0]\n if mpi_info['rank'] == 0:\n fws_reduced = np.zeros((maxlabel + 1, len(map_propnames)),\n dtype='float')\n else:\n fws_reduced = None\n\n fws = np.zeros((maxlabel + 1, len(map_propnames)),\n dtype='float')\n\n mapall = criteria.count(None) == len(criteria)\n\n # pick labels observing the constraints\n go2D = ((max_eccentricity is not None) or\n (min_solidity is not None) or\n (min_euler_number is not None) or\n mapall)\n if go2D:\n\n for i in series:\n slcMM = utils.get_slice(ds_mm, i, slicedim)\n if h5path_int:\n slcMB = utils.get_slice(ds_mb, i, slicedim) # , 'bool'\n else:\n slcMB = None\n fws = check_constraints(slcMM, fws, map_propnames,\n criteria, slcMB, mapall)\n if mpi_info['enabled']:\n mpi_info['comm'].Reduce(fws, fws_reduced, op=MPI.MAX, root=0)\n else:\n fws_reduced = fws\n\n else:\n\n if mpi_info['rank'] == 0:\n fws = check_constraints(ds_mm, fws, map_propnames,\n criteria, ds_mb, mapall)\n fws_reduced = fws\n\n # write the forward maps to a numpy vector\n if mpi_info['rank'] == 0:\n slc = int(n_slices/2)\n slcMM = ds_mm[slc, :, :]\n slcMB = ds_mb[slc, :, :] if h5path_int else None\n datatypes = get_prop_datatypes(slcMM, map_propnames, slcMB)\n for i, propname in enumerate(map_propnames):\n root = outputfile.split('.h5')[0]\n nppath = '{}_{}.npy'.format(root, propname)\n outarray = np.array(fws_reduced[:, i], dtype=datatypes[i])\n np.save(nppath, outarray)\n\n # close and return\n h5file_mm.close()\n if h5path_int:\n h5file_mb.close()\n\n if mpi_info['rank'] == 0:\n return outarray",
"def classify(self, amaps, threshold=0.1, mask=None):\r\n self.n_classes = amaps.shape[2]\r\n if type(threshold) == float:\r\n self.threshold = 1 - threshold\r\n if type(threshold) == list:\r\n self.threshold = [1 - x for x in threshold]\r\n h, w, n_maps = amaps.shape\r\n self.h, self.w, self.n_maps = amaps.shape\r\n amaps = np.reshape(amaps, (w*h, n_maps))\r\n amaps = _normalize(amaps)\r\n if self.n_classes == 1:\r\n cmap = self._class_single_pixel(amaps, self.threshold)\r\n else:\r\n cmap = self._dispatch(amaps, self.threshold)\r\n self.cmap = np.reshape(cmap, (h, w))\r\n return self.cmap",
"def binarize(self, image, threshold):\n\n bin_img = image.copy()\n [h, w] = bin_img.shape\n opt_threshold = threshold\n print(opt_threshold)\n for row in range(h):\n for col in range(w):\n if bin_img[row, col] > opt_threshold: #greater than threshld white(general)\n bin_img[row, col] = 255 #0 instead of 1\n else: #less than threshold black(general)\n bin_img[row, col] = 0 #0 instead of 1\n\n\n #reverse the cases\n\n return bin_img",
"def apply_threshold(heatmap, threshold):\n # Zero out pixels below the threshold\n thresh_heatmap = np.copy(heatmap)\n thresh_heatmap[heatmap <= threshold] = 0\n # Return thresholded map\n return thresh_heatmap",
"def generateBounds(regionFilename, latitudeRange, longitudeRange): \n rastData = Dataset(regionFilename)\n\n #setting up values for raster data\n latsRast = np.array(rastData[\"lat\"][:])\n lonsRast = np.array(rastData[\"lon\"][:])\n regionOfInterest = np.array(rastData[\"Band1\"][:][:])\n\n\n regionArray = np.zeros((len(longitudeRange),len(latitudeRange)))\n\n\n for lat in latitudeRange:\n closestLatIndex = np.where( np.abs(latsRast-lat) == np.abs(latsRast-lat).min())[0][0]\n for lon in longitudeRange:\n closestLonIndex = np.where( np.abs(lonsRast-lon) == np.abs(lonsRast-lon).min())[0][0]\n\n #If lat long of MERRA data box is offshore or in region (values 1 in raster) set them equal to 1 for master Array, else they are left as zeros\n if (regionOfInterest[closestLatIndex][closestLonIndex] == 1):\n latIndex = np.where(latitudeRange == lat)[0][0]\n lonIndex = np.where(longitudeRange == lon)[0][0]\n regionArray[lonIndex][latIndex] = 1\n\n\n #for debugging\n ''' \n ax = sns.heatmap(regionArray)\n plt.show()\n '''\n return regionArray",
"def apply_threshold(heatmap, threshold):\n heatmap[heatmap <= threshold] = 0\n\n return heatmap",
"def find_roads(\n probability_map,\n *,\n input_threshold=0.3,\n max_roads=None,\n min_strength=0.17, #0.2,\n num_angles=720,\n roads_min_angle=np.pi/8,\n roads_min_distance=50,\n debugimage=None, # for debugging ...\n debugprint=None): # for debugging ...\n\n # shorthand\n im = probability_map\n\n # the angles to be used in the Hough transform\n theta = np.linspace(-np.pi/2, np.pi/2, num_angles)\n\n # normalize almost anything to grayscale\n if im.ndim == 3:\n if im.shape[2] == 4:\n im = im[:,:,:3] # throw away alpha\n im = im.mean(axis=2) # convert RGB to grayscale\n\n if debugimage: debugimage('original', im, 0, 1, 'jet')\n\n assert im.ndim == 2\n\n if debugimage:\n hspace, _, _ = hough_line(im, theta)\n debugimage('original_hough_hspace', hspace)\n\n # create monochrome/binary input map\n im[im >= input_threshold] = 1\n im[im < input_threshold] = 0\n\n if debugimage: debugimage('threshold_applied', im)\n\n # Hough transform\n hspace, angles, distances = hough_line(im, theta)\n\n hspace = np.asarray(hspace, dtype=np.float32)\n hspace /= hspace.max() # normalize\n\n if debugimage: debugimage('hough_hspace', hspace)\n\n # convolution filters, rectangular, tuned for widths of 12, 32, 48 pixels\n w12 = np.concatenate([-np.ones((6)), np.ones((12)), -np.ones((6))])\n w32 = np.concatenate([-np.ones((16)), np.ones((32)), -np.ones((16))])\n w48 = np.concatenate([-np.ones((24)), np.ones((48)), -np.ones((24))])\n\n # convolve\n im12 = ndi.filters.convolve1d(hspace, w12, axis=0)\n im32 = ndi.filters.convolve1d(hspace, w32, axis=0)\n im48 = ndi.filters.convolve1d(hspace, w48, axis=0)\n\n # normalize signal strengths for different road widths\n im12 /= 12\n im32 /= 32\n im48 /= 48\n\n ca = (None, None, 'jet',)\n if debugimage: debugimage('hough_hspace_conv12', im12, *ca)\n if debugimage: debugimage('hough_hspace_conv32', im32, *ca)\n if debugimage: debugimage('hough_hspace_conv48', im48, *ca)\n if debugimage:\n debugimage('hough_hspace_combined',\n np.hstack([im12, im32, im48]), *ca)\n\n # compute possible roads of all widths, sorted by signal strength\n seq = np.stack((im12, im32, im48)).flatten()\n sor = np.argsort(seq)\n roads = np.column_stack((\n seq,\n np.tile(np.tile(angles, distances.shape[0]), 3),\n np.tile(np.repeat(distances, angles.shape[0]), 3),\n np.repeat([12, 32, 48], distances.shape[0] * angles.shape[0])\n ))[sor][::-1]\n\n # columns: strength, angle, distance, width\n found_roads = np.asarray([]).reshape(0, 4)\n\n # find as many as strong roads as desired, while dropping roads that are too\n # similar to roads already found (non-max suppression)\n for i in range(roads.shape[0]):\n if roads[i,0] < min_strength:\n break\n a = roads[i,1]\n d = roads[i,2]\n close = (\n np.logical_or(\n np.logical_and(\n np.abs(found_roads[:,1]-a) < roads_min_angle,\n np.abs(found_roads[:,2]-d) < roads_min_distance),\n np.logical_and(\n np.pi - np.abs(found_roads[:,1]-a) < roads_min_angle,\n np.abs(found_roads[:,2]+d) < roads_min_distance)))\n if not np.any(close):\n found_roads = np.vstack((found_roads, roads[i]))\n if max_roads is not None and found_roads.shape[0] >= max_roads:\n break\n\n return found_roads, im.shape",
"def _gtBinmap(self):\n if os.path.isfile(self.outbinmap) and (not self.clobber):\n print(\"\\t=== '{}' already exists ===\".format(self.outbinmap))\n return\n else:\n if not os.path.isfile(self.outmktime):\n self._gtMktime()\n\n # Image width must be comprised within the acceptance cone\n imWidth = int( np.floor(self.rad* 2**(0.5)) ) # deg\n imWipix = int(imWidth / self.binsz)\n\n # Coordinate system\n if self.csys == 'GAL':\n center_icrs = SkyCoord(ra=self.ra*u.degree, dec=self.dec*u.degree, frame='icrs')\n self.ra = center_icrs.galactic.l.deg\n self.dec = center_icrs.galactic.b.deg\n\n os.popen(\"gtbin evfile={} scfile=none outfile={} algorithm=CMAP emin={}\\\n emax={} nxpix={} nypix={} binsz={} coordsys={} xref={} yref={} axisrot=0\\\n proj=AIT\".format(self.outmktime, self.outbinmap, self.emin, self.emax,\n imWipix, imWipix, self.binsz, self.csys, self.ra, self.dec))\n\n if self.csys == 'GAL':\n self.ra = center_icrs.ra.deg\n self.dec = center_icrs.dec.deg\n return",
"def build_r_map(input_file: str, output_file: str, threshold: float):\n\n DataSiPM = db.DataSiPMsim_only('petalo', 0) # full body PET\n DataSiPM_idx = DataSiPM.set_index('SensorID')\n\n try:\n sns_response = pd.read_hdf(input_file, 'MC/sns_response')\n except ValueError:\n print(f'File {input_file} not found')\n exit()\n except OSError:\n print(f'File {input_file} not found')\n exit()\n except KeyError:\n print(f'No object named MC/sns_response in file {input_file}')\n exit()\n print(f'Analyzing file {input_file}')\n\n sel_df = rf.find_SiPMs_over_threshold(sns_response, threshold)\n\n particles = pd.read_hdf(input_file, 'MC/particles')\n hits = pd.read_hdf(input_file, 'MC/hits')\n events = particles.event_id.unique()\n\n true_r1, true_r2 = [], []\n var_phi1, var_phi2 = [], []\n var_z1, var_z2 = [], []\n\n touched_sipms1, touched_sipms2 = [], []\n\n for evt in events:\n\n ### Select photoelectric events only\n evt_parts = particles[particles.event_id == evt]\n evt_hits = hits [hits .event_id == evt]\n select, true_pos = mcf.select_photoelectric(evt_parts, evt_hits)\n if not select: continue\n\n sns_resp = sel_df[sel_df.event_id == evt]\n if len(sns_resp) == 0: continue\n\n _, _, pos1, pos2, q1, q2 = rf.assign_sipms_to_gammas(sns_resp, true_pos, DataSiPM_idx)\n\n if len(pos1) > 0:\n pos_phi = rf.from_cartesian_to_cyl(np.array(pos1))[:,1]\n _, var_phi = rf.phi_mean_var(pos_phi, q1)\n\n pos_z = np.array(pos1)[:,2]\n mean_z = np.average(pos_z, weights=q1)\n var_z = np.average((pos_z-mean_z)**2, weights=q1)\n r = np.sqrt(true_pos[0][0]**2 + true_pos[0][1]**2)\n\n var_phi1 .append(var_phi)\n var_z1 .append(var_z)\n touched_sipms1.append(len(pos1))\n true_r1 .append(r)\n\n else:\n var_phi1 .append(1.e9)\n var_z1 .append(1.e9)\n touched_sipms1.append(1.e9)\n true_r1 .append(1.e9)\n\n if len(pos2) > 0:\n pos_phi = rf.from_cartesian_to_cyl(np.array(pos2))[:,1]\n _, var_phi = rf.phi_mean_var(pos_phi, q2)\n\n pos_z = np.array(pos2)[:,2]\n mean_z = np.average(pos_z, weights=q2)\n var_z = np.average((pos_z-mean_z)**2, weights=q2)\n r = np.sqrt(true_pos[1][0]**2 + true_pos[1][1]**2)\n\n var_phi2 .append(var_phi)\n var_z2 .append(var_z)\n touched_sipms2.append(len(pos2))\n true_r2 .append(r)\n\n else:\n var_phi2 .append(1.e9)\n var_z2 .append(1.e9)\n touched_sipms2.append(1.e9)\n true_r2 .append(1.e9)\n\n a_true_r1 = np.array(true_r1)\n a_true_r2 = np.array(true_r2)\n a_var_phi1 = np.array(var_phi1)\n a_var_phi2 = np.array(var_phi2)\n a_var_z1 = np.array(var_z1)\n a_var_z2 = np.array(var_z2)\n\n a_touched_sipms1 = np.array(touched_sipms1)\n a_touched_sipms2 = np.array(touched_sipms2)\n\n\n np.savez(output_file, a_true_r1=a_true_r1, a_true_r2=a_true_r2, a_var_phi1=a_var_phi1, a_var_phi2=a_var_phi2, a_var_z1=a_var_z1, a_var_z2=a_var_z2, a_touched_sipms1=a_touched_sipms1, a_touched_sipms2=a_touched_sipms2)",
"def binarize(self, image, threshold):\n bin_img = image.copy()\n for i in range(image.shape[0]):\n for j in range(image.shape[1]):\n if image[i, j] >= threshold:\n bin_img[i, j] = 0\n else:\n bin_img[i, j] = 255\n return bin_img",
"def iou_bitmap(y_true, y_pred, verbose=False):\n EPS = np.finfo(float).eps\n\n # Make sure each pixel was predicted e.g. turn probability into prediction\n if y_true.dtype in [np.float32, np.float64]:\n y_true = y_true.round().astype(bool)\n\n if y_pred.dtype in [np.float32, np.float64]:\n y_pred = y_pred.round().astype(bool)\n\n # Reshape to 1d\n y_true = y_true.ravel()\n y_pred = y_pred.ravel()\n\n # Compute intersection and union\n intersection = np.sum(y_true * y_pred)\n sum_ = np.sum(y_true + y_pred)\n jac = (intersection + EPS) / (sum_ - intersection + EPS)\n\n if verbose:\n print('Intersection:', intersection)\n print('Union:', sum_ - intersection)\n\n return jac",
"def calculate_MAP(self):\n testing_images = open('./digitdata/testimages', 'r')\n with testing_images as ti:\n data = list(csv.reader(ti))\n data = [i for i in data if i]\n count = 0\n #loop through all the test images\n for j in range(0,1000):\n classification_dict = {0:0,1:0,2:0,3:0,4:0,5:0,6:0,7:0,8:0,9:0} \n for l in range(0,28):\n coord = count + l\n for w in range(0,28):\n if data[coord][0][w] == \"+\":\n #iterate through each class. z is the class [0-9]\n for z in range(0,10):\n classification_dict[z] += math.log(self.class_probabilities[z][l][w][0]) \n elif data[coord][0][w] == \"#\":\n for z in range(0,10):\n classification_dict[z] += math.log(self.class_probabilities[z][l][w][1])\n elif data[coord][0][w] == \" \":\n for z in range(0,10):\n classification_dict[z] += math.log(self.class_probabilities[z][l][w][2])\n count += 28\n self.solutions.append(max(classification_dict, key=classification_dict.get))",
"def define_areas(\n pixel_filtered_map: np.ndarray, district_heating_zone_threshold: float\n):\n structure = np.ones((3, 3)).astype(int)\n expanded_map = binary_dilation(input=pixel_filtered_map, structure=structure)\n eroded_map = binary_erosion(input=expanded_map, structure=structure)\n labels_array, n_label = measurements.label(\n input=eroded_map,\n structure=structure,\n )\n\n # labels start from 1, therefore the array size is 'num_labels_array + 1'\n areas_potential = np.zeros((n_label + 1)).astype(float)\n if n_label > 0:\n end, start, sorted_array = get_browsing_indexes(\n labels_array=labels_array,\n pixel_filtered_map=pixel_filtered_map,\n n_label=n_label,\n )\n\n for i, (start_index, end_index) in enumerate(zip(start, end)):\n area = sorted_array[start_index:end_index, 3]\n area_potential = np.sum(area)\n if area_potential >= district_heating_zone_threshold:\n # i+1 because labeling starts from 1 and not from 0\n # factor 0.001 for conversion from MWh/ha to GWh/ha\n areas_potential[i + 1] = np.around(np.sum(area_potential) / 1000, 2)\n\n areas = areas_potential[labels_array]\n filtered_map = pixel_filtered_map * (areas > 0).astype(int)\n total_potential = np.sum(areas_potential)\n return areas, filtered_map, total_potential, areas_potential[1:]",
"def mask_sparse(self, threshold=10):\n self.MaskPrefix = 's' + self.MaskPrefix\n print('Masking pixels that do not have at least {0} coherent values'.format(threshold))\n # each pixel assigned an integer corresponding to # of igrams where coherent\n # NOTE: save coverage map if it doesn't exist already\n coverage = self.get_coverage()\n sparse = ma.masked_less(coverage, threshold)\n for ig in self.Set:\n igram = self.load_ma(ig)\n igram[sparse.mask] = ma.masked\n self.save_ma(ig, igram)\n print('Done')",
"def apply_threshold(heatmap, threshold):\n heatmap_thresh = np.copy(heatmap)\n ind = np.where(np.logical_and(heatmap_thresh>1, heatmap_thresh<=threshold))\n heatmap_thresh[ind] = 0\n #heatmap_thresh[(heatmap_thresh <= threshold)] = 0\n return heatmap_thresh",
"def get_occupany_map(img: np.ndarray, output_path: str, output_size: tuple):\n\n (rows, cols) = img.shape\n\n # the houseexpo layout has black-background so perform inverse transform\n _, thresh_img = cv.threshold(img, 1, 255, cv.THRESH_BINARY_INV)\n\n # find and draw contours i.e, borders\n # reference: https://docs.opencv.org/3.4/d4/d73/tutorial_py_contours_begin.html\n contours, hierarchy = cv.findContours(thresh_img, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)\n cv.drawContours(thresh_img, contours, contourIdx=1, color=100, thickness=3)\n\n # switch colors => 205: unknown, 255: free, 0: occupied\n thresh_img = np.where(thresh_img==255, np.uint8(205), thresh_img) # unknown\n thresh_img = np.where(thresh_img==0, np.uint8(255), thresh_img) # free\n thresh_img = np.where(thresh_img==100, np.uint8(0), thresh_img) # obstacle\n\n # add padding to borders to make the output have equal width and height\n padding = max(rows, cols) + 50\n thresh_img = cv.copyMakeBorder(thresh_img, (padding-rows)//2, (padding-rows)//2, \\\n (padding-cols)//2, (padding-cols)//2, cv.BORDER_CONSTANT, value=205)\n thresh_img = cv.resize(thresh_img, output_size)\n\n # store the image\n cv.imwrite(output_path, thresh_img)",
"def brightness_mask(cmb_map, synch_template, synch_freq, dust_template, dust_freq, mask_file, regions_file, threshold=10.0):\n\n cmb_fluctuation = np.std(cmb_map)\n synch70 = synch_template*(70.0/synch_freq)**(-3.0)\n gamma = h/(kb*19.4)\n dust70 = dust_template*(70.0/dust_freq)**2.6*(np.exp(gamma*dust_freq*1.0e9) - 1.0)/(np.exp(gamma*70.0e9) - 1.0)\n\n regions = hp.read_map(regions_file)\n region_nums = [i for i in range(1, int(np.amax(regions)) + 1)]\n\n mask = np.ones(hp.nside2npix(hp.get_nside(cmb_map)))\n\n synch_idx = np.where(synch70>=threshold*cmb_fluctuation)[0]\n dust_idx = np.where(dust70>=threshold*cmb_fluctuation)[0]\n mask[synch_idx] = 0\n mask[dust_idx] = 0\n\n hp.write_map(mask_file, mask, overwrite=True)\n\n return mask",
"def label_nonzero_pixel_count_ratio_map(label_map1: Image, label_map2: Image, overlap_count_map_destination: Image = None) -> Image:\n from .._tier1 import set_column\n from .._tier1 import replace_intensities\n from .._tier9 import statistics_of_background_and_labelled_pixels, push_regionprops_column\n\n binary = label_map2 > 0\n\n regionprops = statistics_of_background_and_labelled_pixels(binary, label_map1)\n\n values_vector = push_regionprops_column(regionprops, 'mean_intensity')\n set_column(values_vector, 0, 0)\n\n replace_intensities(label_map1, values_vector, overlap_count_map_destination)\n\n return overlap_count_map_destination",
"def create_binary_mask(self, type='negative'):\n if not self.thresh_map_name:\n return None\n mode = self.thresh_mode\n limits = self.thresh_limits\n map = self.map_scalars\n if mode=='mask lower':\n m = (map < limits[0]) if type=='negative' else (map >= limits[0])\n elif mode=='mask higher':\n m = (map > limits[1]) if type=='negative' else (map <= limits[1])\n elif mode=='mask between':\n m = ( (map > limits[0]) & (map < limits[1]) ) \\\n if type=='negative' \\\n else ( (map <= limits[0]) | (map >= limits[1]) )\n else: # mask outside\n m = ( (map < limits[0]) | (map > limits[1]) ) \\\n if type=='negative' \\\n else ( (map >= limits[0]) & (map <= limits[1]) )\n return m",
"def update_map(self, boundaries):\n image = Image.open(self.image_file)\n update_pixels = ImageDraw.Draw(image)\n for i in range(len(boundaries) - 1):\n update_pixels.point(boundaries[i], fill=self.path_color)\n if self.season ==\"winter\":\n image.save(\"temp_winter.png\")\n elif self.season ==\"spring\":\n image.save(\"temp_spring.png\")\n else:\n image.save(\"temp_fall.png\")",
"def pixel2mask(image: np.ndarray, low: float, high: float) -> np.ndarray:\n mask = image > low\n labels = smeasure.label(mask, background=0)\n for region in smeasure.regionprops(label_image=labels, intensity_image=image):\n if region.max_intensity < high:\n mask[region.coords[:, 0], region.coords[:, 1]] = 0\n\n return mask",
"def disp_map(disp):\n map = np.array([\n [0,0,0,114],\n [0,0,1,185],\n [1,0,0,114],\n [1,0,1,174],\n [0,1,0,114],\n [0,1,1,185],\n [1,1,0,114],\n [1,1,1,0]\n ])\n # grab the last element of each column and convert into float type, e.g. 114 -> 114.0\n # the final result: [114.0, 185.0, 114.0, 174.0, 114.0, 185.0, 114.0]\n bins = map[0:map.shape[0]-1,map.shape[1] - 1].astype(float)\n\n # reshape the bins from [7] into [7,1]\n bins = bins.reshape((bins.shape[0], 1))\n\n # accumulate element in bins, and get [114.0, 299.0, 413.0, 587.0, 701.0, 886.0, 1000.0]\n cbins = np.cumsum(bins)\n\n # divide the last element in cbins, e.g. 1000.0\n bins = bins / cbins[cbins.shape[0] -1]\n\n # divide the last element of cbins, e.g. 1000.0, and reshape it, final shape [6,1]\n cbins = cbins[0:cbins.shape[0]-1] / cbins[cbins.shape[0] -1]\n cbins = cbins.reshape((cbins.shape[0], 1))\n\n # transpose disp array, and repeat disp 6 times in axis-0, 1 times in axis-1, final shape=[6, Height*Width]\n ind = np.tile(disp.T, (6,1))\n tmp = np.tile(cbins, (1, disp.size))\n\n # get the number of disp's elements bigger than each value in cbins, and sum up the 6 numbers\n b = (ind > tmp).astype(int)\n s = np.sum(b, axis=0)\n\n bins = 1 / bins\n\n # add an element 0 ahead of cbins, [0, cbins]\n t = cbins\n cbins = np.zeros((cbins.size+1,1))\n cbins[1:] = t\n\n # get the ratio and interpolate it\n disp = (disp - cbins[s]) * bins[s]\n disp = map[s,0:3] * np.tile(1 - disp,(1,3)) + map[s + 1,0:3] * np.tile(disp,(1,3))\n\n return disp",
"def test_binary_mapping(load_database):\n dbf = load_database()\n my_phases = ['LIQUID', 'FCC_A1', 'HCP_A3', 'AL5FE2',\n 'AL2FE', 'AL13FE4', 'AL5FE4']\n comps = ['AL', 'FE', 'VA']\n conds = {v.T: (1200, 1300, 50), v.P: 101325, v.X('AL'): (0, 1, 0.2)}\n zpf_boundaries = map_binary(dbf, comps, my_phases, conds)\n num_boundaries = len(zpf_boundaries.all_compsets)\n assert num_boundaries > 0\n # calling binplot again can add more boundaries\n map_binary(dbf, comps, my_phases, conds, boundary_sets=zpf_boundaries)\n assert len(zpf_boundaries.all_compsets) == 2*num_boundaries",
"def create_binary(image):\n #Channel 1 of the output image highlights the area consisting of the nuclei\n channel1=image[:,:,0]\n \n # Channel 2 of the output image consists of the boundaries between adjoining nuclei\n channel2=image[:,:,1]\n _,channel1=cv2.threshold(channel1, 127,255,cv2.THRESH_BINARY) \n _,channel2=cv2.threshold(channel2, 127,255,cv2.THRESH_BINARY) \n \n #Subtracting channel 2 from channel 1 to get the desired output\n img1=channel1-channel2\n \n return img1",
"def countmap(band,skypos,tranges,skyrange,width=False,height=False,\n\t\t\t verbose=0,tscale=1000.,memlight=False,hdu=False,retries=20):\n\timsz = gxt.deg2pix(skypos,skyrange)\n\tcount = np.zeros(imsz)\n\tfor trange in tranges:\n\t\t# If memlight is requested, break the integration into\n\t\t# smaller chunks.\n\t\tstep = memlight if memlight else trange[1]-trange[0]\n\t\tfor i in np.arange(trange[0],trange[1],step):\n\t\t\tt0,t1=i,i+step\n\t\t\tif verbose:\n\t\t\t\tprint_inline('Coadding '+str(t0)+' to '+str(t1))\n\t\t\tevents = gQuery.getArray(gQuery.rect(band,skypos[0],skypos[1],t0,t1,\n\t\t\t\t\t\t\t\t\t\t\t\t skyrange[0],skyrange[1]),\n\t\t\t\t\t\t\t\t\t verbose=verbose,retries=retries)\n\n\t\t\t# Check that there is actually data here.\n\t\t\tif not events:\n\t\t\t\tif verbose>1:\n\t\t\t\t\tprint \"No data in \"+str([t0,t1])\n\t\t\t\tcontinue\n\n\t\t\ttimes = np.array(events,dtype='float64')[:,0 ]/tscale\n\t\t\tcoo =\tnp.array(events,dtype='float64')[:,1:]\n\n\t\t\t# If there's no data, return a blank image.\n\t\t\tif len(coo)==0:\n\t\t\t\tif verbose:\n\t\t\t\t\tprint 'No data in this frame: '+str([t0,t1])\n\t\t\t\tcontinue\n\n\t\t\t# Define World Coordinate System (WCS)\n\t\t\twcs = define_wcs(skypos,skyrange,width=False,height=False)\n\n\t\t\t# Map the sky coordinates onto the focal plane\n\t\t\tfoc = wcs.sip_pix2foc(wcs.wcs_world2pix(coo,1),1)\n\n\t\t\t# Bin the events into actual image pixels\n\t\t\tH,xedges,yedges=np.histogram2d(foc[:,1]-0.5,foc[:,0]-0.5,\n\t\t\t\t\t\t\t\tbins=imsz,range=([ [0,imsz[0]],[0,imsz[1]] ]))\n\t\t\tcount += H\n\n\treturn count",
"def classify_feature_image(input_img, feature_colors, pix_cutoff=50):\n result = 'negative'\n for pic_val, num in pic_val_count(input_img):\n for min_rgb, max_rgb in feature_colors:\n if (((min_rgb[0] <= pic_val[0] <= max_rgb[0])\n &(min_rgb[1] <= pic_val[1] <= max_rgb[1])\n &(min_rgb[2] <= pic_val[2] <= max_rgb[2])) & (num > pix_cutoff)):\n result = \"positive\"\n return result"
] | [
"0.6125997",
"0.5634476",
"0.5634476",
"0.5634476",
"0.56280094",
"0.5526669",
"0.54829973",
"0.54744196",
"0.54724973",
"0.54532164",
"0.54526675",
"0.54316235",
"0.53706366",
"0.5351472",
"0.53411156",
"0.5336489",
"0.53259146",
"0.53125376",
"0.5258427",
"0.5242191",
"0.5229852",
"0.5178276",
"0.5166135",
"0.5165707",
"0.5130909",
"0.512472",
"0.51146513",
"0.5110191",
"0.5107126",
"0.5105732"
] | 0.58262676 | 1 |
Performs additional masking on the good pixel maps for requested clusters. If a cluster has a DS9 regions file present in the directory specified as `region_file_dir` at initialization, then we read in the file and set pixels within the shapes present in the file to `0`. Notes: The allowable shapes in the regions file are `circle`, `box`, and `ellipse`; an unexpected shape will raise a KeyError. Raises: KeyError if the shape in the regions file is not one of the allowable shapes. | def object_mask(self):
# Region file directory files
if isinstance(self._region_file_dir, list):
reg_files = {self._keyfunct(f): f for f in chain.from_iterable(glob.glob(f'{reg_dir}/*.reg')
for reg_dir in self._region_file_dir)}
else:
reg_files = {self._keyfunct(f): f for f in glob.glob(f'{self._region_file_dir}/*.reg')}
# Select out the IDs of the clusters needing additional masking
clusters_to_mask = set(reg_files).intersection(self._catalog_dictionary)
for cluster_id in clusters_to_mask:
cluster_info = self._catalog_dictionary.get(cluster_id, None)
region_file = reg_files.get(cluster_id, None)
pixel_map_path = cluster_info['cov_mask_path']
# Read in the coverage mask data and header.
good_pix_mask, header = fits.getdata(pixel_map_path, header=True, ignore_missing_end=True, memmap=False)
# Read in the WCS from the coverage mask we made earlier.
w = WCS(header)
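            # If the WCS has no rotation (zero off-diagonal term in the pixel scale matrix), the pixel scale can be
            # read off the diagonal directly; otherwise diagonalize the CD matrix first to recover the per-axis scale.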
try:
assert w.pixel_scale_matrix[0, 1] == 0.
pix_scale = (w.pixel_scale_matrix[1, 1] * w.wcs.cunit[1]).to(u.arcsec).value
except AssertionError:
cd = w.pixel_scale_matrix
_, eig_vec = np.linalg.eig(cd)
cd_diag = np.linalg.multi_dot([np.linalg.inv(eig_vec), cd, eig_vec])
pix_scale = (cd_diag[1, 1] * w.wcs.cunit[1]).to(u.arcsec).value
# Open the regions file and get the lines containing the shapes.
with open(region_file, 'r') as region:
objs = [ln.strip() for ln in region
if ln.startswith('circle') or ln.startswith('box') or ln.startswith('ellipse')]
# For each shape extract the defining parameters and define a path region.
shapes_to_mask = []
for mask in objs:
# For circle shapes we need the center coordinate and the radius.
if mask.startswith('circle'):
# Parameters of circle shape are as follows:
# params[0] : region center RA in degrees
# params[1] : region center Dec in degrees
# params[2] : region radius in arcseconds
params = np.array(re.findall(r'[+-]?\d+(?:\.\d+)?', mask), dtype=np.float64)
# Convert the center coordinates into pixel system.
# "0" is to correct the pixel coordinates to the right origin for the data.
cent_xy = w.wcs_world2pix(params[0], params[1], 0)
# Generate the mask shape.
shape = Path.circle(center=cent_xy, radius=params[2] / pix_scale)
# For the box we'll need...
elif mask.startswith('box'):
# Parameters for box shape are as follows:
# params[0] : region center RA in degrees
# params[1] : region center Dec in degrees
# params[2] : region width in arcseconds
# params[3] : region height in arcseconds
# params[4] : rotation of region about the center in degrees
params = np.array(re.findall(r'[+-]?\d+(?:\.\d+)?', mask), dtype=np.float64)
# Convert the center coordinates into pixel system.
cent_x, cent_y = w.wcs_world2pix(params[0], params[1], 0)
# Vertices of the box are needed for the path object to work.
verts = [[cent_x - 0.5 * (params[2] / pix_scale), cent_y + 0.5 * (params[3] / pix_scale)],
[cent_x + 0.5 * (params[2] / pix_scale), cent_y + 0.5 * (params[3] / pix_scale)],
[cent_x + 0.5 * (params[2] / pix_scale), cent_y - 0.5 * (params[3] / pix_scale)],
[cent_x - 0.5 * (params[2] / pix_scale), cent_y - 0.5 * (params[3] / pix_scale)]]
# For rotations of the box.
rot = Affine2D().rotate_deg_around(cent_x, cent_y, degrees=params[4])
# Generate the mask shape.
shape = Path(verts).transformed(rot)
elif mask.startswith('ellipse'):
# Parameters for ellipse shape are as follows
# params[0] : region center RA in degrees
# params[1] : region center Dec in degrees
# params[2] : region semi-major axis in arcseconds
# params[3] : region semi-minor axis in arcseconds
# params[4] : rotation of region about the center in degrees
# Note: For consistency, the semi-major axis should always be aligned along the horizontal axis
# before rotation
params = np.array(re.findall(r'[+-]?\d+(?:\.\d+)?', mask), dtype=np.float64)
# Convert the center coordinates into pixel system
cent_xy = w.wcs_world2pix(params[0], params[1], 0)
# Generate the mask shape
shape = Ellipse(cent_xy, width=params[2] / pix_scale, height=params[3] / pix_scale, angle=params[4])
shape = shape.get_path()
# Return error if mask shape isn't known.
else:
raise KeyError(
f'Mask shape is unknown, please check the region file of cluster: {region_file} {mask}')
shapes_to_mask.append(shape)
# Check if the pixel values are within the shape we defined earlier.
# If true, set the pixel value to 0.
pts = list(product(range(w.pixel_shape[0]), range(w.pixel_shape[1])))
shape_masks = np.array(
[shape.contains_points(pts).reshape(good_pix_mask.shape) for shape in shapes_to_mask])
# Combine all the shape masks into a final object mask, inverting the boolean values so we can multiply
# our mask with our existing good pixel mask
total_obj_mask = ~np.logical_or.reduce(shape_masks)
# Apply the object mask to the existing good pixel mask
good_pix_mask *= total_obj_mask.astype(int)
# Write the new mask to disk overwriting the old mask.
new_mask_hdu = fits.PrimaryHDU(good_pix_mask, header=header)
new_mask_hdu.writeto(pixel_map_path, overwrite=True) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_fix_mask(self):\n fixable_mask = mapreader.Map(os.path.join(tests.TEST_DATA_PATH, 'segmentations', 'test_fixable_mask.map'))\n self.assertFalse(fixable_mask.is_mask)\n fixable_mask.fix_mask()\n self.assertTrue(fixable_mask.is_mask)",
"def _region_mask(self, cs, all_regions, xctr, yctr, hwcs):\n if not HAS_REGIONS:\n return None\n ctr_coord = ar.PixCoord(xctr, yctr)\n mask = None\n for reg_str in all_regions:\n # read ds9 string into a region class\n try:\n with set_log_level('CRITICAL'):\n frame_regions = ar.Regions.parse(reg_str, format='ds9')\n except Exception as err:\n log.debug(f'Region parser error: {err}')\n continue\n for fr in frame_regions:\n if cs == 'wcs':\n # convert to a pixel region first\n try:\n with set_log_level('CRITICAL'):\n fr = fr.to_pixel(hwcs)\n except Exception as err: # pragma: no cover\n # error could be anything, since regions package\n # is in early development state\n log.debug(f'Region WCS conversion error: {err}')\n continue\n\n # check if cursor is contained in a region\n # in any frame\n with set_log_level('CRITICAL'):\n contained = fr.contains(ctr_coord)\n if hasattr(contained, '__len__'):\n # PolygonPixelRegion returns an array, currently\n # (regions v0.4)\n contained = contained[0]\n\n if contained:\n # get mask from first matching region\n try:\n with set_log_level('CRITICAL'):\n mask = fr.to_mask()\n except Exception as err: # pragma: no cover\n # error could be anything, since regions package\n # is in early development state\n log.debug(f'Region mask error: {err}')\n continue\n else:\n log.info(f'Contained in {type(fr).__name__}')\n break\n if mask is not None:\n break\n\n # reset active frame\n return mask",
"def calibrate(science_list_fname, master_flat_fname, master_dark_fname, hp_map_fname, bp_map_fname, mask_bad_pixels = False,\n clean_Bad_Pix=True, replace_nans=True, background_fname = None, outdir = None):\n\n #Get the list of science frames\n #science_list = np.loadtxt(science_list_fname, dtype=str)\n science_list = science_list_fname\n\n #Open the master dark\n master_dark_hdu = f.open(master_dark_fname)\n master_dark = master_dark_hdu[0].data\n dark_shape = np.shape(master_dark)\n print((\"Subtracting {} from each flat file\".format(master_dark_fname)))\n dark_exp_time = master_dark_hdu[0].header['EXPTIME']\n\n #Open the master flat\n master_flat_hdu = f.open(master_flat_fname)\n master_flat = master_flat_hdu[0].data\n print((\"Dividing each file by {}\".format(master_flat_fname)))\n dark_exp_time = master_dark_hdu[0].header['EXPTIME']\n\n #Open the bad pixel map from flat\n bp_map_hdu = f.open(bp_map_fname)\n bad_pixel_map = bp_map_hdu[0].data\n bad_pixel_map_bool = np.array(bad_pixel_map, dtype=bool)\n print((\"Using bad pixel map {}\".format(bp_map_fname)))\n\n #now if hot pixel map from dark is also given\n if hp_map_fname != None:\n hp_map_hdu = f.open(hp_map_fname)\n hot_pixel_map = hp_map_hdu[0].data\n bad_pixel_map_bool = np.logical_or(bad_pixel_map_bool, hot_pixel_map.astype(bool) )\n\n\n if background_fname != None:\n background_hdu = f.open(background_fname)\n background = background_hdu[0].data\n print(\"Subtracting background frame {} from all science files\".format(background_fname))\n\n\n for fname in science_list:\n #Open the file\n print((\"Calibrating {}\".format(fname\n )))\n hdu = f.open(fname)\n data = hdu[0].data\n science_exp_time = hdu[0].header['EXPTIME']\n\n if dark_exp_time != science_exp_time:\n warnings.warn(\"The master dark file doesn't have the same exposure time as the data. 
We'll scale the dark for now, but this isn't ideal\", UserWarning)\n factor = science_exp_time/dark_exp_time\n else:\n factor = 1.\n\n #Subtract the dark, divide by flat\n redux = ((data - factor*master_dark)/master_flat)\n #get rid of crazy values at bad pixel\n redux = redux*~bad_pixel_map_bool\n\n if background_fname != None:\n redux -= background\n\n if clean_Bad_Pix:\n # plt.plot(bad_pixel_map_bool)\n redux = cleanBadPix(redux, bad_pixel_map_bool)\n #redux = ccdproc.cosmicray_lacosmic(redux, sigclip=5)[0]\n\n # redux = ccdproc.cosmicray_median(redux, mbox=7, rbox=5, gbox=7)[0]\n\n #Mask the bad pixels if the flag is set\n if mask_bad_pixels:\n redux *= ~bad_pixel_map_bool\n\n if replace_nans:\n # nan_map = ~np.isfinite(redux)\n # redux = cleanBadPix(redux, nan_map)\n # plt.imshow(redux-after)\n nanmask = np.isnan(redux) #nan = True, just in case this is useful\n redux = np.nan_to_num(redux)\n\n #Put the cablibrated data back in the HDU list\n hdu[0].data = redux\n\n #Add pipeline version and history keywords\n vers = version.get_version()\n hdu[0].header.set('PL_VERS',vers,'Version of pipeline used for processing')\n hdu[0].header['HISTORY'] = \"Subtracting {} from each flat file\".format(master_dark_fname)\n hdu[0].header['HISTORY'] = \"Dividing each file by {}\".format(master_flat_fname)\n\n if background_fname != None:\n hdu[0].header['HISTORY'] = \"Subtracted background frame {}\".format(background_fname)\n\n if mask_bad_pixels:\n hdu[0].header['HISTORY'] = \"Masking all bad pixels found in {}\".format(bp_map_fname)\n\n if clean_Bad_Pix:\n hdu[0].header['HISTORY'] = \"Cleaned all bad pixels found in {} using a median filter\".format(bp_map_fname)\n\n # #Append the bad pixel list to the HDU list\n # hdu.append(f.PrimaryHDU([bad_pixel_map]))\n # hdu[1].header['HISTORY'] = \"Appending bad pixel map :{}\".format(bp_map_fname)\n # hdu[1].header['HISTORY'] = \"0 = good pixel\"\n # hdu[1].header['HISTORY'] = \"1 = bad pixel from flat fields\"\n # hdu[1].header['HISTORY'] = \"2 = hot pixel from darks\"\n\n outname = fname.split('.')[0]+\"_calib.fits\"\n\n #if an output directory is specified we can write out to that directory instead\n #making sure to take only the stuff after the last '/' to avoid directory issues from fname\n if outdir:\n outname = outdir + fname.split('/')[-1]\n\n print((\"Writing calibrated file to {}\".format(outname)))\n #Save the calibrated file\n hdu.writeto(outname, overwrite=True)\n\n # f.PrimaryHDU(redux).writeto('redux_'+i, overwrite = True)",
"def _populate_mask_data(self, filename: str) -> None:\n if self.seg_images.get(filename) is None:\n return None\n\n mask = cv2.imread(self.seg_targets[filename])\n mask = cv2.cvtColor(mask, cv2.COLOR_BGR2RGB)\n\n # convert pixel masks to multidimentional\n height, width = mask.shape[:2]\n segmentation_mask = np.zeros((height, width, len(VOC_COLORMAP)), dtype=np.float32)\n for label_index, label in enumerate(VOC_COLORMAP):\n segmentation_mask[:, :, label_index] = np.all(mask == label, axis=-1).astype(float)\n\n return segmentation_mask",
"def ApplyMask(data,mask):\n \n # loop through portions\n for portion in data.keys():\n # match data keys and apply mask \n for key in data[portion].keys():\n if key in 'xyerr':\n if mask != 'UnMasked':\n data[portion][key].mask = data[portion]['UnMasked']\n data[portion][key].mask = data[portion][mask]\n\t\n return data",
"def apply_masks_to_volume(root_dir):\n\n # Get the full path of the ClearImages, CoregisteredBlurryImages, and Masks directories\n clear_image_dir = join(root_dir, 'ClearImages')\n blurry_image_dir = join(root_dir, 'CoregisteredBlurryImages')\n mask_dir = join(root_dir, 'Masks')\n\n # Iterate over the entire list of images (doesn't matter if it's clear_image_dir or blurry_image_dir)\n for file_name in os.listdir(clear_image_dir):\n if file_name.endswith('.jpg') or file_name.endswith('.png'):\n # Read the clear and blurry images as grayscale images in the form of numpy arrays\n clear_image = cv2.imread(join(clear_image_dir, file_name), 0)\n blurry_image = cv2.imread(join(blurry_image_dir, file_name), 0)\n mask_image = cv2.imread(join(mask_dir, file_name), 0)\n\n if type(blurry_image) is None:\n pass\n\n # Apply the mask to the clear image AND the blurry image\n clear_image_masked = clear_image * (mask_image // 255)\n blurry_image_masked = blurry_image * (mask_image // 255)\n\n # Save the clear and blurry image back\n cv2.imwrite(filename=join(clear_image_dir, file_name), img=clear_image_masked)\n cv2.imwrite(filename=join(blurry_image_dir, file_name), img=blurry_image_masked)\n\n ''' Just logging\n # Show the clear image, clear masked image, blurry image, and blurry masked image\n logger.show_images([(\"clear_image\", clear_image),\n (\"blurry_image\", blurry_image),\n (\"clear_image_masked\", clear_image_masked),\n (\"blurry_image_masked\", blurry_image_masked)])\n '''",
"def test_unfixable_mask(self):\n unfixable_mask = mapreader.Map(os.path.join(tests.TEST_DATA_PATH, 'segmentations', 'test_unfixable_mask.map'))\n self.assertFalse(unfixable_mask.is_mask)\n with self.assertRaises(ValueError):\n unfixable_mask.fix_mask()\n self.assertFalse(unfixable_mask.is_mask)",
"def mask_region(self, region, days=14):\n i = self.Rs.index(region)\n c_s = np.nonzero(np.cumsum(self.NewCases.data[i, :] > 0) == days + 1)[0][0]\n d_s = np.nonzero(np.cumsum(self.NewDeaths.data[i, :] > 0) == days + 1)[0]\n if len(d_s) > 0:\n d_s = d_s[0]\n else:\n d_s = len(self.Ds)\n\n self.Active.mask[i, c_s:] = True\n self.Confirmed.mask[i, c_s:] = True\n self.Deaths.mask[i, d_s:] = True\n self.NewDeaths.mask[i, d_s:] = True\n self.NewCases.mask[i, c_s:] = True\n\n return c_s, d_s",
"def create_masks(image_folder: str, annotation_path: str, outpath: str):\n\n train_reader = ReaderAnnotation(annotation_path)\n\n all_images = os.listdir(image_folder)\n annotated_images = train_reader.annotation.keys()\n\n creator = MaskCreator()\n\n for key in annotated_images:\n file_extension = \".JPG\"\n if not os.path.isfile(\n os.path.join(\n image_folder,\n key.split(\".\")[0] + file_extension,\n )\n ):\n file_extension = file_extension.lower()\n\n image_name = os.path.join(\n image_folder,\n key.split(\".\")[0] + file_extension,\n )\n print(image_name)\n\n out_image_path = os.path.join(outpath, os.path.split(image_name)[-1])\n assert os.path.exists(out_image_path), \"Out image path doesn't exist\"\n\n image = plt.imread(image_name)\n h, w, c = image.shape\n\n regions = train_reader.get(key)[\"regions\"]\n # less than minimal distance\n radius = int(train_reader.get_radius_min(regions=regions) * 0.9)\n\n masks = []\n for _, center in regions.items():\n masks.append(\n creator.create_circular_mask(\n h=h,\n w=w,\n center=(\n int(center[\"shape_attributes\"][\"cx\"]),\n int(center[\"shape_attributes\"][\"cy\"]),\n ),\n radius=radius,\n )\n )\n\n if len(masks) > 50:\n masks = [creator._unite_masks(masks)]\n\n if masks:\n creator.visualize(\n image=image,\n masks=masks,\n filename=out_image_path,\n use_image=False,\n )\n else:\n creator._create_empty_mask(image=image, filename=out_image_path)\n\n print(\"Empty images:\")\n for empty_image in list(set(all_images) - set(annotated_images)):\n if os.path.exists(out_image_path):\n continue\n empty_image = os.path.join(image_folder, empty_image)\n print(empty_image)\n image = plt.imread(empty_image)\n creator._create_empty_mask(\n image=image,\n filename=os.path.join(\n outpath,\n os.path.split(empty_image)[-1],\n ),\n )",
"def mask_weight(region_key,lon,lat):\r\n\t\tlon_res = lon[1] - lon[0];lat_res = lat[1] - lat[0];\r\n\t\tlons,lats = np.meshgrid(lon,lat)\r\n\t\tarea = AreaWeight(lons,lons+lon_res,lats,lats+lat_res)\r\n\r\n\t\t##OCEAN_MASKS FOR COUNTRIES\r\n\t\tocean_mask = sio.loadmat('/home/s1667168/coding/python/external_data/Euro_USA_AUS_BRICS_STA_720_360.mat')\r\n\t\tlon_mask = ocean_mask['lon'][0,:];\r\n\t\tlat_mask = ocean_mask['lat'][0,:];\r\n\t\tbox_region_dic={'All':[0,360,-90,90],'ASIA':[65,145,5,45],'US':[240,290,30,50],'ARCTIC':[0,360,60,90],'TROPICS':[0,360,-28,28],'EUROPE':[0,40,30,70],}\r\n\t\tif (region_key == 'USA' or region_key == 'Europe' or region_key == 'India' or region_key == 'China' or region_key == 'GloLand'):\r\n\t\t\tmask= ocean_mask[region_key][:]\r\n\t\telif region_key in box_region_dic:\r\n\t\t\tmask= ocean_mask['All'][:]\r\n\t\t\tbox = box_region_dic[region_key]\r\n\t\t\tmask = box_clip(box[0],box[1],box[2],box[3],lon_mask,lat_mask,mask)\r\n\t\telse:\r\n\t\t\tprint \"error region name\"\r\n\t\t\r\n\t\t# interpolate from 360*720 to 192*288\r\n\t\tmask[np.isnan(mask)]=0;\tmask[mask>0]=1;\r\n\t\tf = interp2d(lon_mask, lat_mask, mask,kind='linear'); mask = f(lon, lat);\r\n\t\t# plt.imshow(mask,origin='lower');plt.show()\r\n\t\tmask[mask >= 1] = 1;mask[mask < 1] = 0;\r\n\t\t# weight each grid cell by its area weight against the total area\r\n\t\tmask[mask==0] = np.nan\r\n\t\tmask=np.multiply(mask,area); \r\n\t\tmask_weighted = np.divide(mask,np.nansum(np.nansum(mask,axis=1),axis=0))\r\n\t\t# print np.nansum(np.nansum(mask_weighted,axis=1),axis=0)\r\n\t\treturn mask_weighted",
"def load_data_from_dir(instance_dir, image_size=256, pad_size=0.1, skip_indices=()):\n image_dir = osp.join(instance_dir, \"images\")\n mask_dir = osp.join(instance_dir, \"masks\")\n data_dict = {\n \"images_og\": [],\n \"images\": [],\n \"masks\": [],\n \"masks_dt\": [],\n \"bbox\": [],\n \"image_centers\": [],\n \"crop_scales\": [],\n }\n for i, image_path in enumerate(sorted(glob(osp.join(image_dir, \"*.jpg\")))):\n if i in skip_indices:\n continue\n image_name = osp.basename(image_path)\n mask_path = osp.join(mask_dir, image_name.replace(\"jpg\", \"png\"))\n image_og = Image.open(image_path).convert(\"RGB\")\n mask = Image.open(mask_path).convert(\"L\")\n bbox = get_bbox(np.array(mask) / 255.0 > 0.5)\n center = (bbox[:2] + bbox[2:]) / 2.0\n s = max(bbox[2:] - bbox[:2]) / 2.0 * (1 + pad_size)\n square_bbox = np.concatenate([center - s, center + s]).astype(int)\n # Crop image and mask.\n image = image_util.crop_image(image_og, square_bbox)\n image = np.array(image.resize((image_size, image_size), Image.LANCZOS)) / 255.0\n mask = image_util.crop_image(mask, square_bbox)\n mask = np.array(mask.resize((image_size, image_size), Image.BILINEAR))\n mask = mask / 255.0 > 0.5\n image_center, crop_scale = compute_crop_parameters(image_og.size, square_bbox)\n data_dict[\"bbox\"].append(square_bbox)\n data_dict[\"crop_scales\"].append(crop_scale)\n data_dict[\"image_centers\"].append(image_center)\n data_dict[\"images\"].append(image)\n data_dict[\"images_og\"].append(image_og)\n data_dict[\"masks\"].append(mask)\n data_dict[\"masks_dt\"].append(compute_distance_transform(mask))\n for k, v in data_dict.items():\n if k != \"images_og\": # Original images can have any resolution.\n data_dict[k] = np.stack(v)\n\n if osp.exists(osp.join(instance_dir, \"metadata.json\")):\n metadata = json.load(open(osp.join(instance_dir, \"metadata.json\")))\n data_dict[\"extents\"] = metadata[\"extents\"]\n azimuths = metadata[\"azimuths\"]\n elevations = metadata[\"elevations\"]\n R, T = pytorch3d.renderer.look_at_view_transform(\n dist=2,\n elev=elevations,\n azim=azimuths,\n )\n data_dict[\"initial_poses\"] = R.tolist()\n return data_dict",
"def set_rectangular_mask(self, atom_region_rows, atom_region_cols):\n # Check that indices are sane\n n_rows, n_cols = self.image_shape\n for i in range(len(atom_region_rows)):\n if abs(atom_region_rows[i]) > n_rows:\n error_message = (f\"atom_region_rows[{i}] has value \"\n f\"{atom_region_rows[i]} but should be <={n_rows} \"\n f\"and >={-n_rows}.\")\n raise IndexError(error_message)\n for i in range(len(atom_region_cols)):\n if abs(atom_region_cols[i]) > n_cols:\n error_message = (f\"atom_region_cols[{i}] has value \"\n f\"{atom_region_cols[i]} but should be <={n_cols} \"\n f\"and >={-n_cols}.\")\n raise IndexError(error_message)\n\n # Construct the mask and store it\n mask = np.ones(self.image_shape)\n mask[atom_region_rows[0]:atom_region_rows[1],\n atom_region_cols[0]:atom_region_cols[1]] = 0\n self.set_mask(mask)",
"def apply_mask(file: str, nlines: int, nsamples: int, mask_file: str):\n data = read_bin(file, nlines, nsamples)\n mask = read_bmp(mask_file)\n\n data[mask == 0] = 0\n\n outfile = \"{file}_masked\".format(file=file)\n data.tofile(outfile)\n\n return outfile",
"def mask_the_images(working_path,set_name):\n\n file_list=glob('/media/talhassid/My Passport/haimTal/test_images_0b8afe447b5f1a2c405f41cf2fb1198e.npy')\n out_images = [] #final set of images for all patients\n for fname in file_list:\n out_images_per_patient = []\n print (\"working on file \", fname)\n imgs_to_process = np.load(fname.replace(\"lungmask\",\"images\")) # images of one patient\n masks = np.load(fname)\n for i in range(len(imgs_to_process)):\n mask = masks[i]\n img = imgs_to_process[i]\n new_size = [512,512] # we're scaling back up to the original size of the image\n img= mask*img # apply lung mask\n #\n # renormalizing the masked image (in the mask region)\n #\n new_mean = np.mean(img[mask>0])\n new_std = np.std(img[mask>0])\n #\n # Pulling the background color up to the lower end\n # of the pixel range for the lungs\n #\n old_min = np.min(img) # background color\n img[img==old_min] = new_mean-1.2*new_std # resetting backgound color\n img = img-new_mean\n img = img/new_std\n #make image bounding box (min row, min col, max row, max col)\n labels = measure.label(mask)\n regions = measure.regionprops(labels)\n #\n # Finding the global min and max row over all regions\n #\n min_row = 512\n max_row = 0\n min_col = 512\n max_col = 0\n for prop in regions:\n B = prop.bbox\n if min_row > B[0]:\n min_row = B[0]\n if min_col > B[1]:\n min_col = B[1]\n if max_row < B[2]:\n max_row = B[2]\n if max_col < B[3]:\n max_col = B[3]\n width = max_col-min_col\n height = max_row - min_row\n if width > height:\n max_row=min_row+width\n else:\n max_col = min_col+height\n #\n # cropping the image down to the bounding box for all regions\n # (there's probably an skimage command that can do this in one line)\n #\n img = img[min_row:max_row,min_col:max_col]\n mask = mask[min_row:max_row,min_col:max_col]\n if max_row-min_row <5 or max_col-min_col<5: # skipping all images with no god regions\n pass\n else:\n # moving range to -1 to 1 to accomodate the resize function\n mean = np.mean(img)\n img = img - mean\n min = np.min(img)\n max = np.max(img)\n img = img/(max-min)\n new_img = resize(img,[512,512], mode='constant')\n out_images_per_patient.append(new_img)\n\n id = re.sub(r'.*_images_(.*)\\.npy',r'\\1',fname)\n patient_images_and_id = (out_images_per_patient,id)\n out_images.append(patient_images_and_id)\n print (\"Delete files: {} \\n\\t {} \".format(fname,re.sub(\"lungmask\",\"images\",fname)))\n os.remove(fname)\n os.remove(fname.replace(\"images\",\"lungmask\")) # images of one patient\n\n\n np.save(working_path+\"{}Images.npy\".format(set_name),out_images)",
"def simulate_source_mask(binary, n_holes, hole_radius_arcmin):\n\n mask = binary.copy()\n if binary.pixel == \"HEALPIX\":\n idx = np.where(binary.data == 1)\n for i in range(n_holes):\n random_index1 = np.random.choice(idx[0])\n vec = hp.pixelfunc.pix2vec(binary.nside, random_index1)\n disc = hp.query_disc(binary.nside, vec, hole_radius_arcmin / (60.0 * 180) * np.pi)\n mask.data[disc] = 0\n\n if binary.pixel == \"CAR\":\n random_index1 = np.random.randint(0, binary.data.shape[0], size=n_holes)\n random_index2 = np.random.randint(0, binary.data.shape[1], size=n_holes)\n mask.data[random_index1, random_index2] = 0\n dist = enmap.distance_transform(mask.data)\n mask.data[dist * 60 * 180 / np.pi < hole_radius_arcmin] = 0\n\n return mask",
"def filterClusters(inputFile, maskImage, outputFile):\n os.system('3drefit -space MNI ' + maskImage)\n os.system('3dcalc -a ' + maskImage \\\n + ' -b ' + inputFile \\\n + \" -expr 'step(a) * b'\" \\\n + ' -prefix ' + outputFile)",
"def filterClusters(inputFile, maskImage, outputFile):\n os.system('3drefit -space MNI ' + maskImage)\n os.system('3dcalc -a ' + maskImage \\\n + ' -b ' + inputFile \\\n + \" -expr 'step(a) * b'\" \\\n + ' -prefix ' + outputFile)",
"def create_masks_from_segmap(\n segmap, catalog, ref_image, n_jobs=1, skip_existing=True,\n masksky_name='mask-sky.fits', maskobj_name='mask-source-%05d.fits',\n idname='ID', raname='RA', decname='DEC', margin=0, mask_size=(20, 20),\n convolve_fwhm=0, psf_threshold=0.5, verbose=0):\n from joblib import delayed, Parallel\n\n logger = logging.getLogger(__name__)\n\n if isinstance(ref_image, str):\n ref_image = Image(ref_image)\n if isinstance(catalog, str):\n catalog = Catalog.read(catalog)\n if not isinstance(segmap, Segmap):\n segmap = Segmap(segmap)\n\n logger.info('Aligning segmap with reference image')\n segm = segmap.align_with_image(ref_image, truncate=True, margin=margin)\n\n dilateit, struct = _get_psf_convolution_params(convolve_fwhm, segm,\n psf_threshold)\n\n # create sky mask\n masksky = masksky_name() if callable(masksky_name) else masksky_name\n if exists(masksky) and skip_existing:\n logger.debug('sky mask exists, skipping')\n else:\n logger.debug('creating sky mask')\n segm.get_mask(0, inverse=True, dilate=dilateit, struct=struct,\n regrid_to=ref_image, outname=masksky)\n\n # extract source masks\n minsize = 0.\n to_compute = []\n stats = defaultdict(list)\n\n for row in catalog:\n id_ = int(row[idname]) # need int, not np.int64\n source_path = (maskobj_name(id_) if callable(maskobj_name)\n else maskobj_name % id_)\n if skip_existing and exists(source_path):\n stats['skipped'].append(id_)\n else:\n center = (row[decname], row[raname])\n stats['computed'].append(id_)\n to_compute.append(delayed(segm.get_source_mask)(\n id_, center, mask_size, minsize=minsize, struct=struct,\n dilate=dilateit, outname=source_path, regrid_to=ref_image))\n\n # FIXME: check which value to use for max_nbytes\n if to_compute:\n logger.info('computing masks for %d sources', len(to_compute))\n Parallel(n_jobs=n_jobs, verbose=verbose)(progressbar(to_compute))\n else:\n logger.info('nothing to compute')",
"def mask_images(self, folder_name, mask_image_name):\n\n photo_list = self.get_photo_list(folder_name)\n masked_folder_name = folder_name + '_background'\n\n try:\n print(\"Making dir \" + str(masked_folder_name) + \" for masking\")\n os.mkdir(masked_folder_name)\n except OSError:\n print(\"Folder exists, have you already done this masking??\")\n return\n\n full_mask_image = cv2.imread(mask_image_name, cv2.IMREAD_ANYDEPTH)\n\n for i, image_name in enumerate(photo_list):\n print(i)\n print (folder_name + image_name)\n img = cv2.imread(folder_name + '/' + image_name, cv2.IMREAD_ANYDEPTH)\n masked_image = img\n\n size = img.shape\n for row_pixel in range(0, size[0]):\n for column_pixel in range(0, size[1]):\n if full_mask_image[row_pixel, column_pixel] != 0:\n masked_image[row_pixel, column_pixel] = img[row_pixel, column_pixel]\n\n else:\n masked_image[row_pixel, column_pixel] = 0\n\n cv2.imwrite(masked_folder_name + '/' + image_name, masked_image.astype(np.uint16))",
"def load_mask(self, image_id):\n info = self.image_info[image_id]\n# logger.info(\"mask {}\".format(image_id))\n if info[\"mask\"] is None:\n craters = info['craters']\n count = len(craters)\n mask = np.zeros([info['height'], info['width'], count], dtype=np.uint8)\n for i, dims in enumerate(craters):\n mask[:, :, i:i+1] = self.draw_shape(mask[:, :, i:i+1].copy(),\n \"circle\", dims, 1)\n # Handle occlusions\n occlusion = np.logical_not(mask[:, :, -1]).astype(np.uint8)\n for i in range(count-2, -1, -1):\n mask[:, :, i] = mask[:, :, i] * occlusion\n occlusion = np.logical_and(occlusion, np.logical_not(mask[:, :, i]))\n # Map class names to class IDs.\n class_ids = np.array([self.class_names.index(s) for s in info[\"shapes\"]])\n info[\"mask\"] = mask.astype(np.bool)\n info[\"class_ids\"] = class_ids.astype(np.int32)\n else:\n mask, class_ids = info[\"mask\"], info[\"class_ids\"]\n return mask, class_ids",
"def cleanBadPix(redux_science, bad_pixel_map, method = 'median', replacement_box = 5, replace_constant = -99):\n #add negative pixels to the bad pixel map\n bad_pixel_map = np.logical_or(bad_pixel_map, redux_science <= 0)\n # im = np.copy(redux_science)\n # im[np.where(bad_pixel_map)[1]] = 0.\n if method == 'median':\n med_fil = median_filter(redux_science, size = replacement_box)\n\n cleaned = redux_science*~bad_pixel_map + med_fil*bad_pixel_map\n\n #elif method == 'interpolate':\n\n # print('so clean')\n\n return cleaned",
"def load_mask(self, image_id):\n # If not homeobject dataset, delegate to parent class.\n image_info = self.image_info[image_id]\n if image_info[\"source\"] != 'homeobject':\n print(\n \"Warn: \\'{}\\' label not found. Processing with parent load_mask.\".format(image_info[\"source\"]))\n return super(self.__class__, self).load_mask(image_id)\n\n # Convert polygons to a bitmap mask of shape\n class_ids = image_info['class_ids']\n info = self.image_info[image_id]\n mask = np.zeros([info[\"height\"], info[\"width\"], len(info[\"polygons\"])], dtype=np.uint8)\n for i, p in enumerate(info[\"polygons\"]):\n # Get indexes of pixels inside the polygon and set them to 1\n rr, cc = skimage.draw.polygon(p['all_points_y'], p['all_points_x'])\n\n # modify dirt mask if it resides outside of image boundary\n rr[rr > mask.shape[0] - 1] = mask.shape[0] - 1\n cc[cc > mask.shape[1] - 1] = mask.shape[1] - 1\n\n mask[rr, cc, i] = 1\n # Return mask, and array of class IDs of each instance. Since we have\n # one class ID only, we return an array of 1s\n class_ids = np.array(class_ids, dtype=np.int32)\n # return mask.astype(np.bool), np.ones([mask.shape[-1]], dtype=np.int32)\n return mask, class_ids",
"def mask_region(self, ypos, xpos, r):\r\n for j, i in product(np.arange(ypos - r, ypos + r + 1), np.arange(xpos - r, xpos + 1 + r)): # Create square\r\n if (j - ypos) ** 2 + (i - xpos) ** 2 <= r ** 2 and 0 <= j<= self.shapes[0] - 1 and 0<= i <=self.shapes[1] - 1:\r\n j = int(j)\r\n i = int(i)\r\n self.masked[j, i] = 0",
"def repairWorldMap(self,fileRefs,gridLines=True):\n if not fileRefs.fmap: return 0\n progress = self.progress\n progress.setMax((28*2)**2)\n progress(0.0,_(\"Drawing Cells\"))\n proCount = 0\n for gridx in range(-28,28,1):\n for gridy in range(28,-28,-1):\n id = '[%d,%d]' % (gridx,gridy)\n cell = fileRefs.cells_id.get(id,None)\n isMarked = cell and cell.flags & 32\n fileRefs.fmap.drawCell(self.lands.get(id),gridx,gridy,isMarked)\n proCount += 1\n progress(proCount)\n fileRefs.fmap.drawGrid(gridLines)\n return 1",
"def mask_incoherent(self):\n self.MaskPrefix = 'i' + self.MaskPrefix\n print('Masking pixel values where .msk value is less than {0}...'.format(threshold))\n for ig in self.Set:\n igram = self.load_ma(ig)\n mskFile = ig.Path[:-3] + 'msk'\n coherence = roipy.tools.load_half(ig, 2, mskFile)\n incoherent = ma.masked_less(coherence, self.Cothresh)\n igram[incoherent.mask] = ma.masked\n mskFile = self.MaskPrefix + 'Mask_' + ig.Name[:-4]\n np.save(os.path.join(self.ProcDir, mskFile), igram.mask)\n print(mskFile)\n\n print('Done')",
"def get_regions_mask(self, input):",
"def process_regions(mask, t1, ignore_vals=[0]):\n regions = np.unique(mask)\n regions = [i for i in regions if i not in ignore_vals]\n colors = map_label_colors(regions)\n images = [process_region(mask, t1, colors, region) for region in regions]\n montage = create_montage(images, direction='v')\n return(montage)",
"def load_mask(self, image_id):\n # If not a pedestrian dataset image, delegate to parent class.\n image_info = self.image_info[image_id]\n if image_info[\"source\"] != \"pedestrian\":\n return super(self.__class__, self).load_mask(image_id)\n\n # Convert polygons to a bitmap mask of shape\n # [height, width, instance_count]\n info = self.image_info[image_id]\n mask = np.zeros([info[\"height\"], info[\"width\"], len(info[\"polygons\"])],\n dtype=np.uint8)\n for i, p in enumerate(info[\"polygons\"]):\n # Get indexes of pixels inside the polygon and set them to 1\n rr, cc = skimage.draw.polygon(p['all_points_y'], p['all_points_x'])\n mask[rr, cc, i] = 1\n\n # Return mask, and array of class IDs of each instance. Since we have\n # one class ID only, we return an array of 1s\n return mask.astype(np.bool), np.ones([mask.shape[-1]], dtype=np.int32)",
"def createMaskDictionary(self):\n try:\n self.maskMap = dict(list(zip(self.inds,list(range(len(self.inds))))))\n self.maskSet = set(self.inds)\n except Exception as error:\n print(\"failed in createMaskDictionary\", error)",
"def load_mask(self, image_id):\n\n image_info = self.image_info[image_id]\n if image_info[\"source\"] != \"pcb\":\n return super(self.__class__, self).load_mask(image_id)\n\n # convert polygons to a bitmap mask of shape\n # [height, width, instance_count]\n info = self.image_info[image_id]\n \n mask = np.zeros([info[\"height\"], info[\"width\"], len(info[\"polygons\"])],\n dtype=np.uint8)\n \n for i, p in enumerate(info[\"polygons\"]):\n # get indexes of pixels inside the polygon and set them to 1\n rr, cc = skimage.draw.polygon(p['all_points_y'], p['all_points_x'])\n mask[rr, cc, i] = 1\n\n # return mask, and array of class IDs of each instance.\n # since we have one class ID only, we return an array of 1s\n return mask.astype(np.bool), info[\"class_ids\"]"
] | [
"0.53000647",
"0.52897805",
"0.5226478",
"0.50740224",
"0.5040365",
"0.49909344",
"0.49804494",
"0.49571514",
"0.49495628",
"0.49361882",
"0.4911906",
"0.49107993",
"0.4890557",
"0.48747185",
"0.48293045",
"0.48280445",
"0.48280445",
"0.4828019",
"0.48067454",
"0.47801915",
"0.4775538",
"0.47660273",
"0.4754396",
"0.47389513",
"0.47375077",
"0.47345158",
"0.47327444",
"0.47295296",
"0.4724811",
"0.471878"
] | 0.65371066 | 0 |
This function computes a modified Hogg et al. (2002) definition of the K-correction. The observed filter and zero point will be the IRAC 4.5 um values. Returns | def cluster_k_correction(self):
# Load in the IRAC 4.5 um filter as the observed filter
irac_45 = SpectralElement.from_file('Data_Repository/filter_curves/Spitzer_IRAC/080924ch2trans_full.txt',
wave_unit=u.um)
# Store the official IRAC 4.5 um zero point flux for K-correction computations
irac_45_zp = 179.7 * u.Jy
# If the requested output zero-point is 'vega', pre-load the Vega reference spectrum
if isinstance(self._output_zero_pt, str) and self._output_zero_pt.lower() == 'vega':
self._output_zero_pt = SourceSpectrum.from_vega()
for cluster_id, cluster_info in self._catalog_dictionary.items():
# Retrieve the cluster redshift from the SPT catalog
catalog_idx = cluster_info['SPT_cat_idx']
cluster_z = self._spt_catalog['REDSHIFT'][catalog_idx]
# Compute the K-correction for the cluster's redshift, the given SED and output parameters
k_corr = k_correction(z=cluster_z, f_lambda=self._sed,
g_lambda_R=irac_45_zp, g_lambda_Q=self._output_zero_pt,
R=irac_45, Q=self._output_filter)
# Store the cluster redshift and K-correction in cluster_info for later use
cluster_info['redshift'] = cluster_z
cluster_info['k-correction'] = k_corr | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def kcorrect(self, filter_list_q=None, filter_list_r=None, band_shift=0.):\n if not filter_list_q: filter_list_q = self.filter_list\n if not filter_list_r: filter_list_r = self.filter_list\n\n if len(filter_list_q) != len(filter_list_r):\n raise ValueError('Numbers of filters must match.')\n\n if filter_list_r != self.filter_list:\n rm2 = reconstruct_maggie(self.coeffs, self.redshift,\n self.ptable[filter_list_r])\n else:\n rm2 = self.model_maggie\n\n if band_shift > 0.:\n bs = np.ones(self.redshift.shape, dtype=FTYPE) * band_shift\n rm1 = reconstruct_maggie(self.coeffs, bs,\n self.ptable[filter_list_q])\n rm1 = rm1 / (1. + bs)\n else:\n rm1 = reconstruct_maggie(self.coeffs, self.redshift0,\n self.ptable[filter_list_q])\n return 2.5 * np.log10(rm1 / rm2)",
"def calc_k(self):\n\t\n\tself.k = -np.array([self.sth*self.cphi, self.sth*self.sphi, self.cth])\n\n\treturn",
"def k_change_from_crop(origin_h, origin_w, dist_h, dist_w):\n K = np.array([[431.46,0,954.101,0],\n [ 0.,431.732 ,541.389, 0],\n [ 0 ,0, 1, 0],\n [ 0 ,0, 0, 1]])\n K[0,1] -= (origin_w - dist_w)/2 \n K[0,2] -= (origin_h - dist_h)/2\n\n print(\"K\", K)\n return K",
"def Wk2(z, zp, k, c_M=0, c_B=0):\n c = 299792458/1000 # km/s\n A = np.zeros((len(z), len(zp)))\n chiz = np.copy(A); np.transpose(chiz)[:] = chi(z)\n chifraction = (chiz - chi(zp))*chi(zp)/chiz\n A[:] = omega_matter(zp)*H(zp)/(1 + zp)**2*G_light(zp, c_M, c_B)\n W = 3/2*A*chifraction\n W /= c # Unit correction for Wk to be unitless\n return W",
"def EisensteinHu_nowiggle_Pk(self,\n z = 0.,\n k = np.logspace(-4., 2., 1001),\n sigma_8 = 0.83):\n\n om_m = self.omega_cdm+self.omega_b\n om_b = self.omega_b\n ns = self.ns\n h = self.h\n theta = self.T_cmb/2.7\n \n #if self.w0 != -1. or self.wa != 0.:\n # warnings.warn(\"nw_Pk is not able to reproduce non-static dark energy with w0 != -1. The dark enerdy parameters will be set to w0 = -1, wa = 0\")\n if self.Omega_K != 0.:\n #warnings.warn(\"EisensteinHu_Pk is not able to reproduce non-flat FRW metric! The Omega_K parameter will be transferred to Omega_lambda such that Omega_lambda -> (Omega_lambda + Omega_K)\")\n om_m -= self.Omega_K\n\n kEH = k*h\n s = 44.5*np.log(9.83/om_m)/np.sqrt(1+10*(om_b)**0.75)\n Gamma = om_m/h\n AG = 1 - 0.328*np.log(431*om_m)*om_b/om_m + 0.38*np.log(22.3*om_m)*(om_b/om_m)**2\n Gamma = Gamma*(AG+(1-AG)/(1+(0.43*kEH*s)**4))\n q = kEH * theta**2/Gamma/h\n L0 = np.log(2*np.e + 1.8*q)\n C0 = 14.2 + 731/(1 + 62.5*q)\n T0 = L0/(L0 + C0*q**2)\n PEH = (kEH*h)**ns*T0**2\n\n norm = sigma_8/self.compute_sigma_8(k = k, pk = PEH)\n Pk = np.expand_dims(PEH,0)*np.expand_dims(norm**2.*self.growth_factor_scale_independent(z)**2.,1)\n\n return k, Pk",
"def Wkappa(z, zp, c_M=0, c_B=0):\n c = 299792458/1000 # km/s\n A = np.zeros([len(z)] + list(np.shape(zp)))\n chiz = np.copy(A); np.transpose(chiz)[:] = chi(z)\n chifraction = (chiz - chi(zp))*chi(zp)/chiz\n A[:] = omega_matter(zp)*H(zp)/(1 + zp)**2*G_light(zp, c_M, c_B)\n W = 3/2*A*chifraction\n W /= c # Unit correction for Wk to be unitless\n return W",
"def KRC(self, ik, ipd, ipl, t):\n idx = ik - 1\n\n den1 = 1 - self.delta[idx] * self.coca.PK(ik, t)\n num1 = self.delta[idx] * self.thetak[idx]\n ins = num1 / den1\n\n for l in np.arange(0, self.L):\n pl = self.coca.PL(l, t)\n ins += ((self.thetal[l] * self.gamma[l][idx]) / (1 - pl))\n\n ans = ipd * np.exp(t * ipl) * ins\n\n return ans",
"def calc_magnitude(box,octant):\n # Read the Mi(z=2) magnitudes for the box.\n miz2 = FH.read_file(box)['Miz2'][:]\n # Read the index for each QSO in the octant, and get the Mi(z=2).\n data = FH.read_file(octant)\n zz = data['Z']\n dmod = data['DMOD']\n miz2 = miz2[data['INDX']]\n # Now convert to apparent i-band magnitude using the k-correction.\n # If a tabulated k-correction is available, use that, otherwise\n # default to a power-law continuum approximation.\n # See discussion in Ross++13, Appendix B and Section 4.\n kfile=os.getenv('MOCKINGDESI_BASE')+\"/data/qso-iband-k-correction.txt\"\n if os.path.exists(kfile):\n print(\"Using K-correction from \"+kfile)\n kcorr = np.loadtxt(kfile)\n kcorr = np.interp(zz,kcorr[:,1],kcorr[:,2])\n else:\n print(\"Using power-law K-correction\")\n alpha = -0.5\n kcorr = -2.5*(1+alpha)*np.log10( (1+zz)/(1+2.0) )\n gmi = np.poly1d([0.1502,-0.9886,2.147,-1.758,0.6397])\t# See notes.\n rmi = np.poly1d([-0.1482,1.636,-6.716,12.55,-10.39,3.017])\n magi = miz2 + dmod + kcorr\t# e.g. Ross++13, Eq. 5\n magg = magi + gmi(zz.clip(0.5,3.5))\n magr = magi + rmi(zz.clip(0.5,3.5))\n # and write the results\n data = {}\n data['GMAG'] = magg.astype('f4')\n data['RMAG'] = magr.astype('f4')\n FH.write_file(octant,data)\n #",
"def fR_correction(self, k, z, f_R0, nonlinear = True):#, sigma_8 = 0.8):\n # Substitute k_max above 10.\n if nonlinear: k_new = np.array([i if i<10. else 10. for i in k])\n else: k_new = k\n # k,z arrays\n Z,K = np.meshgrid(z,k_new,indexing='ij')\n a = 1./(1.+Z)\n\n # Change f(R) within bounds\n fR0min = 1e-7\n fR0max = 1e-4\n if(f_R0 < fR0min): f_R0 = fR0min\n if(f_R0 > fR0max): f_R0 = fR0max\n\n # Non-linear enhancement\n if nonlinear:\n # Low values of f(R) and relative table\n fR0_low = 5e-6\n param_low = [ 0.768779 , -0.405375 , 0.0075176, 0.0288574 , -0.0638224 , -0.401206 , \n 0.369507 , 0.109392 , -0.342089 , 0.226376 , -0.107105 , 0.0484649 ,\n -0.024377 , -0.051962 , -0.0351849, 0.147194 , 0.061761 , -0.131382 ,\n 0.00759035, -0.00101884 , 0.0118011, -0.0296267 , 0.025968 , 0.076885 ,\n 0.0312734 , 0.0293253 , -0.0141899, 0.109011 , 0.0818948 , -0.0568241 ,\n 0.120272 , 0.0249235 , -0.0298492, 0.0354401 , -0.262769 , 0.230278 ,\n -0.139116 , -0.132313 , 0.13132 , -0.0565551 , -0.0338864 , 0.0712653 ,\n 0.20246 , -0.116113 , 0.102453 , 0.0632254 , 0.0694305 , 0.00296431,\n 0.0522931 , 0.0780708 , -0.0977045]\n # Medium values of f(R) and relative table\n fR0_mid = 1e-5\n param_mid = [ 0.936496 , -0.545832 , 0.634804 , -0.0290649 , -0.0954373 , -0.342491 ,\n 0.491066 , 0.297816 , -0.287142 , -0.0399919 , 0.3037 , 0.360959 ,\n 0.000615209, -0.00941931, -0.0181341, 0.376297 , 0.486358 , 0.0349385 ,\n 0.240066 , 0.188202 , 0.665834 , 0.0122249 , -0.0343399 , -0.0520361 ,\n 0.261006 , 0.525633 , 0.266255 , 0.393546 , 0.29088 , -0.411491 ,\n 0.776609 , 0.470777 , -0.681923 , -0.079589 , -0.282388 , 0.53954 ,\n -0.0930797 , 0.0783781 , 0.194957 , 0.270378 , 0.370288 , 0.194857 ,\n 0.318637 , 0.0457011 , 0.139237 , 0.033403 , 0.0762982 , -0.0001047 , \n -0.00275824 , 0.0461644 , 0.189897 ]\n\n # High values of f(R) and relative table\n fR0_high = 5e-5\n param_high = [ 0.572477 , 0.254686 , 1.21637 , 0.00046274, -0.0901242 , -0.355849 ,\n 2.31154 , 2.29822 , -0.483186 , 0.4988 , 0.36089 , 0.0703424 ,\n 0.0257389 , 0.0168936 , -0.030697 , -0.206992 , 0.266084 , 0.603357 ,\n 0.574264 , -0.30799 , 0.831644 , -0.0093644 , 0.00221153, 0.0076829 ,\n -0.650381 , 0.0179215 , 0.927038 , 0.77903 , 0.919643 , -0.936328 ,\n 1.26756 , 1.44477 , -1.44129 , 0.219594 , 0.353883 , 1.02533 ,\n -0.251705 , 0.124875 , 0.345995 , -0.146438 , 0.0200251 , 0.0892343 ,\n 0.284755 , -0.158286 , 0.541178 , -0.0471913 , 0.139772 , -0.134888 ,\n 0.0959162 , 0.368186 , -0.157828 ]\n\n # r \\propto log(f_R0)\n r_low = np.log(f_R0/fR0_low)\n r_mid = np.log(f_R0/fR0_mid)\n r_high = np.log(f_R0/fR0_high)\n\n # Find ratios\n ratio_low = self.ratio_by_param(r_low , a, K, param_low ) # 1e-7 < fR0 < 5e-6\n ratio_mid = self.ratio_by_param(r_mid , a, K, param_mid ) # 5e-6 < fR0 < 5e-5\n ratio_high = self.ratio_by_param(r_high, a, K, param_high) # 1e-5 < fR0 < 1e-4\n\n # Return\n if f_R0>=5e-5: enhancement = ratio_high\n elif f_R0<=5e-6: enhancement = ratio_low\n elif f_R0>=1e-5: enhancement = ratio_mid+(ratio_high-ratio_mid)*(f_R0-1e-5)/(5e-5-1e-5)\n else: enhancement = ratio_low+(ratio_mid -ratio_low)*(f_R0-5e-6)/(1e-5-5e-6)\n\n # Change due to Omega_m\n #dom_om = (self.Omega_m-0.3)/0.3\n #aaa = 0.015\n #bbb = 1.4\n #kstar = 0.16*(1e-5/f_R0)**0.5\n #enhancement *= 1-aaa*dom_om*np.tanh((K/kstar)**bbb)\n\n # Change due to sigma_8\n #ds8_s8 = (sigma_8-0.8)/0.8\n #kst = 1.2\n #enhancement *= 1+ds8_s8*K/(1+(K/kstar)**2)\n\n # Linear enhancement\n else:\n r = np.log(f_R0/1e-5)\n K *= np.sqrt(f_R0/1e-5)\n b_Z = 3.10000+ 
2.34466*(a-1.)- 1.86362*(a-1.)**2.\n c_Z = 34.49510+28.86370*(a-1.)-13.13020*(a-1.)**2.\n d_Z = 0.14654- 0.01000*(a-1.)- 0.14944*(a-1.)**2.\n e_Z = 1.62807+ 0.71291*(a-1.)- 1.41003*(a-1.)**2.\n enhancement = 1. + (b_Z*K)**2./(1.+c_Z*K**2.) + d_Z*np.abs(np.log(K)*K/(K-1.))*np.arctan(e_Z*K)\n\n # There cannot be suppression\n enhancement[np.where(enhancement<1.0)] = 1.0\n\n return enhancement",
"def Wk(z, zp, k, c_M=0, c_B=0):\n c = 299792458/1000 # km/s\n A = np.zeros((len(k), len(z), len(zp)))\n chiz = np.copy(A); np.transpose(chiz, (0,2,1))[:] = chi(z)\n chifraction = (chiz - chi(zp))*chi(zp)/chiz\n A[:] = omega_matter(zp)*H(zp)/(1 + zp)**2*G_light(zp, c_M, c_B)\n W2 = 3/2*A*chifraction\n Wtransp = np.transpose(W2)#/k**2 # If k is included, multiply by h\n W = np.transpose(Wtransp)\n W /= c # Unit correction for Wk to be unitless\n return W",
"def test_kw_correction(self):\r\n self.assertEqual(_corr_kw(10), 990)\r\n self.assertEqual(_corr_kw(5), 120)\r\n self.assertFloatEqual(_corr_kw(5.4), 152.064)",
"def correction(self):\r\n \r\n # empirical coefficients:\r\n k3, k2, k1, k0 = 0.0892, 0.0544, 0.2511, -0.0017\r\n \r\n # thrust as a function of the azimuth angle and the loads:\r\n thrust = self.qn*np.sin(Turbine.t) + self.qt*np.cos(Turbine.t)\r\n \r\n # interpolator function for the thrust:\r\n function = interp1d(Turbine.t, thrust, kind='cubic')\r\n \r\n # vectorize the function so that it takes an array of angles:\r\n __function__ = np.vectorize(function)\r\n \r\n # thrust coefficient integrating according to phi:\r\n self.cth = simps(__function__(Turbine.p), Turbine.p)\r\n \r\n # induction factor:\r\n self.a = k3*self.cth**3 + k2*self.cth**2 + k1*self.cth + k0\r\n \r\n # correction factor:\r\n if self.a <= 0.15:\r\n self.ka = 1.0/(1.0 - self.a)\r\n else:\r\n self.ka = (1./(1 - self.a))*(0.65 + 0.35*exp(-4.5*(self.a - 0.15)))",
"def EisensteinHu_Pk(self,\n z = 0.,\n k = np.logspace(-4., 2., 1001),\n sigma_8 = 0.83):\n\n om_m = self.Omega_m\n om_b = self.Omega_b\n n_tld = self.ns - 1.\n h = self.h\n theta = self.T_cmb/2.7\n \n if np.sum(self.M_nu) != 0.:\n warnings.warn(\"EisensteinHu_Pk is not able to reproduce massive neutrinos as it uses the Eisenstein & Hu approximation (1998) for the linear power spectrum. The Omega_nu parameter will be transferred to Omega_lambda such that Omega_lambda -> (Omega_lambda + Omega_nu)\")\n om_m -= np.sum(self.Omega_nu)\n if self.w0 != -1. or self.wa != 0.:\n warnings.warn(\"nw_Pk is not able to reproduce non-static dark energy with w0 != -1. The dark enerdy parameters will be set to w0 = -1, wa = 0\")\n if self.Omega_K != 0.:\n warnings.warn(\"EisensteinHu_Pk is not able to reproduce non-flat FRW metric! The Omega_K parameter will be transferred to Omega_lambda such that Omega_lambda -> (Omega_lambda + Omega_K)\")\n om_m -= self.Omega_K\n\n rk = k*h\n e = np.exp(1.)\n\n # Recombination and equality\n thet = 2.728/2.7\n b1 = 0.313*(om_m*h*h)**(-0.419)*(1+0.607*(om_m*h*h)**0.674)\n b2 = 0.238*(om_m*h*h)**0.223\n zd = 1291.*(1+b1*(om_b*h*h)**b2)*(om_m*h*h)**0.251/(1.+0.659*(om_m*h*h)**0.828)\n ze = 2.50e4*om_m*h*h/thet**4.\n rd = 31500.*om_b*h*h/thet**4./zd\n re = 31500.*om_b*h*h/thet**4./ze\n rke = 7.46e-2*om_m*h*h/thet**2.\n s = (2./3./rke)*np.sqrt(6./re)*np.log((np.sqrt(1.+rd)+np.sqrt(rd+re))/(1+np.sqrt(re)))\n rks = 1.6*( (om_b*h*h)**0.52 ) * ( (om_m*h*h)**0.73 ) * (1.+(10.4*om_m*h*h)**(-0.95))\n q = rk/13.41/rke\n y = (1.+ze)/(1.+zd)\n g = y*(-6.*np.sqrt(1+y)+(2.+3.*y)*np.log((np.sqrt(1.+y)+1.)/(np.sqrt(1.+y)-1.)))\n\n # Master function\n ab = g*2.07*rke*s/(1.+rd)**(0.75)\n a1 = (46.9*om_m*h*h)**0.670*(1+(32.1*om_m*h*h)**(-0.532))\n a2 = (12.0*om_m*h*h)**0.424*(1+(45.0*om_m*h*h)**(-0.582))\n ac = (a1**(-om_b/om_m)) * (a2**(-(om_b/om_m)**3.))\n B1 = 0.944/(1+(458.*om_m*h*h)**(-0.708))\n B2 = (0.395*om_m*h*h)**(-0.0266)\n bc = 1./(1.+B1*((1.-om_b/om_m)**B2-1.))\n\n # CDM transfer function\n f = 1./(1.+(rk*s/5.4)**4.)\n c1 = 14.2 + 386./(1.+69.9*q**1.08)\n c2 = 14.2/ac + 386./(1.+69.9*q**1.08)\n tc = f*np.log(e+1.8*bc*q)/(np.log(e+1.8*bc*q)+c1*q*q) +(1.-f)*np.log(e+1.8*bc*q)/(np.log(e+1.8*bc*q)+c2*q*q)\n \n # Baryon transfer function\n bb = 0.5+(om_b/om_m) + (3.-2.*om_b/om_m)*np.sqrt((17.2*om_m*h*h)**2.+1.)\n bn = 8.41*(om_m*h*h)**0.435\n ss = s/(1.+(bn/rk/s)**3.)**(1./3.)\n tb = np.log(e+1.8*q)/(np.log(e+1.8*q)+c1*q*q)/(1+(rk*s/5.2)**2.)\n fac = np.exp(-(rk/rks)**1.4)\n tb = (tb+ab*fac/(1.+(bb/rk/s)**3.))*np.sin(rk*ss)/rk/ss\n\n # Total transfer function\n T = (om_b/om_m)*tb+(1-om_b/om_m)*tc\n\n # Power spectrum and normalization\n #delta_H = 1.94e-5*om_m**(-0.785-0.05*np.log(om_m))*np.exp(-0.95*n_tld-0.169*n_tld**2.)\n #power_tmp = delta_H**2.*(const.c*rk/self.H0)**(3.+self.ns)/rk**3.*(2.*np.pi**2.)*T**2.\n power_tmp = k**self.ns*(2.*np.pi**2.)*T**2.\n norm = sigma_8/self.compute_sigma_8(k = k, pk = power_tmp)\n power_tmp *= norm**(2.)\n \n # Different redshifts\n nz = len(np.atleast_1d(z))\n if nz == 1:\n z = np.array([z])\n nk = len(np.atleast_1d(k))\n Pk = np.zeros((nz,nk))\n for i in range(nz):\n Pk[i] = power_tmp*(self.growth_factor_scale_independent(z[i])/self.growth_factor_scale_independent(0.))**2.\n\n return k, Pk",
"def ekf(z_k_observation_vector, state_estimate_k_minus_1, control_vector_k_minus_1, P_k_minus_1, dk):\n ######################### Predict #############################\n # Predict the state estimate at time k based on the state\n # estimate at time k-1 and the control input applied at time k-1.\n state_estimate_k = A_k_minus_1 @ (state_estimate_k_minus_1) + (getB(state_estimate_k_minus_1[2],dk)) @ (control_vector_k_minus_1) + (process_noise_v_k_minus_1)\n print(f'State Estimate Before EKF={state_estimate_k}\\r\\n')\n \n # Predict the state covariance estimate based on the previous\n # covariance and some noise\n P_k = A_k_minus_1 @ P_k_minus_1 @ A_k_minus_1.T + (Q_k)\n \n ################### Update (Correct) ##########################\n # Calculate the difference between the actual sensor measurements\n # at time k minus what the measurement model predicted \n # the sensor measurements would be for the current timestep k.\n measurement_residual_y_k = z_k_observation_vector - (\n (H_k @ state_estimate_k) + (\n sensor_noise_w_k))\n \n print(f'Observation={z_k_observation_vector}\\r\\n')\n \n # Calculate the measurement residual covariance\n S_k = H_k @ P_k @ H_k.T + R_k\n \n # Calculate the near-optimal Kalman gain\n # We use pseudoinverse since some of the matrices might be\n # non-square or singular.\n K_k = P_k @ H_k.T @ np.linalg.pinv(S_k)\n \n # Calculate an updated state estimate for time k\n state_estimate_k = state_estimate_k + (K_k @ measurement_residual_y_k)\n \n # Update the state covariance estimate for time k\n P_k = P_k - (K_k @ H_k @ P_k)\n \n # Print the best (near-optimal) estimate of the current state of the robot\n print(f'State Estimate After EKF={state_estimate_k}\\r\\n')\n \n # Return the updated state and covariance estimates\n return state_estimate_k, P_k",
"def getK1(inp):\n\td0 = getD0(inp)\n\treturn 0.32745 + 1/(2 * d0) - 8/(81 * d0)",
"def get_func(k_center,enk,I,gamma,gamma_k):\n\n def lorentzian_k(k):\n return 1./np.pi * gamma_k / ( (k-k_center)**2 + gamma_k**2)\n\n def lorentzian(k,omega):\n return I * gamma / ( (omega-enk)**2 + gamma**2) * lorentzian_k(k)\n\n return lorentzian",
"def get_correction(d, a, hfov, img_x):\n\n width = 2 * d*math.tan((hfov/2)*math.pi/180) # in meters\n one_meter = img_x / width\n return int(a*one_meter)",
"def calculate_first_order_correction(self,cutoff_matrix_element,L0,**kwargs):\r\n n = kwargs['ket_index']\r\n m = kwargs['bra_index']\r\n if n >= m: return 0.0\r\n evecs = self.evecs\r\n evals = self.evals\r\n # ignore drive terms whose matrix elements are beneath a specificied cutoff for speed-up. \r\n v_nm = (evecs[n].dag()*(self.v*evecs[m]))[0][0][0]\r\n if abs(v_nm) <= cutoff_matrix_element: return 0.0\r\n \r\n k = self.integer_list\r\n rho_s_vectorform = np.reshape(self.density_matrix,(self.dim**2,1),order='F')\r\n\r\n V_nm = (evecs[n]*evecs[m].dag()*(evecs[n].dag()*(self.v*evecs[m])))\r\n L_nm = qt.liouvillian(V_nm)\r\n #b = np.dot(L_nm.full(),rho_0)\r\n b = (L_nm*rho_s_vectorform).data\r\n omega_of_k = (k[n] - k[m] + 1)*self.omega\r\n \r\n A = 1j*omega_of_k * qt.identity(self.dim**2).data - L0.data\r\n \r\n #A = A.full()\r\n #del_rho = la.lstsq(A,b,rcond = 1e-6)[0]\r\n \r\n if omega_of_k == 0:\r\n del_rho = la.lsmr(A,b)[0]\r\n else:\r\n del_rho = spsolve(A,b)\r\n \r\n return nla.norm(del_rho)",
"def wPk(c,r,w):\n r = M.asarray(r)\n return N.trapz(c.k**3*c.pk*w(M.outer(r,c.k)),\n M.log(c.k))/2.0/M.pi**2",
"def disp_surf_calc(kc_x_max, kc_z_max, m_i, wp_e):\n\n # Make vectors of the wave numbers\n kc_z = np.linspace(1e-6, kc_z_max, 35)\n kc_x = np.linspace(1e-6, kc_x_max, 35)\n\n # Turn those vectors into matrices\n kc_x_mat, kc_z_mat = np.meshgrid(kc_x, kc_z)\n\n # Find some of the numbers that appear later in the calculations\n kc_ = np.sqrt(kc_x_mat ** 2 + kc_z_mat ** 2) # Absolute value of k\n theta_ = np.arctan2(kc_x_mat, kc_z_mat) # The angle between k and B\n wc_i = 1 / m_i # The ion gyro frequency\n wp_i = wp_e / np.sqrt(m_i) # The ion plasma frequency\n wp_ = np.sqrt(wp_e ** 2 + wp_i ** 2) # The total plasma frequency\n\n # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n # For every k_perp and k_par, turn the dispersion relation into a\n # polynomial equation and solve it.\n # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n # The polynomial coefficients are calculated\n pol_koeff_8 = -2 * kc_ ** 2\n pol_koeff_8 -= (1 + wc_i ** 2 + 3 * wp_ ** 2) * np.ones(kc_.shape)\n pol_koeff_6 = (2 * kc_ ** 2 + wp_ ** 2) * (1 + wc_i ** 2 + 2 * wp_ ** 2)\n pol_koeff_6 += kc_ ** 4 + (wp_ ** 2 + wc_i) ** 2\n pol_koeff_4 = -kc_ ** 4 * (1 + wc_i ** 2 + wp_ ** 2)\n pol_koeff_4 -= 2 * kc_ ** 2 * (wp_ ** 2 + wc_i) ** 2\n pol_koeff_4 -= (kc_ * wp_) ** 2 * (1 + wc_i ** 2 - wc_i) * (\n 1 + np.cos(theta_) ** 2)\n pol_koeff_4 -= wp_ ** 2 * (wp_ ** 2 + wc_i) ** 2\n pol_koeff_2 = kc_ ** 4 * (wp_ ** 2 * (1 + wc_i ** 2 - wc_i) * np.cos(\n theta_) ** 2 + wc_i * (wp_ ** 2 + wc_i))\n pol_koeff_2 += kc_ ** 2 * wp_ ** 2 * wc_i * (wp_ ** 2 + wc_i) * (\n 1 + np.cos(theta_) ** 2)\n pol_koeff_0 = -kc_ ** 4 * wc_i ** 2 * wp_ ** 2 * np.cos(theta_) ** 2\n\n w_final = np.zeros((10, len(kc_z), len(kc_x)))\n\n # For each k, solve the equation\n for k_z, k_x in itertools.product(range(len(kc_z)), range(len(kc_x))):\n disp_polynomial = [1, 0, pol_koeff_8[k_z, k_x], 0,\n pol_koeff_6[k_z, k_x], 0, pol_koeff_4[k_z, k_x],\n 0, pol_koeff_2[k_z, k_x], 0, pol_koeff_0[k_z, k_x]]\n # theoretically should be real (A. Tjulin)\n w_temp = np.real(np.roots(disp_polynomial))\n # We need to sort the answers to get nice surfaces.\n w_final[:, k_z, k_x] = np.sort(w_temp)\n\n n2_ = kc_ ** 2 / w_final ** 2\n v_ph_c = np.sqrt(1. 
/ n2_)\n va_c = 1 / (wp_e * np.sqrt(m_i))\n v_ph_va = v_ph_c / va_c\n\n diel_tensor = _calc_diel(kc_, w_final, theta_, wp_e, wp_i, wc_i)\n\n e_x, e_y, e_z, e_per, e_tot, e_pol = _calc_e(diel_tensor)\n e_par = (kc_x_mat * e_x + kc_z_mat * e_z) / kc_\n\n b_x, b_y, b_z, b_par, b_per, b_pol, b_tot = _calc_b(kc_x_mat, kc_z_mat,\n w_final, e_x, e_y, e_z)\n\n dk_x, dk_z = [kc_x_mat[1], kc_z_mat[1]]\n dw_x, dw_z = [np.zeros(w_final.shape) for _ in range(2)]\n dw_x[:, :, 1:] = np.diff(w_final, axis=2)\n dw_z[:, 1:, :] = np.diff(w_final, axis=1)\n v_x, v_z = [dw_ / dk for dw_, dk in zip([dw_x, dw_z], [dk_x, dk_z])]\n\n s_par, s_tot = _calc_s(e_x, e_y, e_z, b_x, b_y, b_z)\n\n # Compute ion and electron velocities\n v_ex, v_ey, v_ez, v_ix, v_iy, v_iz = _calc_vei(m_i, wc_i, w_final,\n e_x, e_y, e_z)\n\n # Ratio of parallel and perpendicular to B speed\n vepar_perp = v_ez * np.conj(v_ez)\n vepar_perp /= (v_ex * np.conj(v_ex) + v_ey * np.conj(v_ey))\n vipar_perp = v_iz * np.conj(v_iz)\n vipar_perp /= (v_ix * np.conj(v_ix) + v_iy * np.conj(v_iy))\n\n # Total particle speeds\n v_e2 = v_ex * np.conj(v_ex) + v_ey * np.conj(v_ey) + v_ez * np.conj(v_ez)\n v_i2 = v_ix * np.conj(v_ix) + v_iy * np.conj(v_iy) + v_iz * np.conj(v_iz)\n\n # Ion and electron energies\n m_e = -1\n en_e = 0.5 * m_e * v_e2\n en_i = 0.5 * m_i * v_i2\n\n # Ratio of particle and field energy densities\n ratio_part_field = _calc_part2fields(wp_e, en_e, en_i, e_tot, b_tot)\n\n # Continuity equation\n dn_e_n, dn_i_n, dne_dni = _calc_continuity(kc_x_mat, kc_z_mat, w_final,\n v_ex, v_ez, v_ix, v_iz)\n\n dn_e_n_db_b = dn_e_n / b_tot\n dn_i_n_db_b = dn_i_n / b_tot\n\n dn_e_n_dbpar_b = dn_e_n / b_par\n dn_i_n_dbpar_b = dn_i_n / b_par\n\n dn_e = dn_e_n * wp_e ** 2\n k_dot_e = e_x * kc_x_mat + e_z * kc_z_mat\n k_dot_e = np.sqrt(k_dot_e * np.conj(k_dot_e))\n\n # Build output dict\n extra_param = {\"Degree of electromagnetism\": np.log10(b_tot / e_tot),\n \"Degree of longitudinality\": np.abs(e_par) / e_tot,\n \"Degree of parallelity E\": e_z / e_tot,\n \"Degree of parallelity B\": np.sqrt(\n b_z * np.conj(b_z)) / b_tot,\n \"Ellipticity E\": e_pol, \"Ellipticity B\": b_pol,\n \"E_part/E_field\": np.log10(ratio_part_field),\n \"v_g\": np.sqrt(v_x ** 2 + v_z ** 2),\n \"v_ph/v_a\": np.log10(v_ph_va),\n \"E_e/E_i\": np.log10(en_e / en_i),\n \"v_e/v_i\": np.log10(np.sqrt(v_e2 / v_i2)),\n \"v_epara/v_eperp\": np.log10(vepar_perp),\n \"v_ipara/v_iperp\": np.log10(vipar_perp),\n \"dn_e/dn_i\": np.log10(dne_dni),\n \"(dn_e/n)/ (dB/B)\": np.log10(dn_e_n_db_b),\n \"(dn_i/n)/(dB/B)\": np.log10(dn_i_n_db_b),\n \"(dn_i/n)/(dBpar/B)\": np.log10(dn_i_n_dbpar_b),\n \"(dn_e/n)/(dB/B)\": np.log10(dn_e / k_dot_e),\n \"(dn_e/n)/(dBpar /B)\": np.log10(dn_e_n_dbpar_b),\n \" Spar/Stot\": s_par / s_tot}\n\n for k, v in zip(extra_param.keys(), extra_param.values()):\n extra_param[k] = np.transpose(np.real(v), [0, 2, 1])\n\n kx_ = np.transpose(kc_x_mat)\n kz_ = np.transpose(kc_z_mat)\n wf_ = np.transpose(w_final, [0, 2, 1])\n\n return kx_, kz_, wf_, extra_param",
"def rb_nfw(m200,c,z):\n\n #Setting up cosmology\n rho0=1.4876862e+11;\n omegam=0.238000;\n msun=1.98892e+33;\n delta_vir=200.;\n G=6.6730003e-08;\n kmpsToCmps = 1.0*10.**(5.);\n Rvir=200.;\n kpc2cm=3.086*10.**(21);\n \n deltac = (delta_vir/3.)*( (c**3.)/( np.log(1.+c) - (c / (1.+c))));\n rho_crit =rho0*omegam*(1.+z)**3.;\n r200 =(m200/delta_vir / rho_crit / (4.*np.pi/3.) )**0.33333 * 1000. ;\n v200 = ((6.67e-8 * m200 * msun / (r200* 3.086*10.**(21.)) )**0.5)/1e5 ;\n \n r =np.linspace(1.,3.*r200,500); # kpc\n rs = r200 / c; \n ss=(((r/rs)*(1.+(r/rs))**2.)*1000.**3);\n rho = (rho_crit * deltac)/(ss); \n M_r = 4.*np.pi* integrate.cumtrapz((r**2)*rho, r,initial=0.)\n \n x = r/r200 ;\n tab=1./x*(np.log(1.+c*x)-c*x/(1.+c*x))/(np.log(1.+c)-c/(1.+c));\n vcirc = v200*(tab)**0.5 ;\n maxvcirc = np.max(vcirc) ;\n q=np.where((vcirc == np.max(vcirc)));\n maxvcircr = r[q];\n \n \n # Now compute V_Esc as per nfw.pro Binney & Tremaine equation 2.31\n Phi_new = r * 0.0;\n vesc = r * 0.0 ;\n for ir in range(2,len(r)-4):\n term1 = (np.trapz(rho[0:ir]*(r[0:ir]**2.),x=r[0:ir])/(r[ir]))* msun; \n term2 = np.trapz(rho[ir:len(r)]*r[ir:len(r)],x=r[ir:len(r)])*msun; \n Phi_new[ir] = -4. *np.pi*6.67e-8*(term1 + term2)/3.086e21 ;\n vesc[ir] = ((2. * np.abs(Phi_new[ir]))**0.5) / 1e5 ; # See Binney & Tremaine (2-22) \n \n\n # Chage Units to do velocity dispersion calculations\n rcm=r*kpc2cm;\n\n #M_r in gram\n M_r_gram=M_r*msun;\n\n Phi=G*integrate.cumtrapz((M_r_gram/rcm**(2)),rcm,initial=0);\n \n Phi=Phi*(1./((1e5)**2.));#%km^2/s^2\n Phi_out=np.max(Phi);\n\n k=0.41;\n a=0.29;\n\n sig = np.sqrt(a *(( Phi/Phi_out)**(k))*(Phi_out -Phi));\n \n nfw={}\n qqqt=np.where((vesc==0.))\n vesc[qqqt]=1e-99\n\n nfw[\"m200\"]=m200;\n nfw[\"c\"]=c;\n nfw[\"r200\"]=r200;\n nfw[\"v200\"]=v200;\n nfw[\"maxvcirc\"]=maxvcirc;\n nfw[\"maxvcircr\"]=maxvcircr;\n nfw[\"r\"]=r;\n nfw[\"rho\"]=rho;\n nfw[\"vcirc\"]=vcirc;\n nfw[\"M_r\"]=M_r;\n nfw[\"sig_v\"]=sig;\n nfw[\"vesc\"]=vesc;\n \n return nfw",
"def derive_RiekeLebofsky(wavelength):\n filters = ['U', 'B', 'V', 'R', 'I', 'J', 'H', 'K', 'L', 'M', \n '[8.0]', '[8.5]', '[9.0]', '[9.5]', '[10.0]', '[10.5]', \n '[11.0]', '[11.5]', '[12.0]', '[12.5]', '[13.0]']\n #wave = np.array([0.365, 0.445, 0.551, 0.658, 0.806, 1.25, 1.635, 2.2, \n # 3.77, 4.68, 4.75, 8.0, 8.5, 9.0, 9.5, 10.0, 10.5, 11.0,\n # 11.5, 12.0, 12.5, 13.0])\n \n # Wavelengths from Nishiyama+09 plot of RL+85 law...slightly different than standard, \n # drop N filter\n wave = np.array([0.365, 0.445, 0.551, 0.658, 0.806, 1.17, 1.57, 2.12, \n 3.40, 4.75, 8.0, 8.5, 9.0, 9.5, 10.0, 10.5, 11.0,\n 11.5, 12.0, 12.5, 13.0])\n A_Av = np.array([1.531, 1.324, 1.00, 0.748, 0.482, 0.282, 0.175, 0.112,\n 0.058, 0.023, 0.02, 0.043, 0.074, 0.087, 0.083,\n 0.074, 0.060, 0.047, 0.037, 0.030, 0.027])\n # Want to change this from A/Av to A/AK\n k_ind = np.where(np.array(filters) == 'K')\n Ak_Av = A_Av[k_ind]\n Av_Ak = 1.0 / Ak_Av\n\n A_Ak = A_Av * Av_Ak\n \n # Interpolate over the curve\n spline_interp = interpolate.splrep(wave, A_Ak, k=3, s=0)\n A_Ak_at_wave = interpolate.splev(wavelength, spline_interp)\n\n return A_Ak_at_wave",
"def k_h(self):\n # Convert `self.gamma` to a regular length scale.\n gamma_scale = B.sqrt(1 / (2 * self.gamma))\n k_h = EQ().stretch(gamma_scale) # Kernel of filter before window\n k_h *= lambda t: B.exp(-self.alpha * t**2) # Window\n if self.causal:\n k_h *= lambda t: B.cast(self.dtype, t >= 0) # Causality constraint\n return k_h",
"def extinction_constant(Z, hkl, th0=0.):\n return rem_angstrom * hc * abs(sf) / ( math.pi * volume * cos(tB) )",
"def calc_kwta_inhibition(self) -> None:\n top_m_units = self.units.top_k_net_indices(self.spec.k + 1)\n g_i_thr_m = self.units.g_i_thr(top_m_units[-1])\n g_i_thr_k = self.units.g_i_thr(top_m_units[-2])\n self.gc_i = g_i_thr_m + 0.5 * (g_i_thr_k - g_i_thr_m)",
"def calc_Hcp_ij(self):\n\t\n\thp0_delayed = self.hp_wavelet.get_Psi(self.xi[0] + self.Orbit.L/l.Clight)\n\thp0 = self.hp_wavelet.get_Psi(self.xi[0])\n\thc0_delayed = self.hc_wavelet.get_Psi(self.xi[0] + self.Orbit.L/l.Clight)\n\thc0 = self.hc_wavelet.get_Psi(self.xi[0])\n\t\n\thp1_delayed = self.hp_wavelet.get_Psi(self.xi[1] + self.Orbit.L/l.Clight)\n\thp1 = self.hp_wavelet.get_Psi(self.xi[1])\n\thc1_delayed = self.hc_wavelet.get_Psi(self.xi[1] + self.Orbit.L/l.Clight)\n\thc1 = self.hc_wavelet.get_Psi(self.xi[1])\n\t\n\thp2_delayed = self.hp_wavelet.get_Psi(self.xi[2] + self.Orbit.L/l.Clight)\n\thp2 = self.hp_wavelet.get_Psi(self.xi[2])\n\thc2_delayed = self.hc_wavelet.get_Psi(self.xi[2] + self.Orbit.L/l.Clight)\n\thc2 = self.hc_wavelet.get_Psi(self.xi[2])\n\t\n\tself.Hpij[0,1] = hp1_delayed - hp0\n\tself.Hpij[1,0] = hp0_delayed - hp1\n\n\tself.Hpij[0,2] = hp2_delayed - hp0\n\tself.Hpij[2,0] = hp0_delayed - hp2\n\n\tself.Hpij[1,2] = hp2_delayed - hp1\n\tself.Hpij[2,1] = hp1_delayed - hp2\n\t\n\t# cross-polarization\n\tself.Hcij[0,1] = hc1_delayed - hc0\n\tself.Hcij[1,0] = hc0_delayed - hc1\n\n\tself.Hcij[0,2] = hc2_delayed - hc0\n\tself.Hcij[2,0] = hc0_delayed - hc2\n\n\tself.Hcij[1,2] = hc2_delayed - hc1\n\tself.Hcij[2,1] = hc1_delayed - hc2\n\t\n\treturn",
"def func_omega_c_318(Ic, d_23):\n return (d_23/hbar)*np.sqrt((2*Ic)/(c*epsilon_0))",
"def computeWeights(xcorr_km,ycorr_km,Tmask,kmpix,dx):\n\n #create grid z where each point represents radius from center of Titan\n lim = kmpix * len(Tmask[0])/2.\n newxpix = int(kmpix * len(Tmask[0]) / dx)\n newypix = int(kmpix * len(Tmask[1]) / dx)\n \n # Adjust dx to match the new (integer) number of pixels\n dx = kmpix * len(Tmask[0]) / newxpix\n \n # new pixel area (sr)\n Sr_pix = np.arctan(dx/Titan_dist)**2\n \n #Need to apply Gaussian taper to the extraction region (not convolution!), to simulate effect of the telescope beam\n TmaskT = taperMask(Tmask)\n \n #Resample the extraction mask to the new grid\n g = zoom(TmaskT, (newxpix/len(Tmask[0]), newypix/len(Tmask[1])), order=1)\n\n x, y = np.indices(g.shape)\n x = dx * (x-(x.max()-x.min())/2.0)\n y = dx * (y-(y.max()-y.min())/2.0)\n z = np.hypot(x, y)\n \n midpoints = 0.5 * (radii[1:] + radii[:-1])\n \n #Change values of z to the angles corresponding to the mid-point radii\n angles = [np.degrees(np.arcsin(float(r)/float(top_atm))) for r in midpoints] #top of atmosphere emission angle\n for i in range(len(angles)):\n z[np.logical_and(z >= radii[i], z < radii[i+1])] = angles[i]\n \n z[z >= radii[-1]] = float('NaN')\n \n #compute normalized weights, taking account of any blank sky (outside top_atm) inside the aperture\n wts = {}\n for val in angles:\n garr = g[np.where(z == val)]\n wts[val] = sum(garr)\n gnanarr = g[np.where(np.isnan(z))] \n s = sum(wts.values())+sum(gnanarr)\n for key,val in wts.items():\n val = float(val)/float(s)\n wts[key] = val\n\n meanangle = sum([val*key for key,val in wts.items()])\n print('Mean emission angle: %.2f deg' %(meanangle))\n \n ########################################################################\n\n #Now compute mean latitude and longitude of observation\n\n #Finding vector of true north of Titan\n northx = -np.sin(ccw)*np.cos(subobslat)\n northy = np.cos(ccw)*np.cos(subobslat)\n northz = np.sin(subobslat) \n\n with np.errstate(divide='ignore',invalid='ignore'): #We actually want all y > Titan_radius + top_atm to be nans, so the invalid inputs to arcsin are helping here\n zcoord = np.sqrt((top_atm)**2 - x**2 - y**2) #these are the actual z-coordinates (distance from Titan center to observer) at each x,y point\n dprod = (northx*x + northy*y + northz*zcoord)/(top_atm) #dot product of north pole vector and each vector in model planet\n z_lat = 90 - np.degrees(np.arccos(dprod)) #latitude of each point on the 2-d grid\n\n conv = np.multiply(g,z_lat)\n meanlat = np.nansum(conv)/np.nansum(g)\n print('Mean top-of-atmosphere latitude: %.2f deg' %(meanlat))\n\n ########################################################################\n\n #Plots\n \n if showplot:\n # Plot extraction aperture overlaid on Titan with lines of latitude\n\n fig = plt.figure() # a new figure window\n ax = fig.add_subplot(1, 1, 1)\n\n img=ax.imshow(g.transpose(),extent=[x.min(), x.max(), y.min(),y.max()], origin='lower', interpolation='nearest')\n \n titanlimb = plt.Circle((0, 0), 2575, color='k',fill=false)\n titanatm = plt.Circle((0, 0), top_atm, color='w',linestyle='dashed',fill=false)\n ax.add_artist(titanlimb)\n ax.add_artist(titanatm)\n \n #Overlay latitudes as contours\n ctr=ax.contour(z_lat.transpose(),colors='gold',extent=[x.min(), x.max(), y.min(),y.max()],linestyles='dashed')\n ax.clabel(ctr, inline=1, fontsize=12, fmt='%.0f')\n for line in ctr.collections: #Making negative contours solid instead of dashed\n if line.get_linestyle() != [(None, None)]:\n line.set_linestyle([(None, None)])\n \n #Overlay the original 
extraction aperture (interpolated to new grid so it looks a bit pixelated)\n ax.contour(g.transpose(),levels=[0.999],extent=[x.min(), x.max(), y.min(),y.max()],colors='0.75',linestyles=\"dotted\")\n \n ax.set_xlabel('Distance (km)',fontsize=16)\n ax.set_ylabel('Distance (km)',fontsize=16)\n ax.set_title('NEMESIS .spx weights with respect to atmosphere', fontsize=16)\n \n #Colorbar\n cbar = fig.colorbar(img)\n cbar.set_label('Weight',fontsize=14)\n \n fig.show()\n fig.savefig(outimg)\n\n #Other diagnostic plots\n \n ## #Plot Gaussian\n ## fig0 = plt.figure(figsize = (15,15))\n ## ax = fig0.add_subplot(111) \n ## ax.imshow(g,cmap='RdBu',origin='lower')\n ## plt.show()\n ## #Plot z\n ## fig1 = plt.figure(figsize = (15,15))\n ## ax = fig1.add_subplot(111)\n ## ax.imshow(z, cmap='Blues',origin='lower')\n ## plt.show()\n\n ## #Plot convolution\n ## z_flat = np.copy(z)*0 + 1\n ## conv_flat = np.multiply(g,z_flat)\n ## conv = np.multiply(g,z)\n ## fig2,ax = plt.subplots(figsize = (15,15))\n ## im = ax.imshow(conv_flat, cmap='RdBu',origin='lower')\n ## ctr = ax.contour(z,colors='yellow')\n ## ax.clabel(ctr, inline=1, fontsize=14, fmt='%1.1f')\n ## cbar = fig2.colorbar(im,orientation=\"horizontal\")\n ## cbar.ax.set_xlabel('Weighting',fontsize=18)\n \n return wts,meanlat,g,Sr_pix",
"def _dwd_apply_view_zenith_angle_correction(self, chn):\n if not self._is_solar_channel(chn) and \\\n self[chn].info.get(\"view_zen_corrected\", None) is None:\n view_zen_chn_data = self[self.area.area_id + \"_VZA\"].data\n if view_zen_chn_data is not None:\n view_zen_corr_chn = self[chn].viewzen_corr(view_zen_chn_data)\n self[chn].data = view_zen_corr_chn.data.copy()\n del(view_zen_corr_chn)\n else:\n LOGGER.error(\"Missing satellite zenith angle data: \" +\n \"atmospheric correction not possible.\")",
"def _dwd_apply_sun_zenith_angle_correction(self, chn, backup_orig_data=False):\n if self._is_solar_channel(chn) and \\\n self[chn].info.get(\"sun_zen_corrected\", None) is None:\n if self.area.lons is None or self.area.lats is None:\n self.area.lons, self.area.lats = self.area.get_lonlats()\n\n if backup_orig_data:\n self[chn].data_orig = self[chn].data\n self[chn].info_orig = copy.deepcopy(self[chn].info)\n \n sun_zen_chn = self[chn].sunzen_corr(get_first(self.time_slot),\n limit=85.)\n self[chn].data = sun_zen_chn.data.copy()\n del(sun_zen_chn)"
] | [
"0.6049725",
"0.593412",
"0.5909316",
"0.5889606",
"0.5803568",
"0.57738686",
"0.5743096",
"0.5700665",
"0.5682678",
"0.56737506",
"0.56637645",
"0.55764914",
"0.5571384",
"0.555053",
"0.5475023",
"0.54531264",
"0.54312754",
"0.54308134",
"0.53924924",
"0.53894144",
"0.5381827",
"0.53721184",
"0.53623694",
"0.53541434",
"0.5347549",
"0.53230023",
"0.5310782",
"0.5310409",
"0.53095865",
"0.5307841"
] | 0.6409771 | 0 |
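For reference, the Hogg et al. (2002) K-correction described in the query above, written in flux-density (f_lambda) form with symbols that appear to correspond to the arguments of the k_correction call in the document (z, f_lambda, g_lambda_R, g_lambda_Q, R, Q), is

\[
K_{QR}(z) = -2.5\,\log_{10}\!\left[\frac{1}{1+z}\;
\frac{\int d\lambda_o\,\lambda_o\,f_\lambda(\lambda_o)\,R(\lambda_o)\;\int d\lambda_e\,\lambda_e\,g^{Q}_\lambda(\lambda_e)\,Q(\lambda_e)}
{\int d\lambda_o\,\lambda_o\,g^{R}_\lambda(\lambda_o)\,R(\lambda_o)\;\int d\lambda_e\,\lambda_e\,f_\lambda\!\big([1+z]\,\lambda_e\big)\,Q(\lambda_e)}\right],
\qquad m_R = M_Q + \mathrm{DM}(z) + K_{QR}(z).
\]

The "modified" definition presumably amounts to letting the reference spectra g^R and g^Q differ (an IRAC 4.5 um zero-point flux for the observed band R versus Vega or another zero point for the output band Q), as suggested by the separate g_lambda_R and g_lambda_Q arguments in the code.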
Computes the J-band absolute magnitudes for use in the Assef et al. (2011) luminosity function. We will use the observed apparent 3.6 um magnitude and assume a Polleta QSO2 SED for all objects to K-correct to the absolute FLAMINGOS J-band magnitude. Returns | def j_band_abs_mag(self):
# Load in the IRAC 3.6 um filter as the observed filter
irac_36 = SpectralElement.from_file(self._irac_filter, wave_unit=u.um)
flamingos_j = SpectralElement.from_file(self._j_band_filter, wave_unit=u.nm)
# We will use the official IRAC 3.6 um zero-point flux
irac_36_zp = 280.9 * u.Jy
for cluster_id, cluster_info in self._catalog_dictionary.items():
# Retrieve the cluster redshift from the SPT catalog
catalog_idx = cluster_info['SPT_cat_idx']
cluster_z = self._spt_catalog['REDSHIFT'][catalog_idx]
# Get the 3.6 um apparent magnitudes from the catalog
se_catalog = cluster_info['catalog']
irac_36_mag = se_catalog['I1_MAG_APER4']
# Given the observed IRAC 3.6 um photometry, compute the rest-frame J-band absolute (Vega) magnitude.
j_abs_mag = k_corr_abs_mag(apparent_mag=irac_36_mag, z=cluster_z, f_lambda_sed=self._sed,
zero_pt_obs_band=irac_36_zp, zero_pt_em_band='vega', obs_filter=irac_36,
em_filter=flamingos_j, cosmo=self._cosmo)
# Store the J-band absolute magnitude in the catalog and update the data structure
se_catalog['J_ABS_MAG'] = j_abs_mag
cluster_info['catalog'] = se_catalog | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def j_band_abs_mag(self):\n\n # Load in the IRAC 3.6 um filter as the observed filter\n irac_36 = SpectralElement.from_file(self._irac_filter, wave_unit=u.um)\n flamingos_j = SpectralElement.from_file(self._j_band_filter, wave_unit=u.nm)\n\n # We will use the official IRAC 3.6 um zero-point flux\n irac_36_zp = 280.9 * u.Jy\n\n for cluster_id, cluster_info in self._catalog_dictionary.items():\n # Get the 3.6 um apparent magnitudes and photometric redshifts from the catalog\n se_catalog = cluster_info['catalog']\n irac_36_mag = se_catalog['I1_MAG_APER4']\n galaxy_z = se_catalog['REDSHIFT']\n\n # Given the observed IRAC 3.6 um photometry, compute the rest-frame J-band absolute (Vega) magnitude.\n j_abs_mag = k_corr_abs_mag(apparent_mag=irac_36_mag, z=galaxy_z, f_lambda_sed=self._sed,\n zero_pt_obs_band=irac_36_zp, zero_pt_em_band='vega', obs_filter=irac_36,\n em_filter=flamingos_j, cosmo=self._cosmo)\n\n # Store the J-band absolute magnitude in the catalog and update the data structure\n se_catalog['J_ABS_MAG'] = j_abs_mag\n cluster_info['catalog'] = se_catalog",
"def calc_jhk_mag(self, data):\n\n # Pull all the magnitudes from the series\n self._all_queried_mag_series = data.loc[GSC_BAND_NAMES]\n\n # Pull magnitude errors for each band, and replace missing errors with 2.5% of the magnitude value\n mag_err_list = [self.gsc_series[ind + 'Err'] if self.gsc_series[ind + 'Err'] != -999\n else self._all_queried_mag_series[ind] * BAND_ERR for ind in self._all_queried_mag_series.index]\n self._all_queried_mag_err_series = pd.Series(mag_err_list, index=self._all_queried_mag_series.index + 'Err')\n\n # List of the magnitude names that are not fill values in the series\n self._present_queried_mags = list(self._all_queried_mag_series[self._all_queried_mag_series != -999].index)\n\n # Dictionary of convert methods\n method_list = []\n for i in ['tmassJmag', 'tmassHmag', 'tmassKsMag']:\n switcher = OrderedDict([\n (i, 'convert_tmass_to_jhk'),\n ('SDSSgMag, SDSSzMag', 'convert_sdssgz_to_jhk'),\n ('SDSSgMag, SDSSiMag', 'convert_sdssgi_to_jhk'),\n ('SDSSiMag, SDSSzMag', 'convert_sdssiz_to_jhk'),\n ('JpgMag, NpgMag', 'convert_gsc2bjin_to_jhk'),\n ('FpgMag, NpgMag', 'convert_gsc2rfin_to_jhk'),\n ('JpgMag, FpgMag', 'convert_gsc2bjrf_to_jhk'),\n ])\n\n # Pull the first entry in the OrderedDict that matches what values are present.\n for key, value in switcher.items():\n key_list = key.split(', ')\n if set(key_list).issubset(self._present_queried_mags):\n setattr(self, '{}_convert_method'.format(i[5].lower()), value)\n break\n if getattr(self, '{}_convert_method'.format(i[5].lower())) is None:\n raise ValueError('There is not enough information on this guide star to get its {} magnitude'.format(i))\n\n # Get the method\n method = getattr(conversions, getattr(self, '{}_convert_method'.format(i[5].lower())), lambda: \"Invalid\")\n method_list.append(method)\n\n # Create a new series with the edited data (in case uncertainties were replaced)\n edited_data_series = pd.concat([self._all_queried_mag_series, self._all_queried_mag_err_series])\n\n # Run conversions\n self.j_mag, self.j_mag_err = method_list[0](data=edited_data_series, output_mag='J')\n self.h_mag, self.h_mag_err = method_list[1](data=edited_data_series, output_mag='H')\n self.k_mag, self.k_mag_err = method_list[2](data=edited_data_series, output_mag='K')\n\n # Create new attribute with updated series\n self._all_calculated_mag_series = copy.deepcopy(self._all_queried_mag_series)\n self._all_calculated_mag_series.loc[['tmassJmag', 'tmassHmag', 'tmassKsMag']] = \\\n self.j_mag, self.h_mag, self.k_mag\n\n self._all_calculated_mag_err_series = copy.deepcopy(self._all_queried_mag_err_series)\n self._all_calculated_mag_err_series.loc[['tmassJmagErr', 'tmassHmagErr', 'tmassKsMagErr']] = \\\n self.j_mag_err, self.h_mag_err, self.k_mag_err\n\n self._present_calculated_mags = self._present_queried_mags + [a for a in\n ['tmassJmag', 'tmassHmag', 'tmassKsMag']\n if a not in self._present_queried_mags]\n\n return self.j_mag, self.j_mag_err, self.h_mag, self.h_mag_err, self.k_mag, self.k_mag_err",
"def magToJy(mag,emag,wband,zpFile=None):\n if zpFile == None:\n zpFile = Path(os.environ['SED_BUILDER']) / Path('zero_points.dat')\n zpWave, zpF0 = read_zp(zpFile)\n F0 = zpF0[wband]\n jy = (10**(-float(mag)/2.5))*F0\n if emag != '--':\n ejy = (float(emag)/2.5)*jy*log(10)\n else:\n ejy = np.nan\n \n return jy, ejy",
"def calc_magnitude(box,octant):\n # Read the Mi(z=2) magnitudes for the box.\n miz2 = FH.read_file(box)['Miz2'][:]\n # Read the index for each QSO in the octant, and get the Mi(z=2).\n data = FH.read_file(octant)\n zz = data['Z']\n dmod = data['DMOD']\n miz2 = miz2[data['INDX']]\n # Now convert to apparent i-band magnitude using the k-correction.\n # If a tabulated k-correction is available, use that, otherwise\n # default to a power-law continuum approximation.\n # See discussion in Ross++13, Appendix B and Section 4.\n kfile=os.getenv('MOCKINGDESI_BASE')+\"/data/qso-iband-k-correction.txt\"\n if os.path.exists(kfile):\n print(\"Using K-correction from \"+kfile)\n kcorr = np.loadtxt(kfile)\n kcorr = np.interp(zz,kcorr[:,1],kcorr[:,2])\n else:\n print(\"Using power-law K-correction\")\n alpha = -0.5\n kcorr = -2.5*(1+alpha)*np.log10( (1+zz)/(1+2.0) )\n gmi = np.poly1d([0.1502,-0.9886,2.147,-1.758,0.6397])\t# See notes.\n rmi = np.poly1d([-0.1482,1.636,-6.716,12.55,-10.39,3.017])\n magi = miz2 + dmod + kcorr\t# e.g. Ross++13, Eq. 5\n magg = magi + gmi(zz.clip(0.5,3.5))\n magr = magi + rmi(zz.clip(0.5,3.5))\n # and write the results\n data = {}\n data['GMAG'] = magg.astype('f4')\n data['RMAG'] = magr.astype('f4')\n FH.write_file(octant,data)\n #",
"def getMagnitudes(self):\n return self._bmag, self._vmag, self._jmag, self._hmag, self._kmag",
"def convert_F_vs_mag(value, F_0=None, band='H', system='Johnson', \n conversion='to_mag'): \n \n dico_zero_pts_Jo = {'U': [0.36,1823.],\n 'B': [0.44,4130.],\n 'V': [0.55,3781.],\n 'R': [0.71,2941.],\n 'I': [0.97,2635.],\n 'J': [1.25,1603.],\n 'H': [1.60,1075.],\n 'K': [2.22,667.],\n 'L': [3.54,288.],\n 'M': [4.80,170.],\n 'N': [10.6,36.],\n 'O': [21.0,9.4]}\n dico_zero_pts_2M = {'J': [1.235,1594.],\n 'H': [1.662,1024.],\n 'K': [2.159,666.7]}\n dico_zero_pts_UK = {'V': [0.5556,3540.], # TOKUNAGA (from Cohen 1992)\n 'I': [0.9,2250.], # UKIRT webpage\n 'J': [1.215,1630.], # TOKUNAGA (from Cohen 1992)\n 'H': [1.654,1050.], # TOKUNAGA (from Cohen 1992)\n 'Ks': [2.157,667.], # TOKUNAGA (from Cohen 1992)\n 'K': [2.179,655.], # TOKUNAGA (from Cohen 1992) \n 'L': [3.547,276.], # TOKUNAGA (from Cohen 1992) \n \"L'\": [3.761,248.], # TOKUNAGA (from Cohen 1992) \n 'M': [4.769,160.], # TOKUNAGA (from Cohen 1992) \n '8.7': [8.756,50.], # TOKUNAGA (from Cohen 1992) \n 'N': [10.472,35.3], # TOKUNAGA (from Cohen 1992) \n '11.7': [11.653,28.6], # TOKUNAGA (from Cohen 1992) \n 'Q': [20.13,9.7]} # TOKUNAGA (from Cohen 1992)\n dico_zero_pts_ESO = {'J': [1.228,3.44e-9], # van der Bliek 1996\n 'H': [1.651,1.21e-9], # van der Bliek 1996\n 'K': [2.216,4.12e-10], # van der Bliek 1996\n \"L'\": [3.771,5.58e-11], # van der Bliek 1996\n \"M\": [4.772,2.21e-11]} # van der Bliek 1996 \n \n if F_0 is None:\n if system == 'Johnson' and band in dico_zero_pts_Jo:\n dico_F_0 = dico_zero_pts_Jo\n elif system == '2MASS' and band in dico_zero_pts_2M:\n dico_F_0 = dico_zero_pts_2M\n elif system == 'UKIRT' and band in dico_zero_pts_UK:\n dico_F_0 = dico_zero_pts_UK\n elif system == 'ESO' and band in dico_zero_pts_UK:\n dico_F_0 = dico_zero_pts_ESO \n else:\n msg = 'Combination of band name and band system not recognized.'\n raise TypeError(msg)\n F_0 = dico_F_0[band][1]\n if system == 'ESO':\n # convert from W m-2 mu-1 to Jy\n F_0 = convert_F_units(F_0, dico_F_0[band][0], in_unit='si', \n out_unit='jy')\n \n if conversion == 'to_mag':\n return -2.5*np.log10(value/F_0)\n elif conversion == 'to_flux':\n return F_0*np.power(10.,-value/2.5)\n else:\n msg = \"conversion not recognized, must be 'to_mag' or 'to_flux'.\"\n raise TypeError(msg)",
"def get_phi_lam_obs(z, qlf, lLfrac_lam_obs_min, lLfrac_lam_obs_max, lam_eff_filter):\n\n #Start by getting the value of Lstar in units of 10^10 Lsun, which will be useful later on.\n Lstar = 10.**(qlf.log_Lstar(z))*qlf.Lstar_units\n Lstar_10 = (Lstar/(1e10*L_sun)).to(1.).value\n\n #Set the grid in bolometric L/Lstar.\n lLfrac_min = -3.0\n lLfrac_max = 3.0 #10.0\n dlLfrac = 0.01\n lLfrac = np.arange(lLfrac_min,lLfrac_max,dlLfrac)\n Lfrac = 10.**lLfrac\n\n #Get the bolometric QLF evaluated in the grid of Lfrac.\n phi_bol = qlf.phi_bol_Lfrac(Lfrac, z)\n\n #Transform the bolometric QLF to the intrinsic luminosity QLF in the band. We assume that the bolometric correction in all bands of interest is proportional to the one in the B-band, as is done in the Hopkins07 provided code.\n phi_lam = phi_bol*jacobian(Lfrac, Lstar_10, qlf)\n Lfrac_lam = get_Lfrac_lam(Lfrac, Lstar_10, qlf)\n lLfrac_lam = np.log10(Lfrac_lam)\n #dlLfrac_lam = dlLfrac/jacobian(Lfrac, Lstar_10, qlf)\n\n #Since there is a natural dispersion to the bolometric corrections, we convolve phi_lam with the uncertainty function to take it into account.\n phi_lam_2D = np.tile(phi_lam, (len(phi_lam), 1))\n sigma = qlf.get_sigma(Lfrac, Lstar_10, lam_eff_filter/(1.+z))\n lLfrac_lam_sig = lLfrac_lam\n sigma_2D = np.tile(sigma, (len(sigma), 1))\n lLfrac_lam_2D = np.tile(lLfrac_lam, (len(lLfrac_lam), 1))\n lLfrac_lam_sig_2D = np.tile(lLfrac_lam_sig, (len(lLfrac_lam), 1)).T\n\n p = (2.*np.pi)**-0.5 * sigma_2D**-1 * np.exp( -0.5*( (lLfrac_lam_sig_2D - lLfrac_lam_2D)/sigma_2D)**2)\n\n phi_lam_sig = np.sum(phi_lam_2D*p * dlLfrac, axis=1)\n\n #The next step is to convolve with the obscuration function. The issue here is that the observed luminosity in the band is a function of the intrinsic luminosity and the obscuration.\n lNH_min = 20.\n lNH_max = 26.\n dlNH = 0.01\n lNH = np.arange(lNH_min, lNH_max, dlNH)\n\n #Following the approach of the Shen20 pubtools, we will now calculate phi_lam_obs for the same luminosity fractions for which we have phi_lam.\n lLfrac_lam_obs_grid = lLfrac_lam_sig\n\n #Determine the obscuration function in the observed band.\n ltheta_fact = 0.4*qlf.dgr(z).to(u.cm**2).value*1e22 * qlf.xi(lam_eff_filter/(1.+z))\n ltheta = 10.**(lNH-22) * ltheta_fact\n ltheta_2D = np.tile(ltheta, [len(lLfrac_lam_obs_grid), 1])\n\n #For each NH, we will need to evaluate the unreddened QLF at a luminosity of lLfrac_lam_obs_grid + ltheta. So let's build it as a 2D array in which each row has the same lLfrac_lam_obs_grid value modified by the reddening correction (i.e., unreddened assuming different levels of obscuration).\n lLfrac_lam_sig_eval_2D = np.tile(lLfrac_lam_obs_grid, [len(lNH), 1]).T + ltheta_2D\n\n #Now, evaluate the f_NH function, following the S20 pubtools. Note: I think this actually wrong. f_NH should be evaluated at the intrinsic luminosity fraction of the reddening corrected luminosity. 
Here, we just assume that the same intrinsic lLfrac corresponds to the observed lLfrac_lam_obs_grid value for all NHs.\n lLfrac_eval_2D = np.tile(lLfrac, [len(lNH),1]).T\n log_NH_2D = np.tile(lNH, [len(lLfrac_lam_obs_grid), 1])\n f_NH = qlf.fNH(log_NH_2D, lLfrac_eval_2D, Lstar_10, z)\n\n #Extrapolate phi_lam_sig so that we can evaluate it in the new positions.\n log_phi_lam_sig_interp = interp1d(lLfrac_lam_sig, np.log10(phi_lam_sig.value), kind='linear', fill_value = 'extrapolate')\n\n #Evaluate it an produce phi_lam_obs_grid by integrating over f_NH dlNH.\n phi_lam_sig_eval_2D = 10.**(log_phi_lam_sig_interp(lLfrac_lam_sig_eval_2D))\n phi_lam_obs_grid= np.sum(phi_lam_sig_eval_2D * f_NH * dlNH, axis=1)\n\n #Now, this is the output grid we actually want.\n nlLfrac_lam_obs = 100\n dlLfrac_lam_obs = (lLfrac_lam_obs_max-lLfrac_lam_obs_min)/nlLfrac_lam_obs\n if dlLfrac_lam_obs > 0.1:\n dlLfrac_lam_obs = 0.1\n lLfrac_lam_obs = np.arange(lLfrac_lam_obs_min, lLfrac_lam_obs_max + 0.1*dlLfrac_lam_obs, dlLfrac_lam_obs)\n\n #Interpolate/extrapolate phi_lam_obs to put it in the required output grid and return the resulting QLF.\n lphi_lam_obs_interp = interp1d(lLfrac_lam_obs_grid, np.log10(phi_lam_obs_grid), fill_value='extrapolate')\n phi_lam_obs = 10.**(lphi_lam_obs_interp(lLfrac_lam_obs))*phi_lam_sig.unit\n return phi_lam_obs, dlLfrac_lam_obs*u.dex",
"def magnitude(frame):\n sobelx = lambda im: cv2.Sobel(im, cv2.CV_64F, 1, 0, ksize=3)\n sobely = lambda im: cv2.Sobel(im, cv2.CV_64F, 0, 1, ksize=3)\n dxabs = cv2.convertScaleAbs(sobelx(frame))\n dyabs = cv2.convertScaleAbs(sobely(frame))\n\n return cv2.addWeighted(dxabs, 0.5, dyabs, 0.5, 0)",
"def app_mag(abs_mag, phase_angle, slope_g, d_ast_sun, d_ast_earth):\n\n # Compute the apparent / visual magnitude\n mag = red_mag(abs_mag, phase_angle, slope_g) \\\n + 5.0 * np.log10(d_ast_sun * d_ast_earth)\n\n # Return the apparent magnitude\n return mag",
"def AB_zero_mag(self):\n if self.wavelength_unit is None:\n raise AttributeError('Needs wavelength units')\n\n C1 = (Unit(self.wavelength_unit).to('AA') ** 2 /\n Constants.c.to('AA/s').value)\n c1 = self._lpivot ** 2 * C1\n\n m = 2.5 * np.log10(_drop_units(c1)) + 48.6\n return m",
"def make_sq(mlat, dAB, *J):\n if (len(J)!=4):\n print(\"Number of paramaters are exceeded 5!\")\n NN = 2*mlat\n \n tau = np.zeros((NN,NN), dtype=complex)\n h = np.zeros((NN,NN), dtype=complex)\n \n for i in range(mlat-1):\n if (i%2==0):\n h[i,i] = dAB/2. # on-site energy\n h[mlat+i,mlat+i] = -dAB/2. # on-site energy \n h[i, mlat+i] = J[0]\n h[i, i+1] = J[1]\n h[mlat+i, mlat+i+1] = J[3]\n #\n tau[mlat+i, i] = J[2]\n elif (i%2==1):\n h[i,i] = -dAB/2. # on-site energy\n h[mlat+i,mlat+i] = dAB/2. # on-site energy \n h[i, mlat+i] = J[2]\n h[i, i+1] = J[3]\n h[mlat+i, mlat+i+1] = J[1]\n #\n tau[mlat+i, i] = J[0]\n\n # End of loop over lattice sites\n\n # The upper edge site\n if (mlat-1 % 2==0):\n h[mlat-1, mlat-1] = dAB/2. # on-site energy\n h[NN-1,NN-1] = -dAB/2. # on-site energy \n h[mlat-1, NN-1] = J[0]\n #\n tau[NN-1, mlat-1] = J[2]\n elif (mlat-1 % 2==1):\n h[mlat-1, mlat-1] = -dAB/2. # on-site energy\n h[NN-1,NN-1] = dAB/2. # on-site energy \n h[mlat-1, NN-1] = J[2]\n #\n tau[NN-1, mlat-1] = J[0] \n \n h = h + h.conj().T # make it hermitian\n return h, tau",
"def AB_zero_Jy(self):\n c = 1e-8 * Constants.c.to('m/s').value\n f = 1e5 / c * self.lpivot.to('AA').value ** 2 * self.AB_zero_flux.value\n return f * Unit('Jy')",
"def glueEmH( Ja, Jf, truncNum = scipy.inf ):\n w, v = truncBasisH( Ja, truncNum )\n sPlus, sMinus, sZ = sPlusAndMinusAndZ( v )\n \n H1 = scipy.zeros( ( len(w)**4, len(w)**4 ) )\n \n for n in range( len(w)**4 ):\n # Diagonal previous generation contributions\n o = oct(n)[-4:].zfill(4)\n o = [int(char) for char in o]\n o_A, o_B, o_C, o_D = o\n \n H1[n, n] += scipy.sum( [ w[ i ] for i in o ] )\n \n # Edge terms\n for np in range( n, len(w)**4 ):\n op = oct(np)[-4:].zfill(4)\n op = [int(char) for char in op]\n op_A, op_B, op_C, op_D = op\n \n x = 0.\n if ( (o_B == op_B) and (o_C == op_C) ):\n x += -Jf * ( .5 * ( sPlus[0][o_A, op_A] * sMinus[0][o_D, op_D] + sMinus[0][o_A, op_A] * sPlus[0][o_D,op_D] ) + sZ[0][o_A, op_A] * sZ[0][o_D, op_D] )\n if ( (o_C == op_C) and (o_A == op_A) ):\n x += -Jf * ( .5 * ( sPlus[1][o_B, op_B] * sMinus[1][o_D, op_D] + sMinus[1][o_B, op_B] * sPlus[1][o_D,op_D] ) + sZ[1][o_B, op_B] * sZ[1][o_D, op_D] )\n if ( (o_A == op_A) and (o_B == op_B) ):\n x += -Jf * ( .5 * ( sPlus[2][o_C, op_C] * sMinus[2][o_D, op_D] + sMinus[2][o_C, op_C] * sPlus[1][o_D,op_D] ) + sZ[1][o_C, op_C] * sZ[2][o_D, op_D] )\n \n H1[n, np] = x\n H1[np, n] = x\n \n return H1",
"def _flux_unc_as_mags(fluxes, uncs):\n uncs_mag = np.empty(len(fluxes))\n\n # fluxes-uncs case\n indxs, = np.where(fluxes - uncs <= 0)\n if len(indxs) > 0:\n uncs_mag[indxs] = -2.5*np.log10(fluxes[indxs]\n / (fluxes[indxs] + uncs[indxs]))\n\n # normal case\n indxs, = np.where(fluxes - uncs > 0)\n if len(indxs) > 0:\n uncs_mag[indxs] = -2.5*np.log10((fluxes[indxs] - uncs[indxs])\n / (fluxes[indxs] + uncs[indxs]))\n\n return uncs_mag",
"def amplitude(magnitudes):\n ampl = 0.5 * (np.max(magnitudes) - np.min(magnitudes))\n\n return ampl",
"def test_filt_abmag(self):\n sun = Sun.from_builtin('E490_2014')\n V = get_bandpass('johnson v')\n wave, fluxd = sun.filt(V, unit=u.ABmag)\n assert np.isclose(fluxd.value, -26.77, atol=0.007)",
"def openMANGASpectrum(self, path_to_logcube, path_to_dapall, bin_number, plate_number, ifu_number, emlines,mpl='mpl-9'):\n\t\t\n\t\t# Read in MAPS file as this contains part of the information.\n\t\tmaps_header = pyfits.open(self.path_to_spectrum)\n\t\tbin_identification = maps_header['BINID'].data\n\t\twhere = np.where(bin_number == bin_identification[0,:,:]) #use 1st channel of bin_identification\n\t\tx_position, y_position = where[0][0], where[1][0]\n\t\t\n\t\t# Get S/N, right ascension and declination.\n\t\tsignal, ra, dec = maps_header['BIN_SNR'].data[x_position,y_position], maps_header[0].header['OBJRA'],maps_header[0].header['OBJDEC']\n\t\tvelocity_dispersion = maps_header['STELLAR_SIGMA'].data \t\t\t\t\n\t\tvelocity_dispersion_correction = maps_header['STELLAR_SIGMACORR'].data[0,:,:]\n\t\t\n\t\tif velocity_dispersion[x_position,y_position] > velocity_dispersion_correction[x_position,y_position]:\n\t\t\tcorrection = np.sqrt((velocity_dispersion[x_position,y_position])**2-(velocity_dispersion_correction[x_position,y_position])**2)\n\t\t\tvdisp = correction\n\t\telse:\n\t\t\tvdisp = 0\n\n\t\t\n\t\t# Open LOGCUBE to get the flux, wavelength, and error\n\t\theader = pyfits.open(path_to_logcube)\n\t\twavelength, flux, emline, bit_mask, inverse_variance = header['WAVE'].data, header['FLUX'].data, header['EMLINE'].data, header['MASK'].data, header['IVAR'].data\n\t\tself.wavelength = wavelength\n\t\tcorrect_flux = flux[:,x_position,y_position]\n\t\tcorrect_flux_emline = emline[:, x_position, y_position]\n\t\toutput_flux = correct_flux - correct_flux_emline\n\t\tcorrect_inverse_variance = inverse_variance[:, x_position, y_position]\n\t\t\n\t\tLSF = header['LSF'].data[:,x_position,y_position]\t\t# LSF given as sigma of Gaussian in Angstrom\n\t\tsig2fwhm = 2.0 * np.sqrt(2.0 * np.log(2.0))\n\t\tLSF_FWHM = LSF*sig2fwhm\n\t\tRES = wavelength/LSF_FWHM\n\t\t\n\t\tself.r_instrument = RES\n\t\tself.error = np.sqrt(1.0/(correct_inverse_variance))\n\t\tself.bad_flags = np.ones(len(output_flux))\n\t\tself.flux = output_flux\n\t\tself.vdisp = vdisp\n\n\t\tif (mpl=='mpl-10') or (mpl=='mpl-11'):\n\t\t\text=2\n\t\telse:\n\t\t\text=1\n\t\t\n\t\tdap_all = pyfits.open(path_to_dapall)\n\t\tget = np.where(dap_all[ext].data['PLATEIFU']==str(plate_number)+'-'+str(ifu_number))\n\t\tc = const.c.value/1000\n\t\t# Use redshift as measured from the stellar kinematics by the DAP.\n\t\tredshift = dap_all[ext].data['STELLAR_Z'][get][0]\n\t\t# If redshift measurement failed, use redshift estimate from NSA or ancillary programs.\n\t\tif redshift<0:\n\t\t\tredshift = dap_all[ext].data['Z'][get][0]\n\t\t\t\n\t\tsys_vel = maps_header[0].header['SCINPVEL']\n\t\tbin_vel = maps_header['STELLAR_VEL'].data[x_position,y_position]\t\n\t\t\t\n\t\tif redshift<0:\n\t\t\tprint('WARNING: The redshift of this object is negative.')\n\t\t\tprint('z = {}'.format(redshift))\n\t\t\n\t\tredshift_corr = (sys_vel+bin_vel)/c\n\t\tself.redshift = redshift\n\t\tself.restframe_wavelength = self.wavelength / (1.0+redshift_corr)\n\n\t\tbitmask = bit_mask[:,x_position,y_position]&2**0+2**1+2**2+2**3+2**4\n\t\tself.mask_emissionlines(emlines)\n\t\tself.final_mask = (bitmask | self.lines_mask)\n\n\t\tself.wavelength = self.wavelength[(self.final_mask==False)] \n\t\tself.restframe_wavelength = self.restframe_wavelength[(self.final_mask==False)] \n\t\tself.flux = self.flux[(self.final_mask==False)] \n\t\tself.error = self.error[(self.final_mask==False)]\n\t\tself.bad_flags = self.bad_flags[(self.final_mask==False)]\n\t\t\t\t\t\n\t\t# Get Trust flag, 
object_id, xpos, ypos and instrumental resolution.\n# \t\tself.trust_flag, self.objid, self.r_instrument = True, 0, np.loadtxt(os.path.join(os.environ['FF_DIR'],'data/MaNGA_spectral_resolution.txt'))\n\t\tself.trust_flag, self.objid= True, 0\n# \t\tself.r_instrument = self.r_instrument[0:self.r_instrument.shape[0]//2]\n\t\tself.r_instrument = self.r_instrument[(self.final_mask==False)]\n\t\tself.xpos, self.ypos = ra, dec\n\t\t\n\t\t# gets the amount of MW reddening on the models\n\t\tif self.milky_way_reddening :\n\t\t\tself.ebv_mw = get_dust_radec(ra, dec, 'ebv')\n\t\telse:\n\t\t\tself.ebv_mw = 0.0",
"def mab0(self):\n return WISE_INFO[self.bandname][\"ABmag0\"]",
"def update_mag(self, mags):\n self.log.mag(mags)\n q = self.quaternion()\n roll, pitch, heading = self.es\n\n mag_inertial = (q * quaternion.Quaternion.from_vec(np.array(mags)) * q.inv()).as_ndarray()[1:]\n mag_inertial[2] = 0\n mag_inertial /= sqrt(mag_inertial[0]**2 + mag_inertial[1]**2)\n mag_body = (q.inv() * quaternion.Quaternion.from_vec(mag_inertial) * q).as_ndarray()[1:]\n\n h = (q.inv() * quaternion.Quaternion.from_vec(np.array([1.0, 0, 0])) * q).as_ndarray()[1:]\n y = np.vstack(mag_body) - np.vstack(h)\n \n H = np.zeros((3, 9))\n ch2 = np.cos(heading/2)\n cr2 = np.cos(roll/2)\n sh2 = np.sin(heading/2)\n sr2 = np.sin(roll/2)\n H[0, 0] = 0\n H[0, 1] = 0\n H[0, 2] = -2.0*ch2*cr2**2*sh2/(1.0*ch2**2*cr2**2 + 1.0*ch2**2*sr2**2 + 1.0*cr2**2*sh2**2 + 1.0*sh2**2*sr2**2) - 2.0*ch2*sh2*sr2**2/(1.0*ch2**2*cr2**2 + 1.0*ch2**2*sr2**2 + 1.0*cr2**2*sh2**2 + 1.0*sh2**2*sr2**2)\n H[1, 0] = 4.0*ch2*cr2*sh2*sr2/(1.0*ch2**2*cr2**2 + 1.0*ch2**2*sr2**2 + 1.0*cr2**2*sh2**2 + 1.0*sh2**2*sr2**2)\n H[1, 1] = 2.0*ch2**2*cr2*sr2/(1.0*ch2**2*cr2**2 + 1.0*ch2**2*sr2**2 + 1.0*cr2**2*sh2**2 + 1.0*sh2**2*sr2**2) - 2.0*cr2*sh2**2*sr2/(1.0*ch2**2*cr2**2 + 1.0*ch2**2*sr2**2 + 1.0*cr2**2*sh2**2 + 1.0*sh2**2*sr2**2)\n H[1, 2] = -1.0*ch2**2*cr2**2/(1.0*ch2**2*cr2**2 + 1.0*ch2**2*sr2**2 + 1.0*cr2**2*sh2**2 + 1.0*sh2**2*sr2**2) + 1.0*ch2**2*sr2**2/(1.0*ch2**2*cr2**2 + 1.0*ch2**2*sr2**2 + 1.0*cr2**2*sh2**2 + 1.0*sh2**2*sr2**2) + 1.0*cr2**2*sh2**2/(1.0*ch2**2*cr2**2 + 1.0*ch2**2*sr2**2 + 1.0*cr2**2*sh2**2 + 1.0*sh2**2*sr2**2) - 1.0*sh2**2*sr2**2/(1.0*ch2**2*cr2**2 + 1.0*ch2**2*sr2**2 + 1.0*cr2**2*sh2**2 + 1.0*sh2**2*sr2**2)\n\n S = H.dot(self.P).dot(H.T) + self.Rmag\n K = self.P.dot(H.T).dot(np.linalg.inv(S))\n x = self.state_vec() + K.dot(y)\n\n self.P = (np.eye(9) - K.dot(H)).dot(self.P)\n self.set_state_vec(x)",
"def calculate_magnitude(self, band, system='AB'):\n\n if system not in ('AB', 'Vega'):\n raise ValueError('`system` must be one of `AB` or `Vega`')\n\n f1 = self.calculate_flux(band)\n\n if f1 > 0:\n magnitude = -2.5 * log10(f1 / band.flux[system])\n\n if system == 'Vega':\n # Add 0.026 because Vega has V = 0.026:\n magnitude += 0.026\n\n else:\n magnitude = np.inf\n\n return magnitude",
"def J_over_JUV_avg_slab(tau_SF):\n \n return 1.0/tau_SF*(1.0 - (0.5 - expn(3,tau_SF))/tau_SF)",
"def mab0(self):\n return GALEX_INFO[self.bandname][\"ABmag0\"]",
"def compute_radiocore_luminosity(MBH, L_AGN):\n\tL_X = bolcorr_hardX(L_AGN)\n\tm = log10(MBH / u.Msun)\n\t# Merloni, Heinz & Di Matteo (2003)\n\tlogLR = 0.6 * log10(L_X/(u.erg/u.s)) + 0.78 * m + 7.33\n\treturn 10**logLR * u.erg/u.s",
"def illuminator_of_elfes():\n\n\t# Alpha - simplified by taking out the i by multiplying the outerproduct by 2i\n\talpha1i = np.matrix([[0, 0, 0, 2], [0, 0, 2, 0], [0, -2, 0, 0], [-2, 0, 0, 0]])\n\talpha2i = np.matrix([[0, 2, 0, 0], [-2, 0, 0, 0], [0, 0, 0, 2], [0, 0, -2, 0]])\n\talpha3i = np.matrix([[0, 0, 2, 0], [0, 0, 0, -2], [-2, 0, 0, 0], [0, 2, 0, 0]])\n\n\t# Betas - simplified by taking out the i by multiplication of outerprod by 2i\n\tbeta1i = np.matrix([[0, 0, 0, 2], [0, 0, -2, 0], [0, 2, 0, 0], [-2, 0, 0, 0]])\n\tbeta2i = np.matrix([[0, 0, 2, 0], [0, 0, 0, 2], [-2, 0, 0, 0], [0, -2, 0, 0]])\n\tbeta3i = np.matrix([[0, 2, 0, 0], [-2, 0, 0, 0], [0, 0, 0, -2], [0, 0, 2, 0]])\n\n\t# print(\"alpha 1\")\n\t# print(alpha1i)\n\t# print(\"\")\n\t# print(\"alpha 2\")\n\t# print(alpha2i)\n\t# print(\"\")\n\t# print(\"alpha 3\")\n\t# print(alpha3i)\n\t# print(\"\")\n\t# print(\"beta 1\")\n\t# print(beta1i)\n\t# print(\"\")\n\t# print(\"beta 2\")\n\t# print(beta2i)\n\t# print(\"\")\n\t# print(\"beta 3\")\n\t# print(beta3i)\n\t# print(\"\")\n\n\t# abperm_comb = [ np.multiply(alpha1i,-1), np.multiply(alpha2i,-1), np.multiply(alpha3i,-1), np.multiply(beta1i,-1), np.multiply(beta2i,-1), np.multiply(beta3i,-1)]\n\n\tabperm_comb = [alpha1i, alpha2i, alpha3i, beta1i, beta2i, beta3i]\n\treturn abperm_comb",
"def _LSST_uncertainties(self, mag, five_sigma_mag, band):\n sigma_sys = 0.005\n if band == \"u\":\n gamma = 0.038\n else:\n gamma = 0.039\n\n x = 10**(0.4*(mag-five_sigma_mag))\n sigma = np.sqrt(sigma_sys**2 + (0.04-gamma)*x + gamma*x**2)\n return sigma",
"def mag_to_flux(mag, mag_zp):\n return 10 ** (-0.4 * (mag - mag_zp))",
"def AB2(Jab, Vab, Vcentr, normalize=True):\n # There is a disconnect between the variable names in the WINDNMR GUI and\n # the variable names in this function.\n # The following code provides a temporary interface until this is\n # refactored.\n J, dV, Vab = Jab, Vab, Vcentr\n\n # Also, inconsistencies in WINDNMR GUI, internal WINDNMR code, and Pople\n # equations require a conversion.\n dV = -dV\n va = Vab + (dV / 2)\n vb = va - dV\n\n Jmod = J * (3 / 4)\n C_plus = sqrt(dV**2 + dV * J + (9 / 4) * (J**2)) / 2\n C_minus = sqrt(dV**2 - dV * J + (9 / 4) * (J**2)) / 2\n cos2theta_plus = (dV / 2 + J / 4) / C_plus\n cos2theta_minus = (dV / 2 - J / 4) / C_minus\n sintheta_plus = sqrt((1 - cos2theta_plus) / 2)\n sintheta_minus = sqrt((1 - cos2theta_minus) / 2)\n costheta_plus = sqrt((1 + cos2theta_plus) / 2)\n costheta_minus = sqrt((1 + cos2theta_minus) / 2)\n sin_dtheta = sintheta_plus * costheta_minus - costheta_plus * sintheta_minus\n cos_dtheta = costheta_plus * costheta_minus + sintheta_plus * sintheta_minus\n\n # In Pople, Schneider and Bernstein, Table 6-8:\n # V1-V4 are \"Origin: A\";\n # V5-V8 are \"Origin: B\";\n # V9 is \"Origin: Comb.\"\n V1 = Vab + Jmod + C_plus\n V2 = vb + C_plus + C_minus\n V3 = va\n V4 = Vab - Jmod + C_minus\n V5 = vb + C_plus - C_minus\n V6 = Vab + Jmod - C_plus\n V7 = vb - C_plus + C_minus\n V8 = Vab - Jmod - C_minus\n V9 = vb - C_plus - C_minus\n\n I1 = (sqrt(2) * sintheta_plus - costheta_plus) ** 2\n I2 = (sqrt(2) * sin_dtheta + costheta_plus * costheta_minus) ** 2\n I3 = 1\n I4 = (sqrt(2) * sintheta_minus + costheta_minus) ** 2\n I5 = (sqrt(2) * cos_dtheta + costheta_plus * sintheta_minus) ** 2\n I6 = (sqrt(2) * costheta_plus + sintheta_plus) ** 2\n I7 = (sqrt(2) * cos_dtheta - sintheta_plus * costheta_minus) ** 2\n I8 = (sqrt(2) * costheta_minus - sintheta_minus) ** 2\n I9 = (sqrt(2) * sin_dtheta + sintheta_plus * sintheta_minus) ** 2\n vList = [V1, V2, V3, V4, V5, V6, V7, V8, V9]\n IList = [I1, I2, I3, I4, I5, I6, I7, I8, I9]\n\n if normalize:\n _normalize(IList, 3)\n return list(zip(vList, IList))",
"def V_magJupiter_2(alpha,a_p,d):\n V = 5.*np.log10(a_p*d) - 9.428 - 2.5*np.log10(1.0 - 1.507*(alpha/180.) - 0.363*(alpha/180.)**2. - 0.062*(alpha/180.)**3.+ 2.809*(alpha/180.)**4. - 1.876*(alpha/180.)**5.)\n return V",
"def AB_zero_flux(self):\n return 10 ** (-0.4 * self.AB_zero_mag) * Unit('erg*s**-1*cm**-2*AA**-1')",
"def msqi_ama(x, fs):\n \n # test ecg shape\n try:\n x.shape[1]\n except IndexError:\n x = x[:, np.newaxis]\n \n # Empirical values for the STFFT transformation\n win_size_sec = 0.125 #seconds\n win_over_sec = 0.09375 #seconds\n nfft_factor_1 = 16\n nfft_factor_2 = 4\n\n win_size_smp = int(win_size_sec * fs) #samples\n win_over_smp = int(win_over_sec * fs) #samples\n win_shft_smp = win_size_smp - win_over_smp\n\n # Computes Modulation Spectrogram\n modulation_spectrogram = ama.strfft_modulation_spectrogram(x, fs, win_size_smp, \n win_shft_smp, nfft_factor_1, 'cosine', nfft_factor_2, 'cosine' )\n \n # Find fundamental frequency (HR)\n # f = (0, 40)Hz\n ix_f_00 = (np.abs(modulation_spectrogram['freq_axis'] - 0)).argmin(0) \n ix_f_40 = (np.abs(modulation_spectrogram['freq_axis'] - 40)).argmin(0) + 1\n \n # Look for the maximum only from 0.6 to 3 Hz (36 to 180 bpm)\n valid_f_ix = np.logical_or(modulation_spectrogram['freq_mod_axis'] < 0.66 , modulation_spectrogram['freq_mod_axis'] > 3)\n \n # number of epochs\n n_epochs = modulation_spectrogram['power_modulation_spectrogram'].shape[2]\n \n msqi_vals = np.zeros(n_epochs)\n hr_vals = np.zeros(n_epochs)\n \n for ix_epoch in range(n_epochs):\n B = np.sqrt(modulation_spectrogram['power_modulation_spectrogram'][:, :, ix_epoch])\n \n # Scale to maximun of B\n B = B / np.max(B)\n \n # Add B in the conventional frequency axis from 0 to 40 Hz\n tmp = np.sum(B[ix_f_00:ix_f_40, :], axis=0)\n \n # Look for the maximum only from 0.6 to 3 Hz (36 to 180 bpm)\n tmp[valid_f_ix] = 0\n ix_max = np.argmax(tmp) \n freq_funda = modulation_spectrogram['freq_mod_axis'][ix_max] \n \n # TME\n tme = np.sum(B)\n \n eme = 0\n for ix_harm in range(1, 5):\n ix_fm = (np.abs(modulation_spectrogram['freq_mod_axis'] - (ix_harm * freq_funda) )).argmin(0) \n ix_b = int(round(.3125 / modulation_spectrogram['freq_mod_delta'] )) # 0.3125Hz, half lobe\n # EME\n eme = eme + np.sum(B[ 0 : ix_f_40, ix_fm - ix_b : ix_fm + ix_b + 1 ]) \n \n # RME\n rme = tme - eme\n # MS-QI\n msqi_vals[ix_epoch] = eme / rme\n # HR\n hr_vals[ix_epoch] = freq_funda * 60\n \n return (msqi_vals, hr_vals, modulation_spectrogram)"
] | [
"0.7629827",
"0.5995928",
"0.58825415",
"0.5783412",
"0.56100976",
"0.5591324",
"0.5488376",
"0.54337126",
"0.540519",
"0.5386843",
"0.53587943",
"0.53322387",
"0.5304681",
"0.5287902",
"0.52460724",
"0.5208159",
"0.5202377",
"0.5188505",
"0.5181547",
"0.51752573",
"0.5162223",
"0.51559156",
"0.5138555",
"0.51376456",
"0.5124597",
"0.51199603",
"0.51008946",
"0.5082504",
"0.5040252",
"0.50402325"
] | 0.7608973 | 1 |
Selects the objects in the clusters as AGN subject to a color cut. Reads in the SExtractor catalogs and performs all necessary cuts to select the AGN in the cluster. First, a cut is made on the SExtractor flag, requiring an extraction flag of `< 4`. A magnitude cut is applied to the faint end of the selection band; this limit is chosen so that the completeness in the selection band stays above 80%. A magnitude cut is applied to the bright end of both bands to remain under the saturation limit. A [3.6] - [4.5] color cut is applied to select red objects; objects above this cut are defined as IR-bright AGN. An absolute magnitude cut is applied to keep only the intrinsically bright AGN, in order to have a fair sample across our redshift range. Finally, the surviving objects' positions are checked against the good-pixel map to ensure that each object lies at an acceptable location. | def object_selection(self, ch1_bright_mag, ch2_bright_mag, selection_band_faint_mag, selection_band='I2_MAG_APER4'):
clusters_to_remove = []
for cluster_id, cluster_info in self._catalog_dictionary.items():
# Read in the catalog
se_catalog = Table.read(cluster_info['se_cat_path'], format='ascii')
# Add the mask name to the catalog, extracting only the system-agnostic portion of the path
se_catalog['MASK_NAME'] = re.search(r'Data_Repository/.*?\Z', cluster_info['cov_mask_path']).group(0)
# Perform the SExtractor FLAGS cut. A value under 4 indicates the object was extracted well.
se_catalog = se_catalog[se_catalog['FLAGS'] < 4]
# Perform a faint-end magnitude cut in the selection band.
se_catalog = se_catalog[se_catalog[selection_band] <= selection_band_faint_mag]
# Perform bright-end cuts
# Limits from Eisenhardt+04 for ch1 = 10.0 and ch2 = 9.8
se_catalog = se_catalog[se_catalog['I1_MAG_APER4'] > ch1_bright_mag] # [3.6] saturation limit
se_catalog = se_catalog[se_catalog['I2_MAG_APER4'] > ch2_bright_mag] # [4.5] saturation limit
# For the mask cut we need to check the pixel value for each object's centroid.
# Read in the mask file
mask, header = fits.getdata(cluster_info['cov_mask_path'], header=True)
# Recast the mask image as a boolean array so we can use it as a check on the catalog entries
mask = mask.astype(bool)
# Read in the WCS from the mask
w = WCS(header)
# Get the objects' pixel coordinates
xy_data = np.array(w.wcs_world2pix(se_catalog['ALPHA_J2000'], se_catalog['DELTA_J2000'], 0))
# Floor the values and cast as integers so we have the pixel indices into the mask
xy_pix_idxs = np.floor(xy_data).astype(int)
# Filter the catalog according to the boolean value in the mask at the objects' locations.
se_catalog = se_catalog[mask[xy_pix_idxs[1], xy_pix_idxs[0]]]
# If the cuts have removed every object from the cluster, mark the cluster for removal; otherwise add the
# catalog to the data structure
if se_catalog:
cluster_info['catalog'] = se_catalog
else:
clusters_to_remove.append(cluster_id)
# Remove any cluster that has no objects surviving our selection cuts
for cluster_id in clusters_to_remove:
self._catalog_dictionary.pop(cluster_id, None) | {
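Note: the [3.6] - [4.5] color cut described in the docstring is not applied inside object_selection itself; in this class it is handled by the separate purify_selection step called from run_selection. A minimal sketch of such a hard color cut, assuming an astropy Table with the same 4-arcsec aperture magnitude columns and an illustrative Vega threshold of 0.7 mag (an assumption for illustration, not necessarily this pipeline's adopted value):

from astropy.table import Table

def apply_color_cut(se_catalog: Table, color_threshold: float = 0.7) -> Table:
    # Compute the [3.6] - [4.5] color from the 4" aperture magnitudes.
    color = se_catalog['I1_MAG_APER4'] - se_catalog['I2_MAG_APER4']
    # Keep only objects redder than the (assumed) threshold, i.e. candidate IR-bright AGN.
    return se_catalog[color >= color_threshold]

Usage would be one call per cluster catalog, e.g. se_catalog = apply_color_cut(se_catalog), before any per-object membership weighting is applied.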
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def purify_selection(self, ch1_ch2_color_cut):\n\n # Read in the number count distribution file\n with open(self._field_number_dist, 'r') as f:\n field_number_distribution = json.load(f)\n field_number_counts = field_number_distribution['normalized_number_counts']\n color_bins = field_number_distribution['color_bins']\n color_bin_min, color_bin_max = np.min(color_bins), np.max(color_bins)\n\n # Create an interpolation of our number count distribution\n color_probability_distribution = interp1d(color_bins, field_number_counts)\n\n clusters_to_remove = []\n for cluster_id, cluster_info in self._catalog_dictionary.items():\n # Get the photometric catalog for the cluster\n se_catalog = cluster_info['catalog']\n\n # Compute the color and color errors for each object\n I1_I2_color = se_catalog['I1_MAG_APER4'] - se_catalog['I2_MAG_APER4']\n I1_I2_color_err = np.sqrt((2.5 * se_catalog['I1_FLUXERR_APER4'] /\n (se_catalog['I1_FLUX_APER4'] * np.log(10))) ** 2 +\n (2.5 * se_catalog['I2_FLUXERR_APER4'] /\n (se_catalog['I2_FLUX_APER4'] * np.log(10))) ** 2)\n\n # Convolve the error distribution for each object with the overall number count distribution\n def object_integrand(x):\n return norm(loc=I1_I2_color, scale=I1_I2_color_err).pdf(x) * color_probability_distribution(x)\n\n # Compute the probability contained within the selection region by each object's color error\n # def degree_of_membership(color, color_err):\n # color_prob_in_numer = quad(\n # lambda x: norm(loc=color, scale=color_err).pdf(x) * color_probability_distribution(x),\n # a=ch1_ch2_color_cut, b=color_bin_max)[0]\n # color_prob_in_denom = quad(\n # lambda x: norm(loc=color, scale=color_err).pdf(x) * color_probability_distribution(x),\n # a=color_bin_min, b=color_bin_max, args=(color, color_err))[0]\n # return color_prob_in_numer / color_prob_in_denom\n #\n # with MultiPool() as pool:\n # color_prob_in = pool.map(degree_of_membership, zip(I1_I2_color, I1_I2_color_err))\n color_prob_in_numer = quad_vec(object_integrand, a=ch1_ch2_color_cut, b=color_bin_max)[0]\n color_prob_in_denom = quad_vec(object_integrand, a=color_bin_min, b=color_bin_max)[0]\n color_prob_in = color_prob_in_numer / color_prob_in_denom\n\n # Store the degree of membership into the catalog\n se_catalog['SELECTION_MEMBERSHIP'] = color_prob_in\n\n # As objects with degrees of membership of 0 do not contribute to the sample, we can safely remove them.\n se_catalog = se_catalog[se_catalog['SELECTION_MEMBERSHIP'] > 0]\n\n # def new_color_prob(x, color, color_err, denom_idx):\n # return -1. 
* norm(loc=color, scale=color_err).pdf(x) * color_probability_distribution(x) / \\\n # color_prob_in_denom[denom_idx]\n\n # # Maximize the probability distribution\n # new_color = [minimize_scalar(new_color_prob, args=(color, color_err, denom_idx),\n # bounds=(np.min(color_bins), np.max(color_bins)), method='bounded').x\n # for denom_idx, (color, color_err) in enumerate(zip(I1_I2_color, I1_I2_color_err))]\n #\n # # Store the new color in the catalog\n # se_catalog['CORRECTED_COLOR'] = new_color\n #\n # # Select only objects that have a (new) color redder than our threshold\n # se_catalog = se_catalog[se_catalog['CORRECTED_COLOR'] >= ch1_ch2_color_cut]\n\n # If we have exhausted all objects from the catalog mark the cluster for removal otherwise update the\n # photometric catalog in our database\n if se_catalog:\n cluster_info['catalog'] = se_catalog\n else:\n clusters_to_remove.append(cluster_id)\n\n # Remove any cluster that has no objects surviving our selection cuts\n for cluster_id in clusters_to_remove:\n self._catalog_dictionary.pop(cluster_id, None)",
"def run_selection(self, included_clusters, excluded_clusters, max_image_catalog_sep, ch1_min_cov, ch2_min_cov,\n ch1_bright_mag, ch2_bright_mag, selection_band_faint_mag, ch1_ch2_color, spt_colnames,\n output_name, output_colnames):\n self.file_pairing(include=included_clusters, exclude=excluded_clusters)\n self.image_to_catalog_match(max_image_catalog_sep=max_image_catalog_sep)\n self.coverage_mask(ch1_min_cov=ch1_min_cov, ch2_min_cov=ch2_min_cov)\n self.object_mask()\n # self.cluster_k_correction()\n self.object_selection(ch1_bright_mag=ch1_bright_mag, ch2_bright_mag=ch2_bright_mag,\n selection_band_faint_mag=selection_band_faint_mag)\n self.purify_selection(ch1_ch2_color_cut=ch1_ch2_color)\n self.j_band_abs_mag()\n self.catalog_merge(catalog_cols=spt_colnames)\n self.object_separations()\n self.completeness_value()\n final_catalog = self.final_catalogs(filename=output_name, catalog_cols=output_colnames)\n if final_catalog is not None:\n return final_catalog",
"def scan(self, cut_off, r0 = 4, dr = 2, rmax = 80, bg_global = 3419, sigma = 3.5 * 13.8):\r\n \r\n start_time = time.time()\r\n for trial in range(self.dimension):\r\n \r\n max_count, max_rank = self.pick_largest(cut_off = cut_off)\r\n fitted = 0 # galaxy not fitted to a circle\r\n point = [] # \r\n circle_number = 0\r\n \r\n if max_count == -1:\r\n print(\"Scan completed, number of galaxies found is \", len(self.galaxies), \"run time is\", time.time() - start_time)\r\n break\r\n \r\n if max_count >= 0: # That means a value that is larger than cut_off exists\r\n ypos,xpos = self.rank_yx(max_rank)\r\n # print(\"max_count, y, x\", max_count, ypos, xpos) \r\n \r\n \r\n for r in range(r0, rmax, dr): # r = radius, we know the largest radius can't be >80 by inspecting the pic.\r\n print(\"locating the galaxy position at\", ypos, xpos, \"at a radius r =\", r) \r\n if fitted == 1 or r == 80:\r\n # print(\"max_count, yx\",max_count, ypos, xpos, \"cut =\", no_cut, len(new_point)/2)\r\n \r\n self.mask_region(ypos, xpos, r - dr)\r\n # print(bg_local, no_bg)\r\n if r - dr > r0: # it must be a galaxy\r\n \r\n # getting the local bg\r\n if no_bg > 3: # check if enough data to deduce bg\r\n bg = bg_local / no_bg \r\n else:\r\n bg = bg_global\r\n \r\n if circle_number >= np.pi* 2* (r - dr)**2:\r\n self.galaxies.append(galaxy(ypos, xpos, r - dr, np.array(point).sum(), bg, circle_number))\r\n\r\n else:\r\n if no_cut < circle_number/2 and circle_number*2 >= np.pi * (r0**2):\r\n self.galaxies.append(galaxy(ypos, xpos, r - dr, np.array(point).sum(), bg, circle_number))\r\n\r\n print(\"\\ngalaxy scan completed,radius =\", r - dr, \"position =\", ypos, xpos, \"max count =\", max_count, \"time = \", time.time() - start_time)\r\n\r\n break\r\n \r\n ##### Resetting parameters ####\r\n no_bg = 0 # number 0f background pixels\r\n bg_local = 0 # sum of local background noises\r\n no_cut = 0 # number of counts below cut off\r\n new_point = [] # pending pixels to be added\r\n ###############################\r\n \r\n if fitted == 0:\r\n \r\n if r == r0:\r\n \r\n for j, i in product(np.arange(ypos - r, ypos + r + 1), np.arange(xpos - r, xpos + 1 + r)): # Create square\r\n \r\n # Check if it is within the circle radius = 2\r\n if int((i - xpos) ** 2 + (j - ypos) ** 2) <= r ** 2 and 0<= j <= (self.shapes[0] - 1) and 0<= i <= self.shapes[1] - 1:\r\n \r\n i,j =[int(i), int(j)]\r\n if self.raw_image_data[j,i] == self.raw_image_data[j,i] *self.masked[j,i]: # Append the ppoint if not masked (masked = 1)\r\n\r\n if self.raw_image_data[j,i] <= cut_off:\r\n no_cut += 1\r\n\r\n if self.raw_image_data[j,i] > cut_off:\r\n point.append(self.raw_image_data[j, i])\r\n circle_number += 1\r\n\r\n if abs(self.raw_image_data[j,i] - bg_global) <= sigma:\r\n bg_local += self.raw_image_data[j,i]\r\n no_bg += 1\r\n \r\n if no_cut >= len(point)/2 or 2*circle_number < np.pi * r0**2:\r\n fitted = 1\r\n else:\r\n pass\r\n \r\n#######################################################################################################\r\n if r > r0:\r\n for j, i in product(np.arange(ypos - r, ypos + r + 1), np.arange(xpos - r, xpos + 1 + r)): \r\n \r\n # Check if data are in between the previous and the new circle\r\n if (r - dr)**2 < int((i - xpos) ** 2 + (j - ypos) ** 2) <= r ** 2 and 0<= j <= (self.shapes[0] - 1) and 0<= i <= self.shapes[1] - 1:\r\n i,j =[int(i), int(j)] # just incase \r\n \r\n if self.raw_image_data[j,i] * self.masked[j,i] == self.raw_image_data[j,i]:\r\n\r\n if self.raw_image_data[j,i] <= cut_off:\r\n no_cut += 1\r\n if 
self.raw_image_data[j,i] > cut_off:\r\n new_point.append(self.raw_image_data[j, i]) # points are pending to be added in\r\n circle_number += 1\r\n\r\n if abs(self.raw_image_data[j,i] - bg_global) <= sigma:\r\n bg_local += self.raw_image_data[j,i]\r\n no_bg += 1\r\n \r\n # Check if half of the new data points are inside cut off region\r\n if no_cut <= int(len(new_point)/2) or circle_number*2 < np.pi * r**2:\r\n for rannk in range(len(new_point)):\r\n point.append(new_point[rannk])\r\n\r\n else:\r\n fitted = 1",
"def cluster_select(arrayName, x0, y0, type_stack, w, cc_stack, ncor, Tmin, \\\n Tmax, RMSmin, RMSmax, xmin, xmax, ymin, ymax, typecluster, nc, \\\n palette, amp, n1, n2, draw_scatter=True, draw_hist=True, \\\n envelope=True, draw_cc=True, draw_ac=True, draw_colored_cc=True, \\\n draw_colored_ac=True):\n # Read file containing data from stack_ccorr_tremor\n filename = 'cc/{}/{}_{:03d}_{:03d}/{}_{:03d}_{:03d}_{}.pkl'.format( \\\n arrayName, arrayName, int(x0), int(y0), arrayName, int(x0), int(y0), \\\n type_stack)\n data = pickle.load(open(filename, 'rb'))\n EW_UD = data[6]\n NS_UD = data[7]\n # Read file containing data from stack_acorr_tremor\n# filename = 'ac/{}/{}_{:03d}_{:03d}/{}_{:03d}_{:03d}_{}.pkl'.format( \\\n# arrayName, arrayName, int(x0), int(y0), arrayName, int(x0), int(y0), \\\n# type_stack)\n# data = pickle.load(open(filename, 'rb'))\n# EW = data[6]\n# NS = data[7]\n# UD = data[8]\n # Stack over all tremor windows\n if (cc_stack == 'lin'):\n EW_UD_stack = linstack([EW_UD], normalize=False)[0]\n NS_UD_stack = linstack([NS_UD], normalize=False)[0]\n# EW_stack = linstack([EW], normalize=False)[0]\n# NS_stack = linstack([NS], normalize=False)[0]\n# UD_stack = linstack([UD], normalize=False)[0]\n elif (cc_stack == 'pow'):\n EW_UD_stack = powstack([EW_UD], w, normalize=False)[0]\n NS_UD_stack = powstack([NS_UD], w, normalize=False)[0]\n# EW_stack = powstack([EW], w, normalize=False)[0]\n# NS_stack = powstack([NS], w, normalize=False)[0]\n# UD_stack = powstack([UD], w, normalize=False)[0]\n elif (cc_stack == 'PWS'):\n EW_UD_stack = PWstack([EW_UD], w, normalize=False)[0]\n NS_UD_stack = PWstack([NS_UD], w, normalize=False)[0]\n# EW_stack = PWstack([EW], w, normalize=False)[0]\n# NS_stack = PWstack([NS], w, normalize=False)[0]\n# UD_stack = PWstack([UD], w, normalize=False)[0]\n else:\n raise ValueError( \\\n 'Type of stack must be lin, pow, or PWS')\n # Initialize indicators of cross correlation fit\n nt = len(EW_UD)\n ccmaxEW = np.zeros(nt)\n cc0EW = np.zeros(nt)\n timedelayEW = np.zeros(nt)\n rmsEW = np.zeros(nt)\n ccmaxNS = np.zeros(nt)\n cc0NS = np.zeros(nt)\n timedelayNS = np.zeros(nt)\n rmsNS = np.zeros(nt)\n # Windows of the cross correlation to look at\n i0 = int((len(EW_UD_stack) - 1) / 2)\n ibegin = i0 + int(Tmin / EW_UD_stack.stats.delta)\n iend = i0 + int(Tmax / EW_UD_stack.stats.delta) + 1\n rmsb = i0 + int(RMSmin / EW_UD_stack.stats.delta)\n rmse = i0 + int(RMSmax / EW_UD_stack.stats.delta) + 1\n # Time function\n dt = EW_UD_stack.stats.delta\n imax = int((EW_UD_stack.stats.npts - 1) / 2)\n t = dt * np.arange(- imax, imax + 1)\n for i in range(0, nt):\n rmsEW[i] = np.max(np.abs(EW_UD[i][ibegin:iend])) / \\\n np.sqrt(np.mean(np.square(EW_UD[i][rmsb:rmse])))\n rmsNS[i] = np.max(np.abs(NS_UD[i][ibegin:iend])) / \\\n np.sqrt(np.mean(np.square(NS_UD[i][rmsb:rmse])))\n # Cross correlate cc for EW with stack \n cc_EW = correlate(EW_UD[i][ibegin : iend], \\\n EW_UD_stack[ibegin : iend], ncor)\n ccmaxEW[i] = np.max(cc_EW)\n cc0EW[i] = cc_EW[ncor]\n timedelayEW[i] = (np.argmax(cc_EW) - ncor) * EW_UD_stack.stats.delta\n # Cross correlate cc for NS with stack\n cc_NS = correlate(NS_UD[i][ibegin : iend], \\\n NS_UD_stack[ibegin : iend], ncor)\n ccmaxNS[i] = np.max(cc_NS)\n cc0NS[i] = cc_NS[ncor]\n timedelayNS[i] = (np.argmax(cc_NS) - ncor) * NS_UD_stack.stats.delta\n # Clustering\n df = pd.DataFrame({'ccmaxEW' : ccmaxEW, 'ccmaxNS' : ccmaxNS, \\\n 'cc0EW' : cc0EW, 'cc0NS' : cc0NS, 'timedelayEW' : timedelayEW, \\\n 'timedelayNS' : timedelayNS, 'rmsEW' : rmsEW, 'rmsNS' : rmsNS})\n df 
= preprocessing.scale(df)\n df = pd.DataFrame(df)\n df.columns = ['ccmaxEW', 'ccmaxNS', 'cc0EW', 'cc0NS', 'timedelayEW', \\\n 'timedelayNS', 'rmsEW', 'rmsNS']\n if (typecluster == 'kmeans'):\n clusters = KMeans(n_clusters=nc, random_state=0).fit_predict(df)\n elif (typecluster == 'agglo'):\n clustering = AgglomerativeClustering(n_clusters=nc).fit(df)\n clusters = clustering.labels_\n else:\n raise ValueError( \\\n 'Type of clustering must be kmeans or agglo')\n # Scatter plot\n if (draw_scatter == True):\n colors = [palette[c] for c in clusters]\n pd.plotting.scatter_matrix(df, c=colors, figsize=(20, 20))\n plt.tight_layout()\n plt.savefig( \\\n 'cc/{}/{}_{:03d}_{:03d}/{}_{:03d}_{:03d}_{}_{}_cluster_scatter.eps'. \\\n format(arrayName, arrayName, int(x0), int(y0), arrayName, int(x0), \\\n int(y0), type_stack, cc_stack), format='eps')\n plt.close()\n # Compute time lags\n timelagEW = np.zeros(nt)\n timelagNS = np.zeros(nt)\n for i in range(0, nt):\n # Time lags\n EWenvelope = obspy.signal.filter.envelope(EW_UD[i].data)\n i0 = np.argmax(EWenvelope[ibegin:iend])\n timelagEW[i] = t[ibegin:iend][i0]\n NSenvelope = obspy.signal.filter.envelope(NS_UD[i].data)\n i0 = np.argmax(NSenvelope[ibegin:iend])\n timelagNS[i] = t[ibegin:iend][i0]\n # Compute width of timelags distribution\n timelags = pd.DataFrame({'timelagEW' : timelagEW, 'timelagNS' : timelagNS})\n width_clust_EW = []\n width_clust_NS = []\n timelag_clust_EW = []\n timelag_clust_NS = []\n for j in range(0, nc):\n times = timelags['timelagEW'].iloc[clusters == j]\n width_clust_EW.append(np.std(times))\n timelag_clust_EW.append(times)\n times = timelags['timelagNS'].iloc[clusters == j]\n width_clust_NS.append(np.std(times))\n timelag_clust_NS.append(times)\n # Save timelags into file\n filename = 'cc/{}/{}_{:03d}_{:03d}/'.format(arrayName, arrayName, \\\n int(x0), int(y0)) + '{}_{:03d}_{:03d}_{}_{}_cluster_timelags.pkl'. \\\n format(arrayName, int(x0), int(y0), type_stack, cc_stack)\n pickle.dump([timelag_clust_EW, timelag_clust_NS], open(filename, 'wb'))\n # Plot histogram of timelags\n if (draw_hist == True):\n params = {'legend.fontsize': 24, \\\n 'xtick.labelsize': 24, \\\n 'ytick.labelsize': 24}\n pylab.rcParams.update(params)\n plt.figure(1, figsize=(10 * nc, 16))\n tlag_min = min(np.min(timelags['timelagEW']), np.min(timelags['timelagNS']))\n tlag_max = max(np.max(timelags['timelagEW']), np.max(timelags['timelagNS']))\n # EW / Vertical\n for j in range(0, nc):\n plt.subplot2grid((2, nc), (0, j))\n times = timelags['timelagEW'].iloc[clusters == j]\n m = np.mean(times)\n s = np.std(times)\n plt.hist(times, range=(tlag_min, tlag_max))\n plt.axvline(m + s, color='grey', linestyle='--')\n plt.axvline(m - s, color='grey', linestyle='--')\n plt.title('EW / UD - Cluster {:d} ({:d} tremor windows)'.format(j, \\\n len(times)), fontsize=24)\n plt.xlabel('Time lag (s)', fontsize=24)\n # NS / Vertical\n for j in range(0, nc):\n plt.subplot2grid((2, nc), (1, j))\n times = timelags['timelagNS'].iloc[clusters == j]\n m = np.mean(times)\n s = np.std(times)\n plt.hist(times, range=(tlag_min, tlag_max))\n plt.title('NS / UD - Cluster {:d} ({:d} tremor windows)'.format(j, \\\n len(times)), fontsize=24)\n plt.axvline(m + s, color='grey', linestyle='--')\n plt.axvline(m - s, color='grey', linestyle='--')\n plt.xlabel('Time lag (s)', fontsize=24)\n # End figure\n plt.tight_layout()\n plt.savefig( \\\n 'cc/{}/{}_{:03d}_{:03d}/{}_{:03d}_{:03d}_{}_{}_cluster_timelags.eps'. 
\\\n format(arrayName, arrayName, int(x0), int(y0), arrayName, int(x0), \\\n int(y0), type_stack, cc_stack), format='eps')\n plt.close(1)\n # Plot stacked cross correlation\n if (draw_cc == True):\n params = {'legend.fontsize': 24, \\\n 'xtick.labelsize': 24, \\\n 'ytick.labelsize': 24}\n pylab.rcParams.update(params)\n plt.figure(2, figsize=(10 * nc, 16))\n # Time function\n npts = int((EW_UD_stack.stats.npts - 1) / 2)\n dt = EW_UD_stack.stats.delta\n t = dt * np.arange(- npts, npts + 1)\n # EW / Vertical\n cc_clust_EW = []\n t_clust_EW = []\n ratio_clust_EW = []\n EW_UD_stacks = Stream()\n EW_ntremor = []\n for j in range(0, nc):\n # Stack over selected tremor windows\n EWselect = Stream()\n for i in range(0, nt):\n if (clusters[i] == j):\n if envelope == True:\n EW_UD[i].data = obspy.signal.filter.envelope(EW_UD[i].data)\n EWselect.append(EW_UD[i])\n EW_ntremor.append(len(EWselect))\n if (cc_stack == 'lin'):\n EWselect_stack = linstack([EWselect], normalize=False)[0]\n elif (cc_stack == 'pow'):\n EWselect_stack = powstack([EWselect], w, normalize=False)[0]\n elif (cc_stack == 'PWS'):\n EWselect_stack = PWstack([EWselect], w, normalize=False)[0]\n else:\n raise ValueError( \\\n 'Type of stack must be lin, pow, or PWS')\n # Max cc and ratio with RMS\n cc_clust_EW.append(np.max(np.abs(EWselect_stack.data[ibegin:iend])))\n i0 = np.argmax(np.abs(EWselect_stack.data[ibegin:iend]))\n t_clust_EW.append(t[ibegin:iend][i0])\n RMS = np.sqrt(np.mean(np.square(EWselect_stack.data[rmsb:rmse])))\n ratio_clust_EW.append(np.max(np.abs(EWselect_stack.data[ibegin:iend])) / RMS)\n # Plot\n if (draw_cc == True):\n plt.subplot2grid((2, nc), (0, j))\n plt.axvline(Tmin, color='grey', linestyle='--')\n plt.axvline(Tmax, color='grey', linestyle='--')\n plt.plot(t, EW_UD_stack.data, 'k-', label='All')\n plt.plot(t, EWselect_stack.data, color=palette[j], \\\n label='Cluster {:d}'.format(j))\n plt.xlim(xmin, xmax)\n plt.ylim(ymin, ymax)\n plt.title('EW / UD - Cluster {:d} ({:d} tremor windows)'.format(j, \\\n len(EWselect)), fontsize=24)\n plt.xlabel('Lag time (s)', fontsize=24)\n plt.legend(loc=1)\n # Save into stream\n EW_UD_stacks.append(EWselect_stack)\n # Get the best stack\n i0_EW = cc_clust_EW.index(max(cc_clust_EW))\n t_EW = t_clust_EW[i0_EW]\n cc_EW = max(cc_clust_EW)\n ratio_EW = ratio_clust_EW[i0_EW]\n width_EW = width_clust_EW[i0_EW]\n stack_EW = EW_UD_stacks[i0_EW]\n ntremor = EW_ntremor[i0_EW]\n # NS / Vertical\n cc_clust_NS = []\n t_clust_NS = []\n ratio_clust_NS = []\n NS_UD_stacks = Stream()\n NS_ntremor = []\n for j in range(0, nc):\n # Stack over selected tremor windows\n NSselect = Stream()\n for i in range(0, nt):\n if (clusters[i] == j):\n if envelope == True:\n NS_UD[i].data = obspy.signal.filter.envelope(NS_UD[i].data)\n NSselect.append(NS_UD[i])\n NS_ntremor.append(len(NSselect))\n if (cc_stack == 'lin'):\n NSselect_stack = linstack([NSselect], normalize=False)[0]\n elif (cc_stack == 'pow'):\n NSselect_stack = powstack([NSselect], w, normalize=False)[0]\n elif (cc_stack == 'PWS'):\n NSselect_stack = PWstack([NSselect], w, normalize=False)[0]\n else:\n raise ValueError( \\\n 'Type of stack must be lin, pow, or PWS')\n # Max cc and ratio with RMS\n cc_clust_NS.append(np.max(np.abs(NSselect_stack[ibegin:iend])))\n i0 = np.argmax(np.abs(NSselect_stack[ibegin:iend]))\n t_clust_NS.append(t[ibegin:iend][i0])\n RMS = np.sqrt(np.mean(np.square(NSselect_stack[rmsb:rmse])))\n ratio_clust_NS.append(np.max(np.abs(NSselect_stack[ibegin:iend])) \\\n / RMS) \n # Plot\n if (draw_cc == True):\n 
plt.subplot2grid((2, nc), (1, j))\n plt.axvline(Tmin, color='grey', linestyle='--')\n plt.axvline(Tmax, color='grey', linestyle='--')\n plt.plot(t, NS_UD_stack.data, 'k-', label='All')\n plt.plot(t, NSselect_stack.data, color=palette[j], \\\n label='Cluster {:d}'.format(j, ))\n plt.xlim(xmin, xmax)\n plt.ylim(ymin, ymax)\n plt.title('NS / UD - Cluster {:d} ({:d} tremor windows)'.format(j, \\\n len(NSselect)), fontsize=24)\n plt.xlabel('Lag time (s)', fontsize=24)\n plt.legend(loc=1)\n # Save into stream\n NS_UD_stacks.append(NSselect_stack)\n # Get the best stack\n i0_NS = cc_clust_NS.index(max(cc_clust_NS))\n t_NS = t_clust_NS[i0_NS]\n cc_NS = max(cc_clust_NS)\n ratio_NS = ratio_clust_NS[i0_NS]\n width_NS = width_clust_NS[i0_NS]\n stack_NS = NS_UD_stacks[i0_NS]\n ntremor = NS_ntremor[i0_NS]\n # End figure\n if (draw_cc == True):\n plt.tight_layout()\n plt.savefig( \\\n 'cc/{}/{}_{:03d}_{:03d}/{}_{:03d}_{:03d}_{}_{}_cluster_stackcc.eps'. \\\n format(arrayName, arrayName, int(x0), int(y0), arrayName, int(x0), \\\n int(y0), type_stack, cc_stack), format='eps')\n plt.close(2)\n # Save clusters into file\n filename = 'cc/{}/{}_{:03d}_{:03d}/'.format(arrayName, arrayName, \\\n int(x0), int(y0)) + '{}_{:03d}_{:03d}_{}_{}_clusters.pkl'. \\\n format(arrayName, int(x0), int(y0), type_stack, cc_stack)\n pickle.dump([data[0], data[1], data[2], data[3], data[4], data[5], \\\n clusters, i0_EW, i0_NS], open(filename, 'wb'))\n # Save best stacks into file\n filename = 'cc/{}/{}_{:03d}_{:03d}/'.format(arrayName, arrayName, \\\n int(x0), int(y0)) + '{}_{:03d}_{:03d}_{}_{}_cluster_stacks.pkl'. \\\n format(arrayName, int(x0), int(y0), type_stack, cc_stack)\n pickle.dump([stack_EW, stack_NS], open(filename, 'wb'))\n # Plot stacked autocorrelation\n if (draw_ac == True):\n plt.figure(3, figsize=(10 * nc, 24))\n params = {'legend.fontsize': 24, \\\n 'xtick.labelsize': 24, \\\n 'ytick.labelsize': 24}\n pylab.rcParams.update(params)\n npts = int((EW_stack.stats.npts - 1) / 2)\n dt = EW_stack.stats.delta\n t = dt * np.arange(- npts, npts + 1)\n # EW\n for j in range(0, nc):\n plt.subplot2grid((3, nc), (0, j))\n plt.plot(t, EW_stack.data, 'k-', label='All')\n EWselect = Stream()\n for i in range(0, nt):\n if (clusters[i] == j):\n EWselect.append(EW[i])\n # Stack over selected tremor windows\n if (cc_stack == 'lin'):\n EWselect_stack = linstack([EWselect], normalize=False)[0]\n elif (cc_stack == 'pow'):\n EWselect_stack = powstack([EWselect], w, normalize=False)[0]\n elif (cc_stack == 'PWS'):\n EWselect_stack = PWstack([EWselect], w, normalize=False)[0]\n else:\n raise ValueError( \\\n 'Type of stack must be lin, pow, or PWS')\n plt.plot(t, EWselect_stack.data, color=palette[j], \\\n label='Cluster {:d}'.format(j))\n plt.xlim(0, xmax)\n plt.ylim(- ymax, ymax)\n plt.title('EW - Cluster {:d} ({:d} tremor windows)'.format(j, \\\n len(EWselect)), fontsize=24)\n plt.xlabel('Lag time (s)', fontsize=24)\n plt.legend(loc=1)\n # NS\n for j in range(0, nc):\n plt.subplot2grid((3, nc), (1, j))\n plt.plot(t, NS_stack.data, 'k-', label='All')\n NSselect = Stream()\n for i in range(0, nt):\n if (clusters[i] == j):\n NSselect.append(NS[i])\n # Stack over selected tremor windows\n if (cc_stack == 'lin'):\n NSselect_stack = linstack([NSselect], normalize=False)[0]\n elif (cc_stack == 'pow'):\n NSselect_stack = powstack([NSselect], w, normalize=False)[0]\n elif (cc_stack == 'PWS'):\n NSselect_stack = PWstack([NSselect], w, normalize=False)[0]\n else:\n raise ValueError( \\\n 'Type of stack must be lin, pow, or PWS')\n plt.plot(t, 
NSselect_stack.data, color=palette[j], \\\n label='Cluster {:d}'.format(j))\n plt.xlim(0, xmax)\n plt.ylim(- ymax, ymax)\n plt.title('NS - Cluster {:d} ({:d} tremor windows)'.format(j, \\\n len(NSselect)), fontsize=24)\n plt.xlabel('Lag time (s)', fontsize=24)\n plt.legend(loc=1)\n # UD\n for j in range(0, nc):\n plt.subplot2grid((3, nc), (2, j))\n plt.plot(t, UD_stack.data, 'k-', label='All')\n UDselect = Stream()\n for i in range(0, nt):\n if (clusters[i] == j):\n UDselect.append(UD[i])\n # Stack over selected tremor windows\n if (cc_stack == 'lin'):\n UDselect_stack = linstack([UDselect], normalize=False)[0]\n elif (cc_stack == 'pow'):\n UDselect_stack = powstack([UDselect], w, normalize=False)[0]\n elif (cc_stack == 'PWS'):\n UDselect_stack = PWstack([UDselect], w, normalize=False)[0]\n else:\n raise ValueError( \\\n 'Type of stack must be lin, pow, or PWS')\n plt.plot(t, UDselect_stack.data, color=palette[j], \\\n label='Cluster {:d}'.format(j))\n plt.xlim(0, xmax)\n plt.ylim(- ymax, ymax)\n plt.title('UD - Cluster {:d} ({:d} tremor windows)'.format(j, \\\n len(UDselect)), fontsize=24)\n plt.xlabel('Lag time (s)', fontsize=24)\n plt.legend(loc=1)\n # End figure\n plt.tight_layout()\n plt.savefig( \\\n 'ac/{}/{}_{:03d}_{:03d}/{}_{:03d}_{:03d}_{}_{}_cluster_stackac.eps'. \\\n format(arrayName, arrayName, int(x0), int(y0), arrayName, int(x0), \\\n int(y0), type_stack, cc_stack), format='eps')\n plt.close(3)\n # Plot colored cross correlation windows\n if (draw_colored_cc == True):\n params = {'legend.fontsize': 24, \\\n 'xtick.labelsize': 24, \\\n 'ytick.labelsize': 24}\n pylab.rcParams.update(params)\n plt.figure(4, figsize=(20, 16))\n # EW - UD cross correlation\n ax1 = plt.subplot(121)\n index = 0\n for j in range(0, nc):\n for i in range(n1, n2):\n if (clusters[i] == j):\n dt = EW_UD[i].stats.delta\n ncor = int((EW_UD[i].stats.npts - 1) / 2)\n t = dt * np.arange(- ncor, ncor + 1)\n plt.plot(t, (2.0 * index + 1) + amp * EW_UD[i].data, \\\n color=palette[j])\n index = index + 1\n plt.xlim(xmin, xmax)\n plt.ylim(0.0, 2.0 * index)\n plt.title('East / Vertical component', fontsize=24)\n plt.xlabel('Lag time (s)', fontsize=24)\n plt.ylabel('Cross correlation', fontsize=24)\n ax1.set_yticklabels([])\n ax1.tick_params(labelsize=20)\n # NS - UD cross correlation\n ax2 = plt.subplot(122)\n index = 0\n for j in range(0, nc):\n for i in range(n1, n2):\n if (clusters[i] == j):\n dt = NS_UD[i].stats.delta\n ncor = int((NS_UD[i].stats.npts - 1) / 2)\n t = dt * np.arange(- ncor, ncor + 1)\n plt.plot(t, (2.0 * index + 1) + amp * NS_UD[i].data, \\\n color=palette[j])\n index = index + 1\n plt.xlim(xmin, xmax)\n plt.ylim(0.0, 2.0 * index)\n plt.title('North / Vertical component', fontsize=24)\n plt.xlabel('Lag time (s)', fontsize=24)\n plt.ylabel('Cross correlation', fontsize=24)\n ax2.set_yticklabels([])\n ax2.tick_params(labelsize=20)\n # End figure\n plt.tight_layout()\n plt.savefig( \\\n 'cc/{}/{}_{:03d}_{:03d}/{}_{:03d}_{:03d}_{}_{}_cluster_ccwin.eps'. 
\\\n format(arrayName, arrayName, int(x0), int(y0), arrayName, int(x0), \\\n int(y0), type_stack, cc_stack), format='eps')\n ax1.clear()\n ax2.clear()\n plt.close(4)\n # Plot colored autocorrelation windows\n if (draw_colored_ac == True):\n plt.figure(5, figsize=(20, 24))\n params = {'legend.fontsize': 24, \\\n 'xtick.labelsize': 24, \\\n 'ytick.labelsize': 24}\n pylab.rcParams.update(params)\n # EW autocorrelation\n ax1 = plt.subplot(131)\n for i in range(n1, n2):\n dt = EW[i].stats.delta\n ncor = int((EW[i].stats.npts - 1) / 2)\n t = dt * np.arange(- ncor, ncor + 1)\n plt.plot(t, (2.0 * i + 1) - 2 * n1 + amp * EW[i].data, color=colors[i])\n plt.xlim(0, xmax)\n plt.ylim(0.0, 2.0 * (n2 - n1))\n plt.title('East component', fontsize=24)\n plt.xlabel('Lag time (s)', fontsize=24)\n plt.ylabel('Autocorrelation', fontsize=24)\n ax1.set_yticklabels([])\n ax1.tick_params(labelsize=20)\n # NS autocorrelation\n ax2 = plt.subplot(132)\n for i in range(n1, n2):\n dt = NS[i].stats.delta\n ncor = int((NS[i].stats.npts - 1) / 2)\n t = dt * np.arange(- ncor, ncor + 1)\n plt.plot(t, (2.0 * i + 1) - 2 * n1 + amp * NS[i].data, color=colors[i])\n plt.xlim(0, xmax)\n plt.ylim(0.0, 2.0 * (n2 - n1))\n plt.title('North component', fontsize=24)\n plt.xlabel('Lag time (s)', fontsize=24)\n plt.ylabel('Autocorrelation', fontsize=24)\n ax2.set_yticklabels([])\n ax2.tick_params(labelsize=20)\n # UD autocorrelation\n ax3 = plt.subplot(133)\n for i in range(n1, n2):\n dt = UD[i].stats.delta\n ncor = int((UD[i].stats.npts - 1) / 2)\n t = dt * np.arange(- ncor, ncor + 1)\n plt.plot(t, (2.0 * i + 1) - 2 * n1 + amp * UD[i].data, color=colors[i])\n plt.xlim(0, xmax)\n plt.ylim(0.0, 2.0 * (n2 - n1))\n plt.title('Vertical component', fontsize=24)\n plt.xlabel('Lag time (s)', fontsize=24)\n plt.ylabel('Autocorrelation', fontsize=24)\n ax3.set_yticklabels([])\n ax3.tick_params(labelsize=20)\n # End figure and plot\n plt.tight_layout()\n plt.savefig( \\\n 'ac/{}/{}_{:03d}_{:03d}/{}_{:03d}_{:03d}_{}_{}_cluster_acwin.eps'. \\\n format(arrayName, arrayName, int(x0), int(y0), arrayName, int(x0), \\\n int(y0), type_stack, cc_stack), format='eps')\n ax1.clear()\n ax2.clear()\n ax3.clear()\n plt.close(5)\n return (clusters, t_EW, t_NS, cc_EW, cc_NS, ratio_EW, ratio_NS, \\\n width_EW, width_NS, ntremor)",
"def localize_red_clump(star_catalog,close_cat_idx,log):\n\n def select_within_range(mags, colours, mag_min, mag_max, col_min, col_max):\n \"\"\"Function to identify the set of array indices with values\n between the range indicated\"\"\"\n\n idx1 = np.where(colours >= col_min)[0]\n idx2 = np.where(colours <= col_max)[0]\n idx3 = np.where(mags >= mag_min)[0]\n idx4 = np.where(mags <= mag_max)[0]\n idx = set(idx1).intersection(set(idx2))\n idx = idx.intersection(set(idx3))\n idx = list(idx.intersection(set(idx4)))\n\n return idx\n\n RC = photometry_classes.Star()\n\n inst_i = star_catalog['cal_ref_mag_ip'][close_cat_idx]\n inst_r = star_catalog['cal_ref_mag_rp'][close_cat_idx]\n inst_g = star_catalog['cal_ref_mag_gp'][close_cat_idx]\n cal_i = star_catalog['imag'][close_cat_idx]\n cal_r = star_catalog['rmag'][close_cat_idx]\n cal_g = star_catalog['gmag'][close_cat_idx]\n inst_ri = inst_r - inst_i # Catalogue column order is red -> blue\n inst_gi = inst_g - inst_i\n inst_gr = inst_g - inst_r\n cal_ri = cal_r - cal_i\n cal_gi = cal_g - cal_i\n cal_gr = cal_g - cal_r\n\n log.info('\\n')\n log.info('Localizing the Red Clump')\n log.info('Median (r-i), i: '+str(np.median(inst_ri))+', '+str(np.median(inst_i)))\n log.info('Median (g-i), i: '+str(np.median(inst_gi))+', '+str(np.median(inst_i)))\n log.info('Median (g-r), g: '+str(np.median(inst_gr))+', '+str(np.median(inst_g)))\n\n ri_min = 0.8\n ri_max = 1.2\n i_min = 15.5\n i_max = 16.5\n\n r_min = 16.2\n r_max = 17.5\n\n gi_min = 2.5\n gi_max = 3.5\n\n gr_min = 1.5\n gr_max = 2.2\n g_min = 17.8\n g_max = 19.5\n\n log.info('Selected Red Clump giants between:')\n log.info('i = '+str(i_min)+' to '+str(i_max))\n log.info('r = '+str(r_min)+' to '+str(r_max))\n log.info('(r-i) = '+str(ri_min)+' to '+str(ri_max))\n log.info('g = '+str(g_min)+' to '+str(g_max))\n log.info('(g-r) = '+str(gr_min)+' to '+str(gr_max))\n log.info('(g-i) = '+str(gi_min)+' to '+str(gi_max))\n\n idx = select_within_range(inst_i, inst_ri, i_min, i_max, ri_min, ri_max)\n\n (RC.ri, RC.sig_ri, RC.i, RC.sig_i) = calc_distribution_centroid_and_spread_2d(inst_ri[idx], inst_i[idx], use_iqr=True)\n\n idx = select_within_range(inst_r, inst_ri, r_min, r_max, ri_min, ri_max)\n\n (RC.r, RC.sig_r) = calc_distribution_centre_and_spread(inst_r[idx], use_iqr=True)\n\n idx = select_within_range(inst_g, inst_gr, g_min, g_max, gr_min, gr_max)\n\n (RC.gr, RC.sig_gr, RC.g, RC.sig_g) = calc_distribution_centroid_and_spread_2d(inst_gr[idx], inst_g[idx], use_iqr=True)\n\n idx = select_within_range(inst_g, inst_gi, g_min, g_max, gi_min, gi_max)\n\n (RC.gi, RC.sig_gi, RC.g, RC.sig_g) = calc_distribution_centroid_and_spread_2d(inst_gi[idx], inst_g[idx], use_iqr=True)\n\n log.info('\\n')\n log.info('Centroid of Red Clump Stars at:')\n log.info(RC.summary(show_mags=True))\n log.info(RC.summary(show_mags=False,show_colours=True))\n\n RC.transform_to_JohnsonCousins()\n\n log.info(RC.summary(show_mags=False,johnsons=True))\n\n return RC",
"def SelectClusters(image, background_prediction, result_clustering,\n n_clusters, bands_thresholds=[\"B2\", \"B3\", \"B4\"],\n region_of_interest=None,\n tileScale=PARAMS_CLOUDCLUSTERSCORE_DEFAULT['tileScale']): \n bands_norm_difference = [b + \"_difference\" for b in bands_thresholds]\n\n img_joined = image.subtract(background_prediction)\\\n .select(bands_thresholds, bands_norm_difference)\\\n .addBands(image.select(bands_thresholds))\n\n bands_and_difference_bands = bands_thresholds + bands_norm_difference\n\n multitemporal_score = None\n reflectance_score = None\n\n for i in range(n_clusters):\n img_diff_clus = img_joined.updateMask(\n result_clustering.eq(i)).select(bands_and_difference_bands)\n\n clusteri = img_diff_clus.reduceRegion(ee.Reducer.mean(),\n geometry=region_of_interest,\n bestEffort=True,\n scale=30,\n tileScale=tileScale\n )\n \n clusteri_diff = clusteri.toArray(bands_norm_difference)\n clusteri_refl = clusteri.toArray(bands_thresholds)\n \n clusteri_refl_norm = clusteri_refl.multiply(clusteri_refl).reduce(ee.Reducer.mean(),\n axes=[0]).sqrt().get([0])\n\n clusteridiff_mean = clusteri_diff.reduce(ee.Reducer.mean(), axes=[0]).get([0])\n clusteridiff_norm = clusteri_diff.multiply(clusteri_diff).reduce(ee.Reducer.mean(),\n axes=[0]).sqrt().get([0])\n\n multitemporal_score_clusteri = ee.Algorithms.If(clusteridiff_mean.gt(0),\n clusteridiff_norm,\n clusteridiff_norm.multiply(-1))\n\n multitemporal_score_clusteri = result_clustering.eq(\n i).toFloat().multiply(ee.Number(multitemporal_score_clusteri))\n reflectance_score_clusteri = result_clustering.eq(\n i).toFloat().multiply(ee.Number(clusteri_refl_norm))\n\n if multitemporal_score is None:\n multitemporal_score = multitemporal_score_clusteri\n reflectance_score = reflectance_score_clusteri\n else:\n multitemporal_score = multitemporal_score.add(\n multitemporal_score_clusteri)\n reflectance_score = reflectance_score.add(\n reflectance_score_clusteri)\n\n return multitemporal_score, reflectance_score",
"def find_colours(self, img, colour, num_objects=1, ab_dist_thresh=50):\n img_lab = cv2.cvtColor(img,cv2.COLOR_BGR2LAB) #convert to LAB colour space \n img_a = img_lab[:,:,1] #A compononent of image\n img_b = img_lab[:,:,2] #B compononent of image \n des_a = colour[1] #A component of desired colour\n des_b = colour[2] #B component of desired colour\n \n #Compute difference between desired components and actual components\n d_a = img_a - des_a\n d_b = img_b - des_b\n dist_squared = d_a**2 + d_b**2\n \n #Apply threshold\n img_bin = np.uint8(dist_squared<ab_dist_thresh)*255\n \n #do connected components analysis to find centroids of large connected objects\n conn_comp = cv2.connectedComponentsWithStats(img_bin, 8, cv2.CV_32S)\n\n #sort by area, from largest to smallest\n areas = np.int_(conn_comp[2][:,4])\n idx = areas.argsort()\n idx = idx[::-1]\n centroids = np.int_(conn_comp[3])\n centroids = centroids[idx[1:num_objects+1]]\n \n #if more than one object returned, order from left to right\n idx = centroids[:,0].argsort() #sort by x value\n centroids = list(centroids[idx])\n \n #return centroid position and binary image of detected objects\n if len(centroids) == 1:\n return centroids[0], img_bin\n else:\n return centroids, img_bin",
"def color_quantization(self):\n Z = self.img.reshape((-1,3))\n\n # convert to np.float32\n Z = np.float32(Z)\n\n # define criteria, number of clusters(K) and apply kmeans()\n # Criteria arguments for termination of algorithm:\n # -type:\n # cv2.TERM_CRITERIA_EPS - stop the algorithm iteration if specified accuracy, epsilon, is reached. \n # cv2.TERM_CRITERIA_MAX_ITER - stop the algorithm after the specified number of iterations, max_iter. \n # cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER - \n # stop the iteration when any of the above condition is met.\n # -max_iter: (10 recommended)\n # -epsilon: \n # accuracy ; cluster average moved 'eps' from last location (1.0 recommended)\n \n criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)\n\n # Arguments into kmeans are:\n # -datapoints\n # -nclusters\n # -criteria\n # -attempts: num times it will use different initial labels ; (10 recommended)\n # -flags: how initial centers are taken\n \n ret,label,center= cv2.kmeans(Z,self.K,None, criteria ,10 , cv2.KMEANS_RANDOM_CENTERS)\n\n # Now convert back into uint8, and make original image\n center = np.uint8(center)\n res = center[label.flatten()]\n res2 = res.reshape((self.img.shape))\n \n self.img = res2\n self.update_image()",
"def main():\n # Verbosity: 1=Selection Results, >1 is various debugging information\n verbose = 0\n print \"build_all.py running with verbose=%s\"%(str(verbose))\n if verbose:\n print \"Fiducial Cut: \",fid_cut_hex,\"(apethum, z_min, z_max)\"\n print \"Max Drift Distance = %.4f us\"%(max_drift_time)\n\n tree = get_data_tree(list='All') # Golden All\n \n # We use the EXOFitting processed tree to get high-level physical quantities\n # like the anticorrelated energy, etc. \n #ptree_file = ROOT.TFile(preprocessed_tree)\n #ptree = ROOT.Get(\"dataTree\")\n #if verbose: print \"Indexing EXOFitting PreProcessed tree\"\n #ptree.BuildIndex(\"runNum\", \"eventNum\")\n #if verbose: print \" ...done\"\n\n cuts = \"\"\n\n #There must be at least 1 scintillation cluster:\n #cuts = \"@fScintClusters.size()>=1\"\n #cuts = \"(fScintClusters.GetCountsOnAPDPlane(0)+fScintClusters.GetCountsOnAPDPlane(1))>20000\"\n\n # The minimum scintinlation counts must be > 20000 and <70000\n # I observe that three peaks presumable alphas are at 38500, 42200, and 55000\n # So Rn222=5.4MeV, Po218=6MeV, Po214=7.7MeV\n # calibrate:: y=mx+b, m=6167, b=5198\n #cuts = \"fScintClusters.fRawEnergy>20000 && fScintClusters.fRawEnergy<70000\"\n #cuts += \"&& fScintClusters.fRawEnergy>22000 && fScintClusters.fRawEnergy<80000\"\n #cuts += \" && Sum$(fAPDs.fRawCounts) > 8000\"\n\n # Ignore Noise and Muon tagged events\n cuts +=\"fEventHeader.fTaggedAsNoise==0 && fEventHeader.fTaggedAsMuon==0\" \n\n # That's the last of the cuts, lets show the user what the cut looks like\n print \"Applying Cuts to data: \\n%s\"%cuts\n\n #Draw is the fastest method to apply cuts, in the end what we want is a reduced data list\n # to perform a more targeted analysis...\n tree.Draw(\">>+elist_alpha_canidates\",cuts,\"goff\")\n elist_alpha_canidates = ROOT.gDirectory.Get(\"elist_alpha_canidates\")\n print \"There are %d events passing the initial cuts\"%elist_alpha_canidates.GetN()\n\n #Now we have to look at events passing the cuts individually\n tf = ROOT.TFile(\"all.root\",\"RECREATE\")\n Rntree = tree.CloneTree(0)\n \n for i in range(elist_alpha_canidates.GetN()):\n # Print Progress\n if i%int(elist_alpha_canidates.GetN()/20) == 0:\n print \"%d of %d\"%(i,elist_alpha_canidates.GetN())\n\n #Grab the event data\n tree.GetEntry(elist_alpha_canidates.GetEntry(i))\n #ed = tree.EventBranch\n #if verbose>1: print_event_data(ed,verbose)\n\n #is_alphaish = check_alpha_like(ed,verbose)\n \n #is the event a fully reconstructed BiPo?\n #is_bipo = check_full_BiPo(ed,verbose)\n\n # Case1 (position matched Bi-Po)\n #is_case1 = check_case1(ed,verbose)\n #print \"BiPo=%s, Case1=%s\"%(is_bipo, is_case1) \n #raw_input('<hit any key to continue>')\n #if is_bipo or is_alphaish:\n # Write the EventData of events which pass any of our selection criteria\n # to ROOT file\n Rntree.Fill()\n\n Rntree.AutoSave()",
"def find_components(image,deltaPix,lens_rad_arcsec = 6.0,lens_rad_ratio = None,\n center_x = None,center_y = None, gal_rad_ratio = 0.1,\n min_size_arcsec=0.7,thresh=0.5, many_sources = True,\n show_locations=False, title = None):\n\n # convert minimum component size in pixel units\n min_size = int(min_size_arcsec / deltaPix)\n \n #Convert lens radius and central galaxy radius to pixels\n if lens_rad_ratio == None:\n lens_rad = int(lens_rad_arcsec / deltaPix)\n else: lens_rad = int(len(image) * lens_rad_ratio)\n gal_rad = int(len(image) * gal_rad_ratio)\n \n \n# im2[im2 < im2.min() + 10.*thresh] = 0.\n \n # downscale source image to data resolution (for speed + easier for converting to data units)\n #down = image_util.re_size(image, factor=supersampling_factor_source)\n \n # apply laplacian of gaussian (LoG) filter to enhance maxima\n LoG = - gaussian_laplace(deepcopy(image), sigma = min_size, mode='constant', cval=0.) \n \n# LoG = - gaussian_laplace(deepcopy(im2), sigma = 2., mode='constant', cval=0.)\n \n filtered = deepcopy(LoG)\n \n# print(LoG.min(),LoG.max(),np.abs(LoG.min()) + thresh )\n \n# print(type(filtered))\n \n #background mean and std of filtered image \n corners = np.zeros([4,5,5])\n corners[0] = LoG[0:5,0:5]\n corners[1] = LoG[-5:,0:5]\n corners[2] = LoG[0:5,-5:]\n corners[3] = LoG[-5:,-5:]\n means = []\n stds = []\n for c in corners:\n mn,med,s = sigma_clipped_stats(c,sigma=3.0)\n means.append(mn)\n stds.append(s)\n \n stds=np.array(stds)\n means = np.array(means)\n means_std = np.std(means)\n# means_good = means[(means >= means.mean() - 1.0 * means_std) & (means <= means.mean() + 1.0 * means_std)]\n means_good = means[(np.abs(means) <= np.abs(means).min() + 1.0 * means_std)]\n mean_bg = np.mean(means_good)\n std_bg = np.mean(stds[(np.abs(means) <= np.abs(means).min() + 1.0 * means_std)])\n# print('LoG means: {}, Log means std: {}, Log means good: {}, LoG avg mean: {}'.format(means,means_std,means_good,mean_bg))\n# print('min: {}, max: {}, cut: {}'.format(LoG.min(),LoG.max(),mean_bg + thresh))\n# print(LoG.min(),LoG.max(),filtered.min() + thresh)\n \n \n # assume all value below max*threshold can not be maxima, so put all to zero\n# filtered[filtered < thresh*filtered.max()] = 0.\n \n# assume all value below min*threshold can not be maxima, so put all to zero\n# filtered[filtered < filtered.min() + thresh * np.abs(filtered.min())] = 0.\n# filtered[filtered < mean_bg + thresh] = 0.\n filtered[filtered < mean_bg + 6.*std_bg] = 0. #set pixels below the mean + 6x threshold to 0\n \n # find coordinates of local maxima\n #print(int(0.5 * min_size))\n max_idx_2d_small = peak_local_max(filtered, min_distance=0) #All bright pixels\n max_idx_2d_large = peak_local_max(filtered, min_distance=1) #peaks with min size of 1 pixel\n \n x_list_small, y_list_small = max_idx_2d_small[:, 1], max_idx_2d_small[:, 0]\n x_list_large, y_list_large = max_idx_2d_large[:, 1], max_idx_2d_large[:, 0]\n \n im_center_x, im_center_y = len(image) / 2., len(image) / 2. 
#center of image\n \n if (center_x == None) & (center_y == None):\n new_center_x, new_center_y = im_center_x,im_center_y\n else:\n new_center_x, new_center_y = center_x,center_y #new \"center\" = location of lens galaxy\n \n \n #distance of each detected peak from center\n R_small = np.sqrt((x_list_small - new_center_x)**2 + (y_list_small - new_center_y)**2) \n R_large = np.sqrt((x_list_large - new_center_x)**2 + (y_list_large - new_center_y)**2)\n \n #Contaminant light is only bright pixels further from center than lens_rad\n x_sats, y_sats = x_list_small[R_small > lens_rad], y_list_small[R_small > lens_rad]\n \n if many_sources:\n x_lens, y_lens = deepcopy(x_list_small), deepcopy(y_list_small)\n else:\n x_lens, y_lens = deepcopy(x_list_large), deepcopy(y_list_large)\n \n# x_lens, y_lens = x_list_small[R_small <= lens_rad], y_list_small[R_small <= lens_rad]\n \n if (len(x_lens) == 0) & (len(y_lens) == 0):\n x_lens = [0,15]\n y_lens = [0,15]\n \n sources = QTable([x_lens, y_lens],names={'x_local_peak','y_local_peak'}) #make table of all detected objects\n# print(x_list_large)\n# print(y_list_large)\n# print(sources)\n \n # show maxima on image for debug\n \n if show_locations:\n# fig = plt.figure(figsize=(4, 4))\n #plt.imshow(image, origin='lower', cmap=cmap_flux, norm=LogNorm(1e-2))\n \n f, axes = plt.subplots(1, 5, figsize=(20,5), sharex=False, sharey=False)\n# plt.figure(figsize = (8,8))\n# plt.subplot(1,2,1)\n \n axes[0].imshow(image, origin='lower', norm=SymLogNorm(5))\n axes[0].set_title('Image')\n axes[0].set_axis_off()\n \n \n axes[1].imshow(LoG, origin='lower', norm=SymLogNorm(5))\n axes[1].set_title('LoG Filtered Image')\n axes[1].set_axis_off()\n\n# plt.subplot(1,2,2)\n axes[2].imshow(filtered, origin='lower', norm=SymLogNorm(5))\n axes[2].set_title('Final Filtered Image')\n axes[2].set_axis_off()\n \n axes[3].imshow(image, origin='lower', norm=SymLogNorm(5))\n for i in range(len(x_lens)):\n axes[3].scatter([x_lens[i]], [y_lens[i]], c='red', s=60, marker='+')\n \n for i in range(len(x_list_large)):\n axes[3].scatter([x_list_large[i]], [y_list_large[i]], c='black', s=100, marker='x')\n axes[3].set_title('Detected Objects')\n axes[3].set_axis_off()\n \n axes[4].imshow(image, origin='lower', norm=SymLogNorm(5))\n \n for i in range(len(x_sats)):\n axes[4].scatter([x_sats[i]], [y_sats[i]], c='red', s=60, marker='+')\n \n# plt.annotate(i+1, (x_list[i], y_list[i]), color='black')\n \n# for i in range(len(x_mask)):\n# plt.scatter([x_mask[i]], [y_mask[i]], c='red', s=100, marker='*')\n# plt.annotate(i+1, (x_mask[i], y_mask[i]), color='red')\n axes[4].scatter(new_center_x, new_center_y,c='red', s=100, marker='*')\n \n draw_lens_circle = Circle((new_center_x, new_center_y),lens_rad ,fill=False)\n draw_gal_circle = Circle((new_center_x, new_center_y),gal_rad, fill = False)\n# plt.gcf().gca().add_artist(draw_lens_circle)\n# plt.gcf().gca().add_artist(draw_gal_circle)\n axes[4].add_patch(draw_lens_circle)\n# axes[4].add_patch(draw_gal_circle)\n \n axes[4].set_title('Pixels to Mask: \\n r = {:.3f}'.format(lens_rad_arcsec))\n axes[4].text(1, 1, \"detected components\", color='red')\n axes[4].set_axis_off()\n \n if title != None:\n f.suptitle(title, fontsize = 15)\n# plt.show()\n \n \n return (x_sats, y_sats), (new_center_x, new_center_y), sources",
"def makeGSCcat(self,chopSpur=0):\n curdir = os.getcwd()\n os.chdir(self.astromdir)\n self.logfile.write(\"Entered 'makeGSCcat' to make astrometric ref catalog...\")\n # get input wcs information from first image in sci list\n ff = pyfits.open(os.path.join(self.Imagedir,self.sciImlist[0]))\n inwcs = wcsclass.BasicWCS(ff[0].header)\n ff.close()\n del ff\n \n self.crpix1,self.crpix2 = (inwcs.wcs['CRPIX1'],inwcs.wcs['CRPIX2'])\n self.crval1in = inwcs.wcs['CRVAL1']\n self.crval2in = inwcs.wcs['CRVAL2']\n NX=self.NX = inwcs.wcs['NAXIS1']\n NY=self.NY = inwcs.wcs['NAXIS2']\n\n # get RA,Dec of central pixel and the search radius\n Xctr = (self.NX+1)/2\n Yctr = (self.NY+1)/2\n self.RActr,self.Decctr = inwcs.xy2rd((Xctr,Yctr)) # both in deg\n \n ((rahh,ramm,rass), (decdd,decmm,decss)) = self._deg2hms(self.RActr,self.Decctr)\n rad_amin = 0.5*math.sqrt(NX*NX + NY*NY) * inwcs.wcs['PSCALE']/60.\n rad_amin = round(1000*(0.1+rad_amin))/1000.\n\n self.logfile.write('Input WCS: CRVAL1,2: '+str((self.crval1in,self.crval2in))+\\\n ' crpix1,2: '+str((self.crpix1,self.crpix2)))\n self.logfile.write('Xctr,Yctr: '+str((Xctr,Yctr))+' RA_ctr,Dec_ctr: '+str((self.RActr,self.Decctr)))\n self.logfile.write('Making query to '+_HOST_+' RA,Dec: '+\\\n str(((rahh,ramm,rass),(decdd,decmm,decss)))+' rad_amin = '+str(rad_amin))\n \n fh = cStringIO.StringIO()\n # the STScI query fails a lot, randomly. This loop actually helps sometimes.\n for iwqtry in range(MAXWQtry):\n try:\n self.webquery(host=_HOST_, url=_URL_, method=\"GET\", file=fh,\n RA = \"%s\" % self.RActr,\n DEC = \"%s\" % self.Decctr,\n SR=rad_amin/60.0,\n CAT=_CATALOG_,\n FORMAT=\"CSV\")\n except IOError,err:\n errtxt = str(err)\n self.logfile.write(\"WARNING: webquery \"+str(iwqtry+1)+\" failed...\\n \")\n self.logfile.write(errtxt)\n self.errorList.append((self.modName,errtxt))\n time.sleep(2)\n if (iwqtry<MAXWQtry-1):\n sys.stderr.write(\" trying again...\\n\")\n else:\n sys.stderr.write(\" sorry, no luck.\\n\")\n print \"Web qeury on \",_HOST_,\"failed.\"\n print errtxt\n raise WebQueryError,err\n continue\n break\n # read and format the output, first line is a header so we will ignore it\n output = string.split(fh.getvalue(),'\\n')[2:]\n fh.close()\n gsclines = [i.replace(',', ' ') for i in output if i != '']\n self.Nrefobjs = len(gsclines)\n #\n # AT THIS POINT WE NEED TO BAIL OUT IF THERE AREN'T ANY GSC OBJS FOUND!\n #\n if(self.Nrefobjs<6):\n errtxt = \"WARNING: Too few (%d) GSC objects: no astrometric recalibration possible.\"%(self.Nrefobjs)\n self.logfile.write(\"WARNING: NOT ENUFF GSC OBJS TO CONTINUE!\")\n self.logfile.write(errtxt)\n self.errorList.append((self.modName,errtxt))\n return -1\n \n # write stuff to data files\n fdump = open(os.path.split(_URL_)[1].split('.')[0]+'.scat', 'w')\n culledfile = os.path.split(_URL_)[1].split('.')[0]+'.cull'\n fcull = open(culledfile, 'w')\n self.GSCmatchin = os.path.split(_URL_)[1].split('.')[0]+'.proj'\n \n self.Ncull=0\n for line in gsclines:\n fdump.write(line+'\\n')\n flds = line.split()\n # check Ra\n if flds[1][0] not in \"1234567890.\":\n continue\n # FpgMag, JpgMag, Vmag\n # FpgMagErr, JpgMagErr, VmagErr\n _mlist = [float(flds[7]),float(flds[8]),float(flds[12])]\n _elist = [float(flds[23]),float(flds[24]),float(flds[28])]\n _mlist.sort()\n _elist.sort()\n if _mlist[0] > 88.: continue\n if chopSpur:\n if _mlist[1] > 88.: continue\n \n mag = _mlist[0]\n magerr = _elist[0]\n del _mlist,_elist\n \n self.Ncull += 1\n # hstID, ra, dec, mag, raEpsilon, decEpsilon, magerr, epoch\n oline = '%-14s %s %s %6.2f 
%s %s %5.2f %s\\n' \\\n %(flds[0],flds[1],flds[2],mag,flds[4],flds[5],magerr,flds[6])\n fcull.write(oline)\n fdump.close()\n fcull.close()\n del fdump,fcull,line,oline,flds,mag,magerr\n\n self.logfile.write(\"Culling: kept %d GSC objects.\"%self.Ncull)\n print \"Culling: kept %d GSC objects.\"%self.Ncull\n \n # finally, write the file with projected coord offsets\n cmd = 'project_coords %s 1 2 %.6f %.6f outfile=%s asec' \\\n %(culledfile,self.RActr,self.Decctr,self.GSCmatchin)\n self.logfile.write(\"**> \"+cmd)\n\n projcoords = popen2.Popen4(cmd)\n _outlines = projcoords.fromchild.readlines()\n projcoords.fromchild.close()\n if len(_outlines) > 0:\n # I've never seen this happen, but better check...\n errtxt = \"Error: project_coords mysteriously failed!\"\n self.logfile.write(errtxt)\n self.errorList.append((self.modName,errtxt))\n for _line in _outlines:\n self.logfile.write(_line)\n print _line\n return -1\n del projcoords\n\n self.logfile.write(\"Astrometric reference catalog %s constructed.\"%self.GSCmatchin)\n\tos.chdir(curdir)\n return 0",
"def customNcuts(self):\n # computing neighboors graph\n A = kneighbors_graph(self.values, self.k, mode='distance', include_self=False).toarray()\n\n for i in range(self.values.shape[0]):\n for j in range(self.values.shape[0]):\n if A[i][j] > 0:\n\n v1 = (self.values[i][3], self.values[i][4], self.values[i][5])\n v2 = (self.values[j][3], self.values[j][4], self.values[j][5])\n\n magnitude1 = np.sqrt(v1[0] * v1[0] + v1[1] * v1[1] + v1[2] * v1[2])\n magnitude2 = np.sqrt(v2[0] * v2[0] + v2[1] * v2[1] + v2[2] * v2[2])\n ang = np.arccos(np.dot(v1, v2) / (magnitude1 * magnitude2))\n\n A[i][j] = max(self.values[i][7], self.values[j][7]) * A[i][j]\n\n # init SpectralClustering\n sc = SpectralClustering(4, affinity='precomputed', n_init=10, assign_labels = 'discretize')\n\n # cluster\n labels = sc.fit_predict(A)\n\n return labels",
"def makeReferenceStars(self, raArray, decArray, filterNameArray,\n bandSelected=False,\n brightStarRA=None, brightStarDec=None, brightStarRadius=None):\n\n # can we use the better smatch code?\n try:\n import smatch\n hasSmatch = True\n self.fgcmLog.info(\"Using smatch for matching.\")\n except ImportError:\n hasSmatch = False\n self.fgcmLog.info(\"Using htm for matching.\")\n\n if (raArray.size != decArray.size):\n raise ValueError(\"raArray, decArray must be same length.\")\n if (raArray.size != filterNameArray.size):\n raise ValueError(\"raArray, filterNameArray must be same length.\")\n\n # Prepare bright stars if necessary...\n if (brightStarRA is not None and brightStarDec is not None and\n brightStarRadius is not None):\n if (brightStarRA.size != brightStarDec.size or\n brightStarRA.size != brightStarRadius.size):\n raise ValueError(\"brightStarRA/Dec/Radius must have same length\")\n cutBrightStars = True\n else:\n cutBrightStars = False\n\n # Define the dtype\n dtype=[('fgcm_id', 'i4'),\n ('ra', 'f8'),\n ('dec', 'f8')]\n\n pixelCats = []\n\n # Split into pixels\n ipring = hp.ang2pix(self.starConfig['coarseNSide'],\n (90.0 - decArray) * np.pi / 180.,\n raArray * np.pi / 180.)\n hpix, revpix = esutil.stat.histogram(ipring, rev=True)\n\n gdpix, = np.where(hpix > 0)\n for ii, gpix in enumerate(gdpix):\n # This is the array of all the observations in the coarse pixel\n p1a=revpix[revpix[gpix]: revpix[gpix + 1]]\n\n if p1a.size == 0:\n continue\n\n bandPixelCat = None\n\n # loop over bands...\n for referenceBand in self.starConfig['referenceBands']:\n print(\"Working on %s\" % (referenceBand))\n # We first need to select based on the band, not on the filter name\n useFlag = None\n for filterName in self.filterNames:\n if (self.starConfig['filterToBand'][filterName] == referenceBand):\n if useFlag is None:\n useFlag = (filterNameArray[p1a] == filterName.encode('utf-8'))\n else:\n useFlag |= (filterNameArray[p1a] == filterName.encode('utf-8'))\n\n raArrayUse = raArray[p1a[useFlag]]\n decArrayUse = decArray[p1a[useFlag]]\n\n if raArrayUse.size == 0:\n print(\"Nothing found for pixel %d\" % (ipring[p1a[0]]))\n continue\n\n if hasSmatch:\n # faster match...\n matches = smatch.match(raArrayUse, decArrayUse,\n self.starConfig['matchRadius'] / 3600.0,\n raArrayUse, decArrayUse,\n nside=self.starConfig['matchNSide'], maxmatch=0)\n i1 = matches['i1']\n i2 = matches['i2']\n else:\n # slower htm matching...\n htm = esutil.htm.HTM(11)\n\n matcher = esutil.htm.Matcher(11, raArrayUse, decArrayUse)\n matches = matcher.match(raArrayUse, decArrayUse,\n self.starConfig['matchRadius'] / 3600.0,\n maxmatch=0)\n i1 = matches[1]\n i2 = matches[0]\n\n fakeId = np.arange(p1a.size)\n hist, rev = esutil.stat.histogram(fakeId[i1], rev=True)\n\n if (hist.max() == 1):\n self.fgcmLog.info(\"Warning: No matches found for pixel %d, band %s!\" %\n (ipring[p1a[0]], referenceBand))\n continue\n\n maxObs = hist.max()\n\n # how many unique objects do we have?\n histTemp = hist.copy()\n count=0\n for j in xrange(histTemp.size):\n jj = fakeId[j]\n if (histTemp[jj] >= self.starConfig['minPerBand']):\n i1a = rev[rev[jj]: rev[jj + 1]]\n histTemp[i2[i1a]] = 0\n count = count + 1\n\n # make a temporary catalog...\n bandPixelCatTemp = np.zeros(count, dtype=dtype)\n\n # Rotate. 
This works for DES, but maybe not in general?\n raTemp = raArrayUse.copy()\n\n hi, = np.where(raTemp > 180.0)\n raTemp[hi] -= 360.0\n\n # Compute mean ra/dec\n index = 0\n for j in xrange(hist.size):\n jj = fakeId[j]\n if (hist[jj] >= self.starConfig['minPerBand']):\n i1a = rev[rev[jj]: rev[jj + 1]]\n starInd = i2[i1a]\n # make sure this doesn't get used again\n hist[starInd] = 0\n #bandPixelCatTemp['ra'][index] = np.sum(raTemp[p1a[starInd]]) / starInd.size\n #bandPixelCatTemp['dec'][index] = np.sum(decArray[p1a[starInd]]) / starInd.size\n bandPixelCatTemp['ra'][index] = np.sum(raTemp[starInd]) / starInd.size\n bandPixelCatTemp['dec'][index] = np.sum(decArrayUse[starInd]) / starInd.size\n index = index + 1\n\n # Restore negative RAs\n lo, = np.where(bandPixelCatTemp['ra'] < 0.0)\n bandPixelCatTemp['ra'][lo] += 360.0\n\n # Match to previously pixel catalog if available, and remove dupes\n if bandPixelCat is None:\n # First time through, these are all new objects\n bandPixelCat = bandPixelCatTemp\n print(\" Found %d reference stars in %s band\" % (bandPixelCatTemp.size, referenceBand))\n else:\n # We already have objects, need to match/append\n if hasSmatch:\n bandMatches = smatch.match(bandPixelCat['ra'], bandPixelCat['dec'],\n self.starConfig['matchRadius'] / 3600.0,\n bandPixelCatTemp['ra'], bandPixelCatTemp['dec'],\n maxmatch=0)\n i1b = matches['i1']\n i2b = matches['i2']\n else:\n matcher = esutil.htm.Matcher(11, bandPixelCat['ra'], bandPixelCat['dec'])\n matches = matcher.match(bandPixelCatTemp['ra'], bandPixelCatTemp['dec'],\n self.starConfig['matchRadius'] / 3600.0,\n maxmatch=0)\n i1b = matches[1]\n i2b = matches[0]\n\n # Remove all matches from the temp catalog\n bandPixelCatTemp = np.delete(bandPixelCatTemp, i2b)\n print(\" Found %d new reference stars in %s band\" % (bandPixelCatTemp.size, referenceBand))\n\n bandPixelCat = np.append(bandPixelCat, bandPixelCatTemp)\n\n if bandPixelCat is not None:\n # Append to list of catalogs...\n pixelCats.append(bandPixelCat)\n\n self.fgcmLog.info(\"Found %d unique objects in pixel %d (%d of %d).\" %\n (bandPixelCat.size, ipring[p1a[0]], ii, gdpix.size))\n\n # now assemble into a total objCat\n count = 0\n for pixelCat in pixelCats:\n count += pixelCat.size\n\n self.objCat = np.zeros(count, dtype=dtype)\n ctr = 0\n for pixelCat in pixelCats:\n self.objCat[ctr:ctr + pixelCat.size] = pixelCat\n ctr += pixelCat.size\n # and clear memory\n pixelCat = None\n\n self.objCat['fgcm_id'] = np.arange(count) + 1\n\n self.fgcmLog.info(\"Found %d unique objects with >= %d observations.\" %\n (count, self.starConfig['minPerBand']))\n\n if (cutBrightStars):\n self.fgcmLog.info(\"Matching to bright stars for masking...\")\n if (hasSmatch):\n # faster smatch...\n matches = smatch.match(brightStarRA, brightStarDec, brightStarRadius,\n self.objCat['ra'], self.objCat['dec'], nside=self.starConfig['matchNSide'],\n maxmatch=0)\n i1=matches['i1']\n i2=matches['i2']\n else:\n # slower htm matching...\n htm = esutil.htm.HTM(11)\n\n matcher = esutil.htm.Matcher(10, brightStarRA, brightStarDec)\n matches = matcher.match(self.objCat['ra'], self.objCat['dec'], brightStarRadius,\n maxmatch=0)\n # matches[0] -> m1 -> array from matcher.match() call (self.objCat)\n # matches[1] -> m2 -> array from htm.Matcher() (brightStar)\n i1=matches[1]\n i2=matches[0]\n\n self.fgcmLog.info(\"Cutting %d objects too near bright stars.\" % (i2.size))\n self.objCat = np.delete(self.objCat,i2)\n\n # and remove stars with near neighbors\n self.fgcmLog.info(\"Matching stars to neighbors...\")\n 
if (hasSmatch):\n # faster smatch...\n\n matches=smatch.match(self.objCat['ra'], self.objCat['dec'],\n self.starConfig['isolationRadius']/3600.0,\n self.objCat['ra'], self.objCat['dec'],\n nside=self.starConfig['matchNSide'], maxmatch=0)\n i1=matches['i1']\n i2=matches['i2']\n else:\n # slower htm matching...\n htm = esutil.htm.HTM(11)\n\n matcher = esutil.htm.Matcher(11, self.objCat['ra'], self.objCat['dec'])\n matches = matcher.match(self.objCat['ra'], self.objCat['dec'],\n self.starConfig['isolationRadius']/3600.0,\n maxmatch = 0)\n i1=matches[1]\n i2=matches[0]\n\n use,=np.where(i1 != i2)\n\n if (use.size > 0):\n neighbored = np.unique(i2[use])\n self.fgcmLog.info(\"Cutting %d objects within %.2f arcsec of a neighbor\" %\n (neighbored.size, self.starConfig['isolationRadius']))\n self.objCat = np.delete(self.objCat, neighbored)\n\n # and we're done",
"def vis_mechanically_coupled_regions(img_dir,output_dir,data,dbscn_length,dbscn_min_size,display_not_save=False):\n #Read in the image that is segmented/labelled for nuclei\n img=imread(img_dir)\n\n #save plots to show clusters\n fig = plt.figure(figsize=(6, 2))\n ax0 = fig.add_subplot(131)\n ax1 = fig.add_subplot(132)\n ax3 = fig.add_subplot(133)\n #show segmented image labels\n ax0.imshow(img,aspect='auto') \n ax0.axis('off')\n #nuclear centroid color-coded by their orientation\n img1=ax1.scatter(data[\"Y\"], data[\"X\"], c=data[\"angles\"],s=1)\n ax1.set_xlim(0,img.shape[0])\n ax1.set_ylim(img.shape[1],0)\n plt.colorbar(img1)\n ax1.axis('off')\n\n # plot the cluster assignments\n img3=ax3.scatter(data[data[\"clusters\"]> -1][\"Y\"], data[data[\"clusters\"]> -1][\"X\"], \n c=data[data[\"clusters\"]> -1][\"clusters\"],cmap=\"plasma\",s=1)\n ax3.set_xlim(0,img.shape[0])\n ax3.set_ylim(img.shape[1],0)\n ax3.axis('off')\n\n #add titles\n ax0.title.set_text('Segmented Image')\n ax1.title.set_text('Filtered Orientation')\n ax3.title.set_text('Clusters')\n\n if display_not_save:\n plt.show()\n else: \n plt.savefig((output_dir+\"/\"+img_dir.rsplit('/', 1)[-1][:-4]+\"_\"+str(dbscn_length)+\"_\"+ str(dbscn_min_size)+\".png\"),dpi=600, bbox_inches = 'tight',pad_inches = 0)\n fig.clf()\n plt.close(fig)\n plt.close('all')\n \n \n del fig,ax0,ax1,ax3,img1,img3",
"def get_shapes(self):\n colours = [[None for j in range(self.cellcounts[0])] for i in range(self.cellcounts[1])]\n X = []\n backmap = {}\n rad = 3\n for k,((i,y),(j,x)) in enumerate(self.mids):\n chunk = self.img[y-rad:y+rad, x-rad:x+rad,:]\n m = np.mean(np.mean(chunk, axis=0), axis=0).astype(np.uint16)\n colours[i][j] = m\n X.append(m)\n backmap[k] = (i,j)\n print(np.shape(X))\n Z = linkage(X, 'ward')\n Q = fcluster(Z, self.thresh, criterion='distance')\n\n closenesses = []\n for k,cls in enumerate(Q):\n i,j = backmap[k]\n closenesses.append( np.sqrt(np.sum( (colours[i][j] - self.ideal_bg)**2) ) )\n minidx = np.argmin(closenesses)\n bgcls = Q[minidx]\n\n blibs = [(255,0,0), (0,255,0), (0,0,255), (255,255,0), (0,255,255), (255,0,255)]\n img4 = self.img2.copy()\n for k,((i,y),(j,x)) in enumerate(self.mids):\n cls = Q[k]\n if cls == bgcls:\n continue\n col = blibs[(cls-1)]\n img4 = cv2.circle(img4, (x,y), 5, col, 2)\n\n write_img(\"./out/test_classes.png\", img4)\n self.classimg = img4\n\n A = np.zeros(shape=self.cellcounts, dtype=np.uint8)\n mx = np.max(Q)\n for k,cls in enumerate(Q):\n if cls == bgcls:\n continue\n\n if cls == mx:\n plotcls = bgcls\n else:\n plotcls = cls\n i,j = backmap[k]\n A[i][j] = plotcls\n\n self.res = A",
"def _qt_radius_clustering_greedy(self, min_to_cluster, reduced, cache, max_cycles):\n centre_inds, clustered_inds = [], set()\n chsn_indices = [self.index[name] for name in self.chosen]\n avail_indices = set(self.index[name] for name in self.available)\n unassigned_indices = list(self._not_ignored_inds - avail_indices - set(chsn_indices))\n if unassigned_indices:\n # Remove unassigned from centre consideration\n reduced[:,unassigned_indices] = np.inf\n for chsn_ind in chsn_indices:\n cluster_inds = np.nonzero(reduced[:,chsn_ind] == 0)[0]\n centre_inds.append(chsn_ind)\n clustered_inds.update(cluster_inds)\n # Remove chosen and their clusters from all future consideration\n reduced[:,cluster_inds] = np.inf\n reduced[cluster_inds,:] = np.inf\n # Iteratively find the largest cluster, until enough variants are clustered\n cache['cycles_used'] = 0\n while len(clustered_inds) < min_to_cluster:\n centre_ind, cluster_inds = self._find_largest_candidate(reduced)\n if centre_ind == None:\n percent_placed = len(clustered_inds)*100.0/float(len(self._not_ignored_inds))\n error_msg = 'Error: clustering finished prematurely ({:.2f}% placed). To fix this, raise the critical threshold, lower the critical percent, or add more available variants.'.format(percent_placed)\n return [], error_msg, [centre_inds, self._not_ignored_inds-clustered_inds]\n centre_inds.append(centre_ind)\n clustered_inds.update(cluster_inds)\n reduced[:,centre_ind] = np.inf\n reduced[cluster_inds,:] = np.inf\n cache['cycles_used'] += 1\n if cache['quit_now'] or max_cycles != None and cache['cycles_used'] >= max_cycles:\n break\n final_cluster_inds = self._partition_nearest(centre_inds, self.orig_dists)\n final_scores = self._sum_dist_scores(centre_inds, final_cluster_inds, self.orig_dists)\n alt_variants = []\n return centre_inds, final_scores, alt_variants",
"def clustering_mech_coupled_regions(features,dbscn_length=400,dbscn_min_size=15, ell_threshold=0.9):\n \n ecc=features['eccentricity']\n angles=np.vectorize(degrees)(features['orientation'])\n angles=np.where(angles > 0, angles, abs(angles)+90)\n\n #Filter to get only elongated nuclei\n relevant_angles=(angles)[ecc > ell_threshold]\n cenx_rel=features['centroid-0'][ecc > ell_threshold]\n ceny_rel=features['centroid-1'][ecc > ell_threshold]\n labels_rel=features['label'][ecc > ell_threshold]\n cords=np.column_stack([cenx_rel,ceny_rel,relevant_angles])\n # Compute DBSCAN\n db = DBSCAN(eps=dbscn_length, min_samples=dbscn_min_size).fit(cords)\n clusters = db.labels_\n #save centroid angles and cluster identities\n clus_res=np.column_stack([cenx_rel,ceny_rel,relevant_angles,\n clusters,labels_rel])\n df = pd.DataFrame(data=clus_res, columns=[\"X\", \"Y\",\"angles\",\"clusters\",\"label\"])\n \n return df",
"def findAstromCorrs(self):\n self.logfile.write(\"Entered findAstromCorrs - will run: \"+\\\n \"makeGSCcat(), makeObjcats(), doMatching().\")\n\n if self.makeGSCcat() != 0:\n return -1\n if self.makeObjcats()!= 0:\n return -1\n if self.doMatching() != 0:\n # here we want to remake the GSCcat using a \"chopSpur\" flag,\n # if the cat has a goodly number of objects\n if(self.Ncull >= 10):\n print \"Retrying matchup with only GSC objs detected in 2 bands...\"\n self.logfile.write(\"Retrying matchup with only GSC objs detected in 2 bands...\")\n if self.makeGSCcat(chopSpur=1) != 0:\n return -1\n if self.makeObjcats()!= 0:\n return -1\n if self.doMatching() != 0:\n return -1\n\n return 0",
"def apply_cuts(objects):\n #- Check if objects is a filename instead of the actual data\n if isinstance(objects, (str, unicode)):\n objects = io.read_tractor(objects)\n \n #- undo Milky Way extinction\n flux = unextinct_fluxes(objects)\n gflux = flux['GFLUX']\n rflux = flux['RFLUX']\n zflux = flux['ZFLUX']\n w1flux = flux['W1FLUX']\n wflux = flux['WFLUX']\n \n #- DR1 has targets off the edge of the brick; trim to just this brick\n if 'BRICK_PRIMARY' in objects.dtype.names:\n primary = objects['BRICK_PRIMARY']\n else:\n primary = np.ones(len(objects), dtype=bool)\n \n #----- LRG\n lrg = primary.copy()\n lrg &= rflux > 10**((22.5-23.0)/2.5)\n lrg &= zflux > 10**((22.5-20.56)/2.5)\n lrg &= w1flux > 10**((22.5-19.35)/2.5)\n lrg &= zflux > rflux * 10**(1.6/2.5)\n #- clip to avoid warnings from negative numbers raised to fractional powers\n lrg &= w1flux * rflux.clip(0)**(1.33-1) > zflux.clip(0)**1.33 * 10**(-0.33/2.5)\n\n #----- ELG\n elg = primary.copy()\n elg &= rflux > 10**((22.5-23.4)/2.5)\n elg &= zflux > rflux * 10**(0.3/2.5)\n elg &= zflux < rflux * 10**(1.5/2.5)\n elg &= rflux**2 < gflux * zflux * 10**(-0.2/2.5)\n elg &= zflux < gflux * 10**(1.2/2.5)\n\n #----- Quasars\n psflike = ((objects['TYPE'] == 'PSF') | (objects['TYPE'] == 'PSF ')) \n qso = primary.copy()\n qso &= psflike\n qso &= rflux > 10**((22.5-23.0)/2.5)\n qso &= rflux < gflux * 10**(1.0/2.5)\n qso &= zflux > rflux * 10**(-0.3/2.5)\n qso &= zflux < rflux * 10**(1.1/2.5)\n #- clip to avoid warnings from negative numbers raised to fractional powers\n qso &= wflux * gflux.clip(0)**1.2 > rflux.clip(0)**(1+1.2) * 10**(-0.4/2.5)\n ### qso &= wflux * gflux**1.2 > rflux**(1+1.2) * 10**(2/2.5)\n\n #------ Bright Galaxy Survey\n #- 'PSF' for astropy.io.fits; 'PSF ' for fitsio (sigh)\n bgs = primary.copy()\n bgs &= ~psflike\n bgs &= rflux > 10**((22.5-19.35)/2.5)\n\n #----- Standard stars\n fstd = primary.copy()\n fstd &= psflike\n fracflux = objects['DECAM_FRACFLUX'].T \n signal2noise = objects['DECAM_FLUX'] * np.sqrt(objects['DECAM_FLUX_IVAR'])\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n for j in (1,2,4): #- g, r, z\n fstd &= fracflux[j] < 0.04\n fstd &= signal2noise[:, j] > 10\n\n #- observed flux; no Milky Way extinction\n obs_rflux = objects['DECAM_FLUX'][:, 2]\n fstd &= obs_rflux < 10**((22.5-16.0)/2.5)\n fstd &= obs_rflux > 10**((22.5-19.0)/2.5)\n #- colors near BD+17; ignore warnings about flux<=0\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n grcolor = 2.5 * np.log10(rflux / gflux)\n rzcolor = 2.5 * np.log10(zflux / rflux)\n fstd &= (grcolor - 0.32)**2 + (rzcolor - 0.13)**2 < 0.06**2\n\n #-----\n #- construct the targetflag bits\n #- Currently our only cuts are DECam based (i.e. South)\n desi_target = lrg * desi_mask.LRG_SOUTH\n desi_target |= elg * desi_mask.ELG_SOUTH\n desi_target |= qso * desi_mask.QSO_SOUTH\n\n desi_target |= lrg * desi_mask.LRG\n desi_target |= elg * desi_mask.ELG\n desi_target |= qso * desi_mask.QSO\n\n desi_target |= fstd * desi_mask.STD_FSTAR\n \n bgs_target = bgs * bgs_mask.BGS_BRIGHT\n bgs_target |= bgs * bgs_mask.BGS_BRIGHT_SOUTH\n\n #- nothing for MWS yet; will be GAIA-based\n mws_target = np.zeros_like(bgs_target)\n\n #- Are any BGS or MWS bit set? Tell desi_target too.\n desi_target |= (bgs_target != 0) * desi_mask.BGS_ANY\n desi_target |= (mws_target != 0) * desi_mask.MWS_ANY\n\n return desi_target, bgs_target, mws_target",
"def _seg_image(self, x, y, r_cut=100):\n snr=self.snr\n npixels=self.npixels\n bakground = self.bakground\n error= self.bkg_rms(x,y,r_cut)\n kernel = self.kernel\n image_cutted = self.cut_image(x,y,r_cut)\n image_data = image_cutted\n threshold_detect_objs=detect_threshold(data=image_data, nsigma=snr,error=error)\n segments=detect_sources(image_data, threshold_detect_objs, npixels=npixels, filter_kernel=kernel)\n segments_deblend = deblend_sources(image_data, segments, npixels=npixels,nlevels=10)\n segments_deblend_info = source_properties(image_data, segments_deblend)\n nobjs = segments_deblend_info.to_table(columns=['id'])['id'].max()\n xcenter = segments_deblend_info.to_table(columns=['xcentroid'])['xcentroid'].value\n ycenter = segments_deblend_info.to_table(columns=['ycentroid'])['ycentroid'].value\n image_data_size = np.int((image_data.shape[0] + 1) / 2.)\n dist = ((xcenter - image_data_size) ** 2 + (ycenter - image_data_size) ** 2) ** 0.5\n c_index = np.where(dist == dist.min())[0][0]\n center_mask=(segments_deblend.data==c_index+1)*1 #supposed to be the data mask\n obj_masks = []\n for i in range(nobjs):\n mask = ((segments_deblend.data==i+1)*1)\n obj_masks.append(mask)\n xmin = segments_deblend_info.to_table(columns=['bbox_xmin'])['bbox_xmin'].value\n xmax = segments_deblend_info.to_table(columns=['bbox_xmax'])['bbox_xmax'].value\n ymin = segments_deblend_info.to_table(columns=['bbox_ymin'])['bbox_ymin'].value\n ymax = segments_deblend_info.to_table(columns=['bbox_ymax'])['bbox_ymax'].value\n xmin_c, xmax_c = xmin[c_index], xmax[c_index]\n ymin_c, ymax_c = ymin[c_index], ymax[c_index]\n xsize_c = xmax_c - xmin_c\n ysize_c = ymax_c - ymin_c\n if xsize_c > ysize_c:\n r_center = np.int(xsize_c)\n else:\n r_center = np.int(ysize_c)\n center_mask_info= [center_mask, r_center, xcenter, ycenter, c_index]\n return obj_masks, center_mask_info, segments_deblend",
"def make_arcs_red(slit='0.5', overwrite=False, zenith_only = True): #DaveC zenith = False\r\n\r\n aperture = slit\r\n\r\n iraf.unlearn('imcombine')\r\n iraf.imcombine.rdnoise = det_pars['red']['readnoise']\r\n iraf.imcombine.gain = det_pars['red']['gain']\r\n arcs = iraf.hselect('red????.fits', '$I', 'TURRET == \"LAMPS\" & APERTURE == \"{aperture}\" & LAMPS == \"0001110\"'.format(aperture=aperture), Stdout=1)\r\n if zenith_only:\r\n try:\r\n arcs = iraf.hselect(','.join(arcs), '$I', 'TURRET == \"LAMPS\" & APERTURE == \"{aperture}\" & LAMPS == \"0001110\" & AIRMASS < 1.01'.format(aperture=aperture), Stdout=1)\r\n except:\r\n pass\r\n if overwrite:\r\n iraf.delete('HeNeAr_{aperture}.fits'.format(aperture=aperture), \r\n verify='no')\r\n iraf.imcombine(','.join(arcs), 'HeNeAr_{}'.format(aperture), reject=\"none\")",
"def analyse_colour_mag_diagrams(params,star_catalog,catalog_header,\n target,source,blend,RC,\n det_idx,cat_idx,close_cat_idx,log):\n\n tol = 2.0\n\n filters = { 'ip': 'SDSS-i', 'rp': 'SDSS-r', 'gp': 'SDSS-g' }\n\n inst_i = star_catalog['cal_ref_mag_ip'][det_idx]\n inst_r = star_catalog['cal_ref_mag_rp'][det_idx]\n inst_g = star_catalog['cal_ref_mag_gp'][det_idx]\n cal_i = star_catalog['imag'][cat_idx]\n cal_r = star_catalog['rmag'][cat_idx]\n cal_g = star_catalog['gmag'][cat_idx]\n inst_ri = inst_r - inst_i # Catalogue column order is red -> blue\n inst_gr = inst_g - inst_r\n inst_gi = inst_g - inst_i\n cal_ri = cal_r - cal_i\n cal_gr = cal_g - cal_r\n cal_gi = cal_g - cal_i\n\n linst_i = star_catalog['cal_ref_mag_ip'][close_cat_idx]\n linst_r = star_catalog['cal_ref_mag_rp'][close_cat_idx]\n linst_g = star_catalog['cal_ref_mag_gp'][close_cat_idx]\n lcal_i = star_catalog['imag'][close_cat_idx]\n lcal_r = star_catalog['rmag'][close_cat_idx]\n lcal_g = star_catalog['gmag'][close_cat_idx]\n linst_ri = linst_r - linst_i # Catalogue column order is red -> blue\n linst_gr = linst_g - linst_r\n linst_gi = linst_g - linst_i\n lcal_ri = lcal_r - lcal_i\n lcal_gr = lcal_g - lcal_r\n lcal_gi = lcal_g - lcal_i\n\n plot_colour_mag_diagram(params,inst_i, inst_ri, linst_i, linst_ri, target,\n source, blend, RC, 'r', 'i', 'i', tol, log)\n\n plot_colour_mag_diagram(params,inst_r, inst_ri, linst_r, linst_ri, target,\n source, blend, RC, 'r', 'i', 'r', tol, log)\n\n plot_colour_mag_diagram(params,inst_g, inst_gr, linst_g, linst_gr, target,\n source, blend, RC, 'g', 'r', 'g', tol, log)\n\n plot_colour_mag_diagram(params,inst_g, inst_gi, linst_g, linst_gi, target,\n source, blend, RC, 'g', 'i', 'g', tol, log)",
"def threshold_vareas_by_eccentricity(\n meanruns_dir='/Users/olivercontier/bigfri/scratch/bids/derivatives/mean_runs',\n max_eccen=15.55,\n npt_bdir='/Users/olivercontier/bigfri/scratch/bids/derivatives/prf_neuropythy',\n out_bdir='/Users/olivercontier/bigfri/scratch/bids/derivatives/prf_neuropythy/fixated',\n):\n for sub in tqdm(range(1, 4), desc='subjects'):\n npt_outdir = pjoin(npt_bdir, f'sub-0{sub}')\n thresh_outdir = pjoin(out_bdir, f'sub-0{sub}')\n if not os.path.exists(thresh_outdir):\n os.makedirs(thresh_outdir)\n # anatomical resolution, but reoriented to standard (unlike fsorient)\n eccen_orig_f = pjoin(npt_outdir, 'inferred_eccen.nii.gz')\n eccen_orig_img = load_img(eccen_orig_f)\n eccen = eccen_orig_img.get_fdata()\n periphery_mask = eccen > max_eccen\n # reference image for resampling\n ref_f = pjoin(meanruns_dir, f'sub-0{sub}.nii.gz')\n ref_img = index_img(ref_f, 0)\n # threshold vareas\n va_img = load_img(pjoin(npt_outdir, 'inferred_varea.nii.gz'))\n va = va_img.get_fdata()\n va[periphery_mask] = 0.\n va_thr_img = new_img_like(va_img, va)\n va_thr_img.to_filename(pjoin(thresh_outdir, 'inferred_varea.nii.gz'))\n # resample to functional space and save rois individually\n for roival in tqdm(range(1, 13), desc='rois', leave=False):\n roi_arr = np.zeros(va.shape)\n roi_arr[np.where(va == roival)] = 1.\n roi_img = new_img_like(va_img, roi_arr)\n res_img = resample_to_img(roi_img, ref_img, interpolation='nearest')\n res_img.to_filename(pjoin(thresh_outdir, f'resampled_va-{roival}_interp-nn.nii.gz'))\n lres_img = resample_to_img(roi_img, ref_img, interpolation='linear')\n lres_img.to_filename(pjoin(thresh_outdir, f'resampled_va-{roival}_interp-linear.nii.gz'))\n return None",
"def ShowOneContourCutBKG(index,all_images,all_pointing,thex0,they0,all_titles,object_name,all_expo,dir_top_img,all_filt):\n plt.figure(figsize=(15,6))\n spec_index_min=100 # cut the left border\n spec_index_max=1900 # cut the right border\n star_halfwidth=70\n \n YMIN=-100\n YMAX=100\n \n figname='contourCutBKG_{}_{}.pdf'.format(all_filt[index],index)\n \n figfilename=os.path.join(dir_top_img,figname) \n \n #center is approximately the one on the original raw image (may be changed)\n #x0=int(all_pointing[index][0])\n x0=int(thex0[index])\n \n \n # Extract the image \n full_image=np.copy(all_images[index])\n \n # refine center in X,Y\n star_region_X=full_image[:,x0-star_halfwidth:x0+star_halfwidth]\n \n profile_X=np.sum(star_region_X,axis=0)\n profile_Y=np.sum(star_region_X,axis=1)\n\n NX=profile_X.shape[0]\n NY=profile_Y.shape[0]\n \n X_=np.arange(NX)\n Y_=np.arange(NY)\n \n avX,sigX=weighted_avg_and_std(X_,profile_X**4) # take squared on purpose (weigh must be >0)\n avY,sigY=weighted_avg_and_std(Y_,profile_Y**4)\n \n x0=int(avX+x0-star_halfwidth)\n \n \n # find the center in Y on the spectrum\n yprofile=np.sum(full_image[:,spec_index_min:spec_index_max],axis=1)\n y0=np.where(yprofile==yprofile.max())[0][0]\n\n # cut the image in vertical and normalise by exposition time\n reduc_image=full_image[y0-10:y0+10,:]=0\n reduc_image=full_image[y0+YMIN:y0+YMAX,x0:spec_index_max]/all_expo[index]\n \n reduc_image[:,0:100]=0 # erase central star\n \n X_Size_Pixels=np.arange(0,reduc_image.shape[1])\n Y_Size_Pixels=np.arange(0,reduc_image.shape[0])\n Transverse_Pixel_Size=Y_Size_Pixels-int(float(Y_Size_Pixels.shape[0])/2.)\n \n # calibration in wavelength\n #grating_name=all_filt[index].replace('dia ','')\n grating_name=get_disperser_filtname(all_filt[index])\n \n lambdas=Pixel_To_Lambdas(grating_name,X_Size_Pixels,all_pointing[index],True)\n \n #if grating_name=='Ron200':\n # holo = Hologram('Ron400',verbose=True)\n #else: \n # holo = Hologram(grating_name,verbose=True)\n #lambdas=holo.grating_pixel_to_lambda(X_Size_Pixels,all_pointing[index])\n #if grating_name=='Ron200':\n # lambdas=lambdas*2.\n \n\n X,Y=np.meshgrid(lambdas,Transverse_Pixel_Size) \n T=np.transpose(reduc_image)\n \n \n cs=plt.contourf(X, Y, reduc_image, 100, alpha=1., cmap='jet',origin='lower')\n C = plt.contour(X, Y, reduc_image ,50, colors='white', linewidth=.001,origin='lower') \n \n \n cbar = plt.colorbar(cs) \n \n for line in LINES:\n if line == O2 or line == HALPHA or line == HBETA or line == HGAMMA:\n plt.plot([line['lambda'],line['lambda']],[YMIN,YMAX],'-',color='lime',lw=0.5)\n plt.text(line['lambda'],YMAX*0.8,line['label'],verticalalignment='bottom', horizontalalignment='center',color='lime', fontweight='bold',fontsize=16)\n \n \n \n plt.axis([X.min(), X.max(), Y.min(), Y.max()]); plt.grid(True)\n plt.title(all_titles[index])\n plt.grid(color='white', ls='solid')\n plt.text(200,-5.,all_filt[index],verticalalignment='bottom', horizontalalignment='center',color='yellow', fontweight='bold',fontsize=16)\n plt.xlabel('$\\lambda$ (nm)')\n plt.ylabel('pixels')\n plt.ylim(YMIN,YMAX)\n plt.xlim(0.,1200.)\n plt.savefig(figfilename)",
"def kaons ( self ) :\n from GaudiConfUtils.ConfigurableGenerators import FilterDesktop\n ## \n if self['NOPIDHADRONS'] :\n from StandardParticles import StdAllNoPIDsKaons as inpts\n kaoncut = self['KaonCut']\n else :\n from StandardParticles import StdAllLooseANNKaons as inpts \n kaoncut = \"(%s)&(%s)\" % ( self['KaonCut'] , self['KaonPIDCut'] ) \n #\n ##\n return self.make_selection (\n 'Kaon' ,\n FilterDesktop ,\n [ inpts ] ,\n Code = kaoncut ,\n )",
"def fit(self):\n self.cluseter_agglomerative(n_clusters=20, linkage='average', iterate=5)\n self.sub_clustering(n_clusters=3, index_cluster=[79], linkage='complete')\n self.merge_clusters([[0,9,53],[1,83],[46,35,67],[88,23],[6,68]])\n self.merge_clusters([[6,33,52],[17,14]])\n self.sub_clustering(n_clusters=2, index_cluster=[0], linkage='average')\n self.sub_clustering(n_clusters=3, index_cluster=[2], linkage='average')\n self.sub_clustering(n_clusters=3, index_cluster=[85], linkage='average')\n self.sub_clustering(n_clusters=2, index_cluster=[14], linkage='complete')\n self.sub_clustering(n_clusters=2, index_cluster=[16], linkage='average')\n self.sub_clustering(n_clusters=3, index_cluster=[22], linkage='average')\n self.sub_clustering(n_clusters=2, index_cluster=[24], linkage='complete')\n self.sub_clustering(n_clusters=2, index_cluster=[26], linkage='complete')\n self.sub_clustering(n_clusters=3, index_cluster=[28], linkage='ward')\n self.merge_clusters([[6,98,99]])\n self.merge_clusters([[35,80]])\n self.sub_clustering(n_clusters=4, index_cluster=[35], linkage='complete')\n self.merge_clusters([[76,98]])\n self.sub_clustering(n_clusters=3, index_cluster=[35], linkage='complete')\n self.merge_clusters([[39,42]])\n self.sub_clustering(n_clusters=3, index_cluster=[47], linkage='complete')\n self.sub_clustering(n_clusters=3, index_cluster=[51], linkage='average')\n self.merge_clusters([[70,101]])\n self.sub_clustering(n_clusters=3, index_cluster=[51], linkage='complete')\n self.sub_clustering(n_clusters=3, index_cluster=[61], linkage='ward')\n self.merge_clusters()\n return",
"def sky_orbits(test=True):\n \n t = Table.read('/home/ana/data/baumgardt_positions.fits')\n \n ind_disterr = ~np.isfinite(t['e_Rsun'])\n t['e_Rsun'][ind_disterr] = 0.1 * t['Rsun'][ind_disterr]\n e_max = np.nanmax(t['e_Rsun'][~ind_disterr])\n ind_cap = t['e_Rsun']>e_max\n t['e_Rsun'][ind_cap] = e_max\n \n clusters = ['NGC 3201', 'NGC 4590', 'NGC 5824', 'NGC 5272', 'NGC 5139', 'NGC 5024']\n #clusters = ['NGC 5824', 'NGC 5024']\n N = len(clusters)\n \n match = dict()\n match['NGC 3201'] = dict(streams=['gjoll'], direction=[-1], nstep=[35], gc_label='NGC\\n3201', gcra_off=0*u.deg, gcdec_off=-13*u.deg, gcl_off=0*u.deg, gcb_off=-13*u.deg, stream_label=['$Gj\\\\\\\" oll$'], stream_ra=[-156*u.deg], stream_dec=[-4.5*u.deg], eq_angle=[-45*u.deg], stream_l=[-148*u.deg], stream_b=[-33*u.deg], gal_angle=[22*u.deg])\n \n match['NGC 4590'] = dict(streams=['fjorm'], direction=[1], nstep=[100], gc_label='NGC\\n4590', gcra_off=-15*u.deg, gcdec_off=0*u.deg, gcl_off=-13*u.deg, gcb_off=-10*u.deg, stream_label=['$Fj\\\\\\\" orm$'], stream_ra=[-22*u.deg], stream_dec=[66*u.deg], eq_angle=[35*u.deg], stream_l=[110*u.deg], stream_b=[50*u.deg], gal_angle=[-50*u.deg])\n \n match['NGC 5024'] = dict(streams=['sylgr', 'ravi'], direction=[-1, 1], nstep=[300,500], gc_label='NGC\\n5024', gcra_off=-15*u.deg, gcdec_off=0*u.deg, gcl_off=10*u.deg, gcb_off=-20*u.deg, stream_label=['Sylgr', 'Ravi'], stream_ra=[-70*u.deg, 83*u.deg], stream_dec=[2*u.deg, -47*u.deg], eq_angle=[25*u.deg, 65*u.deg], stream_l=[-110*u.deg, -18.5*u.deg], stream_b=[62*u.deg, -47*u.deg], gal_angle=[30*u.deg, -10*u.deg])\n \n match['NGC 5139'] = dict(streams=['fimbulthul'], direction=[-1], nstep=[70], gc_label='NGC\\n5139', gcra_off=-5*u.deg, gcdec_off=-15*u.deg, gcl_off=0*u.deg, gcb_off=-12*u.deg, stream_label=['Fimbulthul'], stream_ra=[-20*u.deg], stream_dec=[-15*u.deg], eq_angle=[0*u.deg], stream_l=[-20*u.deg], stream_b=[45*u.deg], gal_angle=[0*u.deg])\n \n match['NGC 5272'] = dict(streams=['svol'], direction=[1], nstep=[70], gc_label='NGC\\n5272', gcra_off=-15*u.deg, gcdec_off=10*u.deg, gcl_off=-23*u.deg, gcb_off=-17*u.deg, stream_label=['$Sv\\\\\\\" ol$'], stream_ra=[-2*u.deg], stream_dec=[34*u.deg], eq_angle=[-10*u.deg], stream_l=[55*u.deg], stream_b=[55*u.deg], gal_angle=[-65*u.deg])\n \n match['NGC 5824'] = dict(streams=['triangulum', 'turbio'], direction=[1,1], nstep=[700,1], gc_label='NGC\\n5824', gcra_off=15*u.deg, gcdec_off=-5*u.deg, gcl_off=15*u.deg, gcb_off=-5*u.deg, stream_label=['Triangulum', 'Turbio'], stream_ra=[152*u.deg, 130*u.deg], stream_dec=[32*u.deg, -51*u.deg], eq_angle=[-48*u.deg, 30*u.deg], stream_l=[120*u.deg, -82*u.deg], stream_b=[-31*u.deg, -57*u.deg], gal_angle=[70*u.deg, 105*u.deg])\n \n dt = 0.5*u.Myr\n wangle = 180*u.deg\n ra_off = 120*u.deg\n l_off = 0*u.deg\n \n colors = [mpl.cm.plasma(0.95*x/N) for x in range(N)]\n \n np.random.seed(27529)\n if test:\n Nsample = 1\n else:\n Nsample = 100\n \n plt.close()\n fig = plt.figure(figsize=(12,12))\n \n ax0 = fig.add_subplot(211, projection='mollweide')\n ax1 = fig.add_subplot(212, projection='mollweide')\n ax = [ax0, ax1]\n \n for i in range(N):\n #ind = t['Name']== clusters[i]\n ind = t['Name']==clusters[i]\n t_ = t[ind]\n \n c = coord.SkyCoord(ra=t_['RAJ2000'], dec=t_['DEJ2000'], distance=t_['Rsun'], pm_ra_cosdec=t_['pmRA_'], pm_dec=t_['pmDE'], radial_velocity=t_['RV'], frame='icrs')\n cgal = c.transform_to(coord.Galactic)\n #w0 = gd.PhaseSpacePosition(c.transform_to(gc_frame).cartesian)[0]\n \n color = colors[i]\n alpha_text = 0.8\n \n plt.sca(ax[0])\n plt.plot((c.ra + 
ra_off).wrap_at(wangle).rad, c.dec.rad, '+', color=color, mew=3, ms=15, label=t_['Name'][0])\n plt.text((c.ra + ra_off + match[clusters[i]]['gcra_off']).wrap_at(wangle).rad, (c.dec + match[clusters[i]]['gcdec_off']).rad, match[clusters[i]]['gc_label'], fontsize='small', ha='center', va='center', alpha=alpha_text)\n \n plt.sca(ax[1])\n plt.plot((cgal.l + l_off).wrap_at(wangle).rad, cgal.b.rad, '+', color=color, mew=3, ms=15, label=t_['Name'][0])\n plt.text((cgal.l + l_off + match[clusters[i]]['gcl_off']).wrap_at(wangle).rad, (cgal.b + match[clusters[i]]['gcb_off']).rad, match[clusters[i]]['gc_label'], fontsize='small', ha='center', va='center', alpha=alpha_text)\n \n\n for j in range(len(match[clusters[i]]['direction'])):\n # sample gc positional uncertainties\n for k in range(-1, Nsample):\n if k==-1:\n c = coord.SkyCoord(ra=t_['RAJ2000'], dec=t_['DEJ2000'], distance=t_['Rsun'], pm_ra_cosdec=t_['pmRA_'], pm_dec=t_['pmDE'], radial_velocity=t_['RV'], frame='icrs')\n w0 = gd.PhaseSpacePosition(c.transform_to(gc_frame).cartesian)[0]\n \n lw = 1.5\n alpha = 1\n else:\n c = coord.SkyCoord(ra=t_['RAJ2000'], dec=t_['DEJ2000'], distance=t_['Rsun'] + np.random.randn()*t_['e_Rsun'], pm_ra_cosdec=t_['pmRA_'] + np.random.randn()*t_['e_pmRA_'], pm_dec=t_['pmDE'] + np.random.randn()*t_['e_pmDE'], radial_velocity=t_['RV'] + np.random.randn()*t_['e_RV'], frame='icrs')\n w0 = gd.PhaseSpacePosition(c.transform_to(gc_frame).cartesian)[0]\n \n lw = 1\n alpha = 0.1\n \n orbit = ham.integrate_orbit(w0, dt=dt*match[clusters[i]]['direction'][j], n_steps=match[clusters[i]]['nstep'][j])\n orbit_eq = orbit.to_coord_frame(coord.ICRS, galactocentric_frame=gc_frame)\n orbit_gal = orbit.to_coord_frame(coord.Galactic, galactocentric_frame=gc_frame)\n \n \n plt.sca(ax[0])\n dra = (orbit_eq.ra+ra_off).wrap_at(wangle)[1:] - (orbit_eq.ra+ra_off).wrap_at(wangle)[:-1]\n if np.any(np.abs(dra)>180*u.deg):\n pos_break = dra>180*u.deg\n ind_break = np.argmax(pos_break)\n ipad = 1\n plt.plot((orbit_eq.ra+ra_off).wrap_at(wangle).rad[:ind_break-ipad], orbit_eq.dec.rad[:ind_break-ipad], '-', color=color, lw=lw, label='', alpha=alpha)\n plt.plot((orbit_eq.ra+ra_off).wrap_at(wangle).rad[ind_break+ipad:], orbit_eq.dec.rad[ind_break+ipad:], '-', color=color, lw=lw, label='', alpha=alpha)\n else:\n plt.plot((orbit_eq.ra+ra_off).wrap_at(wangle).rad, orbit_eq.dec.rad, '-', color=color, lw=lw, label='', alpha=alpha)\n \n plt.sca(ax[1])\n dl = orbit_gal.l.wrap_at(wangle)[1:] - orbit_gal.l.wrap_at(wangle)[:-1]\n if np.any(np.abs(dl)>180*u.deg):\n pos_break = dl>180*u.deg\n ind_break = np.argmax(pos_break)\n ipad = 1\n plt.plot((orbit_gal.l+l_off).wrap_at(wangle).rad[:ind_break-ipad], orbit_gal.b.rad[:ind_break-ipad], '-', color=color, lw=lw, label='', alpha=alpha)\n plt.plot((orbit_gal.l+l_off).wrap_at(wangle).rad[ind_break+ipad:], orbit_gal.b.rad[ind_break+ipad:], '-', color=color, lw=lw, label='', alpha=alpha)\n else:\n plt.plot((orbit_gal.l+l_off).wrap_at(wangle).rad, orbit_gal.b.rad, '-', color=color, lw=lw, label='', alpha=alpha)\n \n # add streams\n pkl = pickle.load(open('../data/streams/data_{:s}.pkl'.format(match[clusters[i]]['streams'][j]), 'rb'))\n cs = coord.SkyCoord(ra=pkl['dec'][0], dec=pkl['dec'][1], frame='icrs')\n cs_gal = cs.transform_to(coord.Galactic)\n \n plt.sca(ax[0])\n plt.plot((cs.ra+ra_off).wrap_at(wangle).rad, cs.dec.rad, 'o', color=color, ms=8, label=match[clusters[i]]['streams'][j])\n plt.text(coord.Longitude(match[clusters[i]]['stream_ra'][j]).wrap_at(wangle).rad, 
coord.Latitude(match[clusters[i]]['stream_dec'][j]).rad, match[clusters[i]]['stream_label'][j], fontsize='small', alpha=alpha_text, rotation=match[clusters[i]]['eq_angle'][j].value, ha='center', va='center')\n \n plt.sca(ax[1])\n plt.plot((cs_gal.l+l_off).wrap_at(wangle).rad, cs_gal.b.rad, 'o', color=color, ms=8, label=match[clusters[i]]['streams'][j])\n plt.text(coord.Longitude(match[clusters[i]]['stream_l'][j]).wrap_at(wangle).rad, coord.Latitude(match[clusters[i]]['stream_b'][j]).rad, match[clusters[i]]['stream_label'][j], fontsize='small', alpha=alpha_text, rotation=match[clusters[i]]['gal_angle'][j].value, ha='center', va='center')\n \n \n plt.sca(ax[0])\n plt.grid(ls=':')\n plt.xlabel('R.A. [deg]')\n plt.ylabel('Dec [deg]')\n\n plt.gca().xaxis.set_ticklabels([])\n \n xloc = coord.Longitude(np.arange(-150,180,30)*u.deg)\n xloc = np.delete(xloc, [3])\n yloc = coord.Latitude(5*u.deg)\n Nx = len(xloc)\n \n for i in range(Nx):\n plt.text(xloc[i].wrap_at(wangle).rad, yloc.rad, '{:.0f}$\\degree$'.format((xloc[i]-ra_off).wrap_at(wangle).degree), alpha=0.6, ha='center', va='center')\n \n \n plt.sca(ax[1])\n plt.grid(ls=':')\n plt.xlabel('Galactic longitude [deg]')\n plt.ylabel('Galactic latitude [deg]')\n \n plt.gca().xaxis.set_ticklabels([])\n \n xloc = coord.Longitude(np.arange(-150,180,30)*u.deg)\n xloc = np.delete(xloc, [2,3])\n yloc = coord.Latitude(5*u.deg)\n Nx = len(xloc)\n \n for i in range(Nx):\n plt.text(xloc[i].wrap_at(wangle).rad, yloc.rad, '{:.0f}$\\degree$'.format((xloc[i]+l_off).wrap_at(wangle).degree), alpha=0.6, ha='center', va='center')\n \n \n \n plt.tight_layout(h_pad=2)\n plt.savefig('../paper/sky_orbits.pdf')",
"def F_subset_S5PCH4(self,path,if_trop_xch4=False,s5p_product='RPRO'): \n from scipy.interpolate import interp1d\n # find out list of l2 files to subset\n if os.path.isfile(path):\n self.F_update_popy_with_control_file(path)\n l2_list = self.l2_list\n l2_dir = self.l2_dir\n else:\n import glob\n l2_dir = path\n l2_list = []\n cwd = os.getcwd()\n os.chdir(l2_dir)\n start_date = self.start_python_datetime.date()\n end_date = self.end_python_datetime.date()\n days = (end_date-start_date).days+1\n DATES = [start_date + datetime.timedelta(days=d) for d in range(days)]\n for DATE in DATES:\n flist = glob.glob('S5P_'+s5p_product+'_L2__CH4____'+DATE.strftime(\"%Y%m%d\")+'T*.nc')\n l2_list = l2_list+flist\n \n os.chdir(cwd)\n self.l2_dir = l2_dir\n self.l2_list = l2_list\n \n #maxsza = self.maxsza \n #maxcf = self.maxcf\n west = self.west\n east = self.east\n south = self.south\n north = self.north\n min_qa_value = self.min_qa_value\n \n # absolute path of useful variables in the nc file\n data_fields = ['/PRODUCT/SUPPORT_DATA/GEOLOCATIONS/latitude_bounds',\\\n '/PRODUCT/SUPPORT_DATA/GEOLOCATIONS/longitude_bounds',\\\n '/PRODUCT/SUPPORT_DATA/GEOLOCATIONS/solar_zenith_angle',\\\n '/PRODUCT/SUPPORT_DATA/GEOLOCATIONS/viewing_zenith_angle',\\\n '/PRODUCT/latitude',\\\n '/PRODUCT/longitude',\\\n '/PRODUCT/qa_value',\\\n '/PRODUCT/time',\\\n '/PRODUCT/delta_time',\\\n '/PRODUCT/methane_mixing_ratio',\\\n '/PRODUCT/methane_mixing_ratio_bias_corrected',\\\n '/PRODUCT/methane_mixing_ratio_precision'] \n # standardized variable names in l2g file. should map one-on-one to data_fields\n data_fields_l2g = ['latitude_bounds','longitude_bounds','SolarZenithAngle',\\\n 'vza','latc','lonc','qa_value','time','delta_time',\\\n 'column_amount_no_bias_correction','column_amount','column_uncertainty']\n if if_trop_xch4:\n # absolute path of useful variables in the nc file\n data_fields = ['/PRODUCT/SUPPORT_DATA/GEOLOCATIONS/latitude_bounds',\\\n '/PRODUCT/SUPPORT_DATA/GEOLOCATIONS/longitude_bounds',\\\n '/PRODUCT/SUPPORT_DATA/GEOLOCATIONS/solar_zenith_angle',\\\n '/PRODUCT/SUPPORT_DATA/GEOLOCATIONS/viewing_zenith_angle',\\\n '/PRODUCT/SUPPORT_DATA/INPUT_DATA/dry_air_subcolumns',\\\n '/PRODUCT/SUPPORT_DATA/INPUT_DATA/surface_pressure',\\\n '/PRODUCT/SUPPORT_DATA/INPUT_DATA/pressure_interval',\\\n '/PRODUCT/SUPPORT_DATA/INPUT_DATA/methane_profile_apriori',\\\n '/PRODUCT/latitude',\\\n '/PRODUCT/longitude',\\\n '/PRODUCT/qa_value',\\\n '/PRODUCT/time',\\\n '/PRODUCT/delta_time',\\\n '/PRODUCT/methane_mixing_ratio',\\\n '/PRODUCT/methane_mixing_ratio_bias_corrected',\\\n '/PRODUCT/methane_mixing_ratio_precision'] \n # standardized variable names in l2g file. 
should map one-on-one to data_fields\n data_fields_l2g = ['latitude_bounds','longitude_bounds','SolarZenithAngle',\\\n 'vza','dry_air_subcolumns','surface_pressure','pressure_interval',\n 'methane_profile_apriori','latc','lonc','qa_value','time','delta_time',\\\n 'column_amount_no_bias_correction','column_amount','column_uncertainty']\n self.logger.info('Read, subset, and store level 2 data to l2g_data')\n self.logger.info('Level 2 data are located at '+l2_dir)\n l2g_data = {}\n for fn in l2_list:\n fn_dir = l2_dir+fn\n self.logger.info('Loading '+fn)\n outp_nc = self.F_read_S5P_nc(fn_dir,data_fields,data_fields_l2g)\n if if_trop_xch4:\n sounding_interp = F_interp_geos_mat(outp_nc['lonc'],outp_nc['latc'],outp_nc['UTC_matlab_datenum'],\\\n geos_dir='/mnt/Data2/GEOS/s5p_interp/',\\\n interp_fields=['TROPPT'])\n outp_nc['TROPPT'] = sounding_interp['TROPPT']\n #f1 = outp_nc['SolarZenithAngle'] <= maxsza\n #f2 = outp_nc['cloud_fraction'] <= maxcf\n # ridiculously, qa_value has a scale_factor of 0.01. so error-prone\n f3 = outp_nc['qa_value'] >= min_qa_value \n f4 = outp_nc['latc'] >= south\n f5 = outp_nc['latc'] <= north\n tmplon = outp_nc['lonc']-west\n tmplon[tmplon < 0] = tmplon[tmplon < 0]+360\n f6 = tmplon >= 0\n f7 = tmplon <= east-west\n f8 = outp_nc['UTC_matlab_datenum'] >= self.start_matlab_datenum\n f9 = outp_nc['UTC_matlab_datenum'] <= self.end_matlab_datenum\n validmask = f3 & f4 & f5 & f6 & f7 & f8 & f9\n self.logger.info('You have '+'%s'%np.sum(validmask)+' valid L2 pixels')\n l2g_data0 = {}\n if np.sum(validmask) == 0:\n continue\n # yep it's indeed messed up\n Lat_lowerleft = np.squeeze(outp_nc['latitude_bounds'][:,:,0])[validmask]\n Lat_upperleft = np.squeeze(outp_nc['latitude_bounds'][:,:,3])[validmask]\n Lat_lowerright = np.squeeze(outp_nc['latitude_bounds'][:,:,1])[validmask]\n Lat_upperright = np.squeeze(outp_nc['latitude_bounds'][:,:,2])[validmask]\n Lon_lowerleft = np.squeeze(outp_nc['longitude_bounds'][:,:,0])[validmask]\n Lon_upperleft = np.squeeze(outp_nc['longitude_bounds'][:,:,3])[validmask]\n Lon_lowerright = np.squeeze(outp_nc['longitude_bounds'][:,:,1])[validmask]\n Lon_upperright = np.squeeze(outp_nc['longitude_bounds'][:,:,2])[validmask]\n l2g_data0['latr'] = np.column_stack((Lat_lowerleft,Lat_upperleft,Lat_upperright,Lat_lowerright))\n l2g_data0['lonr'] = np.column_stack((Lon_lowerleft,Lon_upperleft,Lon_upperright,Lon_lowerright))\n for key in outp_nc.keys():\n if key not in {'latitude_bounds','longitude_bounds','time_utc','time','delta_time'}:\n l2g_data0[key] = outp_nc[key][validmask]\n if if_trop_xch4:\n # calculate trop xch4 using l2g_data0\n l2g_data0['air_column_strat'] = np.zeros(l2g_data0['latc'].shape)\n l2g_data0['air_column_total'] = np.zeros(l2g_data0['latc'].shape)\n l2g_data0['methane_ap_column_strat'] = np.zeros(l2g_data0['latc'].shape)\n for il2 in range(len(l2g_data0['latc'])):\n cum_air = np.concatenate(([0.],np.cumsum(l2g_data0['dry_air_subcolumns'][il2,].squeeze())))\n cum_methane = np.concatenate(([0.],np.cumsum(l2g_data0['methane_profile_apriori'][il2,].squeeze())))\n # model top is 10 Pa, 12 layers, 13 levels\n plevel = 10.+np.arange(0,13)*l2g_data0['pressure_interval'][il2]\n tropp = l2g_data0['TROPPT'][il2]\n l2g_data0['air_column_total'][il2] = np.sum(l2g_data0['dry_air_subcolumns'][il2,])\n f = interp1d(plevel,cum_air)\n l2g_data0['air_column_strat'][il2] = f(tropp)\n f = interp1d(plevel,cum_methane)\n l2g_data0['methane_ap_column_strat'][il2] = f(tropp)\n del l2g_data0['dry_air_subcolumns']\n del l2g_data0['methane_profile_apriori'] 
\n l2g_data = self.F_merge_l2g_data(l2g_data,l2g_data0)\n self.l2g_data = l2g_data\n if not l2g_data:\n self.nl2 = 0\n else:\n self.nl2 = len(l2g_data['latc'])",
"def _red_detect_(self, nslice = 0, thresh = 2.0):\n zk_1 = 's_' + format(nslice, '03d')\n zk_2 = 's_' + format(nslice+1, '03d')\n\n zf_1 = self.z_dense[zk_1]\n zf_2 = self.z_dense[zk_2]\n\n # extract the y and x coordinates\n y1 = zf_1[:,0]\n x1 = zf_1[:,1]\n\n y2 = zf_2[:,0]\n x2 = zf_2[:,1]\n\n\n # create a meshgrid\n [YC, YR] = np.meshgrid(y2, y1)\n [XC, XR] = np.meshgrid(x2, x1)\n\n\n dist_block = np.sqrt((YC-YR)**2 + (XC-XR)**2)\n red_pair = np.where(dist_block <= thresh) # find out where the distance between cell i in plane k and cell j in plane k+1 is below the threshold.\n\n ind1 = red_pair[0] # the indices in the first frame\n ind2 = red_pair[1] # the indices in the second frame\n\n\n # select those with markers > 0 and markers < 0\n marker_1 = zf_1[ind1, 3]\n\n\n new_idx = (marker_1 == 0) # select those with zero-markers, which are never counted before. These are new cells. marker_1 needs to be updated.\n pool_new = ind1[new_idx] # select the indices in the first frame where new redundancies are detected \n pool_new_cov = ind2[new_idx] # select the indices in the second frame where new redundancies are detected.\n\n\n pool_exist = ind1[~new_idx] # among the detected redundancies, find those already marked.\n pool_exist_cov = ind2[~new_idx] # correspondingly, find those already marked in the adjacent slice\n\n n_new = len(pool_new)\n n_exist = len(pool_exist)\n if self.verbose:\n print(n_new, \"new redundancies, \", n_exist, \"existing redundancies\")\n\n for n_count in np.arange(n_new):\n # build the new keys\n # also, we need to assign each new key an identity number which is unique.\n n_ind1 = pool_new[n_count] # find the indices in the first slice that contains new redundancies\n n_ind2 = pool_new_cov[n_count] # find the indices in the following slice \n pr_number = nslice * 1000 + n_ind1\n pr_key = 'sl_' + str(pr_number) # build a key \n new_sl = Simple_list(nslice) # create a simple list with z_marker = nslice, nslice is the index of the first z-slice \n new_sl.add([nslice, zf_1[n_ind1, 4]])\n new_sl.add([nslice+1, zf_2[n_ind2, 4]])\n zf_1[n_ind1, 3] = pr_number # assign the new pr_number to zf_1\n zf_2[n_ind2, 3] = pr_number # assigne the same new pr_number to zf_2\n\n self.redundancy_pool[pr_key] = new_sl # stored into the redundancy pool\n\n\n for n_count in np.arange(n_exist):\n # search for the existing keys\n n_ind1 = pool_exist[n_count]\n n_ind2 = pool_exist_cov[n_count]\n pr_number = int(zf_1[n_ind1, 3])# catch up the pr_number\n pr_key = 'sl_' + str(pr_number) # this pr_key should already exist in the pool. \n\n self.redundancy_pool[pr_key].add([nslice+1, zf_2[n_ind2, 4]])\n zf_2[n_ind2, 3] = pr_number # update the pr_number in the adjacent slice",
"def __init__(self, cols, center, radius):\n ColorSelection.__init__(self, cols, center, radius)"
] | [
"0.60852695",
"0.59461117",
"0.5622022",
"0.5604209",
"0.55619615",
"0.5407059",
"0.532152",
"0.52783316",
"0.52493685",
"0.5190198",
"0.5146076",
"0.5125246",
"0.5118821",
"0.5116043",
"0.5110162",
"0.5108866",
"0.5107161",
"0.5097582",
"0.5056697",
"0.5037951",
"0.5032283",
"0.4977529",
"0.49475577",
"0.49425164",
"0.49333927",
"0.4911323",
"0.4908641",
"0.49042702",
"0.48991066",
"0.48873088"
] | 0.6899919 | 0 |
Merges the SExtractor photometry catalog with information about the cluster in the official SPT catalog. | def catalog_merge(self, catalog_cols=None):
    for cluster_info in self._catalog_dictionary.values():
        # Array element names
        catalog_idx = cluster_info['SPT_cat_idx']
        se_catalog = cluster_info['catalog']
        # Replace the existing SPT_ID in the SExtractor catalog with the official cluster ID.
        # se_catalog.columns[0].name = 'SPT_ID'
        # del se_catalog['SPT_ID']
        # Then replace the column values with the official ID.
        se_catalog['SPT_ID'] = self._spt_catalog['SPT_ID'][catalog_idx]
        # Add the SZ center coordinates to the catalog
        se_catalog['SZ_RA'] = self._spt_catalog['RA'][catalog_idx]
        se_catalog['SZ_DEC'] = self._spt_catalog['DEC'][catalog_idx]
        # For each requested column from the master catalog, add its value as a new column applied to every row of the SExtractor catalog.
        if catalog_cols is not None:
            for col_name in catalog_cols:
                se_catalog[col_name] = self._spt_catalog[col_name][catalog_idx]
        cluster_info['catalog'] = se_catalog | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_catalogs(self):\n n_exposures = len(self.info['Module'])\n self.info['point_source'] = [None] * n_exposures\n self.info['galaxyListFile'] = [None] * n_exposures\n self.info['extended'] = [None] * n_exposures\n self.info['convolveExtended'] = [False] * n_exposures\n self.info['movingTarg'] = [None] * n_exposures\n self.info['movingTargSersic'] = [None] * n_exposures\n self.info['movingTargExtended'] = [None] * n_exposures\n self.info['movingTargToTrack'] = [None] * n_exposures\n\n for i in range(n_exposures):\n if int(self.info['detector'][i][-1]) < 5:\n filtkey = 'ShortFilter'\n pupilkey = 'ShortPupil'\n else:\n filtkey = 'LongFilter'\n pupilkey = 'LongPupil'\n filt = self.info[filtkey][i]\n pup = self.info[pupilkey][i]\n\n if self.point_source[i] is not None:\n # In here, we assume the user provided a catalog to go with each filter\n # so now we need to find the filter for each entry and generate a list that makes sense\n self.info['point_source'][i] = os.path.abspath(os.path.expandvars(\n self.catalog_match(filt, pup, self.point_source, 'point source')))\n else:\n self.info['point_source'][i] = None\n if self.galaxyListFile[i] is not None:\n self.info['galaxyListFile'][i] = os.path.abspath(os.path.expandvars(\n self.catalog_match(filt, pup, self.galaxyListFile, 'galaxy')))\n else:\n self.info['galaxyListFile'][i] = None\n if self.extended[i] is not None:\n self.info['extended'][i] = os.path.abspath(os.path.expandvars(\n self.catalog_match(filt, pup, self.extended, 'extended')))\n else:\n self.info['extended'][i] = None\n if self.movingTarg[i] is not None:\n self.info['movingTarg'][i] = os.path.abspath(os.path.expandvars(\n self.catalog_match(filt, pup, self.movingTarg, 'moving point source target')))\n else:\n self.info['movingTarg'][i] = None\n if self.movingTargSersic[i] is not None:\n self.info['movingTargSersic'][i] = os.path.abspath(os.path.expandvars(\n self.catalog_match(filt, pup, self.movingTargSersic, 'moving sersic target')))\n else:\n self.info['movingTargSersic'][i] = None\n if self.movingTargExtended[i] is not None:\n self.info['movingTargExtended'][i] = os.path.abspath(os.path.expandvars(\n self.catalog_match(filt, pup, self.movingTargExtended, 'moving extended target')))\n else:\n self.info['movingTargExtended'][i] = None\n if self.movingTargToTrack[i] is not None:\n self.info['movingTargToTrack'][i] = os.path.abspath(os.path.expandvars(\n self.catalog_match(filt, pup, self.movingTargToTrack, 'non-sidereal moving target')))\n else:\n self.info['movingTargToTrack'][i] = None\n if self.convolveExtended is True:\n self.info['convolveExtended'] = [True] * n_exposures",
"def replica_catalog(train_tweets_path, val_tweets_path, test_tweets_path, dataset_images, EMBEDDING_BASE_PATH):\n rc = ReplicaCatalog()\n http_location = \"https://workflow.isi.edu/Panorama/Data/CrisisComputing\"\n\n # list of input file objects\n input_images = []\n\n # Adding Images to the replica catalogue\n for image_path in dataset_images:\n name = image_path[1].split(\"/\")[-1]\n image_file = File(name)\n input_images.append(image_file)\n \n path_split = image_path[0].split(\"/\")\n #rc.add_replica(\"local\", image_file, image_path[0])\n rc.add_replica(\"isi\", image_file, os.path.join(http_location, path_split[-2], path_split[-1]))\n\n \n glove_embeddings = File('glove.twitter.27B.200d.txt')\n \n # File objects for train, val and test tweets csv\n train_tweets_name = File(train_tweets_path.split('/')[-1])\n val_tweets_name = File(val_tweets_path.split('/')[-1])\n test_tweets_name = File(test_tweets_path.split('/')[-1])\n \n #rc.add_replica(\"local\", train_tweets_name, train_tweets_path)\n #rc.add_replica(\"local\", val_tweets_name, val_tweets_path)\n #rc.add_replica(\"local\", test_tweets_name, test_tweets_path)\n #rc.add_replica(\"local\", glove_embeddings, os.path.join(os.getcwd(), os.path.join(EMBEDDING_BASE_PATH, GLOVE_EMBEDDING_FILE))) \n \n path_split = train_tweets_path.split('/')\n rc.add_replica(\"isi\", train_tweets_name, os.path.join(http_location, path_split[-2], path_split[-1]))\n path_split = val_tweets_path.split('/')\n rc.add_replica(\"isi\", val_tweets_name, os.path.join(http_location, path_split[-2], path_split[-1]))\n path_split = test_tweets_path.split('/')\n rc.add_replica(\"isi\", test_tweets_name, os.path.join(http_location, path_split[-2], path_split[-1]))\n \n rc.add_replica(\"isi\", glove_embeddings, os.path.join(http_location, \"glove_twitter\", GLOVE_EMBEDDING_FILE)) \n rc.write()\n\n return input_images, train_tweets_name, val_tweets_name, test_tweets_name, glove_embeddings",
"def image_to_catalog_match(self, max_image_catalog_sep):\n\n catalog = self._spt_catalog\n\n # Create astropy skycoord object of the SZ centers.\n sz_centers = SkyCoord(catalog['RA'], catalog['DEC'], unit=u.degree)\n\n for cluster in self._catalog_dictionary.values():\n # Get the RA and Dec of the center pixel in the image.\n w = WCS(cluster['ch1_sci_path'])\n center_pixel = np.array(w.array_shape) // 2\n\n # Create astropy skycoord object for the reference pixel of the image.\n img_coord = SkyCoord.from_pixel(center_pixel[1], center_pixel[0], wcs=w, origin=0)\n\n # Match the reference pixel to the SZ centers\n idx, sep, _ = img_coord.match_to_catalog_sky(sz_centers)\n\n # Add the (nearest) catalog id and separation (in arcsec) to the output array.\n cluster.update({'SPT_cat_idx': idx, 'center_sep': sep})\n\n # Reject any match with a separation larger than 1 arcminute.\n large_sep_clusters = [cluster_id for cluster_id, cluster_info in self._catalog_dictionary.items()\n if cluster_info['center_sep'].to(u.arcmin) > max_image_catalog_sep]\n for cluster_id in large_sep_clusters:\n self._catalog_dictionary.pop(cluster_id, None)\n\n # If there are any duplicate matches in the sample remaining we need to remove the match that is the poorer\n # match. We will only keep the closest matches.\n match_info = Table(rows=[[cluster['SPT_cat_idx'], cluster['center_sep'], cluster_id]\n for cluster_id, cluster in self._catalog_dictionary.items()],\n names=['SPT_cat_idx', 'center_sep', 'cluster_id'])\n\n # Sort the table by the catalog index.\n match_info.sort(['SPT_cat_idx', 'center_sep'])\n\n # Use Astropy's unique function to remove the duplicate rows. Because the table rows will be subsorted by the\n # separation column we only need to keep the first incidence of the catalog index as our best match.\n match_info = unique(match_info, keys='SPT_cat_idx', keep='first')\n\n # Remove the duplicate clusters\n duplicate_clusters = set(match_info['cluster_id']).symmetric_difference(self._catalog_dictionary.keys())\n for cluster_id in duplicate_clusters:\n self._catalog_dictionary.pop(cluster_id, None)",
"def read_combined_star_catalog(params,log):\n\n if path.isfile(params['catalog_file']) == False:\n\n return np.zeros(1)\n\n hdulist = fits.open(params['catalog_file'])\n\n data = hdulist[1].data\n\n header = hdulist[0].header\n\n star_catalog = Table(data)\n\n data = hdulist[2].data\n\n image_trios = Table(data)\n\n log.info('Read data from combined colour star catalog')\n\n return star_catalog, image_trios, header",
"def associate_files(self):\n # Open starinfo file and define structured array\n starinfo_file = self.starinfo_file\n nstar = sum(1 for line in open(starinfo_file))\n infoname = ['obj', 'std', 'caldir', 'altname']\n infofmt = ['|S25', '|S25', '|S25', '|S25']\n starinfo = np.zeros(nstar, dtype={\n 'names': infoname, 'formats': infofmt})\n with open(starinfo_file, 'r') as arq:\n for i in range(nstar):\n linelist = arq.readline().split()\n for j in range(len(infoname)):\n starinfo[i][j] = linelist[j]\n\n if self.stored_sens:\n self.load_storedsens()\n\n os.chdir(self.raw_dir)\n\n l = glob.glob('*.fits')\n l.sort()\n\n headers = []\n headers_ext1 = []\n for i in l:\n try:\n headers.append(fits.getheader(i, ext=0))\n headers_ext1.append(fits.getheader(i, ext=1))\n except IOError:\n print('IOError reading file {:s}.'.format(i))\n raise SystemExit(0)\n\n oversc = np.array(\n [('overscan') in i for i in headers_ext1], dtype='bool')\n\n mjds = np.array([i['mjd-obs'] for i in headers_ext1], dtype='float32')\n idx = np.arange(len(l))\n\n images = np.array([\n l[i] for i in idx if (\n (headers[i]['obstype'] == 'OBJECT') &\n (headers[i]['object'] != 'Twilight') &\n (headers[i]['obsclass'] != 'acq'))])\n\n field_names = [\n 'filename', 'observatory', 'instrument', 'detector',\n 'grating', 'filter1', 'obsclass', 'object', 'obstype',\n 'grating_wl', 'overscan', 'mjd', 'ccdsum']\n types = [\n 'S120', 'S60', 'S60', 'S60', 'S60', 'S60', 'S60', 'S60', 'S60',\n 'float32', 'bool', 'float32', 'S60']\n hdrkeys = [\n 'observat', 'instrume', 'detector', 'grating', 'filter1',\n 'obsclass', 'object', 'obstype', 'grwlen']\n\n hdrpars_type = [\n (field_names[i], types[i]) for i in range(len(field_names))]\n\n hdrpars = np.array([\n ((l[i],) + tuple([headers[i][j] for j in hdrkeys]) +\n (oversc[i],) + (mjds[i],) + (headers_ext1[i]['ccdsum'],))\n for i in idx], dtype=hdrpars_type)\n\n associated = []\n\n for i, j in enumerate(images):\n\n # Take great care when changing this.\n hdr = fits.getheader(j, ext=0)\n hdr_ext1 = fits.getheader(j, ext=1)\n mjd = hdr_ext1['mjd-obs']\n\n element = {\n 'image': j, 'observatory': hdr['observat'],\n 'instrument': hdr['instrume'],\n 'detector': hdr['detector'], 'grating_wl': hdr['grwlen'],\n 'mjd': mjd, 'grating': hdr['grating'],\n 'filter1': hdr['filter1'], 'obsclass': hdr['obsclass'],\n 'object': hdr['object']}\n\n if self.stored_sens:\n ssf = self.stored_sensfunc\n element['standard_star'] = ssf['filename'][\n (ssf['observatory'] == hdr['observat']) &\n (ssf['detector'] == hdr['detector']) &\n (ssf['grating'] == hdr['grating']) &\n (ssf['instrument'] == hdr['instrume']) &\n (ssf['filter1'] == hdr['filter1']) &\n (ssf['maskname'] == hdr['maskname'])]\n else:\n element['standard_star'] = hdrpars['filename'][\n (hdrpars['obstype'] == 'OBJECT') &\n (np.array([k in ['partnerCal', 'progCal']\n for k in hdrpars['obsclass']], dtype='bool')) &\n (hdrpars['object'] != 'Twilight') &\n (hdrpars['observatory'] == hdr['observat']) &\n (hdrpars['detector'] == hdr['detector']) &\n (hdrpars['grating'] == hdr['grating']) &\n (hdrpars['filter1'] == hdr['filter1']) &\n (abs(hdrpars['grating_wl'] - hdr['grwlen']) <=\n self.cfg.getfloat('associations', 'stdstar_wltol')) &\n (abs(mjds - mjd) <=\n self.cfg.getfloat('associations', 'stdstar_ttol'))]\n\n element['flat'] = hdrpars['filename'][\n (hdrpars['obstype'] == 'FLAT') &\n (hdrpars['observatory'] == hdr['observat']) &\n (hdrpars['grating'] == hdr['grating']) &\n (hdrpars['grating_wl'] == hdr['grwlen']) &\n (hdrpars['detector'] == hdr['detector']) &\n 
(abs(mjds - mjd) <= self.cfg.getfloat('associations',\n 'flat_ttol'))]\n\n element['twilight'] = hdrpars['filename'][\n (hdrpars['object'] == 'Twilight') &\n (hdrpars['obstype'] == 'OBJECT') &\n (hdrpars['observatory'] == hdr['observat']) &\n (hdrpars['detector'] == hdr['detector']) &\n (hdrpars['grating'] == hdr['grating']) &\n (abs(hdrpars['grating_wl'] - hdr['grwlen']) <=\n self.cfg.getfloat('associations', 'twilight_wltol')) &\n (abs(mjds - mjd) <=\n self.cfg.getfloat('associations', 'twilight_ttol'))]\n\n c = 'twilight'\n if len(element[c]) > 1:\n element[c] = closest_in_time(element[c], j)\n elif len(element[c]) == 1:\n element[c] = element[c][0]\n elif len(element[c]) == 0:\n element[c] = ''\n\n # A flat close to the twilight observation for a better\n # response function.\n if element['twilight']:\n twipars = hdrpars[hdrpars['filename'] == element['twilight']]\n element['twilight_flat'] = hdrpars['filename'][\n (hdrpars['obstype'] == 'FLAT') &\n (hdrpars['observatory'] == twipars['observatory']) &\n (hdrpars['detector'] == twipars['detector']) &\n (hdrpars['grating'] == twipars['grating']) &\n (hdrpars['grating_wl'] == twipars['grating_wl']) &\n (abs(mjds - twipars['mjd']) <= self.cfg.getfloat(\n 'associations', 'twilight_ttol'))]\n else:\n element['twilight_flat'] = np.array([], dtype='S60')\n\n element['arc'] = hdrpars['filename'][\n # (hdrpars['object'] == 'CuAr') &\n (hdrpars['obstype'] == 'ARC') &\n (hdrpars['observatory'] == hdr['observat']) &\n (hdrpars['detector'] == hdr['detector']) &\n (hdrpars['grating'] == hdr['grating']) &\n (hdrpars['grating_wl'] == hdr['grwlen']) &\n (abs(mjds - mjd) <=\n self.cfg.getfloat('associations', 'arc_ttol'))]\n\n element['bias'] = hdrpars['filename'][\n (hdrpars['obstype'] == 'BIAS') &\n (hdrpars['observatory'] == hdr['observat']) &\n (hdrpars['detector'] == hdr['detector']) &\n (abs(mjds - mjd) <=\n self.cfg.getfloat('associations', 'bias_ttol')) &\n (\n (hdrpars['overscan'] & (self.fl_over == 'yes')) |\n (~hdrpars['overscan'] & (self.fl_over == 'no'))\n )]\n\n im = fits.open(element['image'])\n ishape = np.array(im[1].data.shape, dtype='float32')\n im.close()\n del(im)\n\n validBiases = np.ones(len(element['bias']), dtype='bool')\n k = 0\n\n for biasImage in element['bias']:\n\n bias = fits.open(biasImage)\n bshape = np.array(bias[1].data.shape, dtype='float32')\n bias.close()\n del(bias)\n\n #\n # Elinates biases if they differ in array size from\n # the science image. 
Small differences are normal due to\n # the overscan subtraction in processed bias frames.\n #\n if np.any(np.abs(bshape / ishape - 1.0) > 0.10):\n validBiases[k] = False\n\n k += 1\n\n element['bias'] = element['bias'][validBiases]\n del(k)\n\n element['bpm'] = hdrpars['filename'][\n (hdrpars['obstype'] == 'BPM') &\n (hdrpars['observatory'] == hdr['observat']) &\n (hdrpars['detector'] == hdr['detector']) &\n (hdrpars['ccdsum'] == hdr_ext1['ccdsum'])]\n\n categories = ['flat', 'bias', 'arc', 'standard_star',\n 'bpm', 'twilight_flat']\n\n for c in categories:\n if len(element[c]) > 1:\n element[c] = closest_in_time(element[c], j)\n elif len(element[c]) == 0:\n element[c] = ''\n elif len(element[c]) == 1:\n element[c] = (element[c])[0]\n\n associated.append(element)\n\n # Define mdf filename\n # Based in gprepare.cl\n # Did not account for observation in Nod-and-Shuffle\n for i in associated:\n header_flat = [\n k for j, k in enumerate(headers) if l[j] == i['flat']\n ]\n if len(header_flat):\n header_flat = header_flat[0]\n MaskName = header_flat['maskname']\n if MaskName == \"IFU-2\":\n slits = 'both'\n elif MaskName == \"IFU-B\":\n slits = 'blue'\n elif MaskName == \"IFU-R\":\n slits = 'red'\n i['slits'] = slits\n\n if self.object_filter:\n objs = self.object_filter.split(',')\n sci_ims = [\n i for i in associated if (\n (i['obsclass'] == 'science') &\n (i['object'] in objs))]\n else:\n sci_ims = [i for i in associated if i['obsclass'] == 'science']\n\n if self.all_stars:\n std_ims = [\n i for i in associated if i['obsclass'] in ['partnerCal',\n 'progCal']]\n else:\n used_stds = [i['standard_star'] for i in sci_ims]\n std_ims = [i for i in associated if i['image'] in used_stds]\n\n # Get star info from starinfo.dat\n possible_names = np.concatenate((starinfo['obj'], starinfo['std'],\n starinfo['altname']))\n n_names = len(possible_names)\n\n for i, j in enumerate(possible_names):\n possible_names[i] = (j.lower()).replace(' ', '')\n\n for i in std_ims:\n # Removes the 'standard_star' key if the dictionary\n # element in question refers to a standard star.\n del i['standard_star']\n starname = (i['object'].lower()).replace(' ', '')\n\n try:\n stdstar_idx = (\n np.arange(n_names)[possible_names == starname] %\n (n_names / 3))[0]\n except:\n raise Exception(\n 'Standard star named {:s} not found in file {:s}'.\n format(starname, starinfo_file))\n\n i['stdstar'] = starinfo[stdstar_idx]['std']\n\n if starinfo[stdstar_idx]['caldir'] == 'gireds_data':\n i['caldir'] = pkg_resources.resource_filename(\n 'gireds', 'data/')\n else:\n i['caldir'] = starinfo[stdstar_idx]['caldir']\n\n self.sci = sci_ims\n self.std = std_ims\n\n # Writes the file association dictionary to an ASCII file\n # in the run directory.\n\n if not self.dry_run:\n try:\n os.mkdir(self.products_dir)\n except OSError as err:\n if err.errno == 17:\n pass\n else:\n raise err\n try:\n os.mkdir(self.run_dir)\n except OSError as err:\n if err.errno == 17:\n pass\n else:\n raise err\n\n if not self.dry_run:\n os.chdir(self.run_dir)\n json.dump(\n sci_ims, open('file_associations_sci.dat', 'w'),\n sort_keys=True, indent=4)\n json.dump(\n std_ims, open('file_associations_std.dat', 'w'),\n sort_keys=True, indent=4)",
"def file_pairing(self, include=None, exclude=None):\n\n # List the file names for both the images and the catalogs\n if isinstance(self._irac_image_dir, list):\n image_files = list(chain.from_iterable(glob.glob(f'{img_dir}/*.fits') for img_dir in self._irac_image_dir))\n else:\n image_files = glob.glob(f'{self._irac_image_dir}/*.fits')\n if isinstance(self._sextractor_cat_dir, list):\n cat_files = list(\n chain.from_iterable(glob.glob(f'{cat_dir}/*.cat') for cat_dir in self._sextractor_cat_dir))\n else:\n cat_files = glob.glob(f'{self._sextractor_cat_dir}/*.cat')\n\n # Combine and sort both file lists\n cat_image_files = sorted(cat_files + image_files, key=self._keyfunct)\n\n # Group the file names together\n self._catalog_dictionary = {cluster_id: list(files)\n for cluster_id, files in groupby(cat_image_files, key=self._keyfunct)}\n\n # If we want to only run on a set of clusters we can filter for them now\n if include is not None:\n self._catalog_dictionary = {cluster_id: files for cluster_id, files in self._catalog_dictionary.items()\n if cluster_id in include}\n\n # If we want to exclude some clusters manually we can remove them now\n if exclude is not None:\n for cluster_id in exclude:\n self._catalog_dictionary.pop(cluster_id, None)\n\n # Sort the files into a dictionary according to the type of file\n for cluster_id, files in self._catalog_dictionary.items():\n self._catalog_dictionary[cluster_id] = {}\n for f in files:\n if f.endswith('.cat'):\n self._catalog_dictionary[cluster_id]['se_cat_path'] = f\n elif 'I1' in f and '_cov' not in f:\n self._catalog_dictionary[cluster_id]['ch1_sci_path'] = f\n elif 'I1' in f and '_cov' in f:\n self._catalog_dictionary[cluster_id]['ch1_cov_path'] = f\n elif 'I2' in f and '_cov' not in f:\n self._catalog_dictionary[cluster_id]['ch2_sci_path'] = f\n elif 'I2' in f and '_cov' in f:\n self._catalog_dictionary[cluster_id]['ch2_cov_path'] = f\n\n # Verify that all the clusters in our sample have all the necessary files\n problem_clusters = []\n for cluster_id, cluster_files in self._catalog_dictionary.items():\n file_keys = {'ch1_sci_path', 'ch1_cov_path', 'ch2_sci_path', 'ch2_cov_path', 'se_cat_path'}\n try:\n assert file_keys == cluster_files.keys()\n except AssertionError:\n message = f'Cluster {cluster_id} is missing files {file_keys - cluster_files.keys()}'\n warnings.warn(message)\n problem_clusters.append(cluster_id)\n\n # For now, remove the clusters missing files\n for cluster_id in problem_clusters:\n self._catalog_dictionary.pop(cluster_id, None)",
"def prepare_statistics(self):\n\n # statistics of clustering files\n len0 = len(self.cluster_lists[0])\n len1 = len(self.cluster_lists[1])\n longer_index = 0 if len0 >= len1 else 1\n shorter_index = 1 if len1 <= len0 else 0\n\n percentage_stars = \"%.2f\" % (100.0 * float(self.shared_spec_num)/float(self.cluster_spectra_num[shorter_index]))\n percentage_starlets = \"%.2f\" % (100.0 * float(self.shared_spec_num)/float(self.cluster_spectra_num[longer_index]))\n\n head = \"{0:<25}{1:<20}{2:<20}\\n\".format(\"name\", \"number\", \"description\")\n rows = \"\"\n rows += \"{0:<25}{1:<20}{2:<20}\\n\".format(\"stars No.\", self.stars_length, \"in file with less(or equal) clusters: file\" + str(shorter_index))\n rows += \"{0:<25}{1:<20}{2:<20}\\n\".format(\"starlets No.\", self.starlets_length, \"in file with more(or equal) clusters: file\" + str(longer_index))\n rows += \"{0:<25}{1:<20}{2:<20}\\n\".format(\"identical cluster No.\", self.similarity_dist[10], \"between them\")\n rows += \"{0:<25}{1:<20}{2:<20}\\n\".format(\"spectrum No\", self.cluster_spectra_num[shorter_index], \"in stars\")\n rows += \"{0:<25}{1:<20}{2:<20}\\n\".format(\"spectrum No\", self.cluster_spectra_num[longer_index], \"in starlets \")\n rows += \"{0:<25}{1:<20}{2:<20}\\n\".format(\"shared spectrum No\", self.shared_spec_num, \"between them\")\n rows += \"{0:<25}{1:<20}{2:<20}\\n\".format(\"shared spectrum percent\", percentage_stars, \"in stars\")\n rows += \"{0:<25}{1:<20}{2:<20}\\n\".format(\"shared spectrum percent\", percentage_starlets, \"in starlets\")\n self.tables.append(('statistics of files', head, rows))\n\n # distribution of cluster size in stars\n head = '{0:<20}{1:<20}{2:<20}{3:<20}\\n'.format(\"cluster size\",\"No.\", \"percentage\", \"accumulate pecentage\")\n rows = \"\"\n rows += \"{0:<20}{1:<20}\\n\".format(\"%.2f\" % (self.ave_star_size), \"average\")\n accumulate_num = 0\n for key in sorted(self.cluster_size_dist[shorter_index].keys()):\n value = self.cluster_size_dist[shorter_index][key]\n accumulate_num += value\n percent = \"%.2f\" % (100 * value/self.stars_length)\n accum_percent = \"%.2f\" % (100 * accumulate_num/self.stars_length)\n rows += '{0:<20}{1:<20}{2:<20}{3:<20}\\n'.format(key, value, percent, accum_percent)\n self.tables.append(('distribution of cluster size in stars', head, rows))\n \n head = '{0:<20}{1:<20}{2:<20}{3:<20}\\n'.format(\"cluster size\",\"No.\", \"percentage\", \"accumulate pecentage\")\n rows = \"\"\n rows += \"{0:<20}{1:<20}\\n\".format(\"%.2f\" % (self.ave_starlet_size), \"average\")\n accumulate_num = 0\n for key in sorted(self.cluster_size_dist[longer_index].keys()):\n value = self.cluster_size_dist[longer_index][key]\n accumulate_num += value\n percent = \"%.2f\" % (100 * value/self.starlets_length)\n accum_percent = \"%.2f\" % (100 * accumulate_num/self.starlets_length)\n rows += '{0:<20}{1:<20}{2:<20}{3:<20}\\n'.format(key, value, percent, accum_percent)\n self.tables.append(('distribution of cluster size in starlets', head, rows))\n\n # distribution of similarity\n head = \"{0:<20}{1:<20}{2:<20}{3:<20}\\n\".format(\"similarity score\", \"pairs of clusters\", \"percentage(stars)\", \"percentage(starlets)\")\n rows = \"\"\n for key in reversed(sorted(self.similarity_dist.keys())):\n value = self.similarity_dist[key]\n percent_star = \"%.2f\" % (100.0*value/self.stars_length)\n percent_starlet = \"%.2f\" % (100.0*value/self.starlets_length)\n rows += '{0:<20}{1:<20}{2:<20}{3:<20}\\n'.format(key, value, percent_star, percent_starlet)\n 
self.tables.append(('distribution of similarity (identical = 10)', head, rows))\n\n # distribution of star divide factors\n head = '{0:<20}{1:<20}{2:<20}\\n'.format(\"divide factor\",\"No.\",\"percentage\")\n rows = \"\"\n rows += \"{0:<20}{1:<20}\\n\".format(\"%.2f\" % (self.ave_divide_factor_star), \"average\")\n for key in sorted(self.star_divide_factor_dist.keys()):\n value = self.star_divide_factor_dist[key]\n percent_star = \"%.2f\" % (100.0*value/self.stars_length)\n rows += '{0:<20}{1:<20}{2:<20}\\n'.format(key, value, percent_star)\n self.tables.append(('distribution of star divide factors', head, rows))\n\n # distribution of starlet divide factors\n head = '{0:<20}{1:<20}{2:<20}\\n'.format(\"divide factor\",\"No.\",\"percentage\")\n rows = \"\"\n rows += \"{0:<20}{1:<20}\\n\".format(\"%.2f\" % (self.ave_divide_factor_starlet), \"average\")\n for key in sorted(self.starlet_divide_factor_dist.keys()):\n value = self.starlet_divide_factor_dist[key]\n percent_starlet = \"%.2f\" % (100.0*value/self.starlets_length)\n rows += '{0:<20}{1:<20}{2:<20}\\n'.format(key, value, percent_starlet)\n self.tables.append(('distribution of starlet divide factors', head, rows))",
"def _transfer_coos(self):\n # compose the WCS-term for the direct and grism images\n dir_term = getDATA(\"{0:s} [{1:d}]\".format(self.dirname, self.dirname_extinfo['fits_ext']))\n gri_term = getDATA(\"{0:s} [{1:d}]\".format(self.grisim, self.grism_extinfo['fits_ext']))\n\n # generate the WCS objects\n dir_wcs = wcsutil.WCSObject(dir_term)\n gri_wcs = wcsutil.WCSObject(gri_term)\n\n # go over each row in the catalog\n for row in self.gol:\n\n # make a position tuple\n try:\n xy_direct = (row['X_IMAGE'], row['Y_IMAGE'])\n except KeyError: \n # self._treat_NULL_table\n raise aXeError(\"No coordinate columns in catalog, empty?\")\n\n # convert to RADEC using the direct image\n radec_pos = dir_wcs.xy2rd(xy_direct)\n\n # convert to XY on grism image\n xy_grism = gri_wcs.rd2xy(radec_pos)\n\n # store projected vals in the GOL\n row['X_IMAGE'] = float(xy_grism[0])\n row['Y_IMAGE'] = float(xy_grism[1])",
"def updateClusterInfo(self):\n self.nPoints = len(self.labels)\n self.n = len(np.unique(self.labels))\n self.centers = [ [0.0 for j in range(3)] for i in range(self.n)]",
"def overlay(self):\n # retrieve header for photometry keywords\n # from current frame only\n hdr_str = self.run('fits header', via='get')\n\n # read it in to a fits header\n phdr = fits.Header()\n hdr = phdr.fromstring(hdr_str, sep='\\n')\n\n try:\n srcposx = hdr['SRCPOSX'] + 1\n srcposy = hdr['SRCPOSY'] + 1\n s1 = 'point({:f} {:f}) # ' \\\n 'point=x ' \\\n 'color=blue tag={{srcpos}} '\\\n 'text=SRCPOS'.format(srcposx, srcposy)\n self.run('regions', s1)\n except (KeyError, ValueError):\n pass\n try:\n stcentx = hdr['STCENTX'] + 1\n stcenty = hdr['STCENTY'] + 1\n photaper = hdr['PHOTAPER']\n photskap = [float(x) for x in hdr['PHOTSKAP'].split(',')]\n s1 = 'point({:f} {:f}) # ' \\\n 'point=x ' \\\n 'color=cyan tag={{srcpos}}'.format(stcentx, stcenty)\n self.run('regions', s1)\n s2 = 'circle({:f} {:f} {:f}) # ' \\\n 'color=cyan tag={{srcpos}}'.format(\n stcentx, stcenty, photaper)\n self.run('regions', s2)\n s3 = 'annulus({:f} {:f} {:f} {:f}) # ' \\\n 'color=cyan tag={{srcpos}} text=STCENT'.format(\n stcentx, stcenty, photskap[0], photskap[1])\n self.run('regions', s3)\n except (KeyError, ValueError):\n pass\n try:\n stcentx = hdr['STCENTX'] + 1\n stcenty = hdr['STCENTY'] + 1\n flux = hdr['STAPFLX']\n sky = hdr['STAPSKY']\n s1 = 'text({:f} {:f}) # color=cyan ' \\\n 'text=\"Flux={:.2f}, Sky={:.2f}\"'.format(\n stcentx, stcenty - 40, flux, sky)\n self.run('regions', s1)\n except (KeyError, ValueError):\n pass\n\n # try overlaying apertures as well\n try:\n self.overlay_aperture(hdr)\n except ValueError: # pragma: no cover\n # may be encountered with extensions with\n # unexpected WCSs\n pass",
"def updateAnnotations(self):\n self.backupDatafiles()\n print(\"Updating annotation files \", self.field(\"trainDir\"))\n listOfDataFiles = QDir(self.field(\"trainDir\")).entryList(['*.data'])\n for file in listOfDataFiles:\n # Read the annotation\n segments = Segment.SegmentList()\n newsegments = Segment.SegmentList()\n segments.parseJSON(os.path.join(self.field(\"trainDir\"), file))\n allSpSegs = np.arange(len(segments)).tolist()\n newsegments.metadata = segments.metadata\n for segix in allSpSegs:\n seg = segments[segix]\n if self.field(\"species\") not in [fil[\"species\"] for fil in seg[4]]:\n newsegments.addSegment(seg) # leave non-target segments unchanged\n else:\n for seg2 in self.segments:\n if seg2[1] == seg:\n # find the index of target sp and update call type\n seg[4][[fil[\"species\"] for fil in seg[4]].index(self.field(\"species\"))][\"calltype\"] = self.clusters[seg2[-1]]\n newsegments.addSegment(seg)\n newsegments.saveJSON(os.path.join(self.field(\"trainDir\"), file))",
"def mergeCenters(nCenters):\r\n\tpath = os.getcwd()\r\n\tos.chdir('Centers/')\r\n\tcenter = np.zeros((0,128))\t\t#: Populator for centers\r\n\r\n\tfor i in os.listdir(os.getcwd()):\r\n\t Center = open(i,\"rb\")\t\t#: File pointer for centers file\r\n\t center = np.vstack((center, pickle.load(Center)))\t#Populate centers\r\n\t Center.close()\r\n\r\n\tcenter = np.float32(center)\r\n\tcriteria = (cv2.TERM_CRITERIA_MAX_ITER, 10,0.0001)\r\n\t#Checking version of opencv..\r\n\tif __verison__[0] == '3':\r\n\t\tret,label,center=cv2.kmeans(center,int(nCenters),None,criteria,50,cv2.KMEANS_PP_CENTERS)\r\n\telse:\r\n\t\tret,label,center=cv2.kmeans(center,int(nCenters),criteria,50,cv2.KMEANS_PP_CENTERS)\r\n\r\n\tCenterFinal = open(path+'/centerFinal.p',\"wb\")#: File pointer for final centers file\r\n\tpickle.dump(center, CenterFinal)\t#Dump centers to file\r\n\tCenterFinal.close()",
"def make_catalog_comp_info_dict(self, catalog_sources):\n catalog_ret_dict = {}\n split_ret_dict = {}\n for key, value in catalog_sources.items():\n if value is None:\n continue\n if value['model_type'] != 'catalog':\n continue\n versions = value['versions']\n for version in versions:\n ver_key = \"%s_%s\" % (key, version)\n source_dict = self.read_catalog_info_yaml(ver_key)\n try:\n full_cat_info = catalog_ret_dict[key]\n except KeyError:\n full_cat_info = self.build_catalog_info(source_dict)\n catalog_ret_dict[key] = full_cat_info\n\n try:\n all_sources = [x.strip() for x in full_cat_info.catalog_table[\n 'Source_Name'].astype(str).tolist()]\n except KeyError:\n print(full_cat_info.catalog_table.colnames)\n used_sources = []\n rules_dict = source_dict['rules_dict']\n if rules_dict is None:\n rules_dict = {}\n split_dict = {}\n for rule_key, rule_val in rules_dict.items():\n # full_key =\\\n # self._name_factory.merged_sourcekey(catalog=ver_key,\n # rulekey=rule_key)\n sources = select_sources(\n full_cat_info.catalog_table, rule_val['cuts'])\n used_sources.extend(sources)\n split_dict[rule_key] = self.make_catalog_comp_info(\n full_cat_info, version, rule_key, rule_val, sources)\n\n # Now deal with the remainder\n for source in used_sources:\n try:\n all_sources.remove(source)\n except ValueError:\n continue\n rule_val = dict(cuts=[],\n merge=source_dict['remainder'].get('merge', False))\n split_dict['remain'] = self.make_catalog_comp_info(\n full_cat_info, version, 'remain', rule_val, all_sources)\n\n # Merge in the info for this version of splits\n split_ret_dict[ver_key] = split_dict\n\n self._catalog_comp_info_dicts.update(catalog_ret_dict)\n self._split_comp_info_dicts.update(split_ret_dict)\n return (catalog_ret_dict, split_ret_dict)",
"def _consolidate_spont_results(self):\n\n # SPONT vs. SPONT\n\n # 1) deal with numeric results for spont spont\n df = self.numeric_results.copy()\n mean_cols = [c for c in df.columns if '_sem' not in c]\n err_cols = [c for c in df.columns if '_sem' in c]\n\n spont_spont_mean = df.loc[pd.IndexSlice[self.spont_stimulus_pairs, :], :][mean_cols].groupby(by='n_components').mean()\n spont_spont_sem = df.loc[pd.IndexSlice[self.spont_stimulus_pairs, :], :][err_cols].groupby(by='n_components').apply(error_prop)\n spont_spont = pd.concat([spont_spont_mean, spont_spont_sem], axis=1)\n new_idx = pd.MultiIndex.from_tuples([pd.Categorical(('spont_spont', n_components)) \n for n_components in spont_spont.index], names=['combo', 'n_components'])\n spont_spont.set_index(new_idx, inplace=True)\n\n # drop individual spont_spont pairs from master df\n df = df[~df.index.get_level_values('combo').isin(self.spont_stimulus_pairs)]\n\n # add new spont results to df\n df = spont_spont.append(df)\n self.numeric_results = df.copy()\n\n\n # 2) deal with array results for spont_spont\n for obj in self.object_keys:\n df = self.array_results[obj].copy()\n sp_df = df.loc[pd.IndexSlice[self.spont_stimulus_pairs, :], :]\n\n if 'evecs' in obj:\n m = [np.nanmean(reflect_eigenvectors(x), axis=0) for x in [np.stack([a for a in arr[1]['mean'].values]) for arr in sp_df.groupby('n_components')]]\n sem = [error_prop(x, axis=0) for x in [np.stack([a for a in arr[1]['sem'].values]) for arr in sp_df.groupby('n_components')]]\n else:\n m = [np.nanmean(x, axis=0) for x in [np.stack([a for a in arr[1]['mean'].values]) for arr in sp_df.groupby('n_components')]]\n sem = [error_prop(x, axis=0) for x in [np.stack([a for a in arr[1]['sem'].values]) for arr in sp_df.groupby('n_components')]]\n \n components = [arr[0] for arr in sp_df.groupby('n_components')]\n new_idx = pd.MultiIndex.from_tuples([pd.Categorical(('spont_spont', n_components)) \n for n_components in components], names=['combo', 'n_components'])\n new_df = pd.DataFrame(index=new_idx, columns=['mean', 'sem'])\n new_df['mean'] = m\n new_df['sem'] = sem\n\n df = df[~df.index.get_level_values('combo').isin(self.spont_stimulus_pairs)]\n df = new_df.append(df)\n \n self.array_results[obj] = df.copy()\n\n self.spont_stimulus_pairs = ['spont_spont']\n\n\n # SPONT vs. 
EVOKED\n df = self.numeric_results.copy()\n unique_evoked_bins = np.unique([[c.split('_')[0], c.split('_')[1]] for c in self.evoked_stimulus_pairs])\n\n # 1) deal with numeric results\n new_sp_ev_pairs = []\n for stim in unique_evoked_bins:\n # get all spont / evoked combos\n sp_ev = np.unique([c for c in self.spont_evoked_stimulus_pairs if stim in c])\n m = df.loc[pd.IndexSlice[sp_ev, :], :][mean_cols].groupby(by='n_components').mean()\n sem = df.loc[pd.IndexSlice[sp_ev, :], :][err_cols].groupby(by='n_components').apply(error_prop)\n sp_ev_df = pd.concat([m, sem], axis=1)\n new_idx = pd.MultiIndex.from_tuples([pd.Categorical(('spont_{}'.format(stim), n_components)) \n for n_components in sp_ev_df.index], names=['combo', 'n_components']) \n sp_ev_df.set_index(new_idx, inplace=True)\n df = sp_ev_df.append(df)\n new_sp_ev_pairs.append('spont_{}'.format(stim))\n\n # remove inividual spont_evoked pairs \n df = df[~df.index.get_level_values('combo').isin(self.spont_evoked_stimulus_pairs)] \n\n # save updated dataframe for numeric results\n self.numeric_results = df.copy()\n\n # 2) deal with object results\n for obj in self.object_keys:\n df = self.array_results[obj].copy()\n for stim in unique_evoked_bins:\n sp_ev = np.unique([c for c in self.spont_evoked_stimulus_pairs if stim in c])\n sp_df = df.loc[pd.IndexSlice[sp_ev, :], :]\n\n if 'evecs' in obj:\n m = [np.nanmean(reflect_eigenvectors(x), axis=0) for x in [np.stack([a for a in arr[1]['mean'].values]) for arr in sp_df.groupby('n_components')]]\n sem = [error_prop(x, axis=0) for x in [np.stack([a for a in arr[1]['sem'].values]) for arr in sp_df.groupby('n_components')]]\n else:\n m = [np.nanmean(x, axis=0) for x in [np.stack([a for a in arr[1]['mean'].values]) for arr in sp_df.groupby('n_components')]]\n sem = [error_prop(x, axis=0) for x in [np.stack([a for a in arr[1]['sem'].values]) for arr in sp_df.groupby('n_components')]]\n components = [arr[0] for arr in sp_df.groupby('n_components')]\n new_idx = pd.MultiIndex.from_tuples([pd.Categorical(('spont_{}'.format(stim), n_components)) \n for n_components in components], names=['combo', 'n_components'])\n new_df = pd.DataFrame(index=new_idx, columns=['mean', 'sem'])\n new_df['mean'] = m\n new_df['sem'] = sem\n\n df = df[~df.index.get_level_values('combo').isin(self.spont_evoked_stimulus_pairs)]\n df = new_df.append(df)\n self.array_results[obj] = df\n\n # update self.spont_evoked_stimulus_pairs\n self.spont_evoked_stimulus_pairs = new_sp_ev_pairs \n\n # no need to return anything... just update object attributes",
"def automerge_clusters(self):\n all_clusters = self.get_clusters().copy()\n\n if not self._single: # if not in single mode mode\n # initialize the variable to check if some change has happened \n changed = False\n for cl_1 in all_clusters: # cycle over clusters\n c_c1 = all_clusters[cl_1]\n for cl_2 in all_clusters: # inner cycle over clusters\n c_c2 = all_clusters[cl_2]\n # if two clusters have the same speaker and have different \n # cluster identifiers\n if cl_1 != cl_2 and c_c1.get_speaker() != 'unknown' and c_c1.get_speaker() == c_c2.get_speaker() and self._clusters.has_key(cl_1) and self._clusters.has_key(cl_2):\n changed = True\n # merge the clusters an record that something changed\n self._merge_clusters(cl_1, cl_2)\n if changed: # if something has changed\n # rename all the clusters starting from S0\n self._rename_clusters()\n # remove also the old waves and seg files of the old clusters\n shutil.rmtree(self.get_file_basename())\n # rebuild all seg files\n self.generate_seg_file(set_speakers=False)\n # resplit the original wave file according to the new clusters\n self._to_trim()",
"def gather(session, selected_clusters):\n doc_ids = []\n titles = []\n summaries = []\n links = []\n doc_vector_list = []\n tfidf_vector_list = []\n for i, label in enumerate(session['kmodel'].labels_):\n if str(label) in selected_clusters:\n doc_ids.append(i)\n titles.append(session['titles'][i])\n summaries.append(session['summaries'][i])\n links.append(session['links'][i])\n\n # Create a new topic space matrix by selecting only the vector\n # representations of the new scatter collection documents.\n doc_vector_list.append(session['vector_space'].getrow(i))\n tfidf_vector_list.append(session['tfidf'].getrow(i))\n\n vector_space = vstack(doc_vector_list, format='csr')\n tfidf = vstack(tfidf_vector_list, format='csr')\n\n return doc_ids, titles, summaries, links, vector_space, tfidf",
"def final_catalogs(self, filename=None, catalog_cols=None):\n\n final_catalog = vstack([cluster_info['catalog'] for cluster_info in self._catalog_dictionary.values()])\n\n # If we request to keep only certain columns in our output\n if catalog_cols is not None:\n final_catalog.keep_columns(catalog_cols)\n\n if filename is None:\n return final_catalog\n else:\n if filename.endswith('.cat'):\n final_catalog.write(filename, format='ascii', overwrite=True)\n else:\n final_catalog.write(filename, overwrite=True)",
"def merge_light_catalogue():\n output_filename = os.path.join(constants.DESTINATION,\n 'concatenated',\n 'iphas-dr2-light.fits')\n\n instring = ''\n for lon in np.arange(25, 215+1, constants.STRIPWIDTH):\n for part in ['a', 'b']:\n path = os.path.join(constants.DESTINATION,\n 'concatenated',\n 'light',\n 'iphas-dr2-{0:03d}{1}-light.fits'.format(\n lon, part))\n instring += 'in={0} '.format(path)\n\n # Warning: a bug in stilts causes long fieldIDs to be truncated if -utype S15 is not set\n param = {'stilts': constants.STILTS,\n 'in': instring,\n 'out': output_filename}\n\n cmd = '{stilts} tcat {in} countrows=true lazy=true ofmt=colfits-basic out={out}'\n mycmd = cmd.format(**param)\n log.debug(mycmd)\n status = os.system(mycmd)\n log.info('concat: '+str(status))\n\n return status",
"def detail_matching(self):\n paradic = self.cfg['param']['paradic']\n work_dir = self.work_dir\n \n x = float(self.cfg['param']['x']) # selected pixel in the first image\n y = float(self.cfg['param']['y'])\n \n # sift parameters\n # number of bins in the orientation histogram\n n_bins = int(paradic['n_bins']) \n n_hist = int(paradic['n_hist']) \n # descriptor of n_hist X n_hist weighted histograms with n_ori\n n_ori = int(paradic['n_ori']) \n delta_min = float(paradic['delta_min'])\n sigma_min = float(paradic['sigma_min'])\n sigma_in = float(paradic['sigma_in'])\n lambda_ori = float(paradic['lambda_ori'])\n lambda_descr = float(paradic['lambda_descr'])\n #threshold defining reference orientations\n n_spo = int(paradic['n_spo'])\n \n # Read feature vectors from output files\n if (os.path.getsize(work_dir+'OUTmatches.txt') > 0 ):\n pairdata = find_nearest_keypoint(work_dir+'OUTmatches.txt', y, x)\n \n illustrate_pair(pairdata, n_bins, n_hist, n_ori, work_dir)\n\n \n # Read keys coordinates.\n d = 6+n_bins+n_hist*n_hist*n_ori # size of keydata inside pairdata\n v = n_hist*n_hist*n_ori\n [x1, y1, sigma1, theta1] = [float(x) for x in pairdata[0:4]]\n [o1, s1] = [float(x) for x in pairdata[4+v:4+v+2]]\n [x2a, y2a, sigma2a, theta2a] = [float(x) for x in pairdata[d:d+4]]\n [o2a, s2a] = [float(x) for x in pairdata[d+4+v:d+4+v+2]]\n [x2b, y2b, sigma2b, theta2b] = \\\n [float(x) for x in pairdata[2*d:2*d+4]]\n [o2b, s2b] = [float(x) for x in pairdata[2*d+4+v:2*d+4+v+2]]\n \n draw_one_match(pairdata,\n work_dir+'input_0.png',\n work_dir+'input_1.png',\n d,\n lambda_ori,\n lambda_descr,\n n_hist,\n work_dir+'OUTonepair.png')\n \n \n # Extract thumbnails.\n # keypoint 1 (image 1)\n print ' '.join(['demo_extract_patch', work_dir+'input_0.png',\n str(x1), str(y1), str(sigma1), str(theta1), str(o1), str(s1),\n str(delta_min), str(sigma_min), str(sigma_in), str(n_spo),\n str(lambda_ori), str(lambda_descr), str(n_hist),\n work_dir+\"detail_im1\"])\n proc = self.run_proc(['demo_extract_patch', work_dir+'input_0.png',\n str(x1), str(y1), str(sigma1), str(theta1), str(o1), str(s1),\n str(delta_min), str(sigma_min), str(sigma_in), str(n_spo),\n str(lambda_ori), str(lambda_descr), str(n_hist),\n work_dir+\"detail_im1\"])\n self.wait_proc(proc, timeout=self.timeout)\n \n # keypoint 2a (nearest neighbor in image 2)\n print ' '.join(['demo_extract_patch', work_dir+'input_1.png',\n str(x2a), str(y2a), str(sigma2a), str(theta2a), str(o2a), str(s2a),\n str(delta_min), str(sigma_min), str(sigma_in), str(n_spo),\n str(lambda_ori), str(lambda_descr), str(n_hist),\n work_dir+\"detail_im2a\"])\n proc = self.run_proc(['demo_extract_patch', work_dir+'input_1.png',\n str(x2a), str(y2a), str(sigma2a), str(theta2a), str(o2a), str(s2a),\n str(delta_min), str(sigma_min), str(sigma_in), str(n_spo),\n str(lambda_ori), str(lambda_descr), str(n_hist),\n work_dir+\"detail_im2a\"])\n self.wait_proc(proc, timeout=self.timeout) \n \n # keypoint 2b (second nearest neighbor in image 2)\n proc = self.run_proc(['demo_extract_patch', work_dir+'input_1.png',\n str(x2b), str(y2b), str(sigma2b), str(theta2b), str(o2b), str(s2b),\n str(delta_min), str(sigma_min), str(sigma_in), str(n_spo),\n str(lambda_ori), str(lambda_descr), str(n_hist),\n work_dir+\"detail_im2b\"])\n self.wait_proc(proc, timeout=self.timeout) \n \n \n return 1",
"def __call__(self, imgfilepath, imgname=None, assoc_cat=None, assoc_xname=\"x\", assoc_yname=\"y\",\n returncat=True, prefix=\"\", writelog=True):\n\n\t\tstarttime = datetime.now()\n\t\t\n\t\t# Let's first check if the image file exists.\n\t\tif not os.path.exists(imgfilepath):\n\t\t\traise IOError(\"The image file %s does not exist.\" % imgfilepath)\n\t\tlogger.info(\"Preparing to run SExtractor on %s...\" % imgfilepath)\n\n\t\tif imgname == None:\n\t\t\timgname = os.path.splitext(os.path.basename(imgfilepath))[0]\n\t\tlogger.debug(\"Using imgname '%s'...\" % (imgname))\t\t\n\t\t\n\t\t# We make a deep copy of the config, that we can modify with settings related to this particular\n\t\t# image.\n\t\timgconfig = copy.deepcopy(self.config)\n\t\t\n\t\t# We set the catalog name :\n\t\timgconfig[\"CATALOG_NAME\"] = self._get_cat_filepath(imgname)\n\t\tif os.path.exists(self._get_cat_filepath(imgname)):\n\t\t\tlogger.warning(\"Output catalog %s already exists, I will overwrite it\" % (self._get_cat_filepath(imgname)))\n\t\t\n\t\t\n\t\t# We prepare the ASSOC catalog file, if needed\n\t\tif assoc_cat is not None:\n\t\t\t\n\t\t\tlogger.info(\"I will run in ASSOC mode, trying to find %i sources...\" % (len(assoc_cat)))\n\t\t\tif \"VECTOR_ASSOC(3)\" not in self.params:\n\t\t\t\traise RuntimeError(\"To use the ASSOC helper, you have to add 'VECTOR_ASSOC(3)' to the params\")\n\t\t\tif assoc_xname not in assoc_cat.colnames or assoc_yname not in assoc_cat.colnames:\n\t\t\t\traise RuntimeError(\"I don't have columns %s or %s\" % (assoc_xname, assoc_yname))\n\t\t\tif \"VECTOR_ASSOC_2\" in assoc_cat.colnames:\n\t\t\t\traise RuntimeError(\"Do not give me an assoc_cat that already contains a column VECTOR_ASSOC_2\")\n\t\t\tfor param in self.params + [prefix + \"assoc_flag\"]:\n\t\t\t\t# This is not 100% correct, as some params might be vectors.\n\t\t\t\tif prefix + param in assoc_cat.colnames:\n\t\t\t\t\traise RuntimeError(\"Your assoc_cat already has a column named %s, fix this\" % (prefix + param))\n\t\t\t\n\t\t\tself._write_assoc(cat=assoc_cat, xname=assoc_xname, yname=assoc_yname, imgname=imgname)\n\t\t\n\t\t\timgconfig[\"ASSOC_DATA\"] = \"1, 2, 3\"\n\t\t\timgconfig[\"ASSOC_NAME\"] = self._get_assoc_filepath(imgname)\n\t\t\timgconfig[\"ASSOC_PARAMS\"] = \"1, 2\"\n\t\t\tif \"ASSOC_RADIUS\" not in imgconfig:\n\t\t\t\tlogger.warning(\"ASSOC_RADIUS not specified, using a default of 10.0\")\n\t\t\t\timgconfig[\"ASSOC_RADIUS\"] = 10.0\n\t\t\tif \"ASSOC_TYPE\" not in imgconfig:\n\t\t\t\tlogger.warning(\"ASSOC_TYPE not specified, using a default NEAREST\")\n\t\t\t\timgconfig[\"ASSOC_TYPE\"] = \"NEAREST\"\n\t\t\tif \"ASSOCSELEC_TYPE\" in imgconfig:\n\t\t\t\traise RuntimeError(\"Sorry, you cannot mess with ASSOCSELEC_TYPE yourself when using the helper. 
I'm using MATCHED.\")\n\t\t\timgconfig[\"ASSOCSELEC_TYPE\"] = \"MATCHED\"\n\n\t\t\n\t\t# We write the input files (if needed)\n\t\tself._write_default_config()\n\t\tself._write_params()\n\t\tself._write_default_conv()\n\t\tself._write_default_psf()\n\t\t\n\t\t# We build the command line arguments\n\t\tpopencmd = [self.sexpath, imgfilepath, \"-c\", self._get_config_filepath()]\n\t\tif self.nice != None: # We prepend the nice command\n\t\t\tpopencmd[:0] = [\"nice\", \"-n\", str(self.nice)]\n\t\t\n\t\t# We add the current state of config\n\t\tfor (key, value) in imgconfig.items():\n\t\t\tpopencmd.append(\"-\"+str(key))\n\t\t\tpopencmd.append(str(value).replace(' ',''))\n\t\t\n\t\t# And we run\n\t\tlogger.info(\"Starting SExtractor now, with niceness %s...\" % (self.nice))\n\t\tlogger.debug(\"Running with command %s...\" % (popencmd))\n\t\tp = subprocess.Popen(popencmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\t\tout, err = p.communicate()\n\t\t\n\t\tif writelog:\n\t\t\tlogfile = open(self._get_log_filepath(imgname), \"w\")\n\t\t\tlogfile.write(\"SExtractor was called with :\\n\")\n\t\t\tlogfile.write(\" \".join(popencmd))\n\t\t\tlogfile.write(\"\\n\\nA nicer view of the config:\\n\")\n\t\t\tlogfile.write(\"\\n\".join([\"%30s : %30s\" % (str(key), str(value)) for (key, value) in imgconfig.items()]))\n\t\t\tlogfile.write(\"\\n\\n####### stdout #######\\n\")\n\t\t\tlogfile.write(out.decode(encoding='UTF-8'))\n\t\t\tlogfile.write(\"\\n####### stderr #######\\n\")\n\t\t\tlogfile.write(err.decode(encoding='UTF-8'))\n\t\t\tlogfile.write(\"\\n\")\n\t\t\tlogfile.close()\n\t\t\t\n\t\tlogger.info(\"SExtractor stderr:\")\n\t\tlogger.info(err)\n\t\t\n\t\tif not \"All done\" in err.decode(encoding='UTF-8'):\n\t\t\tlogger.warning(\"Ouch, something seems wrong, check SExtractor log: %s\" % self._get_log_filepath(imgname))\n\t\t\n\t\tendtime = datetime.now()\n\t\tlogger.info(\"Running SExtractor done, it took %.2f seconds.\" % \\\n ((endtime - starttime).total_seconds()))\n\n\t\t# Let's check if this worked.\n\t\tif not os.path.isfile(self._get_cat_filepath(imgname)):\n\t\t\traise RuntimeError(\"It seems that SExtractor did not write the file '%s'. Check SExtractor log: %s\" % (self._get_cat_filepath(imgname), self._get_log_filepath(imgname)))\n\n\t\t# We return a dict. 
It always contains at least the path to the sextractor catalog:\n\t\toutput = {\"catfilepath\":self._get_cat_filepath(imgname), \"workdir\":self.workdir}\n\t\tif writelog:\n\t\t\toutput[\"logfilepath\"] = self._get_log_filepath(imgname)\n\t\t\n\t\t# And we read the output, if asked for:\n\t\tif returncat:\n\t\t\tif assoc_cat is None:\n\t\t\t\tsextable = astropy.table.Table.read(self._get_cat_filepath(imgname),\n format=\"ascii.sextractor\")\n\t\t\t\tlogger.info(\"Read %i objects from the SExtractor output catalog\" % (len(sextable)))\n\t\t\t\tself._add_prefix(sextable, prefix)\n\t\t\t\toutput[\"table\"] = sextable\n\t\t\t\t\n\t\t\telse: # We have to process the output catalog, merging it.\n\t\t\t\n\t\t\t\t# We add the \"number\" column to the assoc_cat, calling it VECTOR_ASSOC_2:\n\t\t\t\tintable = copy.deepcopy(assoc_cat)\n\t\t\t\tintable[\"VECTOR_ASSOC_2\"] = range(len(assoc_cat))\n\t\t\t\t\t\t\t\t\n\t\t\t\t# We read in the SExtractor output:\t\t\t\t\t\n\t\t\t\tsextable = astropy.table.Table.read(self._get_cat_filepath(imgname),\n format=\"ascii.sextractor\")\n\t\t\t\tlogger.info(\"Read %i objects from the SExtractor output catalog\" % (len(sextable)))\n\t\t\t\tself._add_prefix(sextable, prefix)\n\t\t\t\tsextable.remove_columns([\"VECTOR_ASSOC\", \"VECTOR_ASSOC_1\"])\n\t\t\t\t\n\t\t\t\t# Due to what seems to be a bug in SExtractor (version 2.19.5 and earlier),\n\t\t\t\t# we need to kick out \"duplicated\" (same VECTOR_ASSOC_2) rows.\n\t\t\t\t# That's weird, as in principle we asked to keep the NEAREST !\n\t\t\t\tsortedassoc = np.sort(sextable[\"VECTOR_ASSOC_2\"].data)\n\t\t\t\tduplassoc = list(np.unique(sortedassoc[sortedassoc[1:] == sortedassoc[:-1]]))\n\t\t\t\t# The unique is here as there might be more than 2 identical numbers...\n\t\t\t\tif len(duplassoc) > 0:\n\t\t\t\t\tlogger.warning(\"%i sources from the SExtractor catalog are strange duplicates (bug ?), I discard them.\" % (len(duplassoc)))\n\t\t\t\t\trowindices_to_remove = []\n\t\t\t\t\tfor row in sextable:\n\t\t\t\t\t\tif row[\"VECTOR_ASSOC_2\"] in duplassoc:\n\t\t\t\t\t\t\trowindices_to_remove.append(row.index)\n\t\t\t\t\tsextable.remove_rows(rowindices_to_remove)\n\t\t\t\t\t\n\t\t\t\tif len(sextable) == 0:\n\t\t\t\t\traise RuntimeError(\"SExtractor has returned no ASSOC match\")\n\t\t\t\t\t\t\t\n\t\t\t\t# We merge the tables, keeping all entries of the \"intable\"\n\t\t\t\tjoined = astropy.table.join(intable, sextable,\n\t\t\t\t\tjoin_type='left', keys='VECTOR_ASSOC_2',\n\t\t\t\t\t # raises an error in case of metadata conflict.\n\t\t\t\t\tmetadata_conflicts = \"error\",\n\t\t\t\t\t # Will only be used in case of column name conflicts.\n\t\t\t\t\ttable_names = ['ASSOC', 'SEx'],\n\t\t\t\t\tuniq_col_name = \"{table_name}_{col_name}\"\n\t\t\t\t\t)\n\t\t\t\t\n\t\t\t\t# This join does not mix the order, as the output is sorted according to our own\n\t\t\t\t# VECTOR_ASSOC_2\n\t\t\t\t\n\t\t\t\t# We remove the last ASSOC column:\n\t\t\t\tjoined.remove_columns([\"VECTOR_ASSOC_2\"])\n\t\t\t\t#assert len(intable) == len(joined)\n\t\t\t\t# More explicit:\n\t\t\t\tif not len(intable) == len(joined):\n\t\t\t\t\traise RuntimeError(\"Problem with joined tables: intable has %i rows, joined has %i. 
%s %s\" % (len(intable), len(joined), intable.colnames, joined.colnames))\n\t\t\t\t\n\t\t\t\t# The join might return a **masked** table.\n\t\t\t\t# In any case, we add one simply-named column with a flag telling if the\n\t\t\t\t# identification has worked.\n\t\t\t\t\n\t\t\t\tif joined.masked:\n\t\t\t\t\tlogger.info(\"ASSOC join done, my output is a masked table.\")\n\t\t\t\t\tjoined[prefix + \"assoc_flag\"] = joined[joined.colnames[-1]].mask == False\n\t\t\t\t\tnfound = sum(joined[prefix + \"assoc_flag\"])\n\t\t\t\t\tlogger.info(\"I could find %i out of %i sources (%i are missing)\" % \\\n (nfound, len(assoc_cat), len(assoc_cat)-nfound))\n\t\t\t\t\n\t\t\t\telse:\n\t\t\t\t\tlogger.info(\"ASSOC join done, I could find all your sources, my output is not masked.\")\n\t\t\t\t\tjoined[prefix + \"assoc_flag\"] = [True] * len(joined)\n\t\t\t\t\t\n\t\t\t\t\n\t\t\t\toutput[\"table\"] = joined\n\t\t\n\t\treturn output",
"def _compute_util_data(self):\n\n print(\"Computing PCA of document vectors.\")\n self.pca = PCA(n_components = 3)\n\n print(\"Computing document clusters in PCA basis.\")\n inferred_vecs = np.array([self.model.infer_vector(doc.words) for doc in self.tagged_docs])\n self.pca_reduced_vecs = self.pca.fit_transform(inferred_vecs)\n n_clusters = 25 # TODO find way to determine approx cluster size\n self.kmeans = KMeans(init = 'k-means++', n_clusters = n_clusters, random_state = 0)\n self.kmeans_preds = self.kmeans.fit_predict(self.pca_reduced_vecs)",
"def extract_summary(self):\n metadata = {}\n\n ## document Id\n documentId = self.tree.find(\"./id\")\n documentId = documentId.attrib['root'] if documentId is not None and \"root\" in documentId.attrib else \"\"\n metadata[\"documentId\"] = documentId\n\n ## setId\n setid = self.tree.find(\"./setId\")\n setid = setid.attrib['root'] if setid is not None and \"root\" in setid.attrib else \"\"\n metadata[\"setId\"] = setid\n\n ## version number\n splversion = self.tree.find(\"./versionNumber\")\n versionNumber = \"\"\n if splversion is not None:\n if \"value\" in splversion.attrib:\n versionNumber = splversion.attrib[\"value\"]\n metadata[\"versionNumber\"] = versionNumber\n\n ## product type \n code = self.tree.find(\"./code\")\n check_if_attrib_exists = lambda x, key: x[key] if key in x else ''\n product_type = check_if_attrib_exists(code.attrib, \"displayName\")\n metadata[\"productType\"] = product_type\n\n ## title\n title_text = self.tree_et.xpath(\"./title//text()\")\n title = (\" \".join([self.strip_newline_tab(t) for t in title_text]) if len(title_text) > 0 else \"\")\n metadata[\"title\"] = title\n\n ## manufacturer\n manufacturer = self.tree.find(\"./author//representedOrganization/name\")\n if manufacturer != None and manufacturer.text != None:\n manufacturer = self.strip_newline_tab(manufacturer.text)\n else:\n manufacturer = \"\"\n metadata[\"manufacturer\"] = manufacturer\n\n ## effectivetime\n effectiveTime = self.tree_et.xpath(\"./effectiveTime/@value\")\n effectiveTime = self.__normalize_date(effectiveTime)\n\n metadata[\"effectiveTime\"] = effectiveTime\n metadata[\"publishedDate\"] = effectiveTime\n\n ## From manufacturedProduct section\n brand_name = self.tree_et.xpath(\".//manufacturedProduct//name\")\n brand_name = self.strip_newline_tab(brand_name[0].text) if len(brand_name) > 0 else \"\"\n metadata[\"drugName\"] = brand_name\n\n route = self.tree_et.xpath(\".//manufacturedProduct//formCode/@code\")\n route = self.strip_newline_tab(route[0]) if len(route) > 0 else \"\"\n metadata[\"routeOfAdministration\"] = route\n\n product_ndc = self.tree_et.xpath(\".//manufacturedProduct//code/@code\")\n product_ndc = self.strip_newline_tab(product_ndc[0]) if len(product_ndc) > 0 else \"\"\n metadata[\"ndcCode\"] = product_ndc\n\n generic_name = self.tree_et.xpath(\".//manufacturedProduct//asEntityWithGeneric//genericMedicine/name\")\n generic_name = self.strip_newline_tab(generic_name[0].text) if len(generic_name) > 0 else \"\"\n metadata[\"genericName\"] = generic_name\n\n ## dosage form\n dosage_form = self.tree_et.xpath(\".//manufacturedProduct//formCode/@displayName\")\n dosage_form = dosage_form[0] if len(dosage_form) > 0 else \"\"\n metadata[\"dosageForm\"] = dosage_form\n\n # active ingredients\n substance_name = sorted([self.strip_newline_tab(a.text) for a in\n self.tree_et.xpath(\".//.//manufacturedProduct//activeMoiety/activeMoiety/name\")])\n substance_name = \", \".join(set(substance_name))\n metadata[\"substanceName\"] = substance_name\n\n ## inactive ingredients\n inactive_ingredients = sorted([self.strip_newline_tab(inactive.text) for inactive in self.tree_et.xpath(\n \".//manufacturedProduct//inactiveIngredient/inactiveIngredientSubstance/name\")])\n\n if len(inactive_ingredients) == 0:\n inactive_ingredients = \"\"\n else:\n inactive_ingredients = \",\".join(set(inactive_ingredients))\n\n metadata[\"inactiveIngredients\"] = inactive_ingredients\n\n ## other ingredients\n ingredients = sorted([self.strip_newline_tab(ingredient.text) for ingredient in\n 
self.tree_et.xpath(\".//manufacturedProduct//ingredient/ingredientSubstance/name\")])\n\n if len(ingredients) == 0:\n ingredients = \"\"\n else:\n ingredients = \", \".join(set(ingredients))\n metadata[\"ingredients\"] = ingredients\n\n # marketing_category\n marketing_category = self.tree_et.xpath(\".//manufacturedProduct/subjectOf/approval/code/@displayName\")\n marketing_category = self.strip_newline_tab(marketing_category[0]) if len(marketing_category) > 0 else \"\"\n metadata[\"marketingCategory\"] = marketing_category\n\n # consumed in\n consumed_in = self.tree_et.xpath(\n \".//manufacturedProduct//consumedIn/substanceAdministration/routeCode/@displayName\")\n consumed_in = consumed_in[0] if len(consumed_in) > 0 else \"\"\n metadata[\"consumedIn\"] = consumed_in\n\n # revision date\n marketing_date = self.tree_et.xpath(\".//manufacturedProduct//marketingAct/effectiveTime/low/@value\")\n marketing_date = self.__normalize_date(marketing_date)\n metadata[\"marketingDate\"] = marketing_date\n\n return metadata",
"def _read_catalog(self, catname):\n print('loading catalog:',catname)\n with fitsio.FITS(catname,lower=True) as fits:\n #cat = fits[1][100000:110000]\n if 'object_data' in fits:\n print('reading from MEDS object data')\n ext='object_data'\n else:\n ext=1\n cat = fits[ext][:]\n\n # one cut here based on if we matched to the galsim cat\n w, = np.where(\n #(cat['mu_class'] < 3)\n #&\n #(cat['mask']==0)\n #&\n (cat['gscosmos_index'] >= 0)\n )\n print('initial cuts %d/%d %g%%' % (w.size,cat.size,w.size/cat.size*100))\n\n cat = cat[w]\n return cat",
"def computePosRefCatalog(self, sourceCat):\n minimalPosRefSchema = convertReferenceCatalog._makeSchema(filterNameList=[\"r\"], addCentroid=True)\n refCat = afwTable.SimpleCatalog(minimalPosRefSchema)\n for source in sourceCat:\n refObj = refCat.addNew()\n refObj.setCoord(source.getCoord())\n refObj.set(\"centroid_x\", source.getX())\n refObj.set(\"centroid_y\", source.getY())\n refObj.set(\"hasCentroid\", True)\n refObj.set(\"r_flux\", source.get(\"slot_ApFlux_instFlux\"))\n refObj.set(\"r_fluxErr\", source.get(\"slot_ApFlux_instFluxErr\"))\n refObj.setId(source.getId())\n return refCat",
"def test_hsmcatalog():\n if __name__ == '__main__':\n logger = piff.config.setup_logger(verbose=2)\n else:\n logger = piff.config.setup_logger(log_file='output/test_hsmcatalog.log')\n\n image_file = os.path.join('output','test_stats_image.fits')\n cat_file = os.path.join('output','test_stats_cat.fits')\n psf_file = os.path.join('output','test_starstats.fits')\n hsm_file = os.path.join('output', 'test_hsmcatalog.fits')\n config = {\n 'input' : {\n 'image_file_name' : image_file,\n 'cat_file_name' : cat_file,\n 'stamp_size' : 48,\n },\n 'select' : {\n 'reserve_frac' : 0.2,\n 'seed' : 123\n },\n 'psf' : {\n 'model' : { 'type' : 'Gaussian',\n 'fastfit': True,\n 'include_pixel': False },\n 'interp' : { 'type' : 'Mean' },\n },\n 'output' : {\n 'file_name' : psf_file,\n 'stats' : [\n {\n 'type': 'HSMCatalog',\n 'file_name': hsm_file,\n }\n ]\n }\n }\n piff.piffify(config, logger)\n assert os.path.isfile(hsm_file)\n\n data, header = fitsio.read(hsm_file, header=True)\n for col in ['ra', 'dec', 'x', 'y', 'u', 'v',\n 'T_data', 'g1_data', 'g2_data',\n 'T_model', 'g1_model', 'g2_model',\n 'flux', 'reserve', 'flag_data', 'flag_model']:\n assert len(data[col]) == 10\n true_data = fitsio.read(cat_file)\n\n assert header['PIFF_VERSION'] == piff.__version__\n\n np.testing.assert_allclose(data['x'], true_data['x'])\n np.testing.assert_allclose(data['y'], true_data['y'])\n np.testing.assert_allclose(data['flux'], 123.45, atol=0.001)\n print('reserve = ',data['reserve'])\n print('nreserve = ',np.sum(data['reserve']))\n print('ntot = ',len(data['reserve']))\n assert np.sum(data['reserve']) == int(0.2 * len(data['reserve']))\n np.testing.assert_allclose(data['T_model'], data['T_data'], rtol=1.e-4)\n np.testing.assert_allclose(data['g1_model'], data['g1_data'], rtol=1.e-4)\n np.testing.assert_allclose(data['g2_model'], data['g2_data'], rtol=1.e-4)\n\n # On this file, no hsm errors\n np.testing.assert_array_equal(data['flag_data'], 0)\n np.testing.assert_array_equal(data['flag_model'], 0)\n\n image = galsim.fits.read(image_file)\n world = [image.wcs.toWorld(galsim.PositionD(x,y)) for x,y in zip(data['x'],data['y'])]\n np.testing.assert_allclose(data['ra'], [w.ra.deg for w in world], rtol=1.e-4)\n np.testing.assert_allclose(data['dec'], [w.dec.deg for w in world], rtol=1.e-4)\n\n # Repeat with non-Celestial WCS\n wcs = galsim.AffineTransform(0.26, 0.05, -0.08, -0.24, galsim.PositionD(1024,1024))\n config['input']['wcs'] = wcs\n piff.piffify(config, logger)\n data = fitsio.read(hsm_file)\n np.testing.assert_array_equal(data['ra'], 0.)\n np.testing.assert_array_equal(data['dec'], 0.)\n world = [wcs.toWorld(galsim.PositionD(x,y)) for x,y in zip(data['x'],data['y'])]\n np.testing.assert_allclose(data['u'], [w.x for w in world], rtol=1.e-4)\n np.testing.assert_allclose(data['v'], [w.y for w in world], rtol=1.e-4)\n\n # Use class directly, rather than through config.\n psf = piff.PSF.read(psf_file)\n stars, _, _ = piff.Input.process(config['input'])\n stars = piff.Select.process(config['select'], stars)\n hsmcat = piff.stats.HSMCatalogStats()\n with np.testing.assert_raises(RuntimeError):\n hsmcat.write('dummy') # Cannot write before compute\n hsmcat.compute(psf, stars)\n hsm_file2 = os.path.join('output', 'test_hsmcatalog2.fits')\n with np.testing.assert_raises(ValueError):\n hsmcat.write() # Must supply file_name if not given in constructor\n hsmcat.write(hsm_file2)\n data2 = fitsio.read(hsm_file2)\n for key in data.dtype.names:\n np.testing.assert_allclose(data2[key], data[key], rtol=1.e-5)",
"def load_air_sar2_instances(dirname: str, split: str):\n with PathManager.open(os.path.join(dirname, \"ImageSets\", \"Main\", split + \".txt\")) as f:\n fileids = np.loadtxt(f, dtype=np.str)\n\n dicts = []\n for fileid in fileids:\n anno_file = os.path.join(dirname, \"AIR-SARShip-2.0-xml\", fileid + \".xml\")\n jpeg_file = os.path.join(dirname, \"AIR-SARShip-2.0-data\", fileid + \".tiff\")\n\n tree = ET.parse(anno_file)\n\n r = {\n \"file_name\": jpeg_file,\n \"image_id\": fileid,\n \"height\": 1000,\n \"width\": 1000,\n }\n instances = []\n\n for obj in tree.find('objects').findall(\"object\"):\n cls = obj.find('possibleresult').find('name').text\n bbox = obj.find(\"points\")\n xmin = ymin = float(10000)\n xmax = ymax = 0. \n for bbox_node in bbox.findall('point'):\n coor = bbox_node.text.split(',')\n x, y = map(float, coor)\n xmin = min(xmin, x)\n ymin = min(ymin, y)\n xmax = max(xmax, x)\n ymax = max(ymax, y) \n \n instances.append(\n {\"category_id\": 0, \"bbox\": [xmin, ymin, xmax, ymax], \"bbox_mode\": BoxMode.XYXY_ABS}\n )\n r[\"annotations\"] = instances\n dicts.append(r)\n return dicts",
"def merge_user_information(self, sid):\n pprint(self.extracted_information)\n for (field, value) in self.extracted_information.items():\n value = value[0] # TODO: should set data for everything in list but will do later\n self.data.set_data(sid, field, value[0])",
"def append_phot_catalog(filt, perm_path_to_photocat,path_to_flts):\n \n #if the photcat doesnt already exist, make a file and add the header\n if not os.path.isfile(perm_path_to_photocat+'/'+filt+\"_photcat.dat\"):\n \tprint \"permenant photcat doesn't exist, making file for \" + filt\n \twith open(perm_path_to_photocat+'/'+filt+\"_photcat.dat\", \"a\") as old_photcat:\n \t\tnew_photcat = open(path_to_flts+'/'+filt+\"_photcat.dat\", \"r\")\n \t\t#look in first few lines for # '\n \t\theader_search = new_photcat.readlines()[0:3]\n \t\tnew_photcat.close()\n \t\tfor line in header_search:\n \t\t\tif '#' in line: \n \t\t\t\t#print line \t\t\n \t\t\t\told_photcat.write(line)\n \t\t\n \t\t\n \t\n \t\n \n with open(perm_path_to_photocat+'/'+filt+\"_photcat.dat\", \"a\") as old_photcat:\n new_photcat = open(path_to_flts+'/'+filt+\"_photcat.dat\", \"r\")\n new_photcat_list = new_photcat.readlines()\n new_photcat.close()\n \n for line in new_photcat_list:\n if '#' not in line:\n old_photcat.write(line)",
"def sources_extraction(image,sextractor_pars):\n\n cat_name, detect_minarea, detect_thresh, analysis_thresh, phot_aperture, satur_level, ZP, gain, pixelScale,seeing,back_type,back_value,back_size,backphoto_type,backphoto_thick,back_filterthresh,checkimage_type,checkimage_name= sextractor_pars\n sp.run('sex %s.fits -c gft.sex -CATALOG_NAME %s.cat -CATALOG_TYPE ASCII_HEAD -PARAMETERS_NAME gft.param -DETECT_TYPE CCD -DETECT_MINAREA %d -DETECT_THRESH %d -ANALYSIS_THRESH %d -PHOT_APERTURES %d -SATUR_LEVEL %d -MAG_ZEROPOINT %f -GAIN %f -PIXEL_SCALE %f -SEEING_FWHM %f -BACK_TYPE %s -BACK_VALUE %f -BACK_SIZE %d -BACKPHOTO_TYPE %s -BACKPHOTO_THICK %d -BACK_FILTTHRESH %f -CHECKIMAGE_TYPE %s -CHECKIMAGE_NAME %s.fits ' % (image,cat_name, detect_minarea, detect_thresh, analysis_thresh, phot_aperture, satur_level, ZP, gain, pixelScale,seeing,back_type,back_value,back_size,backphoto_type,backphoto_thick,back_filterthresh,checkimage_type,checkimage_name),shell=True)",
"def computeCluster(filename,cop,serie):\n latitudes = []\n longitudes = []\n tempo = []\n cluster3DLatLong =[]\n serieItens =[]\n \n for i in serie:\n if(hasattr(i,'reporting_date')): # é incidentes\n if(cop == 'TODOS' and i.lon and i.lat):\n latitudes.append(float(i.lat))\n longitudes.append(float(i.lon))\n tempo.append(i.reporting_date)\n cluster3DLatLong.append([float(i.lat),float(i.lon)])\n serieItens.append(i)\n \n elif(i.lon and i.lat and haversine(float(latLongCops[cop][1]),float(latLongCops[cop][0]),float(i.lon),float(i.lat))<=50):\n latitudes.append(float(i.lat))\n longitudes.append(float(i.lon))\n tempo.append(i.reporting_date)\n cluster3DLatLong.append([float(i.lat),float(i.lon)])\n serieItens.append(i)\n \n elif(hasattr(i,'data_hora') and hasattr(i.localizacao,'lat') and hasattr(i.localizacao,'lon')): # é relato\n if(cop == 'TODOS' and hasattr(i.localizacao,'lat') and hasattr(i.localizacao,'lon')):\n latitudes.append(float(i.localizacao['lat']))\n longitudes.append(float(i.localizacao['lon']))\n tempo.append(i.data_hora)\n cluster3DLatLong.append([float(i.localizacao['lat']),float(i.localizacao['lon'])])\n serieItens.append(i)\n \n elif(hasattr(i.localizacao,'lat') and hasattr(i.localizacao,'lon') and\n haversine(float(latLongCops[cop][1]),float(latLongCops[cop][0]),float(i.localizacao['lon']),float(i.localizacao['lat']))<=50):\n latitudes.append(float(i.localizacao['lat']))\n longitudes.append(float(i.localizacao['lon']))\n tempo.append(i.data_hora)\n cluster3DLatLong.append([float(i.localizacao['lat']),float(i.localizacao['lon'])])\n serieItens.append(i)\n \n #clusters geograficos\n features = array(zip(latitudes,longitudes))\n # escolhi pegar o maior valor menor q sqrt(n/2)\n #k = int(math.floor(math.sqrt(len(latitudes)/2.0)))\n k = int(math.floor(math.sqrt(len(latitudes)/4.0)))\n if (k==0): \n k = 1\n clusters,distorcao = kmeans(features,k)\n \n #criando um vetor com a qtde de clusters necessarios\n itensClusterizados = []\n for i in range(0,k):\n itensClusterizados.append([])\n #agrupando cada item no seu cluster\n for i in range(0,len(tempo)):\n distancias=[]\n for c in clusters:\n #calcula a distancia o item ao centro de cada cluster\n distancias.append(haversine(float(longitudes[i]),float(latitudes[i]),float(c[1]),float(c[0])))\n #armazena o item no cluster mais proximo\n itensClusterizados[distancias.index(np.min(distancias))].append(serieItens[i])\n\n menorTempo = np.min(tempo)\n #criando os graficos ... cada grafico com uma cor\n \n plt.close('all')\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n colors=['y','b','r']\n for c in range(0,k):\n for i in itensClusterizados[c]:\n if(hasattr(i,'reporting_date') and i.lon and i.lat): # é incidentes\n ax.scatter(float(i.lat), float(i.lon), (i.reporting_date - menorTempo).total_seconds(),c='r',s=100)\n elif(hasattr(i,'data_hora') and hasattr(i.localizacao,'lat') and hasattr(i.localizacao,'lon')): # é relato\n ax.scatter(float(i.localizacao['lat']), float(i.localizacao['lon']), (i.data_hora - menorTempo).total_seconds(),c='g',s=100)\n ax.set_title('Ocorrencias', fontsize=24)\n ax.set_xlabel('Latitude', fontsize=20)\n ax.set_ylabel('Longitude', fontsize=20)\n ax.set_zlabel('Tempo', fontsize=20)\n fig.set_size_inches(18.5,10.5)\n fig.savefig(filename+cop+'.png',dpi=96)\n \n return clusters,itensClusterizados"
] | [
"0.60301304",
"0.5513732",
"0.55033004",
"0.54286397",
"0.5401107",
"0.530687",
"0.5304147",
"0.523912",
"0.5212986",
"0.5149777",
"0.51291513",
"0.5106632",
"0.50841045",
"0.5069048",
"0.5041999",
"0.5037942",
"0.50347835",
"0.50220233",
"0.50186545",
"0.49860176",
"0.49579468",
"0.4934743",
"0.49340874",
"0.49227268",
"0.49045166",
"0.48976535",
"0.48903444",
"0.48845533",
"0.48813003",
"0.48624954"
] | 0.7108529 | 0 |
Calculates the separations of each object relative to the SZ center. Finds both the angular separations and physical separations relative to the cluster's r500 radius. | def object_separations(self):
for cluster_info in self._catalog_dictionary.values():
catalog = cluster_info['catalog']
# Create SkyCoord objects for all objects in the catalog as well as the SZ center
object_coords = SkyCoord(catalog['ALPHA_J2000'], catalog['DELTA_J2000'], unit=u.degree)
sz_center = SkyCoord(catalog['SZ_RA'][0], catalog['SZ_DEC'][0], unit=u.degree)
# Calculate the angular separations between the objects and the SZ center in arcminutes
separations_arcmin = object_coords.separation(sz_center).to(u.arcmin)
# Compute the r500 radius for the cluster
r500 = (3 * catalog['M500'][0] * u.Msun /
(4 * np.pi * 500 * self._cosmo.critical_density(catalog['REDSHIFT'][0]).to(
u.Msun / u.Mpc ** 3))) ** (1 / 3)
# Convert the angular separations into physical separations relative to the cluster's r500 radius
separations_r500 = (separations_arcmin / r500
* self._cosmo.kpc_proper_per_arcmin(catalog['REDSHIFT'][0]).to(u.Mpc / u.arcmin))
# Add our new columns to the catalog
catalog['R500'] = r500
catalog['RADIAL_SEP_R500'] = separations_r500
catalog['RADIAL_SEP_ARCMIN'] = separations_arcmin
# Update the catalog in the data structure
cluster_info['catalog'] = catalog | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def object_separations(self):\n\n for cutout_info in self._catalog_dictionary.values():\n catalog = cutout_info['catalog']\n\n # Create SkyCoord objects for all objects in the catalog as well as the image center\n object_coords = SkyCoord(catalog['ALPHA_J2000'], catalog['DELTA_J2000'], unit=u.deg)\n center_coord = SkyCoord(catalog['SZ_RA'][0], catalog['SZ_DEC'][0], unit=u.deg)\n\n # Calculate the angular separations between the objects and the image center in arcminutes\n separations_arcmin = object_coords.separation(center_coord).to(u.arcmin)\n\n # Add our new column to the catalog\n catalog['RADIAL_SEP_ARCMIN'] = separations_arcmin\n\n # Update the catalog in the data structure\n cutout_info['catalog'] = catalog",
"def _computeStikeDip(self):\n seg = self._group_index\n groups = np.unique(seg)\n ng = len(groups)\n norm_vec = Vector(0, 0, 0)\n north_vec = Vector(0, 0, 0)\n up_vec = Vector(0, 0, 0)\n for i in range(ng):\n group_segments = np.where(groups[i] == seg)[0]\n nseg = len(group_segments) - 1\n for j in range(nseg):\n ind = group_segments[j]\n P0 = Point(self._toplons[ind],\n self._toplats[ind],\n self._topdeps[ind])\n P1 = Point(self._toplons[ind + 1],\n self._toplats[ind + 1],\n self._topdeps[ind + 1])\n P2 = Point(self._botlons[ind + 1],\n self._botlats[ind + 1],\n self._botdeps[ind + 1])\n P3 = Point(self._botlons[ind],\n self._botlats[ind],\n self._botdeps[ind])\n P1up = Point(self._toplons[ind + 1],\n self._toplats[ind + 1],\n self._topdeps[ind + 1] - 1.0)\n P1N = Point(self._toplons[ind + 1],\n self._toplats[ind + 1] + 0.001,\n self._topdeps[ind + 1])\n P3up = Point(self._botlons[ind],\n self._botlats[ind],\n self._botdeps[ind] - 1.0)\n P3N = Point(self._botlons[ind],\n self._botlats[ind] + 0.001,\n self._botdeps[ind])\n p0 = Vector.fromPoint(P0)\n p1 = Vector.fromPoint(P1)\n p2 = Vector.fromPoint(P2)\n p3 = Vector.fromPoint(P3)\n p1up = Vector.fromPoint(P1up)\n p1N = Vector.fromPoint(P1N)\n p3up = Vector.fromPoint(P3up)\n p3N = Vector.fromPoint(P3N)\n\n # Sides\n s01 = p1 - p0\n s02 = p2 - p0\n s03 = p3 - p0\n s21 = p1 - p2\n s23 = p3 - p2\n\n # First triangle\n t1norm = (s02.cross(s01)).norm()\n a = s01.mag()\n b = s02.mag()\n c = s21.mag()\n s = (a + b + c) / 2\n A1 = np.sqrt(s * (s - a) * (s - b) * (s - c)) / 1000\n\n # Second triangle\n t2norm = (s03.cross(s02)).norm()\n a = s03.mag()\n b = s23.mag()\n c = s02.mag()\n s = (a + b + c) / 2\n A2 = np.sqrt(s * (s - a) * (s - b) * (s - c)) / 1000\n\n # Up and North\n p1up = (p1up - p1).norm()\n p3up = (p3up - p3).norm()\n p1N = (p1N - p1).norm()\n p3N = (p3N - p3).norm()\n\n # Combine\n norm_vec = norm_vec + A1 * t1norm + A2 * t2norm\n north_vec = north_vec + A1 * p1N + A2 * p3N\n up_vec = up_vec + A1 * p1up + A2 * p3up\n\n norm_vec = norm_vec.norm()\n north_vec = north_vec.norm()\n up_vec = up_vec.norm()\n\n # Do I need to flip the vector because it is pointing down (i.e.,\n # right-hand rule is violated)?\n flip = np.sign(up_vec.dot(norm_vec))\n norm_vec = flip * norm_vec\n\n # Angle between up_vec and norm_vec is dip\n self._dip = np.arcsin(up_vec.cross(norm_vec).mag()) * 180 / np.pi\n\n # Normal vector projected to horizontal plane\n nvph = (norm_vec - up_vec.dot(norm_vec) * up_vec).norm()\n\n # Dip direction is angle between nvph and north; strike is orthogonal.\n cp = nvph.cross(north_vec)\n sign = np.sign(cp.dot(up_vec))\n dp = nvph.dot(north_vec)\n strike = np.arctan2(sign * cp.mag(), dp) * 180 / np.pi - 90\n if strike < -180:\n strike = strike + 360\n self._strike = strike",
"def separation_radius(self, z_cm):\n # Calculate the separation given an array of redshift values\n # if z_cm is None:\n # z_cm = utils.log_zgrid([0.1, 3.5], 0.01)\n\n dr_cm = WMAP9.kpc_comoving_per_arcmin(z_cm).to(u.Mpc/u.arcsec)\n\n # density\n # dz_thresh = 0.01 # separation threshold, dz*(1+z)\n\n # Separation radius\n dr_sep = np.sqrt(0.5 / np.pi) * u.Mpc\n dr_area = (np.pi * dr_sep.value ** 2)\n\n return dr_cm, dr_sep, dr_area",
"def find_large_separation(self):\n\n x = self.modes['n'] # radial order\n y = self.modes['freq'] # frequency\n wid = (0.66*self.numax**0.88)/2/np.sqrt(2*np.log(2.0))\n w = (np.exp((-(y-self.numax)**2)/(2*wid**2))) # weight\n\n mN = np.sum(w)*np.sum(w*x*y) - np.sum(w*x)*np.sum(w*y)\n D = np.sum(w)*np.sum(w*x**2) - np.sum(w*x)**2\n Dn = mN/D\n #print Dn\n\n return Dn",
"def calc_o_space(persons):\n c_x = 0\n c_y = 0\n \n# Group size\n g_size = len(persons)\n \n for person in persons:\n c_x += person[0] + math.cos(person[2]) * STRIDE\n c_y += person[1] + math.sin(person[2]) * STRIDE\n\n center = [c_x / g_size, c_y / g_size]\n\n return center",
"def discretize(self, obstacle_collection, drone_poses, goal_poses):\n cyl_c1_list = []\n cyl_c2_list = []\n cyl_r_list = []\n cyl_dir_list = []\n for obs in obstacle_collection.obstacles:\n if isinstance(obs, Cylinder):\n if obs.axis == 'x':\n cyl_dir_list.append(0)\n cyl_c1_list.append(obs.position.y + abs(MIN_Y))\n cyl_c2_list.append(obs.position.z)\n elif obs.axis == 'y':\n cyl_dir_list.append(1)\n cyl_c1_list.append(obs.position.x + abs(MIN_X))\n cyl_c2_list.append(obs.position.z)\n elif obs.axis == 'z':\n cyl_dir_list.append(2)\n cyl_c1_list.append(obs.position.x + abs(MIN_X))\n cyl_c2_list.append(obs.position.y + abs(MIN_Y))\n\n cyl_r_list.append(obs.radius)\n\n x_dim = MAX_X - MIN_X\n y_dim = MAX_Y - MIN_Y\n z_dim = MAX_Z\n mesh_nodes = mesh.generateMesh([x_dim, y_dim, z_dim],\n cyl_c1_list, cyl_c2_list, cyl_r_list, cyl_dir_list,\n self.__angle, self.__size, self.__approximation,\n self.__radiusedge, self.__ratio)\n for _ in mesh_nodes:\n self.__nodes.append(MeshNode())\n for n in mesh_nodes:\n self.__nodes[n.index].x = n.x - abs(MIN_X)\n self.__nodes[n.index].y = n.y - abs(MIN_Y)\n self.__nodes[n.index].z = n.z\n for edge in n.adj:\n self.__nodes[n.index].add_edge(self.__nodes[edge])\n\n def add_node(n):\n for node in self.__nodes:\n # Only short edges that are not on the ground or through obstacles\n if n.dist(node) < MESH_EDGE_DIST and (n.z > DRONE_HEIGHT or node.z > DRONE_HEIGHT):\n valid_edge = True\n for obs in obstacle_collection.obstacles:\n if isinstance(obs, Cylinder):\n cylinder_seg = Segment(Point(obs.position), Point(obs.position))\n if obs.axis == 'x':\n cylinder_seg.b += Point(1000, 0, 0)\n elif obs.axis == 'y':\n cylinder_seg.b += Point(0, 1000, 0)\n else:\n cylinder_seg.b += Point(0, 0, 1000)\n edge_seg = Segment(Point(n.position()), Point(node.position()))\n\n if cylinder_seg.min_distance(edge_seg) < obs.radius:\n valid_edge = False\n if valid_edge:\n n.add_edge(node)\n\n # Adding drone nodes\n drone_nodes = {}\n if drone_poses is not None:\n for drone_id in drone_poses:\n p = drone_poses[drone_id].position\n drone_nodes[drone_id] = MeshNode(p.x, p.y, p.z)\n add_node(drone_nodes[drone_id])\n\n # Adding goal nodes\n goal_nodes = {}\n if goal_poses is not None:\n for drone_id in goal_poses:\n p = goal_poses[drone_id]\n goal_nodes[drone_id] = MeshNode(p.x, p.y, p.z)\n add_node(goal_nodes[drone_id])\n\n return drone_nodes, goal_nodes",
"def separations(self, g1, g2):\n\n results = {}\n com1 = CenterOfMass(self.galaxies[g1], 2)\n com2 = CenterOfMass(self.galaxies[g2], 2)\n com1_p = com1.com_p()\n com2_p = com2.com_p()\n com1_v = com1.com_v(com1_p)\n com2_v = com2.com_v(com2_p)\n \n results['pos_xyz'] = com2_p - com1_p\n results['vel_xyz'] = com2_v - com1_v\n results['r'] = np.round(norm(results['pos_xyz']), 2)\n results['r_hat'] = np.round(results['pos_xyz'] / results['r'], 2) # unit vector\n results['vel_mag'] = np.round(norm(results['vel_xyz']), 2)\n results['v_radial'] = np.round(np.dot(results['r_hat'], results['vel_xyz']), 2)\n results['v_tangential'] = np.round(np.cross(results['r_hat'], results['vel_xyz']), 2)\n results['v_tan_mag'] = np.round(norm(results['v_tangential']), 2)\n\n return results",
"def compute_thickness(self):\n com = vtk.vtkCenterOfMass()\n com.SetInputData(self.inner_rim_poly)\n center = np.asarray(com.GetCenter()) # take center from inner points (not outer)\n\n irp_numpy = numpy_support.vtk_to_numpy(self.inner_rim_poly.GetPoints().GetData())\n orp_numpy = numpy_support.vtk_to_numpy(self.outer_rim_poly.GetPoints().GetData())\n\n # compute average radius ..\n rs_inner = np.linalg.norm(irp_numpy - np.tile(center, (irp_numpy.shape[0], 1)), axis = 1)\n rs_outer = np.linalg.norm(orp_numpy - np.tile(center, (orp_numpy.shape[0], 1)), axis = 1)\n\n # average out\n r_inner = np.mean(rs_inner)\n r_outer = np.mean(rs_outer)\n\n # compute distance\n d = r_outer - r_inner\n self.thickness = d\n\n return d",
"def calculate_separation(self, from_boid: b.Boid, boids: List[b.Boid]) -> Tuple[float, float]:\n separation_x = 0\n separation_y = 0\n for boid in boids:\n distance_squared = (boid.x - from_boid.x) ** 2 + (boid.y - from_boid.y) ** 2\n if distance_squared < self.boid_avoid_distance:\n separation_x -= (boid.x - from_boid.x)\n separation_y -= (boid.y - from_boid.y)\n return separation_x, separation_y",
"def generate_centers(self):\n\t\tcenters = []\n\t\tsize = self.config.image_size\n\t\tfor i in range(self.config.num_obj):\n\t\t\tflag = True\n\t\t\twhile flag:\n\t\t\t\tc = np.random.randint(int(size * 0.05), int(size * 0.95), 2)\n\t\t\t\tflag = False\n\t\t\t\tfor center in centers:\n\t\t\t\t\tif (abs(center[0] - c[0]) <= 0.1 * size) or (abs(center[1] - c[1]) <= 0.1 *size):\n\t\t\t\t\t\tflag = False\n\t\t\tcenters.append(c)\n\t\t\t\t\n\t\treturn centers",
"def computeNodeVolumes(self):\n for i in np.arange(0,self.ni):\n for j in np.arange(0,self.nj):\n for k in np.arange(0,self.nk):\n \n V = self.dh[0]*self.dh[1]*self.dh[2]\n if (i==0 or i==self.ni-1): V*=0.5\n if (j==0 or j==self.nj-1): V*=0.5\n if (k==0 or k==self.nk-1): V*=0.5\n \n self.node_vol[i][j][k] = V",
"def nt_3d_centers(cif_file, consider_all_atoms):\n result =[]\n try:\n structure = MMCIFParser().get_structure(cif_file, cif_file)\n except Exception as e:\n warn(f\"\\n{cif_file.split('/')[-1]} : {e}\", error=True)\n with open(runDir + \"/errors.txt\", \"a\") as f:\n f.write(f\"Exception in nt_3d_centers({cif_file.split('/')[-1]})\\n\")\n f.write(str(e))\n f.write(\"\\n\\n\")\n return result\n for model in structure:\n for chain in model:\n for residue in chain:\n if consider_all_atoms:\n temp_list = []\n for atom in residue:\n temp_list.append(atom.get_coord())\n lg = len(temp_list)\n summ = np.sum(temp_list, axis = 0)\n res_isobaricentre = [summ[0]/lg, summ[1]/lg, summ[2]/lg]\n result.append([res_isobaricentre[0], res_isobaricentre[1], res_isobaricentre[2]])\n else:\n coordinates = None\n for atom in residue:\n if atom.get_name() == \"C1'\":\n coordinates = atom.get_coord()\n if coordinates is None:\n # Residue has no C1'\n res = np.nan\n else:\n res = [coordinates[0], coordinates[1], coordinates[2]]\n result.append(res)\n return(result)",
"def separation(self, boids, avg_vector, total_pom):\r\n \r\n total = total_pom - 1\r\n steering = Vector(*np.zeros(2))\r\n \r\n if total > 0:\r\n avg_vector /= total\r\n avg_vector = Vector(*avg_vector)\r\n if np.linalg.norm(avg_vector) > 0:\r\n avg_vector = (avg_vector / np.linalg.norm(avg_vector)) * self.max_speed\r\n steering = avg_vector - self.velocity # calculating force that steers the boid from neigbours with \r\n # respect to neighbour distance\r\n if np.linalg.norm(steering) > self.max_force:\r\n steering = (steering /np.linalg.norm(steering)) * self.max_force\r\n\r\n return steering",
"def cluster(self):\n\n result_nominatim = self.nominatim()\n try:\n coord = [(float( i['lat'] ), float( i['lon'] )) for i in result_nominatim]\n except:\n return None\n #print( \"coord\", coord )\n kms_per_radian = 6371.0088\n # Augmenter cette valeur augmente le nombre d'éléments dans un cluster et change les résultats\n epsilon = 2 / kms_per_radian\n # Adapter le nombre de clusters (min_sample) au nombre d'entités dans array ?\n db = DBSCAN( eps=epsilon, min_samples=1, algorithm='ball_tree',\n metric='haversine' ).fit( np.radians( coord ) )\n cluster_labels = db.labels_\n #print( \"cluster\", cluster_labels )\n num_clusters = len( set( cluster_labels ) )\n #print( \"num clusters\", num_clusters )\n counts = np.bincount( cluster_labels )\n #print( \"count\", counts )\n maxi = np.argmax( counts )\n #print( \"maxi\", maxi )\n itemindex = np.where( cluster_labels == maxi )[0]\n #print( \"itemindex\", itemindex )\n\n lat: List[float] = [float( result_nominatim[index]['lat'] ) for index in itemindex]\n lon: List[float] = [float( result_nominatim[index]['lon'] ) for index in itemindex]\n\n # on récupère la moyenne des coordonnées du plus gros cluster. Cette moyenne équivaut au centroide :\n # https://gis.stackexchange.com/questions/12120/calculate-midpoint-from-a-series-of-latitude-and-longitude-coordinates\n\n average = {\"lat\": sum( lat ) / len( lat ), \"lon\": sum( lon ) / len( lon )}\n\n #print( list( zip( cluster_labels, [x['display_name'] for x in results] ) ) )\n #print( \"plus proche de moyenne\", closest( results, average ) )\n return closest( result_nominatim, average )",
"def segment_euclidean_length(segmented_img, objects):\n # Store debug\n debug = params.debug\n params.debug = None\n\n x_list = []\n y_list = []\n segment_lengths = []\n rand_color = color_palette(len(objects))\n\n\n labeled_img = segmented_img.copy()\n\n for i, cnt in enumerate(objects):\n # Store coordinates for labels\n x_list.append(objects[i][0][0][0])\n y_list.append(objects[i][0][0][1])\n\n # Draw segments one by one to group segment tips together\n finding_tips_img = np.zeros(segmented_img.shape[:2], np.uint8)\n cv2.drawContours(finding_tips_img, objects, i, (255, 255, 255), 1, lineType=8)\n segment_tips = find_tips(finding_tips_img)\n tip_objects, tip_hierarchies = find_objects(segment_tips, segment_tips)\n points = []\n if not len(tip_objects) == 2:\n fatal_error(\"Too many tips found per segment, try pruning again\")\n for t in tip_objects:\n # Gather pairs of coordinates\n x, y = t.ravel()\n coord = (x, y)\n points.append(coord)\n\n # Draw euclidean distance lines\n cv2.line(labeled_img, points[0], points[1], rand_color[i], 1)\n\n # Calculate euclidean distance between tips of each contour\n segment_lengths.append(euclidean(points[0], points[1]))\n\n segment_ids = []\n # Put labels of length\n for c, value in enumerate(segment_lengths):\n text = \"{:.2f}\".format(value)\n w = x_list[c]\n h = y_list[c]\n cv2.putText(img=labeled_img, text=text, org=(w, h), fontFace=cv2.FONT_HERSHEY_SIMPLEX,\n fontScale=params.text_size, color=(150, 150, 150), thickness=params.text_thickness)\n segment_label = \"ID\" + str(c)\n segment_ids.append(c)\n\n outputs.add_observation(variable='segment_eu_length', trait='segment euclidean length',\n method='plantcv.plantcv.morphology.segment_euclidean_length', scale='pixels', datatype=list,\n value=segment_lengths, label=segment_ids)\n\n # Reset debug mode\n params.debug = debug\n # Auto-increment device\n params.device += 1\n\n if params.debug == 'print':\n print_image(labeled_img, os.path.join(params.debug_outdir, str(params.device) + '_segment_eu_lengths.png'))\n elif params.debug == 'plot':\n plot_image(labeled_img)\n\n return labeled_img",
"def spatial_query(self, z_draws, z_cm, dz_thresh=0.01):\n n_obj = len(self)\n\n dr = np.zeros_like(self['ra'])\n dd = dr * 0.\n aspect = dr * 0.\n\n projected_density = np.zeros(n_obj)\n\n roots = np.unique(self['root'])\n\n dr_cm, dr_sep, dr_area = self.separation_radius(z_cm)\n\n for root in tqdm(roots):\n # Get mask for unique object from root\n rsel = np.where(self['root'] == root)[0]\n\n # Find number of unique objects with this root\n n_r = len(rsel)\n\n # Find the center of the field for this root\n r0 = np.median(self['ra'][rsel])\n d0 = np.median(self['dec'][rsel])\n\n # Calculate offsets from the center for each object\n dr[rsel] = (self['ra'][rsel] - r0) * \\\n np.cos(self['dec'][rsel] / 180 * np.pi) * 3600\n dd[rsel] = (self['dec'][rsel] - d0) * 3600\n aspect[rsel] = ((dr[rsel].max() - dr[rsel].min()) /\n (dd[rsel].max() - dd[rsel].min()))\n\n points = np.array([dr[rsel], dd[rsel]]).T\n\n # Compose cKDTree\n tree = cKDTree(points)\n\n if not hasattr(dr_sep, 'unit'):\n # Find matches between the tree and itself if separation\n # defined in arcsec\n i0 = tree.query_ball_tree(tree, r=dr_sep)\n else:\n # Have to compute scale at each redshift\n scale = np.interp(self['z_map'][rsel], z_cm,\n dr_cm.to(dr_sep.unit / u.arcsec).value)\n\n for j in range(n_r):\n z_j = z_draws[rsel[j], :]\n\n if not(hasattr(dr_sep, 'unit')):\n k = i0[j]\n else:\n dr_arcsec = dr_sep.value / scale[j]\n k = tree.query_ball_point(points[j], r=dr_arcsec)\n\n z_k = z_draws[rsel[k], :]\n\n dz_jk = (z_j - z_k)/(1 + z_j)\n w_k = (np.abs(dz_jk) < dz_thresh).sum(axis=1)/z_draws.shape[-1]\n\n # Summed weights, -1 because i0[j] includes j\n projected_density[rsel[j]] = (w_k.sum()-1)/dr_area\n\n # Now take out some redshift dependence since low-z things seem to\n # have higher densities but similar contrast\n # print(np.interp(1.8, z_cm, (dr_sep/dr_cm).value))\n\n rescale = np.interp(1.8, z_cm, (dr_sep/dr_cm).value) / \\\n np.interp(self['z_map'], z_cm, (dr_sep/dr_cm).value)\n scaled_proj_dens = projected_density * rescale\n\n return scaled_proj_dens, dr, dd",
"def estimate_nc(self):\n mol = self.m\n torsma = '[!$(*#*)&!D1]~[!$(*#*)&!D1]'\n q = Chem.MolFromSmarts(torsma)\n matches = mol.GetSubstructMatches(q)\n nmat = len(matches)\n #torsions = []\n\n # since mostly the molecules concerned here are amons\n # with N_I <=7, we care about 3- to 7-membered rings\n atsr = _get_ring_nodes(mol,3,7,F)\n #print ' -- atsr = ', atsr\n inrs = np.zeros(self.na, dtype=int) # [this atom is] in [how many] number of rings\n for ia in self.ias_heav:\n _sets = []\n for _ats in atsr:\n if ia in _ats:\n _sets.append(_ats)\n #print ' -- ia, _sets = ', ia, _sets\n inr = find_number_of_unique_set(_sets)\n inrs[ia] = inr\n #print ' -- inrs = ', inrs\n if nmat == 0:\n ns = [1]\n if self.debug: print(' |__ ns = ', ns)\n nc = 1\n self.nc = nc\n else:\n ns = []; patts = []\n scale = 0\n for match in matches:\n j = match[0]\n k = match[1]\n cb = set([j,k])\n bond = mol.GetBondBetweenAtoms(j, k)\n aj = mol.GetAtomWithIdx(j)\n ak = mol.GetAtomWithIdx(k)\n hj, hk = [ _hyb[_a.GetHybridization()] for _a in [aj,ak] ]\n iok1 = (hj != 2); iok2 = (hj != 3)\n iok3 = (hk != 2); iok4 = (hk != 3)\n if (iok1 and iok2) or (iok3 and iok4): continue\n\n # do not allow internal rotation about two adjacent sp2 atoms are in a ring\n if inrs[j] and inrs[k] and hj==2 and hk==2: continue\n\n pjk = []\n jk = [j,k]\n hsjk = [hj,hk]\n for _ in range(2):\n ia1 = jk[_]\n ia2 = j if ia1==k else k\n hyb = hsjk[_]\n nbrs = np.setdiff1d(self.ias[self.bom[ia1]>0], [ia2])\n ihs = (self.zs[nbrs]==1)\n if np.all(ihs): # case 'a', e.g., 'a1','a2','a3'\n # check ~X-CH3, ~X-NH2, ...\n nh = len(ihs)\n if hyb==3:\n # for rotor X-C in ~X-CH3, one torsion is allowed\n sn = {1:'a3', 2:'a2', 3:'a1'}[nh]\n else: # hyb==2\n sn = {1:'a2', 2:'a1', 3:'a1'}[nh]\n else: # case 'b', e.g., 'b1','b2','b3'\n inr = inrs[ia1]\n if self.cns[ia1]==2 and inr: # e.g., O<, S<, Se<,\n sn = 1\n else:\n if hyb==3:\n sn = 2 if inr <= 1 else 1 # {0:'b3', 1:'b3', 2:'b2', 3:'b1', 4:'b1'}[inr]\n else: # hyb==2:\n sn = 'b2' if inr == 0 else 'b1'\n #sn = {0:'b2', 1:'b1', 2:'b1', 3:'b1'}[inr]\n _patt = '%d%s'%(hyb,sn)\n pjk.append(_patt)\n #print 'j,k = ', j,k, ', pjk = ', pjk\n nci = min([ int(patt[-1]) for patt in pjk ]) # ndic[patt]; sci = scdic[patt]\n if nci > 1:\n ns.append( nci )\n if not np.any([inrs[j],inrs[k]]):\n scale += 1\n if scale == 0: scale = 1\n nc = np.int(np.floor(np.product(ns))) * scale #* 2\n self.nc = nc if nc > 99 else 99\n if self.debug: print(' |__ ns = ', ns)\n if self.debug: print(' |__ scale = %d, nc = %d'%(scale, nc))\n self.ns = np.array(ns, np.int)",
"def cluster_separation(self, cluster, clustering, clustering_cohesion, condensed_distance_matrix):\n if clustering_cohesion > 0:\n weight = 1./clustering_cohesion\n sep_and_cohe = 0.0\n ## I'm inside?\n where_am_i = clustering.cluster_index(cluster)\n\n for i in range(len(clustering.clusters)):\n if i != where_am_i :\n c_j = clustering.clusters[i]\n sep_and_cohe = sep_and_cohe + self.__between_cluster_distance(cluster,c_j,condensed_distance_matrix)\n return weight*sep_and_cohe\n else:\n return 0. # not defined in this case, could be numpy.finfo(numpy.float32).max",
"def compute_in_radius(self, boids_in_radius):\r\n \r\n avg_velocity = Vector(*np.zeros(2))\r\n center_of_mass = Vector(*np.zeros(2))\r\n avg_vector = Vector(*np.zeros(2))\r\n total = 0\r\n for boid in boids_in_radius:\r\n avg_velocity += boid.velocity # calculating average direction \r\n center_of_mass += boid.position # calculating center of mass\r\n total += 1\r\n distance = np.linalg.norm(boid.position - self.position)\r\n \r\n if self.position != boid.position:\r\n diff = self.position - boid.position\r\n diff /= distance # scaling with the distance in order to avoid closer boids with greater force \r\n avg_vector += diff # calculating repulsive force vector\r\n \r\n return avg_velocity, center_of_mass, avg_vector, total",
"def get_object_centers(data, north_offset, east_offset, drone_altitude, safety_distance):\n points = []\n for i in range(data.shape[0]):\n north, east, alt, d_north, d_east, d_alt = data[i, :]\n if alt + d_alt + safety_distance > drone_altitude:\n points.append([north - north_offset, east - east_offset])\n return points;",
"def contract_tenors(self):\n\n\tself.r_outer_r[:,:,0,1,:] = self.r_outer_r[:,:,0,1,:]/(1. - self.k_dot_r[0,1,:])\n\tself.r_outer_r[:,:,0,2,:] = self.r_outer_r[:,:,0,2,:]/(1. - self.k_dot_r[0,2,:])\n\t\n\tself.r_outer_r[:,:,1,0,:] = self.r_outer_r[:,:,1,0,:]/(1. - self.k_dot_r[1,0,:])\n\tself.r_outer_r[:,:,1,2,:] = self.r_outer_r[:,:,1,2,:]/(1. - self.k_dot_r[1,2,:])\n\t\n\tself.r_outer_r[:,:,2,0,:] = self.r_outer_r[:,:,2,0,:]/(1. - self.k_dot_r[2,0,:])\n\tself.r_outer_r[:,:,2,1,:] = self.r_outer_r[:,:,2,1,:]/(1. - self.k_dot_r[2,1,:])\n\n\tself.delta_l = np.zeros((3,3,self.N),dtype=np.complex_)\n \n\tself.delta_l[0,1,:] = get_l(self,0,1)\n\tself.delta_l[1,0,:] = get_l(self,1,0)\n\t\n\tself.delta_l[0,2,:] = get_l(self,0,2)\n\tself.delta_l[2,0,:] = get_l(self,2,0)\n\t\n\tself.delta_l[1,2,:] = get_l(self,1,2)\n\tself.delta_l[2,1,:] = get_l(self,2,1)\n \n\treturn",
"def exportCircles(self):\n # Remember to compute circumcircles if not done before\n # for t in self.triangles:\n # self.circles[t] = self.circumcenter(t)\n\n # Filter out triangles with any vertex in the extended BBox\n # Do sqrt of radius before of return\n return [(self.circles[(a, b, c)][0], sqrt(self.circles[(a, b, c)][1]))\n for (a, b, c) in self.triangles if a > 3 and b > 3 and c > 3]",
"def get_dihedral_angles(self):\n mol = self.m\n c1 = mol.GetConformer(-1)\n torsma = '[!$(*#*)&!D1]~[!$(*#*)&!D1]'\n q = Chem.MolFromSmarts(torsma)\n matches = mol.GetSubstructMatches(q)\n nmat = len(matches)\n dic = {}\n for match in matches:\n j = match[0]\n k = match[1]\n bond = mol.GetBondBetweenAtoms(j, k)\n aj = mol.GetAtomWithIdx(j)\n ak = mol.GetAtomWithIdx(k)\n hj, hk = [ _hyb[_a.GetHybridization()] for _a in [aj,ak] ]\n iok1 = ( hj not in [2,3] )\n iok2 = ( hk not in [2,3] )\n if iok1 or iok2: continue\n for b1 in aj.GetBonds():\n if (b1.GetIdx() == bond.GetIdx()):\n continue\n i = b1.GetOtherAtomIdx(j)\n for b2 in ak.GetBonds():\n if (b2.GetIdx() == bond.GetIdx()) or (b2.GetIdx() == b1.GetIdx()):\n continue\n l = b2.GetOtherAtomIdx(k)\n # skip 3-membered rings\n if (l == i):\n continue\n _dang = rdMolTransforms.GetDihedralDeg(c1, i,j,k,l)\n dang = abs(_dang)\n assert dang <= 180.0\n ias4 = (i,j,k,l)\n if not self.wH:\n if np.any([ self.zs[iaa]==1 for iaa in ias4 ]):\n continue\n if self.key in ['z']:\n #print('atsi=',ias4, 'zsi=', [_zs[iaa] for iaa in ias4])\n zi,zj,zk,zl = [ self.zs[iaa] for iaa in ias4 ]\n if (zj==zk and zi>zl) or (zj>zk):\n ias4 = (l,k,j,i)\n #torsions.append(ias4)\n #_zi,_zj,_zk,_zl = [ zs[_] for _ in ias4 ]\n #typez = '%d-%d-%d-%d'%(_zi,_zj,_zk,_zl)\n type4 = tuple([self.zs[iaa] for iaa in ias4])\n if type4 in list(dic.keys()):\n dic[type4] += [dang]\n else:\n dic[type4] = [dang]\n elif self.key in ['ia','i']:\n type4 = ias4\n dic[type4] = dang\n else:\n raise Exception('#unknown key')\n return dic",
"def split(self,i):\n alpha = 0.6\n eps = 2.6\n\n if self.n > self.maxn-3:\n print \"cannot refine any further\"\n return False\n \n # The son \n self.m[i] = self.m[i] / 4.0\n #self.h[i] = self.h[i] * alpha\n\n # Daughter 1\n self.r[self.n] = self.r[i] + eps*np.array([0,1])\n self.m[self.n] = self.m[i] \n self.v[self.n] = self.v[i]\n \n # Daughter 2\n self.r[self.n+1] = self.r[i] + eps*np.array([0.866025,-0.5])\n self.m[self.n+1] = self.m[i] \n self.v[self.n+1] = self.v[i]\n \n # Daughter 3\n self.r[self.n+2] = self.r[i] + eps*np.array([-0.866025,-0.5])\n self.m[self.n+2] = self.m[i] \n self.v[self.n+2] = self.v[i]\n \n self.n = self.n+3\n #print \"There are now \",self.n,\"particles\"\n return True",
"def fix_sphere_m (center_x, center_y, center_z, radius, centers, radii, len_points):\n \n g_x = []\n g_y = []\n g_z = []\n points = [hydrogen_coord_gen(center_x, center_y, center_z, radius) for i in range(0, len_points)] \n x = [points[i][0] for i in range(0, len(points))] \n y = [points[i][1] for i in range(0, len(points))]\n z = [points[i][2] for i in range(0, len(points))]\n\n for i in range(0, len(points)):\n check = 0\n j = 0\n while (j <= (len(centers) - 1) and (check == 0)): \n if (calculate_3D_distance_2_centers(x[i], y[i], z[i], centers[j][0], centers[j][1], centers[j][2]) < radii[j]):\n check += 1\n j += 1\n if (check == 0):\n g_x.append(x[i])\n g_y.append(y[i])\n g_z.append(z[i])\n\n return g_x, g_y, g_z",
"def addSeparatorFeature(self):\n \n # graphical separators\n dNS = {\"pc\":PageXml.NS_PAGE_XML}\n someNode = self.lNode[0]\n ndPage = someNode.node.xpath(\"ancestor::pc:Page\", namespaces=dNS)[0]\n lNdSep = ndPage.xpath(\".//pc:SeparatorRegion\", namespaces=dNS)\n loSep = [ShapeLoader.node_to_LineString(_nd) for _nd in lNdSep]\n \n if self.bVerbose: traceln(\" %d graphical separators\"%len(loSep))\n\n # make an indexed rtree\n idx = index.Index()\n for i, oSep in enumerate(loSep):\n idx.insert(i, oSep.bounds)\n \n # take each edge in turn and list the separators it crosses\n nCrossing = 0\n for edge in self.lEdge:\n # bottom-left corner to bottom-left corner\n oEdge = geom.LineString([(edge.A.x1, edge.A.y1), (edge.B.x1, edge.B.y1)])\n prepO = prep(oEdge)\n lCrossingPoints = []\n fSepTotalLen = 0\n for i in idx.intersection(oEdge.bounds):\n # check each candidate in turn\n oSep = loSep[i]\n if prepO.intersects(oSep):\n fSepTotalLen += oSep.length\n oPt = oEdge.intersection(oSep)\n if type(oPt) != geom.Point:\n traceln('Intersection in not a point: skipping it')\n else:\n lCrossingPoints.append(oPt)\n \n if lCrossingPoints:\n nCrossing += 1\n edge.bCrossingSep = True\n edge.sep_NbCrossing = len(lCrossingPoints)\n minx, miny, maxx, maxy = geom.MultiPoint(lCrossingPoints).bounds\n edge.sep_SpanLen = abs(minx-maxx) + abs(miny-maxy)\n edge.sep_AvgSpanSgmt = edge.sep_SpanLen / len(lCrossingPoints) \n edge.sep_AvgSepLen = fSepTotalLen / len(lCrossingPoints)\n else:\n edge.bCrossingSep = False\n edge.sep_NbCrossing = 0\n edge.sep_SpanLen = 0\n edge.sep_AvgSpanSgmt = 0 \n edge.sep_AvgSepLen = 0\n \n #traceln((edge.A.domid, edge.B.domid, edge.bCrossingSep, edge.sep_NbCrossing, edge.sep_SpanLen, edge.sep_AvgSpanSgmt, edge.sep_AvgSepLen))\n \n \n if self.bVerbose: \n traceln(\" %d (/ %d) edges crossing at least one graphical separator\"%(nCrossing, len(self.lEdge)))",
"def _cluster_segments_all_way(self, segmented_instances, labels, \\\n end_points, stats, cluster_thresh=0.5):\n\n #self.showme(segmented_instances, 'main img')\n segment_association_list = []\n max_num_end_points= 0\n\n # for each stem segment\n for i in range(0, len(labels)):\n # each end point in the current segment i\n if max_num_end_points < len(end_points[i]):\n max_num_end_points = len(end_points[i])\n for k in range(0, len(end_points[i])):\n angle_list=[]\n # find the segment that is most likely connected to segment i at end point[i][k]\n for j in range(0, len(labels)):\n # make sure we are not trying to connect the segment to itself\n if i!= j:\n # angle calculates the angle between the line stats['centroid'][i]-end_points[i][k]\n # and stats['centroid'][i]-stats['centroid'][j]\n\n angle = self._ang([stats['centroid'][i],end_points[i][k]], \\\n [stats['centroid'][i], stats['centroid'][j]] )\n # if the angle value is within the acceptable range of +/- angle_thresh\n if angle<=self.angle_thresh or angle>=360-self.angle_thresh:\n other_angle, other_seg_section, end_point_dist = self._get_best_fit(segmented_instances, \\\n len(labels), \\\n stats, end_points,\\\n i, j, k, pos_angle=angle<=self.angle_thresh)\n # if the best fit segment also has a small angle between its\n # end point-centroid line and centroid-centroid line,\n # add it to segments connected to segment i\n if other_angle!=None and other_angle<=self.angle_thresh:\n angle_list.append((j, other_seg_section, other_angle, end_point_dist, angle))\n #Sort the list of stem segments connected to i by end_point_dist\n angle_list = sorted(angle_list, key=lambda x:x[3])\n #Sorting by the Euclidian distance of the end_point_dist and the other_angle does not change end result\n #angle_list = sorted(angle_list, key=lambda x:(math.sqrt(x[3]**2.0+x[2]**2.0)))\n # the angle value reflects how far segment k is from the straight line\n # going through the centroids\n if len(angle_list)>0:\n # (i, j, k, l, angle between i and centroid line, angle between j and centroid line, distance between closest end points k in seg i and l in seg j)\n segment_association_list.append((i,angle_list[0][0],k, angle_list[0][1], angle_list[0][4], angle_list[0][2], angle_list[0][3]))\n\n\n # sort slope differences in an increasing order\n segment_association_list = sorted(segment_association_list,key=lambda x:(x[6]))\n\n # find best match by iteretively selecting the smallest difference\n # and adding it to the ith cluster\n cluster_list = []\n cluster = np.full(len(labels),None)\n colored_clusterImg = np.zeros(segmented_instances.shape).astype(np.uint8)\n #clusterImg = np.zeros(segmented_instances.shape).astype(np.uint8)\n\n # initialize cluster list to single clusters contianing only each individual segment\n for i in range(0, len(labels)):\n cluster[i]=i\n cluster_list.append([i])\n #self.showme(clusterImg, str(i))\n\n visited=np.full((len(labels),max_num_end_points), False)\n\n #cluster=np.frompyfunc(list,1,1)(cluster) # allows us to append to only the specified list end_points[i]\n new_cluster_num=0\n color_offset=len(labels)\n\n # for each pair of segments in our list of best fit segments\n for curr_tuple in segment_association_list:\n img = np.zeros(segmented_instances.shape)\n i = curr_tuple[0] # index of first segment\n j = curr_tuple[1] # index of second segment in the tuple\n i_section = curr_tuple[2] #end point number in segment i\n j_section = curr_tuple[3] #end point number in segment j\n angle = curr_tuple[4]\n other_angle = curr_tuple[5]\n 
end_point_dist = curr_tuple[6] #distance between the connecting end points of segments i and j\n img[segmented_instances== i]= 255\n img[segmented_instances== j]= 255\n if (visited[i][i_section]==False)and(visited[j][j_section]==False):\n #cv2.line(clusterImg,(end_points[i][i_section][0],end_points[i][i_section][1]),\\\n # (end_points[j][j_section][0], end_points[j][j_section][1]),150,2)\n #self.showme(clusterImg, str(i))\n visited[i][i_section]=True\n visited[j][j_section]=True\n cluster_num = cluster[i]\n if cluster[i]!=cluster[j]:\n other_cluster_num = cluster[j]\n cluster_list[cluster_num] = list(set(cluster_list[cluster_num]+\\\n copy.deepcopy(cluster_list[other_cluster_num])))\n # update cluster numbers for all segments moved into new cluster\n for seg in cluster_list[other_cluster_num]:\n cluster[seg]=cluster_num\n # update cluster numbers for clusters larger than cluster to be removed\n for idx in range(0, len(cluster)):\n if (cluster[idx]>other_cluster_num):\n cluster[idx]= cluster[idx]-1\n del cluster_list[other_cluster_num]\n\n\n #show clustered segments\n color = 0\n cluster_num = 0\n cluster_mask=[]\n\n for c in cluster_list:\n color = color+0.1\n cluster_mask.append(np.zeros(segmented_instances.shape).astype(np.uint8))\n\n for i in c:\n cluster_mask[cluster_num][(segmented_instances == labels[i])]=1\n colored_clusterImg[(segmented_instances == labels[i])]= int(color*255)\n \"\"\"if self.key in ['../data/images/image1672', '../data/images/image1289']:\n self.showme(colored_clusterImg)\"\"\"\n cluster_num +=1\n\n return cluster_mask, colored_clusterImg",
"def excentricidad(self):\n\n centroide = self.find_centroid_cell()\n byw = cv2.imread('byw/imagen' + str(self.__name) + '.png')\n bordes = self.obtener_bordes(byw, self.__col_lista)\n lim = len(bordes)\n dis_centro = []\n for i in range(lim):\n punto = bordes[i]\n dis_centro.append(self.dist(centroide, punto))\n\n var = np.std(dis_centro)\n maximo = max(dis_centro)\n minimo = min(dis_centro)\n exce = minimo/maximo\n return exce, var",
"def getNeighbors(obj, separation, filter=\"all\"):\n if not isinstance(obj, Dso):\n if isinstance(obj, str):\n obj = Dso(obj)\n else:\n raise TypeError('Wrong type obj. Either a Dso or string type was expected.')\n if not (isinstance(separation, int) or isinstance(separation, float)):\n raise TypeError('Wrong type separation. Either a int or float type was expected.')\n\n cols = 'objects.name'\n tables = 'objects'\n params = 'type != \"Dup\" AND ra != \"\" AND dec != \"\" AND name !=\"' + obj.getName() + '\"'\n if filter.upper() == \"NGC\":\n params += \" AND name LIKE 'NGC%'\"\n elif filter.upper() == \"IC\":\n params += \" AND name LIKE 'IC%'\"\n\n neighbors = []\n for item in _queryFetchMany(cols, tables, params):\n possibleNeighbor = Dso(item[0])\n distance = getSeparation(obj, possibleNeighbor)[0]\n if distance <= (separation / 60):\n neighbors.append((possibleNeighbor, distance))\n\n return sorted(neighbors, key=lambda neighbor: neighbor[1])",
"def getCircleDiameter(self):\n segments = []\n for (i, p1) in enumerate(self.points):\n for p2 in self.points[i+1:]:\n segments.append(Segment(p1, p2))\n s = max(segments, key=lambda s: s.length)\n return Circle(*s.middle, radius=s.length/2)"
] | [
"0.7033934",
"0.6200771",
"0.61623174",
"0.58938783",
"0.57421154",
"0.5588339",
"0.557842",
"0.5490397",
"0.54819787",
"0.5392507",
"0.53449166",
"0.5322467",
"0.53214264",
"0.5309282",
"0.52883315",
"0.5286885",
"0.52770865",
"0.5261102",
"0.52358395",
"0.5215119",
"0.5184436",
"0.51774555",
"0.5170822",
"0.5156782",
"0.51567113",
"0.5121712",
"0.5113397",
"0.51052344",
"0.5103176",
"0.5091924"
] | 0.77854586 | 0 |
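Note (not part of the dataset rows): a minimal standalone sketch of the angular-to-physical conversion performed by the object_separations document above. The cosmology, cluster mass, redshift, and coordinates below are assumed illustrative values, not taken from any record.

import numpy as np
import astropy.units as u
from astropy.coordinates import SkyCoord
from astropy.cosmology import FlatLambdaCDM

cosmo = FlatLambdaCDM(H0=70, Om0=0.3)   # assumed cosmology
m500 = 3e14 * u.Msun                    # assumed cluster mass M500
z = 0.6                                 # assumed cluster redshift

# r500: radius within which the mean density is 500x the critical density at z
r500 = (3 * m500 / (4 * np.pi * 500 * cosmo.critical_density(z).to(u.Msun / u.Mpc ** 3))) ** (1 / 3)

# Angular separation between a hypothetical object and the SZ center
obj = SkyCoord(10.01, -45.0, unit=u.deg)
sz_center = SkyCoord(10.0, -45.0, unit=u.deg)
sep_arcmin = obj.separation(sz_center).to(u.arcmin)

# Physical separation expressed in units of r500 (dimensionless)
sep_r500 = (sep_arcmin / r500 * cosmo.kpc_proper_per_arcmin(z).to(u.Mpc / u.arcmin)).decompose()
print(sep_arcmin, sep_r500)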
Adds completeness simulation data to the catalog. Takes the completeness curve values for the cluster, interpolates a function between the discrete values, then queries that function at each object's selection-band magnitude to assign completeness values and completeness corrections to the catalog. | def completeness_value(self, selection_band='I2_MAG_APER4'):
# Load in the completeness simulation data from the file
if isinstance(self._completeness_results, list):
json_dicts = []
for comp_results in self._completeness_results:
with open(comp_results, 'r') as f:
json_dicts.append(json.load(f))
completeness_dict = dict(ChainMap(*json_dicts))
else:
with open(self._completeness_results, 'r') as f:
completeness_dict = json.load(f)
for cluster_id, cluster_info in self._catalog_dictionary.items():
# Array element names
se_catalog = cluster_info['catalog']
# Select the correct entry in the dictionary corresponding to our cluster.
completeness_data = completeness_dict[cluster_id]
# Also grab the magnitude bins used to create the completeness data (removing the last entry so we can
# broadcast our arrays correctly)
mag_bins = completeness_dict['magnitude_bins'][:-1]
# Interpolate the completeness data into a functional form using linear interpolation
completeness_funct = interp1d(mag_bins, completeness_data, kind='linear')
            # For each object's magnitude in the band specified by `selection_band`, query the completeness
            # function to find its completeness value.
completeness_values = completeness_funct(se_catalog[selection_band])
# The completeness correction values are defined as 1/[completeness value]
completeness_corrections = 1 / completeness_values
# Add the completeness values and corrections to the SExtractor catalog.
se_catalog['COMPLETENESS_VALUE'] = completeness_values
se_catalog['COMPLETENESS_CORRECTION'] = completeness_corrections
cluster_info['catalog'] = se_catalog | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def gen_cluster_accuracies():\n accuracies = {}\n with Parallel(n_jobs=morphs.parallel.N_JOBS) as parallel:\n for block_path in morphs.paths.blocks():\n print(block_path)\n spikes = morphs.load.ephys_data(block_path, collapse_endpoints=True)\n\n if len(spikes[\"recording\"].unique()) >= 1:\n template_spikes = spikes[spikes[\"stim_id\"].isin(list(\"abcdefgh\"))]\n assert len(template_spikes) > 0\n cluster_groups = template_spikes.groupby(\"cluster\")\n\n morph_dims = spikes.morph_dim.unique()\n morph_dims = morph_dims[~pd.isnull(morph_dims)]\n morph_dims.sort()\n\n max_num_reps = np.max(\n [\n len(stim_group.groupby(by=[\"recording\", \"stim_presentation\"]))\n for stim_id, stim_group in template_spikes.groupby(\"stim_id\")\n ]\n )\n\n accuracies_list = parallel(\n delayed(cluster_accuracy)(\n cluster, cluster_group, morph_dims, max_num_reps\n )\n for (cluster, cluster_group) in cluster_groups\n )\n\n accuracies[block_path] = pd.concat(accuracies_list)\n\n morphs.paths.PROCESSED_DIR.mkdir(parents=True, exist_ok=True)\n with open(morphs.paths.ACCURACIES_PKL.as_posix(), \"wb\") as f:\n pickle.dump(accuracies, f)",
"def calculate(self, catalogue, config, completeness=None):\n # Input checks\n cmag, ctime, ref_mag, dmag, config = input_checks(catalogue,\n config,\n completeness)\n ival = 0\n tolerance = 1E-7\n number_intervals = np.shape(ctime)[0]\n b_est = np.zeros(number_intervals, dtype=float)\n neq = np.zeros(number_intervals, dtype=float)\n nyr = np.zeros(number_intervals, dtype=float)\n\n for ival in range(0, number_intervals):\n id0 = np.abs(ctime - ctime[ival]) < tolerance\n m_c = np.min(cmag[id0])\n if ival == 0:\n id1 = np.logical_and(\n catalogue.data['year'] >= (ctime[ival] - tolerance),\n catalogue.data['magnitude'] >= (m_c - tolerance))\n nyr[ival] = float(catalogue.end_year) - ctime[ival] + 1.\n elif ival == number_intervals - 1:\n id1 = np.logical_and(\n catalogue.data['year'] < (ctime[ival - 1] - tolerance),\n catalogue.data['magnitude'] >= (m_c - tolerance))\n nyr[ival] = ctime[ival - 1] - ctime[ival]\n else:\n id1 = np.logical_and(\n catalogue.data['year'] >= (ctime[ival] - tolerance),\n catalogue.data['year'] < (ctime[ival - 1] - tolerance))\n id1 = np.logical_and(\n id1, catalogue.data['magnitude'] > (m_c - tolerance))\n nyr[ival] = ctime[ival - 1] - ctime[ival]\n neq[ival] = np.sum(id1)\n # Get a- and b- value for the selected events\n temp_rec_table = recurrence_table(catalogue.data['magnitude'][id1],\n dmag,\n catalogue.data['year'][id1])\n\n aki_ml = AkiMaxLikelihood()\n b_est[ival] = aki_ml._aki_ml(temp_rec_table[:, 0],\n temp_rec_table[:, 1],\n dmag, m_c)[0]\n ival += 1\n total_neq = np.float(np.sum(neq))\n bval = self._harmonic_mean(b_est, neq)\n sigma_b = bval / np.sqrt(total_neq)\n aval = self._calculate_a_value(bval, total_neq, nyr, cmag, ref_mag)\n sigma_a = self._calculate_a_value(bval + sigma_b, total_neq, nyr,\n cmag, ref_mag)\n\n if not config['reference_magnitude']:\n aval = np.log10(aval)\n sigma_a = np.log10(sigma_a) - aval\n else:\n sigma_a = sigma_a - aval\n return bval, sigma_b, aval, sigma_a",
"def harvest_coupled_cluster(lines, psivar):\n \"\"\"Sample (canonical) CCSD results block\"\"\"\n #----------------------\n #COUPLED CLUSTER ENERGY\n #----------------------\n #\n #E(0) ... -76.063720080\n #E(CORR) ... -0.288938791\n #E(TOT) ... -76.352658871\n #Singles Norm <S|S>**1/2 ... 0.021106262\n #T1 diagnostic ... 0.007462191\n #\n\n \"\"\"Sample DLPNO coupled cluster block (CCSD)\"\"\"\n #----------------------\n #COUPLED CLUSTER ENERGY\n #----------------------\n #\n #E(0) ... -76.026019996\n #E(CORR)(strong-pairs) ... -0.211953159\n #E(CORR)(weak-pairs) ... -0.000007244\n #E(CORR)(corrected) ... -0.211960403\n #E(TOT) ... -76.237980399\n #Singles Norm <S|S>**1/2 ... 0.014443573\n #T1 diagnostic ... 0.005106574\n #\n\n \"\"\"Sample CCSD(T) block (same for DLPNO and canonical)\"\"\"\n #\n #Triples Correction (T) ... -0.001544381\n #Final correlation energy ... -0.134770265\n #E(CCSD) ... -75.709548429\n #E(CCSD(T)) ... -75.711092810\n #\n\n cc_start = find_start(lines, 'COUPLED CLUSTER ENERGY')\n if cc_start == -1:\n return\n\n #psivar[\"CC REFERENCE\"] = float(lines[cc_start + 3].split()[-1])\n\n # CCSD energy block is less than 20 lines\n for i, line in enumerate(lines[cc_start:cc_start + 20], start=cc_start):\n if line[:6] == \"E(TOT)\":\n psivar[\"CCSD TOTAL ENERGY\"] = line.split()[-1]\n psivar[\"CCSD CORRELATION ENERGY\"] = lines[i-1].split()[-1]\n #psivar[\"SINGLES NORM\"] = lines[i+1].split()[-1]\n #psivar[\"T1 DIAGNOSTIC\"] = lines[i+2].split()[-1]\n break\n\n # CCSD(T) energy block\n for i, line in enumerate(lines[cc_start:], start=cc_start):\n if line[:22] == \"Triples Correction (T)\":\n #psivar[\"TRIPLES CORRELATION ENERGY\"] = line.split()[-1]\n psivar[\"CCSD(T) CORRELATION ENERGY\"] = lines[i+1].split()[-1]\n psivar[\"CCSD TOTAL ENERGY\"] = lines[i+2].split()[-1]\n psivar[\"CCSD(T) TOTAL ENERGY\"] = lines[i+3].split()[-1]\n break",
"def __init__(self, samples, dist_descriptions, timeout=None):\n self.dist_descriptions = dist_descriptions # Compute references this attribute at plot.py\n\n list_number_of_intervals = []\n list_width_of_intervals = []\n for dist_description in dist_descriptions:\n list_number_of_intervals.append(dist_description.get('number_of_intervals'))\n list_width_of_intervals.append(dist_description.get('width_of_intervals'))\n for dist_description in dist_descriptions:\n dist_description['list_number_of_intervals'] = list_number_of_intervals\n dist_description['list_width_of_intervals'] = list_width_of_intervals\n\n # Results will be computed for each dimension\n multiple_results = []\n self.multiple_fit_inspection_data = []\n distributions = []\n dependencies = []\n\n for dimension in range(len(samples)):\n dist_description = dist_descriptions[dimension]\n\n # Use multiprocessing if a timeout is defined.\n if timeout:\n pool = Pool()\n multiple_results.append(\n pool.apply_async(self._get_distribution,\n (dimension, samples),\n dist_description)\n )\n\n else:\n kwargs = dist_description\n distribution, dependency, used_number_of_intervals, \\\n fit_inspection_data = self._get_distribution(\n dimension=dimension,\n samples=samples,\n **kwargs)\n distributions.append(distribution)\n dependencies.append(dependency)\n\n # Save the used number of intervals\n for dep_index, dep in enumerate(dependency):\n if dep is not None:\n self.dist_descriptions[dep][\n 'used_number_of_intervals'] = \\\n used_number_of_intervals[dep_index]\n\n self.multiple_fit_inspection_data.append(fit_inspection_data)\n\n # If multiprocessing is used we have to collect the results differently.\n if timeout:\n # Define start time\n start_time = time.time()\n # Get distributions\n for i, res in enumerate(multiple_results):\n current_time = time.time()\n time_difference = current_time - start_time # Previous used time\n try:\n distribution, dependency, used_number_of_intervals, fit_inspection_data = res.get(\n timeout=timeout-time_difference)\n except TimeoutError:\n err_msg = \"The calculation takes too long. \" \\\n \"It takes longer than the given \" \\\n \"value for a timeout, \" \\\n \"which is '{} seconds'.\".format(timeout)\n raise TimeoutError(err_msg)\n\n # Saves distribution and dependency for particular dimension\n distributions.append(distribution)\n dependencies.append(dependency)\n\n # Add fit inspection data for current dimension\n self.multiple_fit_inspection_data.append(fit_inspection_data)\n\n # Save the used number of intervals\n for dep_index, dep in enumerate(dependency):\n if dep is not None:\n self.dist_descriptions[dep]['used_number_of_intervals'] = \\\n used_number_of_intervals[dep_index]\n\n # Add used number of intervals for dimensions with no dependency\n for fit_inspection_data in self.multiple_fit_inspection_data:\n if not fit_inspection_data.used_number_of_intervals:\n fit_inspection_data.used_number_of_intervals = 1\n\n # Save multivariate distribution\n self.mul_var_dist = MultivariateDistribution(distributions, dependencies)",
"def plot_closeness_heatmap(seqids,ali,delimiter=None,rename=None,pout=None,ddout=None,clustering=None,subtypes=None,log=False):\n if clustering==\"clustered\":\n pats,seq_dict=get_clustered_bins(seqids,ali,delimiter=delimiter,rename=rename)\n elif clustering==\"individual\":\n pats,seq_dict=get_individual_bins(seqids,ali,delimiter=delimiter,rename=rename)\n else:\n f=open(clustering,\"r\")\n seq_dict=pickle.load(f)\n pats=seq_dict.keys()\n dfDists, dfCount = get_closeness(pats,seq_dict,log=log)\n if subtypes==None:\n cg = sns.clustermap(dfDists,vmin=0,vmax=1,cmap=\"RdBu_r\",linewidths = 0.30,metric='cityblock')\n else:\n subtypes_pal = sns.color_palette(\"Set1\", n_colors=len(subtypes), desat=.99)\n subtypes_lut = dict(zip(map(str, subtypes), subtypes_pal))\n columnsNames = dfDists.columns.values\n rowsNames = dfDists.index.values\n colscolor=[]\n rowscolor=[]\n for i,name in enumerate(zip(columnsNames,rowsNames)):\n colsubtype=name[0].split(\"-\")[-1]\n rowsubtype=name[1].split(\"-\")[-1]\n try:\n colscolor.append(subtypes_lut[colsubtype])\n rowscolor.append(subtypes_lut[rowsubtype])\n except:\n print subtypes_lut.keys()\n raise KeyError(\"Query sequence title %s doesn't have one of the specified subtypes at the end followed by a '-'. Rename option can be used to add it\"%(name[0]))\n dfcolcolors=pd.DataFrame({'subtype':colscolor},index=columnsNames)\n dfrowcolors=pd.DataFrame({'subtype':rowscolor},index=rowsNames)\n cg = sns.clustermap(dfDists,vmin=0,vmax=1,cmap=\"RdBu_r\",linewidths = 0.30,metric='cityblock',col_colors=dfcolcolors, row_colors=dfrowcolors)\n for label in subtypes:\n cg.ax_col_dendrogram.bar(0, 0, color=subtypes_lut[label],label=label, linewidth=0)\n cg.ax_col_dendrogram.legend(loc=\"best\", bbox_to_anchor=(0, 1.2) ,ncol=1)\n if log:\n if pout!=None:\n cg.savefig(pout+\".log.png\")\n if ddout!=None:\n with open(\"%s.dendro.log.pkl\"%ddout,\"w\") as f:\n pickle.dump(cg,f)\n with open(\"%s.dataframe.log.pkl\"%out,\"w\") as f:\n pickle.dump(dfDists,f)\n idxr=cg.dendrogram_row.reordered_ind\n idxc=cg.dendrogram_col.reordered_ind\n dfDists, dfCount = get_closeness(pats,seq_dict)\n columnsNames = dfDists.columns.values\n rowsNames = dfDists.index.values\n columnsNames = [columnsNames[i] for i in idxc]\n rowsNames = [rowsNames[i] for i in idxr]\n dfDists=dfDists.reindex(columns=columnsNames,index=rowsNames)\n cg = sns.clustermap(dfDists,vmin=0,vmax=1,cmap=\"RdBu_r\",linewidths = 0.30,metric='cityblock',col_colors=dfcolcolors, row_colors=dfrowcolors, row_cluster=False,col_cluster=False)\n else:\n if pout!=None:\n cg.savefig(pout+\".png\")\n if ddout!=None:\n with open(\"%s.dendro.pkl\"%ddout,\"w\") as f:\n pickle.dump(cg,f)\n with open(\"%s.data.pkl\"%ddout,\"w\") as f:\n pickle.dump(dfDists,f)\n return dfDists, dfCount",
"def debunch(totals,cluster):\n new_totals = zeros_like(totals)\n nt_iter = new_totals.item_iter()\n ntv_iter = new_totals.var.item_iter()\n tot_iter = totals.item_iter()\n totv_iter = totals.var.item_iter()\n axis_iter = totals.axes[0].item_iter()\n cluster_begin = axis_iter.next()\n new_angle = cluster_begin\n total_intensity = total_variance = 0.0\n mean_angle = 0.0\n bunch_points = 0\n in_points = 0\n new_axis = []\n (cluster_size,cluster_mode) = cluster\n while True:\n distance = new_angle - cluster_begin\n if distance < cluster_size:\n total_intensity += tot_iter.next()\n total_variance += totv_iter.next()\n mean_angle +=new_angle\n bunch_points += 1\n try:\n new_angle = axis_iter.next()\n except:\n break\n else: #this point to far beyond beginning\n # for debugging\n \n nt_iter.next()\n ntv_iter.next()\n nt_iter.set_curr(total_intensity/bunch_points)\n ntv_iter.set_curr(total_variance/(bunch_points*bunch_points))\n new_axis.append(mean_angle/bunch_points)\n in_points += bunch_points\n # debugging\n #if in_points < 30:\n # print '%d: new_totals[0:50] = ' % in_points + `new_totals.storage[0:50]`\n # print '%d: total_intensity/bunch_points = %f/%f = %f' % (in_points,total_intensity,\n # bunch_points,total_intensity/bunch_points) \n # print '%d: mean angle %f' % (in_points,mean_angle/bunch_points)\n # re-initialise counters\n total_intensity = 0.0\n total_variance = 0.0\n mean_angle = 0.0\n bunch_points = 0\n #The while loop has not stepped the input iterators forward, so we now treat the same\n #point as we have just tested, but as last_point will now be the same, we will accumulate\n #it.\n cluster_begin = new_angle\n # Now we have finished, we just need to handle the last point\n nt_iter.next()\n ntv_iter.next()\n nt_iter.set_curr(total_intensity/bunch_points)\n ntv_iter.set_curr(total_variance/(bunch_points*bunch_points))\n new_axis.append(mean_angle/bunch_points)\n # Trim output arrays\n newlen = len(new_axis)\n print 'Clustered axis has length %d, running from %f to %f' % (newlen,new_axis[0],new_axis[-1])\n cluster_factor = 1.0*len(totals)/newlen\n print 'Cluster factor %d/%d = %f' % (len(totals),newlen,cluster_factor)\n new_totals = new_totals[:newlen]\n new_totals.copy_cif_metadata(totals)\n new_totals.set_axes([new_axis],anames=[totals.axes[0].name],aunits = [totals.axes[0].units])\n new_totals.title = totals.title\n info_string = \"Points within %f of one another were averaged (weighted)\" % cluster_size\n # Apply 'summation' - not real as those points with only one contributor are multiplied as well\n # The alternative would give very dodgy looking patterns!\n if cluster_mode == 'Sum':\n new_totals *= round(cluster_factor)\n info_string += ' and then multiplied by %d to simulate addition.' % round(cluster_factor)\n else:\n info_string += '.' #finish string nicely\n return new_totals,info_string",
"def evaluation_procedure(self, alpha):\n self.alpha_cluster_scoring_dir = str(Path(self.alpha_cluster_dir) / f'Evaluation_scoring' / f'Rain_days_1mm_and_above')\n os.makedirs(self.alpha_cluster_scoring_dir, exist_ok=True)\n\n print(f'<alpha-{alpha}> self.alpha_cluster_scoring_dir @: \\n{self.alpha_cluster_scoring_dir}')\n if utils.find(f'*Mean_brier_score*in_alpha_{alpha}*.png', self.alpha_cluster_scoring_dir) and \\\n utils.find('*Brier_scores_for_cluster_predictions*alpha-{alpha}*.png', self.alpha_cluster_scoring_dir) and \\\n len(utils.find(f'*clus_gt_*', self.alpha_cluster_dir)) == self.tl_model.optimal_k and \\\n len(utils.find(f'*clus_pred_*', self.alpha_cluster_dir)) == self.tl_model.optimal_k : \n pass\n else:\n print(f'Acquiring mean brier scores for each cluster in alpha-{alpha}!')\n evaluation.mean_brier_individual_alpha(self, alpha)\n\n\n if len(utils.find(f'*ROCs_for_alpha_{alpha}*.png', self.alpha_cluster_scoring_dir)) == (self.tl_model.optimal_k + 1): \n pass\n else:\n print('Plotting ROC curves for individual alpha-{alpha} now!')\n evaluation.ROC_AUC_individual_alpha(self, alpha)\n\n\n\n if utils.find(f'*Gridded_brier_individual_alpha_{alpha}_v2*.png', self.alpha_cluster_scoring_dir): \n pass\n else:\n print(f'{utils.time_now()} - Plotting gridded brier scores for individual alpha-{alpha}...')\n evaluation.gridded_brier_individual_alpha(self, alpha)\n\n # print(f'DEBUGGING: {utils.time_now()} - Plotting gridded brier scores for individual alpha-{alpha}...')\n # evaluation.gridded_brier_individual_alpha(self, alpha)\n\n\n\n \n if utils.find(f'*Gridded_AUC_individual_alpha_{alpha}_v2*.png', self.alpha_cluster_scoring_dir) and \\\n utils.find(f'*alpha_{alpha}_aucs.pkl', self.alpha_general_dir): \n pass\n else:\n print(f'{utils.time_now()} - Plotting gridded AUC for individual alpha-{alpha}...')\n evaluation.gridded_AUC_individual_alpha(self, alpha)\n\n # print(f'DEBUGGING: {utils.time_now()} - Plotting gridded AUC for individual alpha-{alpha}...')\n # evaluation.gridded_AUC_individual_alpha(self, alpha)\n\n\n\n print(f'Evaluation completed for raindays (1mm & above) predictions for alpha-{alpha}.')",
"def process(data, cluster_criteria, method = \"PP\", \\\n min_height = 0, pixel_size = 0, \\\n relax = 0, stop = 0, \\\n verbose = True, interactive = False,\n n_jobs = 1, nsteps = 1 ):\n\n#==============================================================================#\n \"\"\"\n Initial prep of key variables\n \"\"\"\n\n self = Acorns()\n start = time.time()\n\n # User input information\n self.cluster_criteria = cluster_criteria\n\n if np.size(relax) == 1:\n self.relax = relax if (relax != 0) else -1.0\n relaxcond = True if (relax != 0) else False\n else:\n self.relax = relax\n relaxcond = True\n\n if method == \"PP\":\n self.method = 0\n elif method == \"PPV\":\n self.method = 1\n elif method == \"PPP\":\n self.method = 2\n else:\n raise ValueError('method {0:s} unknown'.format(method))\n method = str(method)\n\n # Generate some important information:\n self.minnpix_cluster = get_minnpix(self, pixel_size, self.cluster_criteria[0])\n self.min_height = min_height\n self.max_dist = get_maxdist(self, pixel_size)\n self.cluster_criteria[0] = self.max_dist\n self.min_sep = 2.*self.cluster_criteria[0]\n self.nsteps = nsteps\n # Prime the acorns information:\n # cluster_arr will be updated with the indices of new clusters\n self.cluster_arr = gen_cluster_arr(self, data, stop)\n self.clusters = {}\n self.forest = {}\n\n#==============================================================================#\n \"\"\"\n Main controlling routine for acorns\n \"\"\"\n\n # Get the unassigned data array\n find_unassigned_data(self, data, stop)\n\n # Gen KDTree\n tree = generate_kdtree(self)\n\n # Generate the unassigned data array\n unassigned_array_length = len(self.unassigned_data[0,:])\n\n count= 0.0\n if verbose:\n progress_bar = print_to_terminal(self, 0, data, count, \\\n unassigned_array_length, method)\n\n # Cycle through the unassigned array\n starthierarchy = time.time()\n for i in range(0, unassigned_array_length):\n\n if verbose and (count % 1 == 0):\n progress_bar + 1\n progress_bar.show_progress()\n\n # Extract the current data point\n data_point = np.array(self.unassigned_data[:,i])\n # Retrieve this data point's location in the data array\n data_idx = get_data_index(self, data, data_point)\n self.cluster_arr[0,i] = int(data_idx)\n\n # Every data point begins as a new cluster\n self.cluster_idx = i\n bud_cluster = Cluster(data_point, data_idx, idx=self.cluster_idx, acorns=self)\n\n # Calculate distances between all data points\n link = get_links(self, i, i, tree, n_jobs)\n\n # Find clusters that are closely associated with the current data\n # point\n linked_clusters = find_linked_clusters(self, data, i, bud_cluster, link)\n\n if (self.method==1) & (len(linked_clusters) >= 1):\n linked_clusters = check_other_components(self, i, i, data_idx, data, linked_clusters, bud_cluster, tree, n_jobs, re=False)\n\n \"\"\"\n\n Notes\n -----\n\n Now try and merge this cluster with surrounding linked_clusters.\n From this point on there are three options for that data_point:\n\n 1. If no linked clusters are found - add the bud cluster to the\n cluster dictionary.\n 2. If a single linked cluster is found - merge the two.\n 3. If multiple linked clusters are found, check the validity of each\n cluster and either merge non-independent clusters or form a\n branch.\n\n This philosophy follows that of agglomerative hierarchical\n clustering techniques. The basic principle is discussed here:\n http://scikit-learn.org/stable/modules/clustering.html under\n \"2.3.6. 
Hierarchical Clustering\".\n\n A single link measure is used to connect clusters. The strategy is\n adapted from the general methods of:\n\n astrodendro:\n https://github.com/dendrograms/astrodendro\n Copyright (c) 2013 Thomas P. Robitaille, Chris Beaumont, Braden\n MacDonald, and Erik Rosolowsky\n quickclump:\n https://github.com/vojtech-sidorin/quickclump\n Copyright (c) 2016 Vojtech Sidorin\n\n When linking using the \"PPV\" methodology, single link measures may\n be insufficient and additional connectivity constraints are applied.\n Specifically - it is imposed that no two spectral features extracted\n from the same location can be merged into the same cluster.\n\n Additionally, an additional linking strategy is implemented which\n takes into account of the variance in the properties of the linked\n clusters (specifically those selected by the user). This is only\n implemented when trying to resolve ambiguities and is used as a way\n of establishing the \"strongest\" links when multiple spectral\n features have been detected.\n\n \"\"\"\n\n if not linked_clusters:\n add_to_cluster_dictionary(self, bud_cluster)\n elif len(linked_clusters) == 1:\n merge_into_cluster(self, data, linked_clusters[0], bud_cluster)\n else:\n resolve_ambiguity(self, data, linked_clusters, bud_cluster)\n\n if verbose:\n progress_bar.progress = 100\n progress_bar.show_progress()\n print('')\n print('')\n\n # Remove insignificant clusters from the clusters dictionary and update\n # the unassigned array\n cluster_list, cluster_indices = update_clusters(self, data)\n\n # Take a second pass at the data without relaxing the linking criteria\n # to pick up any remaining stragglers not linked during the first pass\n if (np.size(self.unassigned_data_updated)>1):\n cluster_list, cluster_indices = relax_steps(self, 0, data, method, verbose, tree, n_jobs, second_pass=True)\n endhierarchy = time.time()-starthierarchy\n\n#==============================================================================#\n \"\"\"\n Secondary controlling routine for acorns implemented if the linking\n criteria are relaxed by the user\n\n \"\"\"\n\n if relaxcond and (not interactive) and (np.size(self.unassigned_data_updated)>1):\n startrelax = time.time()\n inc = self.relax/self.nsteps\n cluster_criteria_original = cluster_criteria\n for j in range(1, self.nsteps+1):\n self.cluster_criteria = get_relaxed_cluster_criteria(j*inc, cluster_criteria_original)\n cluster_list, cluster_indices = relax_steps(self, j, data, method, verbose, tree, n_jobs, second_pass=False)\n endrelax = time.time()-startrelax\n\n elif interactive and (np.size(self.unassigned_data_updated)>1):\n startrelax = time.time()\n cluster_criteria_original = cluster_criteria\n #plotting.plot_scatter(self)\n stop = True\n while (not stop): #stop != False:\n self.relax = np.array(eval(input(\"Please enter relax values in list format: \")))\n print('')\n self.cluster_criteria = get_relaxed_cluster_criteria(self.relax, cluster_criteria_original)\n cluster_list, cluster_indices = relax_steps(self, j, data, method, verbose, tree, n_jobs, second_pass=False)\n #plotting.plot_scatter(self)\n s = str(input(\"Would you like to continue? 
\"))\n print('')\n stop = s in ['True', 'T', 'true', '1', 't', 'y', 'yes', 'Y', 'Yes']\n endrelax = time.time()-startrelax\n\n else:\n startrelax = time.time()\n endrelax = time.time()-startrelax\n\n#==============================================================================#\n \"\"\"\n Tidy everything up for output\n\n \"\"\"\n\n cluster_list, cluster_indices = update_clusters(self, data)\n io.reshape_cluster_array(self, data)\n get_forest(self, verbose)\n\n end = time.time()-start\n\n if verbose:\n print('acorns took {0:0.1f} seconds for completion.'.format(end))\n print('Primary clustering took {0:0.1f} seconds for completion.'.format(endhierarchy))\n if relaxcond==True:\n print('Secondary clustering took {0:0.1f} seconds for completion.'.format(endrelax))\n print('')\n print('acorns found a total of {0} clusters.'.format(len(self.clusters)))\n print('')\n print('A total of {0} data points were used in the search.'.format(len(self.unassigned_data[0,:])))\n print('A total of {0} data points were assigned to clusters.'.format(num_links(self)))\n if (np.size(self.unassigned_data_relax)>1):\n print('A total of {0} data points remain unassigned to clusters.'.format(len(self.unassigned_data_relax[0,:])))\n else:\n print('A total of 0 data points remain unassigned to clusters.')\n print('')\n\n io.housekeeping(self)\n\n return self",
"def add_computed_gas_concentrations(self):\n # Extract the z-coordinate and T, S, P profile\n zs = self.interp_ds.coords[self.ztsp[0]].values\n Ts = self.interp_ds[self.ztsp[1]].values\n Ss = self.interp_ds[self.ztsp[2]].values\n Ps = self.interp_ds[self.ztsp[3]].values\n \n # Create an air object\n air_names = ['nitrogen', 'oxygen', 'argon', 'carbon_dioxide']\n yk = np.array([0.78084, 0.20946, 0.009340, 0.00036])\n from tamoc import dbm\n air = dbm.FluidMixture(air_names)\n m = air.masses(yk)\n \n # Compute the concentrations adjusted for depth\n Cs = np.zeros((len(zs), len(air_names)))\n for i in range(len(zs)):\n Cs[i,:] = air.solubility(m, Ts[i], 101325., Ss[i])[0,:] * \\\n seawater.density(Ts[i], Ss[i], Ps[i]) / \\\n seawater.density(Ts[i], Ss[i], 101325.)\n \n # Make sure none of these gases are already in the measured profile\n for name in air_names:\n if name in self.interp_ds:\n air_names[air_names.index(name)] = 'computed_' + name\n \n # Add these data to the Profile object\n data = np.hstack((np.atleast_2d(zs).transpose(), Cs))\n names = [self.ztsp[0]] + air_names \n units = [self.ztsp_units[0]] + 4*['kg/m^3']\n self.append(data, names, units)\n \n # Rebuild the interpolator\n self._build_interpolator()",
"def SC_generation(hourly_radiation, prop_observers, number_groups, weather_data, g, Sz, Az, ha, Tin_C, height,\n panel_properties, latitude):\n\n\n n0 = panel_properties['n0']\n c1 = panel_properties['c1']\n c2 = panel_properties['c2']\n mB0_r = panel_properties['mB0_r']\n mB_max_r = panel_properties['mB_max_r']\n mB_min_r = panel_properties['mB_min_r']\n C_eff = panel_properties['C_eff']\n t_max = panel_properties['t_max']\n IAM_d = panel_properties['IAM_d']\n Aratio = panel_properties['aperture_area_ratio']\n Apanel = panel_properties['module_area']\n dP1 = panel_properties['dP1']\n dP2 = panel_properties['dP2']\n dP3 = panel_properties['dP3']\n dP4 = panel_properties['dP4']\n Cp_fluid_JperkgK = panel_properties['Cp_fluid'] # J/kgK\n\n # create lists to store results\n list_results = [None] * number_groups\n list_areas_groups = [None] * number_groups\n Sum_mcp_kWperC = np.zeros(8760)\n Sum_qout_kWh = np.zeros(8760)\n Sum_Eaux_kWh = np.zeros(8760)\n Sum_qloss = np.zeros(8760)\n Sum_radiation_kWh = np.zeros(8760)\n\n Tin_array_C = np.zeros(8760) + Tin_C\n aperature_area_per_module = Aratio * Apanel\n total_area_module = prop_observers['total_area_module'].sum() # total area for panel installation\n\n # calculate equivalent length of pipes\n lv = panel_properties['module_length'] # module length\n number_modules = round(total_area_module/Apanel) # this is an estimation\n l_ext_mperm2 = (2 * lv * number_modules/ (total_area_module * Aratio)) # pipe length within the collectors\n l_int_mperm2 = 2 * height / (total_area_module * Aratio) # pipe length from building substation to roof top collectors\n Leq_mperm2 = l_int_mperm2 + l_ext_mperm2 # in m/m2 aperture\n\n if panel_properties['type'] == 'ET': # for evacuated tubes\n Nseg = 100 # default number of subsdivisions for the calculation\n else:\n Nseg = 10 # default number of subsdivisions for the calculation\n\n for group in range(number_groups):\n # load panel angles from group\n teta_z = prop_observers.loc[group, 'surface_azimuth'] # azimuth of panels of group\n area_per_group = prop_observers.loc[group, 'total_area_module']\n tilt_angle_deg = prop_observers.loc[group, 'tilt'] # tilt angle of panels\n\n # create dataframe with irradiation from group\n\n radiation_Wh = pd.DataFrame({'I_sol': hourly_radiation[group]})\n radiation_Wh['I_diffuse'] = weather_data.ratio_diffhout * radiation_Wh.I_sol # calculate diffuse radiation\n radiation_Wh['I_direct'] = radiation_Wh['I_sol'] - radiation_Wh['I_diffuse'] # calculate direct radiation\n radiation_Wh.fillna(0, inplace=True) # set nan to zero\n\n # calculate incidence angle modifier for beam radiation\n IAM_b = calc_IAM_beam_SC(Az, g, ha, teta_z, tilt_angle_deg, panel_properties['type'], Sz, latitude)\n\n # calculate heat production from a solar collector of each group\n list_results[group] = calc_SC_module(tilt_angle_deg, IAM_b, IAM_d, radiation_Wh.I_direct,\n radiation_Wh.I_diffuse, weather_data.drybulb_C, n0,\n c1, c2, mB0_r, mB_max_r, mB_min_r, C_eff, t_max,\n aperature_area_per_module, dP1, dP2, dP3, dP4,\n Cp_fluid_JperkgK, Tin_C, Leq_mperm2, l_ext_mperm2,\n l_int_mperm2, Nseg)\n\n\n # multiplying the results with the number of panels in each group and write to list\n number_modules_per_group = area_per_group / Apanel\n list_areas_groups[group] = area_per_group\n radiation_array = hourly_radiation[group] * list_areas_groups[group] / 1000 # kWh\n Sum_qout_kWh = Sum_qout_kWh + list_results[group][1] * number_modules_per_group\n Sum_Eaux_kWh = Sum_Eaux_kWh + list_results[group][2] * 
number_modules_per_group\n Sum_qloss = Sum_qloss + list_results[group][0] * number_modules_per_group\n Sum_mcp_kWperC = Sum_mcp_kWperC + list_results[group][5] * number_modules_per_group\n Sum_radiation_kWh = Sum_radiation_kWh + radiation_Wh['I_sol']*area_per_group/1000\n\n Tout_group_C = (Sum_qout_kWh / Sum_mcp_kWperC) + Tin_C # in C assuming all collectors are connected in parallel\n\n Final = pd.DataFrame(\n {'Q_SC_gen_kWh': Sum_qout_kWh, 'T_SC_sup_C': Tin_array_C, 'T_SC_re_C': Tout_group_C, 'mcp_SC_kWperC': Sum_mcp_kWperC, 'Eaux_SC_kWh': Sum_Eaux_kWh,\n 'Q_SC_l_kWh': Sum_qloss, 'Area_SC_m2': sum(list_areas_groups), 'radiation_kWh': Sum_radiation_kWh}, index=range(8760))\n\n return list_results, Final",
"def completeness(self, completeness_fcn=None, completeness_integrals=None):\n if completeness_integrals is None:\n from scipy.integrate import quad\n completeness_integrals = [quad(completeness_fcn, x0, x1, maxp1=200)[0]/(x1-x0) for x0, x1 in \n zip(self.bin_edges[:-1], self.bin_edges[1:])]\n logging.debug(completeness_integrals)\n \n self.complete_vals = np.array([v/I for v, I in zip(self.raw_vals, completeness_integrals)])\n self.complete_low = np.array([v/I for v, I in zip(self.raw_low, completeness_integrals)])\n self.complete_high = np.array([v/I for v, I in zip(self.raw_high, completeness_integrals)])\n return self.complete_vals - self.raw_vals",
"def evaluation_cc(self, property='clustering-coeff'):\n\n if property == 'clustering-coeff':\n rw_cc = [np.mean(clustering_coef_wu(self.rw_data[t])) for t in range(0, self.T)]\n smth_cc = [np.mean(clustering_coef_wu(self.smth_data[t])) for t in range(0, self.T)]\n elif property == 'transitivity':\n rw_cc = [np.mean(transitivity_wu(self.rw_data[t])) for t in range(0, self.T)]\n smth_cc = [np.mean(transitivity_wu(self.smth_data[t])) for t in range(0, self.T)]\n elif property == 'coreness':\n rw_cc = [np.mean(core.core_periphery_dir(self.rw_data[t])) for t in range(0, self.T)]\n smth_cc = [np.mean(core.core_periphery_dir(self.smth_data[t])) for t in range(0, self.T)]\n elif property == 'assortativity':\n rw_cc = [np.mean(core.assortativity_wei(self.rw_data[t], 0)) for t in range(0, self.T)]\n smth_cc = [np.mean(core.assortativity_wei(self.smth_data[t], 0)) for t in range(0, self.T)]\n elif property == 'modularity':\n rw_cc, _ = get_number_of_components(self.rw_data)\n smth_cc, _ = get_number_of_components(self.smth_data)\n elif property == 'path_length':\n rw_cc = [charpath(rw)[0] for rw in self.rw_data]\n smth_cc = [charpath(sm)[0] for sm in self.smth_data]\n\n # rw_cc_ent = get_entropy_list(rw_cc)\n # smth_cc_ent = get_entropy_list(smth_cc)\n\n return rw_cc, smth_cc",
"def __init__(self, data1, data2, tail = 'two', significant_level=0.05):\r\n Critical_05 = pd.DataFrame({'2': [-1.0, -1.0, -1.0, -1.0, -1.0, -1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.0, 2.0, 2.0, 2.0, 3.0, 3.0, 3.0, 3.0, 3.0, 4.0, 4.0, 4.0, 4.0, 5.0, 5.0, 5.0, 5.0, 5.0, 6.0, 6.0, 6.0, 6.0, 7.0, 7.0] ,\r\n '3': [-1.0, -1.0, -1.0, 0.0, 1.0, 1.0, 2.0, 2.0, 3.0, 3.0, 4.0, 4.0, 5.0, 5.0, 6.0, 6.0, 7.0, 7.0, 8.0, 8.0, 9.0, 9.0, 10.0, 10.0, 11.0, 11.0, 12.0, 13.0, 13.0, 14.0, 14.0, 15.0, 15.0, 16.0, 16.0, 17.0, 17.0, 18.0, 18.0] ,\r\n '4': [-1.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 11.0, 12.0, 13.0, 13.0, 15.0, 16.0, 17.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, 31.0] ,\r\n '5': [-1.0, 0.0, 1.0, 2.0, 3.0, 5.0, 6.0, 7.0, 8.0, 9.0, 11.0, 12.0, 13.0, 14.0, 15.0, 17.0, 18.0, 19.0, 20.0, 22.0, 23.0, 24.0, 25.0, 27.0, 28.0, 29.0, 30.0, 32.0, 33.0, 34.0, 35.0, 37.0, 38.0, 39.0, 40.0, 41.0, 43.0, 44.0, 45.0] ,\r\n '6': [-1.0, 1.0, 2.0, 3.0, 5.0, 6.0, 8.0, 10.0, 11.0, 13.0, 14.0, 16.0, 17.0, 19.0, 21.0, 22.0, 24.0, 25.0, 27.0, 29.0, 30.0, 32.0, 33.0, 35.0, 37.0, 38.0, 40.0, 42.0, 43.0, 45.0, 46.0, 48.0, 50.0, 51.0, 53.0, 55.0, 56.0, 58.0, 59.0] ,\r\n '7': [-1.0, 1.0, 3.0, 5.0, 6.0, 8.0, 10.0, 12.0, 14.0, 16.0, 18.0, 20.0, 22.0, 24.0, 26.0, 28.0, 30.0, 32.0, 34.0, 36.0, 38.0, 40.0, 42.0, 44.0, 46.0, 48.0, 50.0, 52.0, 54.0, 56.0, 58.0, 60.0, 62.0, 64.0, 66.0, 68.0, 70.0, 72.0, 74.0] ,\r\n '8': [0, 2, 4, 6, 7, 10, 13, 15, 17, 19, 22, 24, 26, 29, 31, 34, 36, 38, 41, 43, 45, 48, 50, 53, 55, 57, 60, 62, 65, 67, 69, 72, 74, 77, 79, 81, 84, 86, 89] ,\r\n '9': [0, 2, 4, 7, 10, 12, 15, 17, 20, 23, 26, 28, 31, 34, 37, 39, 42, 45, 48, 50, 53, 56, 59, 62, 64, 67, 70, 73, 76, 78, 81, 84, 87, 89, 92, 95, 98, 101, 103] ,\r\n '10': [0, 3, 5, 8, 11, 14, 17, 20, 23, 26, 29, 33, 36, 39, 42, 45, 48, 52, 55, 58, 61, 64, 67, 71, 74, 77, 80, 83, 87, 90, 93, 96, 99, 103, 106, 109, 112, 115, 119] ,\r\n '11': [0, 3, 6, 9, 13, 16, 19, 23, 26, 30, 33, 37, 40, 44, 47, 51, 55, 58, 62, 65, 69, 73, 76, 80, 83, 87, 90, 94, 98, 101, 105, 108, 112, 116, 119, 123, 127, 130, 134] ,\r\n '12': [1, 4, 7, 11, 14, 18, 22, 26, 29, 33, 37, 41, 45, 49, 53, 57, 61, 65, 69, 73, 77, 81, 85, 89, 93, 97, 101, 105, 109, 113, 117, 121, 125, 129, 133, 137, 141, 145, 149] ,\r\n '13': [1, 4, 8, 12, 16, 20, 24, 28, 33, 37, 41, 45, 50, 54, 59, 63, 67, 72, 76, 80, 85, 89, 94, 98, 102, 107, 111, 116, 120, 125, 129, 133, 138, 142, 147, 151, 156, 160, 165] ,\r\n '14': [1, 5, 9, 13, 17, 22, 26, 31, 36, 40, 45, 50, 55, 59, 64, 67, 74, 78, 83, 88, 93, 98, 102, 107, 112, 117, 122, 127, 131, 136, 141, 146, 151, 156, 161, 165, 170, 175, 180] ,\r\n '15': [1, 5, 10, 14, 19, 24, 29, 34, 39, 44, 49, 54, 59, 64, 70, 75, 80, 85, 90, 96, 101, 106, 111, 117, 122, 127, 132, 138, 143, 148, 153, 159, 164, 169, 174, 180, 185, 190, 196] ,\r\n '16': [1, 6, 11, 15, 21, 26, 31, 37, 42, 47, 53, 59, 64, 70, 75, 81, 86, 92, 98, 103, 109, 115, 120, 126, 132, 137, 143, 149, 154, 160, 166, 171, 177, 183, 188, 194, 200, 206, 211] ,\r\n '17': [2, 6, 11, 17, 22, 28, 34, 39, 45, 51, 57, 63, 67, 75, 81, 87, 93, 99, 105, 111, 117, 123, 129, 135, 141, 147, 154, 160, 166, 172, 178, 184, 190, 196, 202, 209, 215, 221, 227] ,\r\n '18': [2, 7, 12, 18, 24, 30, 36, 42, 48, 55, 61, 67, 74, 80, 86, 93, 99, 106, 112, 119, 125, 132, 138, 145, 151, 158, 164, 171, 177, 184, 190, 197, 203, 210, 216, 223, 230, 236, 243] ,\r\n '19': [2, 7, 13, 19, 25, 32, 38, 45, 52, 58, 65, 72, 78, 85, 92, 99, 106, 113, 119, 126, 
133, 140, 147, 154, 161, 168, 175, 182, 189, 196, 203, 210, 217, 224, 231, 238, 245, 252, 258] ,\r\n '20': [2, 8, 14, 20, 27, 34, 41, 48, 55, 62, 69, 76, 83, 90, 98, 105, 112, 119, 127, 134, 141, 149, 156, 163, 171, 178, 186, 193, 200, 208, 215, 222, 230, 237, 245, 252, 259, 267, 274] \r\n })\r\n\r\n Critical_1 = pd.DataFrame({'2': [-1.0, -1.0, -1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 2.0, 2.0, 2.0, 3.0, 3.0, 3.0, 4.0, 4.0, 4.0, 5.0, 5.0, 5.0, 6.0, 6.0, 6.0, 7.0, 7.0, 7.0, 7.0, 8.0, 8.0, 8.0, 9.0, 9.0, 9.0, 10.0, 10.0, 10.0, 11.0] ,\r\n '3': [-1.0, -1.0, 0.0, 1.0, 2.0, 2.0, 3.0, 3.0, 4.0, 5.0, 5.0, 6.0, 7.0, 7.0, 8.0, 9.0, 9.0, 10.0, 11.0, 11.0, 12.0, 13.0, 13.0, 14.0, 15.0, 15.0, 16.0, 17.0, 17.0, 18.0, 19.0, 19.0, 20.0, 21.0, 21.0, 22.0, 23.0, 23.0, 24.0] ,\r\n '4': [-1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0, 38.0, 39.0] ,\r\n '5': [0, 1, 2, 4, 5, 6, 8, 9, 11, 12, 13, 15, 16, 18, 19, 20, 22, 23, 25, 26, 28, 29, 30, 32, 33, 35, 36, 38, 39, 40, 42, 43, 45, 46, 48, 49, 50, 52, 53] ,\r\n '6': [0, 2, 3, 5, 7, 8, 10, 12, 14, 16, 17, 19, 21, 23, 25, 26, 28, 30, 32, 34, 36, 37, 39, 41, 43, 45, 46, 48, 50, 52, 54, 56, 57, 59, 61, 63, 65, 67, 68] ,\r\n '7': [0, 2, 4, 6, 8, 11, 13, 15, 17, 19, 21, 24, 26, 28, 30, 33, 35, 37, 39, 41, 44, 46, 48, 50, 53, 55, 57, 59, 61, 64, 66, 68, 70, 73, 75, 77, 79, 82, 84] ,\r\n '8': [1, 3, 5, 8, 10, 13, 15, 18, 20, 23, 26, 28, 31, 33, 36, 39, 41, 44, 47, 49, 52, 54, 57, 60, 62, 65, 68, 70, 73, 76, 78, 81, 84, 86, 89, 91, 94, 97, 99] ,\r\n '9': [1, 3, 6, 9, 12, 15, 18, 21, 24, 27, 30, 33, 36, 39, 42, 45, 48, 51, 54, 57, 60, 63, 66, 69, 72, 75, 78, 82, 85, 88, 91, 94, 97, 100, 103, 106, 109, 112, 115] ,\r\n '10': [1, 4, 7, 11, 14, 17, 20, 24, 27, 31, 34, 37, 41, 44, 48, 51, 55, 58, 62, 65, 68, 72, 75, 79, 82, 86, 89, 93, 96, 100, 103, 107, 110, 114, 117, 121, 124, 128, 131] ,\r\n '11': [1, 5, 8, 12, 16, 19, 23, 27, 31, 34, 38, 42, 46, 50, 54, 57, 61, 65, 69, 73, 77, 81, 85, 89, 92, 96, 100, 104, 108, 112, 116, 120, 124, 128, 131, 135, 139, 143, 147] ,\r\n '12': [2, 5, 9, 13, 17, 21, 26, 30, 34, 38, 42, 47, 51, 55, 60, 64, 68, 72, 77, 81, 85, 90, 94, 98, 103, 107, 111, 116, 120, 124, 128, 133, 137, 141, 146, 150, 154, 159, 163] ,\r\n '13': [2, 6, 10, 15, 19, 24, 28, 33, 37, 42, 47, 51, 56, 61, 65, 70, 75, 80, 84, 89, 94, 98, 103, 108, 113, 117, 122, 127, 132, 136, 141, 146, 151, 156, 160, 165, 170, 175, 179] ,\r\n '14': [2, 7, 11, 16, 21, 26, 31, 36, 41, 46, 51, 56, 61, 66, 71, 77, 82, 87, 92, 97, 102, 107, 113, 118, 123, 128, 133, 138, 144, 149, 154, 159, 164, 170, 175, 180, 185, 190, 196] ,\r\n '15': [3, 7, 12, 18, 23, 28, 33, 39, 44, 50, 55, 61, 66, 72, 77, 83, 88, 94, 100, 105, 111, 116, 122, 128, 133, 139, 144, 150, 156, 161, 167, 172, 178, 184, 189, 195, 201, 206, 212] ,\r\n '16': [3, 8, 14, 19, 25, 30, 36, 42, 48, 54, 60, 65, 71, 77, 83, 89, 95, 101, 107, 113, 119, 125, 131, 137, 143, 149, 156, 162, 168, 174, 180, 186, 192, 198, 204, 210, 216, 222, 228] ,\r\n '17': [3, 9, 15, 20, 26, 33, 39, 45, 51, 57, 64, 70, 77, 83, 89, 96, 102, 109, 115, 121, 128, 134, 141, 147, 154, 160, 167, 173, 180, 186, 193, 199, 206, 212, 219, 225, 232, 238, 245] ,\r\n '18': [4, 9, 16, 22, 28, 35, 41, 48, 55, 61, 68, 75, 82, 88, 95, 102, 109, 116, 123, 130, 136, 143, 150, 157, 164, 171, 178, 185, 192, 199, 206, 212, 219, 226, 233, 240, 247, 254, 261] ,\r\n '19': [4, 10, 17, 23, 30, 37, 44, 51, 58, 65, 72, 80, 87, 94, 
101, 109, 116, 123, 130, 138, 145, 152, 160, 167, 174, 182, 189, 196, 204, 211, 218, 226, 233, 241, 248, 255, 263, 270, 278] ,\r\n '20': [4, 11, 18, 25, 32, 39, 47, 54, 62, 69, 77, 84, 92, 100, 107, 115, 123, 130, 138, 146, 154, 161, 169, 177, 185, 192, 200, 208, 216, 224, 231, 239, 247, 255, 263, 271, 278, 286, 294] })\r\n \r\n self.critical05 = Critical_05\r\n self.critical1 = Critical_1\r\n\r\n # Mann Whitney Test \r\n x = np.asarray(data1)\r\n y = np.asarray(data2)\r\n n1 = len(x)\r\n n2 = len(y)\r\n ranked = rankdata(np.concatenate((x, y)))\r\n rankx = ranked[0:n1] # get the x-ranks\r\n u1 = n1*n2 + (n1*(n1+1))/2.0 - np.sum(rankx, axis=0) # calc U for x\r\n u2 = n1*n2 - u1 # remainder is U for y\r\n\r\n # use the min(u1, u2) as u-stat\r\n if u1 <= u2:\r\n stat_a, larger = u1, 1\r\n else:\r\n stat_a, larger = u2, 2\r\n\r\n # compute the effect size \r\n effect = 1 - (2*stat_a)/(n1*n2) \r\n\r\n # Mann-Whitney test \r\n if min(n1, n2) < 2: # sample size too small - cannot do test\r\n return 'Sorry, sample size is too small to test significance. Please collect more data...'\r\n\r\n # Do test for small sample size \r\n elif 2<=min(n1, n2) <= 20 and 2 <= max(n1, n2) <= 40:\r\n if tail != 'two': # only have data for two tail testing\r\n return 'Sorry, sample size too small, only two-tailed test available...'\r\n\r\n u_05 = Critical_05[str(min(n1, n2))][max(n1, n2)-2] # u=critical at signif level .05\r\n u_1 = Critical_1[str(min(n1, n2))][max(n1, n2)-2] # u=critical at signif level .1\r\n\r\n if significant_level == 0.05 and stat_a <= u_05:\r\n self.significance = True\r\n self.sample_size = 'Small'\r\n self.n1 = n1\r\n self.n2 = n2\r\n self.criticalu = u_05\r\n self.u = stat_a\r\n self.effectsize = effect\r\n self.largergroup = larger\r\n elif significant_level == 0.1 and stat_a <= u_1:\r\n self.significance = True\r\n self.sample_size = 'Small'\r\n self.n1 = n1\r\n self.n2 = n2\r\n self.criticalu = u_1\r\n self.u = stat_a\r\n self.effectsize = effect\r\n self.largergroup = larger\r\n elif significant_level == 0.05:\r\n self.significance = False\r\n self.sample_size = 'Small'\r\n self.n1 = n1\r\n self.n2 = n2\r\n self.criticalu = u_05\r\n self.u = stat_a\r\n self.effectsize = effect\r\n self.largergroup = larger\r\n else:\r\n self.significance = False\r\n self.sample_size = 'Small'\r\n self.n1 = n1\r\n self.n2 = n2\r\n self.criticalu = u_1\r\n self.u = stat_a\r\n self.effectsize = effect\r\n self.largergroup = larger\r\n\r\n else:\r\n T = tiecorrect(ranked)\r\n sd = np.sqrt(T * n1 * n2 * (n1+n2+1) / 12.0)\r\n \r\n if T == 0:\r\n raise ValueError('All numbers are identical in mannwhitneyu')\r\n meanrank = n1*n2/2.0 + 0.5 \r\n\r\n if tail == 'two':\r\n bigu = max(u1, u2)\r\n elif tail == 'less':\r\n bigu = u1\r\n elif tail == 'more':\r\n bigu = u2\r\n z = (bigu - meanrank) / sd\r\n \r\n if tail == 'two':\r\n p = 2 * norm.sf(abs(z))\r\n else:\r\n p = norm.sf(z)\r\n if p <= significant_level:\r\n self.significance = True\r\n else:\r\n self.significance = False\r\n \r\n self.sample_size = 'Large'\r\n self.n1 = n1\r\n self.n2 = n2\r\n self.p = p\r\n self.u = stat_a\r\n self.effectsize = effect\r\n self.largergroup = larger",
"def fit(self):\n self.cluseter_agglomerative(n_clusters=20, linkage='average', iterate=5)\n self.sub_clustering(n_clusters=3, index_cluster=[79], linkage='complete')\n self.merge_clusters([[0,9,53],[1,83],[46,35,67],[88,23],[6,68]])\n self.merge_clusters([[6,33,52],[17,14]])\n self.sub_clustering(n_clusters=2, index_cluster=[0], linkage='average')\n self.sub_clustering(n_clusters=3, index_cluster=[2], linkage='average')\n self.sub_clustering(n_clusters=3, index_cluster=[85], linkage='average')\n self.sub_clustering(n_clusters=2, index_cluster=[14], linkage='complete')\n self.sub_clustering(n_clusters=2, index_cluster=[16], linkage='average')\n self.sub_clustering(n_clusters=3, index_cluster=[22], linkage='average')\n self.sub_clustering(n_clusters=2, index_cluster=[24], linkage='complete')\n self.sub_clustering(n_clusters=2, index_cluster=[26], linkage='complete')\n self.sub_clustering(n_clusters=3, index_cluster=[28], linkage='ward')\n self.merge_clusters([[6,98,99]])\n self.merge_clusters([[35,80]])\n self.sub_clustering(n_clusters=4, index_cluster=[35], linkage='complete')\n self.merge_clusters([[76,98]])\n self.sub_clustering(n_clusters=3, index_cluster=[35], linkage='complete')\n self.merge_clusters([[39,42]])\n self.sub_clustering(n_clusters=3, index_cluster=[47], linkage='complete')\n self.sub_clustering(n_clusters=3, index_cluster=[51], linkage='average')\n self.merge_clusters([[70,101]])\n self.sub_clustering(n_clusters=3, index_cluster=[51], linkage='complete')\n self.sub_clustering(n_clusters=3, index_cluster=[61], linkage='ward')\n self.merge_clusters()\n return",
"def test(dist_param, picker_param, iters):\n orig = '/home/zby/MAGISTERKA/MGR/results/oryginal.clustered.t'\n cl_orig = read_clustered(orig)\n name_tag = ''\n ndist = dist_param[1:]\n npick = picker_param[1:]\n for index in drange(4, 20, 0.5):\n name_tag = \"{}_{}_{}\".format(index, npick, ndist)\n tf_conf = configs.TfidfConfig(\n root_name('all_merged.txt', None),\n tfidf_name('merged.stem{}.stop', name_tag),\n tfidf_name('merged.stem{}.stop.txt', name_tag),\n None,\n tfidf_name('merged.stem{}.tfidf', name_tag),\n 10,\n 0,\n None)\n execute(tf_conf)\n tf_conf = configs.TfidfConfig(\n root_name('all_merged.txt', None),\n None,\n tfidf_name('merged.stem{}.stop.txt', name_tag),\n tfidf_name('merged.stem{}.stop', name_tag),\n tfidf_name('merged.stem{}.stop.tfidf', name_tag),\n None,\n None,\n None)\n execute(tf_conf)\n #input, out, picker, distance, iterations,\n clust_cfg = configs.ClusteringConfig(\n tfidf_name('merged.stem{}.stop.tfidf', name_tag),\n tfidf_name('merged.stem{}.stop.clustered.t', name_tag),\n picker_param,\n dist_param,\n iters,\n None\n )\n execute(clust_cfg)\n clust2 = read_clustered(tfidf_name('merged.stem{}.stop.clustered.t', name_tag))\n var, norm = variation_of_information(cl_orig, clust2)\n print(\"**** FOR var {} VOI is {}\".format(name_tag, norm))",
"def read_drainage_efficiency(self):#, PLOT, FIGURE, DISTRIBUTION):\n\n print ' Reading drainage efficiency'\n\n self.drainage_efficiency = {}\n\n drainage = np.zeros(self.ATTM_nrows * self.ATTM_ncols)\n\n for i in range(0, self.ATTM_nrows * self.ATTM_ncols):\n if self.ATTM_Total_Fractional_Area[i] > 0.0 :\n if self.Terrestrial['Drainage_Efficiency_Distribution'].lower() == 'random':\n chance = random.random()\n if chance > self.Terrestrial['Drainage_Efficiency_Random_Value']:\n self.drainage_efficiency[i] = 'above'\n drainage[i] = 1.\n else:\n self.drainage_efficiency[i] = 'below'\n drainage[i] = 2. # redundant, but explicit\n elif self.Terrestrial['Drainage_Efficiency_Distribution'].lower() == 'above':\n self.drainage_efficiency[i] = 'above'\n drainage[i] = 1.\n elif self.Terrestrial['Drainage_Efficiency_Distribution'].lower() == 'below':\n self.drainage_efficiency[i] = 'below'\n drainage[i] = 2.\n else: \n self.drainage_efficiency[i] = 'none'\n drainage[i] =0.\n\n print ' done.'\n print ' '\n\n # ==================================================\n # Create desired output files, figures, and plots\n # ==================================================\n if self.Terrestrial['Drainage_Efficiency_Figure'].lower() == 'yes':\n # -------------------------\n # Move to output directory\n # -------------------------\n if self.Simulation_area.lower() == 'barrow':\n os.chdir(self.control['Run_dir']+self.Output_directory+'/Barrow')\n\n # -----------------------\n # Create desired output\n # -----------------------\n drainage = np.reshape(drainage, [self.ATTM_nrows, self.ATTM_ncols])\n\n fig = pl.figure()\n pl.imshow(drainage, interpolation='nearest', cmap='bone')\n pl.colorbar( extend = 'max', shrink = 0.92)\n pl.title('Drainage efficiency')\n pl.savefig('./Initialization/Drainage_efficiency.png', format = 'png')\n drainage.tofile('./Initialization/Drainage_efficiency.bin')\n pl.close()\n\n os.chdir(self.control['Run_dir'])",
"def draw_data(self, method='linear', number_of_contours=10):\r\n if self.data is not None:\r\n # Coordinates for points to interpolate to\r\n xi, yi = np.mgrid[-1:1:100j, -1:1:100j]\r\n\r\n # Electrode positions for data to interpolate from\r\n points = []\r\n for electrode in self.data.index:\r\n name = TopoPlot.normalize_electrode_name(electrode)\r\n points.append(ELECTRODES[name])\r\n\r\n # Interpolate\r\n # TODO: Will not work with 2 electrodes.\r\n zi = griddata(points, self.data.values, (xi, yi), method=method)\r\n\r\n # Defaults\r\n if number_of_contours is None:\r\n number_of_contours = 10\r\n\r\n # Draw\r\n plt.contourf(xi, yi, zi, number_of_contours)\r\n\r\n # TODO: center\r",
"def check_cdfIntegrity(self, step):\n # Selecting bins automatically:\n x_max = self.onpower_train.max().values[0]\n x_min = 0\n step = 1\n x_onpower = np.arange(x_min, x_max, step).reshape(-1, 1)\n\n x_max = 0\n x_min = self.offpower_train.min().values[0]\n step = 1\n x_offpower = np.arange(x_min, x_max, step).reshape(-1, 1)\n\n x_max = self.duration_train.max().values[0]\n x_min = 0\n step = 1\n x_duration = np.arange(x_min, x_max, step).reshape(-1, 1)\n\n # Evaluating score for:\n # Onpower\n y_onpower = self.__pdf2(self.onpower, x_onpower)\n print(\"Onpower cdf: \" + str(y_onpower.sum()))\n\n # Offpower\n y_offpower = self.__pdf2(self.offpower, x_offpower)\n print(\"Offpower cdf: \" + str(y_offpower.sum()))\n\n # duration\n y_duration = self.__pdf2(self.duration, x_duration)\n print(\"Duration cdf: \" + str(y_duration.sum()))\n\n # Plots:\n # fig1 = plt.figure()\n # ax1 = fig1.add_subplot(311)\n # ax2 = fig1.add_subplot(312)\n # ax3 = fig1.add_subplot(313)\n\n # ax1.plot(x_onpower, y_onpower)\n # ax1.set_title(\"PDF CDF: Onpower\")\n # ax1.set_ylabel(\"density\")\n # ax1.set_xlabel(\"Watts\")\n\n # ax2.plot(x_offpower, y_offpower)\n # ax2.set_title(\" PDF CDF: Offpower\")\n # ax2.set_ylabel(\"denisty\")\n # ax2.set_xlabel(\"Watts\")\n\n # ax3.plot(x_duration, y_duration)\n # ax3.set_title(\"PDF CDF: Duration\")\n # ax3.set_ylabel(\"density\")\n # ax3.set_xlabel(\"Seconds\")",
"def contingency(self, scale, distrib=True, dataname=''):\n print 'Generating the plot ...'\n\n cont = np.zeros((scale, scale))\n minLat, maxLat, minLon, maxLon = self.city[1]\n normLat = scale / (maxLat - minLat)\n normLon = scale / (maxLon - minLon)\n\n # syn = (index, rel index, class)\n for i in range(self.dataset.shape[0]):\n posy = int(((self.dataset[i][0] - minLat) * normLat))\n posx = int(((self.dataset[i][1] - minLon) * normLon))\n # print posx,posy,data[i][0],data[i][1], normLat, normLon\n try:\n if distrib:\n cont[scale - posy - 1, posx - 1] += 1\n else:\n cont[scale - posy - 1, posx - 1] = 1\n except IndexError:\n print self.dataset[i][0], self.dataset[i][1]\n if distrib:\n cont = cont / np.max(cont)\n\n fig = plt.figure()\n\n ax = fig.add_subplot(111)\n plt.title('Density ')\n\n plt.imshow(cont, interpolation='bicubic', cmap=cm.gist_yarg)\n vmax = np.max(cont)\n # vmin=np.min(cont)\n\n if distrib:\n plt.colorbar(ticks=np.round(np.linspace(0, 1, 10), 2),\n orientation='vertical')\n nfile = self.application + '-' + dataname\n\n fig.savefig(homepath + 'Results/' + self.city[2] + '-' + nfile + '.pdf', orientation='landscape', format='pdf')\n\n #plt.show()",
"def evaluate(self, clustering):\n # Pca for each one of the clusters\n pca_mean_val = 0.;\n MAX_ELEMENTS = 1000\n for c in clustering.clusters:\n # Pick the coordinates (ensuring that we are copying them)\n element_indexes = c.all_elements\n ###################\n # Performance hack\n ###################\n # As it can be very slow for big clusters (i.e. > 3k elements) we'll compress this clusters \n # before calculating PCA. It should increase variance but will allow calculations.\n # It should use the kmedoids compressor\n if len(c.all_elements) > MAX_ELEMENTS:\n element_indexes = c.get_random_sample(MAX_ELEMENTS)\n print \"[PCA] Random sampling too big cluster to improve performance (%d elements -> %d elements).\"%(len(c.all_elements),MAX_ELEMENTS)\n ###################\n \n fitting_coordinates_of_this_cluster = self.fitting_coordinates[element_indexes]\n \n calculator = RMSDCalculator(calculatorType = \"QTRFIT_SERIAL_CALCULATOR\",\n fittingCoordsets = fitting_coordinates_of_this_cluster)\n \n if self.calculation_coordinates is not None:\n calculation_coordinates_of_this_cluster = self.calculation_coordinates[element_indexes]\n calculator = RMSDCalculator(calculatorType = \"QTRFIT_SERIAL_CALCULATOR\",\n fittingCoordsets = fitting_coordinates_of_this_cluster,\n calculationCoordsets = calculation_coordinates_of_this_cluster)\n \n # Make an iterative superposition (to get the minimum RMSD of all with respect to a mean conformation)\n calculator.iterativeSuperposition()\n\n # Calculate the covariance matrix\n if self.calculation_coordinates is None:\n covariance_matrix = PCAMetric.create_covariance_matrix(fitting_coordinates_of_this_cluster)\n else:\n covariance_matrix = PCAMetric.create_covariance_matrix(calculation_coordinates_of_this_cluster)\n \n # And then the eigenvalue we are interested in\n pca_mean_val += PCAMetric.calculate_biggest_eigenvalue(covariance_matrix)\n print \"PCA finished\"\n return pca_mean_val /clustering.total_number_of_elements",
"def __init__(self):\n super().__init__()\n self.upperBoundUsed = False # True if the distribution is right truncated\n self.lowerBoundUsed = False # True if the distribution is left truncated\n self.hasInfiniteBound = False # True if the untruncated distribution has bounds of +- system max\n self.upperBound = None # Right bound\n self.lowerBound = None # Left bound\n self.__adjustmentType = '' # this describe how the re-normalization to preserve the probability should be done for truncated distributions\n self.dimensionality = None # Dimensionality of the distribution (1D or ND)\n self.distType = None # Distribution type (continuous or discrete)\n self.memory = False # This variable flags if the distribution has history dependence in the sampling process (True) or not (False)\n self.printTag = 'DISTRIBUTIONS'\n self.preferredPolynomials = None # best polynomial for probability-weighted norm of error\n self.preferredQuadrature = None # best quadrature for probability-weighted norm of error\n self.compatibleQuadrature = [] #list of compatible quadratures\n self.convertToDistrDict = {} #dict of methods keyed on quadrature types to convert points from quadrature measure and domain to distribution measure and domain\n self.convertToQuadDict = {} #dict of methods keyed on quadrature types to convert points from distribution measure and domain to quadrature measure and domain\n self.measureNormDict = {} #dict of methods keyed on quadrature types to provide scalar adjustment for measure transformation (from quad to distr)\n self.convertToDistrDict['CDFLegendre'] = self.CDFconvertToDistr\n self.convertToQuadDict ['CDFLegendre'] = self.CDFconvertToQuad\n self.measureNormDict ['CDFLegendre'] = self.CDFMeasureNorm\n self.convertToDistrDict['CDFClenshawCurtis'] = self.CDFconvertToDistr\n self.convertToQuadDict ['CDFClenshawCurtis'] = self.CDFconvertToQuad\n self.measureNormDict ['CDFClenshawCurtis'] = self.CDFMeasureNorm",
"def toy_sbm2clusters_1Dinterpolation( graph_qt,graph_sizes, cluster_perturbation, intra_p, inter_p , seed):\n dataset = []\n np.random.seed(seed)\n \n def perturbate_size_vector(cluster_perturbation, sizes_vector, n_nodes):\n #We sample a cluster - as GW invariant with perturbation we keep with first cluster\n #Apply the random size perturbation based on cluster_perturbation parameter\n #Propagate the rest to keep the proper number of nodes n_nodes\n rest = n_nodes\n n = len(sizes_vector)\n size_rate= 1 - cluster_perturbation\n #make sure that a cluster keeps a size >= 2\n assert sizes_vector[0]>2\n max_perturbation = max(1, int(sizes_vector[0]*size_rate))\n \n perturbation0= np.random.choice(range(1,max_perturbation))\n sizes_vector[0]-= perturbation0\n rest-= sizes_vector[0]\n for i in range(1, n-1):\n max_perturbation = max(1, int(sizes_vector[i]*size_rate))\n assert sizes_vector[i]>2\n \n perturbation = np.random.choice(np.random.choice(range(1,max_perturbation)))\n sizes_vector[i]-=perturbation\n rest-=sizes_vector[i]\n sizes_vector[-1] = rest\n return sizes_vector\n \n bloc_qt=2\n stacked_rates= []\n for k in range(graph_qt):\n #number of nodes in the graph\n n_nodes=np.random.choice(graph_sizes)\n #Here if we have more than one cluster we had the perturbation\n #on cluster size depending on size_perturbation rate\n \n if n_nodes%bloc_qt ==0:\n \n sizes = [n_nodes//bloc_qt for _ in range(bloc_qt)]\n else:\n residuals = (n_nodes%bloc_qt)\n sizes =[n_nodes//bloc_qt for _ in range(bloc_qt)]\n for i in range(residuals):\n #pos= np.random.choice(len(sizes))\n #we delete this feature - boring for supervised analysis\n sizes[i]+=1\n \n probs = inter_p*np.ones((bloc_qt, bloc_qt))\n np.fill_diagonal(probs, intra_p)\n local_seed= np.random.choice(range(100))\n sizes = perturbate_size_vector(cluster_perturbation,sizes, n_nodes)\n local_rate = sizes[0]/n_nodes\n stacked_rates.append(local_rate)\n print('Graph %s - perturbated_size:%s / rate size C1: %s'%(k,sizes,local_rate))\n G=sbm(sizes,probs,seed=int(local_seed))\n dataset.append(nx.to_numpy_array(G))\n \n return dataset,stacked_rates",
"def collectInitialeccnStatistics(self, folder, databaseFilename, multiplicityFactor = 1.0, deformedNuclei = False):\n typeCollections = ((1, 'sn'), (2,'en'))\n for ecc_id, ecc_type_name in typeCollections:\n db = SqliteDB(path.join(folder, databaseFilename % ecc_type_name))\n # first write the ecc_id_lookup table, makes sure there is only one such table\n if db.createTableIfNotExists(\"ecc_id_lookup\", ((\"ecc_id\",\"integer\"), (\"ecc_type_name\",\"text\"))):\n db.insertIntoTable(\"ecc_id_lookup\", (ecc_id, ecc_type_name))\n\n # next create the eccentricities and collisionParameters table\n db.createTableIfNotExists(\"eccentricities\", ((\"event_id\",\"integer\"), (\"ecc_id\", \"integer\"), (\"n\",\"integer\"), (\"ecc_real\",\"real\"), (\"ecc_imag\",\"real\")))\n db.createTableIfNotExists(\"collisionParameters\", ((\"event_id\",\"integer\"), (\"Npart\", \"integer\"), (\"Ncoll\",\"integer\"), (\"b\",\"real\"), (\"total_entropy\",\"real\")))\n if(deformedNuclei):\n db.createTableIfNotExists(\"deformationParameters\", ((\"event_id\",\"integer\"), (\"cosTheta1\", \"real\"), (\"phi1\",\"real\"), (\"cosTheta2\",\"real\"), (\"phi2\",\"real\")))\n\n # the big loop\n for iorder in range(1,10):\n data = loadtxt(path.join(folder, '%s_ecc_eccp_%d.dat' %(ecc_type_name, iorder)))\n if iorder == 1:\n Npart = data[:,4]\n Ncoll = data[:,5]\n dSdy = data[:,6]/multiplicityFactor #scale out the multiplicity factor used in superMC\n b = data[:,7]\n for event_id in range(len(Npart)):\n db.insertIntoTable(\"collisionParameters\", (event_id, int(Npart[event_id]), int(Ncoll[event_id]), float(b[event_id]), float(dSdy[event_id])))\n if(deformedNuclei):\n cosTheta1 = data[:,8]\n phi1 = data[:,9]\n cosTheta2 = data[:,10]\n phi2 = data[:,11]\n for event_id in range(len(Npart)):\n db.insertIntoTable(\"deformationParameters\", (event_id, float(cosTheta1[event_id]), float(phi1[event_id]), float(cosTheta2[event_id]), float(phi2[event_id])))\n eccReal = data[:,2]\n eccImag = data[:,3]\n for event_id in range(len(eccReal)):\n db.insertIntoTable(\"eccentricities\",(event_id, ecc_id, iorder, float(eccReal[event_id]), float(eccImag[event_id])))\n\n # close connection to commit changes\n db.closeConnection()",
"def initializeDistribution(self):\n\n f = open(self.dataFilename, 'r')\n reader = csv.reader(f)\n headers = next(reader)\n indexFunctionID = headers.index(self.functionID)\n indexVariableID = headers.index(self.variableID)\n f.close()\n rawData = np.genfromtxt(self.dataFilename, delimiter=\",\" , skip_header=1, usecols=(indexVariableID,indexFunctionID))\n\n self.data = rawData[rawData[:,0].argsort()]\n self.lowerBound = self.data[0,0]\n self.upperBound = self.data[-1,0]\n\n if self.functionType == 'cdf':\n self.cdfFunc = UnivariateSpline(self.data[:,0], self.data[:,1], k=self.k, s=self.s)\n self.pdfFunc = self.cdfFunc.derivative()\n self.invCDF = UnivariateSpline(self.data[:,1], self.data[:,0], k=self.k, s=self.s)\n else:\n self.pdfFunc = UnivariateSpline(self.data[:,0], self.data[:,1], k=self.k, s=self.s)\n cdfValues = np.zeros(self.data[:,0].size)\n for i in range(self.data[:,0].size):\n cdfValues[i] = self.pdfFunc.integral(self.data[0][0],self.data[i,0])\n self.invCDF = UnivariateSpline(cdfValues, self.data[:,0] , k=self.k, s=self.s)\n\n # Note that self.invCDF is creating a new spline where I switch its term.\n # Instead of doing spline(x,f(x)) I am creating its inverse spline(f(x),x)\n # This can be done if f(x) is monothonic increasing with x (which is true for cdf)",
"def main(args = []):\n \"\"\" \n args = parse_args(args)\n setup_logging(args.loglevel)\n _logger.debug(\"Starting crazy calculations...\")\n print(\"The {}-th Fibonacci number is {}\".format(args.n, fib(args.n)))\n _logger.info(\"Script ends here\")\n \"\"\"\n\n setup_logging(1)\n _logger.debug(\"Starting crazy calculations...\")\n _logger.info(\"Script ends here\")\n\n sb.set_style(style=\"whitegrid\")\n sb.set_color_codes()\n\n mean = [0, 0]\n cov = [[1, 0], [0, 1]] # diagonal covariance\n Nobjs = 3000\n x, y = np.random.multivariate_normal(mean, cov, Nobjs).T\n # Add manual outlier\n x[0] = 3.3\n y[0] = 3.3\n X = np.array([x, y]).T\n plt.figure(figsize=(7, 7))\n plt.scatter(x, y, s=15, facecolor='k', edgecolor='k')\n\n start = time.time()\n\n F = iso_forest.iForest(X, ntrees=500, sample_size=256)\n S = F.compute_paths(X_in=X)\n\n end = time.time()\n _logger.info(\"Elapsed (with compilation) = %s\" % (end - start))\n\n f, axes = plt.subplots(1, 1, figsize=(7, 7), sharex=True)\n sb.distplot(S, kde=True, color=\"b\", ax=axes, axlabel='anomaly score')\n\n ss = np.argsort(S)\n plt.figure(figsize=(7, 7))\n plt.scatter(x, y, s=15, c='b', edgecolor='b')\n plt.scatter(x[ss[-10:]], y[ss[-10:]], s=55, c='k')\n plt.scatter(x[ss[:10]], y[ss[:10]], s=55, c='r')\n\n N = 4000\n x2 = np.random.rand(N)\n y2 = np.sin(x2 * 10.) + np.random.randn(N) / 2.\n\n x2[0] = 0.4;\n y2[0] = 0.9\n x2[1] = 0.6;\n y2[1] = 1.5\n x2[2] = 0.5;\n y2[2] = -3.\n X2 = np.array([x2, y2]).T\n plt.figure(figsize=(9, 6))\n plt.scatter(x2, y2, c='b', edgecolor='b')\n plt.scatter(x2[:3], y2[:3], c='k')\n plt.ylim(-3.2, 3.2)\n plt.xlim(0, 1)\n\n F2 = iso_forest.iForest(X2, ntrees=500, sample_size=512)\n S2 = F2.compute_paths(X_in=X2)\n f, axes = plt.subplots(1, 1, figsize=(7, 7), sharex=True)\n sb.distplot(S2, kde=True, color=\"b\", ax=axes, axlabel='anomaly score')\n\n ss = np.argsort(S2)\n plt.figure(figsize=(9, 6))\n plt.scatter(x2, y2, c='b', edgecolors='b')\n plt.scatter(x2[ss[-10:]], y2[ss[-10:]], s=55, c='k')\n plt.scatter(x2[ss[:100]], y2[ss[:100]], s=55, c='r')\n\n # plt.show()",
"def update_ptable(self):\n from bokeh.sampledata.periodic_table import elements\n romans = [\"I\", \"II\", \"III\", \"IV\", \"V\", \"VI\", \"VII\"]\n\n elements[\"atomic mass\"] = elements[\"atomic mass\"].astype(str)\n\n elements[\"period\"] = [x for x in elements.period]\n elements = elements[elements.group != \"-\"]\n\n group_range = [str(x) for x in range(1, 19)]\n print ('reaches colormap def')\n colormap = {\n \"c\" : \"#ffa07a\",\n \"nc\" : \"#A9A9A9\"\n }\n elems_colorpair = {}\n\n fcc_B_extrapol_props = {}\n fcc_dB_extrapol_props = {}\n fcc_V0_extrapol_props = {}\n fcc_E0_extrapol_props = {}\n\n bcc_B_extrapol_props = {}\n bcc_dB_extrapol_props = {}\n bcc_V0_extrapol_props = {}\n bcc_E0_extrapol_props = {}\n\n hcp_B_extrapol_props = {}\n hcp_dB_extrapol_props = {}\n hcp_V0_extrapol_props = {}\n hcp_E0_extrapol_props = {}\n\n available_elems = []\n\n for e in elements[\"symbol\"]:\n if e in np.unique(list(self.plot_data['element'])):\n available_elems.append(e)\n for s in np.unique(list(self.plot_data['structure'])):\n plot_struct = self.plot_data[self.plot_data['structure']==s]\n plot_struct_elem = plot_struct[plot_struct['element']==e]\n if s=='fcc':\n try:\n fcc_B_extrapol_props.update({e:list(plot_struct_elem['B'])[0]})\n\n fcc_dB_extrapol_props.update({e:list(plot_struct_elem['BP'])[0]})\n\n fcc_V0_extrapol_props.update({e:list(plot_struct_elem['V0'])[0]})\n\n fcc_E0_extrapol_props.update({e:list(plot_struct_elem['E0'])[0]})\n except:\n pass\n elif s=='bcc':\n try:\n bcc_B_extrapol_props.update({e:list(plot_struct_elem['B'])[0]})\n\n bcc_dB_extrapol_props.update({e:list(plot_struct_elem['BP'])[0]})\n\n bcc_V0_extrapol_props.update({e:list(plot_struct_elem['V0'])[0]})\n\n bcc_E0_extrapol_props.update({e:list(plot_struct_elem['E0'])[0]})\n except:\n pass\n elif s=='hcp':\n try:\n hcp_B_extrapol_props.update({e:list(plot_struct_elem['B'])[0]})\n\n hcp_dB_extrapol_props.update({e:list(plot_struct_elem['BP'])[0]})\n\n hcp_V0_extrapol_props.update({e:list(plot_struct_elem['V0'])[0]})\n\n hcp_E0_extrapol_props.update({e:list(plot_struct_elem['E0'])[0]})\n except:\n pass\n fcc_E0_extrapol_props.update({k:'xxx' for k in elements['symbol'] if k not in fcc_E0_extrapol_props})\n fcc_V0_extrapol_props.update({k:'xxx' for k in elements['symbol'] if k not in fcc_V0_extrapol_props})\n fcc_B_extrapol_props.update({k:'xxx' for k in elements['symbol'] if k not in fcc_B_extrapol_props})\n fcc_dB_extrapol_props.update({k:'xxx' for k in elements['symbol'] if k not in fcc_dB_extrapol_props})\n\n bcc_E0_extrapol_props.update({k:'xxx' for k in elements['symbol'] if k not in bcc_E0_extrapol_props})\n bcc_V0_extrapol_props.update({k:'xxx' for k in elements['symbol'] if k not in bcc_V0_extrapol_props})\n bcc_B_extrapol_props.update({k:'xxx' for k in elements['symbol'] if k not in bcc_B_extrapol_props})\n bcc_dB_extrapol_props.update({k:'xxx' for k in elements['symbol'] if k not in bcc_dB_extrapol_props})\n\n hcp_E0_extrapol_props.update({k:'xxx' for k in elements['symbol'] if k not in hcp_E0_extrapol_props})\n hcp_V0_extrapol_props.update({k:'xxx' for k in elements['symbol'] if k not in hcp_V0_extrapol_props})\n hcp_B_extrapol_props.update({k:'xxx' for k in elements['symbol'] if k not in hcp_B_extrapol_props})\n hcp_dB_extrapol_props.update({k:'xxx' for k in elements['symbol'] if k not in hcp_dB_extrapol_props})\n\n elems_colorpair.update( { key:'c' for key in np.unique(available_elems) } )\n elems_colorpair.update( { key:'nc' for key in list(elements['symbol']) if key not in 
list(elems_colorpair.keys()) } )\n\n\n print ([ colormap[elems_colorpair[x]] for x in elements['symbol'] ])\n\n source = ColumnDataSource(\n data=dict(\n group=[str(x) for x in elements[\"group\"]],\n period=[str(y) for y in elements[\"period\"]],\n symx=[str(x)+\":0.1\" for x in elements[\"group\"]],\n numbery=[str(x)+\":0.8\" for x in elements[\"period\"]],\n massy=[str(x)+\":0.15\" for x in elements[\"period\"]],\n namey=[str(x)+\":0.3\" for x in elements[\"period\"]],\n sym=elements[\"symbol\"],\n name=elements[\"name\"],\n# cpk=elements[\"CPK\"],\n atomic_number=elements[\"atomic number\"],\n# electronic=elements[\"electronic configuration\"],\n fcc_B=[fcc_B_extrapol_props[x] for x in elements[\"symbol\"]],\n fcc_dB=[fcc_dB_extrapol_props[x] for x in elements[\"symbol\"]],\n fcc_V0=[fcc_V0_extrapol_props[x] for x in elements[\"symbol\"]],\n fcc_E0=[fcc_E0_extrapol_props[x] for x in elements[\"symbol\"]],\n bcc_B=[bcc_B_extrapol_props[x] for x in elements[\"symbol\"]],\n bcc_dB=[bcc_dB_extrapol_props[x] for x in elements[\"symbol\"]],\n bcc_V0=[bcc_V0_extrapol_props[x] for x in elements[\"symbol\"]],\n bcc_E0=[bcc_E0_extrapol_props[x] for x in elements[\"symbol\"]],\n hcp_B=[hcp_B_extrapol_props[x] for x in elements[\"symbol\"]],\n hcp_dB=[hcp_dB_extrapol_props[x] for x in elements[\"symbol\"]],\n hcp_V0=[hcp_V0_extrapol_props[x] for x in elements[\"symbol\"]],\n hcp_E0=[hcp_E0_extrapol_props[x] for x in elements[\"symbol\"]],\n type=elements[\"metal\"],\n type_color=[ colormap[elems_colorpair[x]] for x in elements['symbol'] ],\n )\n )\n\n # plot the periodic layout\n #name = source.data[\"name\"]\n #B = source.data[\"B\"]\n\n ptable = figure(title=\"Periodic Table\", tools=\"hover\",\n x_range=group_range, y_range=list(reversed(romans)))\n ptable.background_fill_color='white'\n ptable.plot_width = 1500\n ptable.toolbar_location = None\n ptable.outline_line_color = None\n\n ptable.rect(\"group\", \"period\", 0.9, 0.9, source=source,\n fill_alpha=0.3, color='type_color')\n\n text_props = {\n \"source\": source,\n \"angle\": 0,\n \"color\": \"black\",\n \"text_align\": \"left\",\n \"text_baseline\": \"middle\"\n }\n\n ptable.text(x=\"symx\", y=\"period\", text=\"sym\",\n text_font_style=\"bold\", text_font_size=\"22pt\", **text_props)\n\n ptable.text(x=\"symx\", y=\"numbery\", text=\"atomic_number\",\n text_font_size=\"9pt\", **text_props)\n\n# ptable.text(x=\"symx\", y=\"namey\", text=\"name\",\n# text_font_size=\"6pt\", **text_props)\n\n# ptable.text(x=\"symx\", y=\"massy\", text=\"mass\",\n# text_font_size=\"5pt\", **text_props)\n\n ptable.grid.grid_line_color = None\n\n\n ptable.select_one(HoverTool).tooltips = [\n (\"name\", \"@name\"),\n (\"fcc, V0 (A^3 per atom)\", \"@fcc_V0\"),\n (\"fcc, B (GPa)\", \"@fcc_B\"),\n (\"fcc, dB/dP\", \"@fcc_dB\"),\n (\"bcc, V0 (A^3 per atom)\", \"@bcc_V0\"),\n (\"bcc, B (GPa)\", \"@bcc_B\"),\n (\"bcc, dB/dP\", \"@bcc_dB\"),\n (\"hcp, V0 (A^3 per atom)\", \"@hcp_V0\"),\n (\"hcp, B (GPa)\", \"@hcp_B\"),\n (\"hcp, dB/dP\", \"@hcp_dB\")]\n return ptable",
"def __evaluate_result(self):\n # Evaluate internally using cophenetic correlation coefficient.\n cpcc = self.__clusterer.calc_cophenetic_coeff()\n \n # Show evaluation through a popup window.\n popup = WarningPopup(self.__gui, 'Clustering evaluation',\n 'Cophenetic correlation coefficient : ' + str(cpcc))\n popup._start()",
"def correlation_analysis():\n\n raw_covid_data = read_covid_data()\n\n pop_data = read_population()\n\n life_expectancy_data = read_life_expectancy()\n\n gdp_data = read_gdp()\n\n edu_data = read_education()\n\n int_data = read_internet()\n\n covid_joined = pd.merge(raw_covid_data, pop_data, on=\"Country\")\n\n covid_joined.insert(4, \"Confirmed rate\", covid_joined[\"Confirmed\"] / covid_joined[\"Population\"])\n covid_joined.insert(5, \"Death rate\", covid_joined[\"Death\"] / covid_joined[\"Population\"])\n\n covid_life_joined = pd.merge(covid_joined, life_expectancy_data, on=\"Country\")\n covid_life_gdp_joined = pd.merge(covid_life_joined, gdp_data, on=\"Country\")\n covid_life_gdp_edu_joined = pd.merge(covid_life_gdp_joined, edu_data, on=\"Country\")\n covid_life_gdp_edu_int_joined = pd.merge(covid_life_gdp_edu_joined, int_data, on=\"Country\")\n covid_life_gdp_edu_int_joined = covid_life_gdp_edu_int_joined[covid_life_gdp_edu_int_joined.Education != '..']\n covid_life_gdp_edu_int_joined = covid_life_gdp_edu_int_joined[covid_life_gdp_edu_int_joined.Internet != '..']\n covid_life_gdp_edu_int_joined['Education'] = covid_life_gdp_edu_int_joined['Education'].astype(float)\n covid_life_gdp_edu_int_joined['Internet'] = covid_life_gdp_edu_int_joined['Internet'].astype(float)\n\n sns.set()\n\n draw_histogram(covid_life_gdp_edu_int_joined[\"Confirmed rate\"], \"COVID-19 Confirmed rate\")\n draw_histogram(covid_life_gdp_edu_int_joined[\"Death rate\"], \"COVID-19 Death rate\")\n\n display_analysis_result(covid_life_gdp_edu_int_joined[\"Life expectancy\"], covid_life_gdp_edu_int_joined[\"Confirmed rate\"], \"Life expectancy\", \"Confirmed rate\")\n display_analysis_result(covid_life_gdp_edu_int_joined[\"Life expectancy\"], covid_life_gdp_edu_int_joined[\"Death rate\"], \"Life expectancy\", \"Death rate\")\n\n display_analysis_result(covid_life_gdp_edu_int_joined[\"GDP\"], covid_life_gdp_edu_int_joined[\"Confirmed rate\"], \"GDP\", \"Confirmed rate\")\n display_analysis_result(covid_life_gdp_edu_int_joined[\"GDP\"], covid_life_gdp_edu_int_joined[\"Death rate\"], \"GDP\", \"Death rate\")\n\n display_analysis_result(covid_life_gdp_edu_int_joined[\"Education\"], covid_life_gdp_edu_int_joined[\"Confirmed rate\"], \"Education\", \"Confirmed rate\")\n display_analysis_result(covid_life_gdp_edu_int_joined[\"Education\"], covid_life_gdp_edu_int_joined[\"Death rate\"], \"Education\", \"Death rate\")\n\n display_analysis_result(covid_life_gdp_edu_int_joined[\"Internet\"], covid_life_gdp_edu_int_joined[\"Confirmed rate\"], \"Internet\", \"Confirmed rate\")\n display_analysis_result(covid_life_gdp_edu_int_joined[\"Internet\"], covid_life_gdp_edu_int_joined[\"Death rate\"], \"Internet\", \"Death rate\")",
"def interpolate(self):\n print(\"Interpolating points...\")\n interpolated_points = set()\n if os.cpu_count():\n processes = os.cpu_count()\n print(f\"Running on all {processes} cores.\")\n else:\n processes = 1\n length = len(self.main_cluster)\n delta = math.ceil(length / processes)\n manager = Manager()\n result_map = manager.dict()\n jobs = []\n for index in range(processes):\n start = index * delta\n stop = (index + 1) * delta\n if stop > length:\n stop = length\n p = Process(target=worker, args=(start, stop,\n result_map, index,\n self.distances,\n self.interpolation_threshold,\n self.main_cluster,\n self.color_lookup_table_points))\n jobs.append(p)\n p.start()\n\n for proc in jobs:\n proc.join()\n\n for index in result_map.keys():\n print(index)\n interpolated_points.update(result_map[index])\n\n main_points = [self.get_value_tuple(index) for index in self.main_cluster]\n interpolated_points.update(main_points)\n\n print(\"Finished interpolation!\")\n\n self.interpolated_points = list(interpolated_points)",
"def main(\n input_dir, radius, bins, num_evaluations\n):\n registration_results = io.read_registration_results(\n os.path.join(input_dir, 'reg_result.txt')\n )\n\n # init output\n df_output = io.init_output()\n\n for i, r in progressbar.progressbar(\n list(\n registration_results.iterrows()\n )\n ):\n # for interactive visualization:\n if i >= num_evaluations:\n exit(0)\n \n # parse point cloud index:\n idx_target = int(r['idx1'])\n idx_source = int(r['idx2'])\n\n # load point clouds:\n pcd_source = io.read_point_cloud_bin(\n os.path.join(input_dir, 'point_clouds', f'{idx_source}.bin')\n )\n pcd_source, idx_inliers = pcd_source.remove_radius_outlier(nb_points=4, radius=radius)\n search_tree_source = o3d.geometry.KDTreeFlann(pcd_source)\n\n pcd_target = io.read_point_cloud_bin(\n os.path.join(input_dir, 'point_clouds', f'{idx_target}.bin')\n )\n pcd_target, idx_inliers = pcd_target.remove_radius_outlier(nb_points=4, radius=radius)\n search_tree_target = o3d.geometry.KDTreeFlann(pcd_target)\n\n # detect keypoints:\n keypoints_source = detect(pcd_source, search_tree_source, radius)\n keypoints_target = detect(pcd_target, search_tree_target, radius)\n\n # create descriptions:\n pcd_source_keypoints = pcd_source.select_by_index(keypoints_source['id'].values)\n fpfh_source_keypoints = o3d.registration.compute_fpfh_feature(\n pcd_source_keypoints, \n o3d.geometry.KDTreeSearchParamHybrid(radius=5*radius, max_nn=100)\n ).data\n\n pcd_target_keypoints = pcd_target.select_by_index(keypoints_target['id'].values)\n fpfh_target_keypoints = o3d.registration.compute_fpfh_feature(\n pcd_target_keypoints, \n o3d.geometry.KDTreeSearchParamHybrid(radius=5*radius, max_nn=100)\n ).data\n\n # generate matches:\n distance_threshold_init = 1.5 * radius\n distance_threshold_final = 1.0 * radius\n\n # RANSAC for initial estimation:\n init_result = ransac_match(\n pcd_source_keypoints, pcd_target_keypoints, \n fpfh_source_keypoints, fpfh_target_keypoints, \n ransac_params = RANSACParams(\n max_workers=5,\n num_samples=4, \n max_correspondence_distance=distance_threshold_init,\n max_iteration=200000, \n max_validation=500,\n max_refinement=30\n ),\n checker_params = CheckerParams(\n max_correspondence_distance=distance_threshold_init,\n max_edge_length_ratio=0.9,\n normal_angle_threshold=None\n ) \n )\n\n # exact ICP for refined estimation:\n final_result = exact_match(\n pcd_source, pcd_target, search_tree_target,\n init_result.transformation,\n distance_threshold_final, 60\n )\n\n # visualize:\n visualize.show_registration_result(\n pcd_source_keypoints, pcd_target_keypoints, init_result.correspondence_set,\n pcd_source, pcd_target, final_result.transformation\n )\n\n # add result:\n io.add_to_output(df_output, idx_target, idx_source, final_result.transformation)\n\n # write output:\n io.write_output(\n os.path.join(input_dir, 'reg_result_yaogefad.txt'),\n df_output\n )"
] | [
"0.5378484",
"0.52962726",
"0.527498",
"0.52039576",
"0.515216",
"0.51226455",
"0.5103976",
"0.50949556",
"0.50500226",
"0.50090396",
"0.49913463",
"0.4972856",
"0.495353",
"0.49418268",
"0.49357253",
"0.49216944",
"0.49161276",
"0.4904244",
"0.49036115",
"0.49016604",
"0.48914683",
"0.48853937",
"0.4869881",
"0.485974",
"0.485121",
"0.48418203",
"0.48315048",
"0.48306033",
"0.4821838",
"0.48115918"
] | 0.61809057 | 0 |
Collates all catalogs into one table then writes the catalog to disk. | def final_catalogs(self, filename=None, catalog_cols=None):
final_catalog = vstack([cluster_info['catalog'] for cluster_info in self._catalog_dictionary.values()])
# If we request to keep only certain columns in our output
if catalog_cols is not None:
final_catalog.keep_columns(catalog_cols)
if filename is None:
return final_catalog
else:
if filename.endswith('.cat'):
final_catalog.write(filename, format='ascii', overwrite=True)
else:
final_catalog.write(filename, overwrite=True) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def commit_all(catalog):\n data = _sort(catalog)\n for i in (\"space\", \"folder\", \"dataset\"):\n for x in data[i]:\n x.commit()",
"def catalog_merge(self, catalog_cols=None):\n\n for cluster_info in self._catalog_dictionary.values():\n # Array element names\n catalog_idx = cluster_info['SPT_cat_idx']\n se_catalog = cluster_info['catalog']\n\n # Replace the existing SPT_ID in the SExtractor catalog with the official cluster ID.\n # se_catalog.columns[0].name = 'SPT_ID'\n # del se_catalog['SPT_ID']\n\n # Then replace the column values with the official ID.\n se_catalog['SPT_ID'] = self._spt_catalog['SPT_ID'][catalog_idx]\n\n # Add the SZ center coordinates to the catalog\n se_catalog['SZ_RA'] = self._spt_catalog['RA'][catalog_idx]\n se_catalog['SZ_DEC'] = self._spt_catalog['DEC'][catalog_idx]\n\n # For all requested columns from the master catalog add the value to all columns in the SExtractor catalog.\n if catalog_cols is not None:\n for col_name in catalog_cols:\n se_catalog[col_name] = self._spt_catalog[col_name][catalog_idx]\n\n cluster_info['catalog'] = se_catalog",
"def commit(self):\n for db in self.values():\n db.commit()",
"def merge_tables(self):\r\n\r\n table_params = pd.read_json(os.path.join(self.config_path, self.db_config_file),\r\n orient='records')[self.report_type]['table']\r\n\r\n self.trees = create_tree(table_params)\r\n\r\n for tree in self.trees:\r\n self._recursive_merge(tree=tree)\r\n self.merged_table.append(self._get_table(self.master_table[0]))\r\n\r\n # Drop table from list_of_tables, table_indexes and master_table\r\n del self.list_of_tables[self.table_indexes.index(self.master_table[0])]\r\n self.table_indexes.remove(self.master_table[0])\r\n del self.master_table[0]",
"def collate(\n batch,\n config,\n plate,\n base_directory=\"../..\",\n column=None,\n munge=False,\n csv_dir=\"analysis\",\n aws_remote=None,\n aggregate_only=False,\n tmp_dir=\"/tmp\",\n overwrite=False,\n add_image_features=True,\n image_feature_categories=[\"Granularity\", \"Texture\", \"ImageQuality\", \"Threshold\"],\n printtoscreen=True,\n):\n\n from pycytominer.cyto_utils.cells import SingleCells\n\n # Set up directories (these need to be abspaths to keep from confusing makedirs later)\n input_dir = pathlib.Path(f\"{base_directory}/analysis/{batch}/{plate}/{csv_dir}\")\n backend_dir = pathlib.Path(f\"{base_directory}/backend/{batch}/{plate}\")\n cache_backend_dir = pathlib.Path(f\"{tmp_dir}/backend/{batch}/{plate}\")\n\n aggregated_file = pathlib.Path(f\"{backend_dir}/{plate}.csv\")\n backend_file = pathlib.Path(f\"{backend_dir}/{plate}.sqlite\")\n cache_backend_file = pathlib.Path(f\"{cache_backend_dir}/{plate}.sqlite\")\n\n if not aggregate_only:\n if os.path.exists(cache_backend_file):\n if not overwrite:\n sys.exit(\n f\"An SQLite file for {plate} already exists at {cache_backend_file} and overwrite is set to False. Terminating.\"\n )\n else:\n os.remove(cache_backend_file)\n\n for eachdir in [input_dir, backend_dir, cache_backend_dir]:\n if not os.path.exists(eachdir):\n os.makedirs(eachdir, exist_ok=True)\n\n if aws_remote:\n remote_input_dir = f\"{aws_remote}/analysis/{batch}/{plate}/{csv_dir}\"\n\n remote_backend_file = f\"{aws_remote}/backend/{batch}/{plate}/{plate}.sqlite\"\n\n remote_aggregated_file = f\"{aws_remote}/backend/{batch}/{plate}/{plate}.csv\"\n\n sync_cmd = f\"aws s3 sync --exclude * --include */Cells.csv --include */Nuclei.csv --include */Cytoplasm.csv --include */Image.csv {remote_input_dir} {input_dir}\"\n if printtoscreen:\n print(f\"Downloading CSVs from {remote_input_dir} to {input_dir}\")\n run_check_errors(sync_cmd)\n\n if printtoscreen:\n print(f\"Ingesting {input_dir}\")\n # Run cytominer-database ingest\n if munge:\n cytominer_database.munge.munge(config_path=config, source=input_dir)\n\n cytominer_database.ingest.seed(\n source=input_dir,\n target=f\"sqlite:///{cache_backend_file}\",\n config_file=config,\n )\n\n # Create a sqlite3 connection\n with sqlite3.connect(cache_backend_file, isolation_level=None) as connection:\n cursor = connection.cursor()\n if column:\n if print:\n print(f\"Adding a Metadata_Plate column based on column {column}\")\n cursor.execute(\"ALTER TABLE Image ADD COLUMN Metadata_Plate TEXT;\")\n cursor.execute(f\"UPDATE image SET Metadata_Plate ={column};\")\n\n if printtoscreen:\n print(f\"Indexing database {cache_backend_file}\")\n cursor.execute(\n \"CREATE INDEX IF NOT EXISTS table_image_idx ON Image(TableNumber, ImageNumber);\"\n )\n for eachcompartment in [\"Cells\", \"Cytoplasm\", \"Nuclei\"]:\n cursor.execute(\n f\"\"\"CREATE INDEX IF NOT EXISTS table_image_object_{eachcompartment.lower()}_idx \n ON {eachcompartment}(TableNumber, ImageNumber, ObjectNumber);\"\"\"\n )\n cursor.execute(\n \"CREATE INDEX IF NOT EXISTS plate_well_image_idx ON Image(Metadata_Plate, Metadata_Well);\"\n )\n cursor.close()\n connection.close()\n\n if aws_remote:\n if printtoscreen:\n print(f\"Uploading {cache_backend_file} to {remote_backend_file}\")\n cp_cmd = [\"aws\", \"s3\", \"cp\", cache_backend_file, remote_backend_file]\n run_check_errors(cp_cmd)\n\n if printtoscreen:\n print(\n f\"Removing analysis files from {input_dir} and {cache_backend_dir}\"\n )\n import shutil\n\n shutil.rmtree(input_dir)\n\n if printtoscreen:\n 
print(f\"Renaming {cache_backend_file} to {backend_file}\")\n os.rename(cache_backend_file, backend_file)\n\n if printtoscreen:\n print(f\"Aggregating sqlite:///{backend_file}\")\n\n if aggregate_only and aws_remote:\n remote_backend_file = f\"{aws_remote}/backend/{batch}/{plate}/{plate}.sqlite\"\n\n remote_aggregated_file = f\"{aws_remote}/backend/{batch}/{plate}/{plate}.csv\"\n\n cp_cmd = [\"aws\", \"s3\", \"cp\", remote_backend_file, backend_file]\n if printtoscreen:\n print(\n f\"Downloading SQLite files from {remote_backend_file} to {backend_file}\"\n )\n run_check_errors(cp_cmd)\n\n if not os.path.exists(backend_file):\n sys.exit(f\"{backend_file} does not exist. Exiting.\")\n\n if add_image_features:\n pass\n else:\n image_feature_categories = None # defensive but not sure what will happen if we give a list but set to False\n\n database = SingleCells(\n f\"sqlite:///{backend_file}\",\n aggregation_operation=\"mean\",\n add_image_features=add_image_features,\n image_feature_categories=image_feature_categories,\n )\n database.aggregate_profiles(output_file=aggregated_file)\n\n if aws_remote:\n if printtoscreen:\n print(f\"Uploading {aggregated_file} to {remote_aggregated_file}\")\n csv_cp_cmd = [\"aws\", \"s3\", \"cp\", aggregated_file, remote_aggregated_file]\n run_check_errors(csv_cp_cmd)\n\n if printtoscreen:\n print(f\"Removing backend files from {backend_dir}\")\n import shutil\n\n shutil.rmtree(backend_dir)",
"def finalize_tables(self):\n self.attrbuilder.finalize(self.ext_type)\n self.vtabbuilder.finalize(self.ext_type)",
"def cleanup():\n cat = CSVCatalog.CSVCatalog()\n cat.drop_table(\"people\")\n cat.drop_table(\"batting\")\n cat.drop_table(\"teams\")",
"def flush(self):\n for db in self.values():\n db.flush()",
"def cleanup():\n cat = CSVCatalog.CSVCatalog()\n cat.drop_table(\"people\", force_drop=True)\n cat.drop_table(\"batting\", force_drop=True)\n cat.drop_table(\"teams\", force_drop=True)",
"def compress_all_db():\r\n #взять все файлы, которые есть в папке с дб,\r\n #и всем сделать compress_data, по их порядку создания\r\n _db_files = sorted(glob.iglob('{}\\\\jsons\\\\DB_*.json'.format(db_folder_path)), key=os.path.getctime)\r\n #iterate through all dbs\r\n for _n in range(len(_db_files)):\r\n print('compressing {} db out of {}'.format(_n+1, len(_db_files)))\r\n #берем файл, достаем из него инфу и компрессим ее в новый файл\r\n with open(_db_files[_n], encoding=\"utf8\") as json_file:\r\n compress_json.dump(json.load(json_file), \"{}.gz\".format(_db_files[_n]))",
"def create_all_tables(self):\n pass",
"def create_all_schemas_and_tables(self):\n for schema, tables in self.schemas.items():\n self.create_schema(schema)\n for table in tables.keys():\n self.create_table(schema, table)",
"def full_initialization_process():\n\n db1 = Database('TOBACCO_RAW;')\n con1, cur1 = db1.connect()\n cur1.execute('create index idl_doc_field_id_idx on idl_doc_field(id);')\n cur1.execute('create index idl_doc_id_idx on idl_doc(id);')\n add_timestamp_to_idl_doc()\n\n create_utf_text_files()\n\n initialize_tables()\n fill_tables()",
"def clean_tables():\n io_raw = dbReadWriteRaw()\n io_clean = dbReadWriteClean()\n\n tables_to_clean = {\n \"measurement_abstract_rpt\": \"_clean_measurement_abstract_rpt(tbl)\",\n \"a_measgraphref\": \"_clean_measgraphref(tbl)\",\n \"a_measgraphic\": \"_clean_measgraphic(tbl)\",\n \"dm_spain_view_study_summary\": \"_clean_study_summary(tbl)\",\n \"a_modvolume\": \"_clean_modvolume(tbl)\",\n \"instance_filename\": \"_clean_instance_filename(tbl)\",\n }\n\n for key, val in tables_to_clean.items():\n tbl = io_raw.get_table(key)\n clean_tbl = eval(val)\n\n io_clean.save_to_db(clean_tbl, key)\n logger.info(\"Created table `{}` in schema {}\".format(key, io_clean.schema))",
"def run(self):\n data = query_orm(self.orm)\n # To avoid confusion downstream: don't write out data if there isn't any\n if not data:\n return\n for table_key, version_key in self.make_s3_keys():\n save_to_s3(table_key, version_key, data)",
"def save_corpora(self):\n\n if self.filename_for_save is not None:\n with open(self.filename_for_save, 'w', newline='', encoding=constants.load_encoding) as file:\n writer = csv.writer(file)\n\n for string in self.__corpora:\n writer.writerow([string])",
"def commit(self):\r\n if self._closed:\r\n raise Error('The connection to the database has been closed.')\r\n for table in self.tables.keys():\r\n if self.tables[table].open:\r\n self.tables[table].commit()\r\n self.createdTables = []",
"def structure_and_repopulate_db() -> None:\n with open('db.sql', encoding=\"utf-8\") as f:\n commands = f.read().strip().split(';')\n commands = [command.strip() for command in commands]\n for command in commands:\n my_cursor.execute(command)\n my_db.commit()\n print('Source structure created, data repopulated')",
"def save_catalog(self):\n self.catalog.to_csv(self.catalog_path, index_label='dateTime')",
"def stage_and_commit(self):\n self.stage_all()\n self.commit()",
"def syncdb(self, dropfirst=False):\n for db in self._db_tree:\n for collection in db['collections']:\n if dropfirst:\n self._datastore[db['database']][collection['name']].drop()\n\n with io.open(os.path.join(self._db_path, db['database'],\n collection['file']), encoding='utf-8') as coll_json:\n for record in coll_json.readlines():\n record = json_util.loads(record)\n self._datastore[db['database']][collection['name']].insert(record, check_keys=False)",
"def merge_db(self):\n path_1 = _path_finder('keydata','{0}_rest.db'.format(self.keyword))\n path_2 = _path_finder('keydata','{0}_stream.db'.format(self.keyword))\n if os.path.isfile(path_1) & os.path.isfile(path_2):\n self.__db_init('joined')\n self.c.execute(\"ATTACH '{0}' as restdb\".format(path_1))\n self.c.execute(\"ATTACH '{0}' as streamdb\".format(path_2))\n self.c.execute('''INSERT OR IGNORE INTO main.tweets(id,date) \n SELECT * FROM restdb.tweets \n UNION \n SELECT * FROM streamdb.tweets''')\n self.c.execute(\"DETACH DATABASE 'restdb'\")\n self.c.execute(\"DETACH DATABASE 'streamdb'\")\n self.conn.commit()\n self.conn.close()\n print('Databases have been merged:')\n print(os.path.abspath(_path_finder(\n 'keydata','{0}_joined.db'.format(self.keyword))))",
"def merge_cat(UT):\n csv_path = Path(\"./catalog\"+UT+\".csv\")\n if csv_path.exists() != 1:\n Popen('rm -rf merged'+UT+'.log', shell=True)\n Popen('touch merged'+UT+'.log', shell=True)\n all_files = glob.glob(\"./results/20*/\"+UT+\"/*\")\n print('merging table: {} (1/{})'.format(all_files[0],len(all_files)))\n tab = pd.read_csv(all_files[0])\n cat = tab.copy()\n merged = open('merged'+UT+'.log','a+')\n merged.write(all_files[0]+'\\n')\n try:\n for i, file in enumerate(all_files[1:]):\n print('merging table: {} ({}/{})'.format(file,i+2,len(all_files)))\n tab = pd.read_csv(file)\n cat = pd.merge(cat, tab, how='outer')\n merged.write(file+'\\n')\n cat.to_csv('catalog'+UT+'.csv', index=False, header=True)\n merged.close()\n except:\n cat.to_csv('catalog'+UT+'.csv', index=False, header=True)\n merged.close()\n else:\n cat = pd.read_csv('catalog'+UT+'.csv')\n all_files = glob.glob(\"./results/20*/\"+UT+\"/*\")\n merged = list(pd.read_table('merged'+UT+'.log', header=None).values)\n merged = [i[0] for i in merged]\n if set(all_files) == set(merged):\n print('GOOD NEWS: No new table is needed to be merged.')\n else:\n non_processed = list(set(all_files) - set(merged))\n merged = open('merged'+UT+'.log','a+')\n try:\n for i, new_img in enumerate(non_processed):\n print('merging table: {} ({}/{})'.format(new_img,i+1,len(non_processed)))\n tab = pd.read_csv(new_img)\n cat = pd.merge(cat, tab, how='outer')\n merged.write(new_img+'\\n')\n cat.to_csv('catalog'+UT+'.csv', index=False, header=True)\n merged.close()\n except:\n cat.to_csv('catalog'+UT+'.csv', index=False, header=True)\n merged.close()\n cat = pd.read_csv('catalog'+UT+'.csv')\n m = Table(cat.values, names=cat.columns)\n hdu = fits.table_to_hdu(m)\n hdulist = fits.HDUList([fits.PrimaryHDU(), hdu])\n hdulist.writeto('catalog'+UT+'.fits', overwrite=True)",
"def create_final_table(conn, county):\r\n for county in county:\r\n query = f\"SELECT name FROM sqlite_master WHERE type ='table' AND name NOT LIKE 'sqlite_%' AND name = '{county}'\"\r\n result = execute_query(conn, query)\r\n try:\r\n if len(result) == 0:\r\n query = f\"create table {county} as select * from {county}_stg;\"\r\n execute_query(conn, query)\r\n\r\n load_final_table(conn, county)\r\n except Exception as e:\r\n print(f\"This query {query} failed with exception {e}\")",
"def create_all_tables():\n\tcommon_db.create_all_tables()",
"def migrar(self):\r\n start = time.time()\r\n if not os.path.exists('output'):\r\n os.makedirs('output')\r\n # seleciona as tabelas\r\n res = self.cur_origem.execute(\r\n \"select table_name from INFORMATION_SCHEMA.tables where TABLE_SCHEMA = '%s' %s order by table_name;\" % (self.schema_origem, self.filtro))\r\n\r\n # para cada tabela\r\n for row, in res.fetchall():\r\n row = row.strip()\r\n # conta os registros\r\n countsql = self.cur_origem.execute(\r\n \"select count(*) as total from %s.%s \" % (self.schema_origem,row))\r\n count, = countsql.fetchall()[0]\r\n start_time = time.time()\r\n start_datetime = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\r\n print(\"MIGRANDO: %s\\n NRO REGISTROS: %s registros\\n INICIO: %s\" % (\r\n row, count, start_datetime))\r\n\r\n # gera o create table e trunca a tabela ( se ja existir )\r\n create, tipos = self.ddl_table(row)\r\n self.cur_destino.execute(create)\r\n self.cur_destino.execute(\"TRUNCATE TABLE %s\" % row)\r\n\r\n # gera as colunas\r\n cols = \"\"\r\n for id, [col, tipo] in tipos.items():\r\n cols += \"%s,\" % col\r\n # print( \"select %s from %s.%s \" % (cols[:-1], self.schema_origem,row))\r\n\r\n # busca os dados\r\n \r\n\r\n # grava os dados no TXT\r\n with open(\"output/%s.txt\" % row, \"w\", newline='', encoding=\"latin-1\", errors='ignore') as f:\r\n w = csv.writer(\r\n f, delimiter='|', quotechar='\"')\r\n try:\r\n self.cur_origem.execute(\"select %s from %s.%s \" % (cols[:-1], self.schema_origem,row))\r\n w.writerows(self.cur_origem.fetchall())\r\n except Exception as e:\r\n self.erros.append([\"%s\" % row, e])\r\n end_time = time.time()\r\n end_datetime = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\r\n print(\"\\tFIM: %s\\tTEMPO: %ss\\tTABELA COM ERRO %s\" %\r\n (end_datetime, round(end_time-start_time, 0), e))\r\n\r\n # le o arquivo gravado e copia para o banco destino\r\n with open(\"output/%s.txt\" % row, \"r\", encoding=\"latin-1\", errors='ignore') as f:\r\n try:\r\n self.cur_destino.copy_expert(\r\n \"\"\"COPY %s FROM STDIN WITH QUOTE '\"' DELIMITER '|' NULL '' CSV \"\"\" % row, f)\r\n except Exception as e:\r\n self.erros.append([\"%s\" % row, e])\r\n end_time = time.time()\r\n end_datetime = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\r\n print(\" FIM: %s\\n TEMPO: %ss\\n TABELA COM ERRO %s\" %\r\n (end_datetime, round(end_time-start_time, 0), e))\r\n else:\r\n end_time = time.time()\r\n end_datetime = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\r\n print(\" FIM: %s\\n TEMPO: %ss\\n OK\" %\r\n (end_datetime, round(end_time-start_time, 0)))\r\n end = time.time()\r\n print(\"TEMPO GASTO: %s s\" % (end-start))",
"def collate_fn(batch):\n metadata = []\n for el in batch:\n metadata.append(el[\"metadata\"])\n del el[\"metadata\"]\n\n batch = default_collate(batch)\n\n batch[\"metadata\"] = metadata\n\n return batch",
"def migrar(self):\r\n start = time.time()\r\n if not os.path.exists('output'):\r\n os.makedirs('output')\r\n # seleciona as tabelas\r\n res = self.cur_origem.execute(\r\n \"select rdb$relation_name from rdb$relations where rdb$view_blr is null and (rdb$system_flag is null or rdb$system_flag = 0) %s;\" % self.filtro)\r\n\r\n # para cada tabela\r\n for row, in res.fetchall():\r\n row = row.strip()\r\n\r\n # conta os registros\r\n countsql = self.cur_origem.execute(\r\n \"select count(*) as total from %s \" % row)\r\n count, = countsql.fetchall()[0]\r\n start_time = time.time()\r\n start_datetime = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\r\n print(\"MIGRANDO: %s\\n NRO REGISTROS: %s registros\\n INICIO: %s\" % (\r\n row, count, start_datetime))\r\n\r\n # gera o create table e trunca a tabela ( se ja existir )\r\n create, tipos = self.ddl_table(row)\r\n self.cur_destino.execute(create)\r\n self.cur_destino.execute(\"TRUNCATE TABLE %s\" % row)\r\n\r\n # busca os dados\r\n self.cur_origem.execute(\"select * from %s \" % (row))\r\n\r\n # grava os dados no TXT\r\n with open(\"output/%s.txt\" % row, \"wb\") as f:\r\n writer = csv.writer(f, delimiter='|')\r\n writer.writerows(self.cur_origem.fetchall())\r\n\r\n # le o arquivo gravado e copia para o banco destino\r\n with open(\"output/%s.txt\" % row, \"r\") as f:\r\n try:\r\n self.cur_destino.copy_expert(\r\n \"\"\"COPY %s FROM STDIN WITH QUOTE '\"' DELIMITER '|' NULL '' CSV \"\"\" % row, f)\r\n except Exception as e:\r\n self.erros.append([\"%s\" % row, e])\r\n end_time = time.time()\r\n end_datetime = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\r\n print(\" FIM: %s\\n TEMPO: %ss\\n TABELA COM ERRO %s\" %\r\n (end_datetime, round(end_time-start_time, 0), e))\r\n else:\r\n end_time = time.time()\r\n end_datetime = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\r\n print(\" FIM: %s\\n TEMPO: %ss\\n OK\" %\r\n (end_datetime, round(end_time-start_time, 0)))\r\n end = time.time()\r\n print(\"TEMPO GASTO: %s s\" % (end-start))",
"def to_db(self):\n bulk = conn_db().initialize_ordered_bulk_op()\n for fiction in self.fictions:\n bulk.find({'id': fiction.id}).upsert().update({'$set': fiction.__dict__})\n bulk.execute()",
"def migrar(self):\r\n start = time.time()\r\n if not os.path.exists('output'):\r\n os.makedirs('output')\r\n # seleciona as tabelas\r\n res = self.cur_origem.execute(\r\n \"SELECT table_name FROM dba_tables WHERE owner = '%s' %s order by table_name\" % (self.schema_origem, self.filtro))\r\n\r\n # para cada tabela\r\n for row, in res.fetchall():\r\n row = row.strip()\r\n # conta os registros\r\n countsql = self.cur_origem.execute(\r\n \"select count(*) as total from %s \" % row)\r\n count, = countsql.fetchall()[0]\r\n start_time = time.time()\r\n start_datetime = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\r\n print(\"MIGRANDO: %s\\n NRO REGISTROS: %s registros\\n INICIO: %s\" % (\r\n row, count, start_datetime))\r\n\r\n # gera o create table e trunca a tabela ( se ja existir )\r\n create, tipos = self.ddl_table(row)\r\n self.cur_destino.execute(create)\r\n self.cur_destino.execute(\"TRUNCATE TABLE %s\" % row)\r\n\r\n # gera as colunas\r\n cols = \"\"\r\n # tratamento exclusivo SONNER\r\n for id, [col, tipo] in tipos.iteritems():\r\n if col == \"SENHA\" or (col == \"DADOS\" and row == \"CADARQUIVODIGITAL\"):\r\n cols += \"NULL AS %s,\" % col\r\n else:\r\n cols += \"%s,\" % col\r\n # padrao seria:\r\n # for id, [col, tipo] in tipos.iteritems():\r\n # cols += \"%s,\" % col\r\n # print \"select %s from %s \" % (cols[:-1], row)\r\n\r\n # busca os dados\r\n self.cur_origem.execute(\"select %s from %s \" % (cols[:-1], row))\r\n\r\n # grava os dados no TXT\r\n with open(\"output/%s.txt\" % row, \"wb\") as f:\r\n w = csv.writer(\r\n f, delimiter='|', quotechar='\"')\r\n try:\r\n\r\n # execao SONNER (dados TEXT tipo cblob necessita de conversao anterior por conta de encoding)\r\n if row == \"CADPESSOACONTSOC\":\r\n for cada in self.cur_origem.fetchall():\r\n cada = list(cada)\r\n if cada[5] is not None:\r\n cada[5] = cada[5].read().encode(\"latin-1\")\r\n w.writerow(cada)\r\n\r\n else:\r\n w.writerows(self.cur_origem.fetchall())\r\n except Exception as e:\r\n self.erros.append([\"%s\" % row, e])\r\n end_time = time.time()\r\n end_datetime = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\r\n print(\"\\tFIM: %s\\tTEMPO: %ss\\tTABELA COM ERRO %s\" %\r\n (end_datetime, round(end_time-start_time, 0), e))\r\n\r\n # le o arquivo gravado e copia para o banco destino\r\n with open(\"output/%s.txt\" % row, \"r\") as f:\r\n try:\r\n self.cur_destino.copy_expert(\r\n \"\"\"COPY %s FROM STDIN WITH QUOTE '\"' DELIMITER '|' NULL '' CSV \"\"\" % row, f)\r\n except Exception as e:\r\n self.erros.append([\"%s\" % row, e])\r\n end_time = time.time()\r\n end_datetime = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\r\n print(\" FIM: %s\\n TEMPO: %ss\\n TABELA COM ERRO %s\" %\r\n (end_datetime, round(end_time-start_time, 0), e))\r\n else:\r\n end_time = time.time()\r\n end_datetime = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\r\n print(\" FIM: %s\\n TEMPO: %ss\\n OK\" %\r\n (end_datetime, round(end_time-start_time, 0)))\r\n end = time.time()\r\n print(\"TEMPO GASTO: %s s\" % (end-start))"
] | [
"0.6468309",
"0.5989688",
"0.56459284",
"0.5489879",
"0.5469518",
"0.5309471",
"0.5253361",
"0.52173823",
"0.5171973",
"0.515313",
"0.5135029",
"0.5130573",
"0.5085238",
"0.5084769",
"0.50752443",
"0.5074923",
"0.5034462",
"0.50288045",
"0.502765",
"0.5023414",
"0.50230813",
"0.50215834",
"0.50207186",
"0.501546",
"0.500791",
"0.4979279",
"0.49764493",
"0.49619243",
"0.49243313",
"0.49147755"
] | 0.608957 | 1 |
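Note on the final_catalogs record above: its positive document collates per-cluster astropy Tables with vstack, optionally trims columns, and writes the result with a format chosen by file extension. A minimal, self-contained sketch of that same pattern follows; the cluster IDs, table contents, and output filename are hypothetical, and only the astropy calls (Table, vstack, keep_columns, write) are taken from the record itself.

    # Minimal sketch with invented tables; mirrors the collate-then-write pattern above.
    from astropy.table import Table, vstack

    per_cluster = {
        'cluster_A': Table({'SPT_ID': ['cluster_A'], 'MAG': [17.2]}),
        'cluster_B': Table({'SPT_ID': ['cluster_B'], 'MAG': [18.5]}),
    }

    combined = vstack([tbl for tbl in per_cluster.values()])  # stack all catalogs into one table
    combined.keep_columns(['SPT_ID', 'MAG'])                  # optional column selection
    combined.write('final_catalog.fits', overwrite=True)      # format inferred from the .fits extension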
Calculates the angular separations of each object relative to the image center. | def object_separations(self):
for cutout_info in self._catalog_dictionary.values():
catalog = cutout_info['catalog']
# Create SkyCoord objects for all objects in the catalog as well as the image center
object_coords = SkyCoord(catalog['ALPHA_J2000'], catalog['DELTA_J2000'], unit=u.deg)
center_coord = SkyCoord(catalog['SZ_RA'][0], catalog['SZ_DEC'][0], unit=u.deg)
# Calculate the angular separations between the objects and the image center in arcminutes
separations_arcmin = object_coords.separation(center_coord).to(u.arcmin)
# Add our new column to the catalog
catalog['RADIAL_SEP_ARCMIN'] = separations_arcmin
# Update the catalog in the data structure
cutout_info['catalog'] = catalog | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def naturalAspectRatio(self):\n return math.sin(self.view_angle_h) / math.sin(self.view_angle_v)",
"def barycentre (liste_objets):\r\n x = 0\r\n y = 0\r\n summass = 0\r\n for i in liste_objets:\r\n x += i.mass * i.posx\r\n y += i.mass * i.posy\r\n summass += i.mass\r\n x /= summass\r\n y /= summass\r\n return x,y,summass",
"def angular_separation(r1: np.ndarray, r2: np.ndarray) -> float:\n # First compute the rotation that maps r1 to r2.\n dr = r2 @ r1.transpose()\n # Then extract the angle.\n _, angle = transforms3d.axangles.mat2axangle(dr)\n # Normalise the angle.\n if angle > np.pi:\n angle = 2 * np.pi - angle\n\n # Return the angle in degrees.\n return angle * 180 / np.pi",
"def calc_o_space(persons):\n c_x = 0\n c_y = 0\n \n# Group size\n g_size = len(persons)\n \n for person in persons:\n c_x += person[0] + math.cos(person[2]) * STRIDE\n c_y += person[1] + math.sin(person[2]) * STRIDE\n\n center = [c_x / g_size, c_y / g_size]\n\n return center",
"def azizen(self):\n # x0,y0 array pixel coordinates relative to cx,cy\n# ndy0,ndx0=img.shape\n ndy0=self.ndy0\n ndx0=self.ndx0\n x0,y0=np.meshgrid(np.linspace(0,ndx0-1,ndx0)-self.cx,np.linspace(0,ndy0-1,ndy0)-self.cy)\n r0=np.sqrt(x0**2+y0**2)/self.pr0 # fractional radial distance from 0,0\n# self.roi=np.s_[ystart:ystart+self.ny0,xstart:xstart+self.nx0]\n # why not model the zenith angle dependence with polynomial directly\n # rather than linear interpolation between roots.\n roots=np.zeros(51)\n rr=np.arange(51)/100.0\n for i,ref in enumerate(rr):\n roots[i]=np.real(np.roots([self.c3,0,self.c2,0,self.c1,-ref])[-1])\n theta0 = np.interp(r0/2,rr,roots)\n \n phi0 = np.arctan2(x0,y0) - self.rot ####phi (i.e., azimuth) is reckoned with -pi corresponding to north, increasing clockwise, NOTE: pysolar use sub-standard definition\n phi0 = phi0%(2*np.pi)\n\n #####correction for the tilt of the camera\n k=np.array((np.sin(self.azm),np.cos(self.azm),0))\n a=np.array([np.sin(theta0)*np.cos(phi0),np.sin(theta0)*np.sin(phi0),np.cos(theta0)]); \n a = np.transpose(a,[1,2,0])\n b=np.cos(self.beta)*a + np.sin(self.beta)*np.cross(k,a,axisb=2) \\\n + np.reshape(np.outer(np.dot(a,k),k),(self.ndy0,self.ndx0,3))*(1-np.cos(self.beta))\n theta0=np.arctan(np.sqrt(b[:,:,0]**2+b[:,:,1]**2)/b[:,:,2])\n phi0=np.arctan2(b[:,:,1],b[:,:,0])%(2*np.pi)\n# max_theta *= deg2rad \n# valid0 = (theta0<max_theta) & (theta0>0); \n# theta0[valid0]=np.nan;\n self.theta0,self.phi0=theta0,phi0",
"def compute_positions(self):\n return (self.x + DIRECTIONS[self.facing_direction][0]) % (self.image.shape[0] - 1), \\\n (self.y + DIRECTIONS[self.facing_direction][1]) % (self.image.shape[1] - 1)",
"def get_center_of_mass_allies(self,obs):",
"def object_separations(self):\n\n for cluster_info in self._catalog_dictionary.values():\n catalog = cluster_info['catalog']\n\n # Create SkyCoord objects for all objects in the catalog as well as the SZ center\n object_coords = SkyCoord(catalog['ALPHA_J2000'], catalog['DELTA_J2000'], unit=u.degree)\n sz_center = SkyCoord(catalog['SZ_RA'][0], catalog['SZ_DEC'][0], unit=u.degree)\n\n # Calculate the angular separations between the objects and the SZ center in arcminutes\n separations_arcmin = object_coords.separation(sz_center).to(u.arcmin)\n\n # Compute the r500 radius for the cluster\n r500 = (3 * catalog['M500'][0] * u.Msun /\n (4 * np.pi * 500 * self._cosmo.critical_density(catalog['REDSHIFT'][0]).to(\n u.Msun / u.Mpc ** 3))) ** (1 / 3)\n\n # Convert the angular separations into physical separations relative to the cluster's r500 radius\n separations_r500 = (separations_arcmin / r500\n * self._cosmo.kpc_proper_per_arcmin(catalog['REDSHIFT'][0]).to(u.Mpc / u.arcmin))\n\n # Add our new columns to the catalog\n catalog['R500'] = r500\n catalog['RADIAL_SEP_R500'] = separations_r500\n catalog['RADIAL_SEP_ARCMIN'] = separations_arcmin\n\n # Update the catalog in the data structure\n cluster_info['catalog'] = catalog",
"def barycenter(self):\n _value = (sum((v[0] for v in self.objects.values())),sum((v[1] for v in self.objects.values())))\n if self.objects:\n _value = (_value[0]/len(self.objects), _value[1]/len(self.objects))\n self.bc=_value\n return _value",
"def areas(self):\n\n height_delta = (np.cos(self.polar_corners[:-1, :-1]) - np.cos(self.polar_corners[:-1, 1:]))\n azimuth_delta = (self.azimuthal_corners[1:, 1:] - self.azimuthal_corners[:-1, 1:])\n\n return height_delta * azimuth_delta",
"def sivina(self):\n return (self.r + self.g + self.b) / 3",
"def compute_thickness(self):\n com = vtk.vtkCenterOfMass()\n com.SetInputData(self.inner_rim_poly)\n center = np.asarray(com.GetCenter()) # take center from inner points (not outer)\n\n irp_numpy = numpy_support.vtk_to_numpy(self.inner_rim_poly.GetPoints().GetData())\n orp_numpy = numpy_support.vtk_to_numpy(self.outer_rim_poly.GetPoints().GetData())\n\n # compute average radius ..\n rs_inner = np.linalg.norm(irp_numpy - np.tile(center, (irp_numpy.shape[0], 1)), axis = 1)\n rs_outer = np.linalg.norm(orp_numpy - np.tile(center, (orp_numpy.shape[0], 1)), axis = 1)\n\n # average out\n r_inner = np.mean(rs_inner)\n r_outer = np.mean(rs_outer)\n\n # compute distance\n d = r_outer - r_inner\n self.thickness = d\n\n return d",
"def _rad_center(self):\n return ((self.rad_hi + self.rad_lo) / 2).to(\"deg\")",
"def photons(self, depth=1):\n self.dx[:,:self.W-1] = self.z[:,1:] - self.z[:,:self.W-1]\n self.dy[:self.H-1,:] = self.z[1:,:] - self.z[:self.H-1,:]\n px = self.xv - self.dx*depth\n py = self.yv - self.dy*depth\n return px,py",
"def get_diameter(self) -> float:\r\n \r\n return (self.box[3] - self.box[1] + self.box[2] - self.box[0]) / 2",
"def compute_in_radius(self, boids_in_radius):\r\n \r\n avg_velocity = Vector(*np.zeros(2))\r\n center_of_mass = Vector(*np.zeros(2))\r\n avg_vector = Vector(*np.zeros(2))\r\n total = 0\r\n for boid in boids_in_radius:\r\n avg_velocity += boid.velocity # calculating average direction \r\n center_of_mass += boid.position # calculating center of mass\r\n total += 1\r\n distance = np.linalg.norm(boid.position - self.position)\r\n \r\n if self.position != boid.position:\r\n diff = self.position - boid.position\r\n diff /= distance # scaling with the distance in order to avoid closer boids with greater force \r\n avg_vector += diff # calculating repulsive force vector\r\n \r\n return avg_velocity, center_of_mass, avg_vector, total",
"def center(self):\n points = set()\n for face in self._points:\n points.update(face)\n x_points = [point[0] for point in points]\n y_points = [point[1] for point in points]\n z_points = [point[2] for point in points]\n return \\\n (np.average(x_points), np.average(y_points), np.average(z_points))",
"def _distance_calculation(self,\n object_coordinates: list = [],\n avg_width: int = 53,\n debug: bool = False\n ):\n\n # Initialize lists for various measurements\n proportion_x = []\n proportion_y = []\n camera_distance = []\n\n # Initialise proportion lists for later easy acces\n for i in range(len(object_coordinates)):\n proportion_x.append(0)\n proportion_y.append(0)\n\n for j in range(len(object_coordinates)):\n # Measure height and width of detected person (in pixel)\n proportion_x[j] = object_coordinates[j][1][0] - object_coordinates[j][0][0]\n camera_distance.append((self.focal_value * avg_width) / proportion_x[j])\n\n one_pixel = proportion_x[j] / avg_width\n if (debug):\n print(\"Length of one pixel in cm:\" + str(one_pixel))\n print(\"Object \" + str(j) + \" - Distance to camera:\", camera_distance[-1])\n\n if (j > 0):\n min_dist_pixels = one_pixel * self.distance_threshold\n\n for k in range(j):\n # Horizontal distance of the detected objects\n min_dist_obj_x_1 = abs(object_coordinates[j][1][0] - object_coordinates[k][0][0])\n min_dist_obj_x_2 = abs(object_coordinates[k][1][0] - object_coordinates[j][0][0])\n\n # Distance objects on z axis\n dist_obj_z = abs(camera_distance[j] - camera_distance[k])\n\n if (debug):\n print(\"Object \" + str(j) + \", \" + str(k) + \" - Distance to camera:\", camera_distance[-1])\n\n # Check for shortest distance between the objects\n if (min_dist_obj_x_1 < min_dist_obj_x_2):\n objects_distance = math.sqrt(min_dist_obj_x_1 ** 2 + dist_obj_z ** 2)\n case = 0\n elif (min_dist_obj_x_2 < min_dist_obj_x_1):\n objects_distance = math.sqrt(min_dist_obj_x_2 ** 2 + dist_obj_z ** 2)\n case = 1\n else:\n objects_distance = 0\n case = 2\n\n # Check if the shortest distance between the objects is smaller than the threshold\n if (objects_distance < min_dist_pixels):\n return [case, j, k, objects_distance, min_dist_pixels]\n else:\n return [3, j, k, objects_distance, min_dist_pixels]\n\n return [3]",
"def calc_separation_alt_az(self, body):\n self.body.compute(self.site)\n body.body.compute(self.site)\n\n delta_az = float(self.body.az) - float(target.az)\n delta_alt = float(self.body.alt) - float(target.alt)\n return (delta_alt, delta_az)",
"def ionic_fractions(self) -> np.ndarray:\n return self._ionic_fractions",
"def fieldCenter(self):\n if self.ra0 is None:\n self.ra0 = reduce(lambda x, y: x + y, [src.pos.ra for src in self.sources]) / len(\n self.sources) if self.sources else 0\n if self.dec0 is None:\n self.dec0 = reduce(lambda x, y: x + y, [src.pos.dec for src in self.sources]) / len(\n self.sources) if self.sources else 0\n return self.ra0, self.dec0",
"def normalize(self):\n total = 0.0\n for i in range(0,self.npoints):\n total+=self.y[i]*self._dx\n for i in range(0,self.npoints):\n self.y[i]/=total\n return",
"def _normalize_image(self, img: np.ndarray) -> np.ndarray:\n i2 = img.astype(float) - self.bg\n i2 /= i2.max()\n return i2",
"def normalize(self) -> NoReturn:\n self._ionic_fractions = self._ionic_fractions / np.sum(self._ionic_fractions)",
"def nine_regions(self):\n\n coordinateList = []\n\n # Top left.\n x = (int)( self.oriImgSize[IDX_WIDTH] * self.ratioTopLeft[IDX_X] )\n y = (int)( self.oriImgSize[IDX_HEIGHT] * self.ratioTopLeft[IDX_Y] )\n coordinateList.append( [x, y] )\n\n # Top center.\n x = (int)( self.oriImgSize[IDX_WIDTH] * 0.5 - self.regionSize[IDX_WIDTH] / 2 )\n y = (int)( self.oriImgSize[IDX_HEIGHT] * self.ratioTopLeft[IDX_Y] ) \n coordinateList.append( [x, y] )\n\n # Top right.\n x = (int)( self.oriImgSize[IDX_WIDTH] * ( 1.0 - self.ratioTopLeft[IDX_X] ) - self.regionSize[IDX_WIDTH] )\n y = (int)( self.oriImgSize[IDX_HEIGHT] * self.ratioTopLeft[IDX_Y] )\n coordinateList.append( [x, y] )\n\n # Center left.\n x = (int)( self.oriImgSize[IDX_WIDTH] * self.ratioTopLeft[IDX_X] )\n y = (int)( self.oriImgSize[IDX_HEIGHT] * 0.5 - self.regionSize[IDX_HEIGHT] / 2 )\n coordinateList.append( [x, y] )\n\n # Center.\n x = (int)( self.oriImgSize[IDX_WIDTH] * 0.5 - self.regionSize[IDX_WIDTH] / 2 )\n y = (int)( self.oriImgSize[IDX_HEIGHT] * 0.5 - self.regionSize[IDX_HEIGHT] / 2 )\n coordinateList.append( [x, y] )\n\n # Center right.\n x = (int)( self.oriImgSize[IDX_WIDTH] * (1.0 - self.ratioTopLeft[IDX_X]) - self.regionSize[IDX_WIDTH] )\n y = (int)( self.oriImgSize[IDX_HEIGHT] * 0.5 - self.regionSize[IDX_HEIGHT] / 2 )\n coordinateList.append( [x, y] )\n\n # Bottom left.\n x = (int)( self.oriImgSize[IDX_WIDTH] * self.ratioTopLeft[IDX_X] )\n y = (int)( self.oriImgSize[IDX_HEIGHT] * (1.0 - self.ratioTopLeft[IDX_Y]) - self.regionSize[IDX_HEIGHT] )\n coordinateList.append( [x, y] )\n\n # Bottom center.\n x = (int)( self.oriImgSize[IDX_WIDTH] * 0.5 - self.regionSize[IDX_WIDTH] / 2 )\n y = (int)( self.oriImgSize[IDX_HEIGHT] * (1.0 - self.ratioTopLeft[IDX_Y]) - self.regionSize[IDX_HEIGHT] )\n coordinateList.append( [x, y] )\n\n # Bottom right.\n x = (int)( self.oriImgSize[IDX_WIDTH] * (1.0 - self.ratioTopLeft[IDX_X]) - self.regionSize[IDX_WIDTH] )\n y = (int)( self.oriImgSize[IDX_HEIGHT] * (1.0 - self.ratioTopLeft[IDX_Y]) - self.regionSize[IDX_HEIGHT] )\n coordinateList.append( [x, y] )\n\n return coordinateList",
"def principal(self):\n s = [self.voigt[i] for i in range(6)]\n\n # calculate centers\n cxy = (s[0] + s[1]) / 2\n cyz = (s[1] + s[2]) / 2\n czx = (s[2] + s[0]) / 2\n\n # calculate radii\n rxy = ((s[0] - s[1])**2 + (2*s[3])**2)**(1/2) / 2\n ryz = ((s[1] - s[2])**2 + (2*s[4])**2)**(1/2) / 2\n rzx = ((s[2] - s[0])**2 + (2*s[5])**2)**(1/2) / 2\n\n return array([[cxy + rxy, cxy - rxy, rxy],\n [cyz + ryz, cyz - ryz, ryz],\n [czx + rzx, czx - rzx, rzx]])",
"def getRatio(probe_num, position_vector, shot_range, dir, day ='050119r'):\n ratio_x = 0\n ratio_y = 0\n ratio_z = 0\n # helm_B = [0,0,0]\n divideby = 0\n for shot in range(shot_range[0], shot_range[1]+1):\n print( 'On shot ', day+str(shot), ' for probe ',probe_num)\n x,y,z, currmax,helmB_new = probe_calib(day+str(shot), probe_num, position_vector,dir)\n ratio_x = ratio_x + x\n ratio_y = ratio_y + y\n ratio_z = ratio_z + z\n # helm_B = [helm_B[i] + helmB_new[i] for i in len(helmB)]\n divideby = divideby + 1 #averaging over the number of shots\n ratio_Bx = ratio_x/divideby\n ratio_By = ratio_y/divideby\n ratio_Bz = ratio_z/divideby\n # helmB = [helm_B]/divideby\n # print ratio_Bx, ratio_By, ratio_Bz, helmB\n # print(\"ratio_Bx %f, ratio_By %f, ratio_Bz %f, helmB%s\"%(ratio_Bx, ratio_By, ratio_Bz, helmB))\n Bx_sqr =ratio_x**2\n By_sqr =ratio_y**2\n Bz_sqr =ratio_z**2\n B = Bx_sqr + By_sqr+ Bz_sqr\n norm_factor = np.sqrt(B)\n ratio_Bx, ratio_By, ratio_Bz = [ratio_Bx, ratio_By, ratio_Bz]/norm_factor\n\n return (ratio_Bx, ratio_By, ratio_Bz, norm_factor)",
"def _normalize_images(self, images: th.Tensor) -> th.Tensor:\n output = ((images+2)/4 - self._norm_mean)/self._norm_std\n return output",
"def direction(self):\n import pylab\n i = 0\n j = 0\n vals = []\n vects = []\n kpx = self.keypoints.x\n kpy = self.keypoints.y\n sigma = self.keypoints.sigma\n img = self.raw\n pylab.figure()\n pylab.imshow(img, interpolation='nearest')\n\n for y, x, s in zip(kpy, kpx, sigma):\n s_patch = numpy.trunc(s * 2)\n\n if s_patch % 2 == 0 :\n s_patch += 1\n\n if s_patch < 3 : s_patch = 3\n\n if (x > s_patch / 2 and x < img.shape[1] - s_patch / 2 - 1 and y > s_patch / 2 and y < img.shape[0] - s_patch / 2):\n\n patch = img[y - (s_patch - 1) / 2:y + (s_patch - 1) / 2 + 1, x - (s_patch - 1) / 2:x + (s_patch - 1) / 2 + 1]\n x_patch = numpy.arange(s_patch)\n Gx = numpy.exp(-4 * numpy.log(2) * (x_patch - numpy.median(x_patch)) ** 2 / s)\n Gy = Gx[:, numpy.newaxis]\n dGx = -Gx * 4 * numpy.log(2) / s * 2 * (x_patch - numpy.median(x_patch))\n dGy = dGx[:, numpy.newaxis]\n d2Gx = -8 * numpy.log(2) / s * ((x_patch - numpy.median(x_patch)) * dGx + Gx)\n d2Gy = d2Gx[:, numpy.newaxis]\n\n Hxx = d2Gx * Gy\n Hyy = d2Gy * Gx\n Hxy = dGx * dGy\n\n d2x = (Hxx.ravel() * patch.ravel()).sum()\n d2y = (Hyy.ravel() * patch.ravel()).sum()\n dxy = (Hxy.ravel() * patch.ravel()).sum()\n H = numpy.array([[d2y, dxy], [dxy, d2x]])\n val, vect = numpy.linalg.eig(H)\n\n# print 'new point'\n# print x, y\n# print val\n# print vect\n# print numpy.dot(vect[0],vect[1])\n e = numpy.abs(val[0] - val[1]) / numpy.abs(val[0] + val[1])\n j += 1\n# print j\n# print e\n if numpy.abs(val[1]) < numpy.abs(val[0]): # reorganisation des valeurs propres et vecteurs propres\n val[0],val[1] = val[1],val[0]\n vect = vect[-1::-1,:]\n\n\n pylab.annotate(\"\", xy=(x + vect[0][0] * val[0], y + vect[0][1] * val[0]), xytext=(x, y),\n arrowprops=dict(facecolor='red', shrink=0.05),)\n\n pylab.annotate(\"\", xy=(x + vect[1][0] * val[1], y + vect[1][1] * val[1]), xytext=(x, y),\n arrowprops=dict(facecolor='red', shrink=0.05),)\n pylab.plot(x, y, 'og')\n vals.append(val)\n vects.append(vect)\n return vals, vects",
"def get_center_of_masses(self) -> np.array:\n com = np.average(self.obj[:, :2], weights=self.obj[:, 2], axis=0)\n return com"
] | [
"0.5753707",
"0.5689572",
"0.56501245",
"0.5601185",
"0.55973375",
"0.5538094",
"0.55155957",
"0.5512323",
"0.55011624",
"0.54460406",
"0.54402614",
"0.5406486",
"0.53141814",
"0.5306149",
"0.5304944",
"0.5300432",
"0.5297689",
"0.52513605",
"0.52495813",
"0.524905",
"0.52183276",
"0.52172506",
"0.52083564",
"0.52054435",
"0.52036667",
"0.51953834",
"0.51912653",
"0.5191088",
"0.5189153",
"0.518661"
] | 0.6110831 | 0 |
Computes the J-band absolute magnitudes for use in the Assef et al. (2011) luminosity function. We will use the observed apparent 3.6 um magnitude and assume a Polletta QSO2 SED for all objects to K-correct to the absolute FLAMINGOS J-band magnitude. Returns | def j_band_abs_mag(self):
# Load in the IRAC 3.6 um filter as the observed filter
irac_36 = SpectralElement.from_file(self._irac_filter, wave_unit=u.um)
flamingos_j = SpectralElement.from_file(self._j_band_filter, wave_unit=u.nm)
# We will use the official IRAC 3.6 um zero-point flux
irac_36_zp = 280.9 * u.Jy
for cluster_id, cluster_info in self._catalog_dictionary.items():
# Get the 3.6 um apparent magnitudes and photometric redshifts from the catalog
se_catalog = cluster_info['catalog']
irac_36_mag = se_catalog['I1_MAG_APER4']
galaxy_z = se_catalog['REDSHIFT']
# Given the observed IRAC 3.6 um photometry, compute the rest-frame J-band absolute (Vega) magnitude.
j_abs_mag = k_corr_abs_mag(apparent_mag=irac_36_mag, z=galaxy_z, f_lambda_sed=self._sed,
zero_pt_obs_band=irac_36_zp, zero_pt_em_band='vega', obs_filter=irac_36,
em_filter=flamingos_j, cosmo=self._cosmo)
# Store the J-band absolute magnitude in the catalog and update the data structure
se_catalog['J_ABS_MAG'] = j_abs_mag
cluster_info['catalog'] = se_catalog | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def j_band_abs_mag(self):\n\n # Load in the IRAC 3.6 um filter as the observed filter\n irac_36 = SpectralElement.from_file(self._irac_filter, wave_unit=u.um)\n flamingos_j = SpectralElement.from_file(self._j_band_filter, wave_unit=u.nm)\n\n # We will use the official IRAC 3.6 um zero-point flux\n irac_36_zp = 280.9 * u.Jy\n\n for cluster_id, cluster_info in self._catalog_dictionary.items():\n # Retrieve the cluster redshift from the SPT catalog\n catalog_idx = cluster_info['SPT_cat_idx']\n cluster_z = self._spt_catalog['REDSHIFT'][catalog_idx]\n\n # Get the 3.6 um apparent magnitudes from the catalog\n se_catalog = cluster_info['catalog']\n irac_36_mag = se_catalog['I1_MAG_APER4']\n\n # Given the observed IRAC 3.6 um photometry, compute the rest-frame J-band absolute (Vega) magnitude.\n j_abs_mag = k_corr_abs_mag(apparent_mag=irac_36_mag, z=cluster_z, f_lambda_sed=self._sed,\n zero_pt_obs_band=irac_36_zp, zero_pt_em_band='vega', obs_filter=irac_36,\n em_filter=flamingos_j, cosmo=self._cosmo)\n\n # Store the J-band absolute magnitude in the catalog and update the data structure\n se_catalog['J_ABS_MAG'] = j_abs_mag\n cluster_info['catalog'] = se_catalog",
"def calc_jhk_mag(self, data):\n\n # Pull all the magnitudes from the series\n self._all_queried_mag_series = data.loc[GSC_BAND_NAMES]\n\n # Pull magnitude errors for each band, and replace missing errors with 2.5% of the magnitude value\n mag_err_list = [self.gsc_series[ind + 'Err'] if self.gsc_series[ind + 'Err'] != -999\n else self._all_queried_mag_series[ind] * BAND_ERR for ind in self._all_queried_mag_series.index]\n self._all_queried_mag_err_series = pd.Series(mag_err_list, index=self._all_queried_mag_series.index + 'Err')\n\n # List of the magnitude names that are not fill values in the series\n self._present_queried_mags = list(self._all_queried_mag_series[self._all_queried_mag_series != -999].index)\n\n # Dictionary of convert methods\n method_list = []\n for i in ['tmassJmag', 'tmassHmag', 'tmassKsMag']:\n switcher = OrderedDict([\n (i, 'convert_tmass_to_jhk'),\n ('SDSSgMag, SDSSzMag', 'convert_sdssgz_to_jhk'),\n ('SDSSgMag, SDSSiMag', 'convert_sdssgi_to_jhk'),\n ('SDSSiMag, SDSSzMag', 'convert_sdssiz_to_jhk'),\n ('JpgMag, NpgMag', 'convert_gsc2bjin_to_jhk'),\n ('FpgMag, NpgMag', 'convert_gsc2rfin_to_jhk'),\n ('JpgMag, FpgMag', 'convert_gsc2bjrf_to_jhk'),\n ])\n\n # Pull the first entry in the OrderedDict that matches what values are present.\n for key, value in switcher.items():\n key_list = key.split(', ')\n if set(key_list).issubset(self._present_queried_mags):\n setattr(self, '{}_convert_method'.format(i[5].lower()), value)\n break\n if getattr(self, '{}_convert_method'.format(i[5].lower())) is None:\n raise ValueError('There is not enough information on this guide star to get its {} magnitude'.format(i))\n\n # Get the method\n method = getattr(conversions, getattr(self, '{}_convert_method'.format(i[5].lower())), lambda: \"Invalid\")\n method_list.append(method)\n\n # Create a new series with the edited data (in case uncertainties were replaced)\n edited_data_series = pd.concat([self._all_queried_mag_series, self._all_queried_mag_err_series])\n\n # Run conversions\n self.j_mag, self.j_mag_err = method_list[0](data=edited_data_series, output_mag='J')\n self.h_mag, self.h_mag_err = method_list[1](data=edited_data_series, output_mag='H')\n self.k_mag, self.k_mag_err = method_list[2](data=edited_data_series, output_mag='K')\n\n # Create new attribute with updated series\n self._all_calculated_mag_series = copy.deepcopy(self._all_queried_mag_series)\n self._all_calculated_mag_series.loc[['tmassJmag', 'tmassHmag', 'tmassKsMag']] = \\\n self.j_mag, self.h_mag, self.k_mag\n\n self._all_calculated_mag_err_series = copy.deepcopy(self._all_queried_mag_err_series)\n self._all_calculated_mag_err_series.loc[['tmassJmagErr', 'tmassHmagErr', 'tmassKsMagErr']] = \\\n self.j_mag_err, self.h_mag_err, self.k_mag_err\n\n self._present_calculated_mags = self._present_queried_mags + [a for a in\n ['tmassJmag', 'tmassHmag', 'tmassKsMag']\n if a not in self._present_queried_mags]\n\n return self.j_mag, self.j_mag_err, self.h_mag, self.h_mag_err, self.k_mag, self.k_mag_err",
"def magToJy(mag,emag,wband,zpFile=None):\n if zpFile == None:\n zpFile = Path(os.environ['SED_BUILDER']) / Path('zero_points.dat')\n zpWave, zpF0 = read_zp(zpFile)\n F0 = zpF0[wband]\n jy = (10**(-float(mag)/2.5))*F0\n if emag != '--':\n ejy = (float(emag)/2.5)*jy*log(10)\n else:\n ejy = np.nan\n \n return jy, ejy",
"def calc_magnitude(box,octant):\n # Read the Mi(z=2) magnitudes for the box.\n miz2 = FH.read_file(box)['Miz2'][:]\n # Read the index for each QSO in the octant, and get the Mi(z=2).\n data = FH.read_file(octant)\n zz = data['Z']\n dmod = data['DMOD']\n miz2 = miz2[data['INDX']]\n # Now convert to apparent i-band magnitude using the k-correction.\n # If a tabulated k-correction is available, use that, otherwise\n # default to a power-law continuum approximation.\n # See discussion in Ross++13, Appendix B and Section 4.\n kfile=os.getenv('MOCKINGDESI_BASE')+\"/data/qso-iband-k-correction.txt\"\n if os.path.exists(kfile):\n print(\"Using K-correction from \"+kfile)\n kcorr = np.loadtxt(kfile)\n kcorr = np.interp(zz,kcorr[:,1],kcorr[:,2])\n else:\n print(\"Using power-law K-correction\")\n alpha = -0.5\n kcorr = -2.5*(1+alpha)*np.log10( (1+zz)/(1+2.0) )\n gmi = np.poly1d([0.1502,-0.9886,2.147,-1.758,0.6397])\t# See notes.\n rmi = np.poly1d([-0.1482,1.636,-6.716,12.55,-10.39,3.017])\n magi = miz2 + dmod + kcorr\t# e.g. Ross++13, Eq. 5\n magg = magi + gmi(zz.clip(0.5,3.5))\n magr = magi + rmi(zz.clip(0.5,3.5))\n # and write the results\n data = {}\n data['GMAG'] = magg.astype('f4')\n data['RMAG'] = magr.astype('f4')\n FH.write_file(octant,data)\n #",
"def getMagnitudes(self):\n return self._bmag, self._vmag, self._jmag, self._hmag, self._kmag",
"def convert_F_vs_mag(value, F_0=None, band='H', system='Johnson', \n conversion='to_mag'): \n \n dico_zero_pts_Jo = {'U': [0.36,1823.],\n 'B': [0.44,4130.],\n 'V': [0.55,3781.],\n 'R': [0.71,2941.],\n 'I': [0.97,2635.],\n 'J': [1.25,1603.],\n 'H': [1.60,1075.],\n 'K': [2.22,667.],\n 'L': [3.54,288.],\n 'M': [4.80,170.],\n 'N': [10.6,36.],\n 'O': [21.0,9.4]}\n dico_zero_pts_2M = {'J': [1.235,1594.],\n 'H': [1.662,1024.],\n 'K': [2.159,666.7]}\n dico_zero_pts_UK = {'V': [0.5556,3540.], # TOKUNAGA (from Cohen 1992)\n 'I': [0.9,2250.], # UKIRT webpage\n 'J': [1.215,1630.], # TOKUNAGA (from Cohen 1992)\n 'H': [1.654,1050.], # TOKUNAGA (from Cohen 1992)\n 'Ks': [2.157,667.], # TOKUNAGA (from Cohen 1992)\n 'K': [2.179,655.], # TOKUNAGA (from Cohen 1992) \n 'L': [3.547,276.], # TOKUNAGA (from Cohen 1992) \n \"L'\": [3.761,248.], # TOKUNAGA (from Cohen 1992) \n 'M': [4.769,160.], # TOKUNAGA (from Cohen 1992) \n '8.7': [8.756,50.], # TOKUNAGA (from Cohen 1992) \n 'N': [10.472,35.3], # TOKUNAGA (from Cohen 1992) \n '11.7': [11.653,28.6], # TOKUNAGA (from Cohen 1992) \n 'Q': [20.13,9.7]} # TOKUNAGA (from Cohen 1992)\n dico_zero_pts_ESO = {'J': [1.228,3.44e-9], # van der Bliek 1996\n 'H': [1.651,1.21e-9], # van der Bliek 1996\n 'K': [2.216,4.12e-10], # van der Bliek 1996\n \"L'\": [3.771,5.58e-11], # van der Bliek 1996\n \"M\": [4.772,2.21e-11]} # van der Bliek 1996 \n \n if F_0 is None:\n if system == 'Johnson' and band in dico_zero_pts_Jo:\n dico_F_0 = dico_zero_pts_Jo\n elif system == '2MASS' and band in dico_zero_pts_2M:\n dico_F_0 = dico_zero_pts_2M\n elif system == 'UKIRT' and band in dico_zero_pts_UK:\n dico_F_0 = dico_zero_pts_UK\n elif system == 'ESO' and band in dico_zero_pts_UK:\n dico_F_0 = dico_zero_pts_ESO \n else:\n msg = 'Combination of band name and band system not recognized.'\n raise TypeError(msg)\n F_0 = dico_F_0[band][1]\n if system == 'ESO':\n # convert from W m-2 mu-1 to Jy\n F_0 = convert_F_units(F_0, dico_F_0[band][0], in_unit='si', \n out_unit='jy')\n \n if conversion == 'to_mag':\n return -2.5*np.log10(value/F_0)\n elif conversion == 'to_flux':\n return F_0*np.power(10.,-value/2.5)\n else:\n msg = \"conversion not recognized, must be 'to_mag' or 'to_flux'.\"\n raise TypeError(msg)",
"def get_phi_lam_obs(z, qlf, lLfrac_lam_obs_min, lLfrac_lam_obs_max, lam_eff_filter):\n\n #Start by getting the value of Lstar in units of 10^10 Lsun, which will be useful later on.\n Lstar = 10.**(qlf.log_Lstar(z))*qlf.Lstar_units\n Lstar_10 = (Lstar/(1e10*L_sun)).to(1.).value\n\n #Set the grid in bolometric L/Lstar.\n lLfrac_min = -3.0\n lLfrac_max = 3.0 #10.0\n dlLfrac = 0.01\n lLfrac = np.arange(lLfrac_min,lLfrac_max,dlLfrac)\n Lfrac = 10.**lLfrac\n\n #Get the bolometric QLF evaluated in the grid of Lfrac.\n phi_bol = qlf.phi_bol_Lfrac(Lfrac, z)\n\n #Transform the bolometric QLF to the intrinsic luminosity QLF in the band. We assume that the bolometric correction in all bands of interest is proportional to the one in the B-band, as is done in the Hopkins07 provided code.\n phi_lam = phi_bol*jacobian(Lfrac, Lstar_10, qlf)\n Lfrac_lam = get_Lfrac_lam(Lfrac, Lstar_10, qlf)\n lLfrac_lam = np.log10(Lfrac_lam)\n #dlLfrac_lam = dlLfrac/jacobian(Lfrac, Lstar_10, qlf)\n\n #Since there is a natural dispersion to the bolometric corrections, we convolve phi_lam with the uncertainty function to take it into account.\n phi_lam_2D = np.tile(phi_lam, (len(phi_lam), 1))\n sigma = qlf.get_sigma(Lfrac, Lstar_10, lam_eff_filter/(1.+z))\n lLfrac_lam_sig = lLfrac_lam\n sigma_2D = np.tile(sigma, (len(sigma), 1))\n lLfrac_lam_2D = np.tile(lLfrac_lam, (len(lLfrac_lam), 1))\n lLfrac_lam_sig_2D = np.tile(lLfrac_lam_sig, (len(lLfrac_lam), 1)).T\n\n p = (2.*np.pi)**-0.5 * sigma_2D**-1 * np.exp( -0.5*( (lLfrac_lam_sig_2D - lLfrac_lam_2D)/sigma_2D)**2)\n\n phi_lam_sig = np.sum(phi_lam_2D*p * dlLfrac, axis=1)\n\n #The next step is to convolve with the obscuration function. The issue here is that the observed luminosity in the band is a function of the intrinsic luminosity and the obscuration.\n lNH_min = 20.\n lNH_max = 26.\n dlNH = 0.01\n lNH = np.arange(lNH_min, lNH_max, dlNH)\n\n #Following the approach of the Shen20 pubtools, we will now calculate phi_lam_obs for the same luminosity fractions for which we have phi_lam.\n lLfrac_lam_obs_grid = lLfrac_lam_sig\n\n #Determine the obscuration function in the observed band.\n ltheta_fact = 0.4*qlf.dgr(z).to(u.cm**2).value*1e22 * qlf.xi(lam_eff_filter/(1.+z))\n ltheta = 10.**(lNH-22) * ltheta_fact\n ltheta_2D = np.tile(ltheta, [len(lLfrac_lam_obs_grid), 1])\n\n #For each NH, we will need to evaluate the unreddened QLF at a luminosity of lLfrac_lam_obs_grid + ltheta. So let's build it as a 2D array in which each row has the same lLfrac_lam_obs_grid value modified by the reddening correction (i.e., unreddened assuming different levels of obscuration).\n lLfrac_lam_sig_eval_2D = np.tile(lLfrac_lam_obs_grid, [len(lNH), 1]).T + ltheta_2D\n\n #Now, evaluate the f_NH function, following the S20 pubtools. Note: I think this actually wrong. f_NH should be evaluated at the intrinsic luminosity fraction of the reddening corrected luminosity. 
Here, we just assume that the same intrinsic lLfrac corresponds to the observed lLfrac_lam_obs_grid value for all NHs.\n lLfrac_eval_2D = np.tile(lLfrac, [len(lNH),1]).T\n log_NH_2D = np.tile(lNH, [len(lLfrac_lam_obs_grid), 1])\n f_NH = qlf.fNH(log_NH_2D, lLfrac_eval_2D, Lstar_10, z)\n\n #Extrapolate phi_lam_sig so that we can evaluate it in the new positions.\n log_phi_lam_sig_interp = interp1d(lLfrac_lam_sig, np.log10(phi_lam_sig.value), kind='linear', fill_value = 'extrapolate')\n\n #Evaluate it an produce phi_lam_obs_grid by integrating over f_NH dlNH.\n phi_lam_sig_eval_2D = 10.**(log_phi_lam_sig_interp(lLfrac_lam_sig_eval_2D))\n phi_lam_obs_grid= np.sum(phi_lam_sig_eval_2D * f_NH * dlNH, axis=1)\n\n #Now, this is the output grid we actually want.\n nlLfrac_lam_obs = 100\n dlLfrac_lam_obs = (lLfrac_lam_obs_max-lLfrac_lam_obs_min)/nlLfrac_lam_obs\n if dlLfrac_lam_obs > 0.1:\n dlLfrac_lam_obs = 0.1\n lLfrac_lam_obs = np.arange(lLfrac_lam_obs_min, lLfrac_lam_obs_max + 0.1*dlLfrac_lam_obs, dlLfrac_lam_obs)\n\n #Interpolate/extrapolate phi_lam_obs to put it in the required output grid and return the resulting QLF.\n lphi_lam_obs_interp = interp1d(lLfrac_lam_obs_grid, np.log10(phi_lam_obs_grid), fill_value='extrapolate')\n phi_lam_obs = 10.**(lphi_lam_obs_interp(lLfrac_lam_obs))*phi_lam_sig.unit\n return phi_lam_obs, dlLfrac_lam_obs*u.dex",
"def magnitude(frame):\n sobelx = lambda im: cv2.Sobel(im, cv2.CV_64F, 1, 0, ksize=3)\n sobely = lambda im: cv2.Sobel(im, cv2.CV_64F, 0, 1, ksize=3)\n dxabs = cv2.convertScaleAbs(sobelx(frame))\n dyabs = cv2.convertScaleAbs(sobely(frame))\n\n return cv2.addWeighted(dxabs, 0.5, dyabs, 0.5, 0)",
"def app_mag(abs_mag, phase_angle, slope_g, d_ast_sun, d_ast_earth):\n\n # Compute the apparent / visual magnitude\n mag = red_mag(abs_mag, phase_angle, slope_g) \\\n + 5.0 * np.log10(d_ast_sun * d_ast_earth)\n\n # Return the apparent magnitude\n return mag",
"def AB_zero_mag(self):\n if self.wavelength_unit is None:\n raise AttributeError('Needs wavelength units')\n\n C1 = (Unit(self.wavelength_unit).to('AA') ** 2 /\n Constants.c.to('AA/s').value)\n c1 = self._lpivot ** 2 * C1\n\n m = 2.5 * np.log10(_drop_units(c1)) + 48.6\n return m",
"def make_sq(mlat, dAB, *J):\n if (len(J)!=4):\n print(\"Number of paramaters are exceeded 5!\")\n NN = 2*mlat\n \n tau = np.zeros((NN,NN), dtype=complex)\n h = np.zeros((NN,NN), dtype=complex)\n \n for i in range(mlat-1):\n if (i%2==0):\n h[i,i] = dAB/2. # on-site energy\n h[mlat+i,mlat+i] = -dAB/2. # on-site energy \n h[i, mlat+i] = J[0]\n h[i, i+1] = J[1]\n h[mlat+i, mlat+i+1] = J[3]\n #\n tau[mlat+i, i] = J[2]\n elif (i%2==1):\n h[i,i] = -dAB/2. # on-site energy\n h[mlat+i,mlat+i] = dAB/2. # on-site energy \n h[i, mlat+i] = J[2]\n h[i, i+1] = J[3]\n h[mlat+i, mlat+i+1] = J[1]\n #\n tau[mlat+i, i] = J[0]\n\n # End of loop over lattice sites\n\n # The upper edge site\n if (mlat-1 % 2==0):\n h[mlat-1, mlat-1] = dAB/2. # on-site energy\n h[NN-1,NN-1] = -dAB/2. # on-site energy \n h[mlat-1, NN-1] = J[0]\n #\n tau[NN-1, mlat-1] = J[2]\n elif (mlat-1 % 2==1):\n h[mlat-1, mlat-1] = -dAB/2. # on-site energy\n h[NN-1,NN-1] = dAB/2. # on-site energy \n h[mlat-1, NN-1] = J[2]\n #\n tau[NN-1, mlat-1] = J[0] \n \n h = h + h.conj().T # make it hermitian\n return h, tau",
"def AB_zero_Jy(self):\n c = 1e-8 * Constants.c.to('m/s').value\n f = 1e5 / c * self.lpivot.to('AA').value ** 2 * self.AB_zero_flux.value\n return f * Unit('Jy')",
"def glueEmH( Ja, Jf, truncNum = scipy.inf ):\n w, v = truncBasisH( Ja, truncNum )\n sPlus, sMinus, sZ = sPlusAndMinusAndZ( v )\n \n H1 = scipy.zeros( ( len(w)**4, len(w)**4 ) )\n \n for n in range( len(w)**4 ):\n # Diagonal previous generation contributions\n o = oct(n)[-4:].zfill(4)\n o = [int(char) for char in o]\n o_A, o_B, o_C, o_D = o\n \n H1[n, n] += scipy.sum( [ w[ i ] for i in o ] )\n \n # Edge terms\n for np in range( n, len(w)**4 ):\n op = oct(np)[-4:].zfill(4)\n op = [int(char) for char in op]\n op_A, op_B, op_C, op_D = op\n \n x = 0.\n if ( (o_B == op_B) and (o_C == op_C) ):\n x += -Jf * ( .5 * ( sPlus[0][o_A, op_A] * sMinus[0][o_D, op_D] + sMinus[0][o_A, op_A] * sPlus[0][o_D,op_D] ) + sZ[0][o_A, op_A] * sZ[0][o_D, op_D] )\n if ( (o_C == op_C) and (o_A == op_A) ):\n x += -Jf * ( .5 * ( sPlus[1][o_B, op_B] * sMinus[1][o_D, op_D] + sMinus[1][o_B, op_B] * sPlus[1][o_D,op_D] ) + sZ[1][o_B, op_B] * sZ[1][o_D, op_D] )\n if ( (o_A == op_A) and (o_B == op_B) ):\n x += -Jf * ( .5 * ( sPlus[2][o_C, op_C] * sMinus[2][o_D, op_D] + sMinus[2][o_C, op_C] * sPlus[1][o_D,op_D] ) + sZ[1][o_C, op_C] * sZ[2][o_D, op_D] )\n \n H1[n, np] = x\n H1[np, n] = x\n \n return H1",
"def _flux_unc_as_mags(fluxes, uncs):\n uncs_mag = np.empty(len(fluxes))\n\n # fluxes-uncs case\n indxs, = np.where(fluxes - uncs <= 0)\n if len(indxs) > 0:\n uncs_mag[indxs] = -2.5*np.log10(fluxes[indxs]\n / (fluxes[indxs] + uncs[indxs]))\n\n # normal case\n indxs, = np.where(fluxes - uncs > 0)\n if len(indxs) > 0:\n uncs_mag[indxs] = -2.5*np.log10((fluxes[indxs] - uncs[indxs])\n / (fluxes[indxs] + uncs[indxs]))\n\n return uncs_mag",
"def amplitude(magnitudes):\n ampl = 0.5 * (np.max(magnitudes) - np.min(magnitudes))\n\n return ampl",
"def test_filt_abmag(self):\n sun = Sun.from_builtin('E490_2014')\n V = get_bandpass('johnson v')\n wave, fluxd = sun.filt(V, unit=u.ABmag)\n assert np.isclose(fluxd.value, -26.77, atol=0.007)",
"def openMANGASpectrum(self, path_to_logcube, path_to_dapall, bin_number, plate_number, ifu_number, emlines,mpl='mpl-9'):\n\t\t\n\t\t# Read in MAPS file as this contains part of the information.\n\t\tmaps_header = pyfits.open(self.path_to_spectrum)\n\t\tbin_identification = maps_header['BINID'].data\n\t\twhere = np.where(bin_number == bin_identification[0,:,:]) #use 1st channel of bin_identification\n\t\tx_position, y_position = where[0][0], where[1][0]\n\t\t\n\t\t# Get S/N, right ascension and declination.\n\t\tsignal, ra, dec = maps_header['BIN_SNR'].data[x_position,y_position], maps_header[0].header['OBJRA'],maps_header[0].header['OBJDEC']\n\t\tvelocity_dispersion = maps_header['STELLAR_SIGMA'].data \t\t\t\t\n\t\tvelocity_dispersion_correction = maps_header['STELLAR_SIGMACORR'].data[0,:,:]\n\t\t\n\t\tif velocity_dispersion[x_position,y_position] > velocity_dispersion_correction[x_position,y_position]:\n\t\t\tcorrection = np.sqrt((velocity_dispersion[x_position,y_position])**2-(velocity_dispersion_correction[x_position,y_position])**2)\n\t\t\tvdisp = correction\n\t\telse:\n\t\t\tvdisp = 0\n\n\t\t\n\t\t# Open LOGCUBE to get the flux, wavelength, and error\n\t\theader = pyfits.open(path_to_logcube)\n\t\twavelength, flux, emline, bit_mask, inverse_variance = header['WAVE'].data, header['FLUX'].data, header['EMLINE'].data, header['MASK'].data, header['IVAR'].data\n\t\tself.wavelength = wavelength\n\t\tcorrect_flux = flux[:,x_position,y_position]\n\t\tcorrect_flux_emline = emline[:, x_position, y_position]\n\t\toutput_flux = correct_flux - correct_flux_emline\n\t\tcorrect_inverse_variance = inverse_variance[:, x_position, y_position]\n\t\t\n\t\tLSF = header['LSF'].data[:,x_position,y_position]\t\t# LSF given as sigma of Gaussian in Angstrom\n\t\tsig2fwhm = 2.0 * np.sqrt(2.0 * np.log(2.0))\n\t\tLSF_FWHM = LSF*sig2fwhm\n\t\tRES = wavelength/LSF_FWHM\n\t\t\n\t\tself.r_instrument = RES\n\t\tself.error = np.sqrt(1.0/(correct_inverse_variance))\n\t\tself.bad_flags = np.ones(len(output_flux))\n\t\tself.flux = output_flux\n\t\tself.vdisp = vdisp\n\n\t\tif (mpl=='mpl-10') or (mpl=='mpl-11'):\n\t\t\text=2\n\t\telse:\n\t\t\text=1\n\t\t\n\t\tdap_all = pyfits.open(path_to_dapall)\n\t\tget = np.where(dap_all[ext].data['PLATEIFU']==str(plate_number)+'-'+str(ifu_number))\n\t\tc = const.c.value/1000\n\t\t# Use redshift as measured from the stellar kinematics by the DAP.\n\t\tredshift = dap_all[ext].data['STELLAR_Z'][get][0]\n\t\t# If redshift measurement failed, use redshift estimate from NSA or ancillary programs.\n\t\tif redshift<0:\n\t\t\tredshift = dap_all[ext].data['Z'][get][0]\n\t\t\t\n\t\tsys_vel = maps_header[0].header['SCINPVEL']\n\t\tbin_vel = maps_header['STELLAR_VEL'].data[x_position,y_position]\t\n\t\t\t\n\t\tif redshift<0:\n\t\t\tprint('WARNING: The redshift of this object is negative.')\n\t\t\tprint('z = {}'.format(redshift))\n\t\t\n\t\tredshift_corr = (sys_vel+bin_vel)/c\n\t\tself.redshift = redshift\n\t\tself.restframe_wavelength = self.wavelength / (1.0+redshift_corr)\n\n\t\tbitmask = bit_mask[:,x_position,y_position]&2**0+2**1+2**2+2**3+2**4\n\t\tself.mask_emissionlines(emlines)\n\t\tself.final_mask = (bitmask | self.lines_mask)\n\n\t\tself.wavelength = self.wavelength[(self.final_mask==False)] \n\t\tself.restframe_wavelength = self.restframe_wavelength[(self.final_mask==False)] \n\t\tself.flux = self.flux[(self.final_mask==False)] \n\t\tself.error = self.error[(self.final_mask==False)]\n\t\tself.bad_flags = self.bad_flags[(self.final_mask==False)]\n\t\t\t\t\t\n\t\t# Get Trust flag, 
object_id, xpos, ypos and instrumental resolution.\n# \t\tself.trust_flag, self.objid, self.r_instrument = True, 0, np.loadtxt(os.path.join(os.environ['FF_DIR'],'data/MaNGA_spectral_resolution.txt'))\n\t\tself.trust_flag, self.objid= True, 0\n# \t\tself.r_instrument = self.r_instrument[0:self.r_instrument.shape[0]//2]\n\t\tself.r_instrument = self.r_instrument[(self.final_mask==False)]\n\t\tself.xpos, self.ypos = ra, dec\n\t\t\n\t\t# gets the amount of MW reddening on the models\n\t\tif self.milky_way_reddening :\n\t\t\tself.ebv_mw = get_dust_radec(ra, dec, 'ebv')\n\t\telse:\n\t\t\tself.ebv_mw = 0.0",
"def mab0(self):\n return WISE_INFO[self.bandname][\"ABmag0\"]",
"def update_mag(self, mags):\n self.log.mag(mags)\n q = self.quaternion()\n roll, pitch, heading = self.es\n\n mag_inertial = (q * quaternion.Quaternion.from_vec(np.array(mags)) * q.inv()).as_ndarray()[1:]\n mag_inertial[2] = 0\n mag_inertial /= sqrt(mag_inertial[0]**2 + mag_inertial[1]**2)\n mag_body = (q.inv() * quaternion.Quaternion.from_vec(mag_inertial) * q).as_ndarray()[1:]\n\n h = (q.inv() * quaternion.Quaternion.from_vec(np.array([1.0, 0, 0])) * q).as_ndarray()[1:]\n y = np.vstack(mag_body) - np.vstack(h)\n \n H = np.zeros((3, 9))\n ch2 = np.cos(heading/2)\n cr2 = np.cos(roll/2)\n sh2 = np.sin(heading/2)\n sr2 = np.sin(roll/2)\n H[0, 0] = 0\n H[0, 1] = 0\n H[0, 2] = -2.0*ch2*cr2**2*sh2/(1.0*ch2**2*cr2**2 + 1.0*ch2**2*sr2**2 + 1.0*cr2**2*sh2**2 + 1.0*sh2**2*sr2**2) - 2.0*ch2*sh2*sr2**2/(1.0*ch2**2*cr2**2 + 1.0*ch2**2*sr2**2 + 1.0*cr2**2*sh2**2 + 1.0*sh2**2*sr2**2)\n H[1, 0] = 4.0*ch2*cr2*sh2*sr2/(1.0*ch2**2*cr2**2 + 1.0*ch2**2*sr2**2 + 1.0*cr2**2*sh2**2 + 1.0*sh2**2*sr2**2)\n H[1, 1] = 2.0*ch2**2*cr2*sr2/(1.0*ch2**2*cr2**2 + 1.0*ch2**2*sr2**2 + 1.0*cr2**2*sh2**2 + 1.0*sh2**2*sr2**2) - 2.0*cr2*sh2**2*sr2/(1.0*ch2**2*cr2**2 + 1.0*ch2**2*sr2**2 + 1.0*cr2**2*sh2**2 + 1.0*sh2**2*sr2**2)\n H[1, 2] = -1.0*ch2**2*cr2**2/(1.0*ch2**2*cr2**2 + 1.0*ch2**2*sr2**2 + 1.0*cr2**2*sh2**2 + 1.0*sh2**2*sr2**2) + 1.0*ch2**2*sr2**2/(1.0*ch2**2*cr2**2 + 1.0*ch2**2*sr2**2 + 1.0*cr2**2*sh2**2 + 1.0*sh2**2*sr2**2) + 1.0*cr2**2*sh2**2/(1.0*ch2**2*cr2**2 + 1.0*ch2**2*sr2**2 + 1.0*cr2**2*sh2**2 + 1.0*sh2**2*sr2**2) - 1.0*sh2**2*sr2**2/(1.0*ch2**2*cr2**2 + 1.0*ch2**2*sr2**2 + 1.0*cr2**2*sh2**2 + 1.0*sh2**2*sr2**2)\n\n S = H.dot(self.P).dot(H.T) + self.Rmag\n K = self.P.dot(H.T).dot(np.linalg.inv(S))\n x = self.state_vec() + K.dot(y)\n\n self.P = (np.eye(9) - K.dot(H)).dot(self.P)\n self.set_state_vec(x)",
"def calculate_magnitude(self, band, system='AB'):\n\n if system not in ('AB', 'Vega'):\n raise ValueError('`system` must be one of `AB` or `Vega`')\n\n f1 = self.calculate_flux(band)\n\n if f1 > 0:\n magnitude = -2.5 * log10(f1 / band.flux[system])\n\n if system == 'Vega':\n # Add 0.026 because Vega has V = 0.026:\n magnitude += 0.026\n\n else:\n magnitude = np.inf\n\n return magnitude",
"def J_over_JUV_avg_slab(tau_SF):\n \n return 1.0/tau_SF*(1.0 - (0.5 - expn(3,tau_SF))/tau_SF)",
"def mab0(self):\n return GALEX_INFO[self.bandname][\"ABmag0\"]",
"def illuminator_of_elfes():\n\n\t# Alpha - simplified by taking out the i by multiplying the outerproduct by 2i\n\talpha1i = np.matrix([[0, 0, 0, 2], [0, 0, 2, 0], [0, -2, 0, 0], [-2, 0, 0, 0]])\n\talpha2i = np.matrix([[0, 2, 0, 0], [-2, 0, 0, 0], [0, 0, 0, 2], [0, 0, -2, 0]])\n\talpha3i = np.matrix([[0, 0, 2, 0], [0, 0, 0, -2], [-2, 0, 0, 0], [0, 2, 0, 0]])\n\n\t# Betas - simplified by taking out the i by multiplication of outerprod by 2i\n\tbeta1i = np.matrix([[0, 0, 0, 2], [0, 0, -2, 0], [0, 2, 0, 0], [-2, 0, 0, 0]])\n\tbeta2i = np.matrix([[0, 0, 2, 0], [0, 0, 0, 2], [-2, 0, 0, 0], [0, -2, 0, 0]])\n\tbeta3i = np.matrix([[0, 2, 0, 0], [-2, 0, 0, 0], [0, 0, 0, -2], [0, 0, 2, 0]])\n\n\t# print(\"alpha 1\")\n\t# print(alpha1i)\n\t# print(\"\")\n\t# print(\"alpha 2\")\n\t# print(alpha2i)\n\t# print(\"\")\n\t# print(\"alpha 3\")\n\t# print(alpha3i)\n\t# print(\"\")\n\t# print(\"beta 1\")\n\t# print(beta1i)\n\t# print(\"\")\n\t# print(\"beta 2\")\n\t# print(beta2i)\n\t# print(\"\")\n\t# print(\"beta 3\")\n\t# print(beta3i)\n\t# print(\"\")\n\n\t# abperm_comb = [ np.multiply(alpha1i,-1), np.multiply(alpha2i,-1), np.multiply(alpha3i,-1), np.multiply(beta1i,-1), np.multiply(beta2i,-1), np.multiply(beta3i,-1)]\n\n\tabperm_comb = [alpha1i, alpha2i, alpha3i, beta1i, beta2i, beta3i]\n\treturn abperm_comb",
"def compute_radiocore_luminosity(MBH, L_AGN):\n\tL_X = bolcorr_hardX(L_AGN)\n\tm = log10(MBH / u.Msun)\n\t# Merloni, Heinz & Di Matteo (2003)\n\tlogLR = 0.6 * log10(L_X/(u.erg/u.s)) + 0.78 * m + 7.33\n\treturn 10**logLR * u.erg/u.s",
"def _LSST_uncertainties(self, mag, five_sigma_mag, band):\n sigma_sys = 0.005\n if band == \"u\":\n gamma = 0.038\n else:\n gamma = 0.039\n\n x = 10**(0.4*(mag-five_sigma_mag))\n sigma = np.sqrt(sigma_sys**2 + (0.04-gamma)*x + gamma*x**2)\n return sigma",
"def mag_to_flux(mag, mag_zp):\n return 10 ** (-0.4 * (mag - mag_zp))",
"def AB2(Jab, Vab, Vcentr, normalize=True):\n # There is a disconnect between the variable names in the WINDNMR GUI and\n # the variable names in this function.\n # The following code provides a temporary interface until this is\n # refactored.\n J, dV, Vab = Jab, Vab, Vcentr\n\n # Also, inconsistencies in WINDNMR GUI, internal WINDNMR code, and Pople\n # equations require a conversion.\n dV = -dV\n va = Vab + (dV / 2)\n vb = va - dV\n\n Jmod = J * (3 / 4)\n C_plus = sqrt(dV**2 + dV * J + (9 / 4) * (J**2)) / 2\n C_minus = sqrt(dV**2 - dV * J + (9 / 4) * (J**2)) / 2\n cos2theta_plus = (dV / 2 + J / 4) / C_plus\n cos2theta_minus = (dV / 2 - J / 4) / C_minus\n sintheta_plus = sqrt((1 - cos2theta_plus) / 2)\n sintheta_minus = sqrt((1 - cos2theta_minus) / 2)\n costheta_plus = sqrt((1 + cos2theta_plus) / 2)\n costheta_minus = sqrt((1 + cos2theta_minus) / 2)\n sin_dtheta = sintheta_plus * costheta_minus - costheta_plus * sintheta_minus\n cos_dtheta = costheta_plus * costheta_minus + sintheta_plus * sintheta_minus\n\n # In Pople, Schneider and Bernstein, Table 6-8:\n # V1-V4 are \"Origin: A\";\n # V5-V8 are \"Origin: B\";\n # V9 is \"Origin: Comb.\"\n V1 = Vab + Jmod + C_plus\n V2 = vb + C_plus + C_minus\n V3 = va\n V4 = Vab - Jmod + C_minus\n V5 = vb + C_plus - C_minus\n V6 = Vab + Jmod - C_plus\n V7 = vb - C_plus + C_minus\n V8 = Vab - Jmod - C_minus\n V9 = vb - C_plus - C_minus\n\n I1 = (sqrt(2) * sintheta_plus - costheta_plus) ** 2\n I2 = (sqrt(2) * sin_dtheta + costheta_plus * costheta_minus) ** 2\n I3 = 1\n I4 = (sqrt(2) * sintheta_minus + costheta_minus) ** 2\n I5 = (sqrt(2) * cos_dtheta + costheta_plus * sintheta_minus) ** 2\n I6 = (sqrt(2) * costheta_plus + sintheta_plus) ** 2\n I7 = (sqrt(2) * cos_dtheta - sintheta_plus * costheta_minus) ** 2\n I8 = (sqrt(2) * costheta_minus - sintheta_minus) ** 2\n I9 = (sqrt(2) * sin_dtheta + sintheta_plus * sintheta_minus) ** 2\n vList = [V1, V2, V3, V4, V5, V6, V7, V8, V9]\n IList = [I1, I2, I3, I4, I5, I6, I7, I8, I9]\n\n if normalize:\n _normalize(IList, 3)\n return list(zip(vList, IList))",
"def V_magJupiter_2(alpha,a_p,d):\n V = 5.*np.log10(a_p*d) - 9.428 - 2.5*np.log10(1.0 - 1.507*(alpha/180.) - 0.363*(alpha/180.)**2. - 0.062*(alpha/180.)**3.+ 2.809*(alpha/180.)**4. - 1.876*(alpha/180.)**5.)\n return V",
"def msqi_ama(x, fs):\n \n # test ecg shape\n try:\n x.shape[1]\n except IndexError:\n x = x[:, np.newaxis]\n \n # Empirical values for the STFFT transformation\n win_size_sec = 0.125 #seconds\n win_over_sec = 0.09375 #seconds\n nfft_factor_1 = 16\n nfft_factor_2 = 4\n\n win_size_smp = int(win_size_sec * fs) #samples\n win_over_smp = int(win_over_sec * fs) #samples\n win_shft_smp = win_size_smp - win_over_smp\n\n # Computes Modulation Spectrogram\n modulation_spectrogram = ama.strfft_modulation_spectrogram(x, fs, win_size_smp, \n win_shft_smp, nfft_factor_1, 'cosine', nfft_factor_2, 'cosine' )\n \n # Find fundamental frequency (HR)\n # f = (0, 40)Hz\n ix_f_00 = (np.abs(modulation_spectrogram['freq_axis'] - 0)).argmin(0) \n ix_f_40 = (np.abs(modulation_spectrogram['freq_axis'] - 40)).argmin(0) + 1\n \n # Look for the maximum only from 0.6 to 3 Hz (36 to 180 bpm)\n valid_f_ix = np.logical_or(modulation_spectrogram['freq_mod_axis'] < 0.66 , modulation_spectrogram['freq_mod_axis'] > 3)\n \n # number of epochs\n n_epochs = modulation_spectrogram['power_modulation_spectrogram'].shape[2]\n \n msqi_vals = np.zeros(n_epochs)\n hr_vals = np.zeros(n_epochs)\n \n for ix_epoch in range(n_epochs):\n B = np.sqrt(modulation_spectrogram['power_modulation_spectrogram'][:, :, ix_epoch])\n \n # Scale to maximun of B\n B = B / np.max(B)\n \n # Add B in the conventional frequency axis from 0 to 40 Hz\n tmp = np.sum(B[ix_f_00:ix_f_40, :], axis=0)\n \n # Look for the maximum only from 0.6 to 3 Hz (36 to 180 bpm)\n tmp[valid_f_ix] = 0\n ix_max = np.argmax(tmp) \n freq_funda = modulation_spectrogram['freq_mod_axis'][ix_max] \n \n # TME\n tme = np.sum(B)\n \n eme = 0\n for ix_harm in range(1, 5):\n ix_fm = (np.abs(modulation_spectrogram['freq_mod_axis'] - (ix_harm * freq_funda) )).argmin(0) \n ix_b = int(round(.3125 / modulation_spectrogram['freq_mod_delta'] )) # 0.3125Hz, half lobe\n # EME\n eme = eme + np.sum(B[ 0 : ix_f_40, ix_fm - ix_b : ix_fm + ix_b + 1 ]) \n \n # RME\n rme = tme - eme\n # MS-QI\n msqi_vals[ix_epoch] = eme / rme\n # HR\n hr_vals[ix_epoch] = freq_funda * 60\n \n return (msqi_vals, hr_vals, modulation_spectrogram)",
"def AB_zero_flux(self):\n return 10 ** (-0.4 * self.AB_zero_mag) * Unit('erg*s**-1*cm**-2*AA**-1')"
] | [
"0.7608049",
"0.59964824",
"0.58822817",
"0.57833415",
"0.56102043",
"0.55912673",
"0.5488814",
"0.5432965",
"0.54048514",
"0.53869367",
"0.53587365",
"0.5332131",
"0.5304899",
"0.528779",
"0.52466476",
"0.52075094",
"0.5202391",
"0.5187904",
"0.51815224",
"0.51740694",
"0.5161537",
"0.51554817",
"0.51383597",
"0.51382136",
"0.5124399",
"0.5118826",
"0.5099395",
"0.5082007",
"0.50409293",
"0.50392854"
] | 0.7629013 | 0 |
Generates two-dimensional training data from two classes. The number of examples in the data is "example_size". Each example is randomly generated from one of two classes. Data belonging to each class are generated using two two-dimensional Gaussian distributions with different means and covariance matrices. | def generate_data(example_size):
# A placeholder for data.
x = np.zeros((example_size, 2))
# Randomly selects one of two classes for each example.
class_id = np.random.randint(2, size=example_size)
# Generates data for the class 0.
class_0_idx = np.where(class_id == 0)
mean0 = [0.0, 1.0]
cov0 = [[0.4, 0.0], [0.0, 1.0]]
x[class_0_idx[0], :] = np.random.multivariate_normal(
mean0, cov0, class_0_idx[0].shape[0])
    # Generates data for the class 1.
class_1_idx = np.where(class_id == 1)
mean1 = [1.0, 2.0]
cov1 = [[1.0, 0.0], [0.0, 0.4]]
x[class_1_idx[0], :] = np.random.multivariate_normal(
mean1, cov1, class_1_idx[0].shape[0])
return (x, class_id) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_data(num_sample=None):\n I = np.eye(3, dtype=np.float32)\n\n\n if (num_sample == None):\n num_sample = 100\n\n # Generate first class\n m1 = np.asarray([0.5, 0.5], dtype=np.float32)\n cov1 = np.asarray([[0.1, 0],\n [0, 0.1]], dtype=np.float32)\n data1 = rng.multivariate_normal(m1, cov1, num_sample)\n label1 = np.ones((num_sample), dtype=np.uint16) - 1\n label1 = I[label1,:]\n\n # Generate second class\n m2 = np.asarray([0.3,0.3], dtype=np.float32)\n cov2 = np.asarray([[0.5, 0], [0, 0.5]], dtype=np.float32)\n data2 = rng.multivariate_normal(m2, cov2, num_sample)\n label2 = np.ones((num_sample), dtype=np.uint16)\n label2 = I[label2, :]\n\n\n return (data1, label1, data2, label2)",
"def make_gmm_dataset(config='random', classes=10,dim=2,samples=10,spread = 1,\n shift=None, rotate=None, diagonal_cov=False, shuffle=True):\n means, covs, distribs = [], [], []\n _configd = gmm_configs[config]\n spread = spread if (config == 'random' or not 'spread' in _configd) else _configd['spread']\n shift = shift if (config == 'random' or not 'shift' in _configd) else _configd['shift']\n\n for i in range(classes):\n if config == 'random':\n mean = torch.randn(dim)\n cov = create_symm_matrix(1, dim, verbose=False).squeeze()\n elif config == 'star':\n mean = gmm_configs['star']['means'][i]\n cov = gmm_configs['star']['covs'][i]\n if rotate:\n mean = rot(mean, rotate)\n cov = rot_evecs(cov, rotate)\n\n if diagonal_cov:\n cov.masked_fill_(~torch.eye(dim, dtype=bool), 0)\n\n means.append(spread*mean)\n covs.append(cov)\n distribs.append(MultivariateNormal(means[-1],covs[-1]))\n\n X = torch.cat([P.sample(sample_shape=torch.Size([samples])) for P in distribs])\n Y = torch.LongTensor([samples*[i] for i in range(classes)]).flatten()\n\n if shift:\n print(X.shape)\n X += torch.tensor(shift)\n\n if shuffle:\n indx = torch.arange(Y.shape[0])\n print(indx)\n X = X[indx, :]\n Y = Y[indx]\n return X, Y, distribs",
"def _generate_data(self, x_data, y_data, max_seq_len, digits, seq_len,\n n_samples, use_one_hot, class_partition,\n upsample_control):\n # modify seq_len in case we do upsampling control\n if upsample_control:\n upsample_factor = seq_len\n seq_len = 1\n if not self.two_class:\n raise NotImplementedError()\n\n # construct all possible classes\n classes = [\"\".join(seq) for seq in \\\n itertools.product(\"01\", repeat=seq_len)]\n\n # get the right number of samples per class to get a balanced data set\n # with the desired n_samples.\n num = n_samples\n div = len(classes)\n n_samples_per_class = [num // div + (1 if x < num % div else 0) \\\n for x in range (div)]\n\n # find indices of samples with the wanted digit class\n y_data = [np.argmax(y) for y in y_data]\n digit_idx = []\n digit_idx.append(np.where(np.asarray(y_data) == digits[0])[0])\n digit_idx.append(np.where(np.asarray(y_data) == digits[1])[0])\n\n # generate samples for every class\n samples = []\n labels = []\n for i,c in enumerate(classes):\n this_label = i\n digits_to_sample = [int(c[i]) for i in range(len(c))]\n for s in range(n_samples_per_class[i]):\n this_sample = None\n for d in digits_to_sample:\n rand_idx = self._rstate.randint(len(digit_idx[d]))\n sample_idx = digit_idx[d][rand_idx]\n digit_sample = x_data[sample_idx]\n if this_sample is None:\n this_sample = digit_sample\n else:\n this_sample = np.vstack((this_sample,digit_sample)) \n samples.append(this_sample)\n labels.append(this_label)\n\n # if configured sort labels into 2 classes\n labels = np.asarray(labels)\n if self.two_class and not upsample_control:\n lbl_mask = np.isin(labels, class_partition)\n labels[~lbl_mask] = 0\n labels[lbl_mask] = 1\n\n if upsample_control:\n for i,s in enumerate(samples):\n # Initial timestep is absolute start position of digit. To\n # translate to a higher resolution image, we can just multiply\n # the abolute position vby the scaling factor.\n upsample = s[0,:]*upsample_factor\n for t in np.arange(1,s.shape[0]):\n # don't do upsampling at end of strokes or end of digits\n if all((s[t,2] == 0, s[t,3] == 0)):\n # Repeat original stroke \"upsample_factor\" times, such\n # that the relative stroke length is identical if\n # images are normalized to same resolution.\n for k in range(upsample_factor):\n upsample = np.vstack((upsample, s[t,:]))\n else:\n upsample = np.vstack((upsample, s[t,:]))\n samples[i] = upsample\n\n # structure output data\n out_data = labels.reshape(-1, 1)\n if use_one_hot:\n n_classes = 2**seq_len\n if self.two_class:\n n_classes = 2\n\n # FIXME We shouldn't call this method if the validation set size is\n # zero.\n if out_data.size == 0:\n out_data = np.matlib.repmat(out_data, 1, n_classes)\n else:\n # FIXME use internal method `_to_one_hot` and set required class\n # attributes beforehand.\n one_hot_encoder = OneHotEncoder(categories=[range(n_classes)])\n one_hot_encoder.fit(npm.repmat(np.arange(n_classes), 1, 1).T)\n out_data = one_hot_encoder.transform(out_data).toarray()\n\n if self.target_per_timestep:\n out_data = np.matlib.repmat(np.asarray(out_data), 1, max_seq_len)\n\n # structure input data\n in_data = np.zeros((n_samples,max_seq_len,4))\n sample_lengths = np.zeros(n_samples)\n for i,s in enumerate(samples):\n in_data[i,:s.shape[0],:] = s\n sample_lengths[i] = s.shape[0]\n\n in_data = self._flatten_array(in_data)\n\n return in_data, out_data, sample_lengths",
"def crescent_data(num_data=200, seed=default_seed):\r\n np.random.seed(seed=seed)\r\n sqrt2 = np.sqrt(2)\r\n # Rotation matrix\r\n R = np.array([[sqrt2 / 2, -sqrt2 / 2], [sqrt2 / 2, sqrt2 / 2]])\r\n # Scaling matrices\r\n scales = []\r\n scales.append(np.array([[3, 0], [0, 1]]))\r\n scales.append(np.array([[3, 0], [0, 1]]))\r\n scales.append([[1, 0], [0, 3]])\r\n scales.append([[1, 0], [0, 3]])\r\n means = []\r\n means.append(np.array([4, 4]))\r\n means.append(np.array([0, 4]))\r\n means.append(np.array([-4, -4]))\r\n means.append(np.array([0, -4]))\r\n\r\n Xparts = []\r\n num_data_part = []\r\n num_data_total = 0\r\n for i in range(0, 4):\r\n num_data_part.append(round(((i + 1) * num_data) / 4.))\r\n num_data_part[i] -= num_data_total\r\n part = np.random.normal(size=(num_data_part[i], 2))\r\n part = np.dot(np.dot(part, scales[i]), R) + means[i]\r\n Xparts.append(part)\r\n num_data_total += num_data_part[i]\r\n X = np.vstack((Xparts[0], Xparts[1], Xparts[2], Xparts[3]))\r\n\r\n Y = np.vstack((np.ones((num_data_part[0] + num_data_part[1], 1)), -np.ones((num_data_part[2] + num_data_part[3], 1))))\r\n return {'X':X, 'Y':Y, 'info': \"Two separate classes of data formed approximately in the shape of two crescents.\"}",
"def generate_synthetic_data(args):\n number_training_obeservations = args.ntr\n number_testing_obeservations = args.nte\n number_dimensions = args.nd\n mu = args.mu\n feature_model = args.feature_model\n outcome_model = args.outcome_model\n sigma_outcome = args.sigma_outcome\n number_environments = args.ne\n \n T_train = generate_T(number_training_obeservations)\n T_test = generate_T(number_testing_obeservations)\n\n X_train, X_test = generate_x(number_dimensions, T_train, T_test, mu, feature_model)\n \n train_potential_outcome, test_potential_outcome = generate_outcomes(outcome_model, feature_model, X_train, X_test, sigma_outcome)\n\n train_po_control = train_potential_outcome[:,0].reshape(number_training_obeservations,1)\n train_po_treatment = train_potential_outcome[:,1].reshape(number_training_obeservations,1)\n\n y_train = np.multiply(T_train , train_po_treatment) + np.multiply(1-T_train , train_po_control)\n\n return X_train, T_train, y_train, X_test, T_test, test_potential_outcome",
"def generate_mog_dataset():\n\n n_per_class = 100\n dim = 2\n n_gaussians = 4\n mus = [(0, 1), (-1, 0), (0, -1), (1, 0)]\n mus = [torch.tensor(m) for m in mus]\n var = 0.05\n\n inputs, labels = [], []\n\n for id in range(n_gaussians):\n # Generate input data by mu + x @ sqrt(cov)\n cov = np.sqrt(var) * torch.eye(dim)\n mu = mus[id]\n inputs.append(mu + torch.randn(n_per_class, dim) @ cov)\n\n # Labels\n labels.append(torch.tensor(n_per_class * [1.0 if id < 2 else 0.0]))\n\n return torch.cat(inputs, dim=0), torch.cat(labels, dim=0)",
"def synthetic_data(w, b, num_examples): #@save\n X = np.random.normal(0, 1, (num_examples, len(w)))\n #print(X)\n y = np.dot(X, w) + b\n y += np.random.normal(0, 0.01, y.shape)\n return X, y.reshape((-1, 1))",
"def generate_x(number_dimensions, T_train, T_test, mu, feature_model):\n number_training_obeservations = T_train.shape[0]\n number_testing_obeservations = T_test.shape[0]\n\n X_train = np.zeros((number_training_obeservations,number_dimensions))\n X_test = np.zeros((number_testing_obeservations,number_dimensions))\n\n mixture_indicator_train = generate_mixture_indicator(number_training_obeservations)\n mixture_indicator_test = generate_mixture_indicator(number_testing_obeservations)\n\n G = np.random.normal(0,1,(number_dimensions,number_dimensions))\n q, r = np.linalg.qr(G)\n\n mu1 = mu*np.ones(number_dimensions)\n mu2 = -mu*np.ones(number_dimensions)\n\n if feature_model == \"A\":\n eigenvalues1 = np.random.uniform(0,1,(number_dimensions,1))\n eigenvalues1 = np.sort(eigenvalues1, axis = 0)/np.sum(eigenvalues1)\n lambda1 = np.identity(number_dimensions)\n np.fill_diagonal(lambda1,eigenvalues1)\n cov1 = q@[email protected]\n\n for i in range(number_training_obeservations):\n if T_train[i] == 0:\n X_train[i,:] = np.random.multivariate_normal(mu1,cov1,1)\n else:\n X_train[i,:] = np.random.multivariate_normal(mu2,cov1,1)\n \n for i in range(number_testing_obeservations):\n if T_test[i] == 0:\n X_test[i,:] = np.random.multivariate_normal(mu1,cov1,1)\n else:\n X_test[i,:] = np.random.multivariate_normal(mu2,cov1,1)\n\n\n elif feature_model == \"B\":\n eigenvalues1 = np.random.uniform(0,1,(number_dimensions,1))\n eigenvalues1 = np.sort(eigenvalues1, axis = 0)/np.sum(eigenvalues1)\n lambda1 = np.identity(number_dimensions)\n np.fill_diagonal(lambda1,eigenvalues1)\n cov1 = q@[email protected]\n\n eigenvalues2 = np.random.uniform(0,1,(number_dimensions,1))\n eigenvalues2 = np.sort(eigenvalues2, axis = 0)[::-1]/np.sum(eigenvalues2)\n lambda2 = np.identity(number_dimensions)\n np.fill_diagonal(lambda2,eigenvalues2)\n cov2 = q@[email protected]\n\n\n for i in range(number_training_obeservations):\n if T_train[i] == 0:\n if mixture_indicator_train[i] == 0:\n X_train[i,:] = np.random.multivariate_normal(mu1,cov1,1)\n else:\n X_train[i,:] = np.random.multivariate_normal(mu1,cov2,1)\n else:\n if mixture_indicator_train[i] == 0:\n X_train[i,:] = np.random.multivariate_normal(mu2,cov1,1)\n else:\n X_train[i,:] = np.random.multivariate_normal(mu2,cov2,1)\n \n for i in range(number_testing_obeservations):\n if T_test[i] == 0:\n if mixture_indicator_test[i] == 0:\n X_test[i,:] = np.random.multivariate_normal(mu1,cov1,1)\n else:\n X_test[i,:] = np.random.multivariate_normal(mu1,cov2,1)\n else:\n if mixture_indicator_test[i] == 0:\n X_test[i,:] = np.random.multivariate_normal(mu2,cov1,1)\n else:\n X_test[i,:] = np.random.multivariate_normal(mu2,cov2,1)\n\n train_mean = np.mean(X_train, axis = 0)\n train_std = np.std(X_train, axis = 0)\n X_train = (X_train - train_mean)/train_std\n X_test = (X_test - train_mean)/train_std\n \n return X_train, X_test",
"def generate_data(sample_size, noise_variance):\n \n # generate true beta\n A = np.array([[1]*15, [0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0]]).T\n B = np.array([[0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0], [1]*15]).T\n x_shape = A.shape[0]\n y_shape = B.shape[0]\n \n X_train = np.random.randn(sample_size, x_shape, y_shape) \n X_train_vec = np.reshape(X_train, (sample_size, x_shape*y_shape))\n \n cross_beta = A @ B.T\n vec_cross_beta = np.reshape(cross_beta, (x_shape*y_shape, 1))\n cross_norm = np.linalg.norm(cross_beta, 'fro')\n cross_beta = cross_beta / cross_norm\n Y_soft = np.zeros((sample_size, 1))\n \n for i in range(sample_size):\n epsilon = noise_variance * np.random.randn(1, 1)\n x_i = X_train_vec[i, :]\n y_i = (x_i @ vec_cross_beta) + epsilon\n Y_soft[i, :] = y_i\n \n Y_hard = np.sign(Y_soft)\n \n return cross_beta, X_train, Y_hard, Y_soft",
"def generate_data(self,seed):\n X, y = make_classification( n_samples = 250, random_state = seed )\n # Add bias term\n X = np.concatenate( ( np.ones( ( 250, 1 ) ), X ), axis = 1 )\n self.X_train, self.X_test, self.y_train, self.y_test = train_test_split( \n X, y, test_size = 50, random_state = seed )",
"def generate_train_test(self):\n x, y = self.read_data()\n x_train, y_train, x_test, y_test = self.sample_data(x, y)\n self.train = (x_train, y_train)\n self.test = (x_test, y_test)",
"def get_examples(ds_data, network, parents, verbose=1, **params):\n # Parameters\n classes = params.setdefault('classes', [-1,0,1])\n target = params.setdefault('target', int(1.2e6))\n slice_len = params.setdefault('slice_len', 330)\n \n assert not target % len(classes)\n \n G = np.mean(ds_data, axis=0) \n examples = np.zeros((target, 5, slice_len, 1))\n labels = np.zeros((target, len(classes)))\n count = 0\n \n if verbose > 0:\n print('Generating {} training examples'.format(target))\n bar = pb.ProgressBar(max_value=target,\n widgets=[pb.Percentage(), ' - ',\n pb.Bar(), ' - ',\n pb.ETA()])\n \n for c in classes:\n \n pairs = np.argwhere(network == c)\n reps = int(target/len(classes)/pairs.shape[0]) + 1\n pair_idx = np.repeat(np.arange(pairs.shape[0]), reps)\n pair_idx = np.random.permutation(pair_idx)[:target//len(classes)]\n start_idx = np.random.randint(\n 0, ds_data.shape[1]-slice_len, size=target//len(classes))\n \n for i in range(pair_idx.size):\n \n n1 = pairs[pair_idx[i]][0]\n n2 = pairs[pair_idx[i]][1]\n assert(network[n1,n2] == c)\n \n start = start_idx[i]\n end = start + slice_len\n \n p1 = np.mean(ds_data[parents[n1], start:end], axis=0)\n p2 = np.mean(ds_data[parents[n2], start:end], axis=0)\n \n examples[count,:,:,0] = np.vstack((\n p1, \n ds_data[n1][start:end], \n G[start:end], \n ds_data[n2][start:end], \n p2\n ))\n \n labels[count,:] = np.equal(classes, c, dtype=np.int32)\n \n if verbose > 0:\n bar.update(count)\n count +=1\n \n if verbose > 0:\n bar.finish()\n print(\n 'Generated examples of shape:', examples.shape,\n '\\nGenerated labels of shape:', labels.shape,\n '\\nThere are {} classes: {}'.format(len(classes), classes)\n )\n \n assert not np.isnan(examples).any()\n return examples, labels",
"def generate_rand():\n n_cols = 5\n n_rows = 100\n n_class = 10\n trial_x = np.random.rand(n_rows, n_cols)\n trial_y = np.random.random_integers(1, n_class, size = (n_rows, 1))\n\n # Append response to data\n trial_data = np.append(trial_x, trial_y, 1)\n return trial_data, n_class, n_cols",
"def create_data(self):\n\n print (f'Using {self.n_s} simulations for the training data to estimate cov')\n print (f'Using {self.n_p} simulations for the upper/lower training data')\n print (f'Number of splits, to increase number simulations: {self.n_train}')\n print (f'Adding noise to the derivative: {np.invert(self.noiseless_deriv)}')\n\n # Number of upper and lower simulations\n n_p = int(self.n_s * self.derivative_fraction)\n\n # set a seed to surpress the sample variance (EVEN FOR CENTRAL SIMULATIONS)\n seed = np.random.randint(1e6) \n # We should double-check to see if the sample variance if being surpressed\n\n # Perturb lower \n np.random.seed(seed)\n t_m = self.generate_data(np.array([self.theta_fid for i in \n range(self.n_train * self.n_p)])\n ,train = -self.delta_theta, flatten = self.flatten\n ,noiseless_deriv = self.noiseless_deriv) \n # Perturb higher \n np.random.seed(seed)\n t_p = self.generate_data(np.array([theta_fid for i in \n range(self.n_train * self.n_p)])\n ,train = self.delta_theta, flatten = self.flatten\n , noiseless_deriv = self.noiseless_deriv)\n\n # Central\n np.random.seed(seed)\n t = self.generate_data(np.array([self.theta_fid for i in \n range(self.n_train * self.n_s)])\n ,train = None, flatten = self.flatten)\n\n\n # derivative data\n t_d = (t_p - t_m) / (2. * self.delta_theta)\n\n # Save in a dict that the network takes\n data = {\"data\": t, \"data_d\": t_d}\n # for plotting purposes we save the upper/lower separately as well\n data[\"x_m\"], data[\"x_p\"] = t_m, t_p \n\n\n # Repeat the same story to generate test data\n print ('\\n')\n print (f'Using {self.n_s} simulations for the test data to estimate cov')\n print (f'Using {self.n_p_val} simulations for the upper/lower test data')\n print (f'Number of splits, to increase number simulations: {self.n_train_val}')\n print (f'Adding noise to the derivative: {np.invert(self.noiseless_deriv)}')\n print ('\\n')\n\n seed = np.random.randint(1e6)\n # Perturb lower \n np.random.seed(seed)\n tt_m = self.generate_data(np.array([self.theta_fid for i in \n range(self.n_train * self.n_p)])\n , train = -self.delta_theta, flatten = self.flatten\n , noiseless_deriv = self.noiseless_deriv)\n # Perturb higher \n np.random.seed(seed)\n tt_p = self.generate_data(np.array([self.theta_fid for i in \n range(self.n_train * self.n_p)])\n , train = self.delta_theta, flatten = self.flatten\n , noiseless_deriv = self.noiseless_deriv)\n # Central sim\n np.random.seed(seed)\n tt = self.generate_data(np.array([self.theta_fid for i in \n range(self.n_train * self.n_s)])\n , train = None, flatten = self.flatten)\n \n # np.random.seed()\n \n # derivative data\n tt_d = (tt_p - tt_m) / (2. * self.delta_theta)\n\n data[\"validation_data\"] = tt \n data[\"validation_data_d\"] = tt_d\n\n # for plotting purposes we save the upper/lower separately\n data[\"x_m_test\"], data[\"x_p_test\"] = tt_m, tt_p \n\n return data",
"def two_step_generator(classes: list, paths_list: list, imgs_per_class: int, shape: tuple,\n nb_win: int, greys: bool, nb_to_gen: int, img_gen: ImageDataGenerator) -> list:\n \n datawin = list() \n datagen = list()\n \n for class_ in classes:\n print(class_)\n \n # Images paths list\n class_imgs_path = [paths_list[k] for k in range(len(paths_list)) if class_ in paths_list[k]]\n\n # Randomly choose images\n class_imgs_subset = np.random.choice(class_imgs_path, size=imgs_per_class, replace=False)\n\n # Get images\n class_imgs = get_imgs(class_imgs_subset)\n\n # Step 1: resize and crop on sliding windows\n class_new_imgs = create_windows_imgs(class_imgs, shape=shape, nb_win=nb_win, greys=greys)\n class_new_imgs = np.array(flat_list(class_new_imgs))\n datawin.append(class_new_imgs)\n \n # Step 2: DataGenerator\n class_datagen = datagen_class(class_new_imgs, nb_to_gen, img_gen)\n class_datagen = class_datagen.astype(int)\n\n datagen.append(class_datagen)\n \n return datawin, datagen",
"def multiclass_toy_data(): \n #dataset = np.zeros((10,5), np.int)\n dataset = np.array([[0,0,0,0,4],\n [0,0,0,0,5],\n [1,3,0,0,0],\n [3,1,0,0,1],\n [0,0,6,2,0],\n [0,0,0,0,0],\n [0,0,1,7,2], \n [0,0,5,1,5],\n [0,0,34,0,0],\n [0,0,3,0,0]])\n Y = np.array([3,3,2,2,1,0,1,1,0,0])\n #for i in range(10):\n #for j in range(5):\n #dataset[i][j] = np.random.randint(0,10) \n dataset = np.column_stack((dataset, Y))\n return (dataset)",
"def generate(batch, size=32):\n\n # Using the data Augmentation in traning data\n ptrain = 'data224/train'\n pval = 'data224/test'\n\n datagen1 = ImageDataGenerator(\n samplewise_center=True,\n samplewise_std_normalization=True,\n shear_range=0.2,\n zoom_range=0.2,\n rotation_range=90,\n width_shift_range=0.2,\n height_shift_range=0.2,\n horizontal_flip=True)\n\n datagen2 = ImageDataGenerator(samplewise_center=True,\n samplewise_std_normalization=True,)\n\n train_generator = datagen1.flow_from_directory(\n ptrain,\n target_size=(size, size),\n batch_size=batch,\n class_mode='categorical')\n\n validation_generator = datagen2.flow_from_directory(\n pval,\n target_size=(size, size),\n batch_size=batch,\n class_mode='categorical')\n\n count1 = 0\n for root, dirs, files in os.walk(ptrain):\n for each in files:\n count1 += 1\n\n count2 = 0\n for root, dirs, files in os.walk(pval):\n for each in files:\n count2 += 1\n\n return train_generator, validation_generator, count1, count2",
"def generate_data(Para1, Para2, seed=0):\r\n\r\n np.random.seed(seed)\r\n X1 = np.vstack((rand.normal(Para1['mx'], Para1['ux'], Para1['N']), \r\n rand.normal(Para1['my'], Para1['uy'], Para1['N'])))\r\n\r\n X2 = np.vstack((rand.normal(Para2['mx'], Para2['ux'], Para2['N']), \r\n rand.normal(Para2['my'], Para2['uy'], Para2['N'])))\r\n \r\n Y = np.hstack(( Para1['y']*np.ones(Para1['N']), \r\n Para2['y']*np.ones(Para2['N']) )) \r\n X = np.hstack((X1, X2)) \r\n X = np.transpose(X)\r\n\r\n return X, Y",
"def learn(self, Xtrain, ytrain):\n\n ### YOUR CODE HERE\n \n self.numfeatures = Xtrain.shape[1]\n numsamples = Xtrain.shape[0]\n #print (self.numfeatures)\n count = 0\n for i in ytrain:\n if (i>count):\n count+=1\n self.numclasses = count + 1\n \n if(self.params['usecolumnones']==False):\n b = np.ones((numsamples, self.numfeatures-1))\n b = Xtrain[:,:-1]\n Xtrain = b\n self.numfeatures -= 1\n # print(Xtrain.shape[1])\n\n ### END YOUR CODE\n\n origin_shape = (self.numclasses, self.numfeatures)\n self.means = np.zeros(origin_shape)\n self.stds = np.zeros(origin_shape)\n\n ### YOUR CODE HERE\n countclass = np.zeros(self.numclasses)\n for i in range (0, numsamples):\n k = int(ytrain[i])\n countclass[k] += 1\n for j in range (0, self.numfeatures):\n self.means[k][j]+=Xtrain[i][j]\n \n for i in range (0, self.numclasses):\n #np.true_divide(self.means[i], countclass[i])\n for j in range (0, self.numfeatures):\n self.means[i][j] = self.means[i][j]/(countclass[i]+1e-8)\n \n self.yprob = np.true_divide(countclass, numsamples)\n \n for i in range (0, numsamples):\n k = int(ytrain[i])\n for j in range (0, self.numfeatures):\n self.stds[k][j]+= (Xtrain[i][j] - self.means[k][j])**2\n # print (self.stds)\n \n for i in range (0, self.numclasses):\n #np.true_divide(self.stds[i], countclass[i])\n for j in range (0, self.numfeatures):\n self.stds[i][j] = self.stds[i][j]/(countclass[i]+1e-8)\n \n # print (self.means)\n # print (self.stds)\n ### END YOUR CODE\n\n assert self.means.shape == origin_shape\n assert self.stds.shape == origin_shape",
"def init_benchmark_data(\n num_inputs, input_size, num_classes, rand_seed=None,\n **kwargs\n):\n N, D, C = num_inputs, input_size, num_classes\n\n rs = np.random.RandomState(seed=rand_seed)\n X = rs.rand(N, D)\n y = rs.choice(C, size=N)\n return X, y",
"def generate(self):\n self.training_data.gen_x(self.x_func)\n self.training_data.gen_a(self.a_func)\n self.training_data.gen_y(self.y_func)\n \n self.testing_data.gen_x(self.x_func)\n self.testing_data.gen_ys(self.y_func)\n self.testing_data.gen_azero(self.ytotal_func)",
"def generate_training_data_2D():\n c11 = np.random.uniform(0.05, 1.50, 100)\n c12 = np.random.uniform(-1.50, 1.50, 100)\n c21 = np.random.uniform(-1.50, -0.05, 100)\n c22 = np.random.uniform(-1.50, 1.50, 100)\n c1 = np.array([[i, j] for i, j in zip(c11, c12)])\n c2 = np.array([[i, j] for i, j in zip(c21, c22)])\n\n points = plt.figure()\n plt.plot(c1[:, 0], c1[:, 1], 'o', c2[:, 0], c2[:, 1], '*')\n plt.show()\n plt.close()\n\n return c1, c2",
"def generate_data(data, samples, targeted=True, start=0, inception=True):\n \n assert (targeted==True and start==0 and inception==True)\n \n \n inputs = []\n targets = []\n \n '''\n for i in range(samples):\n if targeted:\n if inception:\n seq = random.sample(range(1,1001), 10)\n else:\n seq = range(data.test_labels.shape[1])\n\n for j in seq:\n if (j == np.argmax(data.test_labels[start+i])) and (inception == False):\n continue\n inputs.append(data.test_data[start+i])\n targets.append(np.eye(data.test_labels.shape[1])[j])\n else:\n inputs.append(data.test_data[start+i])\n targets.append(data.test_labels[start+i])\n\n inputs = np.array(inputs)\n targets = np.array(targets)\n '''\n\n return inputs, targets",
"def generate_data(data, samples, targeted=True, start=0, inception=False):\n inputs = []\n targets = []\n labels = []\n true_ids = []\n for i in range(samples):\n if targeted:\n if inception:\n # for inception, randomly choose 10 target classes\n seq = np.random.choice(range(1, 1001), 1)\n # seq = [580] # grand piano\n else:\n # for CIFAR and MNIST, generate all target classes\n seq = range(data.test_labels.shape[1])\n\n # print ('image label:', np.argmax(data.test_labels[start+i]))\n for j in seq:\n # skip the original image label\n if (j == np.argmax(data.test_labels[start + i])) and (inception == False):\n continue\n inputs.append(data.test_data[start + i])\n targets.append(np.eye(data.test_labels.shape[1])[j])\n labels.append(data.test_labels[start + i])\n true_ids.append(start + i)\n else:\n inputs.append(data.test_data[start + i])\n targets.append(data.test_labels[start + i])\n labels.append(data.test_labels[start + i])\n true_ids.append(start + i)\n\n inputs = np.array(inputs)\n targets = np.array(targets)\n labels = np.array(labels)\n true_ids = np.array(true_ids)\n\n return inputs, targets, labels, true_ids",
"def _GenerateTestData(self, num_classes, dim, batch_size, num_true, labels,\n sampled, subtract_log_q):\n weights = np.random.randn(num_classes, dim).astype(np.float32)\n biases = np.random.randn(num_classes).astype(np.float32)\n hidden_acts = np.random.randn(batch_size, dim).astype(np.float32)\n\n true_exp = np.full([batch_size, 1], fill_value=0.5, dtype=np.float32)\n sampled_exp = np.full([len(sampled)], fill_value=0.5, dtype=np.float32)\n sampled_vals = (sampled, true_exp, sampled_exp)\n\n sampled_w, sampled_b = weights[sampled], biases[sampled]\n true_w, true_b = weights[labels], biases[labels]\n\n true_logits = np.sum(\n hidden_acts.reshape((batch_size, 1, dim)) * true_w.reshape(\n (batch_size, num_true, dim)),\n axis=2)\n true_b = true_b.reshape((batch_size, num_true))\n true_logits += true_b\n sampled_logits = np.dot(hidden_acts, sampled_w.T) + sampled_b\n\n if subtract_log_q:\n true_logits -= np.log(true_exp)\n sampled_logits -= np.log(sampled_exp[np.newaxis, :])\n\n exp_logits = np.concatenate([true_logits, sampled_logits], axis=1)\n exp_labels = np.hstack(\n (np.ones_like(true_logits) / num_true, np.zeros_like(sampled_logits)))\n\n return weights, biases, hidden_acts, sampled_vals, exp_logits, exp_labels",
"def run_train_test(training_input, testing_input):\n #grab the size of the training data input for each of the classes\n num_A_train = training_input[0][1]\n num_B_train = training_input[0][2]\n num_C_train = training_input[0][3]\n #remove the information that we used to find the size of the classes and segregate each of the\n #classes into their own numpy array\n training_input.remove(training_input[0])\n training = np.array(training_input)\n A_array = training[:num_A_train]\n B_array = training[1+num_A_train:num_A_train+num_B_train]\n C_array = training[1+num_A_train+num_B_train:]\n #Find the centroid by summing the columns and dividing by the total number of training data points in the given class\n A_centroid = A_array.mean(axis=0)\n B_centroid = B_array.mean(axis=0)\n C_centroid = C_array.mean(axis=0)\n #Calculate the weight\n AB_w = A_centroid - B_centroid\n BC_w = B_centroid - C_centroid\n AC_w = A_centroid - C_centroid\n #Calculate t\n AB_t = np.dot(AB_w, (A_centroid + B_centroid) / 2)\n BC_t = np.dot(BC_w, (B_centroid + C_centroid) / 2)\n AC_t = np.dot(AC_w, (A_centroid + C_centroid) / 2)\n #find the size of the testing data for each class\n num_A_test = testing_input[0][1]\n num_B_test = testing_input[0][2]\n num_C_test = testing_input[0][3]\n #remove the information and separate into three numpy arrays for each class\n testing_input.remove(testing_input[0])\n testing = np.array(testing_input)\n A_test_array = testing[:num_A_test]\n B_test_array = testing[num_A_test:num_A_test+num_B_test]\n C_test_array = testing[num_A_test+num_B_test:]\n\n truePositiveA = 0;\n truePositiveB = 0;\n truePositiveC = 0;\n trueNegativeA = 0;\n trueNegativeB = 0;\n trueNegativeC = 0;\n AinB = 0;\n AinC = 0;\n BinA = 0;\n BinC = 0;\n CinA = 0;\n CinB = 0;\n #loop through the testing data and store the true positive and true negative results. 
Additionally store\n #the number of A points classified as B, A points classified in C and etc.\n for i in range(num_A_test):\n if((np.dot(A_test_array[i], AB_w) >= AB_t) & (np.dot(A_test_array[i], AC_w) >= AC_t)):\n truePositiveA += 1\n elif((np.dot(A_test_array[i], AB_w) < AB_t)):\n AinB += 1\n else:\n AinC += 1\n for i in range(num_B_test):\n if((np.dot(B_test_array[i], AB_w) < AB_t) & (np.dot(B_test_array[i], BC_w) >= BC_t)):\n truePositiveB += 1\n elif((np.dot(B_test_array[i], AB_w) < AB_t)):\n BinA += 1\n else:\n BinC += 1\n for i in range(num_C_test):\n if((np.dot(C_test_array[i], AC_w) < AC_t) & (np.dot(C_test_array[i], BC_w) < BC_t)):\n truePositiveC += 1\n elif((np.dot(C_test_array[i], AC_w) < AC_t)):\n CinA += 1\n else:\n CinB += 1\n #Calculate the true positive, true negative, false positive, false negative, total positive, total negative\n #and estimated positive to calculate the tpr, fpr, error rate, accuracy and precision\n truePositive = truePositiveA + truePositiveB + truePositiveC\n trueNegative = truePositiveB + truePositiveC + BinC + CinB + truePositiveA + truePositiveB + AinB + BinA +truePositiveA + truePositiveC + AinC + CinA\n falsePositive = BinA + CinA + AinB + CinB + AinC + BinC\n falseNegative = AinC + AinB + BinA + BinC + CinA + CinB\n totalPositive = truePositive + falseNegative\n totalNegative = falsePositive + trueNegative\n estimatedPositive = truePositive + falsePositive\n #Calculate these measures and return the result values\n return {\n \"tpr\": float(truePositive)/totalPositive,\n \"fpr\": float(falsePositive)/totalNegative,\n \"error_rate\": float(falsePositive+falseNegative)/(totalPositive+totalNegative),\n \"accuracy\": float(truePositive+trueNegative)/(totalPositive+totalNegative),\n \"precision\": float(truePositive)/estimatedPositive\n }",
"def generate_embeddings_gen(dataset_path, classes):\n model = embeddings(INPUT_DIM)\n X_train, X_test, y_train, y_test = get_train_test_lists(dataset_path, classes)\n # create data generators\n batch_size = 16\n train_batch_generator = image_batch_generator(X_train, model, batch_size=batch_size)\n test_batch_generator = image_batch_generator(X_test, model, batch_size=batch_size)\n\n return train_batch_generator, test_batch_generator",
"def random_cls_dataset(request):\n set_seed()\n shape = request.param.get('shape', 10)\n size = request.param.get('size', 100)\n X, Y = make_classification(n_samples=2*size, n_features=shape, n_classes=10, n_informative=10, n_redundant=0)\n X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.5)\n Y_train, Y_test = Y_train.astype(np.int64), Y_test.astype(np.int64)\n return (X_train, Y_train), (X_test, Y_test)",
"def __init__(self, inputSize, hiddenSize, outputSize, epochs = 100, debug = False):\n self.inputSize = inputSize\n self.hiddenSize = hiddenSize\n self.outputSize = outputSize\n self.epochs = epochs\n self.debug = debug\n\n #weights\n self.W1 = np.random.randn(self.inputSize, self.hiddenSize) \n self.W2 = np.random.randn(self.hiddenSize, self.outputSize)",
"def generate_data(params, sigma):\n rng = random.PRNGKey(0)\n k = len(params) // 2\n a_array = params[:k]\n b_array = params[k:]\n n = 20 * k\n xs = sample_our_uniform(n, 1, rng).reshape((n,))\n ys = onp.zeros(n)\n all_indices = set(onp.arange(n))\n for i in range(k):\n i_idxs = onp.random.choice(list(all_indices), 20, replace=False)\n all_indices = set(all_indices) - set(i_idxs)\n ys[i_idxs] = xs[i_idxs] * a_array[i] + b_array[i] + onp.random.normal(0, sigma, size=(20,))\n return xs, ys"
] | [
"0.65752196",
"0.6445264",
"0.62967867",
"0.621237",
"0.61468756",
"0.61384785",
"0.61201763",
"0.60479647",
"0.60273725",
"0.60257494",
"0.6001405",
"0.597554",
"0.5915635",
"0.58671814",
"0.58343005",
"0.58264834",
"0.5801961",
"0.5795742",
"0.57617813",
"0.57445174",
"0.57350355",
"0.57300717",
"0.5722423",
"0.57215923",
"0.5717522",
"0.56981236",
"0.5696349",
"0.5693019",
"0.56911105",
"0.5679642"
] | 0.7620779 | 0 |
Compute the node volumes. | def computeNodeVolumes(self):
for i in np.arange(0,self.ni):
for j in np.arange(0,self.nj):
for k in np.arange(0,self.nk):
V = self.dh[0]*self.dh[1]*self.dh[2]
if (i==0 or i==self.ni-1): V*=0.5
if (j==0 or j==self.nj-1): V*=0.5
if (k==0 or k==self.nk-1): V*=0.5
self.node_vol[i][j][k] = V | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def volumes(self):",
"def volume(nodes, graph):\n ###TODO\n pass",
"def volume(self):\n return [node.volume for node in self]",
"def list_volumes(self):\n print '# Listing existing volumes'\n self.compute.list_volumes()",
"def list_volumes(self, node=None):\n\n data = self._perform_get(self._get_disk_path(), Disks)\n volumes = [self._to_volume(volume=v, node=node) for v in data]\n return volumes",
"def volumes(self):\n return self._volumes",
"def volumes(self) -> dict:\n return self.data[\"volumes\"]",
"def extract_volume(self):\n\n # RDD or array of [(partition, vol)]\n vols = None\n if self.usespark:\n vols = self._retrieve_vol(self.current_spot, None)\n else:\n vols = self._retrieve_vol(self.current_spot, len(self.partitions))\n self.current_spot += len(self.partitions)\n \n return vols",
"def get_volumes(self):\n url = self._get_url() + 'volumes'\n volumes = self._request(url)\n return volumes.json()",
"def get_volumes(self, dim):\n cdef np.ndarray[float64, mode='c', ndim=1] out\n\n if dim == 0:\n raise ValueError('vertices have no volume!')\n\n else:\n out = np.empty((self.mesh.topology.num[dim],),\n dtype=np.float64)\n mesh_get_volumes(self.mesh, &out[0], dim)\n\n return out",
"def total_volume(self):",
"def get_all_volume_usage(self, context, compute_host_bdms):\n volusage = []\n return volusage",
"def get_all_volume_usage(self, context, compute_host_bdms):\n volusage = []\n return volusage",
"def compute_volume(bundle):\n\taff=np.array([[-1.25, 0, 0, 90],[0, 1.25, 0, -126],[0, 0, 1.25, -72],[0, 0, 0, 1]])\n\tvoxel_list = streamline_mapping(bundle, affine=aff).keys()\n\tvol_bundle = len(set(voxel_list))\n\n\treturn vol_bundle",
"def volumes(self) -> Optional[Sequence['_core.v1.outputs.Volume']]:\n return pulumi.get(self, \"volumes\")",
"def pyscal_voronoi_volume(self):\n return analyse_voronoi_volume(atoms=self._structure)",
"def calculateVolumes(data):\n print \"Calculating volumes...\"\n results = {}\n for dataLine in data:\n name = dataLine['name']\n r1 = dataLine['r1']\n r2 = dataLine['r2']\n r3 = dataLine['r3']\n r4 = dataLine['r4']\n t1 = dataLine['t1']\n t2 = dataLine['t2']\n t3 = dataLine['t3']\n volCup = (math.pi/3.0) * t1 * ((r1**2) + (r4**2) - (r1*r4))\n volPeanut = math.pi * (t1 - t2 - t3) * ((r2**2) + (r3**2) - (r2*r3)) / 3.0\n volChoc = volCup - volPeanut\n ratio = volChoc/volPeanut\n print \"Ratio for \" + name + \" is \" + str(ratio)\n results[name] = [r1, volChoc, volPeanut, volCup, ratio]\n return results",
"def get_volumes():\n vols = []\n try:\n result = run_diskpart(['list volume'])\n except subprocess.CalledProcessError:\n pass\n else:\n # Append volume numbers\n output = result.stdout.decode().strip()\n for tmp in re.findall(r'Volume (\\d+)\\s+([A-Za-z]?)\\s+', output):\n vols.append({'Number': tmp[0], 'Letter': tmp[1]})\n\n return vols",
"def test_volumes_get(self):\n pass",
"def get_volume(path_list, box_size, resolution,\n norm = False, rot = False, trans = False):\n \n pdb2coords = PDB2CoordsUnordered()\n assignTypes = Coords2TypedCoords()\n translate = CoordsTranslate()\n rotate = CoordsRotate()\n project = TypedCoords2Volume(box_size, resolution)\n\n\n #with torch.no_grad():\n batch_size = len(path_list)\n coords, _, resnames, _, atomnames, num_atoms = pdb2coords(path_list)\n \n a,b = getBBox(coords, num_atoms)\n protein_center = (a+b)*0.5\n coords = translate(coords, -protein_center, num_atoms)\n random_rotations = getRandomRotation(batch_size)\n\n #rotate xyz \n if rot:\n coords = rotate(coords, random_rotations, num_atoms)\n \n box_center = torch.zeros(batch_size, 3, dtype=torch.double, device='cpu').fill_(resolution*box_size/2.0)\n coords = translate(coords, box_center, num_atoms)\n \n \n #translate xyz\n if trans: \n random_translations = getRandomTranslation(a, b, resolution*box_size) coords = translate(coords, random_translations, num_atoms) \n\n coords, num_atoms_of_type, offsets = assignTypes(coords.to(dtype=torch.float32),\n resnames, atomnames, num_atoms)\n volume = project(coords.cuda(), num_atoms_of_type.cuda(), offsets.cuda())\n \n if norm: #apply min-max norm \n volume = (volume - torch.min(volume)) / (torch.max(volume) - torch.min(volume))\n \n \n return volume, random_rotations",
"def get_volumes(self):\n res = self.get('%s/volumes' % self.catalog['volume'])\n if res['status'] == 200:\n return json.loads(res['body'])['volumes']\n else:\n LOG.error('Get volumes failed: %s %s %s' %\n (res['status'], res['reason'], res['body']))\n raise InvalidResponse(res)",
"def total_volume(self):\n v = self.cell_edges\n v = np.abs(v[-1] - v[0])\n return v",
"def volumes_per_instance(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"volumes_per_instance\")",
"def volumes_per_instance(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"volumes_per_instance\")",
"def volumes_per_instance(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"volumes_per_instance\")",
"def _get_data_volumes(vm_):\n ret = []\n volumes = vm_[\"volumes\"]\n for key, value in volumes.items():\n # Verify the required 'disk_size' property is present in the cloud\n # profile config\n if \"disk_size\" not in volumes[key].keys():\n raise SaltCloudConfigError(\n \"The volume '{}' is missing 'disk_size'\".format(key)\n )\n # Use 'HDD' if no 'disk_type' property is present in cloud profile\n if \"disk_type\" not in volumes[key].keys():\n volumes[key][\"disk_type\"] = \"HDD\"\n\n # Construct volume object and assign to a list.\n volume = Volume(\n name=key,\n size=volumes[key][\"disk_size\"],\n disk_type=volumes[key][\"disk_type\"],\n licence_type=\"OTHER\",\n )\n\n # Set volume availability zone if defined in the cloud profile\n if \"disk_availability_zone\" in volumes[key].keys():\n volume.availability_zone = volumes[key][\"disk_availability_zone\"]\n\n ret.append(volume)\n\n return ret",
"def get_volumes_on_node(hostname, vil):\n # Returns a list of volume names on a node for display\n\n vol_list = []\n try:\n if not hostname:\n raise Exception('No GRIDCell name passed')\n\n if not vil:\n vil, err = get_basic_volume_info_all()\n if err:\n raise Exception(err)\n\n if vil:\n vol_list = []\n for vol_info_dict in vil:\n bl, err = get_brick_hostname_list(vol_info_dict)\n if err:\n raise Exception(err)\n if bl and hostname in bl:\n vol_list.append(vol_info_dict[\"name\"])\n except Exception, e:\n return None, 'Error getting volumes on GRIDCell : %s' % str(e)\n else:\n return vol_list, None",
"def cell_volumes(self, index):\n if index is None:\n index = list(range(len(self.axes)))\n shape = []\n for i in index:\n shape.append(self.axes[i].size)\n volumes = np.ones(shape)\n for i, ind in enumerate(index):\n v = self.axes[ind].volumes\n volumes = array_routines.multiply_along_axis(volumes, v, i)\n return volumes",
"def volume(self):\n return self.volume_array",
"def volumes(self) -> Sequence['outputs.GetVolumeGroupSapHanaVolumeResult']:\n return pulumi.get(self, \"volumes\")"
] | [
"0.74941075",
"0.731756",
"0.7136064",
"0.68049896",
"0.6739897",
"0.66602635",
"0.6347831",
"0.62408555",
"0.61435556",
"0.6129233",
"0.60676765",
"0.60538983",
"0.60538983",
"0.5960976",
"0.59290266",
"0.5905389",
"0.5893624",
"0.58917075",
"0.58512735",
"0.5849365",
"0.58413273",
"0.5840318",
"0.58370453",
"0.58370453",
"0.58370453",
"0.5832837",
"0.5830663",
"0.58278173",
"0.5797896",
"0.57490027"
] | 0.7711581 | 0 |
Compute the charge density. | def computeChargeDensity(self):
self.rho = np.zeros((self.ni, self.nj, self.nk))
for species in self.speciesList:
if species.charge!=0:
self.rho += species.charge*species.den | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _density(self):\n fraction = np.array([0.]+[m.value for m in self.fraction])\n # TODO: handle invalid fractions using penalty functions\n # S = sum(fraction)\n # scale = S/100 if S > 100 else 1\n # fraction[0] = 100 - S/scale\n # penalty = scale - 1\n fraction[0] = 100 - sum(fraction)\n if (fraction < 0).any():\n return NaN\n volume = self._volume(fraction)\n density = np.array([m.density() for m in [self.base]+self.material])\n return np.sum(volume*density)",
"def density(self):\n return (1e-3*self.molar_mass) * self.pressure / (gas_constant * self.temperature) # kg/m^3",
"def density(self):\n return self.get_density()",
"def density( self ) :\n return self.__density",
"def density( self ) :\n return self.__density",
"def density( self ) :\n return self.__density",
"def density(self):\n return self.nnz/self.dim",
"def density(self):\n return self.nnz / self.size",
"def density(self):\n return self._density",
"def density(self):\n return self.num_arcs() / (self.nframes / FRATE)",
"def calculate_density(self, dc, cut_off=False):\n data_size, distance = self.data_size, self.distance\n logger.info('calculate density begin')\n func = lambda dij, dc: math.exp(- (dij / dc) ** 2)\n if cut_off:\n func = lambda dij, dc: 1 if dij < dc else 0\n max_density = -1\n for index in range(data_size):\n density = 0\n for front in range(index):\n density += func(distance[(front, index)], dc)\n for later in range(index + 1, data_size):\n density += func(distance[(index, later)], dc)\n self.result.append([density, float(\"inf\")])\n max_density = max(max_density, density)\n if max_density == density:\n self.max_pos = index\n self.max_density = max_density\n self.result = np.array(self.result)\n self.rho_des_index = np.argsort(-self.result[:, 0])\n logger.info('calculate density end')",
"def discharge_coefficient(self) -> _VectorisedFloat:\n return 0.6",
"def _densityctr(self, rangex, rangey, dim = misc.DEF_VIS_DIM):\n gr = N.meshgrid(rangex, rangey)\n x = gr[0].flatten()\n y = gr[1].flatten()\n xdata = N.concatenate((x[:, N.newaxis], y[:, N.newaxis]), axis = 1)\n dmu = self.mu[:, dim]\n dva = self._get_va(dim)\n den = GM.fromvalues(self.w, dmu, dva).pdf(xdata, log = True)\n den = den.reshape(len(rangey), len(rangex))\n\n return gr[0], gr[1], den",
"def density(self):\n return _cantera.reactor_density(self.__reactor_id)",
"def current_density(self, xyz):\n return self.sigma * self.electric_field(xyz)",
"def current_density(self, xyz):\n return self.sigma * self.electric_field(xyz)",
"def __density(self, x):\n\n z = np.power(self.rate, x) / m.factorial(x)\n return z * np.exp(-self.rate)",
"def density(self) -> float:\n if self.is_directed():\n factor = 1\n else:\n factor = 2\n\n num_e = self._Impl.number_of_edges(directed_edges=True)\n num_v = self._Impl.number_of_vertices()\n\n density = (factor * num_e) / (num_v * (num_v - 1))\n return density",
"def calculate(self, density):\n if density not in self.potential_memo:\n\n if density == 0:\n self.potential_memo[density] = 0\n else:\n a = self.a\n x_0 = self.x_0\n b = self.b\n c = self.c\n x = self.wigner_seitz_radius(density)**(1/2)\n x_x = x**2 + b * x + c\n x_x_0 = x_0**2 + b * x_0 + c\n q = (4 * c - b**2)**(1/2)\n\n self.potential_memo[density] = a * (log(x**2 / x_x) + (2 * b / q) * atan(q / (2 * x + b))\n - (b * x_0 / x_x_0) * (log((x - x_0)**2 / x_x) + (2 * (b + 2 * x_0) / q) * atan(q / (2 * x + b)))) \\\n - (a / 3) * ((1 + x_0 * x) / (1 + x_0 * x + b * x**2 + c * x**3))\n\n return self.potential_memo[density]",
"def getDensityEstimate(self):\n return self.density",
"def density(self, arg):\n out = 0\n for weight, mean, std in zip(self.weights, self.means, self.stds):\n scale = std * self.data['maturity']**.5\n loc = ((mean - self.data['riskfree']) *\n self.data['maturity'] - scale**2)\n out += weight * scs.norm(loc, scale).pdf(arg)\n return out",
"def gc_prob_density(r):\n return np.exp(_interp_ln_dens(r))",
"def density(self):\r\n return self.count_ones() / float(self.xspan * self.yspan)",
"def current_density(self, xyz):\n\n j = self.electric_field(xyz) / self.rho\n return j",
"def density(self, arg):\n return self.gb2_density(np.exp(arg)) * np.exp(arg)",
"def rate_density(self, value):\n\n # TODO: analyse for certain that log units cancel out\n # with the change in occr\n\n if value.ndim == 2:\n value = value.T\n\n R_i = np.digitize(value[0], self._R_boundaries) - 1\n P_i = np.digitize(value[1], self._P_boundaries) - 1\n\n # Remove the ones out of bounds (oob_mask = out of bounds mask)\n oob_mask = np.zeros_like(R_i, dtype=bool)\n oob_mask = oob_mask | ((R_i < 0) | (R_i >= np.shape(self.occr)[0]))\n oob_mask = oob_mask | ((P_i < 0) | (P_i >= len(self._P_boundaries)-1))\n\n R_i = R_i[~oob_mask]\n P_i = P_i[~oob_mask]\n\n return self.occr[R_i] * self._cpf_grid[R_i, P_i]",
"def number_density(self) -> u.m**-3:\n return self._number_density",
"def density(temp,pres):\n g_p = liq_g(0,1,temp,pres)\n dliq = g_p**(-1)\n return dliq",
"def discharge_coefficient(self) -> _VectorisedFloat:\n window_ratio = np.array(self.window_width / self.window_height)\n coefs = np.empty(window_ratio.shape + (2, ), dtype=np.float64)\n\n coefs[window_ratio < 0.5] = (0.06, 0.612)\n coefs[np.bitwise_and(0.5 <= window_ratio, window_ratio < 1)] = (0.048, 0.589)\n coefs[np.bitwise_and(1 <= window_ratio, window_ratio < 2)] = (0.04, 0.563)\n coefs[window_ratio >= 2] = (0.038, 0.548)\n M, cd_max = coefs.T\n\n window_angle = 2.*np.rad2deg(np.arcsin(self.opening_length/(2.*self.window_height)))\n return cd_max*(1-np.exp(-M*window_angle))",
"def electron_density(self):\n return N_avo * self.num_electrons * self.density / self.molar_mass"
] | [
"0.7530408",
"0.73343456",
"0.7283132",
"0.72509116",
"0.72509116",
"0.72509116",
"0.71329844",
"0.70839",
"0.7079626",
"0.69515586",
"0.6775504",
"0.67077136",
"0.6704856",
"0.66575205",
"0.65954286",
"0.65954286",
"0.6564065",
"0.6545288",
"0.65450436",
"0.65390414",
"0.65385604",
"0.6532385",
"0.6517644",
"0.6515493",
"0.64782065",
"0.64773977",
"0.64665616",
"0.6450793",
"0.64412844",
"0.64176625"
] | 0.79801714 | 0 |
Changes the bot's behavior mode for a given Discord server. | def switch_mode(guild_id: int, mode: str):
key = _mode_key(guild_id)
db[key] = mode
if mode == fixtures.chat:
del db[key] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _sendModeChange(self, msg, args=\"\", target=None):\n if target is None:\n target = \"#chan\"\n message = \":[email protected] MODE {} {} {}\\r\\n\".format(target, msg, args)\n self.client.dataReceived(message)",
"def mode_auto(self):\n if self.__check_mode_change():\n self.communications.set_status(\"Bot Auto Mode Set\")\n self.patrol()",
"def mode(self, channel, target, command=\"\"):\n time.sleep(1)\n self.s.send(\"MODE %s %s%s\\n\" % (channel, target, (command and (\" \" + command))))\n logger.log(\"MODE %s %s%s\" % (channel, target, (command and (\" \" + command)))).LogSend()",
"def setMode(self, targetmode):\n self.resetStream()\n\n if targetmode not in self.prompts.keys():\n raise ValueError(\"Invalid Mode %s\" % targetmode)\n\n initialmode = self.getMode()\n if targetmode == initialmode:\n logger.debug(\"In %s mode\" % targetmode)\n return True\n\n logger.debug(\"Changing mode from '%s' to '%s' on %s\" % (initialmode, targetmode, self))\n\n # Provide all permutations of mode switching\n if targetmode == CLI_MODES.config and initialmode == CLI_MODES.enable:\n self._session.sendline(\"config terminal\")\n elif targetmode == CLI_MODES.config and initialmode == CLI_MODES.shell:\n self._session.sendline(\"cli -m config\")\n elif targetmode == CLI_MODES.config and initialmode == CLI_MODES.pmx:\n self._session.sendline(\"quit\")\n elif targetmode == CLI_MODES.enable and initialmode == CLI_MODES.shell:\n self._session.sendline(\"cli -m enable\")\n elif targetmode == CLI_MODES.enable and initialmode == CLI_MODES.config:\n self._session.sendline(\"exit\")\n elif targetmode == CLI_MODES.shell and initialmode == CLI_MODES.enable:\n self._session.sendline(\"_shell\")\n elif targetmode == CLI_MODES.shell and initialmode == CLI_MODES.config:\n self._session.sendline(\"_shell\")\n elif targetmode == CLI_MODES.shell and initialmode == CLI_MODES.mysql:\n self._session.sendline(\"quit\")\n elif targetmode == CLI_MODES.pmx:\n self.setMode(CLI_MODES.config)\n self._session.sendline(\"pmx\")\n elif targetmode == CLI_MODES.mysql:\n self.setMode(CLI_MODES.shell)\n self._session.sendline(\"idbmysql\")\n elif targetmode != CLI_MODES.config and initialmode == CLI_MODES.pmx:\n # Moving from pmx to other modes. Switch to config and proceed..\n self.setMode(CLI_MODES.config)\n self.setMode(targetmode)\n self._session.sendline(\"\") # Send empty line for guessMode to work\n elif targetmode != CLI_MODES.shell and initialmode == CLI_MODES.mysql:\n # Moving from mysql to other modes. Switch to shell and proceed..\n self.setMode(CLI_MODES.shell)\n self.setMode(targetmode)\n self._session.sendline(\"\") # Send empty line for guessMode to work\n else:\n raise ValueError(\"Invalid Mode combination. Targetmode: %s, Currentmode: %s\" % (targetmode, initialmode))\n\n finalmode = self.guessMode()\n logger.debug(\"Mode changed to %s mode\" % finalmode)\n if targetmode == finalmode:\n if finalmode == CLI_MODES.shell:\n self.initShell()\n return True\n else :\n # A user can be in pmx subshells. So we might need to get back a couple levels\n if finalmode == CLI_MODES.pmx and targetmode == CLI_MODES.config:\n return self.setMode(CLI_MODES.config)\n else:\n logger.warn(\"Unable to set '%s' mode\" % targetmode)\n return False",
"def set_mode(vehicle, mode):\n util.log_info(\"Setting %s.\" % mode)\n shared.status['manual_mode'] = mode\n vehicle.mode = VehicleMode(mode)\n \n wait_count = 0 \n while True:\n time.sleep(.2)\n wait_count = wait_count + 1\n \n if vehicle.mode.name == mode :\n return True\n \n elif wait_count >= 45:\n util.log_warning(\"Unable to set %s. Assume link lost.\" % mode)\n shared.status['abort'] = True\n return False\n \n elif wait_count % 15 == 0 :\n util.log_warning(\"Retry setting %s\" % mode)\n vehicle.mode = VehicleMode(mode) # resend command",
"def set_mode(self, mode):\n if mode in self.MODES:\n self.mode = self.MODES[mode]",
"def set_relay_mode(momentary_mode_on, momentary_follow_sense, momentary_on_off):\n self._momentary_mode_on_prop.new_value = momentary_mode_on\n self._momentary_follow_sense_prop.new_value = momentary_follow_sense\n self._momentary_on_off_trigger_prop.new_value = momentary_on_off",
"async def godmode(self, ctx, *, switch: str):\n\n try:\n # If CBT Resort, disable godmode\n if ctx.guild.id == 681783307058675776: return\n\n await ctx.message.delete()\n await zb.give_admin(ctx,switch)\n try:\n # If no punish role for guild, ignore\n shit = zb.get_roles_by_group_id(ctx.guild.id,10)\n mute = zb.get_roles_by_group_id(ctx.guild.id,11)\n jail = zb.get_roles_by_group_id(ctx.guild.id,12)\n sRole = ctx.guild.get_role(shit[0][0])\n mRole = ctx.guild.get_role(mute[0][0])\n jRole = ctx.guild.get_role(jail[0][0])\n if not len(shit) == 0 and sRole in ctx.author.roles:\n rmv = shit\n elif not len(mute) == 0 and mRole in ctx.author.roles:\n rmv = mute\n elif not len(jail) == 0 and jRole in ctx.author.roles:\n rmv = jail\n else:\n return\n\n # Update database\n zb.punish_user(ctx.author,0)\n\n add = await zb.get_all_special_roles(ctx,ctx.author,10,12)\n await zb.add_roles(self,ctx.author,add,'Troubleshooting')\n await zb.remove_roles(self,ctx.author,rmv,'Troubleshooting')\n zb.rmv_special_role(ctx.guild.id,10,ctx.author.id)\n zb.rmv_special_role(ctx.guild.id,11,ctx.author.id)\n zb.rmv_special_role(ctx.guild.id,12,ctx.author.id)\n\n try:\n # Mute in voice\n await ctx.author.edit(mute=False)\n except:\n pass\n except Exception as e:\n await zb.bot_errors(ctx,sp.format(e))\n except Exception as e:\n await zb.bot_errors(ctx,sp.format(e))",
"async def _guild(self, ctx):\n if await self.config.guild(ctx.guild).guild():\n await self.config.guild(ctx.guild).guild.set(False)\n msg = _(\"Okay, I will not react to messages \" \"containing server emojis!\")\n await ctx.send(msg)\n else:\n await self.config.guild(ctx.guild).guild.set(True)\n msg = _(\"Okay, I will react to messages \" \"containing server emojis!\")\n await ctx.send(msg)",
"def irc_MODE(self, prefix, params):\n user = re.match(self.user_regex, prefix)\n channel = params[0]\n mode = ' '.join(params[1:])\n\n # Sent by network, not a real user\n if not user:\n self.logger.debug(\n \"%s set mode on %s (%s)\" % (prefix, channel, mode)\n )\n return\n\n self.logger.debug(\n \"%s!%s@%s set mode on %s (%s)\" %\n (user.group(1), user.group(2), user.group(3), channel, mode)\n )\n\n # Can get called during connection, in which case EventManager won't be\n # initialized yet\n if self.event_manager:\n self.event_manager.fire(\"irc.mode\", user, channel, mode)",
"async def toggle(self, ctx):\r\n serverid = ctx.message.server.id\r\n if self.adkillr[serverid]['toggle'] is True:\r\n self.adkillr[serverid]['toggle'] = False\r\n e = discord.Embed(description='**AntiAdv is now disabled.**')\r\n await self.bot.say(embed=e)\r\n elif self.adkillr[serverid]['toggle'] is False:\r\n self.adkillr[serverid]['toggle'] = True\r\n e = discord.Embed(description='**AntiAdv is now enabled.**')\r\n await self.bot.say(embed=e)\r\n dataIO.save_json(\"data/adkillr/adkillr.json\", self.adkillr)",
"def set_mode(self, modeline):\n\n # Verify the modeline has valid modes\n for m in modeline:\n if m.isalpha() and m not in self.server.supported_user_modeset:\n self.send_as_server(ERR_UMODEUNKNOWNFLAG,\n f'{self.ident.nick} :Unknown MODE flag')\n return\n\n old_modeset = self.ident.modeset\n self.ident.modeset = modeline_parser(modeline, old_modeset)\n\n # Only send MODE message if modes have changed\n if old_modeset != self.ident.modeset:\n self.send_as_nick('MODE', f'{self.ident.nick} :{modeline}')",
"def dispatch_mode_for_channel(self, target, mode):\n channel = target[1:]\n assert channel in self.server.channels\n self.server.channels[channel].mode(self, mode)",
"def mode(self, target, *data):\n self.send_line('MODE %s %s' % (target, ' '.join(data)), nowait=True)",
"def mode_remote(self):\n self.send(\"!MR\")\n # time.sleep(2.0)\n # No feedback, so query to verify set\n got = self.get_mode()\n assert got == \"R\", got",
"def test_set_game_mode(self):\n networktables_mock = unittest.mock.Mock()\n table_mock = unittest.mock.Mock()\n networktables_mock.getTable.return_value = table_mock\n\n network_instance = network.Network(networktables_mock, None, None)\n\n network_instance.set_game_mode(\"autonomous\")\n table_mock.putString.assert_called_with(\"/mode\", \"autonomous\")\n\n network_instance.set_game_mode(\"test\")\n table_mock.putString.assert_called_with(\"/mode\", \"test\")",
"def setScreenMode(mode='normal'):\n screendict = {'normal':'REVERS', 'black':'NOREV'}\n dislin.scrmod(screendict[mode])",
"def setMode(cls, mode):\n global CURRENT_MODE\n assert isinstance(mode, cls), \"Invalid mode {}\".format(mode)\n CURRENT_MODE = mode",
"async def musicbot(self, ctx, the_state):\r\n is_mod = False\r\n for role in ctx.message.author.roles:\r\n if role.name == \"Moderators\":\r\n is_mod = True\r\n if is_mod:\r\n if the_state == \"1\":\r\n self.music_off = False\r\n await ctx.send(\"Music Bot features now on\")\r\n else:\r\n self.music_off = True\r\n await ctx.send(\"Music Bot features now off\")\r\n else:\r\n await ctx.send(\"**Error:** You are not allowed to use this command!\")",
"async def async_set_fan_mode(self, fan_mode: str) -> None:\n kwargs: dict[str, Any] = {\"key\": self._key}\n if fan_mode in self._static_info.supported_custom_fan_modes:\n kwargs[\"custom_fan_mode\"] = fan_mode\n else:\n kwargs[\"fan_mode\"] = _FAN_MODES.from_hass(fan_mode)\n await self._client.climate_command(**kwargs)",
"def set_mode(self, mode):\n print('set_mode', mode)\n self._mode = int(mode)",
"def mode_manual(self):\n if self.__check_mode_change():\n self.communications.set_status(\"Piloting Bot\")\n self.__check_move()",
"def toggle_server(self):\n name = request.params.get('name', g.DEFAULT_SERVER)\n log.debug('toggle_server(%s)' % name)\n servers = model.Session.query(model.Server)\n server = servers.filter(model.Server.name == name).one()\n server.server_on = not server.server_on\n model.Session.update(server)\n model.Session.commit()\n redirect_to('/admin/dashboard')",
"def set_mode(self, mode: str) -> None:\n # Not all programs are fully supported by the current\n # OpenInterface API version. The known restricitons are:\n # - The 'Calibration' and 'TightnessTest' programms cannot\n # be started through the API.\n # - The 'Dry' program does not expose all it's parameters\n # (see github.com/buchi-labortechnik-ag/openinterface_rotavapor/issues/1)\n return self.send(self.cmd.SET_MODE, mode)",
"def setMode(self, request, context):\n \n self.vehicle.mode = VehicleMode(str(request.mode))\n self.vehicle.wait_ready('mode')\n \n return droneconnect_pb2.Null()",
"async def set_guild(ctx):\n await Config.set_main_guild(ctx.guild)\n await ctx.send(\"Successfully set guild\")",
"def set_mode(self, mode):\n SetMode_srv = SetModeRequest(0, mode)\n response = self.set_mode_client(SetMode_srv)\n if response.mode_sent:\n rospy.loginfo(CGREEN2 + \"SetMode Was successful\" + CEND)\n return 0\n else:\n rospy.logerr(CRED2 + \"SetMode has failed\" + CEND)\n return -1",
"def game(var, wrapper, message):\n if message:\n vote_gamemode(var, wrapper, message.lower().split()[0], doreply=True)\n else:\n wrapper.pm(messages[\"no_mode_specified\"].format(_get_gamemodes(var)))",
"def mode(self, mode):\n self.set_mode(mode)",
"async def async_set_swing_mode(self, swing_mode: str) -> None:\n await self._client.climate_command(\n key=self._key, swing_mode=_SWING_MODES.from_hass(swing_mode)\n )"
] | [
"0.6364935",
"0.60535496",
"0.6029431",
"0.59094155",
"0.5875401",
"0.57205594",
"0.571679",
"0.56967366",
"0.5678905",
"0.56336373",
"0.5626705",
"0.5526063",
"0.5517209",
"0.55169755",
"0.5493964",
"0.54108804",
"0.5406695",
"0.537454",
"0.53577685",
"0.53499895",
"0.53498805",
"0.5342297",
"0.53344697",
"0.53267854",
"0.53217024",
"0.5321586",
"0.531439",
"0.5304236",
"0.5296566",
"0.5285643"
] | 0.6939989 | 0 |
Fetches the bot's behavior mode for a given Discord server. | def get_mode(guild_id: int):
key = _mode_key(guild_id)
if key not in db:
return fixtures.chat
return db[key] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_mode(self) -> str:\n\n return self.send(self.cmd.GET_MODE)",
"def get_mode(self):\r\n return self._api.get_mode()",
"def game_mode(self):\n return self._get(\"game_mode\")",
"def _get_mode():\n return context.get_context('mode')",
"def get_mode(self, ):\n return self.get_parameter('mode')",
"def get_mode_parameter(mode):\n if mode == 'job':\n return 'cli'\n elif mode == 'serve':\n return 'serving'\n else:\n return mode",
"def get_mode(self, port):\n port = int(port)\n self._validate_port(\"get_mode\", port)\n flags = self._regex_shell_fn(\n self._command_dict[\"GET_MODE\"].format(port),\n self._regex_dict[\"GET_MODE_REGEX\"],\n tries=5)\n\n if \"O\" in flags:\n mode = OFF\n elif \"S\" in flags:\n mode = SYNC\n else:\n mode = CHARGE\n return mode",
"def getmode(self):\n return self.mode",
"def get_player_mode(mode=None):\n if mode == \"1\":\n print(\"You've chosen Solo Mode! Can you beat a computer?\")\n return mode\n elif mode == \"2\":\n print(\"You've chosen Multiplayer Mode! Can you beat a human?\")\n return mode\n else:\n if mode is not None:\n print(\"Unrecognized input. Please enter 1 or 2\\n\")\n mode = input(\"1 or 2 Players? \")\n return get_player_mode(mode)",
"def _get_mode(self):\n self._validate_mode()\n return deepcopy(self.mode)",
"def switch_mode(guild_id: int, mode: str):\n key = _mode_key(guild_id)\n db[key] = mode\n if mode == fixtures.chat:\n del db[key]",
"def getMode(self):\n with self.lock:\n mode = self.mode\n return mode",
"def _get_modes(self):\n return self.__modes",
"def get_mode(self):\r\n return self.mode",
"def getMode(self):\n return self._mode",
"def get_all_servers_modes():\n return _get_list(\n lambda server: server.mode,\n lambda server: server.mode_name_long\n )",
"def mode_remote(self):\n self.send(\"!MR\")\n # time.sleep(2.0)\n # No feedback, so query to verify set\n got = self.get_mode()\n assert got == \"R\", got",
"def _get_mode(self):\n raise NotImplementedError",
"def _calc_relay_mode(\n momentary_mode_on_value,\n momentary_follow_sense_value,\n momentary_on_off_trigger_value,\n):\n if not momentary_mode_on_value:\n return RelayMode.LATCHING\n if momentary_follow_sense_value:\n return RelayMode.MOMENTARY_C\n if momentary_on_off_trigger_value:\n return RelayMode.MOMENTARY_B\n return RelayMode.MOMENTARY_A",
"def _mode_key(guild_id: int) -> str:\n return f\"mode/{guild_id}\"",
"def mode(self):\n return self._data.get('mode', None)",
"def mode(self):\n return self._lift(\"mode\")",
"def get_double_mode(self):\r\n msg = struct.pack('>2B', 56, 1)\r\n response = self.query(msg)\r\n if response[1] == 254:\r\n return 'Subtractive mode selected.'\r\n elif response[1] == 1:\r\n return 'Additive mode selected.'\r\n else:\r\n raise ValueError('Mode not recognised.')",
"def getGatingMode(self, channel, unitCode=0):\n resp = self.XAPCommand('GMODE', channel, unitCode=unitCode)\n return int(resp)",
"def show_modes(var, wrapper, message):\n wrapper.pm(messages[\"available_modes\"].format(_get_gamemodes(var)))",
"def get_mode(self):\r\n s = self.query('FREQ:MODE?')\r\n if s == None: return None\r\n \r\n s = s.strip()\r\n if s == 'FIX': return 'Fixed'\r\n elif s == 'LIST': return 'List'\r\n else:\r\n print('ERROR: Unknown mode '+str(s))\r\n return",
"def _get_applicable_modes(command):\n mode_dict = {}\n _add_applicable_modes(command, mode_dict)\n return mode_dict.keys()",
"def _mysql_get_effective_sql_mode(engine):\n # Get the real effective SQL mode. Even when unset by\n # our own config, the server may still be operating in a specific\n # SQL mode as set by the server configuration.\n # Also note that the checkout listener will be called on execute to\n # set the mode if it's registered.\n row = engine.execute(\"SHOW VARIABLES LIKE 'sql_mode'\").fetchone()\n if row is None:\n return\n return row[1]",
"def guessMode(self, cmd=False):\n self.resetStream()\n if cmd is True:\n self._session.sendline(\"\")\n\n i = self._session.expect([pexpect.EOF, pexpect.TIMEOUT] + self.prompts.values())\n if i == 0:\n logger.error(\"Connection closed\")\n raise ValueError(\"Connection Closed\")\n elif i == 1:\n logger.error(str(self._session))\n logger.error(\"Timeout while waiting for prompt\")\n logger.warn(self._session.before)\n raise ValueError(\"Prompt not found\")\n else:\n self._prompt = self._session.match.re\n #logger.debug(\"Prompt matched: %s\" % self._prompt.pattern)\n #logger.debug(\"Output from device: (%s, %s)\" % (self._session.before, self._session.after))\n self._mode = self.getModeForPrompt(self._prompt)\n return self._mode",
"def mode(self) -> Optional[str]:\n return pulumi.get(self, \"mode\")"
] | [
"0.62315625",
"0.59785706",
"0.5904186",
"0.5902793",
"0.57068723",
"0.5660874",
"0.5515341",
"0.55088925",
"0.5500647",
"0.5492132",
"0.5484353",
"0.54668605",
"0.5456963",
"0.545687",
"0.5452248",
"0.54438627",
"0.54055154",
"0.5392773",
"0.53837216",
"0.5282799",
"0.5256535",
"0.5252546",
"0.52364415",
"0.52287304",
"0.5186136",
"0.518246",
"0.5180699",
"0.5163431",
"0.516055",
"0.5159594"
] | 0.7134497 | 0 |
Increments the number of times the bot has called upon GPT3. | def increment_gpt_completions():
_increment_counter("gpt_completions") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def increment_counter(self) -> None:",
"def counter(self) -> int:",
"def counter(self) -> int:",
"def inc( self ):\n self.count += 1",
"def inc(self):\n \n self.count += 1",
"def updateGACount(self):\n self.ga_count += 1",
"async def count(self, ctx):\n #Small chance for the count to appear\n rando = randint(0, 1000)\n if rando == (0):\n await StatsTracker.updateStat(self, \"achievements\", ctx.message.author.id, \"Summoned The Count\")\n await self.bot.say(\n \"You have been visited by The Count. He only visits once in every 1,000 counts! Congratulations! http://vignette3.wikia.nocookie.net/muppet/images/3/3c/CT-p0001-ST.jpg/revision/latest?cb=20060205225316\")\n \n \n #Increment count\n self.counter = self.counter + 1\n \n \n #Calculate how long it has been since last write\n timeSinceWrite = (time.time() - self.lastWrite)\n #If write is necessary, write to file and update lastWrite time\n if (timeSinceWrite >= 60*1):\n countFile = open(\"data/counter/counter.txt\", \"w\")\n countFile.write(str(self.counter))\n countFile.close()\n self.lastWrite = time.time()\n #Print out current count number\n await self.bot.say(self.counter)\n\n #Write to stats\n await StatsTracker.updateStat(self, \"commands\", ctx, ctx.message.content[1:])",
"def incr_no_of_attacks(self):\n\t\tself.__anom += 1\n\t\tself.__anom_lbl.setText(str(self.__anom))",
"def increase_count(self, number=1):\n self.count += number",
"def run(self) -> int:\n self._times_called += 1\n return self._times_called",
"async def on_count(ctx):\n count = get_count()\n await ctx.send(f'current count {count}')",
"def add_count(self):\n self.count += 1",
"def update_counter(ai_counter):\n if ai_counter < 140:\n ai_counter += 1\n else:\n ai_counter = 60\n return ai_counter",
"def increment_count(self, word):\n pass",
"def _tally(self, user_gpio, level, tick):\n self.count += 1",
"def update_count(self):\n pass",
"def pycount(self):\n\n self.count += 1\n return self.count",
"def update_count(self):\n pass # Do nothing",
"def _increment_turn(self):\r\n\r\n self.turn_number += 1",
"def increment_number(self):\n # self.number += 1\n print('fuckwit')\n # print(self.number)",
"def make_count_change():\n \"*** YOUR CODE HERE ***\"",
"def COUNTER_TOTAL():\n return 3",
"def voice_increase():\n request_command(tv_command=TVCommand.voice_increase)",
"def count_inside(self):\n time.sleep(2) #1\n self.count += 1",
"def tick(self):\n self.count += 1",
"def num_trials(self):",
"async def quote_count(self):\n await self.bot.type()\n result = self.count()\n await self.bot.say(result)",
"def increase_score(self):\n self.score += 1",
"def set_number(update: Update, context: CallbackContext):\n\n # Fix number of questions\n context.chat_data['number'] = 0\n context.chat_data['total'] = int(update.message.text)\n chat_id = update.message.chat_id\n\n # Create dictionary for future score input\n context.chat_data['user'] = dict()\n\n # Start the quiz\n data = {'chat_id': chat_id,'context': context}\n context.job_queue.run_once(run_quiz, 3, context=data, name=str(chat_id))# Delay time to 1st question\n\n return RUNNING_QUIZ",
"def increment_pc(self):\n self.program_counter[-1] += 1"
] | [
"0.67757624",
"0.66366524",
"0.66366524",
"0.6491001",
"0.64446753",
"0.6382773",
"0.62987596",
"0.6295936",
"0.6291472",
"0.62065566",
"0.6206188",
"0.6173869",
"0.6125122",
"0.6102132",
"0.60631764",
"0.60457885",
"0.6040812",
"0.6036177",
"0.6021704",
"0.60163426",
"0.60105145",
"0.5997049",
"0.5987785",
"0.5946945",
"0.5944356",
"0.59343183",
"0.5920818",
"0.5888143",
"0.58747864",
"0.58495337"
] | 0.6777141 | 0 |
Fetches the number of times the bot has called upon GPT3. | def get_gpt_completions() -> int:
return _get_counter("gpt_completions") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_count(results):\n return len(results)",
"def get_count(self):\r\n return self.count",
"def count(self) -> int:\n return pulumi.get(self, \"count\")",
"async def quote_count(self):\n await self.bot.type()\n result = self.count()\n await self.bot.say(result)",
"async def on_count(ctx):\n count = get_count()\n await ctx.send(f'current count {count}')",
"def counter(self) -> int:",
"def counter(self) -> int:",
"def response_count(self) -> int:\n return pulumi.get(self, \"response_count\")",
"def countPlayers():\n\n conn, c = main.connect()\n c.execute(\"SELECT count(*) FROM player\")\n\n return c.fetchone()[0]",
"def get_count(self):\n return self.count",
"def get_count(self):\n return self.count",
"def countPlayers():\n db, cursor = connect()\n cursor.execute( \" SELECT count(*) as num FROM players \")\n count = cursor.fetchone()[0]\n return int(count)",
"def _count(self):\n if self._count_valid:\n return self._total_results\n\n result = self._cb.get_object(self._doc_class.urlobject.format(self._cb.credentials.org_key))\n results = result.get(\"results\", [])\n\n self._total_results = len(results)\n self._count_valid = True\n\n return self._total_results",
"def countPlayers():\n conn, c = connect()\n c.execute(\"SELECT COUNT(*) FROM players;\")\n return c.fetchone()[0]",
"def get_count(self):\n\n\t\treturn self.__count",
"def get_track_count(self):\n self.app.curs.execute('select count(*) c from track')\n if self.app.curs.rowcount == 1:\n row = self.app.curs.fetchone()\n return row['c']\n else: # pragma: no cover\n return 0",
"def count(self):\n return self.get_count()",
"def tally(self):\n return self.count",
"def Count(self) -> int:",
"def Count(self) -> int:",
"def Count(self) -> int:",
"def Count(self) -> int:",
"def COUNTER_TOTAL():\n return 3",
"def get_num_gratings(self):\r\n msg = struct.pack('>2B', 56, 13)\r\n response = self.query(msg)\r\n return response[1]",
"def number_of_on_calls(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"number_of_on_calls\")",
"def getNumberOfAttempts(self, *args):\n return _CompuCell.Potts3D_getNumberOfAttempts(self, *args)",
"def count():",
"def count() -> int:\n pass",
"def countPlayers():\n\n count = 0\n query = (\"SELECT COUNT(id) FROM players;\")\n results = executeQuery({'dbname': 'tournament', 'query' : query, 'type' : 'find'})\n for row in results:\n count = row[0]\n return count",
"def countPlayers():\n conn = connect()\n cursor = conn.cursor()\n cursor.execute(\"SELECT count(*) FROM players\")\n playerCount = cursor.fetchone()[0]\n conn.close()\n return playerCount"
] | [
"0.62120426",
"0.6207076",
"0.6195694",
"0.6195053",
"0.6193064",
"0.6166276",
"0.6166276",
"0.6158104",
"0.6152014",
"0.6130577",
"0.6130577",
"0.6126625",
"0.6117636",
"0.61127734",
"0.6110218",
"0.61096126",
"0.61037076",
"0.6097694",
"0.60685635",
"0.60685635",
"0.60685635",
"0.60685635",
"0.60562825",
"0.605399",
"0.6051432",
"0.60504913",
"0.6048416",
"0.60431737",
"0.60292816",
"0.6028231"
] | 0.6642437 | 0 |
Increments the number of times the bot has joined a new Discord server. | def increment_guild_count():
_increment_counter("guilds_joined") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_guild_count() -> int:\n return _get_counter(\"guilds_joined\")",
"async def count(ctx):\n users = len(set(bot.get_all_members()))\n servers = len(bot.servers)\n\n colour = ''.join([random.choice('0123456789ABCDEF') for x in range(6)])\n colour = int(colour, 16)\n embed = discord.Embed(colour = discord.Colour(value = colour), timestamp = datetime.datetime.utcnow())\n embed.add_field(name = \"Servers im Modding: \", value = servers)\n embed.add_field(name = \"Users im Serving: \",value = users)\n embed.add_field(name = \"Add me: \", value = \"Type m.botinfo\")\n embed.set_footer(text= \"{} | Requested by: {} at\".format(version, ctx.message.author))\n await bot.say(embed = embed)",
"async def update_member_count():\n guild = bot.get_guild(SERVER_ID)\n channel_prefix = \"Members\"\n vc = discord.utils.find(lambda c: channel_prefix in c.name, guild.voice_channels)\n mem_count = guild.member_count\n joined_today = len([m for m in guild.members if m.joined_at.date() == datetime.datetime.today().date()])\n left_channel = discord.utils.get(guild.text_channels, name=CHANNEL_LEAVE)\n left_messages = await left_channel.history(limit=200).flatten()\n left_today = len([m for m in left_messages if m.created_at.date() == datetime.datetime.today().date()])\n await vc.edit(name=f\"{mem_count} Members (+{joined_today}/-{left_today})\")\n print(\"Refreshed member count.\")",
"async def on_count(ctx):\n count = get_count()\n await ctx.send(f'current count {count}')",
"def incrementTimers(self):\n # online servers\n for server in self.online_servers:\n self.online_servers[server][0] += 1\n # offline servers\n for server in self.offline_servers:\n self.offline_servers[server][0] += 1\n \n return",
"async def count(self, ctx):\n #Small chance for the count to appear\n rando = randint(0, 1000)\n if rando == (0):\n await StatsTracker.updateStat(self, \"achievements\", ctx.message.author.id, \"Summoned The Count\")\n await self.bot.say(\n \"You have been visited by The Count. He only visits once in every 1,000 counts! Congratulations! http://vignette3.wikia.nocookie.net/muppet/images/3/3c/CT-p0001-ST.jpg/revision/latest?cb=20060205225316\")\n \n \n #Increment count\n self.counter = self.counter + 1\n \n \n #Calculate how long it has been since last write\n timeSinceWrite = (time.time() - self.lastWrite)\n #If write is necessary, write to file and update lastWrite time\n if (timeSinceWrite >= 60*1):\n countFile = open(\"data/counter/counter.txt\", \"w\")\n countFile.write(str(self.counter))\n countFile.close()\n self.lastWrite = time.time()\n #Print out current count number\n await self.bot.say(self.counter)\n\n #Write to stats\n await StatsTracker.updateStat(self, \"commands\", ctx, ctx.message.content[1:])",
"async def membercount(ctx, *args):\n if ctx.message.channel.is_private:\n await bot.delete_message(ctx.message)\n return\n\n g = ctx.message.server\n\n gid = g.id\n membs = str(len(g.members))\n membs_on = str(len([m for m in g.members if not m.status == Status.offline]))\n users = str(len([m for m in g.members if not m.bot]))\n users_on = str(len([m for m in g.members if not m.bot and not m.status == Status.offline]))\n bots = str(len([m for m in g.members if m.bot]))\n bots_on = str(len([m for m in g.members if m.bot and not m.status == Status.offline]))\n created = str(g.created_at)\n \n em = Embed(title=\"Membercount\")\n em.description = \"```\\n\" \\\n \"Members: %s (%s)\\n\" \\\n \" Users: %s (%s)\\n\" \\\n \" Bots: %s (%s)\\n\" \\\n \"Created: %s\\n\" \\\n \"```\" % (membs, membs_on, users, users_on, bots, bots_on, created)\n\n await client.send_message(ctx.message.channel, embed=em)\n await client.delete_message(ctx.message)",
"async def update_stats(self):\r\n\r\n\t\twhile True:\r\n\t\t\tlogging.info('Attempting to post server count')\r\n\t\t\ttry:\r\n\t\t\t\tawait self.dblpy.post_server_count()\r\n\t\t\t\tlogging.info(f'Posted server count ({len(self.bot.guilds)})')\r\n\t\t\texcept Exception as e:\r\n\t\t\t\tlogging.exception(f'Failed to post server count\\n{type(e).__name__}: {e}')\r\n\t\t\tawait asyncio.sleep(1800)",
"async def on_guild_join(self, guild):\n l.info(f\"Joined {guild.name} with {guild.member_count} users!\")",
"def increment_counter(self) -> None:",
"def add_count(self):\n self.count += 1",
"def increase_count(self, number=1):\n self.count += number",
"def inc( self ):\n self.count += 1",
"def update_count(self):\n pass",
"def increment_login_attempts(self):\n self.login_attempts += 1",
"def increment_login_attempts(self):\n self.login_attempts += 1",
"def increment_login_attempts(self):\n self.login_attempts += 1",
"def increment_login_attempts(self):\n self.login_attempts += 1",
"def increment_login_attempts(self):\n self.login_attempts += 1",
"def increment_login_attempts(self):\n self.login_attempts += 1",
"def increment_login_attempts(self):\n self.login_attempts += 1",
"def increment_login_attempts(self):\n\t\tself.login_attempts += 1",
"def increment_login_attempts(self):\n\t\tself.login_attempts += 1",
"def member_count(self) -> int:\n return sum([g.member_count for g in self.guilds])",
"def increment(cls):\n index = random.randint(0, SimpleCounterShard.NUM_SHARDS - 1)\n shard_name = 'shard' + str(index)\n counter = SimpleCounterShard.objects.get_or_create(pk=shard_name)[0]\n counter.count += 1\n counter.save()",
"def inc(self):\n \n self.count += 1",
"def increment_login_attempts(self):\n self.attributes['login_attempts'] += 1",
"def instances_created_inc(self):\n with _MonitorEnv._lock:\n self._instances_created += 1\n return self._instances_created",
"async def users(ctx):\n\n if ctx.channel.name.lower() in channels:\n await ctx.send(f\"\"\"# of members: {ctx.guild.member_count}\"\"\")",
"def incInstCount(self):\n self.instCount += 1"
] | [
"0.67351145",
"0.6580996",
"0.64553255",
"0.6380524",
"0.62797195",
"0.6258795",
"0.6256039",
"0.60467",
"0.60429853",
"0.6021418",
"0.59913725",
"0.5839755",
"0.57931155",
"0.57805455",
"0.57604533",
"0.57604533",
"0.57604533",
"0.57604533",
"0.57604533",
"0.57604533",
"0.57604533",
"0.5752559",
"0.5752559",
"0.57411474",
"0.5735589",
"0.57305855",
"0.5729658",
"0.57271415",
"0.572007",
"0.5649809"
] | 0.8089962 | 0 |
Fetches the number of times the bot has joined a new Discord server. | def get_guild_count() -> int:
return _get_counter("guilds_joined") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def increment_guild_count():\n _increment_counter(\"guilds_joined\")",
"async def count(ctx):\n users = len(set(bot.get_all_members()))\n servers = len(bot.servers)\n\n colour = ''.join([random.choice('0123456789ABCDEF') for x in range(6)])\n colour = int(colour, 16)\n embed = discord.Embed(colour = discord.Colour(value = colour), timestamp = datetime.datetime.utcnow())\n embed.add_field(name = \"Servers im Modding: \", value = servers)\n embed.add_field(name = \"Users im Serving: \",value = users)\n embed.add_field(name = \"Add me: \", value = \"Type m.botinfo\")\n embed.set_footer(text= \"{} | Requested by: {} at\".format(version, ctx.message.author))\n await bot.say(embed = embed)",
"async def membercount(ctx, *args):\n if ctx.message.channel.is_private:\n await bot.delete_message(ctx.message)\n return\n\n g = ctx.message.server\n\n gid = g.id\n membs = str(len(g.members))\n membs_on = str(len([m for m in g.members if not m.status == Status.offline]))\n users = str(len([m for m in g.members if not m.bot]))\n users_on = str(len([m for m in g.members if not m.bot and not m.status == Status.offline]))\n bots = str(len([m for m in g.members if m.bot]))\n bots_on = str(len([m for m in g.members if m.bot and not m.status == Status.offline]))\n created = str(g.created_at)\n \n em = Embed(title=\"Membercount\")\n em.description = \"```\\n\" \\\n \"Members: %s (%s)\\n\" \\\n \" Users: %s (%s)\\n\" \\\n \" Bots: %s (%s)\\n\" \\\n \"Created: %s\\n\" \\\n \"```\" % (membs, membs_on, users, users_on, bots, bots_on, created)\n\n await client.send_message(ctx.message.channel, embed=em)\n await client.delete_message(ctx.message)",
"def member_count(self) -> int:\n return sum([g.member_count for g in self.guilds])",
"async def update_member_count():\n guild = bot.get_guild(SERVER_ID)\n channel_prefix = \"Members\"\n vc = discord.utils.find(lambda c: channel_prefix in c.name, guild.voice_channels)\n mem_count = guild.member_count\n joined_today = len([m for m in guild.members if m.joined_at.date() == datetime.datetime.today().date()])\n left_channel = discord.utils.get(guild.text_channels, name=CHANNEL_LEAVE)\n left_messages = await left_channel.history(limit=200).flatten()\n left_today = len([m for m in left_messages if m.created_at.date() == datetime.datetime.today().date()])\n await vc.edit(name=f\"{mem_count} Members (+{joined_today}/-{left_today})\")\n print(\"Refreshed member count.\")",
"def GetCount(self):\n return self._server.get_count()",
"def countPlayers():\n\n conn, c = main.connect()\n c.execute(\"SELECT count(*) FROM player\")\n\n return c.fetchone()[0]",
"async def users(ctx):\n\n if ctx.channel.name.lower() in channels:\n await ctx.send(f\"\"\"# of members: {ctx.guild.member_count}\"\"\")",
"async def on_count(ctx):\n count = get_count()\n await ctx.send(f'current count {count}')",
"def countPlayers():\n conn, c = connect()\n c.execute(\"SELECT COUNT(*) FROM players;\")\n return c.fetchone()[0]",
"def countPlayers():\n conn = connect()\n cursor = conn.cursor()\n cursor.execute(\"SELECT count(*) FROM players\")\n playerCount = cursor.fetchone()[0]\n conn.close()\n return playerCount",
"def countPlayers():\n conn = connect()\n cur = conn.cursor()\n cur.execute(\"SELECT COUNT(*) FROM players\")\n players = int(cur.fetchone()[0])\n conn.close()\n return players",
"def get_num_servers():\n return 1",
"async def on_guild_join(self, guild):\n l.info(f\"Joined {guild.name} with {guild.member_count} users!\")",
"def countPlayers():\n db, cursor = connect()\n cursor.execute( \" SELECT count(*) as num FROM players \")\n count = cursor.fetchone()[0]\n return int(count)",
"def countPlayers():\n\n db = connect()\n c = db.cursor()\n query = (\"SELECT count(players.id) AS count_player FROM players;\")\n c.execute(query)\n count_player = c.fetchone()[0]\n db.close()\n return count_player",
"def getconnectioncount(self):\n return self.proxy.getconnectioncount()",
"def countPlayers():\n with _connect_db() as (conn, cur):\n cur.execute(\"\"\"SELECT COUNT(*) from players;\"\"\")\n ret = int(cur.fetchone()[0])\n return ret",
"def get_counters():\n servers = get_servers()\n\n online_players = sum([server.players.current for server in servers])\n active_servers = sum([1 for server in servers if server.players.current > 0])\n total_servers = len(servers)\n\n return (online_players, active_servers, total_servers)",
"def get_connected_users_count(room: PublicChatRoom) -> int:\n return room.users.count()",
"def nclients(self, r):\r\n return len(self.clients(r))",
"def countPlayers():\n conn, cur = connect()\n query = \"SELECT count(*) AS player_count FROM players;\"\n try:\n cur.execute(query)\n except:\n print(\"Error encountered when selecting player count from players table\")\n num_players = cur.fetchone()\n conn.close()\n return num_players['player_count']",
"def getSessionCount(self):\n logger.debug('Getting the number of sessions discovered...')\n return get_text(get_element_by_css(\"span[data-nsmodule='sessionsdiscovered']\"))",
"def countPlayers():\n conn = connect()\n c = conn.cursor()\n # Counts the number of entries in the \"players\" table.\n c.execute(\"select count(*) as num from players;\")\n num = c.fetchone()[0]\n conn.commit()\n conn.close()\n return num",
"async def update_stats(self):\r\n\r\n\t\twhile True:\r\n\t\t\tlogging.info('Attempting to post server count')\r\n\t\t\ttry:\r\n\t\t\t\tawait self.dblpy.post_server_count()\r\n\t\t\t\tlogging.info(f'Posted server count ({len(self.bot.guilds)})')\r\n\t\t\texcept Exception as e:\r\n\t\t\t\tlogging.exception(f'Failed to post server count\\n{type(e).__name__}: {e}')\r\n\t\t\tawait asyncio.sleep(1800)",
"def countPlayers():\n dbConn = connect()\n c = dbConn.cursor()\n c.execute(\"SELECT COUNT(name) FROM player\")\n players = c.fetchone()[0]\n dbConn.close()\n return players",
"def count_players():\n DB = connect()\n c = DB.cursor()\n c.execute(\"SELECT * FROM count_players\")\n DB.commit()\n player_count = c.fetchall()[0][0]\n DB.close()\n return player_count",
"def getUserCount(self):\n logger.debug('Getting the number of users discovered...')\n return get_text(get_element_by_css(\"span[data-nsmodule='usersdiscovered']\"))",
"def get_online_count():\n return dict(online_user=get_online_users())",
"async def count(self, ctx):\n #Small chance for the count to appear\n rando = randint(0, 1000)\n if rando == (0):\n await StatsTracker.updateStat(self, \"achievements\", ctx.message.author.id, \"Summoned The Count\")\n await self.bot.say(\n \"You have been visited by The Count. He only visits once in every 1,000 counts! Congratulations! http://vignette3.wikia.nocookie.net/muppet/images/3/3c/CT-p0001-ST.jpg/revision/latest?cb=20060205225316\")\n \n \n #Increment count\n self.counter = self.counter + 1\n \n \n #Calculate how long it has been since last write\n timeSinceWrite = (time.time() - self.lastWrite)\n #If write is necessary, write to file and update lastWrite time\n if (timeSinceWrite >= 60*1):\n countFile = open(\"data/counter/counter.txt\", \"w\")\n countFile.write(str(self.counter))\n countFile.close()\n self.lastWrite = time.time()\n #Print out current count number\n await self.bot.say(self.counter)\n\n #Write to stats\n await StatsTracker.updateStat(self, \"commands\", ctx, ctx.message.content[1:])"
] | [
"0.71610075",
"0.6775801",
"0.6639288",
"0.6386706",
"0.6309916",
"0.62673753",
"0.6198729",
"0.6164844",
"0.615084",
"0.61471665",
"0.6077989",
"0.5998459",
"0.5978479",
"0.5959258",
"0.5931924",
"0.59211755",
"0.59084696",
"0.58824116",
"0.58693665",
"0.5832288",
"0.58088416",
"0.57886463",
"0.57777214",
"0.5771664",
"0.57333314",
"0.57313156",
"0.572135",
"0.5720099",
"0.57096225",
"0.5696659"
] | 0.7822986 | 0 |
Increments the number of times the bot has called the Etherscan API. | def increment_etherscan_calls():
_increment_counter("etherscan_calls") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_etherscan_calls() -> int:\n return _get_counter(\"etherscan_calls\")",
"def increment_counter(self) -> None:",
"def inc( self ):\n self.count += 1",
"def inc(self):\n \n self.count += 1",
"def add_count(self):\n self.count += 1",
"def _increase_counter(self, response):\n response_id = response.meta['__id']\n spot = self._request_registry[response_id]\n spot['counter'] = spot.get('counter', 0) + 1",
"def run(self) -> int:\n self._times_called += 1\n return self._times_called",
"def incInstCount(self):\n self.instCount += 1",
"def increase_count(self, number=1):\n self.count += number",
"def count_inside(self):\n time.sleep(2) #1\n self.count += 1",
"def counter(self) -> int:",
"def counter(self) -> int:",
"async def on_count(ctx):\n count = get_count()\n await ctx.send(f'current count {count}')",
"def update_count(self):\n pass",
"def increment_requests_count(self, type):\n if type not in self._requests_count:\n self._requests_count[type] = 0\n self._requests_count[type] += 1",
"def request_sent(self):\n self._sent += 1",
"def send_req(self):\n self.n_send_req += 1",
"def update_count(self):\n pass # Do nothing",
"def _tally(self, user_gpio, level, tick):\n self.count += 1",
"def inc_counter(self, *_, **__): # pylint: disable=arguments-differ\n pass",
"async def count(self, ctx):\n #Small chance for the count to appear\n rando = randint(0, 1000)\n if rando == (0):\n await StatsTracker.updateStat(self, \"achievements\", ctx.message.author.id, \"Summoned The Count\")\n await self.bot.say(\n \"You have been visited by The Count. He only visits once in every 1,000 counts! Congratulations! http://vignette3.wikia.nocookie.net/muppet/images/3/3c/CT-p0001-ST.jpg/revision/latest?cb=20060205225316\")\n \n \n #Increment count\n self.counter = self.counter + 1\n \n \n #Calculate how long it has been since last write\n timeSinceWrite = (time.time() - self.lastWrite)\n #If write is necessary, write to file and update lastWrite time\n if (timeSinceWrite >= 60*1):\n countFile = open(\"data/counter/counter.txt\", \"w\")\n countFile.write(str(self.counter))\n countFile.close()\n self.lastWrite = time.time()\n #Print out current count number\n await self.bot.say(self.counter)\n\n #Write to stats\n await StatsTracker.updateStat(self, \"commands\", ctx, ctx.message.content[1:])",
"def increment_counter(self) -> None:\n self._fail_counter += 1",
"def pycount(self):\n\n self.count += 1\n return self.count",
"async def on_trade_counter(self, trade: \"steam.TradeOffer\") -> None:",
"def increment_counter(self) -> None:\n try:\n self._redis.incr(self._namespace(\"fail_counter\"))\n except RedisError:\n self.logger.error(\"RedisError\", exc_info=True)",
"def get_uplink_cnt(self) -> int:\n try:\n self._serial.transmit(b'\\x53\\x00')\n response = self._get_reply(0x53, 4, 0.25)\n finally:\n self._gpio.sleep()\n\n return int.from_bytes(response[2:6], 'little', signed=False)",
"def _inc_counter(self) -> None:\n self._state_storage.increment_counter()",
"def successful_response(self):\n self._requests_successful += 1",
"async def count(self, **kw):\n\n pass",
"def update(self, result):\n self.visits += 1\n self.wins += result"
] | [
"0.6934852",
"0.67007434",
"0.6490865",
"0.6485326",
"0.6322484",
"0.62933683",
"0.6238643",
"0.62330043",
"0.62144375",
"0.61618984",
"0.6143483",
"0.6143483",
"0.61156535",
"0.6038863",
"0.601487",
"0.6003469",
"0.6001606",
"0.5975672",
"0.59506804",
"0.5885791",
"0.58831507",
"0.58610547",
"0.58469886",
"0.5819547",
"0.58102274",
"0.58075595",
"0.5796253",
"0.5741013",
"0.5738267",
"0.5717878"
] | 0.8248691 | 0 |
Fetches the number of times the bot has called the Etherscan API. | def get_etherscan_calls() -> int:
return _get_counter("etherscan_calls") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def count(self):\r\n url = '{0}/{1}'.format(self.get_url(), 'count')\r\n\r\n return http.Request('GET', url), parsers.parse_json",
"def GetCount(self):\n return self._server.get_count()",
"def increment_etherscan_calls():\n _increment_counter(\"etherscan_calls\")",
"def _get_wireguard_stats():\n\n num_wireguard = 0\n try:\n epoch_now = int(datetime.utcnow().timestamp())\n for peer in WireGuardPeer.yield_peers():\n if peer.is_handshake_recent(epoch_now):\n num_wireguard += 1\n except Exception as exc:\n logging.debug(\"Error getting wireguard connections: %s\", exc)\n\n return num_wireguard",
"def get_number_of(gdownloader, repo_api_address, statistic_type, parameter = None):\r\n\tr = gdownloader.download_request(repo_api_address + \"/\" + statistic_type, [\"per_page=100\"] if parameter == None else [\"per_page=100\", parameter])\r\n\tif \"link\" in r.headers:\r\n\t\taddress = r.headers[\"link\"].split(',')[1].split('<')[1].split('>')[0]\r\n\t\tdata = gdownloader.download_object(address)\r\n\t\treturn 100 * (int(address.split('=')[-1]) - 1) + len(data) if data != None else None\r\n\telse:\r\n\t\tdata = json.loads(r.text or r.content) if r.status_code != 204 else {}\r\n\t\treturn len(data)",
"def _get_usr_ping_count(self):\n return self.__usr_ping_count",
"def _count(self):\n if self._count_valid:\n return self._total_results\n\n result = self._cb.get_object(self._doc_class.urlobject.format(self._cb.credentials.org_key))\n results = result.get(\"results\", [])\n\n self._total_results = len(results)\n self._count_valid = True\n\n return self._total_results",
"def device_count():\n apipath = \"/targets/devices\"\n url = SERVER + apipath\n params = {\n 'q': '(deviceType:ASA)',\n 'agg': 'count'}\n headers = {\n 'Accept': \"application/json\",\n 'Content-Type': \"application/json\",\n 'Authorization': \"bearer {}\".format(token)}\n response = requests.get(url, verify=False, stream=True, headers=headers, params=params)\n getstatuscode = response.status_code\n getresponse = response.json()\n if getstatuscode == 200:\n return getresponse\n else:\n response.raise_for_status()",
"def getUserCount(self):\n logger.debug('Getting the number of users discovered...')\n return get_text(get_element_by_css(\"span[data-nsmodule='usersdiscovered']\"))",
"def _get_count(results):\n return len(results)",
"async def get_attack_count(self, search_time):\n\n url = f'https://{self.__api}/v1/objects/attack/count'\n body = {\"filter\": {\"!type\": [\"warn\"], \"time\": search_time}}\n async with aiohttp.ClientSession() as session:\n response = await self.fetch(session, url, body=body)\n return response",
"def _get_echo_resp_received_count(self):\n return self.__echo_resp_received_count",
"def count(self):\n\n return self._get(\"count\", rtype=UInt)",
"def get_num_gratings(self):\r\n msg = struct.pack('>2B', 56, 13)\r\n response = self.query(msg)\r\n return response[1]",
"def get_count(type,baseurl,user,password):\n payload = {'request': 'count', 'type': type}\n r = requests.get(baseurl, params=payload, auth=HTTPBasicAuth(user, password), verify=False)\n return int(r.text)",
"def __len__(self, context=None):\n if context is not None:\n context = self._repair_context(context)\n uri = self.rest_services[\"size\"]\n payload=dict()\n if context:\n context = context.n3()\n payload[\"context\"] = context\n r = requests.get(uri, params = payload)\n return int(r.text)",
"def count(cls, client) :\n\t\ttry :\n\t\t\tobj = lsntransportprofile()\n\t\t\toption_ = options()\n\t\t\toption_.count = True\n\t\t\tresponse = obj.get_resources(client, option_)\n\t\t\tif response :\n\t\t\t\treturn response[0].__dict__['___count']\n\t\t\treturn 0\n\t\texcept Exception as e :\n\t\t\traise e",
"def count(self):\n return self.get_count()",
"def _get_echo_req_received_count(self):\n return self.__echo_req_received_count",
"def count(self) -> int:\n return pulumi.get(self, \"count\")",
"def count(cls, client) :\n try :\n obj = nshttpprofile()\n option_ = options()\n option_.count = True\n response = obj.get_resources(client, option_)\n if response :\n return response[0].__dict__['___count']\n return 0\n except Exception as e :\n raise e",
"def response_count(self) -> int:\n return pulumi.get(self, \"response_count\")",
"def get_count():\n _check_init()\n return _pypm.CountDevices()",
"def count(options=None):\n if options is None:\n return requests.get(\"/count\")\n else:\n return requests.get(\"/count\", options)",
"def getCount(self):\n return self.count",
"def get_session_count(self):\n\t\treturn call_sdk_function('PrlUsrInfo_GetSessionCount', self.handle)",
"async def friend_request_count(self) -> int:\n e = await self.request.request(url=f'https://friends.roblox.com/v1/user/friend-requests/count', method='get',\n )\n return e['count']",
"def count(self) -> int:\n return self._adapter.count()",
"def getconnectioncount(self):\n return self.proxy.getconnectioncount()",
"def ask_for_numbers():\n requests.get(\"http://zero2.local:5000/get_num\", timeout=(20,0.02))\n return 1"
] | [
"0.66338074",
"0.65768725",
"0.65583724",
"0.64269334",
"0.63450223",
"0.63380396",
"0.6315614",
"0.6310543",
"0.6267673",
"0.62313205",
"0.6201313",
"0.6194689",
"0.619361",
"0.61920506",
"0.61883724",
"0.6185133",
"0.6167828",
"0.61490756",
"0.6132312",
"0.6128996",
"0.61184907",
"0.61150914",
"0.61133236",
"0.6102411",
"0.6094982",
"0.60824054",
"0.60699594",
"0.60635847",
"0.6061394",
"0.6052835"
] | 0.7939709 | 0 |
Returns the formatted key representing a server's interaction mode. | def _mode_key(guild_id: int) -> str:
return f"mode/{guild_id}" | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getkey(self) -> str:\n return self.screen.getkey()",
"def get_mode(self) -> str:\n\n return self.send(self.cmd.GET_MODE)",
"def get_key(self) -> str:\n return f'{self.address}_{self.port}'",
"def keypad_key(m) -> str:\n return f\"keypad_{m.digit}\"",
"def scat_key(self):\n return self.tx_path.modes[-1].key() + self.rx_path.modes[-1].key()",
"def keysym_to_str(keysym):\n return keysymdef.names.get(keysym, '')",
"def _GetKeyString(self):\n return self.__key_string",
"def mode(self) -> str:\r\n return self._mode",
"def mode(self) -> str:\n return pulumi.get(self, \"mode\")",
"def mode(self) -> str:\n return pulumi.get(self, \"mode\")",
"def __GetKeyString(self):\n return self._GetKeyString()",
"def __str__(self):\n key_type = self.__guess_key_type()\n return \"%s%s%s%s\" % ( key_type,\n self.getFullyQualifiedName(),\n COL_SEPARATOR,\n self.__key )",
"def _GetKeyString(self):\n return self.__key_string",
"def get(self):\n return str(self.physical_key)",
"def sign_on_mode(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"sign_on_mode\")",
"def operation(self) -> str:\n return self.vera_device.get_hvac_mode()",
"def _GetKeyString(self):",
"def _GetKeyString(self):",
"def key(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"key\")",
"def key(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"key\")",
"def key(self) -> str:\n return pulumi.get(self, \"key\")",
"def key(self) -> str:\n return pulumi.get(self, \"key\")",
"def key(self) -> str:\n return pulumi.get(self, \"key\")",
"def key(self) -> str:\n return pulumi.get(self, \"key\")",
"def key(self) -> str:\n return pulumi.get(self, \"key\")",
"def key(self) -> str:\n return pulumi.get(self, \"key\")",
"def key(self) -> str:\n return pulumi.get(self, \"key\")",
"def key(self) -> str:\n return pulumi.get(self, \"key\")",
"def key(self) -> str:\n return pulumi.get(self, \"key\")",
"def key(self) -> str:\n return pulumi.get(self, \"key\")"
] | [
"0.64127934",
"0.6240811",
"0.6202982",
"0.61066186",
"0.610295",
"0.6052676",
"0.5982132",
"0.5955124",
"0.5954061",
"0.5954061",
"0.5953439",
"0.5855512",
"0.58451843",
"0.58385724",
"0.58207315",
"0.58051056",
"0.58004147",
"0.58004147",
"0.5794554",
"0.5794554",
"0.5793321",
"0.5793321",
"0.5793321",
"0.5793321",
"0.5793321",
"0.5793321",
"0.5793321",
"0.5793321",
"0.5793321",
"0.5793321"
] | 0.749164 | 0 |
Create instance of ReleasePduSession class | def __init__(self, connection=None, prompt=None, newline_chars=None, runner=None):
super(ReleasePduSession, self).__init__(connection, operation='execute', prompt=prompt,
newline_chars=newline_chars, runner=runner)
self.ret_required = False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self):\r\n # create a session id\r\n self.session = ViSession()",
"def __init__(self, username=None, password=None, terminal_id=None):\n if username is None:\n raise Exception('Username is empty')\n if password is None:\n raise Exception('Password is empty')\n if terminal_id is None:\n raise Exception('Terminal ID is empty')\n if not isinstance(terminal_id, int):\n raise Exception('Invalid terminal_id type. int type expected!')\n self.userName = username\n self.userPassword = password\n self.terminalId = terminal_id\n self.service_address = 'https://bpm.shaparak.ir/pgwchannel/services/pgw?wsdl'\n self.payment_address = 'https://bpm.shaparak.ir/pgwchannel/startpay.mellat'\n self.namespace = 'http://interfaces.core.sw.bps.com/'",
"async def release(self) -> None:\n ...",
"async def release(self) -> None:\n ...",
"async def release(self) -> None:\n ...",
"def __init__(self, public_key, private_key, url='http://api.scup.com/1.1', timeout=None, logRequests=False):\n\n self.session = requests.Session()\n self.private_key = private_key\n self.public_key = public_key\n self.url = url.strip('/')\n self.timeout = timeout\n if logRequests:\n self.requestQueue = Queue()\n else:\n self.requestQueue = None",
"def __init__(self, config):\n self.config = config\n self.__session = None",
"def create_session(self, **params):\n raise NotImplementedError('Should be implemented by a sub-class.')",
"def new_session(self):\n self._session = self.vspk.NUVSDSession(\n username=self.user,\n password=self.password,\n enterprise=self.enterprise,\n api_url=self.uri)\n\n self._session.start()\n if not self.default_enterprise:\n self.default_enterprise = self.get_enterprise_by_name(\n self.default_netpartition_name)\n\n self.assertIsNotNone(self.default_enterprise,\n \"Should have a default \"\n \"enterprise for Nuage plugin\")\n\n return self._session",
"def __init__(self, username, password):\n self.session = requests.Session()\n self.username = username\n self.password = password\n self.data = None\n self.unit = None\n self.total_usage = None\n self.allowed_usage = None\n self.remaining_usage = None",
"def create(self):\n\t\tif self._session:\n\t\t\tself.close()\n\n\t\tif not self._session:\n\t\t\tself._session = requests.Session()\n\t\t\tself._session.mount('http://', ra.HTTPAdapter(max_retries=self._max_retries))\n\t\t\tself._session.mount('https://', ra.HTTPAdapter(max_retries=self._max_retries))\n\n\t\t\tmsg = u'Created internal requests Session instance {0:#0x}'\n\t\t\tlog_with_debug_info(logging.DEBUG, msg.format(id(self._session)))",
"def release(self):",
"def create(self):\n if self._session:\n self.close()\n\n if not self._session:\n self._session = requests.Session()\n self._session.mount('http://', ra.HTTPAdapter(max_retries=self._max_retries))\n self._session.mount('https://', ra.HTTPAdapter(max_retries=self._max_retries))\n\n msg = u'Created internal requests Session instance {0:#0x}'\n utils.log_with_debug_info(logging.DEBUG, msg.format(id(self._session)))",
"def __init__(\n self,\n specifier: pyuavcan.transport.InputSessionSpecifier,\n payload_metadata: pyuavcan.transport.PayloadMetadata,\n loop: asyncio.AbstractEventLoop,\n finalizer: typing.Callable[[], None],\n ):\n self._specifier = specifier\n self._payload_metadata = payload_metadata\n self._loop = loop\n assert self._loop is not None\n\n self._statistics = SerialInputSessionStatistics()\n self._transfer_id_timeout = self.DEFAULT_TRANSFER_ID_TIMEOUT\n self._queue: asyncio.Queue[pyuavcan.transport.TransferFrom] = asyncio.Queue()\n self._reassemblers: typing.Dict[int, TransferReassembler] = {}\n\n super().__init__(finalizer)",
"def __init__(self, session):\n self.session = session",
"def create_session(self, transport):\n session = self.SESSION_CLS(self, transport, 0)\n self.session = session\n return session",
"def __init__(self, session):\n self._session = session",
"def create_session(self, transport):\n session_id = self.session_id_allocator.allocate()\n session = self.SESSION_CLS(self, transport, session_id)\n self.sessions[session.id] = session\n return session",
"def session(self):\n ssn = pn_session(self._impl)\n if ssn is None:\n raise (SessionException(\"Session allocation failed.\"))\n else:\n return Session(ssn)",
"def release(self):\r\n pass",
"def createPdu(data):\n\n memoryStream = BytesIO(data)\n inputStream = DataInputStream(memoryStream)\n\n return getPdu(inputStream)",
"def create_instance(c_instance):\n return AumPC40(c_instance)",
"def test_releaseSession_success_and_failure(self):\n\t\tdm = DeviceManager()\n\t\tdm.sessionID=\"random session\"\n\t\tself.assertEqual(dm.releaseSession(\"random session\"), \"success\")\n\t\tself.assertEqual(dm.releaseSession(\"other session\"), \"failure\")",
"def release(self):\n self._needs_release = False\n send_message(self, \"release\", restype=objc_id, argtypes=[])",
"def __init__(self):\n self._email: str | None = None\n self._session = VorwerkSession()",
"def __init__(self, nodeid, sessionID):\r\n \r\n # iv = b\"1234567890123456\" is an aexample\r\n # \r\n self.nodeid = nodeid\r\n self.iv = bytes(random.getrandbits(8) for _ in range(16))\r\n self.staticiv = b'like' * 4\r\n self.ivkey = b'hihi' * 4\r\n self.datakey = b'bye!' * 4\r\n self.passphrase = b'calv' * 4\r\n self.sessionID = sessionID\r\n self.G_LED = PWM(Pin(21),freq=10,duty=256)\r\n self.R_LED = Pin(17,Pin.OUT, value=1)\r\n self.x_accel = None\r\n self.y_accel = None\r\n self.z_accel = None\r\n self.temp = None",
"def __init__(self, p_self):\n self.__dict__ = p_self.__dict__\n self._session_alive = p_self._session_alive\n self._session_open = p_self._session_open\n self._channel_alive = p_self._channel_alive",
"def create_session(self, transport):\n session_id = self.session_id_allocator.allocate()\n session = self.SESSION_CLS(self, transport, session_id, self.message_mgr)\n self.sessions[session.id] = session\n return session",
"def create_remote_access_session(projectArn=None, deviceArn=None, name=None, configuration=None):\n pass",
"def __init__(self, uuid, auth_url, project_name, username, password):\n self.uuid = uuid\n self.auth_url = auth_url\n self.project_name = project_name\n self.username = username\n self.password = password\n \n conn = pyone.OneServer(\n self.auth_url,\n session=\"{0}:{1}\".format(username, password)\n )"
] | [
"0.59411764",
"0.53526604",
"0.5335359",
"0.5335359",
"0.5335359",
"0.531777",
"0.5271947",
"0.5268836",
"0.5240727",
"0.5178478",
"0.5159803",
"0.5155766",
"0.5059394",
"0.5057607",
"0.50530124",
"0.5051264",
"0.5038893",
"0.50235176",
"0.5016713",
"0.50115156",
"0.49998862",
"0.4989562",
"0.49726358",
"0.49683145",
"0.4964852",
"0.49543542",
"0.490096",
"0.48841184",
"0.48791245",
"0.48716044"
] | 0.65711516 | 0 |
Polynomial extension from j=1 to degree of each component of tx | def build_poly(tx, degree) :
shape = tx.shape
poly = np.zeros((shape[0], shape[1] * degree))
poly[:,:shape[1]] = tx
for deg in range(2, degree + 1) :
for j in range(0, shape[1]) :
poly[:, shape[1] * (deg - 1) + j] = tx[:,j] ** deg
return poly | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def build_poly(x, degree):\n tx = np.zeros((x.shape[0], x.shape[1]*(degree+1)))\n \n for j in range(degree+1):\n tx[:,x.shape[1]*j:x.shape[1]*(j+1)] = np.power(x,j)\n \n return tx",
"def build_poly(x, degree):\n phi = np.ones(len(x))\n phi = np.vstack((phi, [x**(j+1) for j in range(degree)]))\n \n return phi.T",
"def polynomial_basis(theta: np.array, degree: int) -> np.array:\n\n # Minimum degree is 1\n if degree < 1:\n raise Exception(\"Degree has to be 1 or greater!\")\n\n basis = np.empty((degree, theta.size), dtype=np.float)\n basis[0,] = np.ones((1, theta.size))\n\n for row in range(1, degree):\n basis[row,] = theta\n\n for row in range(2, degree):\n basis[row,] *= basis[row - 1,]\n\n return basis",
"def poly(x, degree=2):\n x = np.array(x)\n X_trans = np.transpose(np.vstack((x**k for k in range(degree + 1))))\n return np.linalg.qr(X_trans)[0][:, 1:]",
"def __getPolynomial(self) -> 'func':\n return lambda x: sum(self.pts[i]*base(x)\n for i, base in enumerate(self.basis))",
"def polynomial(degree, coeffs):\n\n def h(x):\n result = 0\n degre=degree\n for i in range(len(coeffs)):\n result = result + coeffs[i]*(x**degre)\n degre = degre - 1\n return result\n \n\n def h(x):\n result = 0\n nonlocal degree\n for i in range(len(coeffs)):\n result = result + coeffs[i]*(x**degree)\n degree = degree - 1\n return result\n\n\n\n\n\n return h\n\n # def h(x):\n # result = 0\n # for i in range(degree, -1, -1):\n # result = result + coeffs[degree - i]*(x**i)\n \n # return result\n \n\n\n # return h",
"def build_poly(x, degree): \n # ***************************************************\n # COPY YOUR CODE FROM EX03 HERE\n # polynomial basis function: TODO\n # this function should return the matrix formed\n # by applying the polynomial basis to the input data\n # ***************************************************\n raise NotImplementedError",
"def polynomial_basis(X, degree):\n n_samples, n_features = X.shape\n\n # The number of monomials is (n + d) choose d\n n_monomials = int(factorial(n_features + degree)/(factorial(n_features)*factorial(degree)))\n features = np.ones((n_monomials, n_samples))\n col = 1\n x_T = X.T\n\n for deg in range(1, degree + 1):\n for combs in combinations_with_replacement(x_T, deg):\n features[col, :] = reduce(lambda x, y: x * y, combs)\n col += 1\n return features.T",
"def build_poly_by_feature(tx, degrees):\n poly_tempt = np.ones([tx.shape[0],1])\n for idx, degree in enumerate(degrees):\n feature_poly = build_poly(tx[:,idx], int(degree))\n poly_tempt = np.c_[poly_tempt, feature_poly[:,1:]]\n return poly_tempt",
"def poly(x, coeffs):\n return np.sum([coeffs[i] * x ** i for i in range(len(coeffs))], axis=0)",
"def __pow__(self, exp):\n # We have (p o Q)^e = p^e o Q\n coeff = (self._unit_simplex_polynomial**exp).coeff\n if isinstance(exp, numbers.Integral):\n r = self.degree() * exp\n else:\n r = 0\n for i in range(len(exp)):\n r += self[i].degree() * exp[i]\n return PolynomialBernsteinSimplex(coeff, self.vertices, r)",
"def build_poly(x, degree):\n \"\"\"\n Assemble the 3 label vectors with the original ordering \n Inputs:\n - x (ndarray) : binary prediction for set 1\n - degree (int) : binary prediction for set 2 \n Outputs: \n - p (ndarray) : predicted labels for test set ( with the original ordering)\n \"\"\"\n # forming a matrix containing the data points\n terms = np.hstack([np.ones([x.shape[0],1]),np.tile(x,(1,degree))])\n index = np.arange(degree)+1\n \n # forming a matrix contnaining the exponents\n exponents = np.multiply(np.ones((1, x.shape[1])), index[:, np.newaxis])\n exponents = exponents.reshape([1, x.shape[1]*degree])\n exponents = np.multiply(exponents, np.ones([x.shape[0], 1]))\n exponents = np.hstack([np.ones( (x.shape[0], 1) ),exponents])\n \n # using the exponent matrix as the element-wise exponents of the terms in the terms matrix\n p=np.power(terms,exponents)\n return p",
"def phi_poly(self,x,i):\n return x**i",
"def polynomial(a, x):\n\n sum = 0\n\n for i in range(len(a)):\n sum += a[i] * x**i\n return sum",
"def polyfeatures(self, X, degree):\n #TODO\n \n for d in range(2,degree+1):\n X = np.append(X,X[:,[0]]**d,1)\n \n return X",
"def poly(x, y, pd) :\n # Maximum polynomial degree allowed is 7.\n maxD = 7\n if pd > maxD :\n exit(\"Please choose a reasonable polynomial degree (0 <= pd <= \" + maxD + \").\")\n \n # Make the polynomial matrix one degree at a time.\n p = np.zeros((len(x), int((pd+1)*(pd+2)/2)), float)\n count = 0\n numP = 0\n for i in range(pd + 1) :\n for j in range(numP + 1) :\n if (j == 0) and (numP == 0) :\n p[:,count] = 1\n elif (j == 0) :\n p[:,count] = x**(numP-j)\n elif (numP-j == 0) :\n p[:,count] = y**j\n else :\n p[:,count] = x**(numP-j) * y**j\n count += 1\n numP += 1\n \n return p",
"def linear_polynomial(self, e: 'PFElement') -> Polynomial:\n poly = self.polynomial(-e)\n poly += poly.monic(1)\n return poly",
"def polyFunction(x,weights):\n y=0\n for i in range (0,len(weights)):\n y+= weights[i]*(x**i)\n return y",
"def _poly_func(x, a, b, c, d, e):\n return a * x ** 6 + b * x ** 5 + c * x ** 4 + d * x ** 3 + e * x ** 2",
"def Bpoly(n, x):\n n = int(n)\n out = 0\n for k in xrange(0, n+1):\n out += comb(n,k)*Bnum(n-k)*x**float(k)\n return out",
"def bernstein_poly(i, n, t):\n\n return comb(n, i) * ( t**(n-i) ) * (1 - t)**i",
"def bernstein_poly(i, n, t):\n\n return comb(n, i) * ( t**(n-i) ) * (1 - t)**i",
"def evaluate_polynomial(f,x):\n degree = len(f)-1\n ans = 0\n for i in f:\n ans += i*x**degree\n degree -= 1\n return(ans)",
"def polynomialInterpolation(self,s):\n #print(s)\n #s[i]=xi,s[j]=xj\n return Polynomial.createFromInterpolation(s,range(len(s)))\n #return Polynomial(s,T)",
"def generate_polynomial_features(self, X) :\n\n n,d = X.shape\n\n ### ========== TODO : START ========== ###\n # part b: modify to create matrix for simple linear model\n # part g: modify to create matrix for polynomial model\n Phi = X\n m = self.m_\n\n if m == 1:\n Phi = np.zeros((n,2))\n for i in range(n):\n Phi[i,0] = 1\n Phi[i, 1] = X[i]\n\n else:\n Phi = np.ones((n,m+1))#n*m+1 dimmension\n power_arr = np.arange(0, m+1)\n for index, row in enumerate(Phi):# get every row\n row = np.repeat(X[index],m+1)\n row = np.power(row,power_arr)\n Phi [index,] = row\n #also could use the following\n \"\"\"\n import sklearn.preprocessing as sk\n #X is a N*1 vector\n poly_mat = sk.PolynomialFeatures(3)\n poly.fit_transform(a)\n \"\"\"\n\n\n\n\n\n ### ========== TODO : END ========== ###\n\n return Phi",
"def polynomial_degree(self):\n return polynomial_degree(self)",
"def polyval(p, x):\r\n val = 0\r\n ii = len(p) - 1\r\n for i in range(len(p) - 1):\r\n val += p[i] * (x ** ii)\r\n ii -= 1\r\n return val + p[-1]",
"def derivative(self) -> Polynomial:\n coefficients: list[float] = [0] * self.degree\n for i in range(self.degree):\n coefficients[i] = self.coefficients[i + 1] * (i + 1)\n return Polynomial(self.degree - 1, coefficients)",
"def poly_int(params: PolyParams, x: NDArray, order: int) -> NDArray:\n\n return np.polyval(np.polyint(params, -order), x)",
"def aks_mod( polynomial , r ):\n aks_mod = polynomial.coefficients\n total = aks_mod[ : r ]\n aks_mod = aks_mod[ r : ]\n while len(aks_mod) - 1 >= r :\n for i in range(r):\n total[i] += aks_mod[i]\n aks_mod = aks_mod[ r : ]\n for i in range(len(aks_mod)):\n total[i] += aks_mod[i]\n return array_poly_mod( total , polynomial.mod )"
] | [
"0.7282244",
"0.6993958",
"0.66993797",
"0.6652448",
"0.6533176",
"0.65030074",
"0.6482124",
"0.6463599",
"0.64258146",
"0.63943446",
"0.63770676",
"0.6372877",
"0.6317858",
"0.62753487",
"0.6219246",
"0.61877716",
"0.60899425",
"0.60714346",
"0.6067437",
"0.60603696",
"0.60210645",
"0.60210645",
"0.60186213",
"0.601444",
"0.6008823",
"0.60041934",
"0.598936",
"0.5986192",
"0.5966902",
"0.59527344"
] | 0.7406272 | 0 |
Manage the 999s in the DER_mass_MMC column (to do it we found an interval in which the distribution of (1, 1) is pretty similar to the one of 999, the interval is (60, 80). The masses are going to be uniformly distributed over this interval), subtract 125 (approximate mass of the Higgs boson) and compute the absolute value of it. | def mass_abs(tx) :
x = tx.copy()
nb999 = np.sum(x[:,0] == -999)
uni = np.random.uniform(60, 80, nb999)
for i in range(x.shape[0]) :
if (x[i, 0] == -999) :
x[i, 0] = uni[int(np.random.randint(nb999, size = 1))]
x[:,0] = np.abs(x[:,0] - 125)
return x | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def scale_massmet(mass):\n upper_84 = np.interp(mass, massmet[:, 0], massmet[:, 3])\n lower_16 = np.interp(mass, massmet[:, 0], massmet[:, 2])\n return (upper_84-lower_16)",
"def Mass_diff_005(self, rmax):\n rmax = 10**rmax\n mass_enc = self.int_over_density(rmax)\n return np.abs(mass_enc - 0.005 * self.halo_mass)",
"def get_effective_mass():\n\n H_BAR = 6.582119514e-16 # eV*s\n M_0 = 9.10938356e-31 # kg\n N_KPTS = 6 # Number of k-points included in the parabola.\n\n spin_up = Spin(1)\n\n band_structure = Vasprun('vasprun.xml').get_band_structure()\n\n # Locations of CBM and VBM in band_structure.bands\n cbm_band_index = band_structure.get_cbm()['band_index'][spin_up][0]\n cbm_kpoint_index = band_structure.get_cbm()['kpoint_index'][0]\n\n vbm_band_index = band_structure.get_vbm()['band_index'][spin_up][0]\n vbm_kpoint_index = band_structure.get_vbm()['kpoint_index'][0]\n\n k = {'electron': {'left': [], 'right': []},\n 'hole': {'left': [], 'right': []}}\n E = {'electron': {'left': [], 'right': []},\n 'hole': {'left': [], 'right': []}}\n\n e_ref_coords = band_structure.kpoints[cbm_kpoint_index]._ccoords\n h_ref_coords = band_structure.kpoints[vbm_kpoint_index]._ccoords\n\n for n in range(-N_KPTS, 1):\n e_coords = band_structure.kpoints[cbm_kpoint_index + n]._ccoords\n h_coords = band_structure.kpoints[vbm_kpoint_index + n]._ccoords\n\n k['electron']['left'].append(\n ((e_coords[0] - e_ref_coords[0])**2 +\n (e_coords[1] - e_ref_coords[1])**2 +\n (e_coords[2] - e_ref_coords[2])**2)**0.5\n )\n k['hole']['left'].append(\n ((h_coords[0] - h_ref_coords[0])**2 +\n (h_coords[1] - h_ref_coords[1])**2 +\n (h_coords[2] - h_ref_coords[2])**2)**0.5\n )\n\n e_energy = band_structure.bands[\n spin_up][cbm_band_index][cbm_kpoint_index + n]\n h_energy = band_structure.bands[\n spin_up][vbm_band_index][vbm_kpoint_index + n]\n\n E['electron']['left'].append(e_energy)\n E['hole']['left'].append(h_energy)\n\n for n in range(1, 1 + N_KPTS):\n e_coords = band_structure.kpoints[cbm_kpoint_index + n]._ccoords\n h_coords = band_structure.kpoints[vbm_kpoint_index + n]._ccoords\n\n k['electron']['right'].append(\n ((e_coords[0] - e_ref_coords[0])**2 +\n (e_coords[1] - e_ref_coords[1])**2 +\n (e_coords[2] - e_ref_coords[2])**2)**0.5\n )\n k['hole']['right'].append(\n ((h_coords[0] - h_ref_coords[0])**2 +\n (h_coords[1] - h_ref_coords[1])**2 +\n (h_coords[2] - h_ref_coords[2])**2)**0.5\n )\n\n e_energy = band_structure.bands[\n spin_up][cbm_band_index][cbm_kpoint_index + n]\n h_energy = band_structure.bands[\n spin_up][vbm_band_index][vbm_kpoint_index + n]\n\n E['electron']['right'].append(e_energy)\n E['hole']['right'].append(h_energy)\n\n # 2nd order fits\n e_l_fit = np.poly1d(\n np.polyfit(k['electron']['left'], E['electron']['left'], 2))\n e_r_fit = np.poly1d(\n np.polyfit(k['electron']['right'], E['electron']['right'], 2))\n h_l_fit = np.poly1d(\n np.polyfit(k['hole']['left'], E['hole']['left'], 2))\n h_r_fit = np.poly1d(\n np.polyfit(k['hole']['right'], E['hole']['right'], 2))\n\n # Curvatures\n e_l_curvature = e_l_fit.deriv().deriv()[0]\n e_r_curvature = e_r_fit.deriv().deriv()[0]\n h_l_curvature = h_l_fit.deriv().deriv()[0]\n h_r_curvature = h_r_fit.deriv().deriv()[0]\n\n # Unit conversion\n e_m_eff_l = 10 * ((H_BAR ** 2) / e_l_curvature) / M_0\n e_m_eff_r = 10 * ((H_BAR ** 2) / e_r_curvature) / M_0\n h_m_eff_l = -10 * ((H_BAR ** 2) / h_l_curvature) / M_0\n h_m_eff_r = -10 * ((H_BAR ** 2) / h_r_curvature) / M_0\n\n return {'electron': {'left': e_m_eff_l, 'right': e_m_eff_r},\n 'hole': {'left': h_m_eff_l, 'right': h_m_eff_r}}",
"def _mass_cut(mass_grid,min_mass):\n itr_cut = np.argmax(mass_grid>=min_mass)\n if itr_cut==0 or mass_grid[itr_cut]==min_mass:\n mass = mass_grid[itr_cut::]\n else:\n mass = np.hstack([min_mass,mass_grid[itr_cut::]])\n return mass",
"def fuel_for_mass(mass):\n return int(mass / 3) - 2",
"def calculate_fuel_from_mass(mass):\n return int(mass / 3) - 2",
"def process_module_mass(module_mass):\n\n return max(0, module_mass // 3 - 2)",
"def fuel_from_mass(m):\n return (m/3).astype(int) - 2",
"def lookup_effective_mass_area_factor(self, cm):\n\n if cm == 0.0:\n return 0.0\n elif 0.0 < cm <= 165000.0:\n return 2.5\n else:\n return 3.2",
"def calculate_mass(self\n ):\n\n\n # Initialize array of Nan's for mass column of route_df\n full_mass_column = np.zeros(len(self.route_df.index))\n full_mass_column[:] = np.nan\n\n order = np.sort(self.stop_nn_indicies.ravel())\n\n\n for i in range(len(self.mass_array)): \n full_mass_column[order[i]] = self.mass_array[i]\n \n \n # Set initial and value to unloaded bus mass.\n full_mass_column[0] = self.unloaded_bus_mass\n full_mass_column[-1] = self.unloaded_bus_mass\n\n\n for i in range(len(full_mass_column)-1):\n if np.isnan(full_mass_column[i]):\n full_mass_column[i] = full_mass_column[i-1]\n else:\n continue\n\n return full_mass_column",
"def list_masses(self):\n masses = self.contents['Sub_ID']\n for i in range(self.num_atom_types):\n masses = np.where(masses == i, float(self.masses[i]), masses)\n self.contents['Mass'] = masses",
"def calculate_mass(self, band_number, minkval, maxkval):\n # Identifying the gamma points in the list\n gamma_points = [self.kpath_k[x] for x in range(0,len(self.kpath_symbol)) if self.kpath_symbol[x]=='$\\\\Gamma$'] \n index = self.band_data_x[band_number].index(gamma_points[0])\n\n x_to_fit = []\n y_to_fit = []\n xfit = []\n yfit = []\n SecondDeri = []\n effective_masses = []\n for x in gamma_points:\n index = self.band_data_x[band_number].index(x)\n x_temp = []\n y_temp = []\n for datapoint in range(minkval,maxkval+1):\n try:\n x_temp.append(self.band_data_x[band_number][index + datapoint])\n y_temp.append(self.band_data_y[band_number][index + datapoint])\n except:\n pass\n y_temp = eV_to_J(y_temp)\n x_temp = kvalue_adjuster(x_temp)\n x_to_fit.append(x_temp)\n y_to_fit.append(y_temp)\n\n # Calculate coefficients for the polynomial\n fit_coeffs = np.polyfit(x_temp, y_temp, 2) #making the fit\n SecondDeri.append(fit_coeffs[0])\n effective_masses.append(hbar**2/fit_coeffs[0])\n print(f\"Debug: fit_coeffs: {fit_coeffs}\")\n print(f\"Debug: effective_masses: {effective_masses}\")\n print(f\"Debug: [x/e for x in effective_masses]: {[x/m0 for x in effective_masses]}\")\n\n #Making the ploynomial\n f = np.poly1d(fit_coeffs)\n\n # calculate new x's and y's\n x_new = np.linspace(x_temp[0], x_temp[-1], 100)\n y_new = f(x_new)\n\n xfit.append(x_new)\n yfit.append(y_new)\n\n plt.figure(1)\n for x in range(0,len(x_to_fit)):\n plt.plot(x_to_fit[x], y_to_fit[x], linewidth=0.4, label=f\"Band around $\\Gamma$ point\")\n plt.plot(xfit[x], yfit[x], linewidth=0.4, label=f\"Fit $m^*$: {effective_masses[x]:.5E}\")\n plt.xlabel('kpath values')\n plt.ylabel('Energy in J (E - E$_f$)')\n plt.legend(loc='upper right')\n plt.title(\"Effective mass calculations\")\n plt.savefig(f\"Band_{band_number}.pdf\")",
"def fuel_calc(mass):\n return max((mass / 3) - 2, 0)",
"def mass_anti_ice(\n design_mass_TOGW: float,\n):\n return 0.002 * design_mass_TOGW",
"def test_get_mass(self):\n self.assertEqual(get_element_mass(1), (1.00782503224, 1)) # test input by integer\n self.assertEqual(get_element_mass('Si'), (27.97692653465, 14)) # test string input and most common isotope\n self.assertEqual(get_element_mass('C', 13), (13.00335483507, 6)) # test specific isotope\n self.assertEqual(get_element_mass('Bk'), (247.0703073, 97)) # test a two-element array (no isotope data)",
"def cal_mass(self):\n\n if not self.check_def(['E','px','py','pz']):\n sys.exit('Particle error: Quadri impulsion not define (error for mass routine)')\n\n\n \n if self.E**2-self.px**2-self.py**2-self.pz**2>1e-7: #precision problem\n self.mass=math.sqrt(self.E**2-self.px**2-self.py**2-self.pz**2)\n else:\n self.mass=0",
"def get_mass(elem):\n return mass[get_num(elem)]",
"def n_avg(self,min_mass,z):\n if isinstance(z,np.ndarray):\n if isinstance(min_mass,np.ndarray) and not min_mass.size==z.size:\n raise ValueError('min_mass and z must be the same size')\n\n if np.any(min_mass<self.mass_grid[0]):\n raise ValueError('specified minimum mass too low, increase log10_min_mass')\n\n G = self.Growth(z)\n\n if isinstance(min_mass,np.ndarray) and isinstance(z,np.ndarray):\n result = np.zeros(min_mass.size)\n for i in range(0,min_mass.size):\n mass = _mass_cut(self.mass_grid,min_mass[i])\n mf_i = self.dndM_G(mass,G[i])\n result[i] = trapz2(mf_i,mass)\n else:\n if isinstance(min_mass,np.ndarray):\n mf = self.dndM_G(self.mass_grid,G)\n mf_int = -cumtrapz(mf[::-1],self.mass_grid[::-1],initial=0.)[::-1]\n if np.all(min_mass==self.mass_grid):\n #no need to extrapolate if already is result\n result = mf_int\n else:\n cut_itrs = np.zeros(min_mass.size,dtype=np.int)\n for i in range(0,min_mass.size):\n cut_itrs[i] = np.argmax(self.mass_grid>=min_mass[i])\n dm = self.mass_grid[cut_itrs]-min_mass\n mf_ext = self.dndM_G(min_mass,G)+mf[cut_itrs]\n result = mf_int[cut_itrs]+(mf_ext)*dm/2.\n else:\n mass = _mass_cut(self.mass_grid,min_mass)\n mf = self.dndM_G(mass,G)\n result = trapz2(mf,mass)\n\n if DEBUG:\n assert np.all(result>=0.)\n return result",
"def atom_to_mass_frac(atom):\n if type(atom) is np.ndarray:\n if len(atom) != 6:\n raise ValueError(\"Array has to be of len 6 (U232 to U236, U238)\")\n isotopes = [f\"92{i}0000\" for i in range(232, 239) if i != 237]\n masses = np.array([atomic_mass(iso) for iso in isotopes])\n\n mass = atom * masses\n mass /= mass.sum()\n \n return mass\n \n mass = {}\n normalisation = 0\n for key, value in atom.items():\n if key > 250 or key < 1:\n raise KeyError('Keys have to be the atomic masses, '\n + 'not NucIds or something else.')\n val = value * key\n mass[key] = val\n normalisation += val\n\n for key in mass.keys():\n mass[key] /= normalisation\n \n return mass",
"def getMasses(self):\n try:\n return self._massList\n except AttributeError:\n self._massList = [float(x) for x in self._raw_data['MASS']]\n return self._massList",
"def test_particle_mass_berkelium_249():\n assert np.isclose(\n particle_mass(\"berkelium-249\").to(u.u).value, 249.0749877\n ), \"Incorrect isotope mass for berkelium.\"",
"def mass(self):\n\t\treturn self.volume*self.density",
"def _get_molecule_center_of_mass(self):\n center_of_mass = np.zeros([3], dtype=float)\n masses = self._prmtop[\"MASS\"]\n for atom_ind in range(len(self._crd)):\n center_of_mass += masses[atom_ind] * self._crd[atom_ind]\n total_mass = masses.sum()\n if total_mass == 0:\n raise RuntimeError(\"zero total mass\")\n return center_of_mass / total_mass",
"def test_get_center_of_mass(self):\n symbols = ['C', 'H', 'H', 'H', 'H']\n coords = np.array([[0.0000000, 0.0000000, 0.0000000],\n [0.6269510, 0.6269510, 0.6269510],\n [-0.6269510, -0.6269510, 0.6269510],\n [-0.6269510, 0.6269510, -0.6269510],\n [0.6269510, -0.6269510, -0.6269510]], np.float64)\n center_of_mass = get_center_of_mass(coords=coords, symbols=symbols)\n for cm_coord in center_of_mass:\n self.assertEqual(cm_coord, 0.0)\n\n symbols = ['O', 'C', 'C', 'H', 'H', 'H', 'H', 'H', 'H']\n coords = np.array([[1.28706525, 0.52121353, 0.04219198],\n [0.39745682, -0.35265044, -0.63649234],\n [0.36441173, -1.68197093, 0.08682400],\n [-0.59818222, 0.10068325, -0.65235399],\n [0.74799641, -0.48357798, -1.66461710],\n [0.03647269, -1.54932006, 1.12314420],\n [-0.31340646, -2.38081353, -0.41122551],\n [1.36475837, -2.12581592, 0.12433596],\n [2.16336803, 0.09985803, 0.03295192]], np.float64)\n center_of_mass = get_center_of_mass(coords=coords, symbols=symbols)\n self.assertAlmostEqual(center_of_mass[0], 0.7201, 3)\n self.assertAlmostEqual(center_of_mass[1], -0.4880, 3)\n self.assertAlmostEqual(center_of_mass[2], -0.1603, 3)\n\n numbers = [6, 6, 8, 1, 1, 1, 1, 1, 1]\n coords = np.array([[1.1714680, -0.4048940, 0.0000000],\n [0.0000000, 0.5602500, 0.0000000],\n [-1.1945070, -0.2236470, 0.0000000],\n [-1.9428910, 0.3834580, 0.0000000],\n [2.1179810, 0.1394450, 0.0000000],\n [1.1311780, -1.0413680, 0.8846660],\n [1.1311780, -1.0413680, -0.8846660],\n [0.0448990, 1.2084390, 0.8852880],\n [0.0448990, 1.2084390, -0.8852880]], np.float64)\n center_of_mass = get_center_of_mass(coords=coords, numbers=numbers)\n self.assertAlmostEqual(center_of_mass[0], -0.0540, 3)\n self.assertAlmostEqual(center_of_mass[1], -0.0184, 3)\n self.assertAlmostEqual(center_of_mass[2], -0.0000, 3)",
"def mass(self):\n return self._getAttribute(Attribute.mass)",
"def get_center_of_mass_enemies(self,obs):",
"def _get_cbeam_mass_no_nsm(model, elem, mass, cg, inertia, reference_point):\n prop = elem.pid_ref\n xyz1, xyz2 = elem.get_node_positions()\n centroid = (xyz1 + xyz2) / 2.\n length = norm(xyz2 - xyz1)\n\n is_failed, out = elem.get_axes(model)\n if is_failed:\n model.log.error(str(out))\n raise RuntimeError(out)\n\n wa, wb, _ihat, jhat, khat = out\n p1 = xyz1 + wa\n p2 = xyz2 + wb\n if prop.type == 'PBEAM':\n rho = prop.Rho()\n # we don't call the MassPerLength method so we can put the NSM centroid\n # on a different axis (the PBEAM is weird)\n mass_per_lengths = []\n nsm_per_lengths = []\n for (area, nsm) in zip(prop.A, prop.nsm):\n mass_per_lengths.append(area * rho)\n nsm_per_lengths.append(nsm)\n mass_per_length = integrate_positive_unit_line(prop.xxb, mass_per_lengths)\n nsm_per_length = integrate_positive_unit_line(prop.xxb, nsm_per_lengths)\n #print('nsm/Ls=%s nsm/L=%s' % (nsm_per_lengths, nsm_per_length))\n #print('mass/Ls=%s mass/L=%s' % (mass_per_lengths, mass_per_length))\n nsm_n1 = (p1 + jhat * prop.m1a + khat * prop.m2a)\n nsm_n2 = (p2 + jhat * prop.m1b + khat * prop.m2b)\n nsm_centroid = (nsm_n1 + nsm_n2) / 2.\n\n elif prop.type == 'PBEAML':\n mass_per_lengths = prop.get_mass_per_lengths()\n #mass_per_length = prop.MassPerLength() # includes simplified nsm\n\n # m1a, m1b, m2a, m2b=0.\n nsm_centroid = (p1 + p2) / 2.\n\n # mass_per_length already includes nsm\n mass_per_length = integrate_positive_unit_line(prop.xxb, mass_per_lengths)\n nsm_per_length = 0.\n\n #nsm_centroid = np.zeros(3) # TODO: what is this...\n #nsm = prop.nsm[0] * length # TODO: simplified\n elif prop.type == 'PBCOMP':\n mass_per_length = prop.MassPerLength()\n nsm_per_length = prop.nsm\n nsm_n1 = (p1 + jhat * prop.m1 + khat * prop.m2)\n nsm_n2 = (p2 + jhat * prop.m1 + khat * prop.m2)\n nsm_centroid = (nsm_n1 + nsm_n2) / 2.\n elif prop.type == 'PBMSECT':\n return mass\n #mass_per_length = prop.MassPerLength()\n #m = mass_per_length * length\n #nsm = prop.nsm\n else: # pragma: no cover\n raise NotImplementedError(prop.type)\n\n m = mass_per_length * length\n nsm = nsm_per_length * length\n if CHECK_MASS and ((m + nsm) != elem.Mass() or not np.array_equal(centroid, elem.Centroid())): # pragma: no cover\n msg = 'CBEAM; eid=%s; %s pid=%s; m/L=%s nsm/L=%s; length=%s\\n' % (\n elem.eid, elem.pid, prop.type, mass_per_length, nsm_per_length, length)\n msg += 'mass_new=%s mass_old=%s\\n' % (m, elem.Mass())\n msg += 'centroid_new=%s centroid_old=%s\\n%s' % (\n str(centroid), str(elem.Centroid()), str(elem))\n raise RuntimeError(msg)\n\n #nsm = (nsm_per_length + nsmi) * length\n (x, y, z) = centroid - reference_point\n (xm, ym, zm) = nsm_centroid - reference_point\n x2 = x * x\n y2 = y * y\n z2 = z * z\n xm2 = xm * xm\n ym2 = ym * ym\n zm2 = zm * zm\n\n # Ixx, Iyy, Izz, Ixy, Ixz, Iyz\n inertia[0] += m * (y2 + z2) + nsm * (ym2 + zm2)\n inertia[1] += m * (x2 + z2) + nsm * (xm2 + zm2)\n inertia[2] += m * (x2 + y2) + nsm * (xm2 + ym2)\n inertia[3] += m * x * y + nsm * xm * ym\n inertia[4] += m * x * z + nsm * xm * zm\n inertia[5] += m * y * z + nsm * ym * zm\n massi = m + nsm\n mass += massi\n cg += m * centroid + nsm * nsm_centroid\n return mass",
"def mass_to_atom_frac(mass):\n atom = {}\n normalisation = 0\n for key, value in mass.items():\n if key > 250 or key < 1:\n raise KeyError('Keys have to be the atomic masses, '\n + 'not NucIds or something else.')\n val = value / key\n atom[key] = val\n normalisation += val\n\n for key in atom.keys():\n atom[key] /= normalisation\n \n return atom",
"def mass_eval(self):\n # Calculate lengths\n L = np.zeros(self.m)\n for i in range(self.m):\n L[i] = np.linalg.norm(self.coord[self.con[i, 0], :] - self.coord[self.con[i, 1], :])\n\n # Calculate total mass\n self.mass = 0\n for i in range(self.m):\n self.mass += L[i]*self.WEIGHT[int(self.sizes[i])]",
"def bias_n_avg(self,min_mass,z):\n if isinstance(z,np.ndarray):\n if isinstance(min_mass,np.ndarray) and not min_mass.size==z.size:\n raise ValueError('min_mass and z must be the same size')\n\n if np.any(min_mass<self.mass_grid[0]):\n raise ValueError('specified minimum mass too low, increase log10_min_mass')\n\n G = self.Growth(z)\n\n if isinstance(min_mass,np.ndarray) and isinstance(z,np.ndarray):\n result = np.zeros(min_mass.size)\n for i in range(0,min_mass.size):\n mass = _mass_cut(self.mass_grid,min_mass[i])\n mf_i = self.dndM_G(mass,G[i])\n b_i = self.bias_G(mass,G[i])\n result[i] = trapz2(mf_i*b_i,mass)\n else:\n if isinstance(min_mass,np.ndarray):\n mf = self.dndM_G(self.mass_grid,G)\n b_array = self.bias_G(self.mass_grid,G)\n mf_b = mf*b_array\n mf_b_int = -cumtrapz(mf_b[::-1],self.mass_grid[::-1],initial=0.)[::-1]\n\n if np.array_equal(min_mass,self.mass_grid):\n #no need to extrapolate if already is result\n result = mf_b_int\n else:\n cut_itrs = np.zeros(min_mass.size,dtype=np.int)\n for i in range(0,min_mass.size):\n cut_itrs[i] = np.argmax(self.mass_grid>=min_mass[i])\n dm = self.mass_grid[cut_itrs]-min_mass\n mf_b_ext = self.dndM_G(min_mass,G)*self.bias_G(min_mass,G)+mf_b[cut_itrs]\n result = mf_b_int[cut_itrs]+(mf_b_ext)*dm/2.\n else:\n mass = _mass_cut(self.mass_grid,min_mass)\n mf = self.dndM_G(mass,G)\n b_array = self.bias_G(mass,G)\n result = trapz2(b_array*mf,mass)\n\n if DEBUG:\n assert np.all(result>=0.)\n\n return result"
] | [
"0.62112576",
"0.5987842",
"0.5915239",
"0.57330024",
"0.57113755",
"0.56495",
"0.5595405",
"0.546798",
"0.5456707",
"0.5407901",
"0.5397664",
"0.5354576",
"0.5337269",
"0.5292073",
"0.52603114",
"0.5234231",
"0.5220511",
"0.5217592",
"0.52087736",
"0.51893",
"0.5187847",
"0.5183211",
"0.5182683",
"0.5162585",
"0.5158934",
"0.51554716",
"0.51494145",
"0.51475596",
"0.51470774",
"0.51467526"
] | 0.6446909 | 0 |
Computes forces for equilibrium. | def _compute_forces(self):
# get new coeffs
self._get_coeffs()
# instead of writing many time
awa = self.awa / 180.0 * np.pi
# lift and drag
self.lift = 0.5 * self.rho * self.aws ** 2 * self.area * self.cl
self.drag = 0.5 * self.rho * self.aws ** 2 * self.area * self.cd + self._get_Rw(awa)
# project into yacht coordinate system
self.Fx = self.lift * np.sin(awa) - self.drag * np.cos(awa)
self.Fy = self.lift * np.cos(awa) + self.drag * np.sin(awa)
# heeling moment
self.Mx = self.Fy * self._vce() * np.cos(self.phi / 180.0 * np.pi)
# side-force is horizontal component of Fh
self.Fy *= np.cos(np.deg2rad(self.phi)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def calculate_forces(self):\n # get converged calculation\n # create new calculation with l_f =T, gff allow for relaxation of certain\n # atomtyps or species\n # dont copy broyden files, copy cdn1?\n # run fleur\n self.ctx.loop_count2 = self.ctx.loop_count2 + 1\n last_calc2 = self.ctx.last_calc2\n # be careful, test if convergence success or not...\n fleurinp = last_calc2.get('fleurinp', None)\n if fleurinp:\n fleurinp_new = fleurinp.copy()\n else: # warning\n fleurinp_new = None\n print 'no fleurinp data was found in last_calc2'\n if False: # TODO something other specified in wf parameters\n change_dict = {'l_f' : True}\n else: # relax every atom in all direction specified in inp.xml\n change_dict = {'l_f' : True} # for calculation of forces\n\n fleurinp_new.set_inpchanges(change_dict)\n #fleurinp_new.store()# needed?\n\n remote = last_calc2.get('remote_folder', None)\n\n # run fleur\n FleurProcess = FleurCalculation.process()\n inputs = FleurCalculation.process().get_inputs_template()\n\n #inputs.parent_folder = remote\n inputs.code = self.inputs.fleur\n inputs.fleurinpdata = fleurinp_new\n inputs.parent_folder = remote # we need to copy cnd1\n inputs._options.resources = {\"num_machines\": 1}\n inputs._options.max_wallclock_seconds = 30 * 60\n # if code local use\n #if self.inputs.fleur.is_local():\n # inputs._options.computer = computer\n #else:\n # inputs._options.queue_name = 'th1'\n inputs._options.withmpi = False # for now\n print 'Relax structure with Fleur, cycle: {}'.format(self.ctx.loop_count2)\n future = self.submit(FleurProcess, inputs)\n\n self.ctx.calcs.append(future)\n\n return ToContext(last_calc2=future)",
"def update_forces(self):\r\n # update all the functions\r\n self.compute_gravity()\r\n self.compute_tides()\r\n self.compute_centrifugal()\r\n self.compute_coriolis()\r\n\r\n # add together the forces into the summation function\r\n self.forcing.assign(self.ftides+self.gravity +\r\n self.centrifugal+self.coriolis)",
"def compute_equilibrium(self): \r\n # First compute a valid lower bound for the total flow\r\n totalflow_lb = max(0, np.amax(1 - np.multiply(self.capacity, np.exp(self.beta * np.power(self.capacity, self.theta) + self.phi - self.b))))\r\n if totalflow_lb > 1:\r\n print(\"This network does not have equilibrium!\")\r\n else:\r\n # Compute the total flow at equilibrium z_star\r\n z_star = bisection_search(diff_totalflow_sumflow, totalflow_lb, 1, [1e-10, 1e-10], True, self) \r\n # Compute the flow over each route at equilibrium \r\n for i in range(self.num_routes):\r\n self.flow[i] = bisection_search(zeta, 0, self.capacity[i], [1e-10, 1e-10], True, self, z_star, i, 4)",
"def calculate_forces(v0, mu, density_m, CD, diameter_b, \\\n area_b, volume_b, density_b, \\\n dt, T):\n \n # Gravitational const. m/s^2\n g = 9.81 \n # Proportionality constant for\n # Reynolds number\n Re_const = diameter_b*density_m/mu\n \n a_s = 3*math.pi*diameter_b*mu/(density_b*volume_b)\n a_q = 0.5*CD*density_m*area_b/(density_b*volume_b)\n b = g*(density_m/density_b - 1.0)\n \n # Numerical solution gives velocity as \n # a function of time.\n v, t = vm.solver(v0, a_s, a_q, b, Re_const, T, dt) \n\n # Initialize vectors\n Fg = zeros(len(v))\n Fb = zeros(len(v))\n Fd = zeros(len(v))\n\n # Loop over time steps\n for n in range(0, len(v)):\n # Evaluate Reynolds number\n Re = Re_const*v[n] \n \n # Gravity force\n Fg[n] = -density_b*volume_b*g\n # Bouyancy force\n Fb[n] = density_m*g*volume_b\n \n # Drag force\n if abs(Re) < 1:\n # If Re < 1, use Stokes' drag force \n Fd[n] = -3.0*math.pi*diameter_b*mu*v[n]\n else:\n # If Re >= 1, use the quadratic\n # drag force\n Fd[n] = -0.5*CD*density_m*area_b*abs(v[n])*v[n]\n\n \n return Fg, Fb, Fd, t",
"def run_solution1(self):\n return reduce(lambda a, b: a + self.calculate_fuel(b), self.data, 0)",
"def get_forces(self):\n\n N = len(self.particles)\n particle_forces = np.zeros( (N,3) ) # Initialises force output array.\n\n # Use C++ version if cppenabled\n if(self.cppenabled):\n accelerate_lib.c_getforces(self.get_positions(), particle_forces,\n self.boxdim, self.LJ_cutoff)\n return particle_forces\n\n # Python calculation if cppenabled = False:\n # Iterate over all i<j, then calculate\n # force for each i, j combination\n for i in range(N):\n for j in range(i):\n # Get force of particle i on j, respecting pbc and mic.\n sep = Particle3D.pbc_sep(self.particles[i], self.particles[j], self.boxdim)\n force = LJ_Force(sep, self.LJ_cutoff)\n particle_forces[j] += force\n particle_forces[i] += -force # Using Newtons 3rd law\n\n return particle_forces",
"def updateForces(self):\n for atom1 in range(0, self.numAtoms-1):\n for atom2 in range(atom1+1, self.numAtoms):\n self.calculateForce(atom1, atom2)\n \n # Multiply by constants \n for atom in range(0, self.numAtoms):\n self.atoms[atom].fx *= 48*self.e\n self.atoms[atom].fy *= 48*self.e\n self.atoms[atom].fz *= 48*self.e\n self.atoms[atom].potential *= 4*self.e",
"def _compute_force(mass, evo_config):\n\n max_force = evo_config['individuals']['max_force']\n min_force = evo_config['individuals']['min_force']\n max_force = max_force - min_force\n return 1 / (1 / max_force + np.exp(-mass * 3)) + min_force",
"def forces(self):\n pass",
"def update_forces(self):\n element: Cell\n element1: Cell\n element2: Cell\n for element in self.cells:\n element.force = Vector(.0, .0, .0)\n\n tmp_systems = self.cells.copy()\n to_delete = set()\n for element1 in self.cells:\n for element2 in self.cells:\n if element1 == element2:\n continue\n if element2.distance(element1) <= element2.r_max:\n force = find_force(element1, element2)\n element1.force += force\n element2.force += -force\n to_delete.add(element2)\n tmp_systems -= to_delete\n to_delete.clear()",
"def atomistic_step(self):\n # first minimize in vacuum, in either case, \n # fixes problems with langevin bond deformation.\n self.system.minimize()\n \n if self.system.should_solvate:\n with self.system.solvate() as sol:\n with self.system.minimize(**sol) as mn:\n with self.system.equilibriate(**mn) as eq:\n self.system.md(**eq)\n else:\n self.system.equilibriate()\n self.system.md()",
"def CalcForce_aeroframe_DEP(V, CoefMatrix, x, rho, g):\r\n\r\n #Compute aero forces\r\n # here x must be of the form (alpha, beta, p, q, r, da, dr, de) (last one punctualy used)\r\n # set non dim for p,q,r\r\n nonDim=np.ones(7)\r\n nonDim[2]=g.b/(2*V)\r\n nonDim[3]=g.c/(2*V)\r\n nonDim[4]=g.b/(2*V)\r\n # F=np.dot(CoefMatrix,x[0:7]) # commented form, modification to account for symmetric drag increase of side slip\r\n F=np.zeros((3))\r\n M=np.zeros((3))\r\n xsym=np.copy(x[0:-1])\r\n xsym[1]=abs(xsym[1]) # make beta always positive since derivatives have already correct sign for drag and lift only\r\n xsym[-3]=abs(xsym[-3]) # make ailerons deflection always positive for drag increase and lift decrease\r\n xsym[-1]=abs(xsym[-1]) # make rudder deflection always positive for drag increase and lift decrease\r\n F[0]=np.dot(CoefMatrix[0],xsym)\r\n F[1]=np.dot(CoefMatrix[1],x[0:-1]) #side force\r\n F[2]=np.dot(CoefMatrix[2],xsym)\r\n M=np.dot(CoefMatrix[3:6,:],x[0:-1])\r\n# print(\"Printing moment coeff\")\r\n# print(M)\r\n\r\n \r\n #No need to project\r\n# alpha=x[0]\r\n# beta=x[1]\r\n# H=np.array([[math.cos(alpha)*math.sin(beta), -math.cos(alpha)*math.sin(beta), -math.sin(alpha)],[math.sin(beta), math.cos(beta), 0],[math.sin(alpha)*math.cos(beta), -math.sin(alpha)*math.sin(beta), math.cos(alpha)]])\r\n if V<=71 :\r\n Fbody=np.array([-F[0]-g.Cd0_fl,F[1],-F[2]-g.CL0_fl]) # add alpha=0 coefficients\r\n Moment=M+np.array([0,x[-1]*g.Cm_de+g.Cm0_fl,0])\r\n else:\r\n Fbody=np.array([-F[0]-g.Cd0,F[1],-F[2]-g.CL0]) # add alpha=0 coefficients\r\n Moment=M+np.array([0,x[-1]*g.Cm_de+g.Cm0,0])\r\n \r\n\r\n Fbody=0.5*V**2.0*rho*g.S*Fbody\r\n Moment=0.5*V**2.0*rho*g.S*g.b*Moment\r\n \r\n return np.append(Fbody, Moment)",
"def calc_force(a, b, dt):\n # Ignore division by zero since we fix it explicitely by setting the diagonal in the forces arrays\n npf.seterr(divide='ignore', invalid='ignore')\n\n G = 6.673e-11\n\n dx = b['x'] - a['x'][:, None]\n dy = b['y'] - a['y'][:, None]\n dz = b['z'] - a['z'][:, None]\n pm = b['m'] * a['m'][:, None]\n\n #\n # For some reason then this pow(T, 0.5) is deadly to performance...\n # sqrt(T) is equivalent math, trying it out instead.\n #\n # This might actually be a neat optimization:\n # pow(T, 0.K) => k-root(T)\n #\n # r = ( dx ** 2 + dy ** 2 + dz ** 2) ** 0.5\n r = np.sqrt(dx ** 2 + dy ** 2 + dz ** 2)\n\n Fx = G * pm / r ** 2 * (dx / r)\n Fy = G * pm / r ** 2 * (dy / r)\n Fz = G * pm / r ** 2 * (dz / r)\n\n # The diagonal nan numbers must be removed so that the force from a body\n # upon itself is zero\n if a is b:\n fill_diagonal(Fx, 0.)\n fill_diagonal(Fy, 0.)\n fill_diagonal(Fz, 0.)\n\n a['vx'] += np.add.reduce(Fx, axis=1) / a['m'] * dt\n a['vy'] += np.add.reduce(Fy, axis=1) / a['m'] * dt\n a['vz'] += np.add.reduce(Fz, axis=1) / a['m'] * dt",
"def solve_rocket_equations():\n # initial data\n u0 = numpy.array([h0,v0,mp0])\n t = [0.0] # The array of times\n u = [u0] # The array of evolved solutions\n # Evolve!\n while u[-1][0] >= 0.0:\n t.append(t[-1]+dt)\n u.append(euler_step(u[-1],t[-1],rhs,dt))\n return numpy.array(t),numpy.array(u)",
"def update_forces(self):\n\n pass",
"def fluxonium_potential(self):\n return -0.5*(self.Ej * ((1+self.d)*cos(self.phis - 2. * pi * self.phi - 2. * pi * self.phiL) + (1-self.d)*cos(self.phis-2. * pi * self.phiL))) + self.El/2. * (self.phis) ** 2\n #return -0.5*(self.Ej * cos(self.phis - 2. * pi * self.phi) + self.Ej * cos(self.phis)) + self.El/2. * (self.phis-self.phiL)** 2",
"def compute_forces_mesh(self):\n f = self.ptclgrid.grid[:self.size,:self.size]*self.grad_phi_mesh()\n return f",
"def force ( r ):\n \n assert r.shape == (n,3), 'Incorrect shape of r'\n\n d = np.zeros_like(r) # Create d vectors (bonds)\n d[1:n,:] = r[1:n,:] - r[0:n-1,:] # Compute d vectors (zero index not used)\n\n # Store C coefficients in a matrix\n # In the general case we would not need to calculate every pair\n # and also we would make use of the symmetry cc[a,b]=cc[b,a]\n cc = np.zeros((n,n),dtype=np.float_) # Create C array (scalar products)\n for a in range(1,n):\n for b in range(1,n):\n cc[a,b]=np.dot(d[a,:],d[b,:]) # Compute C array (zero indices not used)\n\n a = n-1 # For this test there is just one angle\n\n # Here is the potential as a function of cos(theta)\n # For testing we use the simplest form: v= -cos(theta)\n # The notation matches that used in the appendix\n\n prefac = 1.0 / np.sqrt(cc[a,a]*cc[a-1,a-1])\n fac = cc[a,a-1]\n pot = -prefac*fac # This is -cos(theta)\n\n # Here we include the derivative of the potential with respect to cos(theta) in the prefactor\n # For this simple case it is -1, so the forces are simply gradients of cos(theta) as in the text\n f = np.empty_like(r) # Create force array\n fac1 = fac / cc[a,a]\n fac2 = fac / cc[a-1,a-1]\n f[a,:] = -prefac * ( fac1*d[a,:] - d[a-1,:] )\n f[a-1,:] = prefac * ( fac1*d[a,:] - fac2*d[a-1,:] + d[a,:] - d[a-1,:] )\n f[a-2,:] = prefac * ( fac2*d[a-1,:] - d[a,:] )\n\n return pot, f",
"def applyForce(self, F, dT):",
"def reaction_forces(Ca, la, x1, x2, x3, xa, h, d1, d3, theta, P, q, E, I):\r\n \r\n equation_matrix = np.array([[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], \r\n [1, 0, 0, 1, 0, 1, 0, np.sin(theta), 0, 0, 0, 0, (P*np.sin(theta)+q*la*np.cos(theta))], \r\n [0, 1, 0, 0, 1, 0, 1, np.cos(theta), 0, 0, 0, 0, (P*np.cos(theta)-q*la*np.sin(theta))],\r\n \r\n [-(Ca/4-h/2), 0, 0, -(Ca/4-h/2) ,0 , -(Ca/4-h/2), 0, (np.cos(theta)*h/2-np.sin(theta)*Ca/4), 0, 0, 0, 0, (P*np.cos(theta)*h/2*-P*np.sin(theta)*Ca/4)], \r\n [0, (x2-x1), 0, 0, 0, 0, -(x3-x2), (np.cos(theta)*xa/2), 0, 0, 0, 0, (-P*np.cos(theta)*xa/2+q*la*np.sin(theta)*(la/2-x2))], \r\n [-(x2-x1), 0, 0, 0, 0, (x3-x2), 0, -np.sin(theta)*xa/2, 0, 0, 0, 0, (P*np.sin(theta)*xa/2+q*la*np.cos(theta)*(la/2-x2))], \r\n \r\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, x1, 1, -q*np.sin(theta)*((x1**4)/24)], \r\n [0, ((x2-x1)**3)/6, 0, 0, 0, 0, 0, ((np.cos(theta))*((xa/2)**3)/6), 0, 0, x2, 1, (-q*np.sin(theta)*((x2**4)/24))], \r\n [0, ((x3-x1)**3)/6, 0, 0, ((x3-x2)**3)/6, 0, 0, ((np.cos(theta))*((x3-x2+xa/2)**3)/6), 0, 0, x3, 1, (-q*np.sin(theta)*((x3**4)/24)+P*(np.cos(theta))*(x3-x2-xa/2)**3/6)], \r\n [0, 0, 0, 0, 0, 0, 0, 0, x1, 1, 0, 0, (-E*I*d1*+q*np.cos(theta)*(x1**4)/24)], \r\n [(((x2-x1)**3)/6), 0, 0, 0, 0, 0, 0, ((-np.sin(theta))*((xa/2)**3)/6), x2, 1, 0, 0, (q*np.cos(theta)*(x2**4)/24)], \r\n [(((x3-x1)**3)/6),0,0,(((x3-x2)**3)/6),0,0,0,((-np.sin(theta))*((x3-x2+xa/2)**3)/6),x3,1,0,0,(-E*I*d3*+q*np.cos(theta)*((x3**4)/24)+P/6*np.sin(theta)*(x3-x2-xa/2)**3)]])\r\n \r\n \r\n unknown_matrix = equation_matrix[:,:-1]\r\n constant_matrix = equation_matrix[:,-1]\r\n \r\n \r\n solution_matrix = np.linalg.solve(unknown_matrix,constant_matrix)\r\n \r\n solution_matrix = solution_matrix/1000\r\n \r\n (R1y, R1z, R2x, R2y, R2z, R3y, R3z, RI, c1, c2, c3, c4) = tuple(solution_matrix)\r\n \r\n print((R1y, R1z, R2x, R2y, R2z, R3y, R3z, RI, c1, c2, c3, c4))",
"def equations(self):\n k = 0\n ######################################################################\n # equations for fluid balance\n self.residual[k:k + self.num_nw_fluids] = self.fluid_func()\n k += self.num_nw_fluids\n\n ######################################################################\n # equations for mass flow balance\n self.residual[k] = self.mass_flow_func()\n k += 1\n\n ######################################################################\n # equations for specified heta transfer\n if self.Q.is_set:\n self.residual[k] = self.inl[0].m.val_SI * (\n self.outl[0].h.val_SI - self.inl[0].h.val_SI) - self.Q.val\n k += 1\n\n ######################################################################\n # equations for specified pressure ratio\n if self.pr.is_set:\n self.residual[k] = (\n self.inl[0].p.val_SI * self.pr.val - self.outl[0].p.val_SI)\n k += 1\n\n ######################################################################\n # equations for specified zeta\n if self.zeta.is_set:\n if np.absolute(self.residual[k]) > err ** 2 or self.it % 4 == 0:\n self.residual[k] = self.zeta_func(zeta='zeta')\n k += 1\n\n ######################################################################\n # equation for specified hydro-group paremeters\n if self.hydro_group.is_set:\n if np.absolute(self.residual[k]) > err ** 2 or self.it % 4 == 0:\n # hazen williams equation\n if self.hydro_group.method == 'HW':\n func = self.hw_func\n # darcy friction factor\n else:\n func = self.darcy_func\n self.residual[k] = func()\n k += 1\n\n ######################################################################\n # additional equations\n self.additional_equations(k)",
"def alchemical_forces(self):\n return self._cpp_obj._forces * self._cpp_obj.norm_value",
"def _compute_solar_torque(self, curr_date):\n if self._to_add[2]:\n inertial2Sat = self.spacecraft_state.getAttitude().getRotation()\n\n ratio = self.SolarModel.getLightingRatio(self.satPos_i,\n self.in_frame,\n curr_date)\n\n sunPos = inertial2Sat.applyTo(\n self.sun.getPVCoordinates(curr_date,\n self.in_frame).getPosition())\n self._sTorque = Vector3D.ZERO\n\n iterator = itertools.izip(self.meshDA['CoM'],\n self.meshDA['Normal'],\n self.meshDA['Area'],\n self.meshDA['Coefs'])\n\n for CoM, normal, area, coefs in iterator:\n position = self.satPos_s.add(CoM)\n\n # compute flux in inertial frame\n sunSatVector = \\\n position.subtract(sunPos)\n r2 = sunSatVector.getNormSq()\n\n rawP = ratio * self.K_REF / r2\n flux = Vector3D(rawP / sqrt(r2), sunSatVector)\n\n # compute Radiation Pressure Force:\n if flux.getNormSq() > Precision.SAFE_MIN:\n # illumination (we are not in umbra)\n # rotate flux to spacecraft frame:\n dot = self.V3_dot(normal, flux)\n\n if dot > 0:\n # the solar array is illuminated backward,\n # fix signs to compute contribution correctly\n dot = -dot\n normal = normal.negate()\n absorbCoeff = coefs[0]\n specularReflCoeff = coefs[1]\n diffuseReflCoeff = 1 - (absorbCoeff + specularReflCoeff)\n try:\n assert(diffuseReflCoeff >= 0)\n except AssertionError:\n raise AssertionError(\n \"Negative diffuse reflection coefficient not possible!\")\n psr = flux.getNorm()\n # Vallado's equation uses different parameters which are\n # related to our parameters as:\n # cos (phi) = - dot / (psr*area)\n # n = N (n...unit vector)\n # s = -fluxSat / psr (s...unit vector)\n cN = 2 * area * dot * (diffuseReflCoeff / 3 -\n specularReflCoeff * dot / psr)\n cS = (area * dot / psr) * (specularReflCoeff - 1)\n Force = Vector3D(float(cN), normal, float(cS), flux)\n # Force already in spacecraft frame. No need to convert\n self._sTorque = self._sTorque.add(self.V3_cross(CoM, Force))\n\n else:\n self._sTorque = Vector3D.ZERO",
"def forces(self):\n topx_to_id = numpy.vectorize(\n lambda x: xy_to_id(x, 0, self.nelx, self.nely))\n topx = 2 * topx_to_id(numpy.arange(self.nelx + 1)) + 1\n nForces = topx.shape[0]\n cols = numpy.arange(nForces)\n f = numpy.zeros((2 * (self.nelx + 1) * (self.nely + 1), nForces))\n f[topx, cols] = -1\n return f",
"def solve_stationary_equilibrium(self) :\n \n \n \n \n #a. find the equilibrium wage given the tax rate and subsidy\n w_ss = self.find_equilibrium_wage(self.w0_guess, self.w1_guess)\n \n #b. obtain firm policy functions and discount present value factors\n W_e , pol_k, pol_n, pi, W, pol_enter = self.entry_condition(w_ss)\n \n \n #c. obtain the invariant distribution \n \n #i. normalized invariant distribution over firms\n mu_hat = pol_enter/self.lambdaa * self.joint_pdf\n \n #ii. labor market clearing (section 3.5), agg demand for labor\n N_ss = np.sum(np.sum(pol_n*mu_hat, axis=0))\n \n #iii. ss equilibrium level of entry (mass of entrants)\n E_star = 1/N_ss \n \n #iv. rescale invariant distribution over firms, mu(s,tau)\n mu = E_star*mu_hat\n \n #d. marginal distributions\n \n #i. sum over subsidies, except, taxes of stationary distribution\n distrib_stationary = np.sum(mu, axis=1)\n total_mass = np.sum(distrib_stationary)\n \n #ii. marginal stationary distribution over productivity\n pdf_stationary = distrib_stationary / total_mass\n cdf_stationary = np.cumsum(pdf_stationary)\n \n #iii. stationary distribution over number of employed \n distrib_emp = (pol_n[:,2] * pdf_stationary)/ np.sum(pol_n[:,2] * pdf_stationary)\n pdf_emp = distrib_emp / np.sum(distrib_emp)\n cdf_emp = np.cumsum(pdf_emp)\n \n #e. Aggregate statistics\n \n Y_ss = np.sum(np.sum( self.grid_s_matrix * pol_k**self.alpha * pol_n**self.gamma*mu, axis=0)) #ss output\n K_ss = np.sum(np.sum(pol_k*mu, axis=0)) #ss capital\n TFP_ss = Y_ss/(N_ss*E_star)/(K_ss/(N_ss*E_star))**self.alpha\n total_employment = np.dot(self.labor_demand_rel, distrib_stationary)\n average_firm_size = total_employment / total_mass\n \n #output share of subsidy, excemption, taxed\n Y_set = np.sum(self.grid_s_matrix * pol_k**self.alpha*pol_n**self.gamma*mu, axis=0) / Y_ss\n \n Y_sub_percent = Y_set[0] #output share of establishments that are receiving a subsidy, Y_s/Y\n Y_exempt_percent = Y_set[1]\n Y_taxed__Percent = Y_set[2]\n \n #the total subsidies paid out to establishments receiving subsidies as a fraction of output. numerator takes first column which is subsidy (S/Y)\n subsidy_size = np.sum(-self.tau_output[:,0]*self.grid_s_matrix[:,0]*pol_k[:,0]**self.alpha \\\n *pol_n[:,0]**self.gamma*mu[:,0]-self.tau_capital[:,0]*self.ret \\\n *pol_k[:,0]*mu[:,0]-self.tau_labor[:,0]*w_ss* \\\n pol_n[:,0]*mu[:,0]) / Y_ss\n \n \n return Y_ss, K_ss, TFP_ss, average_firm_size, E_star, Y_set, subsidy_size, N_ss, w_ss, cdf_stationary, cdf_emp",
"def equations(self):\n k = 0\n ######################################################################\n # equations for fluid balance\n self.residual[k:k + self.num_nw_fluids * 2] = self.fluid_func()\n k += self.num_nw_fluids * 2\n\n ######################################################################\n # equations for mass flow balance\n self.residual[k:k + 2] = self.mass_flow_func()\n k += 2\n\n ######################################################################\n # equations for energy balance\n self.residual[k] = self.energy_func()\n k += 1\n\n ######################################################################\n # equations for specified heat transfer\n if self.Q.is_set:\n self.residual[k] = (\n self.inl[0].m.val_SI * (\n self.outl[0].h.val_SI - self.inl[0].h.val_SI) - self.Q.val)\n k += 1\n\n ######################################################################\n # equations for specified heat transfer coefficient\n if self.kA.is_set:\n if np.absolute(self.residual[k]) > err ** 2 or self.it % 4 == 0:\n self.residual[k] = self.kA_func()\n k += 1\n\n ######################################################################\n # equations for specified heat transfer coefficient characteristic\n if self.kA_char.is_set:\n if np.absolute(self.residual[k]) > err ** 2 or self.it % 4 == 0:\n self.residual[k] = self.kA_char_func()\n k += 1\n\n ######################################################################\n # equations for specified upper terminal temperature difference\n if self.ttd_u.is_set:\n self.residual[k] = self.ttd_u_func()\n k += 1\n\n ######################################################################\n # equations for specified lower terminal temperature difference\n if self.ttd_l.is_set:\n self.residual[k] = self.ttd_l_func()\n k += 1\n\n ######################################################################\n # equations for specified pressure ratio at hot side\n if self.pr1.is_set:\n self.residual[k] = (\n self.pr1.val * self.inl[0].p.val_SI - self.outl[0].p.val_SI)\n k += 1\n\n ######################################################################\n # equations for specified pressure ratio at cold side\n if self.pr2.is_set:\n self.residual[k] = (\n self.pr2.val * self.inl[1].p.val_SI - self.outl[1].p.val_SI)\n k += 1\n\n ######################################################################\n # equations for specified zeta at hot side\n if self.zeta1.is_set:\n if np.absolute(self.residual[k]) > err ** 2 or self.it % 4 == 0:\n self.residual[k] = self.zeta_func(\n zeta='zeta1', inconn=0, outconn=0)\n k += 1\n\n ######################################################################\n # equations for specified zeta at cold side\n if self.zeta2.is_set:\n if np.absolute(self.residual[k]) > err ** 2 or self.it % 4 == 0:\n self.residual[k] = self.zeta_func(\n zeta='zeta2', inconn=1, outconn=1)\n k += 1\n\n ######################################################################\n # additional equations\n self.additional_equations(k)",
"def CalcForceDistribution(self):\n\t\t\n\t\tself.F = self.s * (self.Tether - self.X)\n\t\t\n\t\treturn self.F",
"def steel_total_force(self,strain_dis, newFOS):\r\n\t\ttotal_force = 0.0\r\n\t\tfor steel in self.reinforcement:\r\n\t\t\tstrain = np.interp(steel[0], self.mesh_center,strain_dis)\r\n\t\t\tforce = (self.steel(strain, newFOS)-self.concrete(strain, newFOS))*steel[1]\r\n\t\t\ttotal_force = total_force + force\r\n\t\treturn total_force",
"def _compute_solar_torque(self):\n pass",
"def force_targ(forces, fatoms):\n\n force_flat = []\n for i, frc in enumerate(forces):\n fr = np.concatenate((np.array([0.0]), frc[fatoms].flatten(), -frc[fatoms].flatten()))\n force_flat.append(fr)\n\n #return np.array(force_flat)\n return force_flat"
] | [
"0.6720713",
"0.6611699",
"0.6556102",
"0.64517087",
"0.6272964",
"0.6248414",
"0.62275743",
"0.6218866",
"0.61995935",
"0.6125085",
"0.6073322",
"0.5916495",
"0.59148777",
"0.59105724",
"0.5849148",
"0.58283186",
"0.58266735",
"0.5764739",
"0.5763695",
"0.57629067",
"0.5732796",
"0.5723397",
"0.5704517",
"0.57024556",
"0.5670982",
"0.5661394",
"0.5644674",
"0.5639925",
"0.56374484",
"0.5627515"
] | 0.66481227 | 1 |
Generate sailset total lift and drag coefficients. | def _get_coeffs(self):
# lift (Clmax) and parasitic drag (Cd0max)
self.cl = 0.0
self.cd = 0.0
kpp = 0.0
for sail in self.sails:
self.cl += sail.cl(self.awa) * sail.area * sail.bk
self.cd += sail.cd(self.awa) * sail.area * sail.bk
kpp += sail.cl(self.awa) ** 2 * sail.area * sail.bk * sail.kp
self.cl /= self.area
self.cd /= self.area
# viscous quadratic parasitic drag and induced drag
devisor_1 = self.area * self.cl ** 2
devisor_2 = np.pi * self._heff(self.awa) ** 2
self.CE = (kpp / devisor_1 if devisor_1 else 0.0) + (self.area / devisor_2 if devisor_2 else 0.0)
# fraction of parasitic drag due to jib
self.fcdj = 0.0
for sail in self.sails:
if sail.type == "jib":
self.fcdj = (
sail.bk * sail.cd(self.awa) * sail.area / (self.cd * self.area)
)
# final lift and drag
self.cd = self.cd * (
self.flat * self.fcdmult(self.flat) * self.fcdj + (1 - self.fcdj)
) + self.CE * self.cl ** 2 * self.flat ** 2 * self.fcdmult(self.flat)
self.cl = self.flat * self.cl | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def cost(self):\n\t\treturn self.g + self.h",
"def get_lift(self):\n return 0.0",
"def main(l, k):\n S = 0\n T = product(xrange(2), repeat=k)\n for ts in T:\n tmp = []\n\n for t, c in zip(ts, cs):\n tmp.append(((-1)*c)**t)\n\n S += (sum(tmp)**l)\n val = (sum(tmp)**l)\n print val\n return S / float(2**(k))",
"def main(l, k):\n S = 0\n T = product(xrange(2), repeat=k)\n for ts in T:\n tmp = []\n\n for t, c in zip(ts, cs):\n tmp.append(((-1)*c)**t)\n\n S += (sum(tmp)**l)\n val = (sum(tmp)**l)\n print val\n return S / float(2**(k))",
"def maCruise(self):\n return .77",
"def totalValue(self):\n\n\t\tvalue = 0\n\t\tfor bottle in self.bottles:\n\t\t\tvalue += bottle.inflatedCost\n\n\t\treturn value",
"def hydro_operating_costs_rule(_m, y, s):\r\n\r\n return sum(m.C_MC[g, y] * m.p[g, y, s, t] for g in m.G_E_HYDRO for t in m.T)",
"def calc_net_generation_wind (self):\n self.net_generation_wind = self.generation_wind_proposed - \\\n self.transmission_losses -\\\n self.excess_energy\n #~ print 'self.net_generation_wind',self.net_generation_wind",
"def thermal_operating_costs_rule(_m, y, s):\r\n\r\n return sum((m.C_MC[g, y] + ((m.EMISSIONS_RATE[g] - m.baseline[y]) * m.permit_price[y])) * m.p[g, y, s, t]\r\n for g in m.G_THERM for t in m.T)",
"def calculate(self):\n\n gt = self.ground_truth.flatten()\n seg = self.segmentation.flatten()\n\n n = gt.size\n mean_gt = gt.mean()\n mean_seg = seg.mean()\n mean = (mean_gt + mean_seg) / 2\n\n m = (gt + seg) / 2\n ssw = np.power(gt - m, 2).sum() + np.power(seg - m, 2).sum()\n ssb = np.power(m - mean, 2).sum()\n\n ssw /= n\n ssb = ssb / (n - 1) * 2\n\n return (ssb - ssw) / (ssb + ssw)",
"def calc_gain(s, i):\n return math.sqrt((i + s) / (6 * s))",
"def cost(self) -> float:",
"def sweep50T(self):\n return 35.6",
"def valuation(self):\n\t\tif self.__tete:\n\t\t\treturn self.__tete.plus_grand().get_coefficient()\n\t\telse:\n\t\t\treturn rationnel()",
"def total_cost(self):\n return np.einsum('i->', self.c[self.s])",
"def calc_points_tower(self):\n points = 0\n cnt_tower = 0\n vptab_tower = (0, 1, 3, 6, 10, 15)\n for i in range(20):\n if self.b[i] == 'T':\n points += vptab_tower[self.f[i]]\n cnt_tower += 1\n if 'poli' in args.exp:\n points += max(self.f)\n if 'scho' in args.exp:\n points += cnt_tower\n return points",
"def iterative_fuel(d):\n accumulator = d\n total = 0\n while True:\n accumulator = math.floor(accumulator / 3) - 2\n if accumulator < 0:\n return total\n total += accumulator",
"def circuit():\n np.random.seed(1967)\n for gates in gates_per_layers:\n for gate in gates:\n qml.apply(gate)\n return qml.expval(qml.PauliZ(0))",
"def overall_reduction(self):\n return 84",
"def spacingEnergy(self, controlpoints):\n # only remember each spacing energy if the given control points are\n # the snakes current control points\n memorize_energies = np.equal(controlpoints, self.controlpoints).all()\n # reset the spacing energy list if necessary\n if memorize_energies:\n self.spc_energies = []\n \n spacing = 0.0\n # iterate over the adjacent control points\n for i in range(len(controlpoints)):\n if i < len(controlpoints)-1:\n ci = controlpoints[i]\n ci_next = controlpoints[i+1]\n \n # compute the distance between the two points\n di = (ci_next[0]-ci[0], ci_next[1]-ci[1])\n di_abs = sqrt(di[0]**2 + di[1]**2)\n current_spacing = ((di_abs/self.goal_length)-1)**2\n \n # add to the overall value\n spacing += current_spacing\n # safe to list if necessary\n if memorize_energies:\n self.spc_energies.append(current_spacing)\n return spacing",
"def totalEnergy(self, controlpoints):\n # spacing is positive and unbound, but smaller than n-1 in pratice\n # curvature is within [0, 2*(n-2)]\n internal = self.spacingEnergy(controlpoints) + self.curvatureEnergy(controlpoints)\n n = len(self.controlpoints)\n internal_max = n-1 + 2*(n-2) \n \n # external is within [0, self.ExternalEnergy.max]\n external = self.externalEnergy(controlpoints)\n \n # return the sum of the scaled internal and the external energy\n return self.ExternalEnergy.max*(internal/internal_max)*self.inner_weight + external*self.outer_weight",
"def calculate(self):",
"def priceit(self):\n paytree = np.zeros((self.steps+1,self.steps+1))\n paytree[-1,:] = np.array( list( map(lambda x:max(x-self.s,0.0),self.pricetree[-1,:]) ) )\n discount = math.exp( self.r*self.deltatime )\n for i in range(self.steps,0,-1):\n for j in range(i):\n paytree[i-1][j] = (paytree[i][j]*self.upprob +paytree[i][j+1]*(1-self.upprob))/discount\n return paytree[0][0]",
"def wind_heat_transfer_coefficient(self) -> float:\n\n return 3.8 + 2 * self.wind_speed\n # return 4.5 + 2.9 * self.wind_speed",
"def linear_add():\n # global initial_amount\n # initial_amount = 100 * cmstate.start_size\n return 100 * cmstate.start_size",
"def glycolysis_rate_cal (self) :\n x = self.mitochondria.get_atp()\n y = self.mitochondria.get_adp()\n a = self.atp\n b = self.adp\n self.adp_to_atp(self.mitochondria.atp_translocase(math.ceil((x*b - a*y)/(a+b+x+y))))\n if a<1 :\n return\n else :\n self.set_glycolysis(int(5*b/a))",
"def tot(self):\n return self.det + self.out + self.faint + self.late",
"def get_total_shield(self,obs):",
"def system_capex(self):\n\n topside = self.config[\"offshore_substation_topside\"][\"unit_cost\"]\n substructure = self.config[\"offshore_substation_substructure\"][\"unit_cost\"]\n mooring = self.config[\"offshore_substation_substructure\"][\"mooring_cost\"]\n\n return self.num_substations * (topside + substructure + mooring)",
"def _cost_petrol(self):\n return self.distance * self.petrol_usage * self.petrol_cost"
] | [
"0.56124866",
"0.5534608",
"0.5531715",
"0.5531715",
"0.55162984",
"0.551232",
"0.55093086",
"0.5502523",
"0.54637873",
"0.5462315",
"0.5420496",
"0.5391726",
"0.539018",
"0.5386778",
"0.5361632",
"0.53331506",
"0.5307801",
"0.53052086",
"0.5300728",
"0.52987695",
"0.52978647",
"0.5290432",
"0.52843696",
"0.5280979",
"0.5278608",
"0.52754056",
"0.52592355",
"0.5252355",
"0.52516276",
"0.52473253"
] | 0.6082229 | 0 |
Fill sail area variable | def _area(self):
self.area = 0.0
for sail in self.sails:
self.area += sail.area | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def area(self):",
"def fillingrid(self):\n\n if self.imagearray is None:\n if self.gs.isfixed:\n for n in range(0, self.numcols):\n self.vspins[n].setValue(self.currentvalues[n])\n elif self.gs.isperc:\n for n in range(0, self.numcols):\n self.fillinpercent(n)\n else:\n for n in range(0, self.numcols):\n self.nsspins[n].setValue(self.currentnsigs[n])\n else:\n for n in range(0, self.numcols):\n self.vspins[n].setValue(self.currentvalues[n])\n self.nsspins[n].setValue(self.currentnsigs[n])\n self.fillinpercent(n)",
"def FillupArea(self):\r\n\r\n drillsize = self.FromUserUnit(float(self.m_txtViaDrillSize.GetValue()))\r\n viasize = self.FromUserUnit(float(self.m_txtViaSize.GetValue()))\r\n step_x = self.FromUserUnit(float(self.m_txtHSpacing.GetValue()))\r\n step_y = self.FromUserUnit(float(self.m_txtVSpacing.GetValue()))\r\n clearance = self.FromUserUnit(float(self.m_txtClearance.GetValue()))\r\n self.randomize = self.m_chkRandomize.GetValue()\r\n self.clearance = clearance\r\n bbox = self.area.GetBoundingBox()\r\n top = bbox.GetTop()\r\n bottom = bbox.GetBottom()\r\n right = bbox.GetRight()\r\n left = bbox.GetLeft()\r\n netname = self.m_cbNet.GetStringSelection()\r\n netcode = self.board.GetNetcodeFromNetname(netname)\r\n # commit = pcbnew.COMMIT()\r\n viacount = 0\r\n x = left\r\n\r\n # Cycle trough area bounding box checking and implanting vias\r\n layer = self.area.GetLayer()\r\n\r\n while x <= right:\r\n y = top\r\n while y <= bottom:\r\n if self.randomize:\r\n xp = x + random.uniform(-1, 1) * step_x / 5\r\n yp = y + random.uniform(-1, 1) * step_y / 5\r\n else:\r\n xp = x\r\n yp = y\r\n\r\n if hasattr(pcbnew, 'VECTOR2I'):\r\n p = pcbnew.VECTOR2I(xp, yp)\r\n else:\r\n if(hasattr(pcbnew, 'wxPoint')):\r\n p = pcbnew.wxPoint(xp, yp)\r\n\r\n if self.area.HitTestFilledArea(layer, p, 0):\r\n via = pcbnew.PCB_VIA(self.board)\r\n via.SetPosition(p)\r\n via.SetLayer(layer)\r\n via.SetNetCode(netcode)\r\n # Set up via with clearance added to its size-> bounding box check will be OK in worst case, may be too conservative, but additional checks are possible if needed\r\n # TODO: possibly take the clearance from the PCB settings instead of the dialog\r\n # Clearance is all around -> *2\r\n via.SetDrill(drillsize + 2 * clearance)\r\n via.SetWidth(viasize + 2 * clearance)\r\n # via.SetTimeStamp(__timecode__)\r\n if not self.CheckOverlap(via):\r\n # Check clearance only if clearance value differs from 0 (disabled)\r\n if (clearance == 0) or self.CheckClearance(via, self.area, clearance):\r\n via.SetWidth(viasize)\r\n via.SetDrill(drillsize)\r\n self.board.Add(via)\r\n # commit.Add(via)\r\n self.pcb_group.AddItem(via)\r\n viacount += 1\r\n y += step_y\r\n x += step_x\r\n\r\n if viacount > 0:\r\n wx.MessageBox(_(u\"Implanted: %d vias!\") % viacount)\r\n # commit.Push()\r\n pcbnew.Refresh()\r\n else:\r\n wx.MessageBox(_(u\"No vias implanted!\"))",
"def area(self):\n ...",
"def setFill(self, fill):\n self.area_show = fill",
"def fill(self, value, x, y, width, height):\n for sub_y in range(y, y+height):\n for sub_x in range(x, x+width):\n self[sub_x, sub_y] = value",
"def fill():\n # Switch in edit mode\n bpy.ops.object.mode_set(mode = 'EDIT')\n \n # Fill hole\n bpy.ops.mesh.fill()",
"def set_area(self, area=0.0):\n self.area = area",
"def fill(self, *args, **kwargs):\n closed = kwargs.pop('closed', True)\n return super(RadarAxes, self).fill(closed=closed, *args, **kwargs)",
"def fill(self, *args, **kwargs):\n closed = kwargs.pop('closed', True)\n return super(RadarAxes, self).fill(closed=closed, *args, **kwargs)",
"def fill(self):\n return self._turtle.fill()",
"def getFill(self):\n return self.area_show",
"def fill(self, *args, **kwargs):\r\n closed = kwargs.pop('closed', True)\r\n return super(RadarAxes, self).fill(closed=closed, *args, **kwargs)",
"def update_fill(self, event):\r\n\r\n if event.type == 'FILL':\r\n self.update_positions_from_fill(event)\r\n self.update_holdings_from_fill(event)",
"def fill_up(self):\n self.fuel = self.gas_tank_size",
"def fill_box(self, x, y, w, h):\n\t\tpass",
"def fill(self, x, y, color):\n raise NotImplementedError # Override this function in the Solution classes",
"def area(base, height):\n\n return base * height",
"def area(self):\n raise NotImplementedError",
"def update_fill(self, event):\n if event.type == 'FILL':\n self.update_positions_from_fill(event)\n self.update_holdings_from_fill(event)",
"def update_fill(self, event):\n if event.type == 'FILL':\n self.update_positions_from_fill(event)\n self.update_holdings_from_fill(event)",
"def area(self, tileID):\n pass",
"def area(self):\n raise Exception(\"area() is not implemented\")",
"def area(self):\n raise Exception(\"area() is not implemented\")",
"def fill_grid(self, gx, gy, color=Color['white']):\n area = [gx * self.px, gy * self.py, self.px, self.py]\n pygame.draw.rect(self.display, color, area)",
"def fill_single_street():\n if facing_north():\n if not on_beeper():\n if not front_is_clear():\n turn_right()\n move()\n if not on_beeper():\n turn_around()\n # back to the initial position\n move()\n turn_around()\n fill_one_line()",
"def area(self):\n raise Exception('area() is not implemented')",
"def area(self):\n raise Exception('area() is not implemented')",
"def fill(self, value):\n self.defval = value\n for v in self.sects.values():\n v.fill(value)",
"def update_fill(self, event):\n if event.type == 'FILL':\n self.update_positions_from_fill(event)\n self.update_prices_from_fill(event)\n self.update_holdings_from_fill(event)"
] | [
"0.65842456",
"0.64734346",
"0.6444428",
"0.6345076",
"0.63240373",
"0.61849344",
"0.6145598",
"0.60974485",
"0.59381616",
"0.59381616",
"0.593126",
"0.592286",
"0.58886546",
"0.5878622",
"0.5837911",
"0.58069104",
"0.5780059",
"0.574524",
"0.5735918",
"0.5735614",
"0.5735614",
"0.57310766",
"0.5727064",
"0.5727064",
"0.57238394",
"0.5721313",
"0.5702568",
"0.5702568",
"0.57001203",
"0.56762874"
] | 0.68048114 | 0 |
Train and evaluate a BERT NER Model | def train_and_evaluate(OUTPUT_DIR,do_train = True,do_eval=True):
BATCH_SIZE = 32
LEARNING_RATE = 2e-5
NUM_TRAIN_EPOCHS = 5.0
# during the warmup steps the learning rate will be low and training will be slow
WARMUP_PROPORTION = 0.1
if os.path.exists(OUTPUT_DIR) and os.listdir(OUTPUT_DIR) and do_train:
raise ValueError("Output directory ({}) already exists and is not empty.".format(OUTPUT_DIR))
if not os.path.exists(OUTPUT_DIR):
os.makedirs(OUTPUT_DIR)
#create train and test data
train_sents,train_labels,test_sents,test_labels = create_train_test("ADE/DRUG-AE.rel","ADE/negative_data_AE.rel")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased", do_lower_case=True)
if do_train:
train_examples = [InputExample(guid=None,text_a=sentence,text_b=None,label=label) for sentence,label in zip(train_sents, train_labels)]
num_train_examples = len(train_examples)
num_train_steps = int(math.ceil(num_train_examples / BATCH_SIZE * NUM_TRAIN_EPOCHS))
num_warmup_steps = int(num_train_steps * WARMUP_PROPORTION)
model = BertForSequenceClassification.from_pretrained("bert-base-uncased",num_labels = num_labels)
model.to(device)
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = BertAdam(optimizer_grouped_parameters,lr=LEARNING_RATE,warmup=WARMUP_PROPORTION,t_total=num_train_steps)
global_step = 0
nb_tr_steps = 0
tr_loss = 0
train_features = convert_examples_to_features(
train_examples, label_list, MAX_SEQ_LENGTH, tokenizer)
logger.info("***** Running training *****")
logger.info(" Num examples = %d", num_train_examples)
logger.info(" Batch size = %d", BATCH_SIZE)
logger.info(" Num steps = %d", num_train_steps)
all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)
all_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.long)
train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
train_sampler = RandomSampler(train_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=BATCH_SIZE)
model.train()
# for name, param in model.named_parameters():
# if param.requires_grad:
# print(name)
# return
for _ in trange(int(NUM_TRAIN_EPOCHS), desc="Epoch"):
tr_loss = 0
nb_tr_examples, nb_tr_steps = 0, 0
for step, batch in enumerate(tqdm(train_dataloader, desc="Iteration")):
batch = tuple(t.to(device) for t in batch)
input_ids, input_mask, segment_ids, label_id = batch
loss = model(input_ids, segment_ids, input_mask, label_id)
loss.backward()
tr_loss += loss.item()
nb_tr_examples += input_ids.size(0)
nb_tr_steps += 1
optimizer.step()
optimizer.zero_grad()
global_step += 1
print(tr_loss)
# Save a trained model and the associated configuration
model_to_save = model.module if hasattr(model, 'module') else model # Only save the model it-self
output_model_file = os.path.join(OUTPUT_DIR, WEIGHTS_NAME)
torch.save(model_to_save.state_dict(), output_model_file)
output_config_file = os.path.join(OUTPUT_DIR, CONFIG_NAME)
with open(output_config_file, 'w') as f:
f.write(model_to_save.config.to_json_string())
label_map = {i : label for i, label in enumerate(label_list,1)}
model_config = {"bert_model":"bert-base-uncased","do_lower":True,"max_seq_length":MAX_SEQ_LENGTH,"num_labels":num_labels,"label_map":label_map}
json.dump(model_config,open(os.path.join(OUTPUT_DIR,"model_config.json"),"w"))
else:
output_config_file = os.path.join(OUTPUT_DIR, CONFIG_NAME)
output_model_file = os.path.join(OUTPUT_DIR, WEIGHTS_NAME)
config = BertConfig(output_config_file)
model = BertForSequenceClassification(config, num_labels=num_labels)
model.load_state_dict(torch.load(output_model_file))
model.to(device)
if do_eval:
EVAL_BATCH_SIZE = 32
eval_examples = [InputExample(guid=None,text_a=sentence,text_b=None,label=label) for sentence,label in zip(test_sents, test_labels)]
num_eval_examples = len(eval_examples)
eval_features = convert_examples_to_features(
eval_examples, label_list, MAX_SEQ_LENGTH, tokenizer)
logger.info("***** Running evaluation *****")
logger.info(" Num examples = %d", num_eval_examples)
logger.info(" Batch size = %d", EVAL_BATCH_SIZE)
all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)
all_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.long)
eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
# Run prediction for full data
eval_sampler = SequentialSampler(eval_data)
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=EVAL_BATCH_SIZE)
model.eval()
eval_loss, eval_accuracy = 0, 0
nb_eval_steps, nb_eval_examples = 0, 0
y_true = []
y_pred = []
label_map = {i : label for i, label in enumerate(label_list,1)}
for input_ids, input_mask, segment_ids, label_ids in tqdm(eval_dataloader, desc="Evaluating"):
input_ids = input_ids.to(device)
input_mask = input_mask.to(device)
segment_ids = segment_ids.to(device)
label_ids = label_ids.to(device)
with torch.no_grad():
logits = model(input_ids, segment_ids, input_mask)
logits = torch.argmax(F.log_softmax(logits,dim=1),dim=1)
logits = logits.detach().cpu().numpy()
label_ids = label_ids.to('cpu').numpy()
y_pred.extend(logits)
y_true.extend(label_ids)
print(len(y_pred))
print(len(y_true))
report = classification_report(y_true, y_pred)
output_eval_file = os.path.join(OUTPUT_DIR, "eval_results.txt")
with open(output_eval_file, "w") as writer:
logger.info("***** Eval results *****")
logger.info("\n%s", report)
writer.write(report) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def train(self, X, y):\n tf.logging.set_verbosity(\n tf.logging.INFO) # comment if you don't want to display the information during training/evaluation\n\n X_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size=self.params[\"TEST_SIZE\"], random_state=42, stratify=y)\n\n self.label_list = y.unique()\n\n train_features = self.sentences_to_features(X_train, y_train)\n test_features = self.sentences_to_features(X_test, y_test)\n if DEBUG:\n print(\"Transformation to features completed\")\n\n num_train_steps = int(\n len(train_features) / self.params[\"BATCH_SIZE\"] * self.params[\"NUM_TRAIN_EPOCHS\"])\n num_warmup_steps = int(\n num_train_steps * self.params[\"WARMUP_PROPORTION\"])\n\n run_config = self.run_config_builder()\n model_fn = self.model_fn_builder(len(self.label_list), self.params[\"LEARNING_RATE\"], num_train_steps,\n num_warmup_steps)\n self.estimator = self.estimator_builder(model_fn, run_config)\n\n train_input_fn = bert.run_classifier.input_fn_builder(features=train_features,\n seq_length=self.params[\"MAX_SEQ_LENGTH\"],\n is_training=True, drop_remainder=False)\n if DEBUG:\n print(\"Beginning Training!\")\n current_time = time.time()\n self.estimator.train(input_fn=train_input_fn,\n max_steps=num_train_steps)\n if DEBUG:\n print(\"Training took time :\", time.time() - current_time,\n \"s, or \", (time.time() - current_time) / 60, \"min\")\n\n self.classifier_trained = True\n\n test_input_fn = run_classifier.input_fn_builder(features=test_features,\n seq_length=self.params[\"MAX_SEQ_LENGTH\"],\n is_training=False, drop_remainder=False)\n\n # apply model on test set and print all metrics\n if DEBUG:\n print(\"Evaluating\")\n self.estimator.evaluate(input_fn=test_input_fn, steps=None)",
"def train():\n # YOUR TRAINING CODE GOES HERE",
"def evaluate(net,\n tokenizer, ner_tagger,\n device, eval_data_filepath, eval_preds_filepath,\n fb_passes = 1, text_length = 250, verbose=False):\n\n \"\"\" PREPADE DATA FOR PREDICTION \"\"\"\n dh = utils.HotPotDataHandler(eval_data_filepath)\n dev_data = dh.data_for_paragraph_selector()\n\n point_ids = [point[0] for point in dev_data] # needed to handle useless datapoints\n queries = [point[2] for point in dev_data]\n contexts = [point[3] for point in dev_data]\n\n graphs = [EntityGraph.EntityGraph(c,\n context_length=text_length,\n tagger=ner_tagger)\n for c in contexts]\n\n # if the NER in EntityGraph doesn't find entities, the datapoint is useless.\n useless_datapoint_inds = [i for i, g in enumerate(graphs) if not g.graph]\n queries = [q for i, q in enumerate(queries) if i not in useless_datapoint_inds]\n contexts = [c for i, c in enumerate(contexts) if i not in useless_datapoint_inds]\n graphs = [g for i, g in enumerate(graphs) if i not in useless_datapoint_inds]\n\n # required for prediction in the right format\n s_lens_batch = [utils.sentence_lengths(c, tokenizer) for c in contexts]\n\n # turn the texts into tensors in order to put them on the GPU\n qc_ids = [net.encoder.token_ids(q, c) for q, c in zip(queries, contexts)] # list[ (list[int], list[int]) ]\n q_ids, c_ids = list(zip(*qc_ids)) # tuple(list[int]), tuple(list[int])\n q_ids_list = [torch.tensor(q).to(device) for q in q_ids] # list[Tensor]\n c_ids_list = [torch.tensor(c).to(device) for c in c_ids] # list[Tensor]\n\n for i,g in enumerate(graphs):\n graphs[i].M = g.M.to(device) # work with enumerate to actually mutate the graph objects\n\n \"\"\" FORWARD PASSES \"\"\"\n answers = {} # {question_id: str} (either \"yes\", \"no\" or a string containing the answer)\n sp = {} # {question_id: list[list[paragraph_title, sent_num]]} (supporting sentences)\n\n # return useless datapoints unanswered\n for i in useless_datapoint_inds:\n answers[point_ids[i]] = \"noanswer\"\n sp[point_ids[i]] = []\n\n for i, (query, context, graph, s_lens) in enumerate(zip(q_ids_list, c_ids_list, graphs, s_lens_batch)):\n\n if verbose: print(queries[i])\n\n answer, sup_fact_pairs = predict(net, query, context, graph, tokenizer,\n s_lens, fb_passes=fb_passes) #TODO sort these parameters\n\n answers[dev_data[i][0]] = answer # {question_id: str}\n sp[dev_data[i][0]] = sup_fact_pairs # {question_id: list[list[paragraph_title, sent_num]]}\n\n if verbose: print(answer)\n\n with open(eval_preds_filepath, 'w') as f:\n json.dump( {\"answer\":answers, \"sp\":sp} , f)\n\n\n \"\"\" EVALUATION \"\"\"\n return official_eval_script.eval(eval_preds_filepath, eval_data_filepath) #TODO return aything else than the metrics?",
"def train_ner(model, new_model_name, output_dir, n_iter, train_data):\r\n random.seed(0)\r\n if model is not None:\r\n nlp = spacy.load(model) # load existing spaCy model\r\n print(\"Loaded model '%s'\" % model)\r\n else:\r\n nlp = spacy.blank(\"en\") # create blank Language class\r\n print(\"Created blank 'en' model\")\r\n # Add entity recognizer to model if it's not in the pipeline\r\n # nlp.create_pipe works for built-ins that are registered with spaCy\r\n if \"ner\" not in nlp.pipe_names:\r\n ner = nlp.create_pipe(\"ner\")\r\n nlp.add_pipe(ner)\r\n # otherwise, get it, so we can add labels to it\r\n else:\r\n ner = nlp.get_pipe(\"ner\")\r\n\r\n ner.add_label(LABEL) # add new entity label to entity recognizer\r\n # Adding extraneous labels shouldn't mess anything up\r\n ner.add_label(\"EXTRA\")\r\n \r\n \r\n if model is None:\r\n optimizer = nlp.begin_training()\r\n else:\r\n optimizer = nlp.resume_training()\r\n move_names = list(ner.move_names)\r\n # get names of other pipes to disable them during training\r\n other_pipes = [pipe for pipe in nlp.pipe_names if pipe != \"ner\"]\r\n with nlp.disable_pipes(*other_pipes): # only train NER\r\n sizes = compounding(1.0, 4.0, 1.001)\r\n # batch up the examples using spaCy's minibatch\r\n for itn in range(n_iter):\r\n random.shuffle(train_data)\r\n batches = minibatch(train_data, size=sizes)\r\n losses = {}\r\n for batch in batches:\r\n texts, annotations = zip(*batch)\r\n nlp.update(texts, annotations, sgd=optimizer, drop=0.35, losses=losses)\r\n print(\"Losses\", losses)\r\n\r\n # save model to output directory\r\n if output_dir is not None:\r\n output_dir = Path(output_dir)\r\n if not output_dir.exists():\r\n output_dir.mkdir()\r\n nlp.meta[\"name\"] = new_model_name # rename model\r\n nlp.to_disk(output_dir)\r\n print(\"Saved model to\", output_dir)",
"def create_model(is_predicting, input_ids, input_mask, segment_ids, vocab, vocab_size, bert_config, use_one_hot_embeddings):\n\n # bert_module = hub.Module(\n # BERT_MODEL_HUB,\n # trainable=True)\n \n # bert_inputs = dict(\n # input_ids=input_ids,\n # input_mask=input_mask,\n # segment_ids=segment_ids)\n\n # bert_outputs = bert_module(\n # inputs=bert_inputs,\n # signature=\"tokens\",\n # as_dict=True)\n\n # Use \"pooled_output\" for classification tasks on an entire sentence.\n # Use \"sequence_output\" for token-level output.\n # output_layer = bert_outputs[\"sequence_output\"]\n \n\n model = modeling.BertModel(\n config=bert_config,\n is_training=not is_predicting,\n input_ids=input_ids,\n input_mask=input_mask,\n token_type_ids=segment_ids,\n use_one_hot_embeddings=use_one_hot_embeddings\n )\n\n output_layer = model.get_sequence_output()\n\n\n\n batch_size = output_layer.shape[0]\n max_seq_length = output_layer.shape[1]\n hidden_size = output_layer.shape[2]\n \n\n # Create our own layer to tune for politeness data.\n output_weights = tf.get_variable(\n \"output_weights\", [vocab_size, hidden_size],\n initializer=tf.truncated_normal_initializer(stddev=0.02))\n\n output_bias = tf.get_variable(\n \"output_bias\", [vocab_size], initializer=tf.zeros_initializer())\n\n with tf.variable_scope(\"loss\"):\n\n # Dropout helps prevent overfitting\n output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)\n\n logits = tf.matmul(output_layer, output_weights, transpose_b=True)\n # add a max_seq length stack of bias so that we add the bias to each word distributoin\n logits = tf.nn.bias_add(logits, output_bias)\n log_probs = tf.nn.log_softmax(logits, axis=-1)\n\n # Convert labels into one-hot encoding\n one_hot_answer = tf.one_hot(input_ids, depth=vocab_size)\n\n\n predictions = tf.squeeze(tf.argmax(log_probs, axis=-1, output_type=tf.int32))\n # If we're predicting, we want predicted labels and the probabiltiies.\n if is_predicting:\n return (predictions, log_probs)\n\n # If we're train/eval, compute loss between predicted and actual label\n per_example_loss = -tf.reduce_sum(one_hot_answer * log_probs, axis=-1)\n per_example_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=input_ids, logits=logits)\n \n loss = tf.reduce_mean(per_example_loss)\n return (loss, predictions, log_probs)",
"def predict(self, texts, merge_tokens=True):\n if os.environ.get('DISABLE_V2_BEHAVIOR', None) != '1':\n warnings.warn(\"Please add os.environ['DISABLE_V2_BEHAVIOR'] = '1' at top of your script or notebook\")\n msg = \"\\nNER in ktrain uses the CRF module from keras_contrib, which is not yet\\n\" +\\\n \"fully compatible with TensorFlow 2. To use NER, you must add the following to the top of your\\n\" +\\\n \"script or notebook BEFORE you import ktrain (after restarting runtime):\\n\\n\" +\\\n \"import os\\n\" +\\\n \"os.environ['DISABLE_V2_BEHAVIOR'] = '1'\\n\"\n print(msg)\n return\n else:\n import tensorflow.compat.v1 as tf\n tf.disable_v2_behavior()\n\n #old_do = os.environ.get('CUDA_DEVICE_ORDER', None)\n #old_vd = os.environ.get('CUDA_VISIBLE_DEVICES', None)\n #os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\n #os.environ['CUDA_VISIBLE_DEVICES'] = '-1'\n\n if isinstance(texts, str): texts = [texts]\n if self.lang == 'zh':\n dirpath = os.path.dirname(os.path.abspath(__file__))\n fpath = os.path.join(dirpath, 'ner_models/ner_chinese')\n elif self.lang == 'ru':\n dirpath = os.path.dirname(os.path.abspath(__file__))\n fpath = os.path.join(dirpath, 'ner_models/ner_russian')\n elif self.lang=='en':\n dirpath = os.path.dirname(os.path.abspath(__file__))\n fpath = os.path.join(dirpath, 'ner_models/ner_english')\n else:\n raise ValueError('lang %s is not supported by NER' % (self.lang))\n try:\n import io\n from contextlib import redirect_stdout\n f = io.StringIO()\n with redirect_stdout(f):\n import ktrain\n except:\n raise ValueError('ktrain could not be imported. Install with: pip3 install ktrain')\n predictor = ktrain.load_predictor(fpath)\n results = []\n for text in texts:\n text = text.strip()\n result = predictor.predict(text)\n if merge_tokens:\n result = self.merge_entities(result)\n results.append(result)\n if len(result) == 1: result = result[0]\n\n #if old_do is not None:\n #os.environ[\"CUDA_DEVICE_ORDER\"] = old_do\n #else:\n #del os.environ['CUDA_DEVICE_ORDER']\n #if old_vd is not None:\n #os.environ['CUDA_VISIBLE_DEVICES'] = old_vd\n #else:\n #del os.environ['CUDA_VISIBLE_DEVICES']\n return result",
"def predict():\n\n predict_cfg = get_predict_args()\n device = get_device()\n print(device)\n\n # load checkpoint\n ckpt_path = find_ckpt_in_directory(predict_cfg.ckpt)\n ckpt = torch.load(ckpt_path, map_location=device)\n best_iter = ckpt[\"best_iter\"]\n cfg = ckpt[\"cfg\"]\n aspect = cfg[\"aspect\"]\n\n for k, v in cfg.items():\n print(\"{:20} : {:10}\".format(k, str(v)))\n\n eval_batch_size = 64\n\n print(\"Loading data\")\n dev_data = list(beer_reader(cfg[\"dev_path\"]))\n test_data = beer_annotations_reader(cfg[\"test_path\"], aspect=aspect)\n\n print(\"dev\", len(dev_data))\n print(\"test\", len(test_data))\n\n print(\"Loading pre-trained word embeddings\")\n vocab = Vocabulary()\n vectors = load_embeddings(cfg[\"embeddings\"], vocab) # required for vocab\n\n # build model\n model = build_model(cfg[\"model\"], vocab, cfg=cfg)\n\n # load parameters from checkpoint into model\n print(\"Loading saved model..\")\n model.load_state_dict(ckpt[\"state_dict\"])\n model.to(device)\n print(\"Done\")\n\n print(model)\n print_parameters(model)\n\n print(\"Evaluating\")\n dev_eval = evaluate_loss(\n model, dev_data, batch_size=eval_batch_size,\n device=device, cfg=cfg)\n test_eval = evaluate_loss(\n model, test_data, batch_size=eval_batch_size,\n device=device, cfg=cfg)\n\n if hasattr(model, \"z\"):\n path = os.path.join(\n cfg[\"save_path\"], \"final_rationales.txt\")\n test_precision, test_macro_prec = evaluate_rationale(\n model, test_data, aspect=aspect, device=device,\n batch_size=eval_batch_size, path=path)\n else:\n test_precision = 0.\n test_macro_prec = 0.\n test_eval[\"precision\"] = test_precision\n test_eval[\"macro_precision\"] = test_macro_prec\n\n dev_s = make_kv_string(dev_eval)\n test_s = make_kv_string(test_eval)\n\n print(\"best model iter {:d} dev {} test {}\".format(\n best_iter, dev_s, test_s))",
"def train(net, train_data,\n dev_data_filepath, dev_preds_filepath, model_save_path,\n para_selector, # TODO sort these nicely\n ps_threshold=0.1,\n ner_device=torch.device('cpu'), training_device=torch.device('cpu'),\n text_length=250,\n fb_passes=1, coefs=(0.5, 0.5),\n epochs=3, batch_size=1, learning_rate=1e-4,\n eval_interval=None, verbose_evaluation=False, timed=False):\n timer = utils.Timer()\n\n tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\n\n flair.device = torch.device(ner_device)\n ner_tagger = flair.models.SequenceTagger.load('ner') # this hard-codes flair tagging!\n\n optimizer = torch.optim.Adam(net.parameters(), lr=learning_rate)\n\n losses = []\n real_batch_sizes = [] # some data points are not usable; this logs the real sizes\n graph_logging = [0, 0, 0] # [total nodes, total connections, number of graphs]\n point_usage = [0, 0] # [used points, unused points]\n dev_scores = []\n\n # Set the network into train mode\n net.train()\n net = net.to(training_device)\n\n timer(\"training_preparation\")\n\n print(\"Training...\")\n\n best_score = 0\n eval_interval = eval_interval if eval_interval else float('inf') # interval in batches\n a_model_was_saved_at_some_point = False\n\n for epoch in range(epochs):\n # TODO take recurrent times for forward, evaluation saving etc.\n print('Epoch %d/%d' % (epoch + 1, epochs))\n batch_counter = 0\n\n for step, batch in enumerate(tqdm(train_data, desc=\"Iteration\")):\n\n \"\"\" DATA PROCESSING \"\"\"\n ids = []\n queries = []\n contexts = []\n graphs = []\n\n useless_datapoint_inds = []\n\n for i, point in enumerate(batch):\n\n # make a list[ list[str, list[str]] ] for each point in the batch\n context = para_selector.make_context(point,\n threshold=ps_threshold,\n context_length=text_length) # TODO add device and numerated arguments\n graph = EntityGraph.EntityGraph(context,\n context_length=text_length,\n tagger=ner_tagger)\n if graph.graph:\n ids.append(point[0])\n queries.append(point[2])\n contexts.append(context)\n graphs.append(graph)\n graph_logging = [a+b # [total nodes, total connections, number of graphs]\n for a,b in zip(graph_logging, [len(graph.graph),\n len(graph.relation_triplets()),\n 1])]\n point_usage[0] += 1\n else: # if the NER in EntityGraph doesn't find entities, the datapoint is useless.\n useless_datapoint_inds.append(i)\n point_usage[1] += 1\n\n batch = [point for point in batch if point[0] in ids] # update the batch to exclude useless data points\n\n real_batch_sizes.append(batch_size - len(useless_datapoint_inds)) #TODO track the batch sizes!\n\n # if our batch is completely useless, just continue with the next batch. :(\n if len(useless_datapoint_inds) == batch_size:\n continue\n\n # turn the texts into tensors in order to put them on the GPU\n qc_ids = [net.encoder.token_ids(q, c) for q, c in zip(queries, contexts)] # list[ (list[int], list[int]) ]\n q_ids, c_ids = list(zip(*qc_ids)) # tuple(list[int]), tuple(list[int])\n q_ids_list = [torch.tensor(q) for q in q_ids] # list[Tensor] #TODO? maybe put this into forward()?\n c_ids_list = [torch.tensor(c) for c in c_ids] # list[Tensor]\n\n \"\"\" MAKE TRAINING LABELS \"\"\"\n # replace the paragraphs in raw_point with their shortened versions (obtained from PS)\n for (i, p), c in zip(enumerate(batch), contexts):\n batch[i][3] = c\n\n # TODO? 
change utils.make_labeled_data_for_predictor() to process batches of data?\n labels = [utils.make_labeled_data_for_predictor(g,p,tokenizer) for g,p in zip(graphs, batch)] # list[(support, start, end, type)]\n # list[(Tensor, Tensor, Tensor, Tensor)] -> tuple(Tensor), tuple(Tensor), tuple(Tensor), tuple(Tensor)\n sup_labels, start_labels, end_labels, type_labels = list(zip(*labels))\n # print(f\"in train_dfgn.train(): shapes of labels:\\n{len(sup_labels)}, {len(start_labels)}, {len(end_labels)}, {len(type_labels)}\") #CLEANUP\n\n q_ids_list = [t.to(training_device) if t is not None else None for t in q_ids_list]\n c_ids_list = [t.to(training_device) if t is not None else None for t in c_ids_list]\n for i, g in enumerate(graphs):\n graphs[i].M = g.M.to(training_device) # work with enumerate to actually mutate the graph objects\n\n sup_labels = torch.stack(sup_labels).to(training_device) # (batch, M)\n start_labels = torch.stack(start_labels).to(training_device) # (batch, 1)\n end_labels = torch.stack(end_labels).to(training_device) # (batch, 1)\n type_labels = torch.stack(type_labels).to(training_device) # (batch)\n\n \"\"\" FORWARD PASSES \"\"\"\n optimizer.zero_grad()\n\n sups, starts, ends, types = [], [], [], []\n for query, context, graph in zip(q_ids_list, c_ids_list, graphs): # 'graph' is not a tensor -> for-loop instead of batch processing\n\n o_sup, o_start, o_end, o_type = net(query, context, graph, fb_passes=fb_passes) # (M, 2), (M), (M), (1, 3)\n sups.append(o_sup)\n starts.append(o_start)\n ends.append(o_end)\n types.append(o_type)\n\n sups = torch.stack(sups) # (batch, M, 2)\n starts = torch.stack(starts) # (batch, 1, M)\n ends = torch.stack(ends) # (batch, 1, M)\n types = torch.stack(types) # (batch, 1, 3)\n\n \"\"\" LOSSES & BACKPROP \"\"\"\n weights = torch.ones(2, device=training_device) #TODO maybe extract this to a tiny function?\n sup_label_batch = sup_labels.view(-1)\n weights[0] = sum(sup_label_batch)/float(sup_label_batch.shape[0])\n weights[1] -= weights[0] # assign the opposite weight\n\n sup_criterion = torch.nn.CrossEntropyLoss(weight=weights)\n criterion = torch.nn.CrossEntropyLoss() # for prediction of answer type\n\n # use .view(-1,...) to put points together (this is like summing the points' losses)\n sup_loss = sup_criterion(sups.view(-1,2), sup_label_batch) # (batch*M, 2), (batch*M)\n start_loss = sum([criterion(starts[i], start_labels[i]) for i in range(start_labels.shape[0])]) # batch * ( (1, M, 1), (1) )\n end_loss = sum([criterion(ends[i], end_labels[i]) for i in range(end_labels.shape[0])]) # batch * ( (1, M, 1), (1) )\n type_loss = criterion(types.view(-1,3), type_labels.view(-1)) # (batch, 1, 3), (batch, 1)\n\n # This doesn't have the weak supervision BFS mask stuff from section 3.5 of the paper\n # TODO? 
maybe start training with start/end loss only first, then train another model on all 4 losses?\n loss = start_loss + end_loss + coefs[0]*sup_loss + coefs[1]*type_loss # formula 15\n\n loss.backward(retain_graph=True)\n losses.append( (loss.item(),\n sup_loss.item(),\n start_loss.item(),\n end_loss.item(),\n type_loss.item())) # for logging purposes\n\n batch_counter += 1\n # Evaluate on validation set after some iterations\n if batch_counter % eval_interval == 0:\n\n # this calls the official evaluation script (altered to return metrics)\n metrics = evaluate(net, #TODO make this prettier\n tokenizer, ner_tagger,\n training_device, dev_data_filepath, dev_preds_filepath,\n fb_passes = fb_passes,\n text_length = text_length,\n verbose=verbose_evaluation)\n score = metrics[\"joint_f1\"]\n dev_scores.append(metrics) # appends the whole dict of metrics\n if score >= best_score:\n print(f\"Better eval found with accuracy {round(score, 3)} (+{round(score - best_score, 3)})\")\n best_score = score\n\n torch.save(net, model_save_path) #TODO make sure that this works (maybe, should we save each of the 3 parts indvidually?)\n a_model_was_saved_at_some_point = True\n else:\n print(f\"No improvement yet...\")\n timer(f\"training_evaluation_{batch_counter/eval_interval}\")\n\n optimizer.step()\n timer(f\"training_epoch_{epoch}\")\n\n #========= END OF TRAINING =============#\n metrics = evaluate(net, # TODO make this prettier\n tokenizer, ner_tagger,\n training_device, dev_data_filepath, dev_preds_filepath,\n fb_passes=fb_passes,\n text_length=text_length,\n verbose=verbose_evaluation)\n score = metrics[\"joint_f1\"]\n dev_scores.append(metrics) # appends the whole dict of metrics\n if score >= best_score:\n torch.save(net,\n model_save_path)\n\n if not a_model_was_saved_at_some_point: # make sure that there is a model file\n print(f\"saving model to {model_save_path}...\")\n torch.save(net, model_save_path)\n\n losses_with_batchsizes = [(b, t[0], t[1], t[2], t[3], t[4]) for b,t in zip(real_batch_sizes, losses)]\n\n if timed:\n return losses_with_batchsizes, dev_scores, graph_logging, point_usage, timer\n else:\n return losses_with_batchsizes, dev_scores, graph_logging, point_usage",
"def main(model=None, output_dir=None, n_iter=20):\n if model is not None:\n nlp = spacy.load(model) # load existing spaCy model\n print(\"Loaded model '%s'\" % model)\n else:\n nlp = spacy.blank('en') # create blank Language class\n print(\"Created blank 'en' model\")\n\n # create the built-in pipeline components and add them to the pipeline\n # nlp.create_pipe works for built-ins that are registered with spaCy\n if 'ner' not in nlp.pipe_names:\n ner = nlp.create_pipe('ner')\n nlp.add_pipe(ner, last=True)\n # otherwise, get it so we can add labels\n else:\n ner = nlp.get_pipe('ner')\n\n # add labels\n for _, annotations in TRAIN_DATA:\n for ent in annotations.get('entities'):\n ner.add_label(str(ent[2]))\n\n # get names of other pipes to disable them during training\n other_pipes = [pipe for pipe in nlp.pipe_names if pipe != 'ner']\n with nlp.disable_pipes(*other_pipes): # only train NER\n optimizer = nlp.begin_training()\n for itn in range(n_iter):\n random.shuffle(TRAIN_DATA)\n losses = {}\n for text, annotations in TRAIN_DATA:\n nlp.update(\n [text], # batch of texts\n [annotations], # batch of annotations\n drop=0.5, # dropout - make it harder to memorise data\n sgd=optimizer, # callable to update weights\n losses=losses)\n print(losses)\n \n # test the trained model\n for text, _ in TRAIN_DATA:\n doc = nlp(text)\n print('Entities', [(ent.text, ent.label_) for ent in doc.ents])\n print('Tokens', [(t.text, t.ent_type_, t.ent_iob) for t in doc])\n \n # save model to output directory\n if output_dir is not None:\n print(output_dir)\n output_dir = Path(output_dir)\n if not output_dir.exists():\n output_dir.mkdir()\n nlp.to_disk(output_dir)\n print(\"Saved model to\", output_dir)\n\n # test the saved model\n print(\"Loading from\", output_dir)\n nlp2 = spacy.load(output_dir)\n for text, _ in TRAIN_DATA:\n doc = nlp2(text)\n print('Entities', [(ent.text, ent.label_) for ent in doc.ents])\n print('Tokens', [(t.text, t.ent_type_, t.ent_iob) for t in doc])",
"def train_model(\n fname: Path,\n save_name: Path,\n batch_size: int = 32,\n warmup_steps: int = 100,\n steps: int = 1000,\n num_heads: int = 2,\n model_dim: int = 128,\n key_dim: int = 128,\n value_dim: int = 128,\n dropout: float = 0.1,\n num_mask: int = 9,\n):\n seqs = read_fasta(fname)\n\n X = seqs_to_integer(seqs)\n\n X = torch.from_numpy(X).type(torch.LongTensor)\n\n X_train, X_test = random_split(X)\n\n train_params = {\n \"batch_size\": batch_size,\n \"lr\": 0.0005,\n \"weight_decay\": 0.0,\n \"warmup_steps\": warmup_steps,\n \"steps\": steps,\n }\n model_params[\"n_head\"] = num_heads\n model_params[\"d_model\"] = model_dim\n model_params[\"d_k\"] = key_dim\n model_params[\"d_v\"] = value_dim\n model_params[\"dropout\"] = dropout\n model_params[\"num_mask\"] = num_mask\n\n model = BERT(**model_params)\n\n optimizer = Adam(\n model.parameters(),\n lr=train_params[\"lr\"],\n weight_decay=train_params[\"weight_decay\"],\n )\n\n scheduler = WarmupAnnealLR(optimizer, warmup_steps=train_params[\"warmup_steps\"])\n\n train(\n model,\n X_train,\n X_test,\n save_name,\n batch_size=train_params[\"batch_size\"],\n optimizer=optimizer,\n scheduler=scheduler,\n steps=train_params[\"steps\"],\n pbar_increment=10,\n )",
"def evaluate_model():\n\n # Get the processed data (in proper format to evaluate the NER model)\n data = get_json_from_file_path(PROCESSED_DATA_PATH)\n # Split the dataset for training and test as we did for training\n train_data, test_data = train_test_split(data, train_size=0.7, \n random_state=4)\n\n # Load the model trained\n try:\n ner_model = spacy.load(OUTPUT_MODEL_PATH)\n except Exception as err:\n msg = f'Could not load the model. Error: {err}'\n raise Exception(msg)\n\n # Compute evaluation scores\n print('Computing metrics...')\n scores = evaluate(ner_model, test_data)\n # General metrics of the model\n f_score = scores.get('ents_f')\n precision = scores.get('ents_p')\n recall = scores.get('ents_r')\n print('\\nScoring:')\n print(f'F-score: {f_score}')\n print(f'Precision: {precision}')\n print(f'Recall: {recall}')\n\n # Get the specific scores for each entity \n scores_per_entity = scores.get('ents_per_type')\n # Get the F-score of the entities\n f_scores_of_entities = []\n for entity_scores in scores_per_entity.values():\n f_scores_of_entities.append(entity_scores['f'])\n # Compute the macro averaged F-score\n macro_avg_f_score = sum(f_scores_of_entities)/len(f_scores_of_entities)\n print(f'Macro averaged F-score: {macro_avg_f_score}')\n \n print('\\nScores per entity;')\n print('{:<15} {:<10} {:<10} {:<10}'.format('Entity','F-score','Precision','Recall'))\n for key, value in scores_per_entity.items():\n entity = key\n f, p, r = value['f'], value['p'], value['r']\n print('{:<15} {:<10.2f} {:<10.2f} {:<10.2f}'.format(entity, f, p, r))",
"def train(\n # fmt: off\n lang: (\"Model language\", \"positional\", None, str),\n output_path: (\"Output directory to store model in\", \"positional\", None, Path),\n train_path: (\"Location of JSON-formatted training data\", \"positional\", None, Path),\n dev_path: (\"Location of JSON-formatted development data\", \"positional\", None, Path),\n raw_text: (\"Path to jsonl file with unlabelled text documents.\", \"option\", \"rt\", Path) = None,\n base_model: (\"Name of model to update (optional)\", \"option\", \"b\", str) = None,\n pipeline: (\"Comma-separated names of pipeline components\", \"option\", \"p\", str) = \"tagger,parser,ner\",\n vectors: (\"Model to load vectors from\", \"option\", \"v\", str) = None,\n replace_components: (\"Replace components from base model\", \"flag\", \"R\", bool) = False,\n n_iter: (\"Number of iterations\", \"option\", \"n\", int) = 30,\n n_early_stopping: (\"Maximum number of training epochs without dev accuracy improvement\", \"option\", \"ne\", int) = None,\n n_examples: (\"Number of examples\", \"option\", \"ns\", int) = 0,\n use_gpu: (\"Use GPU\", \"option\", \"g\", int) = -1,\n version: (\"Model version\", \"option\", \"V\", str) = \"0.0.0\",\n meta_path: (\"Optional path to meta.json to use as base.\", \"option\", \"m\", Path) = None,\n init_tok2vec: (\"Path to pretrained weights for the token-to-vector parts of the models. See 'spacy pretrain'. Experimental.\", \"option\", \"t2v\", Path) = None,\n parser_multitasks: (\"Side objectives for parser CNN, e.g. 'dep' or 'dep,tag'\", \"option\", \"pt\", str) = \"\",\n entity_multitasks: (\"Side objectives for NER CNN, e.g. 'dep' or 'dep,tag'\", \"option\", \"et\", str) = \"\",\n noise_level: (\"Amount of corruption for data augmentation\", \"option\", \"nl\", float) = 0.0,\n orth_variant_level: (\"Amount of orthography variation for data augmentation\", \"option\", \"ovl\", float) = 0.0,\n eval_beam_widths: (\"Beam widths to evaluate, e.g. 
4,8\", \"option\", \"bw\", str) = \"\",\n gold_preproc: (\"Use gold preprocessing\", \"flag\", \"G\", bool) = False,\n learn_tokens: (\"Make parser learn gold-standard tokenization\", \"flag\", \"T\", bool) = False,\n textcat_multilabel: (\"Textcat classes aren't mutually exclusive (multilabel)\", \"flag\", \"TML\", bool) = False,\n textcat_arch: (\"Textcat model architecture\", \"option\", \"ta\", str) = \"bow\",\n textcat_positive_label: (\"Textcat positive label for binary classes with two labels\", \"option\", \"tpl\", str) = None,\n tag_map_path: (\"Location of JSON-formatted tag map\", \"option\", \"tm\", Path) = None,\n verbose: (\"Display more information for debug\", \"flag\", \"VV\", bool) = False,\n debug: (\"Run data diagnostics before training\", \"flag\", \"D\", bool) = False,\n # fmt: on\n):\n util.fix_random_seed()\n util.set_env_log(verbose)\n\n # Make sure all files and paths exists if they are needed\n train_path = util.ensure_path(train_path)\n dev_path = util.ensure_path(dev_path)\n meta_path = util.ensure_path(meta_path)\n output_path = util.ensure_path(output_path)\n if raw_text is not None:\n raw_text = list(srsly.read_jsonl(raw_text))\n if not train_path or not train_path.exists():\n msg.fail(\"Training data not found\", train_path, exits=1)\n if not dev_path or not dev_path.exists():\n msg.fail(\"Development data not found\", dev_path, exits=1)\n if meta_path is not None and not meta_path.exists():\n msg.fail(\"Can't find model meta.json\", meta_path, exits=1)\n meta = srsly.read_json(meta_path) if meta_path else {}\n if output_path.exists() and [p for p in output_path.iterdir() if p.is_dir()]:\n msg.warn(\n \"Output directory is not empty\",\n \"This can lead to unintended side effects when saving the model. \"\n \"Please use an empty directory or a different path instead. If \"\n \"the specified output path doesn't exist, the directory will be \"\n \"created for you.\",\n )\n if not output_path.exists():\n output_path.mkdir()\n msg.good(f\"Created output directory: {output_path}\")\n\n tag_map = {}\n if tag_map_path is not None:\n tag_map = srsly.read_json(tag_map_path)\n # Take dropout and batch size as generators of values -- dropout\n # starts high and decays sharply, to force the optimizer to explore.\n # Batch size starts at 1 and grows, so that we make updates quickly\n # at the beginning of training.\n dropout_rates = util.decaying(\n util.env_opt(\"dropout_from\", 0.2),\n util.env_opt(\"dropout_to\", 0.2),\n util.env_opt(\"dropout_decay\", 0.0),\n )\n batch_sizes = util.compounding(\n util.env_opt(\"batch_from\", 100.0),\n util.env_opt(\"batch_to\", 1000.0),\n util.env_opt(\"batch_compound\", 1.001),\n )\n\n if not eval_beam_widths:\n eval_beam_widths = [1]\n else:\n eval_beam_widths = [int(bw) for bw in eval_beam_widths.split(\",\")]\n if 1 not in eval_beam_widths:\n eval_beam_widths.append(1)\n eval_beam_widths.sort()\n has_beam_widths = eval_beam_widths != [1]\n\n default_dir = Path(__file__).parent.parent / \"ml\" / \"models\" / \"defaults\"\n\n # Set up the base model and pipeline. If a base model is specified, load\n # the model and make sure the pipeline matches the pipeline setting. 
If\n # training starts from a blank model, intitalize the language class.\n pipeline = [p.strip() for p in pipeline.split(\",\")]\n msg.text(f\"Training pipeline: {pipeline}\")\n disabled_pipes = None\n pipes_added = False\n if use_gpu >= 0:\n activated_gpu = None\n try:\n activated_gpu = set_gpu(use_gpu)\n except Exception as e:\n msg.warn(f\"Exception: {e}\")\n if activated_gpu is not None:\n msg.text(f\"Using GPU: {use_gpu}\")\n else:\n msg.warn(f\"Unable to activate GPU: {use_gpu}\")\n msg.text(\"Using CPU only\")\n use_gpu = -1\n if base_model:\n msg.text(f\"Starting with base model '{base_model}'\")\n nlp = util.load_model(base_model)\n if nlp.lang != lang:\n msg.fail(\n f\"Model language ('{nlp.lang}') doesn't match language \"\n f\"specified as `lang` argument ('{lang}') \",\n exits=1,\n )\n if vectors:\n msg.text(f\"Loading vectors from model '{vectors}'\")\n _load_vectors(nlp, vectors)\n\n nlp.select_pipes(disable=[p for p in nlp.pipe_names if p not in pipeline])\n for pipe in pipeline:\n # first, create the model.\n # Bit of a hack after the refactor to get the vectors into a default config\n # use train-from-config instead :-)\n if pipe == \"parser\":\n config_loc = default_dir / \"parser_defaults.cfg\"\n elif pipe == \"tagger\":\n config_loc = default_dir / \"tagger_defaults.cfg\"\n elif pipe == \"ner\":\n config_loc = default_dir / \"ner_defaults.cfg\"\n elif pipe == \"textcat\":\n config_loc = default_dir / \"textcat_defaults.cfg\"\n elif pipe == \"senter\":\n config_loc = default_dir / \"senter_defaults.cfg\"\n else:\n raise ValueError(f\"Component {pipe} currently not supported.\")\n pipe_cfg = util.load_config(config_loc, create_objects=False)\n if vectors:\n pretrained_config = {\n \"@architectures\": \"spacy.VocabVectors.v1\",\n \"name\": vectors,\n }\n pipe_cfg[\"model\"][\"tok2vec\"][\"pretrained_vectors\"] = pretrained_config\n\n if pipe == \"parser\":\n pipe_cfg[\"learn_tokens\"] = learn_tokens\n elif pipe == \"textcat\":\n pipe_cfg[\"exclusive_classes\"] = not textcat_multilabel\n pipe_cfg[\"architecture\"] = textcat_arch\n pipe_cfg[\"positive_label\"] = textcat_positive_label\n\n if pipe not in nlp.pipe_names:\n msg.text(f\"Adding component to base model '{pipe}'\")\n nlp.add_pipe(nlp.create_pipe(pipe, config=pipe_cfg))\n pipes_added = True\n elif replace_components:\n msg.text(f\"Replacing component from base model '{pipe}'\")\n nlp.replace_pipe(pipe, nlp.create_pipe(pipe, config=pipe_cfg))\n pipes_added = True\n else:\n if pipe == \"textcat\":\n textcat_cfg = nlp.get_pipe(\"textcat\").cfg\n base_cfg = {\n \"exclusive_classes\": textcat_cfg[\"exclusive_classes\"],\n \"architecture\": textcat_cfg[\"architecture\"],\n \"positive_label\": textcat_cfg[\"positive_label\"],\n }\n if base_cfg != pipe_cfg:\n msg.fail(\n f\"The base textcat model configuration does\"\n f\"not match the provided training options. 
\"\n f\"Existing cfg: {base_cfg}, provided cfg: {pipe_cfg}\",\n exits=1,\n )\n msg.text(f\"Extending component from base model '{pipe}'\")\n disabled_pipes = nlp.select_pipes(\n disable=[p for p in nlp.pipe_names if p not in pipeline]\n )\n else:\n msg.text(f\"Starting with blank model '{lang}'\")\n lang_cls = util.get_lang_class(lang)\n nlp = lang_cls()\n\n if vectors:\n msg.text(f\"Loading vectors from model '{vectors}'\")\n _load_vectors(nlp, vectors)\n\n for pipe in pipeline:\n # first, create the model.\n # Bit of a hack after the refactor to get the vectors into a default config\n # use train-from-config instead :-)\n if pipe == \"parser\":\n config_loc = default_dir / \"parser_defaults.cfg\"\n elif pipe == \"tagger\":\n config_loc = default_dir / \"tagger_defaults.cfg\"\n elif pipe == \"morphologizer\":\n config_loc = default_dir / \"morphologizer_defaults.cfg\"\n elif pipe == \"ner\":\n config_loc = default_dir / \"ner_defaults.cfg\"\n elif pipe == \"textcat\":\n config_loc = default_dir / \"textcat_defaults.cfg\"\n elif pipe == \"senter\":\n config_loc = default_dir / \"senter_defaults.cfg\"\n else:\n raise ValueError(f\"Component {pipe} currently not supported.\")\n pipe_cfg = util.load_config(config_loc, create_objects=False)\n if vectors:\n pretrained_config = {\n \"@architectures\": \"spacy.VocabVectors.v1\",\n \"name\": vectors,\n }\n pipe_cfg[\"model\"][\"tok2vec\"][\"pretrained_vectors\"] = pretrained_config\n\n if pipe == \"parser\":\n pipe_cfg[\"learn_tokens\"] = learn_tokens\n elif pipe == \"textcat\":\n pipe_cfg[\"exclusive_classes\"] = not textcat_multilabel\n pipe_cfg[\"architecture\"] = textcat_arch\n pipe_cfg[\"positive_label\"] = textcat_positive_label\n\n pipe = nlp.create_pipe(pipe, config=pipe_cfg)\n nlp.add_pipe(pipe)\n\n # Update tag map with provided mapping\n nlp.vocab.morphology.tag_map.update(tag_map)\n\n # Multitask objectives\n multitask_options = [(\"parser\", parser_multitasks), (\"ner\", entity_multitasks)]\n for pipe_name, multitasks in multitask_options:\n if multitasks:\n if pipe_name not in pipeline:\n msg.fail(\n f\"Can't use multitask objective without '{pipe_name}' in \"\n f\"the pipeline\"\n )\n pipe = nlp.get_pipe(pipe_name)\n for objective in multitasks.split(\",\"):\n pipe.add_multitask_objective(objective)\n\n # Prepare training corpus\n msg.text(f\"Counting training words (limit={n_examples})\")\n corpus = GoldCorpus(train_path, dev_path, limit=n_examples)\n n_train_words = corpus.count_train()\n\n if base_model and not pipes_added:\n # Start with an existing model, use default optimizer\n optimizer = create_default_optimizer()\n else:\n # Start with a blank model, call begin_training\n cfg = {\"device\": use_gpu}\n optimizer = nlp.begin_training(lambda: corpus.train_examples, **cfg)\n nlp._optimizer = None\n\n # Load in pretrained weights (TODO: this may be broken in the config rewrite)\n if init_tok2vec is not None:\n components = _load_pretrained_tok2vec(nlp, init_tok2vec)\n msg.text(f\"Loaded pretrained tok2vec for: {components}\")\n\n # Verify textcat config\n if \"textcat\" in pipeline:\n textcat_labels = nlp.get_pipe(\"textcat\").cfg.get(\"labels\", [])\n if textcat_positive_label and textcat_positive_label not in textcat_labels:\n msg.fail(\n f\"The textcat_positive_label (tpl) '{textcat_positive_label}' \"\n f\"does not match any label in the training data.\",\n exits=1,\n )\n if textcat_positive_label and len(textcat_labels) != 2:\n msg.fail(\n \"A textcat_positive_label (tpl) '{textcat_positive_label}' was \"\n \"provided for 
training data that does not appear to be a \"\n \"binary classification problem with two labels.\",\n exits=1,\n )\n train_data = corpus.train_data(\n nlp,\n noise_level=noise_level,\n gold_preproc=gold_preproc,\n max_length=0,\n ignore_misaligned=True,\n )\n train_labels = set()\n if textcat_multilabel:\n multilabel_found = False\n for ex in train_data:\n train_labels.update(ex.gold.cats.keys())\n if list(ex.gold.cats.values()).count(1.0) != 1:\n multilabel_found = True\n if not multilabel_found and not base_model:\n msg.warn(\n \"The textcat training instances look like they have \"\n \"mutually-exclusive classes. Remove the flag \"\n \"'--textcat-multilabel' to train a classifier with \"\n \"mutually-exclusive classes.\"\n )\n if not textcat_multilabel:\n for ex in train_data:\n train_labels.update(ex.gold.cats.keys())\n if list(ex.gold.cats.values()).count(1.0) != 1 and not base_model:\n msg.warn(\n \"Some textcat training instances do not have exactly \"\n \"one positive label. Modifying training options to \"\n \"include the flag '--textcat-multilabel' for classes \"\n \"that are not mutually exclusive.\"\n )\n nlp.get_pipe(\"textcat\").cfg[\"exclusive_classes\"] = False\n textcat_multilabel = True\n break\n if base_model and set(textcat_labels) != train_labels:\n msg.fail(\n f\"Cannot extend textcat model using data with different \"\n f\"labels. Base model labels: {textcat_labels}, training data \"\n f\"labels: {list(train_labels)}\",\n exits=1,\n )\n if textcat_multilabel:\n msg.text(\n f\"Textcat evaluation score: ROC AUC score macro-averaged across \"\n f\"the labels '{', '.join(textcat_labels)}'\"\n )\n elif textcat_positive_label and len(textcat_labels) == 2:\n msg.text(\n f\"Textcat evaluation score: F1-score for the \"\n f\"label '{textcat_positive_label}'\"\n )\n elif len(textcat_labels) > 1:\n if len(textcat_labels) == 2:\n msg.warn(\n \"If the textcat component is a binary classifier with \"\n \"exclusive classes, provide '--textcat_positive_label' for \"\n \"an evaluation on the positive class.\"\n )\n msg.text(\n f\"Textcat evaluation score: F1-score macro-averaged across \"\n f\"the labels '{', '.join(textcat_labels)}'\"\n )\n else:\n msg.fail(\n \"Unsupported textcat configuration. 
Use `spacy debug-data` \"\n \"for more information.\"\n )\n\n # fmt: off\n row_head, output_stats = _configure_training_output(pipeline, use_gpu, has_beam_widths)\n row_widths = [len(w) for w in row_head]\n row_settings = {\"widths\": row_widths, \"aligns\": tuple([\"r\" for i in row_head]), \"spacing\": 2}\n # fmt: on\n print(\"\")\n msg.row(row_head, **row_settings)\n msg.row([\"-\" * width for width in row_settings[\"widths\"]], **row_settings)\n try:\n iter_since_best = 0\n best_score = 0.0\n for i in range(n_iter):\n train_data = corpus.train_dataset(\n nlp,\n noise_level=noise_level,\n orth_variant_level=orth_variant_level,\n gold_preproc=gold_preproc,\n max_length=0,\n ignore_misaligned=True,\n )\n if raw_text:\n random.shuffle(raw_text)\n raw_batches = util.minibatch(\n (nlp.make_doc(rt[\"text\"]) for rt in raw_text), size=8\n )\n words_seen = 0\n with tqdm.tqdm(total=n_train_words, leave=False) as pbar:\n losses = {}\n for batch in util.minibatch_by_words(train_data, size=batch_sizes):\n if not batch:\n continue\n try:\n nlp.update(\n batch,\n sgd=optimizer,\n drop=next(dropout_rates),\n losses=losses,\n )\n except ValueError as e:\n err = \"Error during training\"\n if init_tok2vec:\n err += \" Did you provide the same parameters during 'train' as during 'pretrain'?\"\n msg.fail(err, f\"Original error message: {e}\", exits=1)\n if raw_text:\n # If raw text is available, perform 'rehearsal' updates,\n # which use unlabelled data to reduce overfitting.\n raw_batch = list(next(raw_batches))\n nlp.rehearse(raw_batch, sgd=optimizer, losses=losses)\n docs = [ex.doc for ex in batch]\n if not int(os.environ.get(\"LOG_FRIENDLY\", 0)):\n pbar.update(sum(len(doc) for doc in docs))\n words_seen += sum(len(doc) for doc in docs)\n with nlp.use_params(optimizer.averages):\n util.set_env_log(False)\n epoch_model_path = output_path / f\"model{i}\"\n nlp.to_disk(epoch_model_path)\n nlp_loaded = util.load_model_from_path(epoch_model_path)\n for beam_width in eval_beam_widths:\n for name, component in nlp_loaded.pipeline:\n if hasattr(component, \"cfg\"):\n component.cfg[\"beam_width\"] = beam_width\n dev_dataset = list(\n corpus.dev_dataset(\n nlp_loaded,\n gold_preproc=gold_preproc,\n ignore_misaligned=True,\n )\n )\n nwords = sum(len(ex.doc) for ex in dev_dataset)\n start_time = timer()\n scorer = nlp_loaded.evaluate(dev_dataset, verbose=verbose)\n end_time = timer()\n if use_gpu < 0:\n gpu_wps = None\n cpu_wps = nwords / (end_time - start_time)\n else:\n gpu_wps = nwords / (end_time - start_time)\n with use_ops(\"numpy\"):\n nlp_loaded = util.load_model_from_path(epoch_model_path)\n for name, component in nlp_loaded.pipeline:\n if hasattr(component, \"cfg\"):\n component.cfg[\"beam_width\"] = beam_width\n dev_dataset = list(\n corpus.dev_dataset(\n nlp_loaded,\n gold_preproc=gold_preproc,\n ignore_misaligned=True,\n )\n )\n start_time = timer()\n scorer = nlp_loaded.evaluate(dev_dataset, verbose=verbose)\n end_time = timer()\n cpu_wps = nwords / (end_time - start_time)\n acc_loc = output_path / f\"model{i}\" / \"accuracy.json\"\n srsly.write_json(acc_loc, scorer.scores)\n\n # Update model meta.json\n meta[\"lang\"] = nlp.lang\n meta[\"pipeline\"] = nlp.pipe_names\n meta[\"spacy_version\"] = f\">={about.__version__}\"\n if beam_width == 1:\n meta[\"speed\"] = {\n \"nwords\": nwords,\n \"cpu\": cpu_wps,\n \"gpu\": gpu_wps,\n }\n meta.setdefault(\"accuracy\", {})\n for component in nlp.pipe_names:\n for metric in _get_metrics(component):\n meta[\"accuracy\"][metric] = scorer.scores[metric]\n 
else:\n meta.setdefault(\"beam_accuracy\", {})\n meta.setdefault(\"beam_speed\", {})\n for component in nlp.pipe_names:\n for metric in _get_metrics(component):\n meta[\"beam_accuracy\"][metric] = scorer.scores[metric]\n meta[\"beam_speed\"][beam_width] = {\n \"nwords\": nwords,\n \"cpu\": cpu_wps,\n \"gpu\": gpu_wps,\n }\n meta[\"vectors\"] = {\n \"width\": nlp.vocab.vectors_length,\n \"vectors\": len(nlp.vocab.vectors),\n \"keys\": nlp.vocab.vectors.n_keys,\n \"name\": nlp.vocab.vectors.name,\n }\n meta.setdefault(\"name\", f\"model{i}\")\n meta.setdefault(\"version\", version)\n meta[\"labels\"] = nlp.meta[\"labels\"]\n meta_loc = output_path / f\"model{i}\" / \"meta.json\"\n srsly.write_json(meta_loc, meta)\n util.set_env_log(verbose)\n\n progress = _get_progress(\n i,\n losses,\n scorer.scores,\n output_stats,\n beam_width=beam_width if has_beam_widths else None,\n cpu_wps=cpu_wps,\n gpu_wps=gpu_wps,\n )\n if i == 0 and \"textcat\" in pipeline:\n textcats_per_cat = scorer.scores.get(\"textcats_per_cat\", {})\n for cat, cat_score in textcats_per_cat.items():\n if cat_score.get(\"roc_auc_score\", 0) < 0:\n msg.warn(\n f\"Textcat ROC AUC score is undefined due to \"\n f\"only one value in label '{cat}'.\"\n )\n msg.row(progress, **row_settings)\n # Early stopping\n if n_early_stopping is not None:\n current_score = _score_for_model(meta)\n if current_score < best_score:\n iter_since_best += 1\n else:\n iter_since_best = 0\n best_score = current_score\n if iter_since_best >= n_early_stopping:\n msg.text(\n f\"Early stopping, best iteration is: {i - iter_since_best}\"\n )\n msg.text(\n f\"Best score = {best_score}; Final iteration score = {current_score}\"\n )\n break\n except Exception as e:\n msg.warn(f\"Aborting and saving final best model. Encountered exception: {e}\")\n finally:\n best_pipes = nlp.pipe_names\n if disabled_pipes:\n disabled_pipes.restore()\n with nlp.use_params(optimizer.averages):\n final_model_path = output_path / \"model-final\"\n nlp.to_disk(final_model_path)\n meta_loc = output_path / \"model-final\" / \"meta.json\"\n final_meta = srsly.read_json(meta_loc)\n final_meta.setdefault(\"accuracy\", {})\n final_meta[\"accuracy\"].update(meta.get(\"accuracy\", {}))\n final_meta.setdefault(\"speed\", {})\n final_meta[\"speed\"].setdefault(\"cpu\", None)\n final_meta[\"speed\"].setdefault(\"gpu\", None)\n meta.setdefault(\"speed\", {})\n meta[\"speed\"].setdefault(\"cpu\", None)\n meta[\"speed\"].setdefault(\"gpu\", None)\n # combine cpu and gpu speeds with the base model speeds\n if final_meta[\"speed\"][\"cpu\"] and meta[\"speed\"][\"cpu\"]:\n speed = _get_total_speed(\n [final_meta[\"speed\"][\"cpu\"], meta[\"speed\"][\"cpu\"]]\n )\n final_meta[\"speed\"][\"cpu\"] = speed\n if final_meta[\"speed\"][\"gpu\"] and meta[\"speed\"][\"gpu\"]:\n speed = _get_total_speed(\n [final_meta[\"speed\"][\"gpu\"], meta[\"speed\"][\"gpu\"]]\n )\n final_meta[\"speed\"][\"gpu\"] = speed\n # if there were no speeds to update, overwrite with meta\n if (\n final_meta[\"speed\"][\"cpu\"] is None\n and final_meta[\"speed\"][\"gpu\"] is None\n ):\n final_meta[\"speed\"].update(meta[\"speed\"])\n # note: beam speeds are not combined with the base model\n if has_beam_widths:\n final_meta.setdefault(\"beam_accuracy\", {})\n final_meta[\"beam_accuracy\"].update(meta.get(\"beam_accuracy\", {}))\n final_meta.setdefault(\"beam_speed\", {})\n final_meta[\"beam_speed\"].update(meta.get(\"beam_speed\", {}))\n srsly.write_json(meta_loc, final_meta)\n msg.good(\"Saved model to output directory\", 
final_model_path)\n with msg.loading(\"Creating best model...\"):\n best_model_path = _collate_best_model(final_meta, output_path, best_pipes)\n msg.good(\"Created best model\", best_model_path)",
"def load_bert(self):\n self.hermes.info(\"Load the bert...\")\n model = load_trained_model_from_checkpoint(self.conf.bert[\"config\"], self.conf.bert[\"checkpoint\"])\n\n self.hermes.info(\"Build the tokenizer...\")\n tokenizer = self.poseidon.build_tokenizer()\n\n return model, tokenizer",
"def bert_module_fn(is_training):\n\n input_ids = tf.compat.v1.placeholder(shape=[None, None], dtype=tf.int32, name=\"input_ids\")\n input_mask = tf.compat.v1.placeholder(shape=[None, None], dtype=tf.int32, name=\"input_mask\")\n token_type = tf.compat.v1.placeholder(shape=[None, None], dtype=tf.int32, name=\"segment_ids\")\n\n config = modeling.BertConfig.from_json_file(config_path)\n model = modeling.BertModel(config=config, is_training=is_training,\n input_ids=input_ids, input_mask=input_mask, token_type_ids=token_type)\n \n seq_output = model.all_encoder_layers[seq_layer]\n tok_output = model.all_encoder_layers[tok_layer]\n pool_output = model.get_pooled_output()\n\n config_file = tf.constant(value=config_path, dtype=tf.string, name=\"config_file\")\n vocab_file = tf.constant(value=vocab_path, dtype=tf.string, name=\"vocab_file\")\n lower_case = tf.constant(do_lower_case)\n\n tf.compat.v1.add_to_collection(tf.compat.v1.GraphKeys.ASSET_FILEPATHS, config_file)\n tf.compat.v1.add_to_collection(tf.compat.v1.GraphKeys.ASSET_FILEPATHS, vocab_file)\n \n input_map = {\"input_ids\": input_ids,\n \"input_mask\": input_mask,\n \"segment_ids\": token_type}\n \n output_map = {\"pooled_output\": pool_output,\n \"sequence_output\": seq_output,\n \"token_output\": tok_output}\n\n output_info_map = {\"vocab_file\": vocab_file,\n \"do_lower_case\": lower_case}\n \n hub.add_signature(name=\"tokens\", inputs=input_map, outputs=output_map)\n hub.add_signature(name=\"tokenization_info\", inputs={}, outputs=output_info_map)",
"def train_test_model_batch():\n train=learning.Train_kmer_clf()\n train.run()",
"def train(self, arg1=None, arg2=None, **kwargs):\n nltk.download('averaged_perceptron_tagger')\n nltk.download('wordnet')\n nltk.download('twitter_samples')\n nltk.download('punkt')\n nltk.download('stopwords')\n nltk.download('vader_lexicon')\n\n positive_tweets = twitter_samples.strings('positive_tweets.json')\n negative_tweets = twitter_samples.strings('negative_tweets.json')\n text = twitter_samples.strings('tweets.20150430-223406.json')\n tweet_tokens = twitter_samples.tokenized('positive_tweets.json')[0]\n\n stop_words = stopwords.words('english')\n\n positive_tweet_tokens = twitter_samples.tokenized('positive_tweets.json')\n negative_tweet_tokens = twitter_samples.tokenized('negative_tweets.json')\n\n positive_cleaned_tokens_list = []\n negative_cleaned_tokens_list = []\n\n for tokens in positive_tweet_tokens:\n positive_cleaned_tokens_list.append(self.remove_noise(tokens, stop_words))\n\n for tokens in negative_tweet_tokens:\n negative_cleaned_tokens_list.append(self.remove_noise(tokens, stop_words))\n\n all_pos_words = self.get_all_words(positive_cleaned_tokens_list)\n\n freq_dist_pos = FreqDist(all_pos_words)\n print(freq_dist_pos.most_common(20))\n\n positive_tokens_for_model = self.get_tweets_for_model(positive_cleaned_tokens_list)\n negative_tokens_for_model = self.get_tweets_for_model(negative_cleaned_tokens_list)\n\n positive_dataset = [(tweet_dict, \"Positive\")\n for tweet_dict in positive_tokens_for_model]\n\n negative_dataset = [(tweet_dict, \"Negative\")\n for tweet_dict in negative_tokens_for_model]\n\n dataset = positive_dataset + negative_dataset\n\n random.shuffle(dataset)\n\n train_data = dataset[:7000]\n test_data = dataset[7000:]\n\n self.classifier = NaiveBayesClassifier.train(train_data)",
"def trainNet():",
"def main():\n # Read data for train set\n print('loading training data')\n train = read_datafile('../data/tsd_train.csv')\n\n # Read trial data for validation set\n validation = read_datafile('../data/tsd_trial.csv')\n\n # Read data for test set\n print('loading test data')\n test = read_datafile('../data/tsd_test.csv')\n\n # Convert training data to Spacy Entities\n nlp = spacy.load(\"en_core_web_sm\")\n print('preparing training data')\n training_data = []\n for n, (spans, text) in enumerate(train):\n doc = nlp(text)\n ents = spans_to_ents(doc, set(spans), 'TOXIC')\n training_data.append((doc.text, {'entities': ents}))\n\n toxic_tagging = spacy.blank('en')\n toxic_tagging.vocab.strings.add('TOXIC')\n ner = nlp.create_pipe(\"ner\")\n toxic_tagging.add_pipe(ner, last=True)\n ner.add_label('TOXIC')\n\n pipe_exceptions = [\"ner\", \"trf_wordpiecer\", \"trf_tok2vec\"]\n unaffected_pipes = [\n pipe for pipe in toxic_tagging.pipe_names\n if pipe not in pipe_exceptions]\n\n\n print('Training!')\n with toxic_tagging.disable_pipes(*unaffected_pipes):\n \n toxic_tagging.begin_training()\n for iteration in range(30):\n random.shuffle(training_data)\n losses = {}\n batches = spacy.util.minibatch(\n training_data, size=spacy.util.compounding(\n 4.0, 32.0, 1.001))\n for batch in batches:\n texts, annotations = zip(*batch)\n toxic_tagging.update(texts, annotations, drop=0.5, losses=losses)\n print(\"Losses\", losses)\n\n\n # Define helper function for evaluating datasets\n def evaluate(dateset):\n precision_recall_f1_scores = []\n for spans, text in dateset:\n pred_spans = []\n doc = toxic_tagging(text)\n for ent in doc.ents:\n pred_spans.extend(range(ent.start_char, ent.start_char + len(ent.text)))\n \n # score = semeval2021.f1(pred_spans, spans)\n precision_recall_f1_scores.append(per_post_precision_recall_f1(pred_spans, spans))\n\n # compute average precision, recall and f1 score of all posts\n return np.array(precision_recall_f1_scores).mean(axis=0)\n\n # Evaluate on dev and test sets\n print('Evaluation:')\n eval_precision, eval_recall, eval_f1 = evaluate(validation)\n test_precision, test_recall, test_f1 = evaluate(test)\n \n print(f'Dev set: Precision = {eval_precision}, Recall = {eval_recall}, F1 = {eval_f1}')\n print(f'Test set: Precision = {test_precision}, Recall = {test_recall}, F1 = {test_f1}')",
"def train():\n pass",
"def build_bert_input(data, data_path, tokenizer):\n\n cache_fp = f\"{data_path[:data_path.rfind('.')]}_{type(tokenizer).__name__}_{str(BERT_MAX_LEN)}_cache\"\n if os.path.isfile(cache_fp): \n logger.info(\"Loading tokenized data from cache...\")\n all_samples = torch.load(cache_fp)\n return all_samples\n\n bert_sequences = [] \n\n # modification for turn classification task \n if 'turn' in data_path:\n for instance in data:\n seq = \"[CLS] {} [SEP] {} [SEP]\".format(instance['p'], instance['r'])\n bert_sequences.append([instance['label'], seq])\n\n # regular yes-and classifier \n else: \n \n for k in data['non-yesands'].keys():\n for non_yesand in data['non-yesands'][k]: \n seq = \"[CLS] {} [SEP] {} [SEP]\".format(non_yesand['p'], non_yesand['r'])\n bert_sequences.append([0, seq])\n \n for k in data['yesands'].keys(): \n for yesand in data['yesands'][k]: \n seq = \"[CLS] {} [SEP] {} [SEP]\".format(yesand['p'], yesand['r'])\n bert_sequences.append([1, seq])\n\n sentences = [x[1] for x in bert_sequences]\n labels = [x[0] for x in bert_sequences]\n logger.info(\"Tokenizing loaded data...\")\n tokenized_texts = [tokenizer.encode(sentence) for sentence in sentences]\n\n\n # cache_fp = data_path[:data_path.rfind('.')] + \"_\" + type(tokenizer).__name__\n # if os.path.isfile(cache_fp): \n # logger.info(\"Loading tokenized data from cache...\")\n # tokenized_texts = torch.load(cache_fp)\n # else: \n # logger.info(\"Tokenizing loaded data...\")\n # # tokenize with BERT tokenizer \n # tokenized_texts = [tokenizer.encode(sentence) for sentence in sentences]\n # torch.save(tokenized_texts, cache_fp)\n\n\n\n # pad input to MAX_LEN\n input_ids = pad_sequences(tokenized_texts, maxlen=BERT_MAX_LEN, dtype=\"long\", truncating=\"post\", padding=\"post\")\n\n # get attention masks and segment ids \n attention_masks = build_attention_mask(input_ids)\n segment_ids = build_segment_ids(input_ids)\n\n all_samples = [{\"input_ids\": input_ids[i], \"token_type_ids\": segment_ids[i], \"attention_mask\": attention_masks[i], \"label\": labels[i]} for i in range(len(input_ids))]\n torch.save(all_samples, cache_fp)\n\n return all_samples",
"def build_bert(self, verbose=True):\r\n # bert inputs\r\n bert_word_ids = Input(batch_shape=(None, self._params.max_sent_len), dtype=\"int32\", name=\"bert_word_input\")\r\n bert_mask_ids = Input(batch_shape=(None, self._params.max_sent_len), dtype=\"int32\", name='bert_mask_input')\r\n bert_segment_ids = Input(batch_shape=(None, self._params.max_sent_len), dtype=\"int32\", name=\"bert_segment_input\")\r\n \r\n inputs = [bert_word_ids, bert_mask_ids, bert_segment_ids]\r\n\r\n bert_out = BertLayer(n_fine_tune_layers=self._params.n_fine_tune_layers, bert_path=self._params.bert_path, name=\"bert_layer\")([bert_word_ids, bert_mask_ids, bert_segment_ids])\r\n\r\n features = bert_out\r\n\r\n if self._params.use_dict:\r\n if verbose: logging.info(\"use user dict features\")\r\n dict_ids = Input(batch_shape=(None, self._params.max_sent_len), dtype='int32', name='dict_input')\r\n inputs.append(dict_ids)\r\n\r\n dict_embeddings = Embedding(input_dim=self._params.dict_vocab_size,\r\n output_dim=self._params.dict_embedding_dim,\r\n mask_zero=True,\r\n name='dict_embedding')(dict_ids)\r\n\r\n features = Concatenate(name=\"bert_and_dict_features\")([features, dict_embeddings])\r\n\r\n z = Dense(self._params.fc_dim, activation='relu', name=\"fc_dense\")(features)\r\n\r\n if self._params.use_crf:\r\n if verbose: logging.info('use crf decode layer')\r\n crf = CRF(self._params.num_labels, sparse_target=False,\r\n learn_mode='marginal', test_mode='marginal', name='crf_out')\r\n loss = crf.loss_function\r\n pred = crf(z)\r\n else:\r\n loss = 'categorical_crossentropy'\r\n pred = Dense(self._params.num_labels, activation='softmax', name='softmax_out')(z)\r\n\r\n model = Model(inputs=inputs, outputs=pred)\r\n model.summary(print_fn=lambda x: logging.info(x + '\\n'))\r\n\r\n # It is recommended that you use this optimizer for fine tuning, since this\r\n # is how the model was trained (note that the Adam m/v variables are NOT\r\n # loaded from init_checkpoint.)\r\n optimizer = AdamWeightDecayOptimizer(\r\n learning_rate=1e-5,\r\n weight_decay_rate=0.01,\r\n beta_1=0.9,\r\n beta_2=0.999,\r\n epsilon=1e-6,\r\n exclude_from_weight_decay=[\"LayerNorm\", \"layer_norm\", \"bias\"])\r\n \r\n model.compile(loss=loss, optimizer=optimizer)\r\n\r\n self.model = model",
"def run(self) -> None:\n self.model = self.trainer.train_model(self.model, self.data)",
"def __init__(self, config: BertConfig):\r\n super().__init__(config)\r\n ### YOUR CODE HERE\r\n self.num_labels = config.num_labels # [0, 1] (start or end)\r\n self.bert = BertModel(config)\r\n self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) # TODO: Not a separate FFN ? (For Start_FFN and End_FFN)\r\n\r\n ### END YOUR CODE\r\n\r\n # Don't forget initializing the weights\r\n self.init_weights()",
"def word_level_prediction(model_dir: str, ner_training_output_dir: str, ner_data_dir: str):\n\n output_dir = 'gs://ekaba-assets/{}/{}/{}'.format(model_dir, ner_training_output_dir, ner_data_dir)\n ner_data_dir_path = 'gs://ekaba-assets/datasets/NER/{}'.format(ner_data_dir)\n\n try:\n run('python biobert/biocodes/ner_detoknize.py --token_test_path={}/token_test.txt ' \\\n '--label_test_path={}/label_test.txt --answer_path={}/test.tsv --output_dir={} '.format(\n output_dir, output_dir, ner_data_dir_path, output_dir\n ))\n except exceptions.UnexpectedExit:\n print('Cannot do NER word level prediction')\n\n try:\n if not os.path.exists('{}'.format(ner_training_output_dir)):\n os.makedirs('{}'.format(ner_training_output_dir))\n\n run('gsutil cp gs://ekaba-assets/{}/{}/{}/NER_result_conll.txt {}'.format(\n model_dir, ner_training_output_dir, ner_data_dir, ner_training_output_dir))\n\n run('perl biobert/biocodes/conlleval.pl < {}/NER_result_conll.txt'.format(ner_training_output_dir))\n except exceptions.UnexpectedExit:\n print('Cannot do NER word level prediction - perl biocodes')",
"def main_strategy_2():\n en_text, de_text, train_iter, dev_iter, _ = clean_data_strategy_2()\n embedding_en, embedding_de = get_GloVe_embedding(en_text, de_text)\n model = Model(len(en_text.vocab), len(de_text.vocab), 300, embedding_en, embedding_de)\n train(model, train_iter, dev_iter)",
"def bert_score(preds: Union[List[str], Dict[str, Tensor]], target: Union[List[str], Dict[str, Tensor]], model_name_or_path: Optional[str]=None, num_layers: Optional[int]=None, all_layers: bool=False, model: Optional[Module]=None, user_tokenizer: Any=None, user_forward_fn: Callable[[Module, Dict[str, Tensor]], Tensor]=None, verbose: bool=False, idf: bool=False, device: Optional[Union[str, torch.device]]=None, max_length: int=512, batch_size: int=64, num_threads: int=4, return_hash: bool=False, lang: str='en', rescale_with_baseline: bool=False, baseline_path: Optional[str]=None, baseline_url: Optional[str]=None) ->Dict[str, Union[List[float], str]]:\n if len(preds) != len(target):\n raise ValueError('Number of predicted and reference sententes must be the same!')\n if verbose and not _TQDM_AVAILABLE:\n raise ModuleNotFoundError('An argument `verbose = True` requires `tqdm` package be installed. Install with `pip install tqdm`.')\n if model is None:\n if not _TRANSFORMERS_AVAILABLE:\n raise ModuleNotFoundError('`bert_score` metric with default models requires `transformers` package be installed. Either install with `pip install transformers>=4.0` or `pip install torchmetrics[text]`.')\n if model_name_or_path is None:\n warn(f'The argument `model_name_or_path` was not specified while it is required when default `transformers` model are used.It is, therefore, used the default recommended model - {_DEFAULT_MODEL}.')\n tokenizer = AutoTokenizer.from_pretrained(model_name_or_path or _DEFAULT_MODEL)\n model = AutoModel.from_pretrained(model_name_or_path or _DEFAULT_MODEL)\n else:\n tokenizer = user_tokenizer\n model.eval()\n model\n try:\n if num_layers and num_layers > model.config.num_hidden_layers:\n raise ValueError(f'num_layers={num_layers} is forbidden for {model_name_or_path}. 
Please use num_layers <= {model.config.num_hidden_layers}')\n except AttributeError:\n warn('It was not possible to retrieve the parameter `num_layers` from the model specification.')\n _are_empty_lists = all(isinstance(text, list) and len(text) == 0 for text in (preds, target))\n _are_valid_lists = all(isinstance(text, list) and len(text) > 0 and isinstance(text[0], str) for text in (preds, target))\n _are_valid_tensors = all(isinstance(text, dict) and isinstance(text['input_ids'], Tensor) for text in (preds, target))\n if _are_empty_lists:\n warn('Predictions and references are empty.')\n output_dict: Dict[str, Union[List[float], str]] = {'precision': [0.0], 'recall': [0.0], 'f1': [0.0]}\n if return_hash:\n output_dict.update({'hash': _get_hash(model_name_or_path, num_layers, idf)})\n return output_dict\n baseline = _load_baseline(lang, model_name_or_path, baseline_path, baseline_url) if rescale_with_baseline else None\n if _are_valid_lists:\n target_dataset = TextDataset(target, tokenizer, max_length, idf=idf)\n preds_dataset = TextDataset(preds, tokenizer, max_length, idf=idf, tokens_idf=target_dataset.tokens_idf)\n elif _are_valid_tensors:\n target_dataset = TokenizedDataset(**target, idf=idf)\n preds_dataset = TokenizedDataset(**preds, idf=idf, tokens_idf=target_dataset.tokens_idf)\n else:\n raise ValueError('Invalid input provided.')\n target_loader = DataLoader(target_dataset, batch_size=batch_size, num_workers=num_threads)\n preds_loader = DataLoader(preds_dataset, batch_size=batch_size, num_workers=num_threads)\n target_embeddings, target_idf_scale = _get_embeddings_and_idf_scale(target_loader, target_dataset.max_length, model, device, num_layers, all_layers, idf, verbose, user_forward_fn)\n preds_embeddings, preds_idf_scale = _get_embeddings_and_idf_scale(preds_loader, preds_dataset.max_length, model, device, num_layers, all_layers, idf, verbose, user_forward_fn)\n precision, recall, f1_score = _get_precision_recall_f1(preds_embeddings, target_embeddings, preds_idf_scale, target_idf_scale)\n if baseline is not None:\n precision, recall, f1_score = _rescale_metrics_with_baseline(precision, recall, f1_score, baseline, num_layers, all_layers)\n output_dict = {'precision': precision.tolist(), 'recall': recall.tolist(), 'f1': f1_score.tolist()}\n if return_hash:\n output_dict.update({'hash': _get_hash(model_name_or_path, num_layers, idf)})\n return output_dict",
"def evaluate(model, tokenizer, dataset, lines, output_test_file, batch_size=32):\n sampler = SequentialSampler(dataset)\n dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=batch_size)\n\n print(\"*** Evaluating ***\")\n eval_loss = 0.0\n num_steps = 0\n preds = None\n out_label_ids = None\n for i, batch in enumerate(dataloader):\n if i % 200 == 199:\n print(\"=\", end=\"\")\n if i % 5000 == 4999:\n print(\"[Step \" + str(i+1) + \" / \" + str(len(dataloader)) + \"] \" )\n model.eval()\n batch = tuple(t.to(device) for t in batch)\n\n with torch.no_grad():\n labels = batch[3]\n outputs = model(input_ids=batch[0], attention_mask=batch[1], labels=labels)\n tmp_eval_loss, logits = outputs[:2]\n eval_loss += tmp_eval_loss.mean().item()\n \n num_steps += 1\n if preds is None:\n preds = logits.detach().cpu().numpy()\n out_label_ids = labels.detach().cpu().numpy()\n else:\n preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)\n out_label_ids = np.append(out_label_ids, labels.detach().cpu().numpy(), axis=0)\n \n eval_loss = eval_loss / num_steps\n \n preds_label = np.argmax(preds, axis=1)\n \n accuracy = (preds_label == out_label_ids).mean()\n output_dir = os.path.dirname(output_test_file)\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n with open(output_test_file, \"w\") as writer:\n all_logits = preds.tolist()\n for i, logit in enumerate(all_logits):\n line = '<CODESPLIT>'.join(\n [item.encode('ascii', 'ignore').decode('ascii') for item in lines[i]])\n\n writer.write(line + '<CODESPLIT>' + '<CODESPLIT>'.join([str(l) for l in logit]) + '\\n')\n print(\"Accuracy =\", str(accuracy))\n\n return accuracy",
"def main(self, data):\n\t\ttokenizer = BertTokenizer.from_pretrained(\"bert-base-uncased\", do_lower_case=True)\n\t\teval_features = self.get_features(data, self.labels, tokenizer, self.max_seq_length)\n\t\tlabel, prob = self.predict(eval_features)\n\t\treturn label, prob",
"def run(cfg): # pylint: disable=too-many-locals,too-many-statements\n # load_text\n voca, gazet, data_, pos_model, word_model = load_text(cfg)\n\n char_voca = voca['in']\n\n # Build Ner model\n model = build_model(cfg, char_voca=char_voca, word_voca=None,\n gazet=gazet, pos_voca=pos_model.cfg.voca['out'])\n\n epoch_syl_cnt = data_['train'].get_syllable_count()\n iter_per_epoch = epoch_syl_cnt // cfg.batch_size\n iter_to_rvt = iter_per_epoch * cfg.rvt_epoch\n\n # Load GPU\n if torch.cuda.is_available():\n model.cuda()\n\n # Loss / Optimizer\n criterion = nn.CrossEntropyLoss()\n optimizer = cfg.optimizer(model.parameters())\n\n losses = []\n accuracies = []\n f_scores = []\n\n iter_ = 1\n best_iter = 0\n\n # Remove existing log directory\n if cfg.clean:\n logging.info('==== removing log: %s ====', cfg.model_dir)\n shutil.rmtree(cfg.model_dir)\n time.sleep(3)\n\n else:\n if cfg.ckpt_path.exists():\n logging.info('==== reverting from check point ====')\n model_dump = CheckPoint.load(cfg.ckpt_path)\n model.load_state_dict(model_dump['model'])\n optimizer.load_state_dict(model_dump['optim'])\n best_iter = model_dump['iter']\n iter_ = best_iter + 1\n losses.append(model_dump['loss'])\n accuracies.append(model_dump['accuracy'])\n f_scores.append(model_dump['f-score'])\n logging.info('---- iter: %dk, loss: %f, accuracy: %f, f-score: %f ----',\n iter_ // 1000, losses[-1], accuracies[-1], f_scores[-1])\n lrs = [param_group['lr'] for param_group in optimizer.param_groups]\n logging.info('learning rates: %s', ', '.join([str(_) for _ in lrs]))\n\n # Tensorboard Summary Writer\n sum_wrt = SummaryWriter(cfg.model_dir)\n\n # loss / accuracy / f-score logging (.tsv)\n log_path = cfg.model_dir.joinpath('log.tsv')\n logf = open(log_path, 'at' if cfg.ckpt_path.exists() else 'wt')\n if os.path.getsize(log_path) == 0:\n print('iter\\tloss\\taccuracy\\tf-score', file=logf)\n\n # Main Training Loop\n revert = 0\n one_more_thing = True # one more change to increase learning rate into 10 times\n batches = []\n while revert <= cfg.rvt_term or one_more_thing:\n for train_sent in data_['train']:\n # Convert to Tensor\n # labels [sentence_len]\n # contexts [sentence_len, 21]\n # gazet [sentence_len, 21, 15]\n train_sent.set_word_feature(pos_model, word_model, cfg.window)\n train_sent.set_pos_feature(pos_model, cfg.window)\n train_labels, train_contexts, train_gazet, train_pos, train_words = \\\n train_sent.to_tensor(voca, gazet, cfg.window, cfg.phoneme, cfg.gazet_embed)\n\n # Convert to Variable\n train_labels = Variable(train_labels)\n train_contexts = Variable(train_contexts)\n train_gazet = Variable(train_gazet)\n train_pos = Variable(train_pos, requires_grad=False)\n train_words = Variable(train_words, requires_grad=False)\n\n # Load on GPU\n if torch.cuda.is_available():\n train_labels = train_labels.cuda()\n train_contexts = train_contexts.cuda()\n train_gazet = train_gazet.cuda()\n train_pos = train_pos.cuda()\n train_words = train_words.cuda()\n\n # Reset Gradient\n optimizer.zero_grad()\n\n # Training mode (updates/dropout/batchnorm)\n model.train()\n\n # import ipdb; ipdb.set_trace()\n\n # Forward Prop\n outputs = model(train_contexts, train_gazet, train_pos, train_words)\n\n batches.append((train_labels, outputs))\n if sum([batch[0].size(0) for batch in batches]) < cfg.batch_size:\n continue\n batch_label = torch.cat([x[0] for x in batches], 0)\n batch_output = torch.cat([x[1] for x in batches], 0)\n batches = []\n\n # Backprop\n loss = criterion(batch_output, batch_label)\n loss.backward()\n 
optimizer.step()\n\n # Validation\n if iter_ % 1000 == 0:\n measure = tagger.PerformanceMeasure()\n # Freeze parameters\n model.eval()\n\n # Calculate loss\n losses.append(loss.data[0])\n for dev_sent in data_['dev']:\n # Convert to CUDA Variable\n dev_sent.set_word_feature(pos_model, word_model, cfg.window)\n dev_sent.set_pos_feature(pos_model, cfg.window)\n _, dev_contexts, dev_gazet, dev_pos, dev_words = \\\n dev_sent.to_tensor(voca, gazet, cfg.window, cfg.phoneme, cfg.gazet_embed)\n dev_contexts = Variable(dev_contexts, volatile=True)\n dev_gazet = Variable(dev_gazet, volatile=True)\n dev_pos = Variable(dev_pos, volatile=True)\n dev_words = Variable(dev_words, volatile=True)\n if torch.cuda.is_available():\n dev_contexts = dev_contexts.cuda()\n dev_gazet = dev_gazet.cuda()\n dev_pos = dev_pos.cuda()\n dev_words = dev_words.cuda()\n\n outputs = model(dev_contexts, dev_gazet, dev_pos, dev_words)\n\n _, predicts = outputs.max(1)\n dev_sent.compare_label(predicts, voca, measure)\n\n accuracy, f_score = measure.get_score()\n print(file=sys.stderr)\n sys.stderr.flush()\n if not f_scores or f_score > max(f_scores):\n logging.info('==== writing best model: %f ====', f_score)\n model.save(cfg.ckpt_path)\n check_point = CheckPoint(optimizer, model,\n {'iter': iter_, 'loss': loss.data[0],\n 'accuracy': accuracy, 'f-score': f_score})\n check_point.save(cfg.ckpt_path)\n logging.info('check point: %s', check_point)\n best_iter = iter_\n revert = 0\n one_more_thing = True\n accuracies.append(accuracy)\n f_scores.append(f_score)\n logging.info('---- iter: %dk, loss: %f, accuracy: %f, f-score: %f (max: %r) ----',\n iter_ // 1000, losses[-1], accuracy, f_score, max(f_scores))\n\n if cfg.model_dir.exists():\n sum_wrt.add_scalar('loss', losses[-1], iter_ // 1000)\n sum_wrt.add_scalar('accuracy', accuracy, iter_ // 1000)\n sum_wrt.add_scalar('f-score', f_score, iter_ // 1000)\n print('{}\\t{}\\t{}\\t{}'.format(iter_ // 1000, losses[-1], accuracy,\n f_score), file=logf)\n logf.flush()\n\n # revert policy\n if (iter_ - best_iter) > iter_to_rvt:\n revert += 1\n logging.info('==== revert to iter: %dk, revert count: %d ====',\n best_iter // 1000, revert)\n model_dump = CheckPoint.load(cfg.ckpt_path)\n model.load_state_dict(model_dump['model'])\n optimizer.load_state_dict(model_dump['optim'])\n lrs = []\n for param_group in optimizer.param_groups:\n param_group['lr'] *= (0.9 if one_more_thing else 0.8) ** revert\n lrs.append(param_group['lr'])\n best_iter = iter_\n logging.info('learning rates: %s', ', '.join([str(_) for _ in lrs]))\n elif iter_ % 100 == 0:\n print('.', end='', file=sys.stderr)\n sys.stderr.flush()\n\n iter_ += 1\n if revert > cfg.rvt_term and one_more_thing:\n logging.info('==== one more thing, revert to iter: %dk ====', best_iter // 1000)\n model_dump = CheckPoint.load(cfg.ckpt_path)\n model.load_state_dict(model_dump['model'])\n optimizer.load_state_dict(model_dump['optim'])\n lrs = []\n for param_group in optimizer.param_groups:\n param_group['lr'] *= 10.0\n lrs.append(param_group['lr'])\n best_iter = iter_\n revert = 0\n one_more_thing = False\n logging.info('learning rates: %s', ', '.join([str(_) for _ in lrs]))",
"def test():\n listpost,listclass = bayes.loaddataset()\n myvocablist = bayes.createlist(listpost)\n tmatrix = list()\n for doc in listpost:\n\t vec = bayes.word2vec(myvocablist,doc)\n\t tmatrix.append(vec)\n p0,p1,pa = bayes.train(tmatrix,listclass)\n testdoc1 = ['love','my','dalmation']\n testvec1 = bayes.word2vec(myvocablist,testdoc1)\n print testdoc1,'classify as :',bayes.classify(testvec1,p0,p1,pa)\n testdoc2 = ['stupid','love']\n testvec2 = bayes.word2vec(myvocablist,testdoc2)\n print testdoc2,'classify as :',bayes.classify(testvec2,p0,p1,pa)"
] | [
"0.6732188",
"0.66579294",
"0.6617429",
"0.6609218",
"0.65915716",
"0.6566272",
"0.6564456",
"0.6536589",
"0.6532117",
"0.6528989",
"0.6522149",
"0.6457771",
"0.64378786",
"0.64107096",
"0.6403063",
"0.63879454",
"0.63494843",
"0.6348164",
"0.6344589",
"0.6325119",
"0.63243926",
"0.6314535",
"0.63078165",
"0.6291369",
"0.62879366",
"0.62800026",
"0.62795657",
"0.6277995",
"0.6266679",
"0.6264659"
] | 0.7334636 | 0 |
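One of the negative code samples above combines four loss terms with fixed coefficients (loss = start_loss + end_loss + coefs[0]*sup_loss + coefs[1]*type_loss, referred to there as formula 15). The short PyTorch-style sketch below illustrates only that weighted-sum pattern; the scalar values and coefficients are assumptions made for the example and are not taken from the sample.

    import torch

    # Stand-in scalar losses (assumed values, purely illustrative).
    start_loss = torch.tensor(1.2, requires_grad=True)
    end_loss = torch.tensor(0.9, requires_grad=True)
    sup_loss = torch.tensor(0.4, requires_grad=True)
    type_loss = torch.tensor(0.3, requires_grad=True)
    coefs = (1.0, 1.0)  # assumed weighting coefficients

    # Weighted sum of the individual losses, then one backward pass
    # so gradients flow through every term.
    loss = start_loss + end_loss + coefs[0] * sup_loss + coefs[1] * type_loss
    loss.backward()
    print(float(loss), float(start_loss.grad))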
Given a url, set or replace a query parameter and return the modified url. | def set_url_query_param(url: str, param_name: str, param_value: str):
parsed_url: ParseResult = urlparse(url)
query_params: dict = dict(parse_qsl(parsed_url.query))
query_params[param_name] = param_value
new_query_string = urlencode(query_params)
return urlunparse((
parsed_url.scheme,
parsed_url.netloc,
parsed_url.path,
parsed_url.params,
new_query_string,
parsed_url.fragment,
)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _replace_url_query(url, new_query):\n scheme, netloc, path, _, fragment = urlparse.urlsplit(url)\n return urlparse.urlunsplit((scheme, netloc, path, new_query, fragment))",
"def replace_query_params(cls, url: str, **params: Mapping[str, str]) -> str:\n url, _ = cls.separate_query_params(url, params.keys())\n return cls.add_query_params(url, **params)",
"def set_query_parameter(url, param_name, param_value):\n scheme, netloc, path, query_string, fragment = urlsplit(url)\n query_params = parse_qs(query_string)\n\n deleted = False\n for _, value in query_params.items():\n if param_value in value:\n deleted = True\n\n if deleted:\n query_params.pop(param_name, None)\n clear = True\n if not deleted:\n query_params[param_name] = param_value\n clear = False\n\n new_query_string = urlencode(query_params, doseq=True)\n url = urlunsplit((scheme, netloc, path, new_query_string, fragment))\n\n return url, clear",
"def set_params(url, params):\n components = urlparse(url)\n\n query = parse_qs(components.query)\n query.update(params)\n\n components = components._replace(query=urlencode(query, doseq=True))\n return urlunparse(components)",
"def set_query_parameters(url, params):\n url_parts = list(urlparse(url))\n\n query = dict(parse_qsl(url_parts[4]))\n query.update(params)\n url_parts[4] = urlencode(query)\n\n return urlunparse(url_parts)",
"def add_or_replace_parameter(url, name, new_value):\n return _add_or_replace_parameters(url, {name: new_value})",
"def append_query_param(url: str, key: str, value: str) -> str:\n template = '?' in url and '{}&{}={}' or '{}?{}={}'\n return template.format(url, key, value)",
"def url_update(url):\n url_lst = url.split('&')\n start_str = url_lst[1]\n max_results_str = url_lst[2]\n idx1, idx2 = start_str.find('='), max_results_str.find('=')\n num1, num2 = int(start_str[idx1+1:]), int(max_results_str[idx2+1:])\n url_lst[1] = 'start=' + str(num1+num2)\n return '&'.join(url_lst)",
"def append_to_query_string(url, key, value) -> str:\n url = list(urlparse(url))\n query = dict(parse_qsl(url[4]))\n query[key] = value\n url[4] = '&'.join(f'{p}={v}' for p, v in query.items())\n\n return urlunparse(url)",
"def append_query_params(original_url, **kwargs):\n scheme, netloc, path, query_string, fragment = urlsplit(original_url)\n query_params = parse_qs(query_string)\n if kwargs is not None:\n for key, value in kwargs.items():\n query_params[key] = [value]\n\n new_query_string = urlencode(query_params, doseq=True)\n new_url = urlunsplit((scheme, netloc, path, new_query_string, fragment))\n return new_url",
"def append_query_params(original_url, **kwargs):\n scheme, netloc, path, query_string, fragment = urlsplit(original_url)\n query_params = parse_qs(query_string)\n if kwargs is not None:\n for key, value in kwargs.items():\n query_params[key] = [value]\n\n new_query_string = urlencode(query_params, doseq=True)\n new_url = urlunsplit((scheme, netloc, path, new_query_string, fragment))\n return new_url",
"def url_with_querystring(url, **kwargs):\n return url + '?' + urlencode(kwargs)",
"def add_query_params(url: str, additional_params: dict) -> str:\n url_components = urlparse(url)\n original_params = parse_qs(url_components.query)\n # Before Python 3.5 you could update original_params with\n # additional_params, but here all the variables are immutable.\n merged_params = {**original_params, **additional_params}\n updated_query = urlencode(merged_params, doseq=True)\n # _replace() is how you can create a new NamedTuple with a changed field\n return url_components._replace(query=updated_query).geturl()",
"def url_replace(context, **kwargs):\n query = context['request'].GET.dict()\n query.update(kwargs)\n return urlencode(query)",
"def url_replace(context, **kwargs):\n query = context['request'].GET.dict()\n query.update(kwargs)\n return urlencode(query)",
"def add_or_replace_parameters(url, new_parameters):\n return _add_or_replace_parameters(url, new_parameters)",
"def delete_query_parameter(url, param_name):\n scheme, netloc, path, query_string, fragment = urlsplit(url)\n query_params = parse_qs(query_string)\n query_params.pop(param_name, None)\n new_query_string = urlencode(query_params, doseq=True)\n url = urlunsplit((scheme, netloc, path, new_query_string, fragment))\n\n return url",
"def url_append_query(url, query_params):\n if not query_params:\n return url\n scheme, netloc, path, params, query, fragment = urlparse_normalized(url)\n query = (query + \"&\") if query else query\n query_string = query + urlencode_s(query_unflatten(query_params))\n return urlunparse((scheme, netloc, path, params, query_string, fragment))",
"def parameterised_url(url, params):\n url_parts = list(urlparse.urlparse(url))\n query = dict(urlparse.parse_qsl(url_parts[4]))\n query.update(params)\n url_parts[4] = urlparse.urlencode(query)\n url = urlparse.urlunparse(url_parts)\n return url",
"def url_replace(request, field, value):\n _dict = request.GET.copy()\n _dict[field] = value\n return _dict.urlencode()",
"def _extend_url(self, url, params):\n # filter out None parameters\n params = {k:v for k,v in params.items() if v is not None}\n for key in params:\n url = url + \"&{}={}\".format(key, params[key])\n return url",
"def url_add_query(url, extra_query_params, allow_func=None):\n scheme, netloc, path, params, query, fragment = urlparse_normalized(url)\n\n d = query_add(query, extra_query_params)\n qs = urlencode_s(d, allow_func=allow_func)\n return urlunparse((scheme, netloc, path, params, qs, fragment))",
"def merge_url(url, params):\n req = PreparedRequest()\n req.prepare_url(url, params)\n return req.url",
"def updated_querystring(request, params):\n original_params = request.GET.copy()\n for key in params:\n if key in original_params:\n original_params.pop(key)\n original_params.update(params)\n return original_params.urlencode()",
"def updated_querystring(request, params):\n original_params = request.GET.copy()\n for key in params:\n if key in original_params:\n original_params.pop(key)\n original_params.update(params)\n return original_params.urlencode()",
"def this_url_replace(context, **kwargs):\n query = context['request'].GET.dict()\n query.update(kwargs)\n return '{}?{}'.format(\n context['view'].request.META['PATH_INFO'],\n urlencode(query)\n )",
"def modify_query(**values):\n args = request.args.copy()\n\n for attr, new_value in values.items():\n if new_value is not None:\n args[attr] = new_value\n elif attr in args:\n del args[attr]\n\n if args:\n return request.base_url + \"?\" + url_encode(args)\n else:\n return request.base_url",
"def add_query_params(\n url: str, **params: Mapping[str, Union[str, List[str]]]\n ) -> str:\n o = urlparse(url)\n qp = parse_qs(o.query, keep_blank_values=True)\n\n for k, v in params.items():\n if isinstance(v, str):\n v = [v]\n try:\n qp[k].extend(v)\n except KeyError:\n qp[k] = v\n\n qs = urlencode(qp, doseq=True, quote_via=quote)\n return urlunparse(o._replace(query=qs))",
"def url_set(self, url):\n self.request('/v1.1/url', 'POST', body={'url': url})",
"def edit_url(self, inplace=False,show_url = 0, **kwargs):\n\n if len(kwargs) > 0 :\n other_args = [\"{}={}\".format(k,str(v).replace(\" \",\"+\")) for k,v in kwargs.items()]\n new_url = self.url + \"&\" + \"&\".join(other_args)\n if show_url: print(new_url) \n\n if \"maxresults\" not in kwargs : \n print(\"Be careful : This request will only display the first 100 results.\")\n\n if inplace:\n self.url = new_url"
] | [
"0.7598249",
"0.7557596",
"0.7520044",
"0.7314738",
"0.7273958",
"0.691234",
"0.67587596",
"0.6752881",
"0.67470497",
"0.67071164",
"0.67071164",
"0.6613174",
"0.658672",
"0.6533154",
"0.6533154",
"0.64942557",
"0.64076775",
"0.6387804",
"0.63828295",
"0.6361286",
"0.6291151",
"0.62464005",
"0.62065595",
"0.6167459",
"0.6167459",
"0.6135879",
"0.6065812",
"0.60417587",
"0.6028099",
"0.5960752"
] | 0.80038387 | 0 |
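The candidate functions listed above all merge extra query parameters into a URL. As a quick illustration of the pattern used by the first entry (`append_query_params`), here is a self-contained, slightly simplified sketch with an example call; the example URL and parameter names are made up for demonstration and are not part of the record.

```python
from urllib.parse import urlsplit, urlunsplit, parse_qs, urlencode

def append_query_params(original_url, **kwargs):
    # Split the URL, merge the keyword arguments into its query string
    # (overwriting existing keys), then reassemble the URL.
    scheme, netloc, path, query_string, fragment = urlsplit(original_url)
    query_params = parse_qs(query_string)
    for key, value in kwargs.items():
        query_params[key] = [value]
    new_query_string = urlencode(query_params, doseq=True)
    return urlunsplit((scheme, netloc, path, new_query_string, fragment))

if __name__ == "__main__":
    url = "https://example.com/search?q=python&page=1"
    # "page" is overwritten, "sort" is added.
    print(append_query_params(url, page=2, sort="date"))
    # https://example.com/search?q=python&page=2&sort=date
```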
For every partition whose idle resources do not meet the requirements, add the maximally available idle resources. | def add_max_resources(idle_res, hwinfo):
hwinfo_idle = hwinfo.filter_idle()
idle_partitions = [r.partition() for r in idle_res]
max_resources = resources.get_maximal_resources(hwinfo_idle)
for p in np.unique(hwinfo_idle['partition']):
        if p not in idle_partitions:  # and max_resources[p].cpus() > 0:
idle_res.append(max_resources[p]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _calc_worker_assign_limits(self, initial_count, occupied=None):\n occupied = occupied or dict()\n actual_count = initial_count - sum(occupied.values())\n\n endpoint_res = sorted(self._worker_slots.items(), key=operator.itemgetter(1),\n reverse=True)\n\n endpoints = [t[0] for t in endpoint_res]\n endpoint_cores = np.array([t[1] for t in endpoint_res]).astype(np.float32)\n\n # remove assigned nodes from limitations\n counts = initial_count * endpoint_cores / endpoint_cores.sum()\n for idx, ep in enumerate(endpoints):\n counts[idx] = max(0, counts[idx] - occupied.get(ep, 0))\n\n # all assigned, nothing to do\n if counts.sum() == 0:\n return dict((ep, 0) for ep in endpoints)\n\n counts = (actual_count * counts / counts.sum()).astype(np.int32)\n\n # assign remaining nodes\n pos = 0\n rest = actual_count - counts.sum()\n while rest > 0:\n counts[pos] += 1\n rest -= 1\n pos = (pos + 1) % len(counts)\n return dict(zip(endpoints, counts))",
"def reclaim_unschedulable_nodes(self, new_desired_capacity):\n desired_capacity = min(self.max_size, new_desired_capacity)\n num_unschedulable = len(self.unschedulable_nodes)\n num_schedulable = self.actual_capacity - num_unschedulable\n \n if num_schedulable < desired_capacity:\n for node in self.unschedulable_nodes:\n if node.uncordon():\n num_schedulable += 1\n # Uncordon only what we need\n if num_schedulable == desired_capacity:\n break",
"def get_available_resources(threshold, usage, total):\n return dict((host, int(threshold * total[host] - resource))\n for host, resource in usage.items())",
"def calculate_available_node_res (self, vnfs_to_be_left_in_place={},\n mode=MODE_ADD):\n # add available res attribute to all Infras and subtract the running\n # NFs` resources from the given max res\n for n in self.infras:\n setattr(self.network.node[n.id], 'availres',\n copy.deepcopy(self.network.node[n.id].resources))\n if mode == self.MODE_ADD:\n for vnf in self.running_nfs(n.id):\n # if a VNF needs to be left in place, then it is still mapped by the \n # mapping process, but with placement criteria, so its resource \n # requirements will be subtracted during the greedy process.\n if vnf.id not in vnfs_to_be_left_in_place:\n try:\n newres = self.network.node[n.id].availres.subtractNodeRes(\n self.network.node[vnf.id].resources,\n self.network.node[n.id].resources)\n except RuntimeError:\n raise RuntimeError(\n \"Infra node`s resources are expected to represent its maximal \"\n \"capabilities.\"\n \"The NodeNF(s) running on Infra node %s, use(s)more resource \"\n \"than the maximal.\" % n.id)\n else:\n try:\n newres = self.network.node[n.id].availres.subtractNodeRes(\n vnfs_to_be_left_in_place[vnf.id].resources,\n self.network.node[n.id].resources)\n except RuntimeError:\n raise RuntimeError(\"VNF %s cannot be kept on host %s with \"\n \"increased resource requirements due to not \"\n \"enough available resources!\" % (vnf.id, n.id))\n\n self.network.node[n.id].availres = newres",
"def _update_available_resources(self, context):\n\n all_nodes = self.driver.get_available_nodes()\n all_rps = self.scheduler_client.reportclient\\\n .get_filtered_resource_providers({})\n node_uuids = [node.uuid for node in all_nodes]\n\n # Clean orphan resource providers in placement\n for rp in all_rps:\n if rp['uuid'] not in node_uuids:\n server_by_node = objects.Server.list(\n context, filters={'node_uuid': rp['uuid']})\n if server_by_node:\n continue\n self.scheduler_client.reportclient.delete_resource_provider(\n rp['uuid'])\n\n for node in all_nodes:\n if self.driver.is_node_consumable(node):\n self.scheduler_client.reportclient \\\n .delete_allocations_for_resource_provider(node.uuid)\n resource_class = sched_utils.ensure_resource_class_name(\n node.resource_class)\n inventory = self.driver.get_node_inventory(node)\n inventory_data = {resource_class: inventory}\n self.scheduler_client.set_inventory_for_provider(\n node.uuid, node.name or node.uuid, inventory_data,\n resource_class)",
"def find_free(min_=0):\n while is_occupied(min_):\n min_ += 1\n return min_",
"def _consume_resources(self, job_limits: Dict[str, int]) -> None:\n for limit_name, count in job_limits.items():\n self.limits_used[limit_name] += count",
"def get_required_platform_reserved_memory(dbapi, ihost, numa_node, low_core=False):\n required_reserved = 0\n\n system = dbapi.isystem_get_one()\n ihost_inodes = dbapi.inode_get_by_ihost(ihost['uuid'])\n numa_node_count = len(ihost_inodes)\n\n if is_virtual() or is_virtual_worker(ihost):\n # minimal memory requirements for VirtualBox\n required_reserved += constants.DISK_IO_RESIDENT_SET_SIZE_MIB_VBOX\n if host_has_function(ihost, constants.WORKER):\n if numa_node == 0:\n if ihost['personality'] == constants.WORKER:\n required_reserved += \\\n constants.PLATFORM_CORE_MEMORY_RESERVED_MIB_VBOX_WORKER\n else:\n required_reserved += \\\n constants.PLATFORM_CORE_MEMORY_RESERVED_MIB_VBOX\n if host_has_function(ihost, constants.CONTROLLER):\n required_reserved += \\\n constants.COMBINED_NODE_CONTROLLER_MEMORY_RESERVED_MIB_VBOX\n else:\n # If not a controller, add overhead for metadata and vrouters\n required_reserved += \\\n constants.NETWORK_METADATA_OVERHEAD_MIB_VBOX\n else:\n required_reserved += \\\n constants.DISK_IO_RESIDENT_SET_SIZE_MIB_VBOX\n elif (system.distributed_cloud_role ==\n constants.DISTRIBUTED_CLOUD_ROLE_SYSTEMCONTROLLER and\n ihost['personality'] == constants.CONTROLLER):\n required_reserved += \\\n constants.DISTRIBUTED_CLOUD_CONTROLLER_MEMORY_RESERVED_MIB // numa_node_count\n elif host_has_function(ihost, constants.WORKER):\n # Engineer reserve per numa node for disk IO RSS overhead\n required_reserved += constants.DISK_IO_RESIDENT_SET_SIZE_MIB\n if numa_node == 0:\n # Engineer platform reserve for worker\n required_reserved += \\\n constants.PLATFORM_CORE_MEMORY_RESERVED_MIB\n if host_has_function(ihost, constants.CONTROLLER):\n # If AIO, reserve additional memory for controller function.\n # Controller memory usage depends on number of workers.\n if low_core:\n required_reserved += \\\n constants.COMBINED_NODE_CONTROLLER_MEMORY_RESERVED_MIB_XEOND\n else:\n required_reserved += \\\n constants.COMBINED_NODE_CONTROLLER_MEMORY_RESERVED_MIB\n else:\n # If not a controller, add overhead for metadata and vrouters\n required_reserved += \\\n constants.NETWORK_METADATA_OVERHEAD_MIB\n elif ihost['personality'] == constants.CONTROLLER:\n # Standard controller\n required_reserved += \\\n constants.STANDARD_CONTROLLER_MEMORY_RESERVED_MIB // numa_node_count\n\n return required_reserved",
"def peak_shaving_max_min(\n loadfactor_yd_cy_improved,\n average_yd,\n fuel_yh,\n mode_constrained\n ):\n # ------------------------------------------\n # Calculate new maximum demand for every day\n # and fueltype with help of newly adaped load factor\n # ------------------------------------------\n allowed_demand_max_d = average_yd / loadfactor_yd_cy_improved\n allowed_demand_max_d[np.isnan(allowed_demand_max_d)] = 0\n\n if mode_constrained:\n average_yd = average_yd[:, np.newaxis]\n allowed_demand_max_d = allowed_demand_max_d[:, np.newaxis]\n else:\n average_yd = average_yd[:, :, np.newaxis]\n allowed_demand_max_d = allowed_demand_max_d[:, :, np.newaxis]\n\n # ------------------------------------------\n # Calculate difference to daily mean for every hour\n # for every fueltype (hourly value - daily mean)\n # ------------------------------------------\n diff_to_mean = fuel_yh - average_yd\n\n # ------------------------\n # Calculate areas of lp below average for every day\n # all lp higher than average are set to zero\n # ------------------------\n diff_to_mean[diff_to_mean > 0] = 0\n diff_to_mean = np.abs(diff_to_mean)\n\n # Sum along all fueltypes the total fuels which are lp below average\n # Calculate percentage of total shiftable from above average to\n # below average for all hours which can take on fuel\n if mode_constrained:\n tot_area_below_mean = np.sum(diff_to_mean, axis=1) #one fueltype\n tot_area_below_mean = tot_area_below_mean[:, np.newaxis]\n else:\n tot_area_below_mean = np.sum(diff_to_mean, axis=2) #multiple fueltypes\n tot_area_below_mean = tot_area_below_mean[:, :, np.newaxis]\n\n area_below_mean_p = diff_to_mean / tot_area_below_mean\n area_below_mean_p[np.isnan(area_below_mean_p)] = 0\n\n # Calculate diff to newmax for every hour\n diff_to_max_demand_d = fuel_yh - allowed_demand_max_d\n diff_to_max_demand_d[diff_to_max_demand_d < 0] = 0\n\n # -----------------------------------------\n # Start with largest deviation to mean\n # and shift to all hours below average\n # -----------------------------------------\n # Calculate total demand which is to be shifted\n if mode_constrained:\n tot_demand_to_shift = np.sum(diff_to_max_demand_d, axis=1) # one fueltype\n tot_demand_to_shift = tot_demand_to_shift[:, np.newaxis]\n else:\n tot_demand_to_shift = np.sum(diff_to_max_demand_d, axis=2) # multiple fueltypes\n tot_demand_to_shift = tot_demand_to_shift[:, :, np.newaxis]\n\n # Add fuel below average:\n # Distribute shiftable demand to all hours which are below average\n # according to percentage contributing to lf which is below average\n shifted_fuel_yh = fuel_yh + (area_below_mean_p * tot_demand_to_shift)\n\n # Set all fuel hours whih are above max to max (substract diff)\n shifted_fuel_yh = shifted_fuel_yh - diff_to_max_demand_d\n\n return shifted_fuel_yh",
"def get_minimum_platform_reserved_memory(dbapi, ihost, numa_node):\n reserved = 0\n\n system = dbapi.isystem_get_one()\n ihost_inodes = dbapi.inode_get_by_ihost(ihost['uuid'])\n numa_node_count = len(ihost_inodes)\n\n if is_virtual() or is_virtual_worker(ihost):\n # minimal memory requirements for VirtualBox\n if host_has_function(ihost, constants.WORKER):\n if numa_node == 0:\n reserved += 1200\n if host_has_function(ihost, constants.CONTROLLER):\n reserved += 5000\n else:\n reserved += 500\n elif (system.distributed_cloud_role ==\n constants.DISTRIBUTED_CLOUD_ROLE_SYSTEMCONTROLLER and\n ihost['personality'] == constants.CONTROLLER):\n reserved += \\\n constants.DISTRIBUTED_CLOUD_CONTROLLER_MEMORY_RESERVED_MIB // numa_node_count\n elif host_has_function(ihost, constants.WORKER):\n # Engineer 1G per numa node for disk IO RSS overhead\n reserved += constants.DISK_IO_RESIDENT_SET_SIZE_MIB\n elif ihost['personality'] == constants.CONTROLLER:\n # Standard controller\n reserved += constants.STANDARD_CONTROLLER_MEMORY_RESERVED_MIB // numa_node_count\n\n return reserved",
"def get_capacity():\n fs.get_capacity()",
"def setMinMax(self):\n currentIndustryNum = self.myParent.myIndustry[self.myIndustryData.id]\n oldIndustryNum = self.myParent.myOldIndustry[self.myIndustryData.id]\n self.setMinValue(-currentIndustryNum)\n if oldIndustryNum > currentIndustryNum:\n self.setMaxValue(oldIndustryNum-currentIndustryNum)\n elif self.isIndustryResearched() == 0:\n self.setMaxValue(0)\n else:\n max = self.getMaxFromFundsAvail()\n cityNum = (self.myParent.cities-self.myParent.citiesUsed)/self.myIndustryData.cities\n if max < cityNum:\n self.setMaxValue(max)\n else:\n self.setMaxValue(cityNum)",
"def free_resources(self, context):\n self.update_available_resource(context.elevated())",
"def _get_prod_bounds(self, comp):\n cap_res = comp.get_capacity_var() # name of resource that defines capacity\n maximum = comp.get_capacity(None, None, None, None)[0][cap_res]\n # TODO minimum!\n # producing or consuming the defining resource?\n if maximum > 0:\n return 0, maximum, pyo.NonNegativeReals\n else:\n return maximum, 0, pyo.NonPositiveReals",
"def _allocate(self, n_resource, beliefs):\n # With probability epsilon allocate with uniform probability.\n # With probability 1-epsilon, allocate according to belief.\n if self.rng.binomial(1, self.params.epsilon):\n self.last_allocation = self.sample_from(self.action_space)\n else:\n optimal_allocation = None\n max_expected_yield = 0\n\n # Construct entire fi table, and corresponding min and max fi tables.\n # The fi table is a table of the expected probability that a incident\n # in a bin is discovered by an attention unit, for each bin and each\n # possible allocation amount for that bin.\n fi_table = self._construct_approx_fi_table(self._n_bins, beliefs,\n self._n_resource + 1)\n min_fi_table = np.maximum(fi_table - self.params.alpha, 0)\n max_fi_table = min_fi_table + self.params.alpha\n\n # For every bin.\n for bin_i in range(self._n_bins):\n current_allocation = np.zeros(\n self._n_bins, dtype=self.action_space.dtype)\n alloc_upperbound = np.zeros(self._n_bins, dtype=self.action_space.dtype)\n\n # Get all upper and lower bounds with bin_i as starting bin.\n rows = np.array([i for i in range(self._n_bins) if i != bin_i])\n broadcast_shape = (self._n_resource + 1, len(rows),\n self._n_resource + 1)\n lower_bounds = np.argmax(\n (np.broadcast_to(fi_table[rows, :], broadcast_shape).T >=\n min_fi_table[bin_i]).T,\n axis=2)\n upper_bounds = np.argmin(\n (np.broadcast_to(fi_table[rows, :], broadcast_shape).T <=\n max_fi_table[bin_i]).T,\n axis=2) - 1\n upper_bounds[upper_bounds == -1] = self._n_resource\n\n # For every possible allocation to that bin.\n for alloc_to_i in range(self._n_resource + 1):\n current_allocation = np.zeros(\n self._n_bins, dtype=self.action_space.dtype)\n current_allocation[bin_i] = alloc_to_i\n alloc_upperbound[rows] = upper_bounds[alloc_to_i]\n # Set current allocation values to lower bounds.\n current_allocation[rows] = lower_bounds[alloc_to_i]\n alloc_upperbound[bin_i] = alloc_to_i\n\n if np.sum(current_allocation) > self._n_resource or np.any(\n current_allocation > alloc_upperbound):\n # This allocation scheme requires more resource than available.\n # Move on to next possible allocation scheme.\n continue\n remaining_resource = self._n_resource - np.sum(current_allocation)\n\n # Now greedily allocate remaining resources to bins that have maximal\n # marginal probability of making another discovery.\n for _ in range(remaining_resource):\n marginal_probs = []\n for j in range(self._n_bins):\n if current_allocation[j] < alloc_upperbound[j]:\n marginal_probs.append(\n ((self._calculate_tail_probability(\n current_allocation[j] + 1, beliefs[j]) -\n self._calculate_tail_probability(current_allocation[j],\n beliefs[j])), j))\n if not marginal_probs:\n # Allocation cannot make full use of resources and satisfy\n # fairness constraint go to next allocation.\n break\n next_bin = max(marginal_probs, key=lambda i: i[0])[1]\n current_allocation[next_bin] += 1\n if np.sum(current_allocation) < self._n_resource or np.any(\n current_allocation > alloc_upperbound):\n # This allocation scheme requires more resource than available\n # or doesn't make full use of resources.\n # Move on to next possible allocation scheme.\n continue\n\n # If current_allocation has the highest expected yield, store it as\n # the optimal allocation.\n # pylint: disable=g-complex-comprehension\n expected_yield = np.sum([\n np.sum([\n self._calculate_tail_probability(\n np.array(range(1, current_allocation[i] + 1)), beliefs[i])\n ]) for i in range(self._n_bins)\n ])\n # pylint: 
enable=g-complex-comprehension\n\n if expected_yield >= max_expected_yield:\n max_expected_yield = expected_yield\n optimal_allocation = current_allocation\n\n if optimal_allocation is None:\n print(\"No allocation found for this alpha: %f\" % self.params.alpha)\n logging.warning(\"No allocation found for this alpha: %f\",\n self.params.alpha)\n optimal_allocation = np.zeros(\n self._n_bins, dtype=self.action_space.dtype)\n raise gym.error.InvalidAction(\"Invalid action: %s with alpha %f\" %\n (optimal_allocation, self.params.alpha))\n\n self.last_allocation = optimal_allocation\n\n return self.last_allocation",
"def _shrink(self):\n self.capacity = round(self.capacity / self.factor)\n temp = [None] * self.capacity\n for i in range(self.capacity):\n temp[i] = self.store[i]\n self.store = temp",
"def _release_resources(self, job_limits: Dict[str, int]) -> None:\n for limit_name, count in job_limits.items():\n self.limits_used[limit_name] -= count",
"def _get_new_capacity(self):\n for prime in primes:\n if prime > 2 * self.size:\n return prime\n raise ValueError(\"Error: Table size overflow!\")",
"def best_fit_decreasing(last_n_vm_cpu, hosts_cpu, hosts_ram,\n inactive_hosts_cpu, inactive_hosts_ram,\n vms_cpu, vms_ram):\n LOG.debug('last_n_vm_cpu: %s', str(last_n_vm_cpu))\n LOG.debug('hosts_cpu: %s', str(hosts_cpu))\n LOG.debug('hosts_ram: %s', str(hosts_ram))\n LOG.debug('inactive_hosts_cpu: %s', str(inactive_hosts_cpu))\n LOG.debug('inactive_hosts_ram: %s', str(inactive_hosts_ram))\n LOG.debug('vms_cpu: %s', str(vms_cpu))\n LOG.debug('vms_ram: %s', str(vms_ram))\n vms_tmp = []\n for vm, cpu in vms_cpu.items():\n if cpu:\n last_n_cpu = cpu[-last_n_vm_cpu:]\n vms_tmp.append((sum(last_n_cpu) / len(last_n_cpu),\n vms_ram[vm],\n vm))\n else:\n LOG.warning('No CPU data for VM: %s - skipping', vm)\n\n vms = sorted(vms_tmp, reverse=True)\n hosts = sorted(((v, hosts_ram[k], k)\n for k, v in hosts_cpu.items()))\n inactive_hosts = sorted(((v, inactive_hosts_ram[k], k)\n for k, v in inactive_hosts_cpu.items()))\n mapping = {}\n for vm_cpu, vm_ram, vm_uuid in vms:\n mapped = False\n while not mapped:\n for _, _, host in hosts:\n if hosts_cpu[host] >= vm_cpu and hosts_ram[host] >= vm_ram:\n mapping[vm_uuid] = host\n hosts_cpu[host] -= vm_cpu\n hosts_ram[host] -= vm_ram\n mapped = True\n break\n else:\n if inactive_hosts:\n activated_host = inactive_hosts.pop(0)\n hosts.append(activated_host)\n hosts = sorted(hosts)\n hosts_cpu[activated_host[2]] = activated_host[0]\n hosts_ram[activated_host[2]] = activated_host[1]\n else:\n break\n\n if len(vms) == len(mapping):\n return mapping\n return {}",
"def update_available_resource(self, context):\n # ask hypervisor for its view of resource availability &\n # usage:\n resources = self.driver.get_available_resource()\n if not resources:\n # The virt driver does not support this function\n LOG.warn(_(\"Virt driver does not support \"\n \"'get_available_resource' Compute tracking is disabled.\"))\n self.compute_node = None\n self.claims = {}\n return\n\n # Confirm resources dictionary contains expected keys:\n self._verify_resources(resources)\n\n resources['free_ram_mb'] = resources['memory_mb'] - \\\n resources['memory_mb_used']\n resources['free_disk_gb'] = resources['local_gb'] - \\\n resources['local_gb_used']\n\n LOG.audit(_(\"free_ram_mb: %s\") % resources['free_ram_mb'])\n LOG.audit(_(\"free_disk_gb: %s\") % resources['free_disk_gb'])\n # Apply resource claims representing in-progress operations to\n # 'resources'. This may over-estimate the amount of resources in use,\n # at least until the next time 'update_available_resource' runs.\n self._apply_claims(resources)\n\n # also generate all load stats:\n values = self._create_load_stats(context)\n resources.update(values)\n\n if not self.compute_node:\n # we need a copy of the ComputeNode record:\n service = self._get_service(context)\n if not service:\n # no service record, disable resource\n return\n\n compute_node_ref = service['compute_node']\n if compute_node_ref:\n self.compute_node = compute_node_ref[0]\n\n if not self.compute_node:\n # Need to create the ComputeNode record:\n resources['service_id'] = service['id']\n self.compute_node = self._create(context, resources)\n LOG.info(_('Compute_service record created for %s ') % self.host)\n\n else:\n # just update the record:\n self.compute_node = self._update(context, resources,\n prune_stats=True)\n LOG.info(_('Compute_service record updated for %s ') % self.host)",
"def calculate_demand(self):\r\n \r\n for cell in self.cells:\r\n cell.demand = min(cell.volume, self.max_volume) /self.interval\r\n self.demand = self.cells[-1].demand",
"def _create_capacity(self, m, comp, prod_name):\n name = comp.name\n cap_res = comp.get_capacity_var() # name of resource that defines capacity\n r = m.resource_index_map[comp][cap_res] # production index of the governing resource\n # production is always lower than capacity\n ## NOTE get_capacity returns (data, meta) and data is dict\n ## TODO does this work with, e.g., ARMA-based capacities?\n ### -> \"time\" is stored on \"m\" and could be used to correctly evaluate the capacity\n cap = comp.get_capacity(None, None, None, None)[0][cap_res] # value of capacity limit (units of governing resource)\n rule = partial(self._capacity_rule, prod_name, r, cap)\n constr = pyo.Constraint(m.T, rule=rule)\n setattr(m, '{c}_{r}_capacity_constr'.format(c=name, r=cap_res), constr)\n # minimum production\n print('DEBUGG dispatchable?', comp.name, comp.is_dispatchable())\n if comp.is_dispatchable() == 'fixed':\n minimum = cap\n var = getattr(m, prod_name)\n values = var.get_values()\n for k in values:\n values[k] = cap\n var.set_values(values)\n else:\n minimum = 0 # -> for now just use 0, but fix this! XXX\n print('DEBUGG ... min:', minimum)\n rule = partial(self._min_prod_rule, prod_name, r, cap, minimum)\n constr = pyo.Constraint(m.T, rule=rule)\n setattr(m, '{c}_{r}_minprod_constr'.format(c=name, r=cap_res), constr)",
"def _brute_force_unbounded_knapsack_aux(weight_limit, weight_list, value_list, item_list):\n max_value = 0\n max_list = []\n for i in range(len(weight_list)):\n available_space = weight_limit - weight_list[i]\n if available_space >= 0:\n current_value, current_list = _brute_force_unbounded_knapsack_aux(\n available_space, weight_list, value_list, item_list)\n current_value += value_list[i]\n current_list.append(i)\n if (current_value > max_value):\n max_value = current_value\n max_list = current_list\n return max_value, max_list",
"def new_capacity_rule(mod, prj, prd):\n return 0",
"def new_capacity_rule(mod, g, p):\n return 0",
"def set_resources():\n global available_resources\n global EdgenodeResources\n recv_json = request.get_json()\n for resourcename, value in recv_json.items():\n available_resources[resourcename] = value\n # TODO make this better\n EdgenodeResources = [TaskResources(ram=int(available_resources['RAM']), cpu=int(\n available_resources['CPU']), hdd=int(available_resources['HDD'])), available_resources['DATA']]\n\n print 'Available resources set to', EdgenodeResources\n return 'Available resources set to ' + str(available_resources)",
"def _grow(self):\n self.capacity *= self.factor\n temp = [None] * self.capacity\n for i in range(self.size):\n temp[i] = self.store[i]\n self.store = temp",
"def uppmax(disk_quota, cpu_hours):\n merged_results = {}\n if disk_quota:\n disk_quota_data = status.get_uppmax_quotas()\n merged_results.update(disk_quota_data)\n if cpu_hours:\n cpu_hours_data = status.get_uppmax_cpu_hours()\n if not merged_results:\n merged_results = cpu_hours_data\n else:\n for key in cpu_hours_data.keys():\n if key not in merged_results:\n merged_results[key] = cpu_hours_data[key]\n else:\n merged_results[key].update(cpu_hours_data[key])\n status.update_status_db(merged_results, server_type='uppmax')",
"def ram_condition(min_gb=3):\n return get_free_gb() < min_gb",
"def refresh_pids(active_pids, resources):\n still_active_pids = []\n no_change = True\n for info in active_pids:\n pid, gpu, title, cmd, lock_path = info\n if still_active(pid, cmd):\n still_active_pids.append(info)\n else:\n print(f\"[{time.strftime(time.ctime())}] {title} seems to be over.\")\n os.remove(lock_path)\n resources.free(gpu=gpu)\n no_change = False\n return still_active_pids, no_change"
] | [
"0.60580754",
"0.55720556",
"0.5522742",
"0.54593384",
"0.5274331",
"0.5196709",
"0.5186926",
"0.5179052",
"0.51778924",
"0.51728237",
"0.5168364",
"0.5154532",
"0.5147112",
"0.5139551",
"0.510703",
"0.50974077",
"0.50913036",
"0.5090747",
"0.5088053",
"0.503926",
"0.50236523",
"0.50177157",
"0.50041175",
"0.49924114",
"0.49591324",
"0.49544853",
"0.4936392",
"0.49081606",
"0.49050307",
"0.49042308"
] | 0.766325 | 0 |
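The positive snippet above appends the maximal idle resources for every partition that has no entry in `idle_res`. The `hwinfo`/`resources` types it relies on are project-specific and not shown in this record, so the sketch below (an assumption, not the original API) reproduces the same pattern with plain dictionaries and a small usage example.

```python
import numpy as np

def add_max_resources(idle_res, hwinfo_idle, max_resources):
    # idle_res: list of dicts such as {"partition": "gpu", "cpus": 8}
    # hwinfo_idle: mapping with a "partition" array of idle nodes
    # max_resources: partition name -> maximal idle resources for it
    idle_partitions = [r["partition"] for r in idle_res]
    for p in np.unique(hwinfo_idle["partition"]):
        if p not in idle_partitions:
            idle_res.append(max_resources[p])
    return idle_res

idle = [{"partition": "gpu", "cpus": 8}]
hwinfo = {"partition": np.array(["gpu", "cpu", "cpu"])}
maxres = {"gpu": {"partition": "gpu", "cpus": 16},
          "cpu": {"partition": "cpu", "cpus": 64}}
print(add_max_resources(idle, hwinfo, maxres))
# [{'partition': 'gpu', 'cpus': 8}, {'partition': 'cpu', 'cpus': 64}]
```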
Extract common date features from date | def dataset_extract_features_from_date(dataset,date_feature):
dataset['dayofmonth'] = dataset[date_feature].dt.day
dataset['dayofyear'] = dataset[date_feature].dt.dayofyear
dataset['dayofweek'] = dataset[date_feature].dt.dayofweek
dataset['month'] = dataset[date_feature].dt.month
dataset['year'] = dataset[date_feature].dt.year
dataset['weekofyear'] = dataset[date_feature].dt.weekofyear
dataset['is_month_start'] = (dataset[date_feature].dt.is_month_start).astype(int)
dataset['is_month_end'] = (dataset[date_feature].dt.is_month_end).astype(int)
return dataset | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_date_features(df = None, date = None):\n #TODO",
"def get_date_pred():\r\n \r\n date_now = dt.datetime.now()\r\n date_pred = [date_now - dt.timedelta(days=1)+dt.timedelta(days=i) for i in range(8)]\r\n month_pred = [item.month for item in date_pred]\r\n day_pred = [item.day for item in date_pred]\r\n \r\n return date_pred,month_pred,day_pred",
"def generate_features(df):\n return np.array([np.array(xi) for xi in pd.to_datetime(df).apply(lambda x: [x.year, x.month, x.day, x.hour, x.minute, x.second, x.weekday()])])",
"def extractFeatures(sample, features):\n sample = pd.merge(sample, features, on=['Store', 'Date'])\n\n # Extract features from the Date\n sample['Date'] = pd.to_datetime(sample['Date'])\n sample['WeekOfYear'] = sample['Date'].dt.weekofyear\n sample['Year'] = sample['Date'].dt.year\n return sample",
"def datetime_features(\n s: pd.Series, result: Optional[pd.DataFrame] = None\n) -> pd.DataFrame:\n result = date_features(s, result)\n return time_features(s, result)",
"def add_date_features(data):\n data['member_day'] = data.became_member_on.dt.day\n data['member_weekday'] = data.became_member_on.dt.weekday\n data['member_year'] = data.became_member_on.dt.year\n data['member_month'] = data.became_member_on.dt.month\n\n return data",
"def separate_date(x):\n x[\"SALE DAY\"] = x.apply(get_day, axis=1)\n x[\"SALE MONTH\"] = x.apply(get_month, axis=1)\n x[\"SALE YEAR\"] = x.apply(get_year, axis=1)",
"def extract_dates(data):\n dates = []\n \n for line in data.splitlines():\n if line[6:8] == \"20\":\n dates.append(datetime.strptime(line[6:16], '%Y-%m-%d').date())\n \n return list(set(dates))\n pass",
"def date_features(s: pd.Series, result: Optional[pd.DataFrame] = None) -> pd.DataFrame:\n if result is None:\n result = pd.DataFrame(s, copy=False)\n index = cast(pd.DatetimeIndex, s.index)\n\n result[\"year\"] = index.year\n result[\"month\"] = index.month\n result[\"day\"] = index.day\n result[\"dayofweek\"] = index.dayofweek\n result[\"dayofyear\"] = index.dayofyear\n result[\"quarter\"] = index.quarter\n result[\"season\"] = _map(index.month, _SEASON_MAP)\n result[\"weekofyear\"] = index.weekofyear\n try:\n # Work around numpy Deprecation Warning about parsing timezones\n # by converting to UTC and removing the tz info.\n dates = index.tz_convert(None).to_numpy()\n except TypeError:\n # No timezone.\n dates = index.to_numpy()\n first_of_month = pd.to_datetime(dates.astype(\"datetime64[M]\"))\n week_of_month = np.ceil((first_of_month.dayofweek + index.day) / 7.0)\n result[\"weekofmonth\"] = week_of_month.astype(int)\n # result[\"is_holiday\"] = ?\n # result[\"holiday_types\"] = ?\n result[\"is_weekend\"] = index.dayofweek >= 5\n result[\"is_leap_year\"] = index.is_leap_year\n result[\"is_leap_day\"] = (index.month == 2) & (index.day == 29)\n result[\"is_month_end\"] = index.is_month_end\n result[\"is_quarter_end\"] = index.is_month_end & (index.month % 4 == 3)\n\n return result",
"def extract_temporal_info(self, featurelist, strExpDate, strOnsetDate, strReceiveDate):\n \n expDateInput = self.parse_time_string(strExpDate)\n onsetDateInput = self.parse_time_string(strOnsetDate) \n receiveDate = self.parse_time_string(strReceiveDate) \n \n self.exposureDate = expDateInput\n self.onsetDate = onsetDateInput\n self.receiveDate = receiveDate\n self.exposureDateConfidence = 0\n self.onsetDateConfidence = 0\n \n ##: Obtain timex list\n timexList = timexan.annotateTimexes(self.text, expDateInput) \n \n self.sentence_full_tags = self.create_sentence_full_tags(featurelist, timexList)\n \n timexList = self.preprocess_timex_list(timexList, featurelist)\n \n ###: divide features that contain multiple timexes\n featurelist = self.divide_feature_containing_multiple_timexes(featurelist, timexList)\n \n featurelist = self.create_feature_timex_association(featurelist, timexList)\n \n timexList = self.construct_timeline(timexList, featurelist)\n \n# (expDate, onsetDate, state) = self.calculate_exposure_onset_dates(\n# timexList, featurelist, sentences, taggedSentences, expDateInput, onsetDateInput, expDate)\n \n featurelist = self.process_feature_durations(featurelist)\n \n featurelist = self.postprocess_features(featurelist)\n \n if self.exposureDateConfidence==1:\n if self.onsetDateConfidence==1:\n datesConfidence = 1\n else:\n datesConfidence = 0.9\n else:\n datesConfidence = 0.8\n \n ##: Create DocumentFeature object for return\n docFeature = DocumentFeature(featurelist, timexList, self.exposureDate, self.onsetDate, self.receiveDate, datesConfidence, expDateInput, onsetDateInput) \n \n return docFeature",
"def get_cols_for_datetime(train: NumpyOrPandas) -> Tuple[List[str], List[str]]:\n base_dates = get_columns_by_role(train, \"Datetime\", base_date=True)\n datetimes = get_columns_by_role(train, \"Datetime\", base_date=False) + get_columns_by_role(\n train, \"Datetime\", base_date=True, base_feats=True\n )\n\n return base_dates, datetimes",
"def split_by_date(df):\n\n split_date = df.ix['Fahrenheit 9/11'].RelDate\n early = df[df.RelDate < split_date]\n late = df[df.RelDate > split_date]\n\n return early, late",
"def info_date(source_files: AllSourceFilenames = AllSourceFilenames(),\n out_datefirst: OutputCommonData = OutputCommonData(\"cwb.datefirst\"),\n out_datelast: OutputCommonData = OutputCommonData(\"cwb.datelast\"),\n datefrom: AnnotationAllSourceFiles = AnnotationAllSourceFiles(\"[dateformat.out_annotation]:dateformat.datefrom\"),\n dateto: AnnotationAllSourceFiles = AnnotationAllSourceFiles(\"[dateformat.out_annotation]:dateformat.dateto\"),\n timefrom: AnnotationAllSourceFiles = AnnotationAllSourceFiles(\"[dateformat.out_annotation]:dateformat.timefrom\"),\n timeto: AnnotationAllSourceFiles = AnnotationAllSourceFiles(\"[dateformat.out_annotation]:dateformat.timeto\")):\n first_date = None\n last_date = None\n\n for file in source_files:\n from_dates = sorted((int(x[0]), x[1]) for x in datefrom.read_attributes(file, (datefrom, timefrom)) if x[0])\n if from_dates and (first_date is None or from_dates[0] < first_date):\n first_date = from_dates[0]\n to_dates = sorted((int(x[0]), x[1]) for x in dateto.read_attributes(file, (dateto, timeto)) if x[0])\n if to_dates and (last_date is None or to_dates[-1] > last_date):\n last_date = to_dates[-1]\n\n if not first_date or not last_date:\n raise SparvErrorMessage(\"Corpus is configured as having date information, but no dates were found.\")\n\n # Parse and re-format dates (zero-padding dates with less than 8 digits, needed by strptime)\n first_date_d = datetime.strptime(f\"{str(first_date[0]).zfill(8)} {first_date[1]}\", \"%Y%m%d %H%M%S\")\n first_date_formatted = first_date_d.strftime(\"%Y-%m-%d %H:%M:%S\")\n last_date_d = datetime.strptime(f\"{str(last_date[0]).zfill(8)} {last_date[1]}\", \"%Y%m%d %H%M%S\")\n last_date_formatted = last_date_d.strftime(\"%Y-%m-%d %H:%M:%S\")\n\n out_datefirst.write(first_date_formatted)\n out_datelast.write(last_date_formatted)",
"def extract_features_temporal(self, text, expDateStr = None, onsetDateStr = None, refExpDateStr = None, textType='vaers'):\n \n featurelist = []\n \n sentences = util.sentence_tokenize(text)\n taggedSentences = [] \n# id = 0\n for sentnumber, sentence0 in enumerate(sentences):\n \n sentence = self.clean_text(sentence0)\n \n # tokenize each sentence to have a list of words to be processed\n tokens = nltk.word_tokenize(sentence)\n #run the above procedure\n sentence_to_parse = self.get_untagged(tokens)\n \n # Save tagged sentences for later computing of expose date\n taggedSentences.append(sentence_to_parse)\n \n #only if the cleaned sentence is NOT empty we parse it\n if sentence_to_parse!=[]:\n tree = self.cp.parse(sentence_to_parse)\n tree1 = self.cp1.parse(sentence_to_parse)\n \n# new_sentence_to_parse = ','.join([' '.join(nltk.tag.untag(subtree.leaves())) + ' ' for subtree in tree.subtrees() if subtree.node in self.st_filter])\n new_sentence_to_parse = ','.join([' '.join(nltk.tag.untag(subtree.leaves())) + ' ' for subtree in tree.subtrees() if subtree.label() in self.st_filter])\n\n #here we delete the dash and replace it with whitespace to convert post-vac to post vac\n new_sentence_to_parse = new_sentence_to_parse.replace(', ,', ',')\n #here we delete the dash and replace it with whitespace to convert post-vac to post vac\n new_sentence_to_parse = new_sentence_to_parse.replace(',', ', ')\n\n new_sentence_to_parse = nltk.word_tokenize(new_sentence_to_parse)\n\n #run the above procedure\n new_sentence_to_parse = self.get_untagged(new_sentence_to_parse)\n \n if new_sentence_to_parse!=[]:\n tree2 = self.cp.parse(new_sentence_to_parse)\n for subtree in tree2.subtrees():\n if subtree.label() in self.st_filter: \n featString = self.massage_features(subtree)\n featurelist.append((subtree.label(), featString, sentnumber, subtree.leaves()))\n \n for subtree in tree1.subtrees():\n if subtree.label() in self.labels_gram1:\n featString = self.massage_features(subtree)\n featurelist.append((subtree.label(), featString, sentnumber, subtree.leaves()))\n\n self.initialization_text_data(text, sentences, taggedSentences, textType)\n \n featObjList = self.initialize_feature_obj_list(featurelist)\n \n docFeature = self.extract_temporal_info(featObjList, expDateStr, onsetDateStr, refExpDateStr)\n \n return docFeature",
"def get_date_features(gt_ids=[], gt_masks=None, gt_shifts=None, first_year=None):\n # If particular arguments aren't lists, replace with repeating iterators\n if not isinstance(gt_masks, list):\n gt_masks = itertools.repeat(gt_masks)\n if not isinstance(gt_shifts, list):\n gt_shifts = itertools.repeat(gt_shifts)\n\n # Add each ground truth feature to dataframe\n df = None\n for gt_id, gt_mask, gt_shift in zip(gt_ids, gt_masks, gt_shifts):\n print \"Getting {}_shift{}\".format(gt_id, gt_shift)\n t = time.time()\n # Load ground truth data\n gt = get_ground_truth(gt_id, gt_mask, gt_shift)\n # Discard years prior to first_year\n gt = year_slice(gt, first_year = first_year)\n # If lat, lon columns exist, pivot to wide format\n if 'lat' in gt.columns and 'lon' in gt.columns:\n if gt_shift == None:\n measurement_variable = get_measurement_variable(gt_id)\n else:\n measurement_variable = get_measurement_variable(gt_id)+'_shift'+str(gt_shift)\n gt = pd.pivot_table(gt, values=measurement_variable, index='start_date',\n columns=['lat', 'lon']).reset_index()\n gt = pd.DataFrame(gt.to_records())\n gt.drop(\"index\", axis=1, inplace=True)\n # Rename columns to start_date and precip_(27.0,261.0), etc.\n gt.rename(columns={gt.columns[0]: 'start_date'}, inplace=True)\n gt.rename(columns=lambda x: x.replace('(',\n measurement_variable +\n '_('), inplace=True)\n # Use outer merge to include union of start_date values across all features\n # combinations across all features\n df = df_merge(df, gt, on=\"start_date\")\n print \"Elapsed: {}s\".format(time.time() - t)\n\n return df",
"def dates(self):\n pass",
"def merge_additional_features(df):\n col = [\"hour\",\"day\" ,\"dayofweek\", \"month\" , \"interval\" , \"season\", \"time_of_day\"]\n additional_featues = pd.DataFrame(data = [features_from_timestamp(i) for i in df.index ],columns=col).set_index(df.index)\n data = df.merge(additional_featues,on=\"dt\")\n data.sort_index(inplace=True) #make sure data is sorted by date\n\n return data",
"def create_date_feature_daytime(df = None, date = None):\n df[date] = pd.to_datetime(df[date])\n df['dayOfWeek'] = df[date].dt.dayofweek\n df['dayOfMonth'] = df[date].dt.day #???\n df['year'] = df[date].dt.year\n df['month'] = df[date].dt.month\n return df",
"def datetime_columns(df, feature):\r\n df['day'] = pd.to_datetime(df[feature]).dt.day\r\n df['month'] = pd.to_datetime(df[feature]).dt.month\r\n df['year'] = pd.to_datetime(df[feature]).dt.year\r\n return df",
"def date_prediction(config):\n if config['functionality'] == 'best_flights':\n departure_flight_date = date(config['departure_flight']['departure_date'][0],\n config['departure_flight']['departure_date'][1],\n config['departure_flight']['departure_date'][2])\n return_flight_date = date(config['return_flight']['departure_date'][0],\n config['return_flight']['departure_date'][1],\n config['return_flight']['departure_date'][2])\n div = config['prediction_period_days'] / 7\n dates_search = []\n for x in range(0, div + 1):\n dates_search.append(\n [(departure_flight_date + datetime.timedelta(days=x * 7)),\n (return_flight_date + datetime.timedelta(days=x * 7))])\n for i in dates_search:\n i[0] = str(i[0])\n year, month, day = i[0].split(\"-\")\n i[0] = \"%s/%s/%s\" % (day, month, year)\n i[1] = str(i[1])\n year, month, day = i[1].split(\"-\")\n i[1] = \"%s/%s/%s\" % (day, month, year)\n return dates_search\n elif config['functionality'] == 'flight_trends':\n departure_flight_date = date(\n config['departure_flight']['departure_date'][0],\n config['departure_flight']['departure_date'][1],\n config['departure_flight']['departure_date'][2])\n return_flight_date = date(config['return_flight']['departure_date'][0],\n config['return_flight']['departure_date'][1],\n config['return_flight']['departure_date'][2])\n dates_search = []\n for x in range(0, config['prediction_period_days']):\n dates_search.append(\n [(departure_flight_date + datetime.timedelta(days=x)),\n (return_flight_date + datetime.timedelta(days=x))])\n for i in dates_search:\n i[0] = str(i[0])\n year, month, day = i[0].split(\"-\")\n i[0] = \"%s/%s/%s\" % (day, month, year)\n i[1] = str(i[1])\n year, month, day = i[1].split(\"-\")\n i[1] = \"%s/%s/%s\" % (day, month, year)\n return dates_search",
"def getTimePointFeatures(self):\r\n\r\n def quarterToFeature():\r\n quarter = np.asarray([[0] * 4])\r\n if self.month in [12, 1, 2]:\r\n quarter[:, 0] = 1\r\n elif self.month in [3, 4, 5]:\r\n quarter[:, 1] = 1\r\n elif self.month in [6, 7, 8]:\r\n quarter[:, 2] = 1\r\n else:\r\n quarter[:, 3] = 1\r\n return quarter\r\n\r\n # Mon=0 tue=1 wed=2 thu=3 sun=6\r\n def dayToFeature(day):\r\n feature = np.asarray([[0] * 3])\r\n if day == 0 or day == 4:\r\n # Day is Mon or Fri\r\n feature[:, 0] = 1\r\n elif 0 < day < 4:\r\n # Day is Tue, Wed, Thu\r\n feature[:, 1] = 1\r\n else:\r\n # Weekend\r\n feature[:, 2] = 1\r\n return feature\r\n\r\n # Can split time of day as night and 4 halves\r\n def timeToFeature(time):\r\n feature = np.asarray([[0] * 17])\r\n if time >= 22 or time <= 5:\r\n feature[:, 0] = 1\r\n else:\r\n feature[:, time - 5] = 1\r\n return feature\r\n\r\n return np.concatenate((timeToFeature(self.hour).flatten(),\r\n dayToFeature(self.weekDay).flatten(),\r\n quarterToFeature().flatten()))",
"def _detect_columns_to_fold_dates(self):\n result = list()\n for index in range(len(self._column_names)):\n column_name = self._column_names[index]\n # do not want 12 to be parsed as date, minimum length should be 4 (year in YYYY format)\n if len(column_name) >= 4:\n try:\n # for now strict parsing is true, otherwise it'll parse 'year' as valid date.\n # in future, we'll have to specify date formats\n parsed_column_as_date = dateparser.parse(column_name, settings={'STRICT_PARSING': True})\n if parsed_column_as_date:\n # column_name has been parsed as a valid date, it is a candidate for fold\n result.append(index)\n except:\n # something went wrong, doesn't matter what\n pass\n return result",
"def get_mood_data(data):\n matches = re.findall(r'\\d{8} \\([1-5]\\)', data)\n dates_and_moods = []\n\n for match in matches:\n date, mood = match.split()\n date = datetime.strptime(date, '%Y%m%d')\n mood = mood[1]\n dates_and_moods.append((date, mood))\n\n return dates_and_moods",
"def test_dates_and_Datetimes(self):\n if self.skip_tests:\n return\n recipe = (\n self.recipe()\n .dimensions(\"year_by_format\")\n .metrics(\"count\")\n .order_by(\"year_by_format\")\n )\n self.assertRecipeCSV(\n recipe,\n \"\"\"\n year_by_format,count,year_by_format_id\n 2005-01-01 00:00:00,1,2005-01-01 00:00:00\n 2013-01-01 00:00:00,1,2013-01-01 00:00:00\n \"\"\",\n )\n recipe = (\n self.recipe()\n .dimensions(\"year_by_format\")\n .metrics(\"count\")\n .order_by(\"-year_by_format\")\n )\n self.assertRecipeCSV(\n recipe,\n \"\"\"\n year_by_format,count,year_by_format_id\n 2013-01-01 00:00:00,1,2013-01-01 00:00:00\n 2005-01-01 00:00:00,1,2005-01-01 00:00:00\n \"\"\",\n )\n\n # Test a month() conversion\n recipe = (\n self.recipe()\n .dimensions(\"test_month\")\n .metrics(\"age\", \"count\")\n .order_by(\"-test_month\")\n )\n self.assertRecipeCSV(\n recipe,\n \"\"\"\n test_month,age,count,test_month_id\n 2015-05-01,10,1,2015-05-01\n 2015-01-01,5,1,2015-01-01\n \"\"\",\n )",
"def get_dates_from_token_list(txt):\n pattern = r\"(?P<y1>19|20)(?P<y2>[0-9][0-9])(?P<month>0[1-9]|1[012])(?P<day>0[1-9]|[12][0-9]|3[01])$\"\n match_date = np.array([re.match(pattern, word) for word in txt])\n index_dates = np.argwhere(match_date).ravel()\n dates = np.array(txt)[index_dates]\n return index_dates, dates",
"def test_date_accept_date_minus_many_days(self):\n spi_search = \"find date 2011-02-24 - 946\"\n inv_search = \"year:2008-07-23\"\n self._compare_searches(inv_search, spi_search)",
"def __parse_dates(df):\n\t\tdf['release_date'] = pd.to_datetime(df['release_date'])\n\t\tdf['release_date'] = df['release_date'].fillna(df['release_date'].median())\n\t\tdf['year'] = df['release_date'].dt.year\n\t\tdf['month'] = df['release_date'].dt.month\n\t\tdf['day'] = df['release_date'].dt.weekday\n\t\tdf = pd.get_dummies(df, columns=['month', 'day'])\n\t\treturn df",
"def deconstruct_datetime(self, date: datetime) -> List[int]:\n year, month, day, hour, _, _, _, _, _ = date.timetuple()\n return [year, month, day, hour]",
"def get_dates(self):\n now = datetime.now()\n if now.month > 6 and now.month < 9:\n now = datetime(now.year, 6, 1)\n\n term = ReadingsTerm()\n out = list(term.get_year_interval(now)) + [now.month]\n return out",
"def date_parser(dates):\n # extract the date only from dates: Olwethu\n date_list = []\n for i in dates:\n i = i.split(' ')\n # append each date to a new list: Olwethu\n date_list.append(i[0])\n \n return date_list"
] | [
"0.6753624",
"0.61724424",
"0.6038192",
"0.6005854",
"0.598948",
"0.5953757",
"0.5917281",
"0.58833474",
"0.5878825",
"0.5850257",
"0.56891733",
"0.5656723",
"0.5644662",
"0.5643887",
"0.5638506",
"0.55055416",
"0.5479335",
"0.5466917",
"0.54546124",
"0.54105663",
"0.5387948",
"0.5303922",
"0.52709305",
"0.5255113",
"0.5241656",
"0.5237335",
"0.52329797",
"0.5222372",
"0.52174294",
"0.52082676"
] | 0.70033276 | 0 |
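The positive snippet above derives calendar features from a datetime column with pandas. `Series.dt.weekofyear` has since been removed from pandas, so the self-contained variant below swaps in `dt.isocalendar().week`; that substitution and the example data are mine, not part of the original record.

```python
import pandas as pd

def extract_date_features(dataset, date_feature):
    s = dataset[date_feature]
    dataset["dayofmonth"] = s.dt.day
    dataset["dayofyear"] = s.dt.dayofyear
    dataset["dayofweek"] = s.dt.dayofweek
    dataset["month"] = s.dt.month
    dataset["year"] = s.dt.year
    # weekofyear accessor was removed; isocalendar().week is the replacement
    dataset["weekofyear"] = s.dt.isocalendar().week.astype(int)
    dataset["is_month_start"] = s.dt.is_month_start.astype(int)
    dataset["is_month_end"] = s.dt.is_month_end.astype(int)
    return dataset

df = pd.DataFrame({"date": pd.to_datetime(["2021-01-01", "2021-06-30"])})
print(extract_date_features(df, "date"))
```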
(str) -> int sibling returns the next sibling of node, v. | def sibling(self, v):
# method here | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def next_sibling(node):\n try:\n i = node.get_parent().child_nodes.index(node)\n return node.get_parent().get_children()[i+1]\n except(IndexError, AttributeError):\n return None",
"def sibling(self, n):\n parent = self.parent(n)\n if parent is None: # n is root\n return None\n if n == self.left(parent): # n is left child\n return self.right(parent)\n else: # n is right child\n return self.left(parent)",
"def find_sibling(self, hashv):\n address = self.nodes[hashv]['address']\n if address == '':\n return None, None\n addr_n = int(address, 2)\n if addr_n % 2 == 1:\n sibl_n = addr_n - 1\n left_sibl = True\n else:\n sibl_n = addr_n + 1\n left_sibl = False\n sibl_address = int_to_address(sibl_n, len(address))\n if sibl_address in self.addresses:\n return self.addresses[sibl_address], left_sibl\n else:\n return hashv, False",
"def next_sibling(node, name):\n while node.nextSibling is not None:\n node = node.nextSibling\n if node.nodeType == node.ELEMENT_NODE and node.tagName == name:\n return node\n return None",
"def sibling(self, p):\n parent = self.parent(p)\n if parent is None:\n return None\n else:\n if p == self.left(parent):\n return self.right(parent)\n else:\n return self.left(parent)",
"def sibling(self, p):\n parent = self.parent(p)\n if parent is None:\n return None\n else:\n if p == self.left(parent):\n return self.right(parent)\n else:\n return self.left(parent)",
"def sibling(self, p):\n parent = self.parent(p)\n if parent is None:\n return None\n else:\n if p == self.left(parent):\n return self.right(parent)\n return self.left(parent)",
"def sibling(self, p):\n parent = self.parent(p)\n if parent is None: # p must be the root\n return None # root has no sibling\n else:\n if p == self.left(parent):\n return self.right(parent) # possibly None\n else:\n return self.left(parent) # possibly None",
"def getSiblings():",
"def sibling(self, p):\n parent = self.parent(p)\n if parent is None: # p must be the root\n return None # root has no sibling\n else:\n if p == self.left(parent):\n return self.right(parent) # possibly None\n else:\n return self.left(parent) # possibly None",
"def nextSibling(self):\n raise NotImplementedError(\"method must be implemented by subclass\")",
"def sibling(self, node):\n self._validate_node(node)\n if self.is_root(node):\n return None # Root node has no siblings\n parent = self.parent(node)\n if self.num_children(parent) == 1:\n return None # No siblings\n if node is self.left(parent): # If node is left child\n return self.right(parent) # Return right child\n return self.left(parent) # Else return left child",
"def nav_next_sibling(self):\r\n siblings = self.nav_siblings()\r\n next_sibling = None\r\n for i, sibling in enumerate(siblings):\r\n if sibling == self and i < len(siblings) - 1:\r\n next_sibling = siblings[i+1]\r\n return next_sibling",
"def test_getSiblings_nextOnly(self):\n previous, nextious = self.resolver.getSiblings(\n textId=\"urn:cts:latinLit:phi1294.phi002.perseus-lat2\", subreference=\"1.pr\"\n )\n self.assertEqual(\n previous, None,\n \"Previous Should not exist\"\n )\n self.assertEqual(\n nextious, \"1.1\",\n \"Next should be well computed\"\n )",
"def GetNextSibling(self, item):\r\n\r\n i = item\r\n parent = i.GetParent()\r\n \r\n if parent == None:\r\n \r\n # root item doesn't have any siblings\r\n return None\r\n \r\n siblings = parent.GetChildren()\r\n index = siblings.index(i)\r\n \r\n n = index + 1\r\n return (n == len(siblings) and [None] or [siblings[n]])[0]",
"def findNextSibling(self, name=None, attrs={}, text=None, **kwargs):\r\n return self._findOne(self.findNextSiblings, name, attrs, text,\r\n **kwargs)",
"def nextElementSibling(self):\n ret = libxml2mod.xmlNextElementSibling(self._o)\n if ret is None:return None\n __tmp = xmlNode(_obj=ret)\n return __tmp",
"def NextSibling(self):\n ret = libxml2mod.xmlTextReaderNextSibling(self._o)\n return ret",
"def sibling_extract(extracted_tag, next_tag = \"td\", replacement_value = None):\n try:\n # using find_next to find the sibling with the specified tag\n value = extracted_tag.find_next(next_tag).text\n except:\n value = None\n\n return value",
"def get_next(node):\n return node['next']",
"def get_next(self, variant_index=0):\n node = self.next(variant_index)\n if node:\n self.previous()\n return node",
"def sibling(self, p_int, p_int_1): # real signature unknown; restored from __doc__\n return QModelIndex",
"def hasSiblings():",
"def xpathNextFollowingSibling(self, cur):\n if cur is None: cur__o = None\n else: cur__o = cur._o\n ret = libxml2mod.xmlXPathNextFollowingSibling(self._o, cur__o)\n if ret is None:raise xpathError('xmlXPathNextFollowingSibling() failed')\n __tmp = xmlNode(_obj=ret)\n return __tmp",
"def xpathNextPrecedingSibling(self, cur):\n if cur is None: cur__o = None\n else: cur__o = cur._o\n ret = libxml2mod.xmlXPathNextPrecedingSibling(self._o, cur__o)\n if ret is None:raise xpathError('xmlXPathNextPrecedingSibling() failed')\n __tmp = xmlNode(_obj=ret)\n return __tmp",
"def sibling(self, segment):\n return self.__class__(self._url.sibling(_encode_reserved(segment)))",
"def get_sibling(self):\r\n if (not self) or (self.parent is None):\r\n return None\r\n else:\r\n if self is self.parent.left:\r\n return self.parent.right\r\n else:\r\n return self.parent.left",
"def find_next(self, v):\n if v + 1 < self.values[0] or v + 1 > self.values[-1]:\n raise IndexError('element not found')\n\n index = self._bin_search_recursive(v, 0, len(self.values) - 1)\n\n if index < len(self.values) - 1:\n return self.values[index + 1]\n else:\n raise IndexError('element not found')",
"def siblings(self, u):\n if u == self.virtual_root:\n return tuple()\n parent = self.parent(u)\n if self.is_root(u):\n parent = self.virtual_root\n if parent != tskit.NULL:\n return tuple(v for v in self.children(parent) if u != v)\n return tuple()",
"def next(self) -> int:\n node = self.list.pop()\n t = node.right\n while (t):\n self.list.append(t)\n t = t.left\n\n return node.val"
] | [
"0.72265977",
"0.69937146",
"0.6873892",
"0.6821024",
"0.68124634",
"0.68124634",
"0.67741656",
"0.6749736",
"0.67117226",
"0.6628516",
"0.6586244",
"0.65458",
"0.64845926",
"0.64164186",
"0.6274164",
"0.62141037",
"0.6191131",
"0.61486363",
"0.6053174",
"0.60463643",
"0.6020769",
"0.60111976",
"0.5931248",
"0.5921997",
"0.5921723",
"0.5912211",
"0.5904183",
"0.5892985",
"0.58153653",
"0.5780419"
] | 0.7328032 | 0 |
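The positive document in this record is only a stub (`# method here`), so as a point of reference here is a hypothetical, self-contained sketch of the usual binary-tree sibling lookup, in the spirit of the `sibling(self, p)` candidates listed above; the `Node` class and its parent/left/right fields are assumptions, not taken from the stub.

```python
class Node:
    def __init__(self, value, left=None, right=None):
        self.value = value
        self.left = left
        self.right = right
        self.parent = None
        for child in (left, right):
            if child is not None:
                child.parent = self

    def sibling(self):
        # The sibling is the other child of this node's parent, if any.
        if self.parent is None:
            return None  # the root has no sibling
        if self is self.parent.left:
            return self.parent.right
        return self.parent.left

left, right = Node("L"), Node("R")
root = Node("root", left, right)
print(left.sibling().value)  # R
print(root.sibling())        # None
```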
Test that when the user logs in again, the redis cache entry for that user is cleared | def test_redis_cache_updated(self, mocked_complete):
def log_user_again(request, *args, **kwargs): # pylint: disable=unused-argument
"""mock function to login the user again"""
request.user = self.user
return HttpResponse()
mocked_complete.side_effect = log_user_again
con = get_redis_connection("redis")
con.sadd(CACHE_KEY_FAILED_USERS_NOT_TO_UPDATE, self.user.id)
assert con.sismember(CACHE_KEY_FAILED_USERS_NOT_TO_UPDATE, self.user.id) is True
self.client.get(self.url)
assert mocked_complete.call_count == 1
assert con.sismember(CACHE_KEY_FAILED_USERS_NOT_TO_UPDATE, self.user.id) is False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def testClear(self):\n user = User(u'testuser', 'hash', u'fullname',\n u'[email protected]', Role.USER)\n user.objectID = UUID('04585bec-28cf-4a21-bc3e-081f3ed62680')\n user.id = 1\n self.userCache.save(user)\n self.assertNotEqual({}, json.loads(self.cache.get('user:testuser')))\n self.userCache.clear(u'testuser')\n self.assertEqual(None, self.cache.get('user:testuser'))",
"def delete_cached_account(username, registry):\n hmac_secret = registry.settings[\"userid_hmac_secret\"]\n cache_key = utils.hmac_digest(hmac_secret, ACCOUNT_CACHE_KEY.format(username))\n cache = registry.cache\n cache_result = cache.delete(cache_key)\n return cache_result",
"def refresh_cached_account(username, registry):\n settings = registry.settings\n cache_ttl = int(settings.get(\"account_cache_ttl_seconds\", 30))\n cache_key = get_account_cache_key(username, registry)\n cache = registry.cache\n cache_result = cache.expire(cache_key, cache_ttl)\n return cache_result",
"def testDeleteInvalidatesCachedUsers(self):\n cache = UserCache()\n self.users.create([(u'user', u'hash', u'User', u'[email protected]')])\n user = getUser(u'user')\n cache.save(user)\n self.users.delete([u'user'])\n\n cached = cache.get(u'user')\n self.assertIdentical(None, cached.results)\n self.assertEqual(u'user', cached.uncachedValues)",
"def clear_cache():\n run(\"/etc/init.d/memcached restart\")",
"def cleanPMUserCache(cache):\n\n removeUser = []\n now = int(time.time())\n\n for user, utime in cache.items():\n if now > utime:\n log.debug(\"removing author %s from recent list\", user)\n removeUser.append(user)\n\n for ku in removeUser:\n del cache[ku]",
"def delete_cached_reset_password(username, registry):\n hmac_secret = registry.settings[\"userid_hmac_secret\"]\n cache_key = utils.hmac_digest(hmac_secret, ACCOUNT_RESET_PASSWORD_CACHE_KEY.format(username))\n\n cache = registry.cache\n cache_result = cache.delete(cache_key)\n return cache_result",
"def test_user_cache(self):\n original_token = TestExpirableToken(user=self.user)\n token = TestExpirableToken.from_key(original_token.key)\n\n def test_init_cache():\n user = original_token.user\n\n def test_user_cache():\n user = token.user\n\n self.assertNumQueries(0, test_init_cache)\n self.assertNumQueries(0, test_user_cache)",
"def test_cache_clear(self):\n self.cache.set('superman', 'clark kent')\n self.cache.set('recipe', {'sugar': 2, 'wine': 5}, 10)\n self.cache.set('secret', ['remains secret'], 0)\n\n self.cache.clear()\n\n self.assertEqual(self.cache.get('superman'), None)\n self.assertEqual(self.cache.get('recipe'), None)\n self.assertEqual(self.cache.get('secret'), None)\n self.assertEqual(self.cache.get('ghost'), None)",
"def testGetReturnsUncachedUsername(self):\n result = self.userCache.get(u'testuser')\n self.assertIdentical(None, result.results)\n self.assertIdentical(u'testuser', result.uncachedValues)",
"def clear_cache():\n sudo('service varnish restart')",
"def test_clear_cache(self):\n api_helpers.clear_cache()",
"def delete_cached_validation_key(username, registry):\n hmac_secret = registry.settings[\"userid_hmac_secret\"]\n cache_key = utils.hmac_digest(hmac_secret, ACCOUNT_VALIDATION_CACHE_KEY.format(username))\n cache = registry.cache\n cache_result = cache.delete(cache_key)\n return cache_result",
"def testSetInvalidatesCachedUsers(self):\n cache = UserCache()\n self.users.create([(u'user', u'hash', u'User', u'[email protected]')])\n user = getUser(u'user')\n cache.save(user)\n self.users.set([(u'user', u'hash2', u'User2', u'[email protected]',\n Role.USER)])\n\n cached = cache.get(u'user')\n self.assertIdentical(None, cached.results)\n self.assertEqual(u'user', cached.uncachedValues)",
"def clean_cache(user_id, song_id, ple_id):\n\n key = 'ple_{}_{}'.format(user_id, ple_id)\n song_key = 'song_{}'.format(song_id)\n\n cache.delete(key)\n cache.delete(song_key)\n return 0",
"def clear_cache():\n # TODO\n pass",
"def test_sess_cache_no_auto_clear(self):\n assert 0x80 == SESS_CACHE_NO_AUTO_CLEAR",
"def clear_cache(self):\n pass",
"def testCachingGetUserUsesTheCache(self):\n createUser(u'user', u'password', u'User', u'[email protected]')\n user = self.getUser(u'user')\n self.assertIsInstance(user, User)\n\n # Delete the user from the store\n self.store.remove(user)\n user = self.getUser(u'user')\n self.assertIsInstance(user, User)",
"def clear_lockout_counter(cls, user):\r\n try:\r\n entry = LoginFailures.objects.get(user=user)\r\n entry.delete()\r\n except ObjectDoesNotExist:\r\n return",
"def _clear_cache(self):\n self.cache = {}",
"def invalidate_cache(self):\n #self.objects.objects = []\n return True",
"def expire_token(self):\n self.user_in_db = User.users_db.get(self.email)\n\n self.user_in_db.update({'token': ''})\n\n User.users_db.put(self.user_in_db)\n\n return {'success': True}",
"def clear_previous_ministry_login(request, user, *args, **kwargs):\n user.logged_in_as = None\n user.save()",
"def test_patient_in_cache_after_login(self):\n\n patient_id = PatientService().validate_login_info(self.valid_health_card_nb, self.password)\n\n assert(cache.get_from_cache(patient_id) == None)\n\n PatientService().test_and_set_patient_into_cache(patient_id)\n\n patient_obj = cache.get_from_cache(patient_id)\n\n assert (not (patient_obj == None))\n assert (patient_id == patient_obj.get_id())",
"def cache_clear(self):\n\t\tself.__cache = {}",
"def reset_user(self):\n\n if self.resin.auth.is_logged_in():\n self.wipe_application()\n self.resin.models.key.base_request.request(\n 'user__has__public_key', 'DELETE',\n endpoint=self.resin.settings.get('pine_endpoint'), login=True\n )",
"def user_logged_in(\n self, sender, request, user, **kwargs\n ): # pylint: disable=unused-argument\n\n username = user.get_username()\n credentials = get_credentials(username)\n client_str = get_client_str(\n username,\n request.axes_ip_address,\n request.axes_user_agent,\n request.axes_path_info,\n )\n\n log.info(\"AXES: Successful login by %s.\", client_str)\n\n if settings.AXES_RESET_ON_SUCCESS:\n cache_keys = get_client_cache_key(request, credentials)\n for cache_key in cache_keys:\n failures_since_start = self.cache.get(cache_key, default=0)\n self.cache.delete(cache_key)\n log.info(\n \"AXES: Deleted %d failed login attempts by %s from cache.\",\n failures_since_start,\n client_str,\n )",
"def test_delete_from_cache_removes_correctly():\n MEM_CACHE.clear()\n my_accessor = RallyAccessor('uname', 'pword', 'base_url')\n MEM_CACHE['cache_key']['cache_lookup'] = 'some_test_data'\n\n my_accessor.delete_from_cache('cache_key', 'cache_lookup')\n\n assert_equal(MEM_CACHE, {'cache_key': {}})",
"def test_clear(self):\n pkg = make_package()\n key = self.db.redis_key(pkg.filename)\n self.redis[key] = \"foobar\"\n self.db.clear(pkg)\n val = self.redis.get(key)\n self.assertIsNone(val)\n count = self.redis.scard(self.db.redis_set)\n self.assertEqual(count, 0)"
] | [
"0.7129745",
"0.6567952",
"0.6401381",
"0.6393035",
"0.63523793",
"0.6336943",
"0.6291996",
"0.62612945",
"0.6211057",
"0.6142848",
"0.61335856",
"0.6117898",
"0.61128783",
"0.6107032",
"0.60976183",
"0.6087185",
"0.608674",
"0.60846674",
"0.607397",
"0.6073541",
"0.5962341",
"0.5945442",
"0.59324765",
"0.5916521",
"0.59068674",
"0.5904479",
"0.5902423",
"0.58998615",
"0.5895081",
"0.5870896"
] | 0.6671589 | 1 |
Filter detected spots and get coordinates of the remaining spots. In order to make the thresholding robust, it should be applied to a | def spots_thresholding(image, mask_local_max, threshold,
remove_duplicate=True):
# check parameters
stack.check_array(image,
ndim=[2, 3],
dtype=[np.uint8, np.uint16, np.float32, np.float64])
stack.check_array(mask_local_max,
ndim=[2, 3],
dtype=[bool])
stack.check_parameter(threshold=(float, int, type(None)),
remove_duplicate=bool)
if threshold is None:
mask = np.zeros_like(image, dtype=bool)
spots = np.array([], dtype=np.int64).reshape((0, image.ndim))
warnings.warn("No spots were detected (threshold is {0})."
.format(threshold),
UserWarning)
return spots, mask
    # remove peaks with a low intensity
mask = (mask_local_max & (image > threshold))
if mask.sum() == 0:
spots = np.array([], dtype=np.int64).reshape((0, image.ndim))
return spots, mask
# make sure we detect only one coordinate per spot
if remove_duplicate:
# when several pixels are assigned to the same spot, keep the centroid
cc = label(mask)
local_max_regions = regionprops(cc)
spots = []
for local_max_region in local_max_regions:
spot = np.array(local_max_region.centroid)
spots.append(spot)
spots = np.stack(spots).astype(np.int64)
        # build the mask again
mask = np.zeros_like(mask)
mask[spots[:, 0], spots[:, 1]] = True
else:
# get peak coordinates
spots = np.nonzero(mask)
spots = np.column_stack(spots)
# case where no spots were detected
if spots.size == 0:
warnings.warn("No spots were detected (threshold is {0})."
.format(threshold),
UserWarning)
return spots, mask | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _detect_spots_from_images(images, threshold=None, remove_duplicate=True,\n return_threshold=False, voxel_size_z=None,\n voxel_size_yx=100, psf_z=None, psf_yx=200):\n # initialization\n sigma = stack.get_sigma(voxel_size_z, voxel_size_yx, psf_z, psf_yx)\n n = len(images)\n\n # apply LoG filter and find local maximum\n images_filtered = []\n pixel_values = []\n masks = []\n for image in images:\n # filter image\n image_filtered = stack.log_filter(image, sigma)\n images_filtered.append(image_filtered)\n\n # get pixels value\n pixel_values += list(image_filtered.ravel())\n\n # find local maximum\n mask_local_max = local_maximum_detection(image_filtered, sigma)\n masks.append(mask_local_max)\n\n # get optimal threshold if necessary based on all the images\n if threshold is None:\n\n # get threshold values we want to test\n thresholds = _get_candidate_thresholds(pixel_values)\n\n # get spots count and its logarithm\n all_value_spots = []\n minimum_threshold = float(thresholds[0])\n for i in range(n):\n image_filtered = images_filtered[i]\n mask_local_max = masks[i]\n spots, mask_spots = spots_thresholding(\n image_filtered, mask_local_max,\n threshold=minimum_threshold,\n remove_duplicate=False)\n value_spots = image_filtered[mask_spots]\n all_value_spots.append(value_spots)\n all_value_spots = np.concatenate(all_value_spots)\n thresholds, count_spots = _get_spot_counts(thresholds, all_value_spots)\n\n # select threshold where the kink of the distribution is located\n if count_spots.size > 0:\n threshold, _, _ = _get_breaking_point(thresholds, count_spots)\n\n # detect spots\n all_spots = []\n for i in range(n):\n\n # get images and masks\n image_filtered = images_filtered[i]\n mask_local_max = masks[i]\n\n # detection\n spots, _ = spots_thresholding(image_filtered, mask_local_max,\n threshold, remove_duplicate)\n all_spots.append(spots)\n\n # return threshold or not\n if return_threshold:\n return all_spots, threshold\n else:\n return all_spots",
"def _detect_spots(self, detector=LocalMax, **kwargs):\n if self._verbose > 0:\n print(\"Detecting...\", end=\"\")\n\n spots = detector(**kwargs).locate(self.image_filtered)\n\n # Spots are identified by their position:\n self.spots = [Spot(tuple(s)) for s in spots]\n if self._verbose > 0:\n print('%i spots detected.' % len(self.spots))",
"def detect_points(self):\r\n\r\n\t\r\n\r\n\t\tfeature_mask = np.zeros_like(self.gray) ## Create a mask so we only look for template features in the ROI\r\n\t\t\r\n\t\tfeature_mask[max(0,self.bb[1]):min(360,self.bb[1] + self.bb[3]),max(0,self.bb[0]):min(640,self.bb[0] + self.bb[2])] = 255\r\n\r\n\t\t# search for good points\r\n\t\tfeatures = cv2.goodFeaturesToTrack(self.gray, mask = feature_mask, **feature_params)\r\n\t\t# refine the corner locations\r\n\t\tcv2.cornerSubPix(self.gray,features, **subpix_params)\r\n\r\n\t\tself.features = features\r\n\r\n\t\tself.tracks = [[p] for p in features.reshape((-1,2))]\r\n\r\n\t\tself.prev_gray = self.gray",
"def paintings_detection(query_image, mask):\n\n image = cv2.imread(query_image)\n\n image_width = mask.shape[0]\n image_height = mask.shape[1]\n x_box_1, y_box_1, w_box_1, h_box_1, x_box_2, y_box_2, w_box_2, h_box_2 = 0, 0, 0, 0, 0, 0, 0, 0, \n\n contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2:]\n\n for cnt in contours:\n x, y, w, h = cv2.boundingRect(cnt)\n \n if (w > 0.15 * image_width) & (h > 0.15 * image_height) & (w < 0.98 * image_width) & (x_box_1 == 0):\n x_box_1, y_box_1, w_box_1, h_box_1 = x, y, w, h\n elif (w > 0.15 * image_width) & (h > 0.15 * image_height) & (w < 0.98 * image_width) & (x_box_1 != 0):\n x_box_2, y_box_2, w_box_2, h_box_2 = x, y, w, h\n\n if x_box_2 == 0:\n x_value_to_split = 0\n else:\n x_value_to_split = (x_box_1 + w_box_1/2 + x_box_2 + w_box_2/2) / 2\n\n\n return(x_value_to_split)",
"def segment_and_find_positions(self):\n initial_image = self.data\n xdim = self.data.shape[0]\n\n ydim = self.data.shape[1]\n downsized_image = transform.resize(\n initial_image,\n (xdim / DOWNSCALING_FACTOR, ydim / DOWNSCALING_FACTOR),\n mode=\"constant\",\n )\n rescaled_image = exposure.rescale_intensity(downsized_image)\n print(\"Starting Canny filtering\")\n g_edges = skimage.feature.canny(\n rescaled_image,\n sigma=self.canny_sigma,\n low_threshold=self.canny_low_threshold,\n )\n print(\"Starting dilation\")\n dilation = morphology.dilation(g_edges, morphology.disk(3))\n print(\"Starting erosion\")\n eroded = morphology.erosion(dilation, morphology.disk(4))\n dilation = morphology.dilation(\n eroded, morphology.diamond(4)\n ) # Dont change to disk\n print(\"Starting to remove small holes\")\n filled = morphology.remove_small_holes(\n dilation, area_threshold=self.remove_small_holes_area_threshold\n )\n print(\"Starting erosion\")\n eroded = morphology.erosion(filled, morphology.diamond(3))\n print(\"Applying filters\")\n filtered_image = eroded\n if self.colony_filters_dict is not None:\n for filter_name in self.colony_filters_dict.keys():\n filtered_image = segmentation_filters.apply_filter(\n filter_name, filtered_image, self.colony_filters_dict[filter_name]\n )\n\n colony_edges = morphology.dilation(feature.canny(filtered_image, 0.01))\n print(\"Starting outlining\")\n outline = downsized_image.copy()\n outline[colony_edges] = 65535\n distance = ndimage.distance_transform_edt(filtered_image)\n smoothed_well = ndimage.gaussian_filter(downsized_image, 0.35)\n outline.copy()\n objs, num_objs = ndimage.label(filtered_image)\n print(\"Applying filters for points\")\n if self.mode == \"A\":\n # point selection: Smoothest point in the center region\n for obj in range(1, num_objs + 1):\n print(\"On object {} of {}\".format(obj, num_objs))\n mask = objs == obj\n dist_mask = distance * mask\n # for each colony,\n # find the maximum distance from the two fold distance map.\n # The edge is at 0% and the center of the colony is at 100%\n d_max = dist_mask.max()\n # Getting the points which is at least 40% away from the edge\n top_percent = dist_mask > (d_max * 0.40)\n colony_mask = smoothed_well * top_percent\n colony_edges = feature.canny(colony_mask, 0.1)\n # applying the second distance transform\n # to find the smoothest point in the correct region\n inner_edges = ndimage.distance_transform_edt(\n ~colony_edges * top_percent\n )\n smooth_point = numpy.where(inner_edges == inner_edges.max())\n smooth_point = (smooth_point[0][0], smooth_point[1][0])\n smooth_point_corrected = (\n smooth_point[0] * DOWNSCALING_FACTOR,\n smooth_point[1] * DOWNSCALING_FACTOR,\n )\n self._point_locations.append(smooth_point_corrected)\n elif self.mode == \"C\":\n for obj in range(1, num_objs + 1):\n print(\"On object {} of {}\".format(obj, num_objs))\n mask = objs == obj\n dist_mask = distance * mask\n # point selection: edge, ridge & center respectively\n self.get_mode_c_points(dist_mask, 0, 0.03)\n self.get_mode_c_points(dist_mask, 0.15, 0.20)\n self.get_mode_c_points(dist_mask, 0.90, 0.99)",
"def pts_filter_color(points):\n pts = np.array(points).tolist()\n # Get rid of all points behind camera\n pts_fil = []\n for pt in pts:\n if pt[2] > 0: \n pts_fil.append(pt)\n \n # get pix size for x distance\n pts_col = []\n for point in pts_fil: \n x = point[0]\n y = point[1]\n z = point[2]\n pix_width = (2 * z * np.tan(fov_width/2))/img_width\n pix_height = (2 * z * np.tan(fov_height/2))/img_height\n # Get row and column coordinates\n y_mod = img_width/2 + y/pix_height + height_offset\n x_mod = img_height/2 - x/pix_width + width_offset\n row = int(y_mod)\n col = int(x_mod)\n # Check if point is inside image bounds\n if 0 <= col < img_msg_now.width and 0 <= row < img_msg_now.height: \n rgb = img[row][col] # Get color of that row and column\n pts_col.append(point + rgb)",
"def detectSpots(img, detectSpotsParameter = None, correctIlluminationParameter = None, removeBackgroundParameter = None,\n filterDoGParameter = None, findExtendedMaximaParameter = None, detectCellShapeParameter = None,\n verbose = False, out = sys.stdout, **parameter):\n\n timer = Timer();\n \n # normalize data -> to check\n #img = img.astype('float');\n #dmax = 0.075 * 65535;\n #ids = img > dmax;\n #img[ids] = dmax;\n #img /= dmax; \n #out.write(timer.elapsedTime(head = 'Normalization'));\n #img = dataset[600:1000,1600:1800,800:830];\n #img = dataset[600:1000,:,800:830];\n \n # correct illumination\n correctIlluminationParameter = getParameter(detectSpotsParameter, \"correctIlluminationParameter\", correctIlluminationParameter);\n img1 = img.copy();\n img1 = correctIllumination(img1, correctIlluminationParameter = correctIlluminationParameter, verbose = verbose, out = out, **parameter) \n\n # background subtraction in each slice\n #img2 = img.copy();\n removeBackgroundParameter = getParameter(detectSpotsParameter, \"removeBackgroundParameter\", removeBackgroundParameter);\n img2 = removeBackground(img1, removeBackgroundParameter = removeBackgroundParameter, verbose = verbose, out = out, **parameter) \n \n # mask\n #timer.reset();\n #if mask == None: #explicit mask\n # mask = img > 0.01;\n # mask = binary_opening(mask, self.structureELement('Disk', (3,3,3)));\n #img[img < 0.01] = 0; # masking in place # extended maxima\n #out.write(timer.elapsedTime(head = 'Mask')); \n \n #DoG filter\n filterDoGParameter = getParameter(detectSpotsParameter, \"filterDoGParameter\", filterDoGParameter);\n dogSize = getParameter(filterDoGParameter, \"size\", None);\n #img3 = img2.copy(); \n img3 = filterDoG(img2, filterDoGParameter = filterDoGParameter, verbose = verbose, out = out, **parameter);\n \n # normalize \n # imax = img.max();\n # if imax == 0:\n # imax = 1;\n # img /= imax;\n \n # extended maxima\n findExtendedMaximaParameter = getParameter(detectSpotsParameter, \"findExtendedMaximaParameter\", findExtendedMaximaParameter);\n hMax = getParameter(findExtendedMaximaParameter, \"hMax\", None);\n imgmax = findExtendedMaxima(img3, findExtendedMaximaParameter = findExtendedMaximaParameter, verbose = verbose, out = out, **parameter);\n \n #center of maxima\n if not hMax is None:\n centers = findCenterOfMaxima(img, imgmax, verbose = verbose, out = out, **parameter);\n else:\n centers = findPixelCoordinates(imgmax, verbose = verbose, out = out, **parameter);\n \n #cell size detection\n detectCellShapeParameter = getParameter(detectSpotsParameter, \"detectCellShapeParameter\", detectCellShapeParameter);\n cellShapeThreshold = getParameter(detectCellShapeParameter, \"threshold\", None);\n if not cellShapeThreshold is None:\n \n # cell shape via watershed\n imgshape = detectCellShape(img2, centers, detectCellShapeParameter = detectCellShapeParameter, verbose = verbose, out = out, **parameter);\n \n #size of cells \n csize = findCellSize(imgshape, maxLabel = centers.shape[0], out = out, **parameter);\n \n #intensity of cells\n cintensity = findCellIntensity(img, imgshape, maxLabel = centers.shape[0], verbose = verbose, out = out, **parameter);\n\n #intensity of cells in background image\n cintensity2 = findCellIntensity(img2, imgshape, maxLabel = centers.shape[0], verbose = verbose, out = out, **parameter);\n \n #intensity of cells in dog filtered image\n if dogSize is None:\n cintensity3 = cintensity2;\n else:\n cintensity3 = findCellIntensity(img3, imgshape, maxLabel = centers.shape[0], verbose = verbose, out 
= out, **parameter);\n \n if verbose:\n out.write(timer.elapsedTime(head = 'Spot Detection') + '\\n');\n \n #remove cell;s of size 0\n idz = csize > 0;\n \n return ( centers[idz], numpy.vstack((cintensity[idz], cintensity3[idz], cintensity2[idz], csize[idz])).transpose()); \n \n \n else:\n #intensity of cells\n cintensity = findIntensity(img, centers, verbose = verbose, out = out, **parameter);\n\n #intensity of cells in background image\n cintensity2 = findIntensity(img2, centers, verbose = verbose, out = out, **parameter);\n \n #intensity of cells in dog filtered image\n if dogSize is None:\n cintensity3 = cintensity2;\n else:\n cintensity3 = findIntensity(img3, centers, verbose = verbose, out = out, **parameter);\n\n if verbose:\n out.write(timer.elapsedTime(head = 'Spot Detection') + '\\n');\n \n return ( centers, numpy.vstack((cintensity, cintensity3, cintensity2)).transpose());",
"def peak_finder(filt_im, dist, threshold):\n from skimage.feature import peak_local_max\n coordinates = peak_local_max(filt_im, min_distance=dist, threshold_abs=threshold)\n return coordinates",
"def detect(image):\n markers = []\n # Stage 1: Detect edges in image\n gray = cvtColor(image, COLOR_BGR2GRAY)\n clahe = createCLAHE(clipLimit=1, tileGridSize=(6, 6))\n cl1 = clahe.apply(gray)\n _, thresh = threshold(cl1, 60, 255, THRESH_OTSU)\n blurred = GaussianBlur(thresh, (5, 5), 0)\n edges = Canny(blurred, 75, 100)\n\n # Stage 2: Find contours\n contours = findContours(edges, RETR_TREE, CHAIN_APPROX_SIMPLE)\n contours = sorted(contours, key=contourArea, reverse=True)[:]\n\n for contour in contours:\n # Stage 3: Shape check\n perimeter = arcLength(contour, True)\n approx = approxPolyDP(contour, 0.01*perimeter, True)\n\n if len(approx) == QUADRILATERAL_POINTS:\n area = contourArea(approx)\n # (x, y, w, h) = boundingRect(approx)\n # ar = float(h) / float(w)\n # if area > 100 and ar >= 0.8 and ar <= 1.2:\n if area > 700:\n # putText(image, str(area), (10, 30), FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)\n drawContours(image, [contour], -1, (0, 255, 0), 1)\n\n # Stage 4: Perspective warping\n topdown_quad = get_topdown_quad(thresh, approx.reshape(4, 2))\n\n # Stage 5: Border check\n if topdown_quad[int((topdown_quad.shape[0]/100.0)*5), int((topdown_quad.shape[1]/100.0)*5)] > BLACK_THRESHOLD:\n continue\n\n # Stage 6: Get marker pattern\n marker_pattern = None\n\n try:\n marker_pattern = get_marker_pattern(topdown_quad, THRESHOLD_PERCENT)\n except:\n continue\n\n if not marker_pattern:\n continue\n\n # Stage 7: Match marker pattern\n marker_found, marker_rotation, marker_name = match_marker_pattern(marker_pattern)\n\n if marker_found:\n markers.append([marker_name, marker_rotation])\n\n return markers, image",
"def apply_tracking2(td, num_spikes=20, alpha=0.5, threshold=-1):\n assert (alpha >= 0)\n assert (alpha <= 1)\n mix = 1 - alpha\n track_x = center_x = float(td.width / 2)\n track_y = center_y = float(td.height / 2)\n threshold_sq = math.floor(center_y ** 2)\n\n if threshold > 0:\n threshold_sq = math.floor(threshold ** 2)\n\n copy = np.copy(td.data).view(np.recarray)\n offset_x_arr = np.zeros(copy.size, np.float32)\n offset_y_arr = np.zeros(copy.size, np.float32)\n\n for spike_index in range(0, copy.size, num_spikes):\n frame_data = copy[spike_index:spike_index + num_spikes]\n distances = ((frame_data.x - track_x) ** 2) + (\n (frame_data.y - track_y) ** 2)\n valid_data = frame_data[distances < threshold_sq]\n\n if valid_data.size > 0:\n x_avg = float(np.sum(valid_data.x)) / valid_data.size\n y_avg = float(np.sum(valid_data.y)) / valid_data.size\n track_x = (track_x * alpha) + (x_avg * mix)\n track_y = (track_y * alpha) + (y_avg * mix)\n offset_x = int(round(center_x - track_x))\n offset_y = int(round(center_y - track_y))\n offset_x_arr[spike_index:spike_index + num_spikes] = offset_x\n offset_y_arr[spike_index:spike_index + num_spikes] = offset_y\n\n offset_x_arr[spike_index:] = offset_x\n offset_y_arr[spike_index:] = offset_y\n copy.x = (copy.x + offset_x_arr).astype(np.uint8)\n copy.y = (copy.y + offset_y_arr).astype(np.uint8)\n # remove the events that are out of bounds\n return copy[(copy.x >= 0) & (copy.y >= 0) & (copy.x < td.width) & (\n copy.y < td.height)]",
"def locate_tracker(self, debug):\n\n # tmp_image =\n # tmp_image = cv2.GaussianBlur(self.frame, (11, 11), 0) # Experiment with this\n\n hsv = cv2.cvtColor(self.frame, cv2.COLOR_BGR2HSV) # Convert to HSV Color Space. This is temporary for testing using colored objects)\n\n mask = cv2.inRange(hsv, self.hueLower, self.hueUpper)\n\n try:\n mask = cv2.inRange(hsv, self.hueLower2, self.hueUpper2) + mask\n except AttributeError:\n pass\n\n mask = cv2.erode(mask, None, iterations=2)\n mask = cv2.dilate(mask, None, iterations=2)\n\n if debug:\n tmpMask = imutils.resize(mask, width=1000, height=1000)\n cv2.imshow(\"mask\", tmpMask)\n\n\n # find contours in the mask and initialize the current (x, y) center of the object\n cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]\n center = None\n\n # only proceed if at least one contour was found\n if len(cnts) > 0:\n # find the largest contour in the mask, then use\n # it to compute the minimum enclosing circle and\n # centroid\n c = max(cnts, key=cv2.contourArea)\n\n ((x, y), radius) = cv2.minEnclosingCircle(c)\n M = cv2.moments(c)\n center = (int(M[\"m10\"] / M[\"m00\"]), int(M[\"m01\"] / M[\"m00\"]))\n\n # only proceed if the radius meets a minimum size\n # if radius > 10:\n # # draw the circle and centroid on the frame,\n # # then update the list of tracked points\n # cv2.circle(frame, (int(x), int(y)), int(radius),\n # (0, 255, 255), 2)\n # cv2.circle(frame, center, 5, (0, 0, 255), -1)\n if debug:\n cv2.drawContours(self.frame, c, -1, (0, 255, 0), 20)\n return center, radius\n # update the points queue\n cv2.imshow(\"mask\", imutils.resize(mask, width=1000, height=1000))\n cv2.imshow(\"frame\", imutils.resize(self.frame, width=1000, height=1000))\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n raise OpenCVError(\"Could not find tracker!\")\n\n # return (1, 1), 1",
"def take_contours(self):\n gray = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)\n blur = cv2.GaussianBlur(gray, (5, 5), 0)\n ret, self.th = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n im2, contours, hierarchy = cv2.findContours(self.th, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n return [contour for contour in contours if 1000 < cv2.contourArea(contour) < 150000]",
"def _collect_points(self, image, point_value=0):\n return zip(*np.where(image == point_value))",
"def find_tfl_lights(image: np.ndarray):\n kernel = np.array(\n [[0, 0, 0],\n [0, 0, 0],\n [0, 1, 0],\n [1, 3, 1],\n [0, 1, 0]])\n\n kernel = kernel - kernel.mean()\n\n red_image = image.copy()\n red_image = red_image[:, :, 0]\n _, red_image = cv2.threshold(red_image, 200, 255, cv2.THRESH_BINARY)\n output = cv2.filter2D(red_image, -1, kernel)\n output_copy = output.copy()\n output = ndimage.maximum_filter(output, size=30)\n output = output - output_copy\n mask = ((output == 0) & (output_copy > 0))\n red_points = np.where(mask)\n positions = []\n final_red_points = []\n for point1 in range(len(red_points[0])):\n point = (red_points[0][point1], red_points[1][point1])\n pixel = image[point[0], point[1]]\n if (pixel[1] < 170 or pixel[2] < 120) and pixel[0] >= 200:\n final_red_points.append(point)\n final_red_points = filter_points(final_red_points)\n positions += final_red_points\n auxilary = ['r'] * len(positions)\n red_x = [val[1] for val in final_red_points]\n red_y = [val[0] for val in final_red_points]\n green_image = image.copy()\n green_image = green_image[:, :, 1]\n _, green_image = cv2.threshold(green_image, 190, 255, cv2.THRESH_BINARY)\n output = cv2.filter2D(green_image, -1, kernel)\n output_copy = output.copy()\n output = ndimage.maximum_filter(output, size=30)\n output = output - output_copy\n mask = ((output == 0) & (output_copy > 0))\n green_points = np.where(mask)\n final_green_points = []\n for point1 in range(len(green_points[0])):\n point = (green_points[0][point1], green_points[1][point1])\n pixel = image[point[0], point[1]]\n if pixel[0] <= 180 and pixel[1] >= 220 and pixel[2] >= 160:\n final_green_points.append(point)\n\n final_green_points = filter_points(final_green_points)\n positions += final_green_points\n auxilary += ['g'] * len(final_green_points)\n green_x = [val[1] for val in final_green_points]\n green_y = [val[0] for val in final_green_points]\n print(f\"There are {len(green_x) + len(red_x)} points\")\n return positions, auxilary",
"def get_pos(self, frame):\n frame = self.perspective_shift(frame)\n \n puck_mask = self.color_mask(frame, self.color_green, thresh=15)\n striker_mask = self.color_mask(frame, self.color_orange, thresh=25, blur=5)\n \n puck_loc, _ = self.find_centroids(puck_mask)\n striker_locs, _ = self.find_centroids(striker_mask, 2)\n \n p_pos = self.abs_to_meter(puck_loc[0])\n # cases: (pos,pos), (pos,None), (None,None)\n if striker_locs[0] is not None:\n pos_1 = self.abs_to_meter(striker_locs[0])\n pos_2 = self.abs_to_meter(striker_locs[1])\n s1_pos = pos_1 if pos_1[1]<0 else pos_2\n s2_pos = pos_2 if pos_1[1]<0 else pos_1\n else:\n s1_pos, s2_pos = None, None \n \n return [p_pos, s1_pos, s2_pos]",
"def extract_blobs_closest_points(this_robot, in_image, active_mask):\n\n out_image = PointSampleImage(in_image.calib_array, in_image.neighbour_array)\n\n G = nx.Graph()\n\n # First add all nodes, where each node consists of an index into\n # calib_array for one of the active pixels.\n for i in range(in_image.n_rows):\n G.add_node(i)\n\n # We will add edges between neighbouring pixels. See\n # sensors/pointsamplecam for the definition of neighbouring.\n node_list = G.nodes()\n n = len(node_list)\n for i in range(n):\n if in_image.masks[i] & active_mask != 0:\n (ixi, iyi) = in_image.calib_array[i,0], in_image.calib_array[i,1]\n for j in in_image.neighbour_array[i]:\n if in_image.masks[j] & active_mask != 0:\n G.add_edge(i, j)\n\n clusters = nx.connected_component_subgraphs(G, copy=False)\n n_clusters = 0\n for cluster in clusters:\n n_clusters += 1\n # Find the closest pixel to the robot in this cluster. \n closest_i = None\n closest_distance = float('inf')\n for i in cluster.nodes():\n #(xr, yr) = in_image.calib_array[i,2], in_image.calib_array[i,3]\n #d = sqrt(xr*xr + yr*yr)\n\n # The pre-computed distance sqrt(xr*xr + yr*yr)\n d = in_image.calib_array[i,5]\n\n if d < closest_distance:\n closest_i = i\n closest_distance = d\n if closest_i != None:\n out_image.masks[closest_i] = in_image.masks[closest_i]\n\n return out_image",
"def clean_detections(npts, on_off):\n on = on_off[:,0]\n off = on_off[:,1]\n idx_on = [on[0]]\n idx_off = [off[0]]\n lowest_idx = on[0]\n\n for ion, ioff in zip(on, off):\n if ion > lowest_idx + npts:\n idx_on.append(ion)\n idx_off.append(ioff)\n lowest_idx = ion\n\n return np.asarray((idx_on, idx_off)).T",
"def sanitize_mask(orig_x, orig_y, mask):\n contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n\n # Draw contours:\n cv2.drawContours(mask, contours, 0, (0, 255, 0), 2)\n # Calculate image moments of the detected contour\n num_objects = (len(contours))\n #threshold\n threshold = 3\n\n center_list = []\n # print(num_objects)\n if num_objects > 1:\n for item in range(num_objects):\n M = cv2.moments(contours[item])\n try:\n center_x = round(M['m10'] / M['m00'])\n center_y = round(M['m01'] / M['m00'])\n center_list.append([center_y , center_x ])\n except:\n pass\n\n # initialize retmask\n retmask = mask\n if num_objects > 1:\n for x, y in center_list:\n if orig_x - threshold <= x <= orig_x + threshold and orig_y - threshold <= y <= orig_y + threshold:\n pass\n else:\n def dfs_removal(px , py, mask):\n R = len(mask)\n C = len(mask[0])\n if mask[px][py ] != 255: \n return\n mask[px][py] = 0\n if 0 <= px - 1 and mask[px - 1][py ] == 255: dfs_removal(px - 1 , py , mask)\n if px + 1 < R and mask[px + 1][py ] == 255: dfs_removal(px + 1 , py , mask)\n if 0 <= py - 1 and mask[px][py - 1] == 255: dfs_removal(px, py -1 , mask)\n if py + 1 < C and mask[px][py + 1] == 255: dfs_removal(px, py + 1 , mask)\n\n dfs_removal(x,y, mask)\n\n return retmask",
"def filter_images(self, images):\n status = self.day_or_night(images[0][1],\n self.gray_refs['day'][0],\n self.gray_refs['night'][0])\n print status\n exclusions = self.gray_refs[status]\n threshold = 0.7\n last_ref = None\n result = []\n\n for filename, gray_img, raw_img in images:\n skip = False\n if last_ref:\n dist = ssim(gray_img, exclusions[last_ref], multichannel=False)\n if dist > threshold:\n skip = True\n\n if not skip:\n for i, gray_ref in enumerate(exclusions):\n if i == last_ref:\n continue\n dist = ssim(gray_img, gray_ref, multichannel=False)\n if dist > threshold:\n skip = True\n last_ref = i\n break\n\n if not skip:\n if (time.time() - self.last_notify) > notify_thresh:\n send_alert('Alert! Motion detected near front door.')\n self.last_notify = time.time()\n result.append((filename, gray_img, raw_img))\n return result",
"def detect(self, source, target):\n \n movementLocations = []\n # Generate work image by blurring.\n self.workImg = cv2.blur(source, self.kSize)\n # Generate moving average image if needed\n if self.movingAvgImg == None:\n self.movingAvgImg = numpy.float32(self.workImg)\n # Generate moving average image\n cv2.accumulateWeighted(self.workImg, self.movingAvgImg, self.alpha)\n self.diffImg = cv2.absdiff(self.workImg, cv2.convertScaleAbs(self.movingAvgImg))\n # Convert to grayscale\n self.grayImg = cv2.cvtColor(self.diffImg, cv2.COLOR_BGR2GRAY)\n # Convert to BW\n return_val, self.grayImg = cv2.threshold(self.grayImg, self.blackThreshold, 255, cv2.THRESH_BINARY)\n # Apply ignore mask\n if self.ignoreMask != None:\n self.grayImg = numpy.bitwise_and(self.grayImg, self.ignoreMask) \n # Total number of changed motion pixels\n self.motionPercent = 100.0 * cv2.countNonZero(self.grayImg) / self.totalPixels\n # Detect if camera is adjusting and reset reference if more than maxChange\n if self.motionPercent > self.maxChange:\n self.logger.debug(\"%3.1f%% motion detected, resetting reference image\" % self.motionPercent) \n self.movingAvgImg = numpy.float32(self.workImg)\n movementLocations = self.contours(self.grayImg)\n # Mark objects (make sure to copy target image if you want to keep original image intact)\n if self.markObjects == True:\n self.mark(source, target, movementLocations, self.widthMultiplier, self.heightMultiplier, self.boxColor)\n if self.ignoreAreas != None: \n self.mark(source, target, self.ignoreAreas, self.widthMultiplier, self.heightMultiplier, self.ignoreAreasBoxColor)\n # Return filtered results\n return movementLocations",
"def find_gate_posts(img, display_results=False):\n\n greyscale_image = cv2.cvtColor(img.astype(np.uint8), cv2.COLOR_GRAY2BGR)\n cm_image = cv2.applyColorMap(greyscale_image, cv2.COLORMAP_VIRIDIS)\n\n kernel = np.ones((5, 5), np.uint8)\n\n # cm_image = cv2.erode(cm_image, kernel, iterations=1)\n kernel = np.ones((5, 5), np.uint8)\n cm_image = cv2.dilate(cm_image, kernel, iterations=3)\n kernel = np.ones((4, 4), np.uint8)\n cm_image = cv2.erode(cm_image, kernel, iterations=1)\n\n cm_image = cv2.medianBlur(cm_image, 5) # Removes salt and pepper noise\n\n cm_copy_image = cm_image\n cv2.copyTo(cm_image, cm_copy_image)\n\n mask = mask_sonar_image(cm_image, display_results)\n\n cm_circles = cv2.findContours(mask, cv2.RETR_LIST,\n cv2.CHAIN_APPROX_SIMPLE)[-2]\n\n cm_circles = list(filter(lambda x: (cv2.contourArea(x) > 200\n and cv2.contourArea(x) < 5000),\n cm_circles))\n cm_circles = sorted(cm_circles,\n key=lambda x: (arc_circ(x)),\n reverse=False)\n\n cm_circles = list(filter(lambda x: (cv2.arcLength(x, True)**2/(4\n * math.pi*cv2.contourArea(x)) > 2.5), cm_circles))\n\n if len(cm_circles) < 1:\n print(\"Not enough circles found\")\n return None\n\n filtered_circles = cm_circles[0:1]\n\n circle_positions = []\n for circle in filtered_circles: # find center of circle code\n M = cv2.moments(circle)\n cX = int(M[\"m10\"] / M[\"m00\"])\n cY = int(M[\"m01\"] / M[\"m00\"])\n circle_positions.append((cX, cY, arc_circ(circle), cv2.arcLength(\n circle, True)**2/(4*math.pi*cv2.contourArea(circle))))\n\n if display_results:\n cv2.drawContours(cm_copy_image, filtered_circles, -1, (0, 255, 0), 2)\n cv2.imshow(\"found_gate_posts\", cm_copy_image)\n cv2.waitKey(0)\n\n return circle_positions",
"def _preprocessing(self):\n if self.resize:\n self.click_list = self._remapping_coord(self.click_list,\n self.input_size,\n self.orig_size)\n clickers = self._get_clickers(self.click_list)\n clicks_list = clickers.get_clicks()\n clicks_lists = self._points_transform([clicks_list], self.image_width)\n points_nd = self._get_points_nd(clicks_lists, self.net_clicks_limit)\n return points_nd",
"def apply_threshold(outputs, frame, threshold):\n image = frame\n frame_height, frame_width = frame.shape[:-1]\n current_count = 0\n for output in outputs:\n if output[2] > threshold:\n ### draw bounding box around person\n start_point = ( int(output[3] * frame_width), int(output[4] * frame_height) )\n end_point = ( int(output[5] * frame_width), int(output[6] * frame_height) )\n image = cv2.rectangle(frame, start_point, end_point, (13,255,0), 2)\n # count the number of detected people\n current_count += 1\n return image, current_count",
"def remove_outside_points(points, world_cam, cam_img, image_shape):\n pts_cam = DataProcessing.world2cam(points[:, :3], world_cam)\n pts_img, depth = DataProcessing.cam2img(pts_cam, cam_img)\n\n val_flag_1 = np.logical_and(pts_img[:, 0] >= 0,\n pts_img[:, 0] < image_shape[1])\n val_flag_2 = np.logical_and(pts_img[:, 1] >= 0,\n pts_img[:, 1] < image_shape[0])\n val_flag_merge = np.logical_and(val_flag_1, val_flag_2)\n valid = np.logical_and(val_flag_merge, depth >= 0)\n\n return points[valid]",
"def find_components(image,deltaPix,lens_rad_arcsec = 6.0,lens_rad_ratio = None,\n center_x = None,center_y = None, gal_rad_ratio = 0.1,\n min_size_arcsec=0.7,thresh=0.5, many_sources = True,\n show_locations=False, title = None):\n\n # convert minimum component size in pixel units\n min_size = int(min_size_arcsec / deltaPix)\n \n #Convert lens radius and central galaxy radius to pixels\n if lens_rad_ratio == None:\n lens_rad = int(lens_rad_arcsec / deltaPix)\n else: lens_rad = int(len(image) * lens_rad_ratio)\n gal_rad = int(len(image) * gal_rad_ratio)\n \n \n# im2[im2 < im2.min() + 10.*thresh] = 0.\n \n # downscale source image to data resolution (for speed + easier for converting to data units)\n #down = image_util.re_size(image, factor=supersampling_factor_source)\n \n # apply laplacian of gaussian (LoG) filter to enhance maxima\n LoG = - gaussian_laplace(deepcopy(image), sigma = min_size, mode='constant', cval=0.) \n \n# LoG = - gaussian_laplace(deepcopy(im2), sigma = 2., mode='constant', cval=0.)\n \n filtered = deepcopy(LoG)\n \n# print(LoG.min(),LoG.max(),np.abs(LoG.min()) + thresh )\n \n# print(type(filtered))\n \n #background mean and std of filtered image \n corners = np.zeros([4,5,5])\n corners[0] = LoG[0:5,0:5]\n corners[1] = LoG[-5:,0:5]\n corners[2] = LoG[0:5,-5:]\n corners[3] = LoG[-5:,-5:]\n means = []\n stds = []\n for c in corners:\n mn,med,s = sigma_clipped_stats(c,sigma=3.0)\n means.append(mn)\n stds.append(s)\n \n stds=np.array(stds)\n means = np.array(means)\n means_std = np.std(means)\n# means_good = means[(means >= means.mean() - 1.0 * means_std) & (means <= means.mean() + 1.0 * means_std)]\n means_good = means[(np.abs(means) <= np.abs(means).min() + 1.0 * means_std)]\n mean_bg = np.mean(means_good)\n std_bg = np.mean(stds[(np.abs(means) <= np.abs(means).min() + 1.0 * means_std)])\n# print('LoG means: {}, Log means std: {}, Log means good: {}, LoG avg mean: {}'.format(means,means_std,means_good,mean_bg))\n# print('min: {}, max: {}, cut: {}'.format(LoG.min(),LoG.max(),mean_bg + thresh))\n# print(LoG.min(),LoG.max(),filtered.min() + thresh)\n \n \n # assume all value below max*threshold can not be maxima, so put all to zero\n# filtered[filtered < thresh*filtered.max()] = 0.\n \n# assume all value below min*threshold can not be maxima, so put all to zero\n# filtered[filtered < filtered.min() + thresh * np.abs(filtered.min())] = 0.\n# filtered[filtered < mean_bg + thresh] = 0.\n filtered[filtered < mean_bg + 6.*std_bg] = 0. #set pixels below the mean + 6x threshold to 0\n \n # find coordinates of local maxima\n #print(int(0.5 * min_size))\n max_idx_2d_small = peak_local_max(filtered, min_distance=0) #All bright pixels\n max_idx_2d_large = peak_local_max(filtered, min_distance=1) #peaks with min size of 1 pixel\n \n x_list_small, y_list_small = max_idx_2d_small[:, 1], max_idx_2d_small[:, 0]\n x_list_large, y_list_large = max_idx_2d_large[:, 1], max_idx_2d_large[:, 0]\n \n im_center_x, im_center_y = len(image) / 2., len(image) / 2. 
#center of image\n \n if (center_x == None) & (center_y == None):\n new_center_x, new_center_y = im_center_x,im_center_y\n else:\n new_center_x, new_center_y = center_x,center_y #new \"center\" = location of lens galaxy\n \n \n #distance of each detected peak from center\n R_small = np.sqrt((x_list_small - new_center_x)**2 + (y_list_small - new_center_y)**2) \n R_large = np.sqrt((x_list_large - new_center_x)**2 + (y_list_large - new_center_y)**2)\n \n #Contaminant light is only bright pixels further from center than lens_rad\n x_sats, y_sats = x_list_small[R_small > lens_rad], y_list_small[R_small > lens_rad]\n \n if many_sources:\n x_lens, y_lens = deepcopy(x_list_small), deepcopy(y_list_small)\n else:\n x_lens, y_lens = deepcopy(x_list_large), deepcopy(y_list_large)\n \n# x_lens, y_lens = x_list_small[R_small <= lens_rad], y_list_small[R_small <= lens_rad]\n \n if (len(x_lens) == 0) & (len(y_lens) == 0):\n x_lens = [0,15]\n y_lens = [0,15]\n \n sources = QTable([x_lens, y_lens],names={'x_local_peak','y_local_peak'}) #make table of all detected objects\n# print(x_list_large)\n# print(y_list_large)\n# print(sources)\n \n # show maxima on image for debug\n \n if show_locations:\n# fig = plt.figure(figsize=(4, 4))\n #plt.imshow(image, origin='lower', cmap=cmap_flux, norm=LogNorm(1e-2))\n \n f, axes = plt.subplots(1, 5, figsize=(20,5), sharex=False, sharey=False)\n# plt.figure(figsize = (8,8))\n# plt.subplot(1,2,1)\n \n axes[0].imshow(image, origin='lower', norm=SymLogNorm(5))\n axes[0].set_title('Image')\n axes[0].set_axis_off()\n \n \n axes[1].imshow(LoG, origin='lower', norm=SymLogNorm(5))\n axes[1].set_title('LoG Filtered Image')\n axes[1].set_axis_off()\n\n# plt.subplot(1,2,2)\n axes[2].imshow(filtered, origin='lower', norm=SymLogNorm(5))\n axes[2].set_title('Final Filtered Image')\n axes[2].set_axis_off()\n \n axes[3].imshow(image, origin='lower', norm=SymLogNorm(5))\n for i in range(len(x_lens)):\n axes[3].scatter([x_lens[i]], [y_lens[i]], c='red', s=60, marker='+')\n \n for i in range(len(x_list_large)):\n axes[3].scatter([x_list_large[i]], [y_list_large[i]], c='black', s=100, marker='x')\n axes[3].set_title('Detected Objects')\n axes[3].set_axis_off()\n \n axes[4].imshow(image, origin='lower', norm=SymLogNorm(5))\n \n for i in range(len(x_sats)):\n axes[4].scatter([x_sats[i]], [y_sats[i]], c='red', s=60, marker='+')\n \n# plt.annotate(i+1, (x_list[i], y_list[i]), color='black')\n \n# for i in range(len(x_mask)):\n# plt.scatter([x_mask[i]], [y_mask[i]], c='red', s=100, marker='*')\n# plt.annotate(i+1, (x_mask[i], y_mask[i]), color='red')\n axes[4].scatter(new_center_x, new_center_y,c='red', s=100, marker='*')\n \n draw_lens_circle = Circle((new_center_x, new_center_y),lens_rad ,fill=False)\n draw_gal_circle = Circle((new_center_x, new_center_y),gal_rad, fill = False)\n# plt.gcf().gca().add_artist(draw_lens_circle)\n# plt.gcf().gca().add_artist(draw_gal_circle)\n axes[4].add_patch(draw_lens_circle)\n# axes[4].add_patch(draw_gal_circle)\n \n axes[4].set_title('Pixels to Mask: \\n r = {:.3f}'.format(lens_rad_arcsec))\n axes[4].text(1, 1, \"detected components\", color='red')\n axes[4].set_axis_off()\n \n if title != None:\n f.suptitle(title, fontsize = 15)\n# plt.show()\n \n \n return (x_sats, y_sats), (new_center_x, new_center_y), sources",
"def _get_best_threshold(self, frame: np.ndarray, save_debug_thresh_images: bool) -> Tuple[float_, Ndarray_]:\n # 1) click_xy --> click_roi (click_xy.center; size = n * MAX_BALL_SIZE -->\n self.click_roi = ROI(frame.shape, self.click_xy, self.CLICK_ZONE_SIZE)\n self.click_roi_img = self.click_roi.extract_img(frame)\n cv.imwrite(f\"images/click_roi_img.png\", self.click_roi_img)\n\n # 2) --> preprocess(gray,blur,dilute) -->\n self.click_roi_gray = self._preprocess_image(self.click_roi_img, \"Start zone\")\n Util.write_bw(f\"images/click_roi_gray.png\", self.click_roi_gray, f\"frame {FrameProcessor.frame_cnt}\")\n # 3) --> find best threshold (one contour of biggest but reasonable size), ball_size, thresh_val -->\n level_results: List[Dict] = []\n for thresh in range(20, 255 - 20, 1):\n _, img_nomorphed = cv.threshold(self.click_roi_gray, thresh, 255, cv.THRESH_BINARY)\n kernel = np.ones((self.BLUR_LEVEL, self.BLUR_LEVEL), np.uint8)\n img = cv.morphologyEx(img_nomorphed, cv.MORPH_OPEN, kernel)\n img = cv.morphologyEx(img, cv.MORPH_CLOSE, kernel)\n # Util.show_img(img, f\"thresh level = {thresh}\", 1)\n\n contours, _ = cv.findContours(img, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)\n # log_zone.debug(f\"get_best_threshold: iterating {thresh=} {len(contours)=} {[cv.contourArea(c) for c in contours]=}\")\n # Util.write_bw(f\"images/thresh_{thresh}.png\", img, f\"#{FrameProcessor.frame_cnt}: {thresh=}\")\n\n if len(contours) != 1: # должен быть только один контур мяча. если несколько - меняем порог\n Util.write_bw(f\"images/{thresh}_not1.png\", img, f\"#{FrameProcessor.frame_cnt}: {thresh=} contours({len(contours)})\")\n continue\n contour = contours[0]\n area = cv.contourArea(contour)\n x, y, w, h = cv.boundingRect(contour)\n if max(w, h) / max(self.click_roi_gray.shape) > self.MAX_RECT_RATIO: # contour is as big as total image - so is useless\n Util.write_bw(f\"images/{thresh}_big.png\", img,\n f\"#{FrameProcessor.frame_cnt}: {thresh=} Big: {w=}{h=} max(shape)={max(self.click_roi_gray.shape)}\")\n continue\n if x == 0 or y == 0 or x + w == self.click_roi.w or y + h == self.click_roi.h:\n Util.write_bw(f\"images/{thresh}_touch.png\", img, f\"#{FrameProcessor.frame_cnt}: {thresh=} Touch: {x=} {y=} {w=} {h=}\")\n continue # contour is touched to border\n hull = cv.convexHull(contour, returnPoints=False)\n defects = cv.convexityDefects(contour, hull)\n max_defect_size = sorted(defects, key=lambda defect: defect[0][3], reverse=True)[0][0][3] if defects is not None else -1\n if max_defect_size > self.MAX_DEFECT_SIZE:\n Util.write_bw(f\"images/{thresh}_defects.png\", img, f\"#{FrameProcessor.frame_cnt}: {thresh=} {max_defect_size=}\")\n continue\n\n result = {\"thresh\": thresh, \"area\": area, \"contour\": contour}\n level_results.append(result)\n Util.write_bw(f\"images/{thresh}_thresh.png\", img,\n f\"#{FrameProcessor.frame_cnt}: {thresh=} area={result['area']} def_size={max_defect_size}\")\n Util.write_bw(f\"images/{thresh}_nomorphed.png\", img_nomorphed,\n f\"#{FrameProcessor.frame_cnt}: {thresh=} area={result['area']} def_size={max_defect_size}\")\n log_zone.debug(f\"get_best_thresh::: level result saved {result['thresh']=} {result['area']=} {ROI(frame.shape, contour=contour)} \")\n\n if len(level_results) == 0: # no appropriate thresh found\n return None, None\n if len(level_results) == 1: # return just the only found thresh\n best_result = level_results[0]\n elif 1 < len(level_results) <= 5: # len(level_results) in (1;5] -- return second best by area if possible\n level_results = 
sorted(level_results, key=lambda res: res[\"area\"], reverse=True)\n best_result = level_results[1]\n else: # len(level_results) > 5\n best_result = self.get_optimized_thresh_level(level_results)\n\n otsu_thresh, otsu_img = cv.threshold(self.click_roi_gray, 0, 255, cv.THRESH_BINARY + cv.THRESH_OTSU)\n log_zone.debug(f\"{best_result['thresh']=} {best_result['area']=} otsu = {otsu_thresh}\")\n if save_debug_thresh_images:\n Util.write_bw(f\"images/best_{best_result['thresh']}.png\",\n cv.threshold(self.click_roi_gray, best_result['thresh'], 255, cv.THRESH_BINARY)[1],\n f\"{best_result['area']=}\")\n Util.write_bw(f\"images/otsu_{otsu_thresh}.png\", otsu_img)\n return best_result[\"thresh\"], best_result[\"contour\"]",
"def panPeakDetect(detection, fs):\n\n min_distance = int(0.25 * fs)\n\n signal_peaks = [0]\n noise_peaks = []\n\n SPKI = 0.0\n NPKI = 0.0\n\n threshold_I1 = 0.0\n threshold_I2 = 0.0\n\n RR_missed = 0\n index = 0\n indexes = []\n\n missed_peaks = []\n peaks = []\n\n for i in range(len(detection)):\n\n if 0 < i < len(detection) - 1:\n if detection[i - 1] < detection[i] and detection[i + 1] < detection[i]:\n peak = i\n peaks.append(i)\n\n if detection[peak] > threshold_I1 and (peak - signal_peaks[-1]) > 0.25 * fs:\n\n signal_peaks.append(peak)\n indexes.append(index)\n SPKI = 0.125 * detection[signal_peaks[-1]] + 0.875 * SPKI\n if RR_missed != 0:\n if signal_peaks[-1] - signal_peaks[-2] > RR_missed:\n missed_section_peaks = peaks[indexes[-2] + 1:indexes[-1]]\n missed_section_peaks2 = []\n for missed_peak in missed_section_peaks:\n if missed_peak - signal_peaks[-2] > min_distance and signal_peaks[\n -1] - missed_peak > min_distance and detection[missed_peak] > threshold_I2:\n missed_section_peaks2.append(missed_peak)\n\n if len(missed_section_peaks2) > 0:\n missed_peak = missed_section_peaks2[np.argmax(detection[missed_section_peaks2])]\n missed_peaks.append(missed_peak)\n signal_peaks.append(signal_peaks[-1])\n signal_peaks[-2] = missed_peak\n\n else:\n noise_peaks.append(peak)\n NPKI = 0.125 * detection[noise_peaks[-1]] + 0.875 * NPKI\n\n threshold_I1 = NPKI + 0.25 * (SPKI - NPKI)\n threshold_I2 = 0.5 * threshold_I1\n\n if len(signal_peaks) > 8:\n RR = np.diff(signal_peaks[-9:])\n RR_ave = int(np.mean(RR))\n RR_missed = int(1.66 * RR_ave)\n\n index = index + 1\n # First possible peak detection\n first_possible_peak = np.argmax(detection[0:int(0.25 * fs)])\n if detection[first_possible_peak] > SPKI:\n signal_peaks[0] = first_possible_peak\n else:\n signal_peaks.pop(0)\n signal_peaks = np.array(signal_peaks)\n return signal_peaks",
"def condense_coords(matches):\n x = []\n y = []\n for m in matches:\n x += m['matches']['p'][0]\n x += m['matches']['q'][0]\n y += m['matches']['p'][1]\n y += m['matches']['q'][1]\n coords = np.transpose(np.vstack((np.array(x), np.array(y))))\n return coords",
"def process(self, img, enableMouthDetection = Global.DETECT_MOUTH_AREA_FLAG):\n mouthPos=[]\n [maxH,maxW,channels] = img.shape\n facePos = self.faceDetector.detect_biggest(img, self.faceScales, self.faceNeightbors)\n # print facePos\n if enableMouthDetection and len(facePos)>0:\n mouthDetectingArea = adjustRoiWithinRange(facePos[0], facePos[1], facePos[2], facePos[3], 0.1, 0.1, 0.5, -0.3, maxW, maxH)\n mouthRegionForDetection = getROI(img, mouthDetectingArea)\n mouthPos = self.mouthDetector.detect_biggest(mouthRegionForDetection, self.mouthScales, self.mouthNeighbors)\n if len(mouthPos) > 0:\n mouthPos[0] += mouthDetectingArea[0]\n mouthPos[1] += mouthDetectingArea[1]\n return [ facePos, mouthPos]",
"def flood_extraction(self, threshold=0.5, predictions=None):\n # define crop window\n top = self.crop_window['top']\n left = self.crop_window['left']\n height = self.crop_window['height']\n width = self.crop_window['width']\n\n # if prediction is not given as parameter then load predictions from stored pngs in the prediction directory\n if predictions is None:\n # natural sort the files in directory\n f_names = os.listdir(self.pred_dir)\n f_names.sort(key=lambda var: [int(x) if x.isdigit() else x for x in re.findall(r'[^0-9]|[0-9]+', var)])\n predictions = [cv2.imread(os.path.join(self.pred_dir, file)) for file in f_names]\n\n # iterate over each predicted frame, crop image and calculate flood index\n flood_index_crop = []\n flood_index = []\n for pred in predictions:\n pred_crop = pred[top:(top + height), left:(left + width)]\n flood_index_crop.append((pred_crop[:, :, 1] > threshold).sum() / (pred_crop.shape[0] * pred_crop.shape[1]))\n flood_index.append((pred[:, :, 1] > threshold).sum() / (pred.shape[0] * pred.shape[1]))\n\n return np.array(flood_index), np.array(flood_index_crop)"
] | [
"0.6502218",
"0.6376775",
"0.62228143",
"0.62143093",
"0.61916566",
"0.61537147",
"0.6047404",
"0.60279536",
"0.59590685",
"0.59526235",
"0.59399045",
"0.5929654",
"0.5877842",
"0.5805837",
"0.5779849",
"0.5760951",
"0.5747893",
"0.5747553",
"0.57224065",
"0.57049763",
"0.5695016",
"0.5657802",
"0.5641926",
"0.563322",
"0.5624286",
"0.56212",
"0.5614422",
"0.5614212",
"0.5612957",
"0.559437"
] | 0.64943624 | 1 |
Choose the candidate thresholds to test for the spot detection. | def _get_candidate_thresholds(pixel_values):
    # choose appropriate candidate thresholds
start_range = 0
end_range = int(np.percentile(pixel_values, 99.9999))
if end_range < 100:
thresholds = np.linspace(start_range, end_range, num=100)
else:
thresholds = [i for i in range(start_range, end_range + 1)]
thresholds = np.array(thresholds)
return thresholds | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_candidate_thresholds(self, node, vec):\n\n if vec >= self.n_vec:\n msg = \"BUG: try to split on {0} which is after max_n_vec ({1})\"\n raise SplitError(msg.format(vec, self.n_vec))\n\n # the projections on the selected eigen-vector\n evs = self.E[node.ids, vec]\n\n # get the thresholds\n _scale = evs.max() - evs.min()\n if _scale < self.min_evect_amplitude:\n # not enough amplitude to split\n used_threshs = []\n else:\n # get quantiles as thresholds\n evs.sort()\n _threshs = evs[(self.percentiles * (len(evs) - 1)).astype(int)]\n # discard thresholds very close to each other\n # (unstable: small change yields very different split)\n used_threshs = [_threshs[0]] # always use the first one\n for _t in _threshs[1:]:\n if (_t - used_threshs[-1]) > 1e-2 * _scale:\n # keep: gap between thresholds is more than 1% of total scale\n used_threshs.append(_t)\n\n if len(used_threshs) == 0:\n msg = \"WARNING: too small amplitude ({0:0.1e})\"\n msg += \" or too close thresholds to split node {1} at vec {2}\\n\"\n sys.stderr.write(msg.format(_scale, node.name, vec))\n sys.stderr.flush()\n\n return used_threshs",
"def __init__(self, treshold: float = 0.5):\n self.threshold: float = treshold\n self.dice_scores: list = []\n self.iou_scores: list = []\n self.sens_scores: list = []\n self.spec_scores: list = []\n self.accu_scores: list = []",
"def threshold_selection(prevalence, CostFP_minus_CostTN, CostFN_minus_CostTP, y, y_hat):\n fpr, tpr, thresholds = roc_curve(y, y_hat)\n m = ((1 - prevalence) / prevalence) * ((CostFP_minus_CostTN) / (CostFN_minus_CostTP))\n fm_thresholds = []\n for i in range(len(fpr)):\n fm = tpr[i] - (m * fpr[i])\n fm_thresholds.append((thresholds[i], fm))\n fm_thresholds = sorted(fm_thresholds, key=lambda fm_value: fm_value[1], reverse=True)\n return fm_thresholds[0][0]",
"def selectThreshold(yval, pval):\n bestEpsilon = 0\n bestF1 = 0\n F1 = 0\n\n stepsize = (pval.max()-pval.min())/1000\n for epsilon in np.arange(pval.min(), pval.max()+stepsize/2, stepsize):\n predictions = (pval < epsilon)\n tp = ((predictions == 1) & (yval == 1)).sum()\n fp = ((predictions == 1) & (yval == 0)).sum()\n fn = ((predictions == 0) & (yval == 1)).sum()\n prec = tp/(tp+fp)\n rec = tp/(tp+fn)\n F1 = 2*prec*rec/(prec+rec)\n\n if F1 > bestF1:\n bestF1 = F1\n bestEpsilon = epsilon\n\n return bestEpsilon, bestF1",
"def _compute_thresholds(self, thresholds):\r\n thr = thresholds\r\n limit = int(1 / thresholds)\r\n thresholds = [x * thr for x in range(limit)]\r\n thresholds.append(1)\r\n return thresholds",
"def selectThreshold(yval, pval):\n\tbestEpsilon = 0\n\tbestF1 = 0\n\tstepsize = (np.max(pval) - np.min(pval)) / 1000\n\n\tfor epsilon in np.arange(np.min(pval), np.max(pval), stepsize):\n\t\tpredictions = (pval < epsilon) + 0\n\t\ttp = np.sum((yval == 1) & (predictions == 1))\n\t\tfp = np.sum((yval == 0) & (predictions == 1))\n\t\tfn = np.sum((yval == 1) & (predictions == 0))\n\t\tif tp + fp == 0:\n\t\t\tcontinue\n\t\tprec = float(tp) / (tp + fp) # tips: cast int to float, or you will get 0\n\t\trec = float(tp) / (tp + fn)\n\t\tF1 = 2.0 * prec * rec / (prec + rec)\n\t\tif F1 > bestF1:\n\t\t\tbestF1 = F1\n\t\t\tbestEpsilon = epsilon\n\treturn bestEpsilon, bestF1",
"def _findThreshold(self, loudnesses, annotations, measure):\n\n # Sort loudnesses and respective annotations\n sortedLoudnesses, sortedAnnotations = zip(\n *sorted(zip(loudnesses, annotations)))\n\n # Preparation\n scores = []\n loudnessesBelow = []\n loudnessesAbove = list(loudnesses)\n estimations = [True] * len(loudnesses)\n\n # Try out all reasonable thresholds\n for i in range(len(loudnesses) - 1):\n estimations[i] = False\n loudnessesBelow.append(loudnessesAbove.pop(0))\n scores.append(\n self._score(measure, estimations, sortedAnnotations,\n loudnessesBelow, loudnessesAbove))\n\n # Find optimal threshold\n idx = np.argmax(scores)\n threshold = (sortedLoudnesses[idx] + sortedLoudnesses[idx + 1]) * 0.5\n bestScore = scores[idx]\n\n return threshold, bestScore",
"def get_thresholds(kalpha, deltaelow, deltaehigh, maxphotons, nscatter, scatter):\n thresholds = tuple(\n [\n (float(n), float(s), n * kalpha + s * scatter - deltaelow, n * kalpha + s * scatter + deltaehigh, s * scatter)\n for s in range(nscatter + 1, -1, -1)\n for n in range(maxphotons - s + 1)\n if not (n == 0 and s == 0)\n ]\n )\n return thresholds",
"def _find_best_threshold(self, num_of_steps=20, verbose=False):\n xmin = self.x.min()\n xmax = self.x.max()\n step = (xmax - xmin)/num_of_steps\n \n lower_th = None\n lower_IR = 1\n\n # for each potential threshold\n for threshold in np.arange(xmin+step, xmax, step):\n IR = self._compute_isometric_ratio(threshold)\n \n if IR < lower_IR:\n lower_IR = IR\n lower_th = threshold\n \n self.threshold = lower_th\n if verbose:\n print(f'\\tThreshold:\\t\\t{lower_th}\\n\\tIsometric Ratio:\\t{lower_IR}')",
"def best_t(precisions, recalls, thresholds):\n f1 = [2 * (precisions[i] * recalls[i]) / (precisions[i] + recalls[i]) for i in range(0, len(thresholds))]\n return thresholds[np.argmax(f1)]",
"def calibrate_threshold(test_graphs):\r\n best_threshold = None\r\n best_result = None\r\n for threhold in range(1, 50):\r\n cur_res = evaluate_argument_mention(test_graphs, threhold)\r\n if (best_result is None) or (cur_res > best_result):\r\n best_result = cur_res\r\n best_threshold = threhold\r\n return (best_threshold, best_result)",
"def calculate_thresholds(self):\n \n for group in self.roi_groups:\n for roi in group.rois:\n for image in range(len(roi.counts)):\n # print(roi.autothreshs)\n # print('image',image)\n if roi.autothreshs[image]:\n values = np.fromiter(roi.counts[image].values(), dtype=float)\n roi.thresholds[image] = self.calculate_threshold(values)\n\n for image, im_copy in enumerate(self.copy_im_threshs): # copy values from a different image and set to manual thresh if needed\n if im_copy is not None:\n for group in self.roi_groups:\n for roi in group.rois:\n roi.autothreshs[image] = False\n roi.thresholds[image] = roi.thresholds[im_copy]",
"def determine_thresholds(confidence, resolution=100):\n if isinstance(confidence, list):\n confidence = np.array(confidence)\n confidence = confidence.flatten()\n confidence = confidence[~np.isnan(confidence)]\n confidence.sort()\n\n assert len(confidence) > resolution and resolution > 2\n\n thresholds = np.ones((resolution))\n thresholds[0] = - np.inf\n thresholds[-1] = np.inf\n delta = np.floor(len(confidence) / (resolution - 2))\n idxs = np.linspace(delta, len(confidence)-delta, resolution-2, dtype=np.int32)\n thresholds[1:-1] = confidence[idxs]\n return thresholds",
"def Thresholds(self) :\n\n from Hlt1Lines.Hlt1TrackLines import Hlt1TrackLinesConf\n from Hlt1Lines.Hlt1MuonLines import Hlt1MuonLinesConf\n from Hlt1Lines.Hlt1ElectronLines import Hlt1ElectronLinesConf\n from Hlt1Lines.Hlt1L0Lines import Hlt1L0LinesConf\n from Hlt1Lines.Hlt1MBLines import Hlt1MBLinesConf\n from Hlt1Lines.Hlt1CommissioningLines import Hlt1CommissioningLinesConf\n from Hlt1Lines.Hlt1DisplVertexLines import Hlt1DisplVertexLinesConf\n from Hlt2Lines.Hlt2CommissioningLines import Hlt2CommissioningLinesConf\n from Hlt1Lines.Hlt1BeamGasLines import Hlt1BeamGasLinesConf\n from Hlt2Lines.Hlt2diphotonDiMuonLines import Hlt2diphotonDiMuonLinesConf\n from Hlt2Lines.Hlt2InclusiveDiProtonLines import Hlt2InclusiveDiProtonLinesConf\n from Hlt2Lines.Hlt2DisplVerticesLines import Hlt2DisplVerticesLinesConf\n\n thresholds = { Hlt1TrackLinesConf : { 'AllL0Tight_PT' : 1700\n , 'AllL0Tight_P' : 3000\n , 'AllL0Tight_IP' : 0.100\n , 'AllL0Tight_IPChi2' : 16 \n , 'AllL0Tight_TrChi2' : 1.5 \n , 'AllL0Tight_GEC' : 'Loose'\n , 'AllL0Tight_Velo_NHits' : 9 \n , 'AllL0Tight_Velo_Qcut' : 3 \n , 'AllL0Tight_ValidateTT' : True \n , 'AllL0_PT' : 1600\n , 'AllL0_P' : 3000\n , 'AllL0_IP' : 0.100\n , 'AllL0_IPChi2' : 16\n , 'AllL0_TrChi2' : 2.0\n , 'AllL0_GEC' : 'Loose'\n , 'AllL0_Velo_NHits' : 9 \n , 'AllL0_Velo_Qcut' : 3 \n , 'AllL0_ValidateTT' : True \n , 'Muon_PT' : 1000 \n , 'Muon_P' : 3000 \n , 'Muon_IP' : 0.100\n , 'Muon_IPChi2' : 16\n , 'Muon_TrChi2' : 2.5 \n , 'Muon_GEC' : 'Loose'\n , 'Muon_ValidateTT' : False\n , 'Muon_L0Channels' : 'Muon,DiMuon,MuonNoSPD,DiMuonNoSPD' \n , 'Photon_PT' : 1200\n , 'Photon_P' : 3000\n , 'Photon_IP' : 0.100\n , 'Photon_IPChi2' : 16\n , 'Photon_TrChi2' : 2.0\n , 'Photon_L0Channels' : 'PhotonHi,ElectronHi' \n , 'Photon_GEC' : 'Loose'\n , 'Photon_ValidateTT' : True\n , 'Prescale' : {'Hlt1TrackAllL0' : 1.0, \n 'Hlt1TrackAllL0Tight' : 1.0,\n 'Hlt1TrackForwardPassThrough' : 0,\n 'Hlt1TrackForwardPassThroughLoose' : 0}\n \n }\n , Hlt1ElectronLinesConf : { 'SingleElectronNoIP_P' : 20000\n , 'SingleElectronNoIP_PT' : 10000\n , 'SingleElectronNoIP_TrChi2' : 3\n , 'SingleElectronNoIP_TrNTHits' : 0 #OFF\n , 'SingleElectronNoIP_Velo_NHits' : 0 #OFF\n , 'SingleElectronNoIP_Velo_Qcut' : 999 #OFF\n , 'SingleElectronNoIP_GEC' : 'Loose'\n , 'L0Channels': { 'SingleElectronNoIP' : ( 'Electron', ) }\n }\n , Hlt1MuonLinesConf : { 'SingleMuonHighPT_P' : 3000\n , 'SingleMuonHighPT_PT' : 4800\n , 'SingleMuonHighPT_TrChi2' : 3.\n , 'SingleMuonHighPT_GEC' : 'Loose'\n , 'SingleMuonNoIP_P' : 3000\n , 'SingleMuonNoIP_PT' : 1300\n , 'SingleMuonNoIP_TrChi2' : 3.\n , 'SingleMuonNoIP_GEC' : 'Loose'\n , 'DiMuonLowMass_VxDOCA' : 0.2\n , 'DiMuonLowMass_VxChi2' : 25\n , 'DiMuonLowMass_P' : 0\n , 'DiMuonLowMass_PT' : 0\n , 'DiMuonLowMass_TrChi2' : 3\n , 'DiMuonLowMass_M' : 0.\n , 'DiMuonLowMass_IPChi2' : 6.\n , 'DiMuonLowMass_GEC' : 'Loose'\n , 'DiMuonHighMass_VxDOCA' : 0.2\n , 'DiMuonHighMass_VxChi2' : 25\n , 'DiMuonHighMass_P' : 3000\n , 'DiMuonHighMass_PT' : 500\n , 'DiMuonHighMass_TrChi2' : 3\n , 'DiMuonHighMass_M' : 2700\n , 'DiMuonHighMass_GEC' : 'Loose'\n , 'MultiMuonNoIP_P' : 3000\n , 'MultiMuonNoIP_PT' : 500\n , 'MultiMuonNoIP_TrChi2' : 3.\n , 'MultiMuonNoIP_GT' : 2.5\n , 'MultiMuonNoIP_GEC' : 'Loose'\n ,'L0Channels' : {\n 'SingleMuonHighPT' : ( 'Muon', 'MuonNoSPD'),\n 'SingleMuonNoIP' : ( 'Muon', 'MuonNoSPD'),\n 'DiMuonLowMass' : ( 'Muon', 'MuonNoSPD', 'DiMuon', 'DiMuonNoSPD' ),\n 'DiMuonHighMass' : ( 'Muon', 'MuonNoSPD', 'DiMuon', 'DiMuonNoSPD' ),\n 'MultiMuonNoIP' : ( 'Muon', 'MuonNoSPD', 
'DiMuon', 'DiMuonNoSPD' ) }\n\n , 'Prescale' : { 'Hlt1SingleMuonNoIP' : 0.01,\n 'Hlt1MultiMuonNoIP' : 0.0 }\n }\n , Hlt1L0LinesConf : { 'Postscale' : { 'Hlt1L0AnyRateLimited' : 'RATE(1)'\n , 'Hlt1L0AnyNoSPDRateLimited' : 'RATE(1)'\n }\n , 'Prescale' : { 'Hlt1L0HighSumETJet' : 1 \n , 'Hlt1L0AnyNoSPD' : 0.01\n }\n }\n , Hlt1BeamGasLinesConf : {\n 'Prescale' : { 'Hlt1BeamGasCrossingForcedRecoFullZ': 0.001 }\n , 'Postscale' : { 'Hlt1BeamGasNoBeamBeam1' : 'RATE(0.5)'\n , 'Hlt1BeamGasNoBeamBeam2' : 'RATE(0.5)'\n , 'Hlt1BeamGasBeam1' : 'RATE(2)'\n , 'Hlt1BeamGasBeam2' : 'RATE(2)'\n , 'Hlt1BeamGasCrossingEnhancedBeam1' : 'RATE(0)'\n , 'Hlt1BeamGasCrossingEnhancedBeam2' : 'RATE(0)'\n , 'Hlt1BeamGasCrossingForcedReco' : 'RATE(0.5)'\n , 'Hlt1BeamGasCrossingForcedRecoFullZ': 'RATE(0.5)'\n , 'Hlt1BeamGasCrossingParasitic' : 'RATE(1)'\n , 'Hlt1BeamGasHighRhoVertices' : 'RATE(4)'\n }\n }\n , Hlt1DisplVertexLinesConf: { \"VertexDisplVertex_DOCABL\" : \"2.0*mm\"\n , \"VertexDisplVertex_VELO_NSP\" : \"3\"\n , \"VertexDisplVertex_VELO_NCSP\" : \"2\"\n , \"VertexDisplVertex_VELOTrChi2\": \"2.5\"\n , \"VertexDisplVertex_VX_DOCA\" : \"0.3*mm\"\n , \"VertexDisplVertex_VX_CHI2\" : \"1000000000.0\"\n , \"VertexDisplVertex_VX_RHO\" : \"12.0*mm\"\n , \"VertexDisplVertex_Tr_P\" : \"10.0*GeV\"\n , \"VertexDisplVertex_Tr_PT\" : \"1.7*GeV\"\n , \"VertexDisplVertex_Tr_CHI2\" : \"2.5\"\n , \"VertexDisplVertex_GEC\" : \"Loose\"\n , \"Prescale\" : {'Hlt1VertexDisplVertex':1.0 }\n }\n #, Hlt1ProtonLinesConf : { 'Prescale' : { 'Hlt1DiProtonLowMult' : 0.01, \n # 'Hlt1DiProton' : 0.01\n # } } \n , Hlt1CommissioningLinesConf : { 'Postscale' : { 'Hlt1ErrorEvent' : 'RATE(0.01)'\n\n } }\n , Hlt2CommissioningLinesConf : { 'Prescale' : { 'Hlt2PassThrough' : 0.0001 \n , 'Hlt2Forward' : 0.00001\n , 'Hlt2DebugEvent' : 0.000001 }\n , 'Postscale' : { 'Hlt2ErrorEvent' : 'RATE(0.01)' } }\n # micro bias lines switched off for high mu physics running \n , Hlt1MBLinesConf : { 'Prescale' : { 'Hlt1MBMicroBiasVelo' : 0\n , 'Hlt1MBMicroBiasTStation' : 0\n , 'Hlt1MBMicroBiasVeloRateLimited' : 0\n , 'Hlt1MBMicroBiasTStationRateLimited' : 0 }\n , 'MaxNoBiasRate' : 1000000.\n }\n , Hlt2diphotonDiMuonLinesConf : { 'Prescale' : { 'Hlt2LowMultHadron' : 1.0 # for 0x0035, this is already done in L0\n , 'Hlt2LowMultPhoton' : 0.01\n } } \n\n #, Hlt2InclusiveDiProtonLinesConf: { 'Prescale' : { 'Hlt2DiProton' : 0.001\n # , 'Hlt2DiProtonLowMult' : 0.001\n # } } \n\n , Hlt2DisplVerticesLinesConf : { 'Prescale' : \n { 'Hlt2DisplVerticesHighMassSingle' : 1\n , 'Hlt2DisplVerticesSingle' : 1\n , 'Hlt2DisplVerticesDouble' : 1\n , 'Hlt2DisplVerticesHighMassSingle' : 1 \n , 'Hlt2DisplVerticesHighFDSingle' : 1\n , 'Hlt2DisplVerticesSinglePostScaled' : 1\n , 'Hlt2DisplVerticesSingleDown' : 1\n , 'Hlt2DisplVerticesDoublePostScaled' : 1\n , 'Hlt2DisplVerticesSingleHighMassPostScaled' : 1\n , 'Hlt2DisplVerticesSingleHighFDPostScaled' : 1\n , 'Hlt2DisplVerticesSingleMVPostScaled' : 1 } \n }\n\n }\n\n\n from Muons_April2012 import Muons_April2012\n __update_conf__(thresholds, Muons_April2012().Thresholds() )\n\n from Electrons_July2011 import Electrons_July2011\n __update_conf__(thresholds, Electrons_July2011().Thresholds() )\n\n from Hadrons_September2012 import Hadrons_September2012\n __update_conf__(thresholds, Hadrons_September2012().Thresholds() )\n\n from DV_draft2012 import DV_draft2012\n __update_conf__(thresholds, DV_draft2012().Thresholds() )\n\n from CharmLeptonic_draft2012 import CharmLeptonic_draft2012\n __update_conf__(thresholds, 
CharmLeptonic_draft2012().Thresholds() )\n\n return thresholds",
"def auto_find_limits(peaks, thresholds) :\n\n limits = {}\n diploid_peak = min(peaks)\n haploid_peak = max(peaks)\n\n for t, lim in thresholds.items() :\n if diploid_peak in t and haploid_peak in t :\n limits[\"dip\"] = lim\n elif diploid_peak in t and haploid_peak not in t :\n limits[\"low\"] = lim\n elif haploid_peak in t and diploid_peak not in t :\n limits[\"high\"] = lim\n else :\n continue\n\n if any(x not in limits for x in [\"low\", \"dip\", \"high\"]) :\n raise Exception(\"Could not automatically find limits...\")\n\n return limits",
"def get_canny_thresholds(img):\n gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n\n v = np.median(gray)\n\n lower = int(max(0, (1.0 - 0.33) * v))\n upper = int(min(255, (1.0 + 0.33) * v))\n\n return lower, upper",
"def customize_plark_detect_range(self, current_gameboard, stop_range, slow_range, fast_range):\n detect_dict = {\n 'stopped': stop_range,\n 'slow': slow_range,\n 'fast': fast_range\n }\n current_gameboard['detection_range'] = detect_dict",
"def Thresholds(self) :\n\n from Hlt1Lines.Hlt1PhiLines import Hlt1PhiLinesConf\n from Hlt1Lines.Hlt1CalibTrackingLines import Hlt1CalibTrackingLinesConf\n\n thresholds = { Hlt1PhiLinesConf : { 'TrackPT' : 800 # MeV\n ,'TrackP' : 4000 # MeV\n ,'TrackChi2DOF' : 5 # dimensionless\n ,'PhiMassWin' : 30 # MeV\n ,'PhiDOCA' : 0.2 # mm\n ,'PhiVCHI2' : 20 # dimensionless\n ,'Velo_Qcut' : 3 # dimensionless\n ,'TrNTHits' : 16.\n ,'ValidateTT' : False\n }\n , Hlt1CalibTrackingLinesConf : { 'TrackPT' : 500 # MeV\n ,'TrackP' : 2000 # MeV\n ,'TrackChi2DOF' : 5 # dimensionless\n ,'D0MassWin' : 100 # MeV\n ,'D0DOCA' : 0.2 # mm\n ,'D0VCHI2' : 20 # dimensionless\n ,'D0PT' : 2000 # MeV\n ,'Velo_Qcut' : 3 # dimensionless\n ,'TrNTHits' : 16\n ,'ValidateTT' : False\n }\n }\n\n return thresholds",
"def thresh_setup():\n pass",
"def test_autoThresh():\n edges = [0, 1000]\n spikes1 = SpikeTrain([64.88600, 305.81000, 696.00000, 800.0000], edges)\n spikes2 = SpikeTrain([67.88600, 302.81000, 699.00000], edges)\n spikes3 = SpikeTrain([164.88600, 205.81000, 796.00000, 900.0000], edges)\n spikes4 = SpikeTrain([263.76400, 418.45000, 997.48000], edges)\n spike_train_list = [spikes1, spikes2, spikes3, spikes4]\n\n Thresh = default_thresh(spike_train_list)\n print('default_thresh got %.4f'%Thresh)\n np.testing.assert_almost_equal(Thresh, 325.4342, decimal=4, err_msg=\"default_thresh\")\n\n c1 = spk.spike_sync(spikes1, spikes2, MRTS=Thresh)\n c2 = spk.spike_sync(spikes1, spikes2, MRTS='auto')\n np.testing.assert_almost_equal(c1, c2, err_msg=\"spike_sync\")\n\n # apply it to the first example avove\n v1 = [12.0000, 16.0000, 28.0000, 32.0000, 44.0000, 48.0000, 60.0000, 64.0000, 76.0000, 80.0000, ];\n v2 = [7.5376, 19.9131, 24.2137, 35.7255, 40.0961, 51.7076, 55.9124, 68.1017, 71.9863, 83.5994, ];\n edges=[0, 300]\n\n sp1 = spk.SpikeTrain(v1, edges)\n sp2 = spk.SpikeTrain(v2, edges)\n\n t = default_thresh([sp1, sp2])\n ## Look at all 4 algorithms\n\n c1 = spk.spike_sync(sp1, sp2, MRTS=t)\n c2 = spk.spike_sync(sp1, sp2, MRTS='auto')\n np.testing.assert_almost_equal(c1, c2, err_msg=\"spike_sync2\")\n print('SS thresh %.3f, results %.3f'%(t,c1))\n # compare with: {14:0., 15:.3, 16:.6, 17:.9, 18:1.}\n\n c1 = spk.spike_distance(sp1, sp2, MRTS=t)\n c2 = spk.spike_distance(sp1, sp2, MRTS='auto')\n np.testing.assert_almost_equal(c1, c2, err_msg=\"spike_distance\")\n\n c1 = spk.spike_distance(sp1, sp2, MRTS=t, RI=True)\n c2 = spk.spike_distance(sp1, sp2, MRTS='auto', RI=True)\n np.testing.assert_almost_equal(c1, c2, err_msg=\"RI\")\n\n c1 = spk.isi_distance(sp1, sp2, MRTS=t)\n c2 = spk.isi_distance(sp1, sp2, MRTS='auto')\n np.testing.assert_almost_equal(c1, c2, err_msg=\"ISI\")\n\n c1 = spk.spike_directionality(sp1, sp2, MRTS=t)\n c2 = spk.spike_directionality(sp1, sp2, MRTS='auto')\n np.testing.assert_almost_equal(c1, c2, err_msg=\"directionality\")\n\n print('OK2')",
"def tune_threshold(task, gt, cam_dir):\n cam_pkls = sorted(list(Path(cam_dir).rglob(f\"*{task}_map.pkl\")))\n thresholds = np.arange(0.2, .8, .1)\n mious = [compute_miou(threshold, cam_pkls, gt) for threshold in thresholds]\n best_threshold = thresholds[mious.index(max(mious))]\n return best_threshold",
"def _thresholding(qc_value, thresholds=None):\n MAX_BOUND, MIN_BOUND = (1, 0)\n if not thresholds:\n thresholds = TaskQC.criteria['default'].copy()\n if qc_value is None or np.isnan(qc_value):\n return int(-1)\n elif (qc_value > MAX_BOUND) or (qc_value < MIN_BOUND):\n raise ValueError(\"Values out of bound\")\n if 'PASS' in thresholds.keys() and qc_value >= thresholds['PASS']:\n return 0\n if 'WARNING' in thresholds.keys() and qc_value >= thresholds['WARNING']:\n return 1\n if 'FAIL' in thresholds and qc_value >= thresholds['FAIL']:\n return 2\n if 'NOT_SET' in thresholds and qc_value >= thresholds['NOT_SET']:\n return -1\n # if None of this applies, return 'NOT_SET'\n return -1",
"def evaluation_detections(thresholds, bboxes_gt, bboxes_detected, num_instances):\r\n TP = np.zeros(len(thresholds), dtype=int)\r\n FP = np.zeros(len(thresholds), dtype=int)\r\n\r\n scores_detections = [[] for i in range(len(thresholds))]\r\n # scores_detections is pair of values [result, confidence] where result is true if the example is correctly\r\n # classified and confidence is the confidence of the prediction. It's used to compute the precision-recall\r\n # curve. Confidence score is random if the predicted scores do not belong to a detector.\r\n\r\n for key in bboxes_detected.keys():\r\n for bbox_noisy in bboxes_detected[key]:\r\n if key in bboxes_gt: # if we have detected stuff and it is in the gt\r\n scores = [bbox_iou(bbox_noisy[1:5], bbox[1:5]) for bbox in bboxes_gt[key]]\r\n max_score = max(scores)\r\n for i, threshold in enumerate(thresholds):\r\n if max_score > threshold:\r\n TP[i] += 1\r\n # we give correct boxes a slightly higher confidence score\r\n scores_detections[i].append([1, bbox_noisy[5]])\r\n else:\r\n FP[i] += 1\r\n scores_detections[i].append([0, bbox_noisy[5]])\r\n else: # if we have detected stuff and it is not in the gt\r\n for i, threshold in enumerate(thresholds):\r\n FP[i] += 1\r\n\r\n FN = num_instances - TP # number of instances not detected\r\n return TP, FP, FN, np.array(scores_detections)",
"def set_camera_thresholds(self,thresholds):\n self.send_packet('\\x93'+struct.pack('<'+'B'*8,*thresholds))",
"def _determine_threshold(threshold, clip_min=0.1, clip_max=0.9):\n if threshold != -1:\n return threshold\n\n path = os.path.join(os.path.dirname(cfg.predictions_path), 'thresholds.p')\n\n if not os.path.isfile(path):\n print('Warning: Defaulting to threshold of 0.5')\n return 0.5\n\n with open(path, 'rb') as f:\n thresholds = pickle.load(f)\n return np.clip(thresholds, clip_min, clip_max)",
"def _detect_spots(self, detector=LocalMax, **kwargs):\n if self._verbose > 0:\n print(\"Detecting...\", end=\"\")\n\n spots = detector(**kwargs).locate(self.image_filtered)\n\n # Spots are identified by their position:\n self.spots = [Spot(tuple(s)) for s in spots]\n if self._verbose > 0:\n print('%i spots detected.' % len(self.spots))",
"def get_best_sensitivity_metrics(self,\n verbose: bool = True) -> Tuple[int, int]:\n sensitivity_scores = list()\n for i in self.search_space:\n classes = self.convert_classes(threshold=i)\n tn, fp, fn, tp = confusion_matrix(self.y_true, classes).ravel()\n sensitivity = tp / (tp + fn)\n sensitivity_scores.append(sensitivity)\n best_sensitivity_score, best_sensitivity_threshold = self._get_best_metrics(\n metric_type='sensitivity_score',\n scores=sensitivity_scores,\n greater_is_better=True,\n verbose=verbose\n )\n return best_sensitivity_score, best_sensitivity_threshold",
"def evaluate(self, eye_frame, side):\n threshold = self.find_best_threshold(eye_frame)\n\n if side == 0:\n self.thresholds_left.append(threshold)\n elif side == 1:\n self.thresholds_right.append(threshold)",
"def evaluate_recall(self, candidate_boxes=None, thresholds=None,\n area='all', limit=None):\n # Record max overlap value for each gt box\n # Return vector of overlap values\n areas = {'all': 0, 'small': 1, 'medium': 2, 'large': 3,\n '96-128': 4, '128-256': 5, '256-512': 6, '512-inf': 7}\n area_ranges = [[0 ** 2, 1e5 ** 2], # all\n [0 ** 2, 32 ** 2], # small\n [32 ** 2, 96 ** 2], # medium\n [96 ** 2, 1e5 ** 2], # large\n [96 ** 2, 128 ** 2], # 96-128\n [128 ** 2, 256 ** 2], # 128-256\n [256 ** 2, 512 ** 2], # 256-512\n [512 ** 2, 1e5 ** 2], # 512-inf\n ]\n assert area in areas, 'unknown area range: {}'.format(area)\n area_range = area_ranges[areas[area]]\n gt_overlaps = np.zeros(0)\n num_pos = 0\n for i in range(self.num_images):\n # Checking for max_overlaps == 1 avoids including crowd annotations\n # (...pretty hacking :/)\n max_gt_overlaps = self.roidb[i]['gt_overlaps'].toarray().max(axis=1)\n gt_inds = np.where((self.roidb[i]['gt_classes'] > 0) &\n (max_gt_overlaps == 1))[0]\n gt_boxes = self.roidb[i]['boxes'][gt_inds, :]\n gt_areas = self.roidb[i]['seg_areas'][gt_inds]\n valid_gt_inds = np.where((gt_areas >= area_range[0]) &\n (gt_areas <= area_range[1]))[0]\n gt_boxes = gt_boxes[valid_gt_inds, :]\n num_pos += len(valid_gt_inds)\n\n if candidate_boxes is None:\n # If candidate_boxes is not supplied, the default is to use the\n # non-ground-truth boxes from this roidb\n non_gt_inds = np.where(self.roidb[i]['gt_classes'] == 0)[0]\n boxes = self.roidb[i]['boxes'][non_gt_inds, :]\n else:\n boxes = candidate_boxes[i]\n if boxes.shape[0] == 0:\n continue\n if limit is not None and boxes.shape[0] > limit:\n boxes = boxes[:limit, :]\n\n overlaps = bbox_overlaps(boxes.astype(np.float),\n gt_boxes.astype(np.float))\n\n _gt_overlaps = np.zeros((gt_boxes.shape[0]))\n for j in range(gt_boxes.shape[0]):\n # find which proposal box maximally covers each gt box\n argmax_overlaps = overlaps.argmax(axis=0)\n # and get the iou amount of coverage for each gt box\n max_overlaps = overlaps.max(axis=0)\n # find which gt box is 'best' covered (i.e. 'best' = most iou)\n gt_ind = max_overlaps.argmax()\n gt_ovr = max_overlaps.max()\n assert (gt_ovr >= 0)\n # find the proposal box that covers the best covered gt box\n box_ind = argmax_overlaps[gt_ind]\n # record the iou coverage of this gt box\n _gt_overlaps[j] = overlaps[box_ind, gt_ind]\n assert (_gt_overlaps[j] == gt_ovr)\n # mark the proposal box and the gt box as used\n overlaps[box_ind, :] = -1\n overlaps[:, gt_ind] = -1\n # append recorded iou coverage level\n gt_overlaps = np.hstack((gt_overlaps, _gt_overlaps))\n\n gt_overlaps = np.sort(gt_overlaps)\n if thresholds is None:\n step = 0.05\n thresholds = np.arange(0.5, 0.95 + 1e-5, step)\n recalls = np.zeros_like(thresholds)\n # compute recall for each iou threshold\n for i, t in enumerate(thresholds):\n recalls[i] = (gt_overlaps >= t).sum() / float(num_pos)\n # ar = 2 * np.trapz(recalls, thresholds)\n ar = recalls.mean()\n return {'ar': ar, 'recalls': recalls, 'thresholds': thresholds,\n 'gt_overlaps': gt_overlaps}",
"def Thresholds(self) :\n \n # keep pass through thresholds\n d = { }\n\n\n from Hlt2Lines.SingleMuon.Lines import SingleMuonLines\n d.update({SingleMuonLines : \n {'Prescale' : {\"Hlt2SingleMuon\" : 0.5, \n \"Hlt2SingleMuonLowPT\" : 0.1},\n 'HltReq' : {\"SingleMuon\" : \"HLT_PASS_RE('Hlt1TrackMuonDecision')\"},\n 'Common' : {'TrChi2' : 3, # Adimensional\n 'Pt' : 1300 * MeV },\n 'SingleMuon' : {'IP' : 0.0 * mm,\n 'IPChi2' : 16 }, # Adimensional\n 'HighPT' : {'HighPt' : 10000 *MeV },\n 'LowPT' : { 'HighPt' : 4800 * MeV }\n }\n })\n \n return d"
] | [
"0.61890835",
"0.5996674",
"0.59721863",
"0.5928042",
"0.58958036",
"0.5890723",
"0.5830171",
"0.58152485",
"0.580196",
"0.5703099",
"0.56979644",
"0.5694947",
"0.5664172",
"0.56454784",
"0.5606317",
"0.55890864",
"0.5572626",
"0.55543137",
"0.553795",
"0.5525567",
"0.55046827",
"0.54774255",
"0.54589057",
"0.54462826",
"0.54186124",
"0.54084206",
"0.5384854",
"0.53484505",
"0.53470296",
"0.5328415"
] | 0.62408847 | 0 |
Compute and format the spot count function for different thresholds. | def _get_spot_counts(thresholds, value_spots):
# count spots for each threshold
count_spots = np.log([np.count_nonzero(value_spots > t)
for t in thresholds])
count_spots = stack.centered_moving_average(count_spots, n=5)
    # the tail of the curve unnecessarily flattens the slope
count_spots = count_spots[count_spots > 2]
thresholds = thresholds[:count_spots.size]
return thresholds, count_spots | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def compute_detection_counts(kinds, valid_mask, aoi_mask, scene_counts):\n scene_counts = np.maximum(scene_counts, 1)\n if len(kinds):\n pairs = (kinds == 'pair_trawlers')\n singles = (kinds == 'single_trawler')\n scales = (kinds == 'pair_trawlers') * 2 + (kinds == 'single_trawler')\n aoi_pts = round((scales * (valid_mask & aoi_mask) / scene_counts).sum(), 1) \n aoi_pairs = round((pairs * (valid_mask & aoi_mask) / scene_counts).sum(), 1) \n else:\n aoi_pts = aoi_pairs = 0\n return aoi_pts, aoi_pairs",
"def gen_board_string_calc_resources_counts_points(self):\n self.calc_resources()\n self.cnt_T = self.cnt_S = self.cnt_U = self.cnt_P = self.cnt_G = self.cnt_F = self.cnt_A = self.cnt_1 = self.cnt_2 = self.cnt_3 = self.cnt_4 = self.cnt_5 = self.cnt_O = self.cnt_M = 0\n pop_norm = 0\n for i in range(20):\n b = self.b[i]\n if b == 'T':\n self.cnt_T += self.f[i]\n pop_norm += self.f[i] * 2\n elif b == 'S':\n self.cnt_S += 1\n elif b == 'U':\n self.cnt_U += 1\n elif b == 'P':\n self.cnt_P += 1\n elif b == 'G':\n self.cnt_G += 1\n pop_norm += 1\n elif b == 'F':\n self.cnt_F += 1\n elif b == 'A':\n self.cnt_A += 1\n elif b == '1':\n self.cnt_1 += 1\n pop_norm += 1\n elif b == '2':\n self.cnt_2 += 1\n elif b == '3':\n self.cnt_3 += 1\n elif b == '4':\n self.cnt_4 += 1\n pop_norm += 2\n elif b == '5':\n self.cnt_5 += 1\n elif b == 'O':\n self.cnt_O += self.f[i]\n elif b == 'M':\n self.cnt_M += 1\n if 'tvst' in args.exp:\n pop_norm += self.cnt_S\n if 'ward' in args.exp:\n pop_norm += 3\n self.cnt_total = self.cnt_T + self.cnt_S + self.cnt_U + self.cnt_P + self.cnt_G + self.cnt_F + self.cnt_A + self.cnt_1 + self.cnt_2 + self.cnt_3 + self.cnt_4 + self.cnt_5 + self.cnt_O + self.cnt_M\n self.pts_tower = self.calc_points_tower()\n self.pts_shop = self.calc_points_shop()\n self.pts_public = self.calc_points_public()\n self.pts_park = self.calc_points_park()\n self.pts_factory = self.calc_points_factory()\n self.pts_harbor = self.calc_points_harbor()\n self.pts_office = self.calc_points_office()\n self.pts_monument = self.calc_points_monument()\n self.pts_expansion = self.calc_points_expansion()\n self.pts_total = self.pts_tower + self.pts_shop + self.pts_public + self.pts_park + self.pts_factory + self.pts_harbor + self.pts_office + self.pts_monument + self.pts_expansion\n rb = [[]] * 4\n rb[0] = self.b[ 0: 5]\n rb[1] = self.b[ 5:10]\n rb[2] = self.b[10:15]\n rb[3] = self.b[15:20]\n rf = [[]] * 4\n rf[0] = self.f[ 0: 5]\n rf[1] = self.f[ 5:10]\n rf[2] = self.f[10:15]\n rf[3] = self.f[15:20]\n return '{}\\n{}\\nexpansion {}\\npop {:2} | {:2} | {:2} | {:2}\\nene {:2} | {:2} | {:2}\\n Total Towe Shop Publ Park Fact Harb Offi Monu Expa\\ncnt {:3} | {:2} | {:2} | {:2} | {:2} | {:2} | {:2} | {:2} | {:2}\\npts {:3} | {:2} | {:2} | {:2} | {:2} | {:2} | {:2} | {:2} | {:2} | {:2}'.format(pprint.pformat(rb, width=40), pprint.pformat(rf, width=20), ' '.join(sorted(args.exp)), self.popula, self.popula_used, self.popula - self.popula_used, self.popula - pop_norm, self.energy, self.energy_used, self.energy - self.energy_used, self.cnt_total, self.cnt_T, self.cnt_S, self.cnt_U, self.cnt_P + self.cnt_G, self.cnt_F + self.cnt_A, self.cnt_1 + self.cnt_2 + self.cnt_3 + self.cnt_4 + self.cnt_5, self.cnt_O, self.cnt_M, self.pts_total, self.pts_tower, self.pts_shop, self.pts_public, self.pts_park, self.pts_factory, self.pts_harbor, self.pts_office, self.pts_monument, self.pts_expansion)",
"def count():",
"def stats(detections, faces):\n vp, fp, fn, vn = 0, 0, 0, 0\n max_label = np.max(faces[:, 0])\n for i in range(max_label + 1):\n detections_i = get_label_with_index(detections, i)\n faces_i = get_label_with_index(faces, i)\n local_vp = 0\n for face in faces_i:\n found = False\n for detection in detections_i:\n if intersection_ratio(face, detection) >= 0.5:\n found = True\n break\n if found:\n vp += 1\n local_vp += 1\n else:\n fn += 1\n fp += len(detections_i) - local_vp\n\n precision = vp / (vp + fp)\n rappel = vp / (vp + fn)\n f_score = 2 * ((precision * rappel) / (precision + rappel))\n\n return precision, rappel, f_score",
"def resultCounter(detections):\n counter = 0\n for attribute, value in classIterator(detections):\n if 'crease' in attribute:\n counter += len(value)\n return counter",
"def get_correct_lap_count(self):",
"def _collect_counts(self):\n for t in self.system.keys():\n if t in self.gold:\n self.tp += 1\n else:\n self.fp += 1\n for t in self.gold.keys():\n if t not in self.system:\n self.fn += 1",
"def count_points(roi):\r\n # Performing Mean Shift Filtering\r\n shifted = cv2.pyrMeanShiftFiltering(roi, 21, 51)\r\n\r\n # Converting the image to grayscale\r\n gray = cv2.cvtColor(shifted, cv2.COLOR_BGR2GRAY)\r\n\r\n # Thresholding using Binary and OTSU\r\n thrsh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]\r\n # Using Watershed Algorithm\r\n D = ndimage.distance_transform_edt(thrsh)\r\n localMax = peak_local_max(D, indices=False, min_distance=1, labels=thrsh)\r\n markers = ndimage.label(localMax)[0]\r\n lbls = watershed(-D, markers, mask=thrsh)\r\n \r\n return lbls, len(np.unique(lbls)) - 1",
"def jumps(self, currFloor, floor):\r\n count = 0\r\n for t in self.targs:\r\n if (t > currFloor and t < floor) or (t < currFloor and t > floor):\r\n count = count + 1\r\n return count",
"def calc_score(pins_stats):\n count = 0\n new = pins_stats[:, :2] - ORIG_PINS_LOC\n for p in new:\n if np.linalg.norm(p) > R_PIN / 2:\n count += 1\n return count",
"def find_numerical_contours(counts):\n\tone_sigma_boundary = sigma_boundary(counts, 68)\n\tone_sigma = counts > one_sigma_boundary\n\ttwo_sigma_boundary = sigma_boundary(counts, 95)\n\ttwo_sigma = (counts > two_sigma_boundary) & (counts < one_sigma_boundary)\n\tthree_sigma_boundary = sigma_boundary(counts, 99)\n\tthree_sigma = (counts > three_sigma_boundary) & (counts < two_sigma_boundary)\n\n\t# Check method: Output actual percentages in each region\n\tprint('total no. samples:')\n\tprint(np.sum(counts))\n\tprint('included in 1st sigma region:')\n\tprint(np.sum(one_sigma * counts) / np.sum(counts))\n\tprint('included in 2 sigma region:')\n\tprint((np.sum(one_sigma * counts) + np.sum(two_sigma * counts)) / np.sum(counts))\n\tprint('included in 3 sigma region:')\n\tprint((np.sum(one_sigma * counts) + np.sum(two_sigma * counts) + np.sum(three_sigma * counts)) / np.sum(counts))\n\n\tfilled_numerical_contours = one_sigma * 1 + two_sigma * 2 + three_sigma * 3\n\n\treturn filled_numerical_contours",
"def create_function(self, dimensions, thresholds):\r\n def f(v):\r\n s = ''\r\n for i in range(len(dimensions)):\r\n if(float(v[dimensions[i]])>=thresholds[i]):\r\n s +='1'\r\n else:\r\n s +='0'\r\n return s\r\n raise NotImplementedError\r\n return f",
"def make_stats(mapping):\r\n stats = [\"Clustersize\\t#\"]\r\n counts = defaultdict(int)\r\n for key in mapping.keys():\r\n counts[len(mapping[key])] += 1\r\n\r\n keys = sorted(counts.keys())\r\n for key in keys:\r\n stats.append(\"%d:\\t\\t%d\" % (key + 1, counts[key]))\r\n return \"\\n\".join(stats)",
"def compute_num_tracks(x_offset: int, y_offset: int,\n x: int, y: int, track_info: Dict[int, int]):\n x_diff = x - x_offset\n y_diff = y - y_offset\n result = 0\n for length, num_track in track_info.items():\n if x_diff % length == 0 and y_diff % length == 0:\n # it's the tile\n result += num_track\n return result",
"def count_property_range_hits(prop, node_dict, hits):\n\tres = []\n\t# sets tuple position to use in dict value\n\tswitcher = {\n \"length\": (0,(0,4000,8000,12000,16000,20000)),\n \"steps\": (1,(0,2,4,8,16,32)),\n \"cov\": (2,(1,10,100,1000,10000,100000)),\n \"cv\": (3, (0,0.05,0.10,0.15,0.20,0.25))\n }\n\tif prop not in switcher:\n\t\treturn res\n\ttup_pos = switcher[prop][0]\n\tnode_cnt = 0\n\tpos_cnt = 0\n\tfor ind in range(len(switcher[prop][1])-1):\n\t\tmin_val = switcher[prop][1][ind]\n\t\tmax_val = switcher[prop][1][ind+1]\n\t\tfor node in node_dict.keys():\n\t\t\tval = node_dict[node][tup_pos]\n\t\t\tif ind < len(switcher[prop][1])-2:\n\t\t\t\trange_test_val = (min_val <= val < max_val)\n\t\t\telse:\n\t\t\t\trange_test_val = (min_val <= val <= max_val)\n\t\t\t# print \"range bool is\", range_test_val\n\t\t\tif range_test_val:\n\t\t\t\tnode_cnt += 1\n\t\t\t\tif node in hits: pos_cnt += 1\n\t\tif node_cnt > 0:\n\t\t\tres.append( (pos_cnt, node_cnt, round(float(pos_cnt)/node_cnt,2)))\n\t\telse:\n\t\t\tres.append((0,0,0))\n\t\tnode_cnt = 0\n\t\tpos_cnt = 0\n\treturn res",
"def _collect_counts(self, instance_list):\n \"\"\" Based on each instance, I augment empirical counts for every word and its BIO label in feature_count_table and for every transition from previous label to current label in transition_count_table.\n All \"rare words\" (those words that appear less than 3 times) are replaced by <UNK>.\n I also add label|START counts.\n \"\"\"\n # Build feature_count_table of V x labels and transition_count_table of labels x labels\n for instance in instance_list: # Set of <(w, pos), l>\n index = 0\n for t in instance.data: # Tuple of (w, pos)\n index = instance.data.index(t)\n # print t[0] # word\n # print instance.label[index] # label\n if t in self.V:\n self.feature_count_table[self.V.index(t)][self.labels.index(instance.label[index])] +=1\n else:\n self.feature_count_table[self.V.index('<UNK>')][self.labels.index(instance.label[index])] +=1\n if index > 0:\n self.transition_count_table[self.labels.index(instance.label[index-1])][self.labels.index(instance.label[index])] += 1\n else:\n self.transition_count_table[len(self.labels)][self.labels.index(instance.label[index])] += 1",
"def main():\n\n args = get_args()\n file_arg = args.file\n\n # print('file_arg = \"{}\"'.format(file_arg.name if file_arg else ''))\n\n result= {}\n result['<20'] = 0\n result['20-30'] = 0\n result['30-40'] = 0\n result['40-50'] = 0\n result['50-60'] = 0\n result['>=60'] = 0\n \n for line in file_arg:\n val = int(line.strip())\n if ( val < 20):\n result['<20'] += 1\n if ( val >= 20 and val < 30):\n result['20-30'] += 1\n if ( val >= 30 and val < 40):\n result['30-40'] += 1\n if ( val >= 40 and val < 50):\n result['40-50'] += 1\n if ( val >= 50 and val < 60):\n result['50-60'] += 1\n if ( val >= 60 ):\n result['>=60'] += 1\n \n \n print(result)",
"def update_cnt_map(self,s):\r\n cnts = []\r\n num_grid = self.cnt_map.shape[0]*self.cnt_map.shape[1]\r\n old_coverage =num_grid- self.cnt_map.flatten().tolist().count(0)\r\n for sj in s:\r\n grid_s = self.get_gridState(sj)\r\n self.cnt_map[grid_s[0], grid_s[1]] += 1\r\n cnts.append(self.cnt_map[grid_s[0], grid_s[1]])\r\n\r\n self.map_coverage = num_grid - self.cnt_map.flatten().tolist().count(0)\r\n print(\"Coverage:\",self.map_coverage)\r\n print(\"Change of coverage:\",self.map_coverage-old_coverage)\r\n\r\n return cnts",
"def __countPlayers(self, players):\n\n numLow = sum(map(lambda p: p.lowFps, players))\n numHigh = sum(map(lambda p: p.highFps, players))\n numMed = len(players) - numLow - numHigh\n\n return '%s, %s, %s' % (numLow, numMed, numHigh)",
"def _get_count(text, if_clause):\n return label(text, func.SUM(func.IF(if_clause, 1, 0)))",
"def _tally_limits(self, limits, elements, connections=None):\n counts = {}\n for x in limits:\n ele = elements[x]\n if self.use_coordination:\n ele += str(len(connections[x]))\n if ele not in counts:\n counts[ele] = 0\n counts[ele] += 1\n return counts",
"def compute_statistics(self):",
"def grid_point_counts(self):\n return [high-low for low, high in self._Limits]",
"def get_gini(self, rows):\n label_count = defaultdict(int)\n total_count = 0\n for row in rows:\n label = row[self.target_attribute]\n label_count[label] += 1\n total_count += 1\n return 1 - sum([np.square(float(label_count[label])/total_count) for label in label_count.keys()])",
"def _get_glow_counts(\n coord_stack: List[List[int]],\n input: List[List[int]],\n flashed: List[List[int]],\n glow_count: int = 0\n) -> Tuple[List[List[int]], int]:\n if not coord_stack:\n return input, glow_count\n\n coord = coord_stack.pop()\n row = coord[0]\n col = coord[1]\n\n if flashed and coord in flashed:\n return _get_glow_counts(coord_stack, input, flashed, glow_count)\n\n if input[row][col] < 9:\n input[row][col] += 1\n return _get_glow_counts(coord_stack, input, flashed, glow_count)\n\n # handle a flashing octo\n glow_count += 1\n input[row][col] = 0\n\n if coord not in flashed:\n flashed.append(coord)\n\n for ad in ADJACENT_DIRS.values():\n new_row = row + ad[0]\n new_col = col + ad[1]\n\n if 0 <= new_row < 10 and 0 <= new_col < 10:\n coord_stack.append([new_row, new_col])\n\n return _get_glow_counts(coord_stack, input, flashed, glow_count)",
"def _compute_thresholds(self, thresholds):\r\n thr = thresholds\r\n limit = int(1 / thresholds)\r\n thresholds = [x * thr for x in range(limit)]\r\n thresholds.append(1)\r\n return thresholds",
"def __calculate_statistics(self, candidates):\n pdf = {}\n for candidate in candidates:\n neighbors = list(self.G.neighbors(candidate))\n capacity = sum([self.G.get_edge_data(candidate, n)[\"satoshis\"] for n in neighbors])\n average = capacity / len(neighbors)\n pdf[candidate] = average\n cumsum = sum(pdf.values())\n pdf = {k:v/cumsum for k,v in pdf.items()}\n w = 0.7\n print(\"percentage smoothed percentage capacity numchannels alias\")\n print(\"----------------------------------------------------------------------\")\n res_pdf = {}\n for k,v in pdf.items():\n neighbors = list(self.G.neighbors(k))\n capacity = sum([self.G.get_edge_data(k, n)[\"satoshis\"] for n in neighbors])\n name = k\n if \"alias\" in self.G.node[k]:\n name = self.G.node[k][\"alias\"]\n print(\"{:12.2f} \".format(100*v), \"{:12.2f} \".format(100*(w * v + (1-w)/len(candidates))) ,\"{:10} {:10} \".format( capacity, len(neighbors)), name)\n res_pdf[k] = (w * v + (1-w)/len(candidates))\n return res_pdf",
"def count_ops(self, visual=None):\n from .function import count_ops\n return count_ops(self, visual)",
"def drawcntMap(orgimg,filteredimg,wThresh,hThresh):\r\n _, contour, _ = cv2.findContours(filteredimg, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\r\n\r\n cnt = cv2.drawContours(orgimg.copy(), contour, -1, (0, 0, 255), 2) # To draw filtered contours on original image\r\n\r\n digitCnts = [] # contours to be surrounded by bounding boxes\r\n\r\n for c in contour:\r\n (x, y, w, h) = cv2.boundingRect(c)\r\n if w >= wThresh and h >= hThresh and w*h <20000: # Length filters to reduce noise\r\n cv2.rectangle(cnt,(x,y),(x+w,y+h),[0,255,0],2)\r\n digitCnts.append(c)\r\n\r\n return cnt, digitCnts",
"def get_thresholds(kalpha, deltaelow, deltaehigh, maxphotons, nscatter, scatter):\n thresholds = tuple(\n [\n (float(n), float(s), n * kalpha + s * scatter - deltaelow, n * kalpha + s * scatter + deltaehigh, s * scatter)\n for s in range(nscatter + 1, -1, -1)\n for n in range(maxphotons - s + 1)\n if not (n == 0 and s == 0)\n ]\n )\n return thresholds"
] | [
"0.63846546",
"0.5974033",
"0.577967",
"0.5751799",
"0.56716186",
"0.56405896",
"0.56206876",
"0.56012255",
"0.5585708",
"0.5550997",
"0.54943913",
"0.5460852",
"0.5453262",
"0.5421152",
"0.5420449",
"0.5406624",
"0.5376833",
"0.5374169",
"0.53562254",
"0.53440434",
"0.5295686",
"0.52714866",
"0.52698755",
"0.5245526",
"0.5244298",
"0.5242521",
"0.52299696",
"0.522778",
"0.5216801",
"0.52166104"
] | 0.69019306 | 0 |
Select the x-axis value where an L-curve has a kink. Assuming an L-curve from A to B, the 'breaking_point' is the point most distant from the segment [A, B]. | def _get_breaking_point(x, y):
    # select the threshold where the curve breaks
slope = (y[-1] - y[0]) / len(y)
y_grad = np.gradient(y)
m = list(y_grad >= slope)
j = m.index(False)
m = m[j:]
x = x[j:]
y = y[j:]
if True in m:
i = m.index(True)
else:
i = -1
breaking_point = float(x[i])
return breaking_point, x, y | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def plot_k(x, k, linewidth = 1.5, \n marker = 'o', color = 'k', marker_facecolor = 'k'):\n\n # Plot the first variable\n x1 = x[0]\n plt.figure(figsize = (9,5))\n plt.plot(x1, k, c = color, lw = linewidth, marker = marker, \n mec = color, mfc = marker_facecolor)\n plt.loglog()\n plt.ylabel('$K\\'$ (Pa)')\n\n # If there is more than one dependent variable, \n # Plot also the second variable in a different figure\n try: \n x2 = x[1]\n plt.xlabel('$\\sigma$ (Pa)')\n plt.pause(0.1)\n plt.figure(figsize =(9, 5))\n plt.plot(x2, k, c = color, lw = linewidth, marker = marker, \n mec = color, mfc = marker_facecolor)\n plt.loglog()\n plt.ylabel('$K\\'$ (Pa)')\n plt.xlabel('$\\gamma$ (%)')\n except IndexError: pass",
"def d1x(self, k, x, y):\n return np.sign(x / self.spHx - self.kx[k-1]) / self.spHx * self.d1_spline(\n abs(x / self.spHx - self.kx[k-1])) * self.spline_base(abs(y / self.spHy - self.ky[k-1]))",
"def elbow_point(\n data, pipeline, kmeans_step_name='kmeans', k_range=range(1, 11), ax=None\n):\n scores = []\n for k in k_range:\n pipeline.named_steps[kmeans_step_name].n_clusters = k\n pipeline.fit(data)\n scores.append(pipeline.score(data) * -1)\n\n if not ax:\n fig, ax = plt.subplots()\n ax.plot(k_range, scores, 'bo-')\n ax.set_xlabel('k')\n ax.set_ylabel('inertias')\n ax.set_title('Elbow Point Plot')\n\n return ax",
"def x(self):\n return self._kml['x']",
"def wrapPlotsOverEdges(self):\n if not self.__selectedCurves:\n return\n wrapcurve = self.__selectedCurves[-1]\n path = self.curve_path_dict[wrapcurve]\n times = []\n xdata = numpy.array(wrapcurve.data().xData())\n ydata = numpy.array(wrapcurve.data().yData())\n # It is a spike train, x values are spike times, wrap around those\n if 'spikes' in path:\n times = xdata\n # It is a stimulus: take the leadin edges\n elif 'stim' in path:\n times = xdata[numpy.r_[False, numpy.diff(ydata) < 0].nonzero()[0]]\n else:\n ydata = analyzer.smooth(ydata)\n mid = numpy.mean(ydata)\n ydata = ydata[ydata > mid] # Threshold at midpoint\n times = xdata[numpy.r_[True, ydata[1:] > ydata[:-1]] & numpy.r_[ydata[:-1] > ydata[1:], True]]\n # start from the first edge, ignoring everything before it\n # and put end of simulation as the upper bound\n for curve in self.itemList():\n ydata = numpy.array(curve.data().yData())\n xdata = numpy.array(curve.data().xData()) \n path = self.curve_path_dict[curve]\n path_curve_list = self.path_curve_dict[path]\n path_curve_list.pop(path_curve_list.index(curve))\n self.curve_path_dict.pop(curve)\n curve.detach()\n start = 0\n end = len(xdata)\n for ii in range(-1, - len(times) - 1, -1):\n points = numpy.nonzero(xdata >= times[ii])[0]\n if len(points) == 0:\n continue\n start = points[0]\n xx = numpy.array(xdata[start:end] - times[ii])\n xdata[start:end] = -1.0\n new_curve = Qwt.QwtPlotCurve('%s #%d' % (curve.title().text(), len(times) + ii, ))\n new_curve.setData(xx, ydata[start:end])\n new_curve.setStyle(curve.style())\n new_curve.setPen(QtGui.QPen(curve.pen()))\n new_curve.setSymbol(Qwt.QwtSymbol(curve.symbol()))\n new_curve.attach(self)\n self.curve_path_dict[new_curve] = path\n self.path_curve_dict[path].append(new_curve)\n end = start \n self.replot()",
"def kx_plus_b(bottom_x, bottom_y):\r\n x_plus_delta = []\r\n y_plus_delta = []\r\n pixel_step = 1\r\n poligon_dots = 0\r\n for i in range(len(bottom_x) - 1):\r\n next_x = int(bottom_x[i])\r\n next_y = bottom_y[i]\r\n x_plus_delta.append(next_x)\r\n y_plus_delta.append(round(next_y,1))\r\n poligon_dots = poligon_dots + 1\r\n try:\r\n k = (bottom_y[i] - bottom_y[i+1])/(bottom_x[i] - bottom_x[i+1])\r\n b = bottom_y[i] - k * bottom_x[i]\r\n dots_between_edges = int((((bottom_x[i+1] - bottom_x[i])**2 + (bottom_y[i+1] - bottom_y[i])**2)**0.5) / pixel_step)\r\n X_step = (bottom_x[i+1] - bottom_x[i]) / dots_between_edges\r\n for j in range(dots_between_edges):\r\n next_x = next_x + X_step\r\n next_y = k * next_x + b\r\n x_plus_delta.append(int(next_x))\r\n y_plus_delta.append(round(next_y,1))\r\n poligon_dots = poligon_dots + dots_between_edges\r\n except:\r\n print('Расстояние между точками 0 пикселей! Пропуск точки')\r\n\r\n x_plus_delta.append(int(bottom_x[-1]))\r\n y_plus_delta.append(round(bottom_y[-1],1))\r\n poligon_dots = poligon_dots + 1\r\n return poligon_dots, x_plus_delta, y_plus_delta",
"def x_lb(self):\n pass",
"def get_starting_point(self, Otrain, Ftrain, y):\n return self.get_curve_fmin(Otrain, Ftrain, [y])\n # xx = np.linspace(np.min(Otrain), np.max(Otrain), 50)\n # scores, xx = self.compute_scores(Otrain, Ftrain, y, xx)\n # bestScore = np.max(scores)\n # Ibest = np.where(scores == bestScore)[0]\n # x = xx[Ibest[0]]\n return x",
"def find_knee(x,y):\n\n # find ranges\n if len(x) != len(y):\n raise Exception(\"bad data\")\n tot_len = len(x)\n \n \n \n # fit strait lines to both\n\n # find intercept\n knee_r = (f_top.beta[1] - f_bottom.beta[1])/(-f_top.beta[0] + f_bottom.beta[0])",
"def get_horizantal_b_reference(top_kp: Keypoint, middle_kp: Keypoint, bottom_kp: Keypoint) -> Keypoint:\n vector_a = keypoint_to_vector(top_kp, middle_kp)\n vector_c = keypoint_to_vector(bottom_kp, middle_kp)\n # NOTE: checking angle_left_opening has the same effect as to check if biscetion point is positioned to the left or right of the neck point\n angle_left_opening = np.cross(vector_a, vector_c) < 0\n return Keypoint(middle_kp.x - 100, middle_kp.y) if angle_left_opening else Keypoint(middle_kp.x + 100, middle_kp.y)",
"def kx(self, k: int) -> float:\n result = self._read_inline(f\"kx({k})\")\n return result",
"def set_starting_point(f, x_input):\n\n k = 2\n bnd1_fda = FloatDPApproximation(x_input)\n bnd2_fda = bnd1_fda - k * (f(bnd1_fda) / derivative(f, bnd1_fda))\n bnd1_float = float(str(bnd1_fda))\n bnd2_float = float(str(bnd2_fda))\n if bnd1_float < bnd2_float:\n ix = UpperInterval({cast_exact(bnd1_float): cast_exact(bnd2_float)})\n else:\n ix = UpperInterval({cast_exact(bnd2_float): cast_exact(bnd1_float)})\n\n x = cast_singleton(ix)\n x_end = FloatDPApproximation(x)\n return x_end",
"def Sharp_k_window(self, x, step=1e-2):\n return 0.5*(1.+2./np.pi*np.arctan((1.-x)/step))",
"def spline_linear(x, f, x_k, x_ki):\n A = (x_ki - x) / (x_ki - x_k)\n B = (x - x_k) / (x_ki - x_k)\n \n return A*f(x_k) + B*f(x_ki)",
"def evaluate(x, amplitude, x_break, alpha_1, alpha_2):\n\n alpha = np.where(x < x_break, alpha_1, alpha_2)\n xx = x / x_break\n return amplitude * xx ** (-alpha)",
"def _extended_discrete_xaxis(x_axis, n_points=100, eps=0.10):\n min_value = np.min(x_axis)\n max_value = np.max(x_axis)\n distance = max_value - min_value\n return np.linspace(min_value - eps * distance, max_value + eps * distance,\n num=n_points)",
"def k_nearest_neighbors(x_test, df_training, k):\n\n return np.argpartition(distance_to_each_training_point(x_test,\n df_training), k-1)[:,0:k]",
"def d2x(self, k, x, y):\n return 1 / self.spHx ** 2 * self.d2_spline(abs(x / self.spHx - self.kx[k-1])) * self.spline_base(\n abs(y / self.spHy - self.ky[k-1]))",
"def test_to_knx_min_exceeded(self):\n with pytest.raises(ConversionError):\n DPTSceneNumber.to_knx(DPTSceneNumber.value_min - 1)",
"def k_nearest(node, pt, k, result):\n if node.items:\n visit_k_nearest(node, pt, k, result)\n return 1\n else:\n dx = pt[node.cutdim] - node.cutval\n if dx <= 0:\n near = node.left\n far = node.right\n else:\n near = node.right\n far = node.left\n ct_near = k_nearest(near, pt, k, result)\n # check if we found results, \n # if we have sufficient results and the closest of these\n # is closer than the split line, we do not have to search further\n if result and len(result) >= k and pow(dx, 2) >= result[0][0]:\n return ct_near \n ct_far = k_nearest(far, pt, k, result)\n return ct_near + ct_far",
"def fillCurveLE(self):\n\t\tsel = mn.ls( sl = True, dag = True, ni = True, typ = 'nurbsCurve' )\n\t\tself.curve_le.setText( sel[0].name )",
"def lagr(self, x):\n return",
"def spline(xk, yk, xnew, order=3, kind='smoothest', conds=None):\n return spleval(splmake(xk,yk,order=order,kind=kind,conds=conds),xnew)",
"def _khlp_to_dks_on_basis(self, la):\n Sym = self._kBoundedRing.ambient()\n kB = Sym.kBoundedSubspace(self.k, t=self.t)\n Qp = Sym.hall_littlewood(t=self.t).Qp()\n ks = kB.kschur()\n return sum( Qp(ks(x)).coefficient(la) * self(x) for x in PartitionsGreatestLE(sum(la), self.k))",
"def _dks_to_khlp_on_basis(self, la):\n Sym = self._kBoundedRing.ambient()\n kB = Sym.kBoundedSubspace(self.k, t=self.t)\n Qp = Sym.hall_littlewood(t=self.t).Qp()\n ks = kB.kschur()\n kHLP = self._kBoundedRing.kHallLittlewoodP()\n return sum( ks(Qp(x)).coefficient(la) * kHLP(x) for x in PartitionsGreatestLE(sum(la), self.k))",
"def gen_kpath( atoms, lattice, Nkpts=60 ):\n #\n points = get_special_points(atoms.cell, lattice)\n paths = parse_path_string(special_paths[lattice])\n #print(paths[0])\n kpts_spec = [points[k] for k in paths[0]]\n kpts, x, Xkpt = get_bandpath(kpts_spec,atoms.cell,Nkpts)\n #\n # TODO: also return string for special k-points\" symbol\n # probably using variable `paths`.\n return kpts, x, Xkpt",
"def test_x(self):\n g = gca()\n lines = g.get_lines() \n self.assertEquals(lines[0].get_xdata().tolist(), [3, 5, 5, 3, 3])",
"def check_pointing(kd):\n # from .kiss_pointing_model import KISSPmodel\n\n kd._KissRawData__check_attributes([\"F_sky_Az\", \"F_sky_El\", \"F_tl_Az\", \"F_tl_El\", \"A_hours\", \"A_time_pps\"])\n\n fig_pointing, ax = plt.subplots()\n mask = kd.telescope_position.mask\n ax.plot(kd.F_sky_Az[~mask], kd.F_sky_El[~mask], label=\"F_sky\")\n ax.plot(kd.F_tl_Az[~mask], kd.F_tl_El[~mask], label=\"F_tl\")\n # ax.plot(*KISSPmodel().telescope2sky(kd.F_tl_Az[mask], kd.F_tl_El[mask]), label=\"F_sky computed\")\n # ax.plot(*KISSPmodel(model=\"Q1\").telescope2sky(kd.F_tl_Az[mask], kd.F_tl_El[mask]), label=\"F_sky Q1 computed\")\n\n obstime = kd.obstime[mask]\n interp_az, interp_el = kd.get_object_altaz(npoints=100)\n ax.plot(interp_az(obstime.mjd), interp_el(obstime.mjd), label=kd.source)\n ax.legend(loc=\"best\")\n ax.set_xlabel(\"Az [deg]\")\n ax.set_ylabel(\"El [deg]\")\n fig_pointing.suptitle(kd.filename)\n\n return kd, fig_pointing",
"def get_landmarks(self, sorted_cut_endo_pts, lowest_pt_idx, display_opt):\n\n # make polydata out of sorted endo pts\n numPoints = sorted_cut_endo_pts.shape[0]\n vtk_float_arr = numpy_support.numpy_to_vtk(num_array=np.asarray(sorted_cut_endo_pts), deep=True, array_type=vtk.VTK_FLOAT)\n vtkpts = vtk.vtkPoints()\n vtkpts.SetData(vtk_float_arr)\n cut_endo_poly = vtk.vtkPolyData()\n cut_endo_poly.SetPoints(vtkpts)\n\n # now make lines\n polyLine = vtk.vtkPolyLine()\n polyLine.GetPointIds().SetNumberOfIds(numPoints)\n\n for i in range(numPoints):\n polyLine.GetPointIds().SetId(i, i) # from 0,1 then 2,3 then 4,5 ...\n\n cells = vtk.vtkCellArray()\n cells.InsertNextCell(polyLine)\n\n # add points and lines to polydata container\n cut_endo_poly.SetLines(cells)\n\n # create tree for intersection process\n bspTree = vtk.vtkModifiedBSPTree() # bsp tree is much faster than obbtree due to rejection test\n bspTree.SetDataSet(cut_endo_poly)\n bspTree.BuildLocator()\n\n top_left = np.asarray(sorted_cut_endo_pts[0])\n top_right = np.asarray(sorted_cut_endo_pts[-1])\n low_pt = np.asarray(sorted_cut_endo_pts[lowest_pt_idx])\n\n # get direction of lines\n line_dir = normalize(top_right - top_left) # top_pt[0] to top_pt[1]\n\n # add distance on both sides to make sure the line can pass through the entire LV horizontally\n dist = np.linalg.norm(top_right - top_left)\n pSource_0 = top_right + dist*line_dir\n pTarget_0 = top_left - dist*line_dir\n\n # determine the length to travel from top to bottom\n top_center = (top_right + top_left)/2.0\n midline = normalize(low_pt - top_center)\n max_dist = np.linalg.norm(low_pt - top_center)\n\n left_pts = []\n right_pts = []\n\n weights = np.linspace(0.00, 0.98, self.numSamples)\n\n for i in range(self.numSamples):\n # determine source and target points\n pSource = pSource_0 + weights[i]*max_dist*midline\n pTarget = pTarget_0 + weights[i]*max_dist*midline\n center = (pSource + pTarget) / 2.0\n\n # set empty variables\n subId = vtk.mutable(0)\n pcoords = [0, 0, 0]\n t = vtk.mutable(0)\n left = [0, 0, 0]\n right = [0, 0, 0]\n\n # # run interesect command\n # pointid1 = bspTree.IntersectWithLine(pSource, pTarget, 0.001, t, left, pcoords, subId)\n # pointid2 = bspTree.IntersectWithLine(pTarget, pSource, 0.001, t, right, pcoords, subId)\n\n # intersect with line that goes from source to center or target to center\n pointid1 = bspTree.IntersectWithLine(pSource, center, 0.001, t, left, pcoords, subId)\n pointid2 = bspTree.IntersectWithLine(pTarget, center, 0.001, t, right, pcoords, subId)\n\n left_pts.append(list(left))\n right_pts.append(list(right))\n\n if display_opt:\n mapper = vtk.vtkPolyDataMapper()\n mapper.SetInputData(cut_endo_poly)\n\n all_act = vtk.vtkActor()\n all_act.SetMapper(mapper)\n\n right_act = include_points(left_pts, len(left_pts), 4, (1,0,0))\n left_act = include_points(right_pts, len(right_pts), 4, (1,0,0))\n low_pt_act = include_points(list(low_pt), 1, 10, (1,0,1))\n\n top_right_act = include_points(list(top_right), 1, 10, (0,0,1))\n top_left_act = include_points(list(top_left), 1, 10, (0,0,1))\n\n ren = vtk.vtkRenderer()\n ren.AddActor(all_act)\n ren.AddActor(right_act)\n ren.AddActor(left_act)\n ren.AddActor(top_right_act)\n ren.AddActor(top_left_act)\n ren.AddActor(low_pt_act)\n\n vtk_show(ren)\n\n # ensure that left and right points have the same number of points as numSamples\n if len(left_pts) != self.numSamples or len(right_pts) != self.numSamples:\n print('Either left or right points do not have the same number of points as numSamples!')\n\n 
return left_pts, right_pts",
"def get_horizontal_points_interp(self, sorted_cut_endo_pts, lowest_pt_idx, display_opt):\n # assign special pts\n top_left = np.asarray(sorted_cut_endo_pts[0])\n top_right = np.asarray(sorted_cut_endo_pts[-1])\n low_pt = np.asarray(sorted_cut_endo_pts[lowest_pt_idx])\n\n\n # make polydata out of sorted endo pts\n numPoints = sorted_cut_endo_pts.shape[0]\n vtk_float_arr = numpy_support.numpy_to_vtk(num_array=np.asarray(sorted_cut_endo_pts), deep=True, array_type=vtk.VTK_FLOAT)\n vtkpts = vtk.vtkPoints()\n vtkpts.SetData(vtk_float_arr)\n\n # add the basal points that connect from top_right to top_left\n basal_pts = getEquidistantPoints(top_left, top_right, 100, 1, 1)\n num_basal_pts = len(basal_pts)\n for basal_pt in basal_pts:\n vtkpts.InsertNextPoint(basal_pt)\n\n # now make lines\n polyLine = vtk.vtkPolyLine()\n polyLine.GetPointIds().SetNumberOfIds(numPoints + num_basal_pts)\n\n for i in range(numPoints + num_basal_pts):\n polyLine.GetPointIds().SetId(i, i)\n\n cells = vtk.vtkCellArray()\n cells.InsertNextCell(polyLine)\n\n # add points and lines to polydata container\n cut_endo_poly = vtk.vtkPolyData()\n cut_endo_poly.SetPoints(vtkpts)\n cut_endo_poly.SetLines(cells)\n\n # create tree for intersection process\n bspTree = vtk.vtkModifiedBSPTree() # bsp tree is much faster than obbtree due to rejection test\n bspTree.SetDataSet(cut_endo_poly)\n bspTree.BuildLocator()\n\n # get horizontal direction (cross product between vertical and normal)\n top_center = (top_left + top_right) / 2.0\n vertical_vec = normalize(top_center - low_pt)\n normal = find_plane_eq(top_left, top_right, low_pt)[:3]\n horizontal_vec = normalize(np.cross(normalize(normal), vertical_vec))\n\n # add distance on both sides to make sure the line can pass through the entire LV horizontally\n dist = np.linalg.norm(top_right - top_left)\n pSource_0 = low_pt + dist*horizontal_vec\n pTarget_0 = low_pt - dist*horizontal_vec\n\n # determine the length to travel from top to bottom\n max_dist = np.linalg.norm(low_pt - top_center)\n\n left_pts = []\n right_pts = []\n\n weights = np.linspace(0.02, 0.99, self.numSamples)\n\n for i in range(self.numSamples):\n # determine source and target points\n pSource = pSource_0 + weights[i]*max_dist*vertical_vec\n pTarget = pTarget_0 + weights[i]*max_dist*vertical_vec\n center = (pSource + pTarget) / 2.0\n\n # set empty variables\n subId = vtk.mutable(0)\n pcoords = [0, 0, 0]\n t = vtk.mutable(0)\n left = [0, 0, 0]\n right = [0, 0, 0]\n\n # # run interesect command\n # pointid1 = bspTree.IntersectWithLine(pSource, pTarget, 0.001, t, left, pcoords, subId)\n # pointid2 = bspTree.IntersectWithLine(pTarget, pSource, 0.001, t, right, pcoords, subId)\n\n # intersect with line that goes from source to center or target to center\n pointid1 = bspTree.IntersectWithLine(pSource, center, 0.001, t, left, pcoords, subId)\n pointid2 = bspTree.IntersectWithLine(pTarget, center, 0.001, t, right, pcoords, subId)\n\n if display_opt:\n rightact = include_points(list(top_right), 1, 10, (0,1,0))\n leftact = include_points(list(top_left), 1, 10, (0,1,0))\n lowptact = include_points(list(low_pt), 1, 10, (0,1,0))\n psourceact = include_points(list(pSource), 1, 10, (1,0,0))\n ptargetact= include_points(list(pTarget), 1, 10, (1,0,0))\n left_found = include_points(list(left), 1, 10, (1,1,0))\n right_found = include_points(list(right), 1, 10, (1,1,0))\n\n mapper = vtk.vtkPolyDataMapper()\n mapper.SetInputData(cut_endo_poly)\n act = vtk.vtkActor()\n act.SetMapper(mapper)\n ren = vtk.vtkRenderer()\n ren.AddActor(act)\n 
ren.AddActor(psourceact)\n ren.AddActor(ptargetact)\n ren.AddActor(rightact)\n ren.AddActor(leftact)\n ren.AddActor(lowptact)\n ren.AddActor(left_found)\n ren.AddActor(right_found)\n vtk_show(ren)\n\n\n if pointid1 + pointid2 == 2: # i.e. pointid = 1 and pointid2 = 1\n left_pts.append(list(left))\n right_pts.append(list(right))\n\n # display purposes #\n # 1.a actor for all left and right pts\n mapper = vtk.vtkPolyDataMapper()\n mapper.SetInputData(cut_endo_poly)\n\n all_act = vtk.vtkActor()\n all_act.SetMapper(mapper)\n\n left_act = include_points(left_pts, len(left_pts), 4, (1,0,0))\n right_act = include_points(right_pts, len(right_pts), 4, (1,0,0))\n low_pt_act = include_points(list(low_pt), 1, 10, (1,0,1))\n\n # 2.a now add horizontal lines\n VTK_horiz_all = vtk.vtkPoints()\n\n for i in range(len(left_pts)):\n VTK_horiz_all.InsertNextPoint(left_pts[i])\n VTK_horiz_all.InsertNextPoint(right_pts[i])\n\n lineArray = vtk.vtkCellArray()\n\n for i in range(len(left_pts)):\n line = vtk.vtkLine()\n line.GetPointIds().SetId(0, i + i) # 0, 2, 4, 6 ,..\n line.GetPointIds().SetId(1, i + i + 1) # 1, 3, 5, 7,...\n lineArray.InsertNextCell(line)\n\n # 2.b create polydata\n polyLine = vtk.vtkPolyData()\n\n # 2.c add points and lines to polydata container\n polyLine.SetPoints(VTK_horiz_all)\n polyLine.SetLines(lineArray)\n\n lineMapper = vtk.vtkPolyDataMapper()\n lineMapper.SetInputData(polyLine)\n\n lineActor = vtk.vtkActor()\n lineActor.SetMapper(lineMapper)\n lineActor.GetProperty().SetColor(0, 0, 1)\n lineActor.GetProperty().SetLineWidth(2)\n\n # 3.a also add one more (line to represent vertical direction, to show perpendicular)\n long_axis_array = vtk.vtkCellArray()\n long_axis = vtk.vtkLine()\n long_axis.GetPointIds().SetId(0, 0)\n long_axis.GetPointIds().SetId(1, 1)\n long_axis_array.InsertNextCell(long_axis)\n\n long_axis_pts = vtk.vtkPoints()\n long_axis_pts.InsertNextPoint(top_center)\n long_axis_pts.InsertNextPoint(low_pt)\n\n # 3.b create actor for long axis line\n long_axis_polydata = vtk.vtkPolyData()\n\n # 2.c add points and lines to polydata container\n long_axis_polydata.SetPoints(long_axis_pts)\n long_axis_polydata.SetLines(long_axis_array)\n\n la_mapper = vtk.vtkPolyDataMapper()\n la_mapper.SetInputData(long_axis_polydata)\n\n la_act = vtk.vtkActor()\n la_act.SetMapper(la_mapper)\n la_act.GetProperty().SetColor(0, 0, 1)\n la_act.GetProperty().SetLineWidth(2)\n\n ren = vtk.vtkRenderer()\n ren.AddActor(all_act)\n ren.AddActor(right_act)\n ren.AddActor(left_act)\n ren.AddActor(low_pt_act)\n ren.AddActor(lineActor)\n ren.AddActor(la_act)\n\n if display_opt:\n vtk_show(ren)\n\n # ensure that left and right points have the same number of points as numSamples\n if len(left_pts) != self.numSamples or len(right_pts) != self.numSamples:\n print('Either left or right points do not have the same number of points as numSamples!')\n\n return left_pts, right_pts, ren"
] | [
"0.5472993",
"0.53096104",
"0.5299262",
"0.5272989",
"0.5262331",
"0.5251701",
"0.52380073",
"0.5229801",
"0.5160584",
"0.51584435",
"0.50303984",
"0.50249135",
"0.5024208",
"0.499612",
"0.4986702",
"0.49020177",
"0.48961875",
"0.4894596",
"0.4867856",
"0.48623985",
"0.48400655",
"0.4835801",
"0.48113355",
"0.4806073",
"0.48004344",
"0.47917292",
"0.4791323",
"0.4789061",
"0.4787602",
"0.47807473"
] | 0.61048186 | 0 |
Basic function to make email messages RFC2822 2.1.1 compliant (line length limit). Splits the message at {max_line_length} (default 900). | def email_rfc2822_compliance(message, max_line_length=900):
returnmsg = ""
while len(message) > 0:
returnmsg = returnmsg + message[:max_line_length] + "\r\n"
message = message[max_line_length:]
return returnmsg | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def split_message(message, max_length):\n ms = []\n while len(message) > max_length:\n ms.append(message[:max_length])\n message = message[max_length:]\n ms.append(message)\n return ms",
"def split_message(text, max_length=640):\n res = []\n sub_message = ''\n sentences = split_into_sentences(text)\n for sentence in sentences:\n new_sub_message = sub_message + ' ' + sentence if sub_message else sentence\n if len(sentence) > max_length:\n res.extend(split_by_spaces(sentence, max_length))\n elif len(new_sub_message) > max_length:\n if len(sub_message) > 0:\n res.append(sub_message)\n sub_message = sentence\n else:\n sub_message = new_sub_message\n if len(sub_message) > 0:\n res.append(sub_message)\n return res",
"def chunk_split(cls, text):\n parts = []\n current = []\n for line in text.splitlines():\n size = sum(len(part) + 1 for part in current)\n extra = len(line)\n if size + extra >= 2000:\n if current:\n # The message is full, split here.\n parts.append(\"\\n\".join(current))\n current.clear()\n if extra >= 2000:\n # The line itself is too long, split on whitespace instead.\n *lines, line = wrap(line, 2000, expand_tabs=False, replace_whitespace=False)\n parts.extend(lines)\n current.append(line)\n if current:\n parts.append(\"\\n\".join(current))\n return parts",
"def chunkify(msg):\n return [\"%s %s\" % (i, msg[i*158 : (i+1)*158]) for i in range(len(msg)/158 + 1)]",
"def get_sendable_message(text, max_length=400):\n unicode_max_length = max_length\n excess = ''\n\n while len(text.encode('utf-8')) > max_length:\n last_space = text.rfind(' ', 0, unicode_max_length)\n if last_space == -1:\n # No last space, just split where it is possible\n excess = text[unicode_max_length:] + excess\n text = text[:unicode_max_length]\n # Decrease max length for the unicode string\n unicode_max_length = unicode_max_length - 1\n else:\n # Split at the last best space found\n excess = text[last_space:] + excess\n text = text[:last_space]\n\n return text, excess.lstrip()",
"def test_newlinesBeforeLineBreaking(self):\n # Because MAX_COMMAND_LENGTH includes framing characters, this long\n # line is slightly longer than half the permissible message size.\n longline = \"o\" * (irc.MAX_COMMAND_LENGTH // 2)\n\n self.client.msg(\"foo\", longline + \"\\n\" + longline)\n self.assertEqual(\n self.client.lines, [\"PRIVMSG foo :\" + longline, \"PRIVMSG foo :\" + longline]\n )",
"def test_lineBreakOnWordBoundaries(self):\n # Because MAX_COMMAND_LENGTH includes framing characters, this long\n # line is slightly longer than half the permissible message size.\n longline = \"o\" * (irc.MAX_COMMAND_LENGTH // 2)\n\n self.client.msg(\"foo\", longline + \" \" + longline)\n self.assertEqual(\n self.client.lines, [\"PRIVMSG foo :\" + longline, \"PRIVMSG foo :\" + longline]\n )",
"def test_splitLongMessagesWithOverride(self):\n message = \"o\" * (irc.MAX_COMMAND_LENGTH - 2)\n self.assertLongMessageSplitting(message, 3, length=irc.MAX_COMMAND_LENGTH // 2)",
"def test_splitLongMessagesWithDefault(self):\n message = \"o\" * (irc.MAX_COMMAND_LENGTH - 2)\n self.assertLongMessageSplitting(message, 2)",
"def limit_size(msg, max_size, trunc_symbol=\"...\"):\n if len(msg) > max_size:\n msg = msg[:max_size - len(trunc_symbol)] + trunc_symbol\n return msg",
"def line_wrap(text, max_length = 80):\n output = []\n while text.__len__() >= max_length:\n split = text.rfind(' ', 0, max_length - 1)\n output.append(text[:split])\n text = text[split + 1:]\n\n return output",
"def wrap_lines(msg: str) -> str:\n lines = msg.splitlines()\n fixed_l = []\n\n for line in lines:\n fixed_l.append(textwrap.fill(\n line,\n 80,\n break_long_words=False,\n break_on_hyphens=False))\n\n return '\\n'.join(fixed_l)",
"def shorten_lines(string, lineLen, maxLines):\n lines = string.split(\"\\n\")\n lineCount = 0\n newLines = \"\"\n for line in lines:\n length = int(len(line) / lineLen) if len(line) > lineLen else 1\n if len(line) > length * lineLen:\n length += 1\n lineCount += length\n if lineCount > maxLines:\n break\n newLines += \"{}\\n\".format(line)\n return newLines",
"def step_impl_the_msg_to_is_set_too_long(context):\n context.bdd_helper.message_data[\"msg_to\"][0] = \"x\" * (constants.MAX_TO_LEN + 1)",
"def split_large_text(text, length=4096):\n\n text = str(text)\n\n yield text[0: length]\n\n for i in range(length, len(text), length):\n yield text[i: i + length]",
"def wrap_message(message, width=MAX_LINE_LENGTH):\r\n lines = message.split('\\n')\r\n wrapped_lines = [textwrap.fill(\r\n line, width, expand_tabs=False, replace_whitespace=False, drop_whitespace=False, break_on_hyphens=False\r\n ) for line in lines]\r\n wrapped_message = '\\n'.join(wrapped_lines)\r\n\r\n return wrapped_message",
"def splitLine(string, overflow=70):\n w=[]\n n=len(string)\n for i in range(0,n,overflow):\n w.append(string[i:i+overflow])\n return w",
"def limit_max_len(data, indentation, max_length=MAX_LENGTH): \n buf = ''\n while len(data) > MAX_LENGTH:\n idx = data.rfind(' ', 0, MAX_LENGTH)\n buf += '%s\\n%s' % (data[:idx], indentation)\n data = data[idx+1:]\n else:\n buf += data\n return buf",
"def test_long_message(self):\n message = \"few characters\"\n message_displayed = truncate_message(message, limit=5)\n\n self.assertLessEqual(len(message_displayed), 5)\n self.assertEqual(message_displayed, \"fe...\")",
"def format_MT103_field70(self, val, character_limit, n_lines):\n text = []\n val = val.split('newline')\n for values in val:\n line = FSwiftWriterUtils.split_text_on_character_limit(values, character_limit)\n text.append(line)\n text = '\\n'.join(str(i) for sub_list in text for i in sub_list)\n text = '\\n'.join(text.split('\\n')[:n_lines])\n return text",
"def _string_to_chunks(text, **kwargs):\n text_limit = kwargs.get('text_limit', 1024)\n lines = \"\"\n for line in text:\n if len(lines) + len(line) < text_limit:\n lines += line\n else:\n yield lines\n lines = line[0:text_limit]\n else:\n yield lines",
"def test_sufficientWidth(self):\n msg = \"barbazbo\"\n maxLen = len(\"PRIVMSG foo :{}\".format(msg)) + 2\n self.client.msg(\"foo\", msg, maxLen)\n self.assertEqual(self.client.lines, [\"PRIVMSG foo :{}\".format(msg)])\n self.client.lines = []\n self.client.msg(\"foo\", msg, maxLen - 1)\n self.assertEqual(2, len(self.client.lines))\n self.client.lines = []\n self.client.msg(\"foo\", msg, maxLen + 1)\n self.assertEqual(1, len(self.client.lines))",
"def splitLine(value, lineLength = 30 ):\n \n \"wrap at first delimiter left of size\" \n wraplines = []\n \n \n line = ''\n for word in value.split(' '):\n if len(line) + len(word) <= lineLength:\n line += word + ' '\n else:\n wraplines.append(line)\n line = word + ' '\n wraplines.append(line)\n\n return mark_safe('<br />'.join(wraplines))",
"def generate_email(self, length=10):\n self.update()\n # Generate the emails\n lines = []\n line_count = 0\n while line_count < length:\n sent = self.markov.make_sentence()\n if sent != None:\n lines.append(sent)\n line_count +=1\n else:\n print(sent)\n email_body = '\\n'.join(lines)\n email_header = random.choice(get_best_trigrams(filter(email_body),5))\n return email_header.decode('utf-8').strip() + \"\\n\" + email_body.decode('utf-8').strip()",
"def chunk_str(str_def):\n if len(str_def) > 15:\n str_split = str_def.split()\n str_def = \"\\n\".join([\" \".join(str_split[i:i + 15]) for i in range(0, len(str_split), 15)])\n return str_def",
"def ssplit(str_, length=420):\n buf = list()\n for line in str_.split('\\n'):\n buf.extend(textwrap.wrap(line.rstrip('\\r'), length))\n return buf",
"def split_txt_multiline(data: str) -> list[str]:\n limit = 255\n\n items = []\n data2 = data\n while len(data2) > limit:\n items.append(f'\"{data2[:limit]}\"')\n data2 = data2[limit:]\n items.append(f'\"{data2}\"')\n\n return items",
"def split_text(text, max_length, recursive_until=None, step=10):\n if len(text) <= max_length:\n return [text]\n breaks = [i for i in re.finditer(' |\\n|\\:|\\:|\\,|\\,|\\﹐|\\。|\\ㄧ|\\?|\\?|\\!|\\!|\\;|\\;|\\、|\\.', text)]\n segments = []\n start_offset = 0\n for k, p in enumerate(breaks):\n if p.end() - start_offset > max_length:\n start = start_offset\n end = breaks[k-1].end()\n segment = text[start:end]\n start_offset = breaks[k-1].end()\n segments.append(segment)\n\n if segments == []:\n if len(breaks) == 0:\n if len(text) < max_length:\n return [text]\n else:\n return [text[:recursive_until]]\n else:\n mid = len(breaks)//2\n segments = [text[:breaks[mid-1].end()], text[breaks[mid-1].end():]]\n\n if segments == []:\n raise Exception(f'something is wrong \\n{max_length}\\n{text}')\n\n for segment in segments:\n if len(segment) > max_length:\n if recursive_until:\n if max_length+step < recursive_until:\n return split_text(text, max_length+step, recursive_until=recursive_until)\n else:\n return [text[:recursive_until]]\n # raise Exception(f'splitted segment is larger than recursive limit {recursive_until}\\n{segment}\\n{text}')\n else:\n raise Exception(f'splitted segment is larger than {max_length}\\n{segment}\\n{text}')\n return segments",
"def split_text(text: Union[str, List], max_size: int = 2000, delimiter: str = \"\\n\") -> List[str]:\n delim_length = len(delimiter)\n\n if isinstance(text, str):\n if len(text) < max_size:\n return [text]\n text = text.split(delimiter)\n else:\n if sum(len(i) for i in text) < max_size:\n return [\"\\n\".join(text)]\n\n output = []\n tmp_str = \"\"\n count = 0\n for fragment in text:\n fragment_length = len(fragment) + delim_length\n if fragment_length > max_size:\n raise ValueError(\"A single line exceeded the max length. Can not split!\") # TODO: Find a better way than throwing an error.\n if count + fragment_length > max_size:\n output.append(tmp_str)\n tmp_str = \"\"\n count = 0\n\n count += fragment_length\n tmp_str += f\"{fragment}{delimiter}\"\n\n output.append(tmp_str)\n\n return output",
"def create_formatted_long_journal(long_journal):\n output = ''\n\n for index, line in enumerate(long_journal):\n while len(line) > 76:\n edge = 75\n while line[edge] != ' ':\n edge -= 1\n output += line[0:edge].strip()\n line = line[edge:]\n output += '\\n'\n output += line.strip() + '\\n\\n'\n\n return output"
] | [
"0.6790232",
"0.6611094",
"0.6376905",
"0.61982185",
"0.6119895",
"0.5845836",
"0.5761279",
"0.5732705",
"0.56816584",
"0.5646598",
"0.55363816",
"0.55151194",
"0.5499898",
"0.5486851",
"0.54598796",
"0.5451781",
"0.54024404",
"0.5397445",
"0.5381521",
"0.536209",
"0.53542113",
"0.5326229",
"0.5322062",
"0.52880704",
"0.52587265",
"0.5210271",
"0.5205008",
"0.517592",
"0.51674336",
"0.5135834"
] | 0.7511238 | 0 |
Validates multiple predictions at a time for a specified date range. predictions = list of dataframes with predictions (submissions) test_source = original data to compare with start = starting date in format "%Y%m%d" end = ending date in format "%Y%m%d" summary_df = regions summary information dataframe key = column with region/country codes name_col = column with region/country names custom_ids = None or list of strings with distinct team ids | def get_validation_results(
predictions,
test_source,
start,
end,
summary_df,
key="region",
name_col="csse_province_state",
custom_ids=None,
):
fixed_predictions = []
for prediction_df in predictions:
preds = prediction_df.copy()
preds["geoname_code"] = preds[key].apply(
lambda x: summary_df.loc[x, "geoname_code"]
)
preds["region_name"] = preds[key].apply(lambda x: summary_df.loc[x, name_col])
preds = preds.query(f'date >= "{start}" & date <= "{end}"').set_index(
["region", "date"]
)
fixed_predictions.append(preds)
test_source["date"] = pd.to_datetime(test_source.date).dt.strftime("%Y-%m-%d")
true_values = (
test_source.query(f'date >= "{start}" & date <= "{end}"')
.reset_index()
.set_index(["region", "date"])
)
scores = pd.concat(
[collect_scores(true_values, preds) for preds in fixed_predictions], 1
)
if custom_ids is None:
scores.columns = [f"source_{x}" for x in range(len(scores.columns))]
else:
scores.columns = custom_ids
return scores | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def run(self, orig_target_df):\n\n # For each fold\n for fold_idx, (fold_training_set_df, fold_testing_set_df, fold_target_df, fold_truth_df) in enumerate(self._generate_validation_fold()):\n train_test_date_split = fold_training_set_df[\"date\"].max()\n eval_start_date = train_test_date_split - timedelta(days = self.test_nb_days)\n date_to_predict = train_test_date_split + timedelta(days = 1)\n print(\"Warning: date_to_predict offset should be computed dynamically. Currently fixed to 1.\")\n\n # For each prediction method\n for process, process_name in zip(self.process_lst, self.process_names_lst):\n print(\"Running validation for process:\", process_name, \"on fold:\", fold_idx, \"...\")\n\n # Train the model\n with open(self.data_cache_path_str + \"data_bkp.pkl\", \"wb\") as f:\n pickle.dump((fold_training_set_df, fold_testing_set_df, fold_target_df, fold_truth_df), f)\n\n y_train = fold_target_df[\"demand\"].reset_index(drop = True)\n model = process(train_test_date_split, eval_start_date)\n model.fit(fold_training_set_df, y_train)\n\n # Generate predictions for validation set\n preds = model.predict(fold_testing_set_df, date_to_predict)\n preds[\"demand\"] = (orig_target_df[\"shifted_demand\"] + preds[\"demand\"]).apply(np.expm1)\n\n # Score the predictions\n preds2 = preds.copy()\n preds2.columns = [\"id\", \"date\", \"preds\"]\n preds_rmse_by_date_df = preds2.merge(fold_truth_df, how = \"left\", on = [\"id\", \"date\"])\n preds_rmse_by_date_df = preds_rmse_by_date_df[[\"date\", \"preds\", \"demand\"]].groupby(\"date\").apply(lambda x: self._rmse(x[\"demand\"], x[\"preds\"])).reset_index()\n preds_rmse_by_date_df.columns = [\"date\", \"preds_rmse\"]\n\n best_preds_piv = preds[[\"id\", \"date\", \"demand\"]].pivot(index = \"id\", columns = \"date\", values = \"demand\").reset_index()\n truth_piv = fold_truth_df[[\"id\", \"date\", \"demand\"]].pivot(index = \"id\", columns = \"date\", values = \"demand\").reset_index()\n truth_piv.set_index(\"id\", inplace = True)\n best_preds_piv.set_index(\"id\", inplace = True)\n best_preds_piv.columns = [\"F\" + str(i) for i in range(1, 29)]\n truth_piv.columns = [\"F\" + str(i) for i in range(1, 29)]\n validation_WRMSSE = round(model.evaluator.wrmsse(best_preds_piv, truth_piv, score_only = True), 6)\n\n # Save result for later use\n self.scores[process_name].append((fold_idx, preds_rmse_by_date_df, validation_WRMSSE))\n \n if self.verbose == True: \n print(process_name, \"had a score of\", validation_WRMSSE, \"on validation period\", fold_testing_set_df[\"date\"].min(), \"to\", fold_testing_set_df[\"date\"].max())\n\n metrics_lst = []\n for process_name, content in self.scores.items():\n for fold_idx, preds_rmse_by_date_df, validation_WRMSSE in content:\n preds_rmse_by_date_df[\"process_name\"] = process_name\n preds_rmse_by_date_df[\"fold_idx\"] = fold_idx\n preds_rmse_by_date_df[\"WRMSSE\"] = validation_WRMSSE\n metrics_lst.append(preds_rmse_by_date_df)\n\n metrics_df = pd.concat(metrics_lst, axis = 0)\n metrics_df.set_index(\"date\", inplace = True)\n\n return metrics_df",
"def _violations(\n self,\n total_violations: bool = False,\n timezone: str = \"\",\n start_date_range: str = None,\n end_date_range: str = None,\n **_,\n ):\n outputs: dict = {}\n\n # List of properties needed by the plot, properties are a set of tuples and\n # contain 3 parts: required True/False, property name and scenarios required,\n # scenarios must be a list.\n properties = [(True, \"line_Violation\", self.Scenarios)]\n\n # Runs get_formatted_data within PlotDataStoreAndProcessor to populate PlotDataStoreAndProcessor dictionary\n # with all required properties, returns a 1 if required data is missing\n check_input_data = self.get_formatted_data(properties)\n\n if 1 in check_input_data:\n return MissingInputData()\n\n for zone_input in self.Zones:\n logger.info(f\"Zone = {zone_input}\")\n scenario_df_list = []\n\n for scenario in self.Scenarios:\n logger.info(f\"Scenario = {str(scenario)}\")\n\n if self.AGG_BY == \"zone\":\n lines = self.meta.zone_lines(scenario)\n else:\n lines = self.meta.region_lines(scenario)\n\n line_v = self[\"line_Violation\"].get(scenario)\n\n if pd.notna(start_date_range):\n line_v = set_timestamp_date_range(\n line_v, start_date_range, end_date_range\n )\n if line_v.empty is True:\n logger.warning(\"No data in selected Date Range\")\n continue\n\n line_v = line_v.reset_index()\n viol = line_v.merge(lines, on=\"line_name\", how=\"left\")\n\n if self.AGG_BY == \"zone\":\n viol = viol.groupby([\"timestamp\", \"zone\"]).sum()\n else:\n viol = viol.groupby([\"timestamp\", self.AGG_BY]).sum()\n\n one_zone = viol.xs(zone_input, level=self.AGG_BY)\n one_zone = one_zone.rename(columns={\"values\": scenario})\n one_zone = (\n one_zone.abs()\n ) # We don't care the direction of the violation\n scenario_df_list.append(one_zone)\n\n all_scenarios = pd.concat(scenario_df_list, axis=1)\n\n # remove columns that are all equal to 0\n all_scenarios = all_scenarios.loc[:, (all_scenarios != 0).any(axis=0)]\n\n if all_scenarios.empty:\n outputs[zone_input] = MissingZoneData()\n continue\n\n unitconversion = self.capacity_energy_unitconversion(\n all_scenarios, self.Scenarios\n )\n all_scenarios = all_scenarios / unitconversion[\"divisor\"]\n\n data_table_out = all_scenarios.add_suffix(f\" ({unitconversion['units']})\")\n\n mplt = PlotLibrary()\n fig, ax = mplt.get_figure()\n\n if total_violations is True:\n all_scenarios_tot = all_scenarios.sum()\n\n # Set x-tick labels\n if self.custom_xticklabels:\n tick_labels = self.custom_xticklabels\n else:\n tick_labels = all_scenarios_tot.index\n mplt.barplot(\n all_scenarios_tot,\n color=self.color_list,\n stacked=False,\n custom_tick_labels=tick_labels,\n )\n else:\n for column in all_scenarios:\n mplt.lineplot(\n all_scenarios, column, color=self.color_list, label=column\n )\n ax.margins(x=0.01)\n mplt.set_subplot_timeseries_format(minticks=6, maxticks=12)\n ax.set_xlabel(timezone, color=\"black\", rotation=\"horizontal\")\n mplt.add_legend()\n\n if plot_data_settings[\"plot_title_as_region\"]:\n mplt.add_main_title(zone_input)\n ax.set_ylabel(\n f\"Line violations ({unitconversion['units']})\",\n color=\"black\",\n rotation=\"vertical\",\n )\n\n outputs[zone_input] = {\"fig\": fig, \"data_table\": data_table_out}\n return outputs",
"def fill_testing_dates(self):\r\n \r\n now = datetime.now()\r\n month = now.strftime('%m')\r\n year = now.year \r\n most_recent_date = '{}-{}-01'.format(year, month)\r\n self.testing_dates[1] = {'cv_start': '1972-01-01', \r\n 'cv_end': '1975-12-01', \r\n 'pred_start': '1976-01-01',\r\n 'pred_end': '1981-07-01'}\r\n self.testing_dates[2] = {'cv_start': '1976-01-01', \r\n 'cv_end': '1981-07-01', \r\n 'pred_start': '1981-08-01',\r\n 'pred_end': '1983-07-01'}\r\n self.testing_dates[3] = {'cv_start': '1976-01-01', \r\n 'cv_end': '1983-07-01', \r\n 'pred_start': '1983-08-01',\r\n 'pred_end': '1992-12-01'}\r\n self.testing_dates[4] = {'cv_start': '1983-08-01', \r\n 'cv_end': '1992-12-01', \r\n 'pred_start': '1993-01-01',\r\n 'pred_end': '2003-07-01'}\r\n self.testing_dates[5] = {'cv_start': '1993-01-01', \r\n 'cv_end': '2003-07-01', \r\n 'pred_start': '2003-08-01',\r\n 'pred_end': '2010-09-01'}\r\n self.testing_dates[6] = {'cv_start': '2003-08-01', \r\n 'cv_end': '2010-09-01', \r\n 'pred_start': '2010-10-01',\r\n 'pred_end': '2021-07-01'}\r\n self.testing_dates[7] = {'cv_start': '2010-10-01', \r\n 'cv_end': '2021-07-01', \r\n 'pred_start': '2021-08-01',\r\n 'pred_end': most_recent_date}",
"def setUp(self):\n self.df_casesrecord = pd.DataFrame(\n {\n \"date\": [\n date(2020, 4, 9),\n date(2020, 4, 9),\n date(2020, 4, 10),\n date(2020, 4, 10),\n date(2020, 4, 11),\n date(2020, 4, 11),\n ],\n \"iso_code\": [\"FRA\", \"GBR\", \"FRA\", \"GBR\", \"FRA\", \"GBR\"],\n \"area\": [547557, 241930, 547557, 241930, 547557, 241930],\n \"population\": [\n 67059887,\n 66834405,\n 67059887,\n 66834405,\n 67059887,\n 66834405,\n ],\n \"weekly_avg_cases\": [7000, 4800, 7130, 4400, 7299, 4250],\n }\n )\n self.df_knotdateset = pd.DataFrame(\n {\n \"growth_factor_0_1\": [1.28, 1.29, 1.25, 1.25],\n \"growth_factor_1_2\": [1.1, 1.1, 1.1, 1.05],\n \"growth_factor_2_3\": [0.9, 0.9, 0.95, 0.95],\n \"iso_code\": [\"FRA\", \"FRA\", \"GBR\", \"GBR\"],\n \"knot_date_1\": [\n date(2020, 3, 15),\n date(2020, 3, 15),\n date(2020, 3, 19),\n date(2020, 3, 21),\n ],\n \"knot_date_2\": [\n date(2020, 4, 4),\n date(2020, 4, 5),\n date(2020, 4, 4),\n date(2020, 4, 18),\n ],\n \"weight\": [24, 8, 14, 12],\n }\n )\n self.df_modeldaterange = pd.DataFrame(\n {\n \"initial_date\": [date(2020, 3, 1), date(2020, 3, 2)],\n \"maximum_date\": [date(2020, 6, 8), date(2020, 6, 10)],\n \"first_restrictions_date\": [date(2020, 2, 29), date(2020, 3, 13)],\n \"lockdown_date\": [date(2020, 3, 17), date(2020, 3, 21)],\n \"iso_code\": [\"FRA\", \"GBR\"],\n }\n )\n self.df_possibledateset = pd.DataFrame(\n {\n \"n_days_first_restrictions\": [0, 0, 1, 1, 0, 0, 1, 1],\n \"n_days_lockdown\": [15, 16, 0, 1, 6, 7, 0, 1],\n \"dates_counterfactual_first_restrictions\": [\n date(2020, 2, 29),\n date(2020, 2, 29),\n date(2020, 2, 28),\n date(2020, 2, 28),\n date(2020, 3, 13),\n date(2020, 3, 13),\n date(2020, 3, 12),\n date(2020, 3, 12),\n ],\n \"dates_counterfactual_lockdown\": [\n date(2020, 3, 2),\n date(2020, 3, 1),\n date(2020, 3, 17),\n date(2020, 3, 16),\n date(2020, 3, 15),\n date(2020, 3, 14),\n date(2020, 3, 21),\n date(2020, 3, 20),\n ],\n \"iso_code\": [\"FRA\", \"FRA\", \"FRA\", \"FRA\", \"GBR\", \"GBR\", \"GBR\", \"GBR\"],\n }\n )",
"def validate(self, data):\r\n budgets = Budget.objects.filter(\r\n campaign__pk=self.context['view'].kwargs['campaign_pk']\r\n )\r\n covered = budgets.values_list('start_date', 'end_date')\r\n for each in covered:\r\n sd_fail = (\r\n data['start_date'] >= each[0] and\r\n data['start_date'] <= each[1]\r\n )\r\n ed_fail = (\r\n data['end_date'] >= each[0] and\r\n data['end_date'] <= each[1]\r\n )\r\n over_fail = (\r\n each[0] >= data['start_date'] and\r\n each[1] <= data['end_date']\r\n )\r\n if sd_fail:\r\n raise serializers.ValidationError({\r\n 'start_date': \"Start date conflicts with existing budget\"\r\n })\r\n if ed_fail:\r\n raise serializers.ValidationError({\r\n 'end_date': \"End date conflicts with existing budget\"\r\n })\r\n if over_fail:\r\n raise serializers.ValidationError(\r\n \"Date conflicts with existing budget\"\r\n )\r\n return data",
"def create_lat_lon_date_data(gt_id,\n target_horizon,\n experiment,\n past_gt_ids=[\"contest_precip\", \"contest_tmp2m\"],\n forecast_models=[\"nmme\",\"nmme0\"],\n other_lat_lon_date_features=[\"contest_rhum.sig995\",\n \"contest_pres.sfc.gauss\"]):\n\n time_start = time.time()\n\n # Add forecasts to list of forecast IDs\n forecast_variable = get_forecast_variable(gt_id) # 'prate' or 'tmp2m'\n forecast_ids = ['{}-{}-{}'.format(forecast, forecast_variable, target_horizon)\n for forecast in forecast_models]\n\n # -----------\n # Generate relevant variable and column names\n # -----------\n\n # Identify measurement variable name\n measurement_variable = get_measurement_variable(gt_id) # 'tmp2m' or 'prate'\n\n # Keep track of relevant column names\n gt_col = measurement_variable\n clim_col = measurement_variable+\"_clim\"\n anom_col = measurement_variable+\"_anom\"\n\n # Inverse of standard deviation of anomalies for each start_date\n anom_inv_std_col = anom_col+\"_inv_std\"\n\n # --------\n # Prepare experiment cache directory and saved file names\n # --------\n\n # Name of cache directory for storing non-submission-date specific\n # intermediate files\n cache_dir = os.path.join('results', experiment, 'shared',\n '{}_{}'.format(gt_id, target_horizon))\n # e.g., cache_dir = 'results/regression/shared/contest_precip_34w'\n\n # if cache_dir doesn't exist, create it\n if not os.path.isdir(cache_dir):\n os.makedirs(cache_dir)\n\n # Filenames for data file to be stored in cache_dir\n lat_lon_date_data_file = os.path.join(\n cache_dir, \"lat_lon_date_data-{}_{}.h5\".format(gt_id, target_horizon))\n\n # --------\n # Load mask indicating which grid points count in the contest (1=in, 0=out)\n # --------\n print \"Loading contest mask\"\n t = time.time()\n mask_df = get_contest_mask()\n print \"Elapsed: {}s\".format(time.time() - t)\n\n # --------\n # Creates and saves lat_lon_date_data dataframe\n # --------\n # Load masked lat lon date features restricted to years >= get_first_year(gt_id)\n # Note: contest lat lon date features and forecasts are pre-masked, so there\n # is no need to mask explcitily\n print \"Loading lat lon date features\"\n num_gt_ids = len(past_gt_ids)\n # For each measurement,\n # get number of days between start date of observation period used for prediction\n # (2 weeks + 1 submission day behind for most predictors) and start date of\n # target period (2 or 4 weeks ahead)\n past_start_deltas = [get_start_delta(target_horizon, past_gt_id)\n for past_gt_id in past_gt_ids]\n other_start_deltas = [get_start_delta(target_horizon, other_gt_id)\n for other_gt_id in other_lat_lon_date_features]\n # Additionally keep track of days between forecast date and start date of\n # target period\n forecast_delta = get_forecast_delta(target_horizon)\n\n lat_lon_date_data = get_lat_lon_date_features(\n gt_ids=other_lat_lon_date_features + other_lat_lon_date_features\n + other_lat_lon_date_features,\n gt_masks=None,\n gt_shifts=other_start_deltas +\n [2*delta for delta in other_start_deltas] +\n [365]*len(other_lat_lon_date_features),\n forecast_ids=forecast_ids + forecast_ids,\n forecast_masks=None,\n forecast_shifts=[None]*len(forecast_ids) + [forecast_delta]*len(forecast_ids),\n anom_ids=[gt_id] + past_gt_ids + past_gt_ids + past_gt_ids,\n anom_masks=None,\n anom_shifts=[None] + past_start_deltas +\n [2*delta for delta in past_start_deltas] +\n [365]*len(past_gt_ids),\n first_year=get_first_year(gt_id)\n )\n\n print \"Loading additional lat lon date features\"\n t = time.time()\n # Add CFSv2 
mean as feature\n if 'cfsv2' in forecast_models:\n cfsv2_models = ['cfsv2_op_delta_2w_1d_6h', 'cfsv2_op_delta_2w_1d_12h',\n 'cfsv2_op_delta_2w_1d_18h', 'cfsv2_op_delta_2w_2d_0h',\n 'cfsv2_op_delta_2w_2d_6h', 'cfsv2_op_delta_2w_2d_12h',\n 'cfsv2_op_delta_2w_2d_18h', 'cfsv2_op_delta_2w_3d_0h']\n lat_lon_date_data['cfsv2_mean'] = lat_lon_date_data[cfsv2_models].mean(axis=1)\n lat_lon_date_data[\"cfsv2_mean_shift\"+str(start_delta)] = lat_lon_date_data[\n [model+\"_shift\"+str(start_delta) for model in cfsv2_models]].mean(axis=1)\n # Add inverse of standard deviation of anomalies for each start_date\n lat_lon_date_data[anom_inv_std_col] = \\\n 1.0/lat_lon_date_data.groupby([\"start_date\"])[anom_col].transform('std')\n\n print \"Elapsed: {}s\".format(time.time() - t)\n\n # Save lat lon date features to disk\n print \"Saving lat lon date features to \"+lat_lon_date_data_file\n t = time.time()\n lat_lon_date_data.to_hdf(lat_lon_date_data_file, key=\"data\", mode=\"w\")\n subprocess.call(\"chmod a+w \"+lat_lon_date_data_file, shell=True)\n print \"Elapsed: {}s\".format(time.time() - t)\n print \"Finished generating lat_lon_date_data matrix.\"\n print \"Total time elapsed: {}s\".format(time.time()-time_start)\n return list(lat_lon_date_data)",
"def get_predictions(year, month):\n \n start_date = str(year)+\"-\"+str(month)+\"-01\"\n end_date = str(year)+\"-\"+str(month)+\"-\"+str(monthrange(year, month)[1])\n\n date_range = pd.date_range(start_date,end_date, freq='D').strftime(\"%Y-%m-%d\").tolist()\n\n # predictfunction \n # do predictions\n pred_arr = []\n file_name = '../predictions/model_'+str(year)+'_'+str(month)+'.csv'\n \n try:\n predictions = load_predictions(file_name)\n predictions = predictions.round()\n except:\n print(\"An exception occurred\")\n predictions = pd.DataFrame(data = date_range,columns=['Datum'])\n \n \n for index,row in predictions.iterrows():\n \n pred_mail = 0\n pred_counter = 0\n pred_tel = 0\n \n # check predictions dataframe for 'Datum'\n if 'Datum' in predictions.columns:\n date = row['Datum']\n else:\n break;\n\n # check predictions dataframe for 'Mail'\n if 'Mail' in predictions.columns:\n pred_mail = row['Mail']\n\n # check predictions dataframe for 'Schalter'\n if 'Schalter' in predictions.columns:\n pred_counter = row['Schalter']\n\n # check predictions dataframe for 'Tel'\n if 'Tel' in predictions.columns:\n pred_tel = row['Tel']\n \n \n pred_dict = {'date': date, \n 'predictions':{'mail' : pred_mail, \n 'tel' : pred_tel, \n 'counter' : pred_counter\n }\n }\n\n pred_arr.append(pred_dict)\n\n print(pred_arr) \n \n return pred_arr",
"def __init__(self, calendar_data_path_str, sell_prices_data_path_str, sales_train_validation_data_path_str, train_test_date_split, dept_id = None):\n\n self.sales_train_validation_df = pd.read_csv(sales_train_validation_data_path_str)\n self.sales_train_validation_df.sort_values(\"id\", inplace = True)\n self.calendar_df = pd.read_csv(calendar_data_path_str)\n self.sell_prices_df = pd.read_csv(sell_prices_data_path_str)\n\n # Filter by dept_id if needed\n if dept_id is not None:\n self.sales_train_validation_df = self.sales_train_validation_df.loc[self.sales_train_validation_df[\"cat_id\"] == dept_id]\n\n # Generate a validation set if enable_validation is True\n train_cols_lst = [i for i in self.sales_train_validation_df.columns if not i.startswith(\"d_\")] + self.calendar_df[\"d\"].loc[self.calendar_df[\"date\"] <= \"2016-03-27\"].tolist()\n test_cols_lst = [i for i in self.sales_train_validation_df.columns if not i.startswith(\"d_\")] + self.calendar_df[\"d\"].loc[(self.calendar_df[\"date\"] > \"2016-03-27\") & (self.calendar_df[\"date\"] <= \"2016-04-24\")].tolist()\n self.train_df = self.sales_train_validation_df[train_cols_lst].reset_index(drop = True)\n self.valid_df = self.sales_train_validation_df[test_cols_lst].reset_index(drop = True)\n \n self.train_target_columns = [i for i in self.train_df.columns if i.startswith(\"d_\")]\n self.weight_columns = self.train_df.iloc[:, -28:].columns.tolist()\n self.train_df[\"all_id\"] = \"all\"\n self.valid_df[\"all_id\"] = \"all\"\n \n self.id_columns = [i for i in self.train_df.columns if not i.startswith(\"d_\")]\n self.valid_target_columns = [i for i in self.valid_df.columns if i.startswith(\"d_\")]\n\n self.group_ids = (\"all_id\", \"state_id\", \"store_id\", \"cat_id\", \"dept_id\", \"item_id\", \n [\"state_id\", \"cat_id\"], [\"state_id\", \"dept_id\"], [\"store_id\", \"cat_id\"], \n [\"store_id\", \"dept_id\"], [\"item_id\", \"state_id\"], [\"item_id\", \"store_id\"])\n\n self.train_series = self.trans_30490_to_42840(self.train_df, self.train_target_columns)\n self.valid_series = self.trans_30490_to_42840(self.valid_df, self.valid_target_columns)\n self.roll_mat_csr, self.roll_index = self.create_rollup_index()\n self.group_ids_items = self.generate_group_ids_items()\n\n self.S = self.get_s()\n self.W = self.get_w()\n self.SW = self.W / np.sqrt(self.S)\n\n self.weights = self.get_weight_df() # Equivalent to get_w()\n self.scale = self.get_scale() # Equivalent to get_s()\n\n #self.train_series = None\n #self.train_df = None\n gc.collect()",
"def prep_data_fn(self, st_train_dt, end_train_dt, st_val_dt, end_val_dt, st_test_dt, end_test_dt):\n df = self.get_prep_data()\n train = df[(df['ft_data_dt'] >= st_train_dt) & (df['ft_data_dt'] <= end_train_dt)]\n val = df[(df['ft_data_dt'] >= st_val_dt) & (df['ft_data_dt'] <= end_val_dt)].sample(frac=0.4, random_state=2021)\n test = df[(df['ft_data_dt'] >= st_test_dt) & (df['ft_data_dt'] <= end_test_dt)]\n print(f'----train----')\n print(train[['ft_data_dt', 'target', 'idd']].groupby(['ft_data_dt', 'target']).agg(['count']))\n print(f'----validation----')\n print(val[['ft_data_dt', 'target', 'idd']].groupby(['ft_data_dt', 'target']).agg(['count']))\n print(f'----test----')\n print(test[['ft_data_dt', 'target', 'idd']].groupby(['ft_data_dt', 'target']).agg(['count']))\n self.set_train(train)\n self.set_validation(val)\n self.set_test(test)\n train_X = train[[c for c in train.columns if c not in ['idd', 'ft_data_dt', 'target']]]\n train_y = train['target']\n val_X = val[[c for c in train.columns if c not in ['idd', 'ft_data_dt', 'target']]]\n val_y = val['target']\n test_X = test[[c for c in train.columns if c not in ['idd', 'ft_data_dt', 'target']]]\n test_y = test['target']\n self.set_train_X(train_X)\n self.set_train_y(train_y)\n self.set_val_X(val_X)\n self.set_val_y(val_y)\n self.set_test_X(test_X)\n self.set_test_y(test_y)",
"def preprocess_new_data(start_dates, end_dates, mail_server, account,\n sender, password, test_mode):\n if test_mode:\n dfs = {}\n time_flag = datetime(2020, 8, 17)\n for test_type in [\"covid_ag\", \"flu_ag\"]:\n test_data_dir = f\"./test_data/{test_type}_test_data.xlsx\"\n dfs[test_type] = pd.read_excel(test_data_dir)\n else:\n # Get new data from email\n dfs, time_flag = get_from_email(COLUMN_NAMES, start_dates, end_dates,\n mail_server, account, sender, password)\n\n # No new data can be pulled\n if time_flag is None:\n return dfs, time_flag\n\n df_finals = {}\n for test_type in TEST_TYPES:\n print(f\"For {test_type}:\")\n df = dfs[test_type]\n # Fix some of the fipcodes that are 9 digit instead of 5 digit\n df = fix_zipcode(df)\n # Create a column CanonicalDate according to StarageDate and TestDate\n df = fix_date(df)\n\n # Compute numUniqueDevices\n numUniqueDevices = df.groupby(\n by=[\"timestamp\", \"zip\"],\n as_index=False)[\"SofiaSerNum\"].agg({\"SofiaSerNum\": \"nunique\"}).rename(\n columns={\"SofiaSerNum\": \"numUniqueDevices\"}\n )\n\n if test_type == \"covid_ag\":\n # Compute overallTotal\n overall_total = df.groupby(\n by=[\"timestamp\", \"zip\"],\n as_index=False)['OverallResult'].count()\n overall_total[\"totalTest\"] = overall_total[\"OverallResult\"]\n overall_total.drop(labels=\"OverallResult\", axis=\"columns\", inplace=True)\n\n # Compute overallPositive\n overall_pos = df[df[\"OverallResult\"] == \"positive\"].groupby(\n by=[\"timestamp\", \"zip\"],\n as_index=False)['OverallResult'].count()\n overall_pos[\"positiveTest\"] = overall_pos[\"OverallResult\"]\n overall_pos.drop(labels=\"OverallResult\", axis=\"columns\", inplace=True)\n else:\n # Compute overallTotal\n overall_total = df.groupby(\n by=[\"timestamp\", \"zip\"],\n as_index=False)['FluA'].count()\n overall_total[\"totalTest\"] = overall_total[\"FluA\"]\n overall_total.drop(labels=\"FluA\", axis=\"columns\", inplace=True)\n\n # Compute overallPositive\n overall_pos = df[\n (df[\"FluA\"] == \"positive\") | (df[\"FluB\"] == \"positive\")\n ].groupby(\n by=[\"timestamp\", \"zip\"],\n as_index=False)['FluA'].count()\n overall_pos[\"positiveTest\"] = overall_pos[\"FluA\"]\n overall_pos.drop([\"FluA\"], axis=\"columns\", inplace=True)\n\n df_finals[test_type] = overall_total.merge(\n numUniqueDevices, on=[\"timestamp\", \"zip\"], how=\"left\"\n ).merge(\n overall_pos, on=[\"timestamp\", \"zip\"], how=\"left\"\n ).fillna(0).drop_duplicates()\n\n return df_finals, time_flag",
"def setUp(self):\n super().setUp()\n self.test_data = pd.DataFrame({\n 'date':\n pd.to_datetime([\n '2019-01-01', '2019-10-01', '2019-01-01', '2019-10-01',\n '2019-01-01', '2019-10-01', '2019-01-01', '2019-10-01'\n ]),\n 'geo': [1, 1, 2, 2, 3, 3, 4, 4],\n 'response': [1, 2, 2, 5, 1, 2, 3, 4],\n 'spend': [1, 1.5, 2, 2.5, 1, 1.5, 5, 6],\n 'metric': [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]\n })\n\n geos = list(range(5, 23)) * 2\n geos.sort()\n\n self.add_pair = pd.DataFrame({\n 'date':\n pd.to_datetime(\n ['2019-01-01', '2019-10-01'] * 18),\n 'geo': geos,\n 'response': [10, 20] * 18,\n 'spend': [10, 10] * 18\n })\n\n self.design_window = TimeWindow(\n pd.Timestamp('2019-01-01'), pd.Timestamp('2019-10-01'))\n self.evaluation_window = TimeWindow(\n pd.Timestamp('2019-09-01'), pd.Timestamp('2019-10-01'))\n self.test_class = TrimmedMatchGeoXDesign(\n GeoXType.HEAVY_UP,\n pretest_data=self.test_data,\n time_window_for_design=self.design_window,\n time_window_for_eval=self.evaluation_window,\n matching_metrics={'response': 1.0})\n\n self.nontrivial_data = pd.DataFrame({\n 'date':\n pd.to_datetime(\n ['2019-01-01', '2019-10-01'] * 20),\n 'geo': sorted(list(range(1, 21)) * 2),\n 'response': range(100, 140),\n 'spend': range(1, 41)\n })",
"def test_model(predictions: np.array, configs: dict, folder_path: str, test_data_index: pd.Index,\n y_test: np.array,\n study_period_data: pd.DataFrame, parent_model_type: str = 'deep_learning', model_type: str = None,\n history=None, index_id='',\n index_name='', study_period_length: int = 0, model=None, period_range: tuple = (0, 0),\n start_date: datetime.date = datetime.date.today(), end_date: datetime.date = datetime.date.today(),\n get_val_score_only=False, weighting_criterion=None, plotting=False, market_logs=False, **kwargs):\n\n if get_val_score_only:\n # In case classifier is part of MixedEnsemble as is being validated\n y_test = y_test[kwargs['model_index']]\n test_data_index = test_data_index[kwargs['model_index']]\n print(f'\\nGetting validation score for {Style.BRIGHT}{Fore.BLUE}{model_type}{Style.RESET_ALL} ...')\n else:\n print(f'\\nTesting {Style.BRIGHT}{Fore.BLUE}{model_type}{Style.RESET_ALL} model on unseen data ...')\n\n # print(f'{Style.BRIGHT}{Fore.MAGENTA}Length of test data: {len(y_test)}{Style.RESET_ALL}')\n\n study_period_data = study_period_data.copy()\n y_test = y_test.copy()\n predictions = predictions.copy()\n\n timer = Timer().start()\n # JOB: Create data frame with true and predicted values\n if isinstance(test_data_index, pd.MultiIndex):\n test_set_comparison = pd.DataFrame({'y_test': y_test.astype('int8').flatten(), 'prediction': predictions},\n index=test_data_index)\n\n else:\n test_set_comparison = pd.DataFrame({'y_test': y_test.astype('int8').flatten(), 'prediction': predictions},\n index=pd.MultiIndex.from_tuples(test_data_index,\n names=['datadate', 'stock_id']))\n\n # JOB: Transform index of study period data to match test_set_comparison index\n study_period_data.index = study_period_data.index.tolist() # Flatten MultiIndex to tuples\n study_period_data.index.name = 'stock_id' # Rename index\n study_period_data.set_index('datadate', append=True, inplace=True)\n\n # JOB: Merge test set with study period data\n test_set_comparison = test_set_comparison.merge(study_period_data, how='left', left_index=True,\n right_on=['datadate', 'stock_id'])\n\n del study_period_data\n\n # JOB: Create normalized predictions (e.g., directional prediction relative to cross-sectional median of predictions)\n test_set_comparison.loc[:, 'norm_prediction'] = test_set_comparison.loc[:, 'prediction'].gt(\n test_set_comparison.groupby('datadate')['prediction'].transform('median')).astype(np.int16)\n\n # JOB: Create cross-sectional ranking\n test_set_comparison.loc[:, 'prediction_rank'] = test_set_comparison.groupby('datadate')['prediction'].rank(\n method='first', ascending=False).astype('int16')\n test_set_comparison.loc[:, 'prediction_percentile'] = test_set_comparison.groupby('datadate')['prediction'].rank(\n pct=True)\n\n test_data_start_date = test_set_comparison.index.get_level_values('datadate').min().date()\n test_data_end_date = test_set_comparison.index.get_level_values('datadate').max().date()\n test_set_n_days = test_set_comparison.index.get_level_values('datadate').unique().size\n test_set_n_constituents = test_set_comparison.index.get_level_values('stock_id').unique().size\n\n cross_section_size = int(round(test_set_comparison.groupby('datadate')['y_test'].count().mean()))\n print(f'Average size of cross sections: {int(cross_section_size)}')\n\n # Define top k values\n top_k_list = [5, 10]\n\n if cross_section_size > 30:\n top_k_list.extend([50, 100, 150, 200, 250])\n\n # JOB: Create empty dataframe for recording top-k accuracies\n top_k_metrics = 
pd.DataFrame()\n top_k_metrics.index.name = 'k'\n\n t_costs = 0.0005 # Set transaction costs per half-turn\n\n top_10_excess_return_series = None\n top_10_error_series = None\n market_return_series = None\n market_cum_returns = None\n market_metrics = None\n\n if not get_val_score_only:\n market_metrics, market_return_series, market_cum_returns = get_market_metrics(test_set_comparison,\n t_costs=t_costs,\n index_id=index_id,\n index_name=index_name,\n test_data_start_date=test_data_start_date,\n test_data_end_date=test_data_end_date,\n market_logs=market_logs)\n\n for top_k in top_k_list:\n # JOB: Filter test data by top/bottom k affiliation\n long_positions = test_set_comparison[test_set_comparison['prediction_rank'] <= top_k]\n short_positions = test_set_comparison[\n test_set_comparison['prediction_rank'] > test_set_comparison['cs_length'] - top_k]\n short_positions.loc[:, 'daily_return'] = - short_positions.loc[:, 'daily_return']\n\n full_portfolio = pd.concat([long_positions, short_positions], axis=0)\n\n if not get_val_score_only:\n if top_k == 5:\n # Get series of daily portfolio returns\n top_10_excess_return_series = calc_excess_returns(\n full_portfolio.groupby(level=['datadate'])['daily_return'].mean()).rename('daily_excess_return')\n top_10_excess_return_series = top_10_excess_return_series.reset_index()\n top_10_excess_return_series.loc[:, 'datadate'] = top_10_excess_return_series['datadate'].dt.strftime(\n '%Y-%m-%d')\n top_10_excess_return_series.set_index('datadate', inplace=True)\n\n sorted_portfolio = full_portfolio.set_index('prediction_rank', append=True, inplace=False)\n sorted_portfolio.reset_index(['stock_id'], inplace=True)\n sorted_portfolio.sort_index(level=['datadate', 'prediction_rank'], inplace=True)\n sorted_portfolio.reset_index(level='datadate', inplace=True, drop=True)\n top_10_error_series = (sorted_portfolio['norm_prediction'] - sorted_portfolio['y_test']).abs()\n top_10_error_series = top_10_error_series.values.tolist()\n\n cumulative_return = (top_10_excess_return_series.get('daily_excess_return') + 1).cumprod().rename(\n 'Cumulative Portfolio Return')\n cumulative_return.index.name = 'Time'\n\n if plotting:\n # Merge market and portfolio returns\n merged = pd.concat([cumulative_return, market_cum_returns], axis=1, join='outer')\n merged.plot()\n plt.legend(loc='best')\n plt.title(label=model_type)\n plt.show()\n\n annualized_sharpe = calc_sharpe(full_portfolio.loc[:, ['daily_return']].groupby(level=['datadate']).mean(),\n annualize=True)\n annualized_sharpe_atc = calc_sharpe(\n full_portfolio.loc[:, ['daily_return']].groupby(level=['datadate']).mean() - 4 * t_costs,\n annualize=True)\n annualized_sortino = calc_sortino(full_portfolio.loc[:, ['daily_return']].groupby(level=['datadate']).mean(),\n annualize=True)\n annualized_sortino_atc = calc_sortino(\n full_portfolio.loc[:, ['daily_return']].groupby(level=['datadate']).mean() - 4 * t_costs,\n annualize=True)\n\n accuracy = None\n\n # JOB: Calculate accuracy score over all trades\n if parent_model_type == 'deep_learning':\n accuracy = binary_accuracy(full_portfolio['y_test'].values,\n full_portfolio['norm_prediction'].values).numpy()\n\n elif parent_model_type == 'tree_based':\n accuracy = accuracy_score(full_portfolio['y_test'].values,\n full_portfolio['norm_prediction'].values)\n\n elif parent_model_type == 'mixed':\n accuracy = accuracy_score(full_portfolio['y_test'].values,\n full_portfolio['norm_prediction'].values)\n\n mean_daily_return = 
full_portfolio.groupby(level=['datadate'])['daily_return'].mean().mean()\n\n mean_daily_excess_return = calc_excess_returns(\n full_portfolio.groupby(level=['datadate'])['daily_return'].mean().rename('daily_return')).mean()\n\n mean_daily_short = short_positions.groupby(level=['datadate'])['daily_return'].mean().mean()\n mean_daily_long = long_positions.groupby(level=['datadate'])['daily_return'].mean().mean()\n\n top_k_metrics.loc[top_k, 'Accuracy'] = accuracy\n top_k_metrics.loc[top_k, 'Mean Daily Return'] = mean_daily_return\n top_k_metrics.loc[top_k, 'Annualized Return'] = annualize_metric(mean_daily_return)\n top_k_metrics.loc[top_k, 'Mean Daily Excess Return'] = mean_daily_excess_return\n top_k_metrics.loc[top_k, 'Annualized Excess Return'] = annualize_metric(mean_daily_excess_return)\n top_k_metrics.loc[top_k, 'Annualized Sharpe'] = annualized_sharpe\n top_k_metrics.loc[top_k, 'Annualized Sortino'] = annualized_sortino\n top_k_metrics.loc[top_k, 'Mean Daily Return (Short)'] = mean_daily_short\n top_k_metrics.loc[top_k, 'Mean Daily Return (Long)'] = mean_daily_long\n\n # JOB: Add metrics incl. transaction costs of 5 bps per half-turn\n top_k_metrics.loc[top_k, 'Mean Daily Return_atc'] = mean_daily_return - 4 * t_costs\n top_k_metrics.loc[top_k, 'Annualized Return_atc'] = annualize_metric(mean_daily_return - 4 * t_costs)\n top_k_metrics.loc[top_k, 'Mean Daily Excess Return_atc'] = mean_daily_excess_return - 4 * t_costs\n top_k_metrics.loc[top_k, 'Annualized Excess Return_atc'] = annualize_metric(\n mean_daily_excess_return - 4 * t_costs)\n top_k_metrics.loc[top_k, 'Annualized Sharpe_atc'] = annualized_sharpe_atc\n top_k_metrics.loc[top_k, 'Annualized Sortino_atc'] = annualized_sortino_atc\n top_k_metrics.loc[top_k, 'Mean Daily Return (Short)_atc'] = mean_daily_short - 2 * t_costs\n top_k_metrics.loc[top_k, 'Mean Daily Return (Long)_atc'] = mean_daily_long - 2 * t_costs\n\n if get_val_score_only:\n print(f'{weighting_criterion} score: {round(top_k_metrics.loc[5, weighting_criterion], 4)}')\n return top_k_metrics.loc[5, weighting_criterion]\n\n top_k_metrics = pd.concat([top_k_metrics, market_metrics.to_frame().T], join='outer', verify_integrity=True)\n top_k_metrics.fillna('-', inplace=True)\n\n # JOB: Display top-k metrics\n pretty_print_table(top_k_metrics)\n\n # JOB: Plot accuracies and save figure to file\n if plotting:\n for col in top_k_metrics.columns:\n top_k_metrics[col].plot(kind='line', legend=True, fontsize=14)\n plt.savefig(os.path.join(ROOT_DIR, folder_path, f'top_k_{col.lower()}.png'), dpi=600)\n plt.show()\n\n if parent_model_type == 'deep_learning':\n # JOB: Plot training and validation metrics for LSTM\n try:\n plot_train_val(history, configs['model']['metrics'], store_png=True, folder_path=folder_path)\n except AttributeError as ae:\n print(f'{Fore.RED}{Style.BRIGHT}Plotting failed.{Style.RESET_ALL}')\n # print(ae)\n except UnboundLocalError as ule:\n print(\n f'{Fore.RED}{Back.YELLOW}{Style.BRIGHT}Plotting failed. 
History has not been created.{Style.RESET_ALL}')\n # print(ule)\n\n # JOB: Evaluate model on full test data\n test_score = None\n if parent_model_type == 'deep_learning':\n test_score = float(binary_accuracy(test_set_comparison['y_test'].values,\n test_set_comparison['norm_prediction'].values).numpy())\n\n print(f'\\nTest score on full test set: {float(np.round(test_score, 4))}')\n\n elif parent_model_type in ['tree_based', 'mixed']:\n test_score = accuracy_score(test_set_comparison['y_test'].values,\n test_set_comparison['norm_prediction'].values)\n print(f'\\nTest score on full test set: {np.round(test_score, 4)}')\n\n # pretty_print_table(\n # pd.DataFrame({'y_test': test_set_comparison['y_test'].values, 'norm_prediction': test_set_comparison[\n # 'norm_prediction'].values}).sample(100)) # TODO: Remove\n\n total_epochs = len(history.history['loss']) if history is not None else None\n\n # JOB: Fill dict for logging\n data_record = {\n 'ID': config.run_id,\n 'Experiment Run End': datetime.datetime.now().isoformat(),\n 'Parent Model Type': parent_model_type,\n 'Model Type': model_type,\n 'Index ID': index_id,\n 'Index Name': index_name,\n 'Study Period ID': config.study_period_id,\n 'Study Period Length': study_period_length,\n 'Period Range': period_range,\n 'Study Period Start Date': start_date.isoformat(),\n 'Study Period End Date': end_date.isoformat(),\n 'Test Set Size': y_test.shape[0],\n 'Days Test Set': test_set_n_days,\n 'Constituent Number': test_set_n_constituents,\n 'Average Cross Section Size': cross_section_size,\n 'Test Set Start Date': test_data_start_date.isoformat(),\n 'Test Set End Date': test_data_end_date.isoformat(),\n 'Total Accuracy': test_score,\n\n 'Top-k Accuracy Scores': top_k_metrics['Accuracy'].to_dict(),\n 'Top-k Mean Daily Return': top_k_metrics['Mean Daily Return'].to_dict(),\n 'Top-k Mean Daily Excess Return': top_k_metrics['Mean Daily Excess Return'].to_dict(),\n 'Top-k Annualized Excess Return': top_k_metrics['Annualized Excess Return'].to_dict(),\n 'Top-k Annualized Return': top_k_metrics['Annualized Return'].to_dict(),\n 'Top-k Annualized Sharpe': top_k_metrics['Annualized Sharpe'].to_dict(),\n 'Top-k Annualized Sortino': top_k_metrics['Annualized Sortino'].to_dict(),\n 'Mean Daily Return (Short)': top_k_metrics['Mean Daily Return (Short)'].to_dict(),\n 'Mean Daily Return (Long)': top_k_metrics['Mean Daily Return (Long)'].to_dict(),\n\n 'Top-k Mean Daily Return_atc': top_k_metrics['Mean Daily Return_atc'].to_dict(),\n 'Top-k Annualized Return_atc': top_k_metrics['Annualized Return_atc'].to_dict(),\n 'Top-k Mean Daily Excess Return_atc': top_k_metrics['Mean Daily Excess Return_atc'].to_dict(),\n 'Top-k Annualized Excess Return_atc': top_k_metrics['Annualized Excess Return_atc'].to_dict(),\n 'Top-k Annualized Sharpe_atc': top_k_metrics['Annualized Sharpe_atc'].to_dict(),\n 'Top-k Annualized Sortino_atc': top_k_metrics['Annualized Sortino_atc'].to_dict(),\n 'Top-k Mean Daily Return (Short)_atc': top_k_metrics['Mean Daily Return (Short)_atc'].to_dict(),\n 'Top-k Mean Daily Return (Long)_atc': top_k_metrics['Mean Daily Return (Long)_atc'].to_dict(),\n\n 'Model Configs': model.get_params(),\n 'Total Epochs': total_epochs,\n\n 'Return Series': top_10_excess_return_series['daily_excess_return'].to_dict(),\n 'Prediction Error': top_10_error_series\n }\n\n # JOB: Write to logs\n write_to_logs(data_record)\n\n print('Done testing on unseen data.')\n timer.stop()\n\n return top_10_error_series",
"def walkforward_validation(data, test_start_date, test_end_date=None, step_size=15, testsize=15, model='SARIMA'):\n test_start_date = pd.to_datetime(test_start_date)\n current_max_date = test_start_date\n\n modelling_results = pd.DataFrame(columns=['series_name', 'model_type', 'test_start', 'test_end', 'MAE', 'MAPE', 'RMSE'])\n\n if test_end_date is None:\n test_end_date = data.index.max()\n test_end_date = pd.to_datetime(test_end_date)\n else:\n test_end_date = pd.to_datetime(test_end_date)\n\n while current_max_date < test_end_date:\n data.index = pd.to_datetime(data.index)\n iter_data = data[data.index <= current_max_date + timedelta(days=testsize)]\n test, train = test_train_spl(iter_data, testsize=testsize)\n\n if (model.upper() == 'SARIMA') | (model.upper() == 'SARIMAX'):\n print('USING SARIMA MODEL')\n mae, rmse, mape, name, preds, conf_intervals = mod_sarima(train=train, test=test, **arima_model_params)\n elif model.upper() == 'PROPHET':\n print('USING PROPHET MODEL')\n mae, rmse, mape, name, preds, conf_intervals = mod_prophet(train=train, test=test, **prophet_model_params)\n else:\n print('model name not known')\n iter_results = pd.DataFrame({'series_name': name, 'model_type': model, 'test_start': [current_max_date],\n 'test_end': [current_max_date + timedelta(testsize)], 'MAE': [mae], 'MAPE': [mape], 'RMSE': [rmse]})\n modelling_results = modelling_results.append(iter_results, ignore_index=True)\n\n # this line is just for validation of the effect of regressors in the forecast\n preds.to_csv(mod_report_path + arima_model_params['name'] + 'forecast_' + str(current_max_date).replace(':', '')+ '.csv')\n\n current_max_date = current_max_date + timedelta(days=step_size)\n\n return modelling_results",
"def date_prediction(config):\n if config['functionality'] == 'best_flights':\n departure_flight_date = date(config['departure_flight']['departure_date'][0],\n config['departure_flight']['departure_date'][1],\n config['departure_flight']['departure_date'][2])\n return_flight_date = date(config['return_flight']['departure_date'][0],\n config['return_flight']['departure_date'][1],\n config['return_flight']['departure_date'][2])\n div = config['prediction_period_days'] / 7\n dates_search = []\n for x in range(0, div + 1):\n dates_search.append(\n [(departure_flight_date + datetime.timedelta(days=x * 7)),\n (return_flight_date + datetime.timedelta(days=x * 7))])\n for i in dates_search:\n i[0] = str(i[0])\n year, month, day = i[0].split(\"-\")\n i[0] = \"%s/%s/%s\" % (day, month, year)\n i[1] = str(i[1])\n year, month, day = i[1].split(\"-\")\n i[1] = \"%s/%s/%s\" % (day, month, year)\n return dates_search\n elif config['functionality'] == 'flight_trends':\n departure_flight_date = date(\n config['departure_flight']['departure_date'][0],\n config['departure_flight']['departure_date'][1],\n config['departure_flight']['departure_date'][2])\n return_flight_date = date(config['return_flight']['departure_date'][0],\n config['return_flight']['departure_date'][1],\n config['return_flight']['departure_date'][2])\n dates_search = []\n for x in range(0, config['prediction_period_days']):\n dates_search.append(\n [(departure_flight_date + datetime.timedelta(days=x)),\n (return_flight_date + datetime.timedelta(days=x))])\n for i in dates_search:\n i[0] = str(i[0])\n year, month, day = i[0].split(\"-\")\n i[0] = \"%s/%s/%s\" % (day, month, year)\n i[1] = str(i[1])\n year, month, day = i[1].split(\"-\")\n i[1] = \"%s/%s/%s\" % (day, month, year)\n return dates_search",
"def perform_backtests(self):\r\n \r\n for test_name in self.testing_dates:\r\n print('\\t|--Test #{}'.format(test_name))\r\n test_dates = self.testing_dates[test_name]\r\n print('\\t\\t|--Performing Nested Cross-Validation')\r\n cross_validation = CrossValidate()\r\n cross_validation.output_names = self.output_names\r\n cross_validation.feature_names = self.feature_names\r\n cross_validation.feature_dict = self.feature_dict\r\n cross_validation.full_df = self.final_df_output\r\n cross_validation.cv_params = self.testing_dates\r\n cross_validation.test_name = test_name\r\n cross_validation.walk_forward_cv()\r\n self.optimal_params['Test #{}'.format(test_name)] = cross_validation.optimal_params_by_output\r\n self.cv_model_metadata['Test #{}'.format(test_name)] = cross_validation.cv_metadata_by_output\r\n \r\n print('\\t\\t|--Performing Out-Of-Sample Testing')\r\n prediction = Predict()\r\n prediction.output_names = self.output_names\r\n prediction.feature_names = self.feature_names\r\n prediction.feature_dict = self.feature_dict\r\n prediction.optimal_params_by_output = cross_validation.optimal_params_by_output\r\n prediction.cv_predictions_by_output = cross_validation.cv_predictions_by_output\r\n prediction.full_df = self.final_df_output\r\n prediction.pred_start = test_dates['pred_start']\r\n prediction.pred_end = test_dates['pred_end']\r\n prediction.run_prediction()\r\n self.full_predictions['Test #{}'.format(test_name)] = prediction.predictions_by_output\r\n self.pred_model_metadata['Test #{}'.format(test_name)] = prediction.pred_metadata_by_output\r\n \r\n print('\\nSaving model metadata...')\r\n with open(path.deployment_cv_results, 'w') as file:\r\n json.dump(self.optimal_params, file)\r\n with open(path.deployment_cv_metadata, 'w') as file:\r\n json.dump(self.cv_model_metadata, file)\r\n with open(path.deployment_pred_model_metadata, 'w') as file:\r\n json.dump(self.pred_model_metadata, file)\r\n with open(path.deployment_full_predictions, 'w') as file:\r\n json.dump(self.full_predictions, file)",
"def prepare_scale_train_valid_test(\n data: Union[pd.DataFrame, pd.Series],\n n_input_days: int,\n n_predict_days: int,\n test_size: float,\n s_end_date: str,\n no_shuffle: bool,\n):\n\n # Pre-process data\n if PREPROCESSER == \"standardization\":\n scaler = StandardScaler()\n\n elif PREPROCESSER == \"minmax\":\n scaler = MinMaxScaler()\n\n elif PREPROCESSER == \"normalization\":\n scaler = Normalizer()\n\n elif (PREPROCESSER == \"none\") or (PREPROCESSER is None):\n scaler = None\n # Test data is used for forecasting. Takes the last n_input_days data points.\n # These points are not fed into training\n\n if s_end_date:\n data = data[data.index <= s_end_date]\n if n_input_days + n_predict_days > data.shape[0]:\n print(\"Cannot train enough input days to predict with loaded dataframe\\n\")\n return (\n None,\n None,\n None,\n None,\n None,\n None,\n None,\n None,\n None,\n None,\n None,\n True,\n )\n\n test_data = data.iloc[-n_input_days:]\n train_data = data.iloc[:-n_input_days]\n\n dates = data.index\n dates_test = test_data.index\n if scaler:\n train_data = scaler.fit_transform(data.values.reshape(-1, 1))\n test_data = scaler.transform(test_data.values.reshape(-1, 1))\n else:\n train_data = data.values.reshape(-1, 1)\n test_data = test_data.values.reshape(-1, 1)\n\n prices = train_data\n\n input_dates = []\n input_prices = []\n next_n_day_prices = []\n next_n_day_dates = []\n\n for idx in range(len(prices) - n_input_days - n_predict_days):\n input_prices.append(prices[idx : idx + n_input_days])\n input_dates.append(dates[idx : idx + n_input_days])\n next_n_day_prices.append(\n prices[idx + n_input_days : idx + n_input_days + n_predict_days]\n )\n next_n_day_dates.append(\n dates[idx + n_input_days : idx + n_input_days + n_predict_days]\n )\n\n input_dates = np.asarray(input_dates)\n input_prices = np.array(input_prices)\n next_n_day_prices = np.array(next_n_day_prices)\n next_n_day_dates = np.asarray(next_n_day_dates)\n\n (\n X_train,\n X_valid,\n y_train,\n y_valid,\n X_dates_train,\n X_dates_valid,\n y_dates_train,\n y_dates_valid,\n ) = train_test_split(\n input_prices,\n next_n_day_prices,\n input_dates,\n next_n_day_dates,\n test_size=test_size,\n shuffle=no_shuffle,\n )\n return (\n X_train,\n X_valid,\n y_train,\n y_valid,\n X_dates_train,\n X_dates_valid,\n y_dates_train,\n y_dates_valid,\n test_data,\n dates_test,\n scaler,\n False,\n )",
"def test_ehr_submission_data_cutoff(self, mock_get_affected_tables):\n # mocks the return value of get_affected_tables as we only want to loop through the\n # visit_occurrence not all of the CDM tables\n mock_get_affected_tables.return_value = [common.VISIT_OCCURRENCE]\n\n queries = []\n visit_occurrence_tmpl = self.jinja_env.from_string(\"\"\"\n INSERT INTO `{{fq_dataset_name}}.{{cdm_table}}`\n (visit_occurrence_id, person_id, visit_concept_id, visit_start_date, \n visit_start_datetime, visit_end_date, visit_end_datetime, visit_type_concept_id)\n VALUES\n (111, 222, 3, date('2018-03-06'), timestamp('2018-03-06 11:00:00'), \n date('2018-03-07'), timestamp('2018-03-07 11:00:00'), 4),\n (222, 333, 3, date('2019-03-06'), timestamp('2019-03-06 11:00:00'), \n date('2019-03-07'), timestamp('2019-03-07 11:00:00'), 4),\n (333, 444, 3, date('2020-03-06'), timestamp('2020-03-06 11:00:00'), \n date('2020-03-07'), timestamp('2020-03-07 11:00:00'), 4),\n (444, 555, 3, date('2021-03-06'), timestamp('2021-03-06 11:00:00'), \n date('2021-03-07'), timestamp('2021-03-07 11:00:00'), 4),\n (555, 666, 3, date('2022-03-06'), timestamp('2022-03-06 11:00:00'), \n date('2022-03-07'), timestamp('2022-03-07 11:00:00'), 4)\n \"\"\").render(fq_dataset_name=self.fq_dataset_name,\n cdm_table=common.VISIT_OCCURRENCE)\n queries.append(visit_occurrence_tmpl)\n\n self.load_test_data(queries)\n\n table_and_counts = [{\n 'fq_table_name':\n '.'.join([self.fq_dataset_name, 'visit_occurrence']),\n 'fq_sandbox_table_name':\n f'{self.fq_sandbox_name}.{self.rule_instance.sandbox_table_for(common.VISIT_OCCURRENCE)}',\n 'loaded_ids': [111, 222, 333, 444, 555],\n 'sandboxed_ids': [444, 555],\n 'fields': [\n 'visit_occurrence_id', 'person_id', 'visit_concept_id',\n 'visit_start_date', 'visit_start_datetime', 'visit_end_date',\n 'visit_end_datetime', 'visit_type_concept_id'\n ],\n 'cleaned_values': [\n (111, 222, 3, parse('2018-03-06').date(),\n parse('2018-03-06 11:00:00 UTC'), parse('2018-03-07').date(),\n parse('2018-03-07 11:00:00 UTC'), 4),\n (222, 333, 3, parse('2019-03-06').date(),\n parse('2019-03-06 11:00:00 UTC'), parse('2019-03-07').date(),\n parse('2019-03-07 11:00:00 UTC'), 4),\n (333, 444, 3, parse('2020-03-06').date(),\n parse('2020-03-06 11:00:00 UTC'), parse('2020-03-07').date(),\n parse('2020-03-07 11:00:00 UTC'), 4)\n ]\n }]\n\n self.default_test(table_and_counts)",
"def validate(args, limit_to=None):\n validation_warnings = validation.validate(\n args, MODEL_SPEC['args'])\n\n sufficient_keys = validation.get_sufficient_keys(args)\n invalid_keys = validation.get_invalid_keys(validation_warnings)\n\n if (\"landcover_snapshot_csv\" not in invalid_keys and\n \"landcover_snapshot_csv\" in sufficient_keys):\n snapshots = utils.read_csv_to_dataframe(\n args['landcover_snapshot_csv'],\n MODEL_SPEC['args']['landcover_snapshot_csv']\n )['raster_path'].to_dict()\n\n for snapshot_year, snapshot_raster_path in snapshots.items():\n raster_error_message = validation.check_raster(\n snapshot_raster_path)\n if raster_error_message:\n validation_warnings.append((\n ['landcover_snapshot_csv'],\n INVALID_SNAPSHOT_RASTER_MSG.format(\n snapshot_year=snapshot_year\n ) + ' ' + raster_error_message))\n\n if (\"analysis_year\" not in invalid_keys\n and \"analysis_year\" in sufficient_keys):\n if max(set(snapshots.keys())) > int(args['analysis_year']):\n validation_warnings.append((\n ['analysis_year'],\n INVALID_ANALYSIS_YEAR_MSG.format(\n analysis_year=args['analysis_year'],\n latest_year=max(snapshots.keys()))))\n\n # check for invalid options in the translation table\n if (\"landcover_transitions_table\" not in invalid_keys and\n \"landcover_transitions_table\" in sufficient_keys):\n transitions_spec = MODEL_SPEC['args']['landcover_transitions_table']\n transition_options = list(\n transitions_spec['columns']['[LULC CODE]']['options'].keys())\n # lowercase options since utils call will lowercase table values\n transition_options = [x.lower() for x in transition_options]\n transitions_df = utils.read_csv_to_dataframe(\n args['landcover_transitions_table'], transitions_spec)\n transitions_mask = ~transitions_df.isin(transition_options) & ~transitions_df.isna()\n if transitions_mask.any(axis=None):\n transition_numpy_mask = transitions_mask.values\n transition_numpy_values = transitions_df.to_numpy()\n bad_transition_values = list(\n numpy.unique(transition_numpy_values[transition_numpy_mask]))\n validation_warnings.append((\n ['landcover_transitions_table'],\n INVALID_TRANSITION_VALUES_MSG.format(\n model_transitions=(transition_options),\n transition_values=bad_transition_values)))\n\n return validation_warnings",
"def run(input_dir, output_dir, team_name=\"OrganizersTeam\", predict_window=12):\n\n models_dir = os.path.join(output_dir, \"models\")\n plots_dir = os.path.join(output_dir, \"plots\")\n sub_dir = os.path.join(output_dir, \"submissions\")\n submission_file_name_fmt = \"{}_{}.csv\"\n\n make_directory_tree([\"models\", \"plots\", \"submissions\"], output_dir)\n\n datasets = get_datasets(input_dir)\n\n print(\"Will train a total of {} models\".format(len(datasets) * 3))\n\n # create a scores table to keep MAE for each location:model pair\n scores = pd.DataFrame(columns=[\"Location\", \"Model\", \"MAE\"])\n\n for dataset in datasets:\n # load the dataset\n df = read_csv_series(os.path.join(input_dir, dataset))\n loc = dataset.split(\".\")[0]\n\n # shift PM10 for `predict_window` hours ahead\n df[\"PM10\"] = df[\"PM10\"].shift(-predict_window)\n\n # split dataset into train, test and evaluation by dates\n # additionally, leave the last 48 hours for final evaluation\n train_len = int(len(df) * 0.65) - (2 * predict_window)\n test_len = int(len(df) * 0.25) - (2 * predict_window)\n eval_len = len(df) - train_len - test_len - (2 * predict_window)\n train, test, evaluation = df[:train_len], df[train_len:train_len +\n test_len], df[train_len+test_len:train_len+test_len+eval_len]\n final_eval = df[-(2 * predict_window):-predict_window].copy()\n\n # initialize models\n models = [\n (\"Linear Regression\", LinearRegression()),\n (\"Extra Trees Regressor\", ExtraTreesRegressor(n_estimators=100, n_jobs=4, min_samples_split=25,\n min_samples_leaf=35, random_state=0)),\n (\"XGBoost Trees Regression\", xgb.XGBRegressor(objective=\"reg:linear\", random_state=0))\n ]\n\n mae_min = 1e10\n yhat_sub = []\n\n for model in models:\n # get predictions and MAE\n yhat, mae = train_and_evaluate(\"{} - {}\".format(loc,model[0]),model[1],train,test,evaluation,final_eval, output_dir)\n\n # save the score (MAE) for the model\n scores = scores.append(\n {\"Location\": loc, \"Model\": model[0], \"MAE\": mae}, ignore_index=True)\n\n # save the better predictions to `yhat_sub`\n if mae < mae_min:\n mae_min = mae\n yhat_sub = yhat\n\n sub_df = pd.DataFrame(yhat_sub, columns=[\"PM10\"])\n sub_df.to_csv(os.path.join(sub_dir, submission_file_name_fmt.format(team_name, loc)))\n\n scores.to_csv(os.path.join(output_dir, \"scores.csv\"))\n\n print(\"Done\")\n print(\"Saved models can be found at {}\".format(models_dir))\n print(\"Plots can be found at {}\".format(plots_dir))\n print(\"Submissions can be found at {}\".format(sub_dir))\n\n return scores",
"def time_split_dataset(df, train_start_date, train_end_date, holdout_end_date, date_col):\n\n train_set = df.copy()[\n (df[date_col] >= train_start_date) & (df[date_col] <= train_end_date)]\n\n test_set = df.copy()[\n (df[date_col] > train_end_date) & (df[date_col] <= holdout_end_date)]\n\n return train_set, test_set",
"def build_shape_data(self, start=None, end=None):\n # If start and end are None, then set them to be min/max of self.df_demand\n if start is None:\n start = self.df_demand['date'].min()\n if end is None:\n end = self.df_demand['date'].max()\n print(f\"date range for shape data is from {start} to {end}\")\n # Extract part of df_demand that is within start and end\n df_sub = self.df_demand[(self.df_demand['date'] >= start) & (self.df_demand['date'] <= end)]\n assert df_sub['date'].min() >= start\n assert df_sub['date'].max() <= end\n num_days = len(pd.date_range(iso8601.parse_date(start), iso8601.parse_date(end), freq='d'))\n print(f\"number of days is {num_days}\")\n # When finding variance and mean, add in missing days as 0s\n # Obtain the counts for each lat/lng region\n counts = df_sub.groupby(['left_lng', 'right_lng', 'lower_lat', 'upper_lat']).size().reset_index(name='counts')\n # Group demand data by lat/lng region and average across other cols\n df = df_sub.groupby(['left_lng', 'right_lng', 'lower_lat', 'upper_lat'])[['avail_count', 'avail_mins', 'trips', 'prob_scooter_avail', 'adj_trips']].mean().reset_index()\n df = df.merge(counts, on=['left_lng', 'right_lng', 'lower_lat', 'upper_lat'])\n # print(df.head())\n # Modify averages by multiplying each by count and divide by num_days\n vars = ['avail_count', 'avail_mins', 'trips', 'prob_scooter_avail', 'adj_trips']\n for var in vars:\n df[var] = df[var]*df['counts']/num_days\n # print(df.head())\n # Calculate the variance for prob_scooter_avail\n probVariance = df_sub.groupby(['left_lng', 'right_lng', 'lower_lat', 'upper_lat']).apply(lambda x: ((x['prob_scooter_avail'] - (x['prob_scooter_avail'].sum()/num_days))**2).sum()/(num_days-1)).reset_index(name='prob_scooter_avail')\n # print(probVariance.head())\n df['prob_scooter_avail_var'] = probVariance['prob_scooter_avail']\n # Check to see if there are any Nan values\n print(f\"Nan values in df? 
{df.isnull().values.any()}\")\n # print(df.head())\n # For each var col, create corresponding color columns (log and unlog)\n # Also create the factors list that get passed into self.create_rectangle_lst\n factors = [('avail_count', 'decimal'), ('avail_mins', 'decimal'),\n ('trips', 'decimal'), ('prob_scooter_avail', 'percent'), ('adj_trips', 'decimal')]\n i = 0\n original_len = len(factors)\n while i < original_len:\n name, type = factors[i]\n # print(f\"name={name}, type={type}\")\n # Create color column\n df = self.map_values_to_color(df, name)\n # If type is not percent than create log version\n if type != 'percent':\n df = self.create_log_column(df, name)\n factors.append(('log_'+name, type))\n i += 1\n # Deal with estimated demand and unmet demand\n # Filter out rows where prob_scooter_avail sig diff from 0\n sigDiffIdx = df.apply(lambda x: utils.sig_diff_from_zero(x['prob_scooter_avail'], x['prob_scooter_avail_var']), axis=1)\n # print(sigDiffIdx.head())\n df_sig_diff = df[sigDiffIdx]\n # Calculate estimated demand and unmet demand\n df_sig_diff = self.calculate_demand(df_sig_diff)\n # print(df_sig_diff.head())\n # Create color column and log column for unmet demand\n df_sig_diff = self.map_values_to_color(df_sig_diff, 'unmet_demand')\n df_sig_diff = self.map_values_to_color(df_sig_diff, 'estimated_demand')\n df_sig_diff = self.create_log_column(df_sig_diff, 'unmet_demand')\n factors.extend([('estimated_demand', 'decimal'), ('unmet_demand', 'decimal'), ('log_unmet_demand', 'decimal')])\n # Fill in the colors for the grid cells that aren't significantly different\n df_not_sig_diff = df[~sigDiffIdx]\n # print(df_not_sig_diff.head())\n df = pd.concat([df_sig_diff, df_not_sig_diff])\n # df.to_csv('../../../data_files/20210427_estimatedDemand.csv', index=False)\n # Create Rectangle information\n rectangles = self.create_rectangle_lst(df, factors)\n return rectangles, start, end",
"def groups_of_train_test_set(df, config, fbprophet=None):\n \n tvar = config.variables['tvar']\n xvar = config.variables['xvar'] + config.variables['xvar_derived']\n \n begin_date = config.timestamps['begin_date']\n end_date = config.timestamps['end_date']\n deltat = config.timestamps['deltat']\n\n begin_date = datetime.datetime.strptime(begin_date, \"%Y-%m-%d %H:%M:%S\")\n end_date = datetime.datetime.strptime(end_date, \"%Y-%m-%d %H:%M:%S\")\n\n \n # Check if ustar threshold is provided for year of interest\n if config.data['ustar']==True:\n if not begin_date.year in config.data['ustar_map'].keys():\n raise ValueError('{} is missing from config/data/ustar_map'.format(begin_date.year))\n if not end_date.year in config.data['ustar_map'].keys():\n raise ValueError('{} is missing from config/data/ustar_map'.format(end_date.year))\n\n\n if (end_date - begin_date).days < deltat:\n raise ValueError(\"Time difference in days between begin and end date\" + \n \"must be greater than deltat.\")\n\n\n df = df.loc[df[tvar] <= end_date]\n\n number_of_train_test_sets = int((end_date - begin_date).total_seconds()/\\\n datetime.timedelta(deltat).total_seconds())\n\n begin_test_timestamp = begin_date\n \n\n test_df = []\n train_df = [] \n for i in range(number_of_train_test_sets):\n if i == number_of_train_test_sets-1:\n end_test_timestamp = end_date\n else:\n end_test_timestamp = None\n\n\n i_test_set, i_train_set, end_test_timestamp =\\\n _train_test_split(df.copy(), config,\n begin_test_timestamp,\n deltat, end_test_timestamp,\n fbprophet)\n begin_test_timestamp = end_test_timestamp\n \n # Interpolating where x-var is nan.\n i_test_set[xvar] = i_test_set[xvar].interpolate()\n i_train_set[xvar] = i_train_set[xvar].interpolate()\n \n\n i_test_set['Set_rank'] = i\n i_train_set['Set_rank'] = i\n \n if i == 0:\n test_df = i_test_set\n train_df = i_train_set\n else:\n test_df = pd.concat((test_df, i_test_set))\n train_df = pd.concat((train_df, i_train_set))\n\n\n return test_df, train_df",
"def test_aggr_date_input(self):\n\n actual_start_date = set([])\n actual_end_date = set([])\n for year in self.years:\n for my_date in self.dates:\n input_date = date(year, my_date[0], my_date[1])\n retail_date = RetailDate(input_date)\n actual_start_date.add(retail_date.year_start_date)\n actual_end_date.add(retail_date.year_end_date)\n\n # Verify the retail start dates\n expected_start = set([date(mTup[0], mTup[1], mTup[2]) for mTup in self.retail_start_dates])\n diff = expected_start.symmetric_difference(actual_start_date)\n self.assertEqual(len(diff), 0, \"Diff: \" + str(diff))\n\n # Verify the retail end dates\n expected_end = set([date(mTup[0], mTup[1], mTup[2]) for mTup in self.retail_end_dates])\n diff = expected_end.symmetric_difference(actual_end_date)\n self.assertEqual(len(diff), 0, \"Diff: \" + str(diff))",
"def same_extremes(df, train, test):\n\n # Merge together the data\n lookup = df.join(train.append(test)[[\"group_1\", \"date_act\"]])\n\n # Caluculate the easy ones\n grp = pd.DataFrame()\n grp[\"count\"] = lookup.groupby([\"group_1\", \"date_act\"])[\"outcome\"].count()\n grp[\"min\"] = lookup.groupby([\"group_1\", \"date_act\"])[\"outcome\"].min()\n grp[\"max\"] = lookup.groupby([\"group_1\", \"date_act\"])[\"outcome\"].max()\n grp = grp[grp[\"count\"] > 1]\n grp[\"value\"] = None\n grp.loc[(grp[\"max\"] < 0.5), \"value\"] = grp[\"min\"]\n grp.loc[(grp[\"min\"] > 0.5), \"value\"] = grp[\"max\"]\n\n # Do the remaining ones by loop\n for index, row in grp[grp[\"value\"].isnull()].iterrows():\n if 0.5-row[\"min\"] > row[\"max\"]-0.5:\n grp.loc[index, \"value\"] = row[\"min\"]\n else:\n grp.loc[index, \"value\"] = row[\"max\"]\n\n # Merge to lookup for indexing and filling\n lookup = lookup.reset_index().merge(grp[[\"value\"]].reset_index(), how=\"left\", on=[\"group_1\", \"date_act\"]\n ).set_index(\"activity_id\")\n lookup[\"value\"] = lookup[\"value\"].fillna(lookup[\"outcome\"])\n\n df[\"outcome\"] = lookup[\"outcome\"]\n\n return df",
"def get_pred_score(file_name, kpi, start_train, end_train, periods=365):\n # set names for columns in order\n names = ['date', 'handle_time', 'handle_time_forecast',\n 'volume', 'volume_forecast']\n df = pd.read_csv('./data/'+file_name, names=names, header=1, index_col=0)\n\n df['date'] = pd.to_datetime(df['date'])\n df.index = df['date']\n\n # create column for aht\n df['aht'] = df['handle_time']/df['volume']\n df['aht_forecast'] = df['handle_time_forecast']/df['volume_forecast']\n\n # df = df[(df['aht'] > 200) & (df['aht'] < 1500)]\n\n df1 = df.copy()\n\n # create business unit name\n bu = file_name[:6]\n\n # # remove outliers if it helps the model\n # df = remove_outliers(df, bu)\n\n # remove holidays\n df = df[~df.index.isin(h.iloc[:,0].tolist())]\n\n # remove weekends\n df = df[~df.index.weekday.isin([5,6])]\n\n # # plot data in a notebook\n # plot data\n # plot_time_vol(df)\n\n # create training\n df = create_training_data(df, kpi, start_train, end_train)\n\n \n # display(df.head())\n m,future = create_forecast(df, periods=int(periods))\n forecast = m.predict(future)\n\n # # plot forecast in a notebook\n # plot_forecast(forecast)\n\n # set forecast beginning and end date\n f = forecast.copy()\n end_test = df1.index.max()\n f = f[(forecast['ds'] > end_train) & (f['ds'] <= end_test)]\n\n # create validation dataset\n df2 = df1.copy()\n df2['ds'] = pd.to_datetime(df2.index.date) \n\n # set start date of validation data equal to June 1st, 2020 or any other date\n df2 = df2[df2['ds'] > end_train]\n\n # remove weekends and holidays from data\n df2 = df2[~df2.index.isin(h.iloc[:,0].tolist())]\n df2 = df2[~df2.index.weekday.isin([5,6])]\n os.makedirs('./preds/'+kpi, exist_ok=True)\n\n future_forecast=forecast[['ds', 'yhat_lower', 'yhat', 'yhat_upper']][forecast['ds']>datetime.datetime.now()]\n\n f[['ds', 'yhat_lower', 'yhat', 'yhat_upper']].to_csv('./preds/'+kpi+'/'+bu+'.csv')\n forecast.to_csv('./preds/analysis/'+kpi+'_'+bu+'.csv')\n\n # Validate test data, it must match for scoring\n mae = evaluate_model(f,df2, kpi, metric='mae')\n\n mae.update({'kpi': kpi, 'start_train':start_train, 'end_train': end_train, 'end_test': end_test}) \n\n curr = pd.read_csv('scores/'+kpi+'_score.csv',index_col=0)\n new = pd.DataFrame(mae, index=[bu])\n\n new.to_csv('./scores/current/'+bu+'_'+kpi+'.csv')\n \n if bu not in curr.index:\n curr = pd.concat([curr, new], 0)\n # create log everytime a score is superceded\n elif new.loc[bu,'prophet'] < curr.loc[bu,'prophet']:\n data = pd.concat([curr.loc[[bu],:],new.loc[[bu],:]],0)\n data.to_csv('./scores/logs/'+bu+'_'+kpi+'_'+datetime.datetime.now()\n .strftime(\"%b %d %Y %H:%M:%S\").replace(' ', '_')+'.csv')\n curr.update(new)\n curr.to_csv('./scores/'+kpi+'_score.csv')\n\n return 'error with metric'\n print('\\ntraining',file_name, 'on', kpi, 'start train:',start_train, \n 'end train:', end_train, 'end_test:', )",
"def evaluate_mapped_inputs(self,retrofit_start_date,retrofit_completion_date,**kwargs):\n\n pre_kwargs = {\n \"is_pre\":True,\n \"is_post\":False,\n \"retrofit_start_date\": retrofit_start_date,\n \"retrofit_completion_date\": retrofit_completion_date\n }\n post_kwargs = {\n \"is_pre\": False,\n \"is_post\": True,\n \"retrofit_start_date\": retrofit_start_date,\n \"retrofit_completion_date\": retrofit_completion_date\n }\n split_kwargs = {}\n for k,v in kwargs.items():\n if k in self.splittable_args:\n pre_kwargs[k] = v.before(retrofit_start_date)\n post_kwargs[k] = v.after(retrofit_completion_date)\n split_kwargs[k + \"_pre\"] = pre_kwargs[k]\n split_kwargs[k + \"_post\"] = post_kwargs[k]\n else:\n pre_kwargs[k] = v\n post_kwargs[k] = v\n pre_results = self.pre_meter.evaluate(**pre_kwargs)\n post_results = self.post_meter.evaluate(**post_kwargs)\n pre_results = {k + \"_pre\":v for k,v in pre_results.items()}\n post_results = {k + \"_post\":v for k,v in post_results.items()}\n results = {k:v for k,v in chain(pre_results.items(),\n post_results.items(),\n split_kwargs.items())}\n return results",
"def create_date_data(gt_id,\n target_horizon,\n experiment,\n date_features=[\"mei\", \"mjo\",\n \"pca_sst_2010\", \"pca_icec_2010\",\n \"pca_wind_hgt_10_2010\",\n \"pca_wind_hgt_100_2010\",\n \"pca_wind_hgt_500_2010\",\n \"pca_wind_hgt_850_2010\"]):\n\n time_start = time.time()\n\n # --------\n # Prepare experiment cache directory and saved file names\n # --------\n\n # Name of cache directory for storing non-submission-date specific\n # intermediate files\n cache_dir = os.path.join('results', experiment, 'shared',\n '{}_{}'.format(gt_id, target_horizon))\n # e.g., cache_dir = 'results/regression/shared/contest_precip_34w'\n\n # if cache_dir doesn't exist, create it\n if not os.path.isdir(cache_dir):\n os.makedirs(cache_dir)\n\n # Filenames for data file to be stored in cache_dir\n date_data_file = os.path.join(\n cache_dir, \"date_data-{}_{}.h5\".format(gt_id, target_horizon))\n\n # --------\n # Creates date_data dataframe.\n # --------\n # Get number of days between start date of observation period used for prediction\n # (2 weeks behind) and start date of target period (2 or 4 weeks ahead)\n start_deltas = [get_start_delta(target_horizon, gt_id)\n for gt_id in date_features]\n\n # Load masked date features\n print \"Loading date features\"\n date_data = get_date_features(gt_ids=date_features, gt_shifts=start_deltas,\n first_year=get_first_year(gt_id))\n\n print \"Loading additional date features\"\n t = time.time()\n if 'mjo' in date_features:\n # Add cosine and sine transforms of MJO phase\n mjo_phase_name = 'phase_shift'+str(get_start_delta(target_horizon, 'mjo'))\n date_data['cos_'+mjo_phase_name] = np.cos((2*np.pi*date_data[mjo_phase_name])/8)\n date_data['sin_'+mjo_phase_name] = np.sin((2*np.pi*date_data[mjo_phase_name])/8)\n print \"Elapsed: {}s\".format(time.time() - t)\n # Save date features to disk\n print \"Saving date features to \"+date_data_file\n t = time.time()\n date_data.to_hdf(date_data_file, key=\"data\", mode=\"w\")\n subprocess.call(\"chmod a+w \"+date_data_file, shell=True)\n print \"Elapsed: {}s\".format(time.time() - t)\n\n print \"Finished generating date_data matrix.\"\n print \"Total time elapsed: {}s\".format(time.time()-time_start)\n return list(date_data)",
"def predictAndSubmit(train, features, predCols):\n realTest = pd.read_csv('Data\\\\test.csv')\n realTest['Id'] = (realTest['Store'].map(str) + '_' +\n realTest['Dept'].map(str) + '_' +\n realTest['Date'].map(str))\n\n realTest = extractFeatures(realTest, features)\n realTestX = realTest[predCols]\n\n pipe = Pipeline([('scal', StandardScaler()),\n ('clf', xgb.XGBRegressor(learning_rate=0.07, max_depth=6,\n n_estimators=100))])\n trainX = train[predCols]\n trainY = train['Weekly_Sales']\n pipe.fit(trainX, trainY)\n prediction = pipe.predict(realTestX)\n realTest['Weekly_Sales'] = prediction\n realTest[['Id', 'Weekly_Sales']].to_csv('Output\\\\XGBSubmission.csv',\n index=False)\n\n pipe = Pipeline([('scal', StandardScaler()),\n ('clf', SGDRegressor())])\n pipe.fit(trainX, trainY)\n prediction = pipe.predict(realTestX)\n realTest['Weekly_Sales'] = prediction\n realTest[['Id', 'Weekly_Sales']].to_csv('Output\\\\SGDSubmission.csv',\n index=False)\n\n testDates = pd.to_datetime(realTest.Date)\n testDates = pd.DatetimeIndex(testDates.unique())\n storeDepts = extractStoreDeptCombos(realTest)\n noNotFit = 0\n allPred = pd.DataFrame()\n for store, dept in storeDepts.itertuples(index=False):\n trainThis = train[train['Store'] == store]\n trainThis = trainThis[trainThis['Dept'] == dept]\n if len(trainThis.index) > 142:\n # Only fit if all dates available\n trainFB = pd.DataFrame()\n trainFB['ds'] = trainThis['Date'].astype(str)\n trainFB['y'] = trainThis['Weekly_Sales']\n m = Prophet()\n m.fit(trainFB)\n realTestFBx = pd.DataFrame()\n realTestFBx['ds'] = testDates\n prediction = m.predict(realTestFBx)\n predRows = pd.DataFrame({'Store': store, 'Dept': dept,\n 'Date': testDates, 'y': prediction.yhat})\n else:\n print(\"Not enough Data\")\n noNotFit += 1\n print(\"Store: {} Dept: {}\".format(store, dept))\n allPred = allPred.append(predRows, ignore_index=True)\n\n print(\"{} store-date combos not fit\".format(noNotFit))\n\n allPred.drop_duplicates(inplace=True)\n realSub = pd.merge(realTest[['Store', 'Date', 'Dept', 'Id']], allPred,\n on=['Store', 'Date', 'Dept'], how='left')\n\n # Fill all NaNs wit the total mean. This could be more advanced obviously,\n # but this will do for the time being\n realSub['y'].fillna((realSub['y'].mean()), inplace=True)\n realSub = realSub[['Id', 'y']]\n realSub.columns = ['Id', 'Weekly_Sales']\n realSub.to_csv('Output\\\\FBProphetSubmission.csv', index=False)\n\n # XGBScore = 7972.37008\n # FBProphet score = 5357.68674\n\n FBSub = pd.read_csv('Output\\\\FBProphetSubmission.csv')\n XGSub = pd.read_csv('Output\\\\XGBSubmission.csv')\n ensembleSub = pd.DataFrame((FBSub['Weekly_Sales']*3/4\n + XGSub['Weekly_Sales']*1/4))\n ensembleSub['Id'] = FBSub['Id']\n ensembleSub[['Id', 'Weekly_Sales']].to_csv(\n 'Output\\\\EnsembleSubmission.csv', index=False)",
"def train_test_split(dataframe, start_date, window_days=100, train_percent=80.,\n return_arrays=False):\n data = dataframe.copy()\n if window_days*24 > data.values.shape[0]:\n raise ValueError('Variable window_days has too large value: {}*24h = {} > {}, which is more than there is data!'.format(window_days, window_days*24, \n data.values.shape[0]))\n \n # Split dataframe into X, y\n columns = data.columns.values\n outputs = [col_name for col_name in columns if 'Load+' in col_name]\n inputs = [col_name for col_name in columns if col_name not in outputs]\n # inputs (features)\n X = data[inputs]\n # outputs\n y = data[outputs]\n \n # Training period\n train_percent = train_percent/100.\n st = pd.to_datetime(start_date) # start date\n et = st + dt.timedelta(days=int(train_percent*window_days)) # end date\n X_train = X.loc[st:et]\n y_train = y.loc[st:et]\n \n # Testing / Validation period\n sv = et \n ev = sv + dt.timedelta(days=int((1-train_percent)*window_days)+1)\n X_test = X.loc[sv:ev]\n y_test = y.loc[sv:ev]\n \n if return_arrays:\n # Returning numpy arrays\n return X_train.values, y_train.values, X_test.values, y_test.values\n else:\n # Returning pandas dataframes\n return X_train, y_train, X_test, y_test",
"def report_creation(data_all_df, pred_start):\n data_all_df[\"HD MM/GSD\"] = np.where(data_all_df.MM.str.contains(\"HD\"), \"HD MM\", \"GSD\")\n \n\n select_cols = [\"IDV_LEAD_MNTS_Pred\", \"IDV_HELPER_MNTS_Pred\",\"IDV_HELPER_OVR_Pred\",\n \"IDV_LD_UNCLAIMED_Pred\", \"IDV_HP_UNCLAIMED_Pred\", \"IDV_HPN_UNCLAIMED_Pred\",\n \"IDV_LD_DRIVE_Pred\", \"IDV_HP_DRIVE_Pred\", \"IDV_HPN_DRIVE_Pred\"]\n\n for col in select_cols:\n data_all_df[col.replace('MNTS','HRS')] = round(data_all_df[col]/60,1)\n \n \n data_all_df[\"Lead Hrs/Appnt\"] = data_all_df[\"IDV_LEAD_HRS_Pred\"]/data_all_df[\"IDV_APPT_ID_Pred\"]\n data_all_df[\"Helper Hrs/Appnt\"] = data_all_df[\"IDV_HELPER_HRS_Pred\"]/data_all_df[\"IDV_APPT_ID_Pred\"]\n \n res_cols = [\"MM\", \"HD MM/GSD\", \"FISC_WK_OF_MTH_ID\", \"RSS\",\n \"Prediction_Trf\", \"Lead Hrs/Appnt\",\n \"IDV_LEAD_HRS_Pred\", \"IDV_HELPER_HRS_Pred\", \"Helper Hrs/Appnt\", \n \"IDV_HELPER_OVR_Pred\",\n \"IDV_LD_UNCLAIMED_Pred\", \"IDV_HP_UNCLAIMED_Pred\", \"IDV_HPN_UNCLAIMED_Pred\",\n \"IDV_LD_DRIVE_Pred\", \"IDV_HP_DRIVE_Pred\", \"IDV_HPN_DRIVE_Pred\"]\n data_result = data_all_df[res_cols]\n data_result = data_result[data_result.FISC_WK_OF_MTH_ID >= pred_start]\n data_result.columns = [\"MM\", \"HD MM/GSD\", \"FISCAL_WK_ID\", \"RSS\",\n \"Order Lines\", \"Lead Hrs/Appnt\",\n \"Lead Hrs\", \"Helper Hrs\", \"Helper Hrs/Appnt\",\n \"Helper Hrs Float\",\n \"Unclaimed Lead\", \"Unclaimed Helper\", \"Unclaimed Helper Hrs Float\",\n \"Lead Drive Time\", \"Helper Drive Time\", \"Helper Hrs Float Drive Time\"]\n data_result['FISCAL_WK_ID'] = data_result['FISCAL_WK_ID'].astype(int)\n return data_result"
] | [
"0.6189123",
"0.5795984",
"0.5755057",
"0.56774807",
"0.5629217",
"0.55658317",
"0.553849",
"0.55241734",
"0.54923284",
"0.5492266",
"0.5489832",
"0.5391152",
"0.53785956",
"0.5373168",
"0.536862",
"0.53663355",
"0.53045744",
"0.5297892",
"0.5294176",
"0.5293914",
"0.5279209",
"0.5272605",
"0.5223681",
"0.52080774",
"0.52074164",
"0.51847166",
"0.5172151",
"0.51672125",
"0.5166995",
"0.5166479"
] | 0.74037975 | 0 |
Register a recipient in the DMPlatform | def register_recipient(self, id, gender, need_score, age_range,
ethnicity, home_loc):
r = Recipient(id)
r.set_gender(gender)
r.set_need_score(need_score)
r.set_age_range(age_range)
r.set_ethnicity(ethnicity)
r.set_home_location(home_loc)
self._recipients_list.append(r) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_recipient(self, recipient):\n\n self.recipients.append(recipient)",
"def recipient(self, recipient):\n\n self._recipient = recipient",
"def manage_addMailSender( self, id='MailHost', title='', host=None, port=None, REQUEST=None ):\n self._setObject( id, MailSender( id, title, host, port ) )\n\n if REQUEST is not None:\n REQUEST.RESPONSE.redirect( REQUEST.URL1 )",
"def add(self, transport, address=None):\r\n\r\n if not address:\r\n address = str(uuid.uuid1())\r\n\r\n if address in self.recipients:\r\n self.recipients[address].add(transport)\r\n else:\r\n self.recipients[address] = RecipientManager(transport, address)\r\n\r\n return address",
"def register_message():\n\n logger.info('Nos registramos')\n\n gr = register_agent(AgGestordeTransporte, DirectoryAgent, AgGestordeTransporte.uri, get_count())\n return gr",
"def send_msg(self, recipient, message):\n bus = SessionBus()\n purple = bus.get(\n \"im.pidgin.purple.PurpleService\",\n \"/im/pidgin/purple/PurpleObject\"\n )\n my_id = purple.PurpleAccountsGetAllActive()[0]\n conv = purple.PurpleConversationNew(1, my_id, recipient)\n conv_im = purple.PurpleConvIm(conv)\n purple.PurpleConvImSend(conv_im, message)",
"def register_message():\n\n logger.info('Nos registramos')\n\n gr = register_agent(AgenteAlojamientosExternoAmadeus, DirectoryAgent, AgenteAlojamientosExternoAmadeus.uri, get_count())\n return gr",
"def register(self, target, hostname, listener_type, expire=-1):",
"def register_message():\n\n logger.info('Nos registramos')\n\n gr = registerAgent(VendedorAgent, DirectoryAgent, VendedorAgent.uri, getMessageCount())\n return gr",
"def add_recipients(self, recipient):\n\n if isinstance(recipients, list):\n self.recipients.extend(recipients)\n else:\n self.recipients.append(recipients)",
"def register_msg(self, path, msgtype, msg):",
"def register_to_core(self):\n self.channel.basic_publish(exchange='', routing_key='peripheral_register', body=json.dumps({self.name: api}))",
"def add_person(room_id, person=None, isModerator='false'):\n\n url = 'https://api.ciscospark.com/v1/memberships'\n headers = {'Authorization': 'Bearer '+context.get('spark.CISCO_SPARK_PLUMBERY_BOT')}\n payload = {'roomId': room_id,\n 'personEmail': person,\n 'isModerator': isModerator }\n response = requests.post(url=url, headers=headers, data=payload)\n\n if response.status_code != 200:\n print(response.json())\n raise Exception(\"Received error code {}\".format(response.status_code))",
"def sendMessage(self, destinationUUID, message):\n with self.lock:\n packet = {}\n packet[destinationUUID] = message\n # print '\\tService.sendMessage():', message\n self.outbox.put(packet)",
"def install_recipient_instruction(self, token_type=\"Unrestricted\",\r\n transaction_id=None):\r\n response = self.install_payment_instruction(\"MyRole=='Recipient';\",\r\n token_type=token_type,\r\n transaction_id=transaction_id)\r\n body = response.read()\r\n if(response.status == 200):\r\n rs = ResultSet()\r\n h = handler.XmlHandler(rs, self)\r\n xml.sax.parseString(body, h)\r\n recipient_token = rs.TokenId\r\n try:\r\n boto.config.save_system_option(\"FPS\", \"recipient_token\",\r\n recipient_token)\r\n except(IOError):\r\n boto.config.save_user_option(\"FPS\", \"recipient_token\",\r\n recipient_token)\r\n\r\n return recipient_token\r\n else:\r\n raise FPSResponseError(response.status, response.reason, body)",
"def add(name, other, send_events=True, allow_services=False):",
"def sender(self, addr,name):\n self.s[name] = (addr,self.ssn.sender(addr)) \n return self.s[name]",
"def register_publisher(self, hostname, expire=-1):",
"def register_message():\n global mss_cnt\n\n gmess = Graph()\n\n # Construimos el mensaje de registro\n gmess.bind('foaf', FOAF)\n gmess.bind('dso', DSO)\n reg_obj = agn[InfoAgent.name+'-Register']\n gmess.add((reg_obj, RDF.type, DSO.Register))\n gmess.add((reg_obj, DSO.Uri, InfoAgent.uri))\n gmess.add((reg_obj, FOAF.Name, Literal(InfoAgent.name)))\n gmess.add((reg_obj, DSO.Address, Literal(InfoAgent.address)))\n gmess.add((reg_obj, DSO.AgentType, DSO.TransportAgent))\n\n # Lo metemos en un envoltorio FIPA-ACL y lo enviamos\n gr = send_message(\n build_message(gmess, perf= ACL.request,\n sender= InfoAgent.uri,\n receiver= AgentDirectori.uri,\n content= reg_obj,\n msgcnt= mss_cnt),\n AgentDirectori.address)\n mss_cnt += 1\n\n return gr",
"def _register(self, comm, handler):",
"def attach(self, destination): \r\n self.destination= destination",
"def _register(self):\n self._log(self.botlog, 'Registering as %s' % self.nickname)\n self._send('USER %s B C :%s' % (self.ident, self.realname))\n self._send('NICK %s' % self.nickname)",
"def register_hosting(self,\n destination, \n input_date, \n output_date, \n number_of_rooms, \n guest_ages):\n\n self.client.service.registerHosting(destination, input_date[0], input_date[1], input_date[2],\n output_date[0], output_date[1], output_date[2], number_of_rooms, guest_ages)",
"def sendMessageToSharedNotebookMembers(self, authenticationToken, notebookGuid, messageText, recipients):\r\n pass",
"def register(self, dbus_path, uuid, codec, capabilities):\n self._media_proxy.proxy.RegisterEndpoint(\n dbus_path,\n {\n \"UUID\": uuid,\n \"Codec\": Byte(codec),\n \"Capabilities\": Array(capabilities, signature=\"y\")\n })",
"def attach(self, destination): \r\n self.destination=destination",
"def send(self, recipient, sender, price, country, message):\n raise NotImplementedError",
"def register_message():\n global mss_cnt\n\n gmess = Graph()\n\n # Construimos el mensaje de registro\n gmess.bind('foaf', FOAF)\n gmess.bind('dso', DSO)\n reg_obj = agn[InfoAgent.name+'-Register']\n gmess.add((reg_obj, RDF.type, DSO.Register))\n gmess.add((reg_obj, DSO.Uri, InfoAgent.uri))\n gmess.add((reg_obj, FOAF.Name, Literal(InfoAgent.name)))\n gmess.add((reg_obj, DSO.Address, Literal(InfoAgent.address)))\n gmess.add((reg_obj, DSO.AgentType, DSO.HotelsAgent))\n\n # Lo metemos en un envoltorio FIPA-ACL y lo enviamos\n gr = send_message(\n build_message(gmess, perf= ACL.request,\n sender= InfoAgent.uri,\n receiver= AgentDirectori.uri,\n content= reg_obj,\n msgcnt= mss_cnt),\n AgentDirectori.address)\n mss_cnt += 1\n\n return gr",
"def registerEvent(eventName, publisher, msgInterface, exclusive=FALSE):",
"def agentbehavior1():\n gr = register_message()\n\n pass"
] | [
"0.61973524",
"0.59831744",
"0.5846386",
"0.5775348",
"0.57607996",
"0.5657584",
"0.56348884",
"0.5614726",
"0.5613135",
"0.54141515",
"0.53985727",
"0.53896177",
"0.5369155",
"0.53325903",
"0.5322393",
"0.53180146",
"0.53143936",
"0.52928996",
"0.5290928",
"0.52667665",
"0.5259475",
"0.5258275",
"0.52435434",
"0.5233279",
"0.5227964",
"0.5218421",
"0.52007127",
"0.51601315",
"0.515423",
"0.51400703"
] | 0.61426806 | 1 |
Registers a restaurant in the DMPlatform | def register_restaurant(self, id, location, meals_list):
r = Restaurant(id)
r.set_location(location)
r.set_meals_offered_list(meals_list)
self._restaurants_list.append(r) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def register(self):\n self.logger.info(\"Registering agent %s\", \"/registry/\" + self._configuration[\"identification\"][\"uuid\"])\n self._coordination.update(\"/registry/\" + self._configuration[\"identification\"][\"uuid\"], self._configuration[\"identification\"])",
"def post(self):\n reg = self.request.get('registry')\n region_name = self.request.get('region')\n if reg and len(reg) > 0 and reg.isalnum() and validate_region(region_name):\n region = get_region_id(region_name)\n # Create Registry on IOT Core\n iot = IOT()\n success, message = iot.create_registry(region,reg)\n if success:\n # Add registry to Datastore\n ds = Datastore()\n status = ds.add_registry(reg, region_name)\n self.response.headers['Content-Type'] = 'text/plain'\n if status:\n self.response.write('Registry Added')\n else:\n self.response.write('Registry already exists')\n else:\n self.response.write(message)\n else:\n self.response.write('invalid parameters: ' + reg + \" \" + region_name )",
"def registerDevice(self):\n\t\tr = req.post(\"http://localhost:9090/devices?id={}&sensors={}_{}&board={}\".format(\n\t\t\tBOARD_ID,\n\t\t\tSENSOR1,\n\t\t\tSENSOR2,\n\t\t\tBOARD\n\t\t))\n\t\tprint (\"[{}] Device Registered on Room Catalog\".format(\n\t\t\tint(time.time()),\n\t\t))",
"def test_create_restaurant_with_token(self):\n url = '/api/places/'\n client = APIClient()\n\n client.credentials(HTTP_AUTHORIZATION=self.test_user1_token)\n response = client.post(url, self.restaurant_data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)",
"def _registerOnServer(self, daemon, nameserver,vclock):\n uri = daemon.register(self)\n nameserver.register(self._name, uri)\n self.updateVectorClock(vclock)\n print(\"Gateway registered. Name {} and uri {} \".format(self._name,uri))",
"def self_register(self):\n req_url = 'http://{}:{}/api/locks/'.format(\n self.server, self.port,\n )\n while True:\n name = input('Name (required): ')\n if name:\n break\n while True:\n location = input('Location (required): ')\n if location:\n break\n\n rfid = ''\n use_rfid = input('Would you like to scan an RFID card now(y/n)? ')\n while True:\n if use_rfid != 'y' or rfid:\n break\n rfid = get_RFID()\n use_rfid = False\n\n json = {\n 'name': name,\n 'location': location,\n 'serial': self.serial,\n 'active': True,\n 'status': 'pending',\n 'RFID': rfid\n }\n added_lock = requests.post(\n req_url,\n auth=requests.auth.HTTPBasicAuth(\n self.user.username,\n self.user.password\n ),\n json=json\n ).json()\n return added_lock['pk']",
"def register(cls, L):\r\n ...",
"def register(self, **form_data):\n g.security.require_access(self.neighborhood, 'register')\n shortname, reg_kwargs = self._parse_add_project_data(form_data)\n\n # install the project\n try:\n c.project = self.neighborhood.register_project(\n shortname, **reg_kwargs)\n except RegistrationError:\n redirect_to = self.neighborhood.url()\n ming.odm.odmsession.ThreadLocalODMSession.close_all()\n flash(\"You do not have permission to register\", \"error\")\n else:\n redirect_to = c.project.script_name + 'home/'\n ming.odm.odmsession.ThreadLocalODMSession.flush_all()\n flash('Welcome to your new project!')\n\n redirect(redirect_to)",
"def __init__(self, restaurant_name, cuisine_type):\n\t\tself.name = restaurant_name\n\t\tself.type = cuisine_type",
"def register(self):\n raise NotImplementedError",
"def register(self):\n raise NotImplementedError",
"async def register(hass, token, symbol, pin):\n keystore = await Keystore.create(device_model=\"Home Assistant\")\n account = await Account.register(keystore, token, symbol, pin)\n return {\"account\": account, \"keystore\": keystore}",
"def register_device():\n payload = request.get_json()\n return _register_device(payload)",
"async def _perform_register(self):\n data = {\"username\": self.user, \"password\": self.password}\n return await self._perform_request(\"register\", data, lambda r: r.text())",
"def register(locator: str, entry_point, **kwargs):\n\n agent_registry.register(name=locator, entry_point=entry_point, **kwargs)",
"def register(project_id, runner):\n pass",
"def __init__(self, restaurant_name, cuisine_type):\r\n\t\tself.restaurant_name = restaurant_name\r\n\t\tself.cuisine_type = cuisine_type",
"def restaurants_new():\n # If the user isn't logged in, send to the login page\n if helper.handle_login(login_session) is False:\n return redirect('/login')\n\n if request.method == 'POST':\n if len(request.form['name']) > 0:\n new_restaurant = Restaurant(name=request.form['name'],\n address=request.form['address'],\n phone=request.form['phone'],\n web=helper.check_restaurant_URL(request.form['web']),\n description=request.form['description'],\n user_id=login_session['user_id'])\n session.add(new_restaurant)\n session.commit()\n flash(\"New restaurant created - {}\".format(new_restaurant.name))\n tag_line = request.form['tag_line']\n tag_list = tag_line.split(',')\n for tag in tag_list:\n helper.add_tag_if_not_exists(tag, new_restaurant.id)\n return redirect(url_for('restaurants_page'))\n else:\n flash(\"Incorrect Restaurant details - Please include a name!\")\n\n user_info = helper.get_user_if_exists(login_session)\n return render_template('newrestaurant.html', user_info=user_info)",
"def register_travel_pack(self, origin, destination, departure_date, arrival_date, number_of_rooms, is_promo, guest_ages):\n\n self.client.service.registerTravelPack(origin, destination, departure_date[0], departure_date[1], departure_date[2],\n arrival_date[0], arrival_date[1], arrival_date[2], number_of_rooms, is_promo,\n guest_ages)",
"def registerWithSitemap(self):\n\n self.core.requireUniqueService('registerWithSitemap')\n\n #from soc.modules.seeder.views import seeder\n #self.core.registerSitemapEntry(seeder.view.getDjangoURLPatterns())",
"def add_data(self, data: Restaurant):\n if not data.name_seq_nr:\n raise ValueError('Expected data to have \"name_seq_nr\" key')\n\n self.__data[data.name_seq_nr] = data\n\n data_to_dump = []\n for restaurant in self.__data.values():\n data_to_dump.append(restaurant.as_dict())\n \n self._write_json(data_to_dump)",
"def register(self):\n raise NotImplementedError()",
"def _update_restaurant_info(self):\n Restaurant().update(self._entity_id, self._entity_info)",
"def register(self):\n raise NotImplementedError(\"Should have implemented this\")",
"def register():\n json_data = request.get_json()\n if not json_data:\n return {\"message\": \"No input data provided\"}, 400\n\n # Register address\n # Validate and deserialize input\n try:\n address_data = AddressSchema(json_data)\n except ma.ValidationError as err:\n print(err.messages)\n return err.messages, 422\n\n address = Address(address_data)\n db.session.add(address)\n db.session.commit()\n id_address = AddressSchema().dump(Address.query.get(address.id))\n\n # register donor\n # Validate and deserialize input\n try:\n donor_data = DonorSchema(json_data)\n donor_data['address'] = id_address\n except ma.ValidationError as err:\n print(err.messages)\n return err.messages, 422\n\n donor = Donor(donor_data)\n db.session.add(donor)\n db.session.commit()\n id_donor = DonorSchema().dump(donor.query.get(donor.id))\n\n return {\"message\": \"Donor user registered.\", \"id\": id_donor}, 200",
"def register(self, carrier):\n self.carriers[carrier.name] = carrier()",
"def register_user():\n pass",
"def register(self, type, name,vclock):\n print (\"Gateway Registry invoked for {}\".format(name))\n if vclock is not None:\n self.updateVectorClock(vclock)\n self._IDtoTypeMap[self._counter] = (name, type)\n if type == constants.ProcessConstants.TYPE_DATABASE:\n self._databaseName = name\n self._counter += 1\n return self._counter",
"def __init__(self, restaurant_name, cuisine_type):\n self.name = restaurant_name\n self.type = cuisine_type",
"def regen(self):\n self.create(overwrite=True)\n self.load()"
] | [
"0.62473947",
"0.6196962",
"0.61499894",
"0.59200966",
"0.5669677",
"0.56151634",
"0.5595705",
"0.5543274",
"0.5539966",
"0.5462055",
"0.5462055",
"0.5450199",
"0.54481614",
"0.54412204",
"0.54329973",
"0.5423446",
"0.5412657",
"0.54084283",
"0.53993064",
"0.5379552",
"0.53782815",
"0.5342592",
"0.53422534",
"0.5334167",
"0.5333471",
"0.53290516",
"0.53104365",
"0.52898616",
"0.5288892",
"0.52614915"
] | 0.72535706 | 0 |
Parses the .ac lines and modifies the frequency of each voltage source and sets the impedances of passive elements | def ac_parse(obj_list, ac_lines):
freq_set = set()
for obj in obj_list:
if obj.form == 'dc':
freq_set.add(0)
for line in ac_lines:
tokens = line.split()
if len(tokens) != 3:
print("ERR: Invalid .ac command:", line)
return None
name = tokens[1]
freq = get_quant(tokens[2])
if freq == None:
print("ERR: Frequency not a numeric type:", tokens[2])
return None
if freq not in freq_set and len(freq_set) == 0:
freq_set.add(freq)
elif freq not in freq_set:
print("ERR: Multiple source frequencies not supported")
return None
for ind in range(len(obj_list)):
obj = obj_list[ind]
if obj.name == name:
if obj.el_type not in 'VI' or obj.form != 'ac':
print("ERR: Element", name, "not an AC voltage or current source")
return None
else:
obj.freq = freq
break
obj_list[ind] = obj
else:
print("ERR: No source named", name, "defined")
return None
for ind in range(len(obj_list)):
obj = obj_list[ind]
if obj.el_type == 'L':
obj.imp = 2*np.pi*freq*1j*obj.value
elif obj.el_type == 'C':
obj.imp = -1j/(2*np.pi*freq*obj.value)
obj_list[ind] = obj
return obj_list | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def parse_afos(self):\n # at most, only look at the top four lines\n data = \"\\n\".join([line.strip()\n for line in self.sections[0].split(\"\\n\")[:4]])\n tokens = re.findall(\"^([A-Z0-9 ]{4,6})$\", data, re.M)\n if tokens:\n self.afos = tokens[0]",
"def set_test_afc_val(self):\r\r\n\r\r\n MAX_NUM_AGC_ITER = 10\r\r\n MAX_CHANGE = 2000\r\r\n afc_per_hz = 0.28 #Reasonable starting factor\r\r\n\r\r\n func_name = sys._getframe(0).f_code.co_name\r\r\n loggerDisplay = logging.getLogger(__name__ + func_name)\r\r\n afc_val = self.modemObj.get_afc_val()\r\r\n assert(afc_val is not None)\r\r\n afc_val = int(afc_val)\r\r\n loggerDisplay.info('Current afc value is %s' %afc_val)\r\r\n freq_err_Hz, freq_err_lim_str = self.get_freq_err_info_tuple()\r\r\n\r\r\n iteration = 0\r\r\n while freq_err_lim_str.upper() != \"OK\" and iteration < MAX_NUM_AGC_ITER:\r\r\n if freq_err_lim_str.upper() not in ['ULEL', 'NMAU', 'ULEU', 'NMAL']:\r\r\n loggerDisplay.info('Unexpected response; %s' %freq_err_lim_str.upper())\r\r\n loggerDisplay.info('Will continue with current afc value')\r\r\n break\r\r\n\r\r\n afc_change = int(freq_err_Hz * afc_per_hz)\r\r\n afc_change = min(afc_change, MAX_CHANGE)\r\r\n afc_change = max(afc_change, -MAX_CHANGE)\r\r\n afc_val += afc_change\r\r\n\r\r\n self.modemObj.set_afc_val(afc_val)\r\r\n loggerDisplay.info('Iteration %s' %(iteration+1))\r\r\n loggerDisplay.info(\"freq_err_Hz=%s, %s will try with new AFC value %s change %s\"\r\r\n %(freq_err_Hz, self.evm.dictKeysValidLim[freq_err_lim_str], afc_val, afc_change))\r\r\n old_freq_err_Hz = freq_err_Hz\r\r\n freq_err_Hz, freq_err_lim_str = self.get_freq_err_info_tuple()\r\r\n try:\r\r\n afc_per_hz = afc_change/(old_freq_err_Hz-freq_err_Hz)\r\r\n except ZeroDivisionError:\r\r\n afc_per_hz = 0.2\r\r\n if afc_per_hz < 0:\r\r\n afc_per_hz = 0.2 #It got worse, go back to something safe\r\r\n if afc_per_hz > 1:\r\r\n afc_per_hz = 1\r\r\n loggerDisplay.debug(\"afc_per_hz=%s\" %(afc_per_hz))\r\r\n iteration += 1\r\r\n\r\r\n if iteration < MAX_NUM_AGC_ITER:\r\r\n loggerDisplay.info(\"Carrier Frequency Error %s is within the required tolerance, Converged AFC value is %s after %s iterations\"\r\r\n %(freq_err_Hz, afc_val, iteration))\r\r\n else:\r\r\n loggerDisplay.info(\"Carrier Frequency Error %s is outside the required tolerance after %s iterations\"\r\r\n %(freq_err_Hz, iteration))\r\r\n loggerDisplay.info(\"Will use AFC value %s\" %afc_val)\r\r\n raise ExGeneral(\"Fail: AFC correction did not converge to a frequency error within tolerance.\")",
"def set_configure_ac(self):\n if self._fhandle_configure_ac is None:\n config_file_name = check_configure_scan(self._project_path)\n if config_file_name:\n self._fhandle_configure_ac = open(config_file_name, \"r\")\n else:\n logger.warning(\"Not found configure.ac or configure.in file\")\n return\n\n try:\n raw_data = self._fhandle_configure_ac.read()\n except IOError:\n logger.warning(\"Couldn't read configure.ac file\")\n return\n mylexer = m4_macros_analysis.M4Lexer()\n mylexer.build()\n generator = mylexer.get_token_iter(raw_data)\n cache_generator = m4_macros_analysis.CacheGenerator(generator, origin_data=raw_data)\n # self.m4_macros_info = m4_macros_analysis.functions_analyze(cache_generator)\n # initialize functions\n self._m4_analyzer.configure_ac_analyze(generator, level=1)\n\n self.m4_macros_info = self._m4_analyzer.m4_libs\n self.configure_ac_info = self._m4_analyzer.functions\n self.config_h = self._m4_analyzer.config_h\n self.ac_headers = self._m4_analyzer.ac_headers",
"def ac_voltage(self, ac):\n self.set_ac_voltage(f'{ac}' if self._is_min_max(ac) else f'{ac} V')",
"def attach_AC(self):\n n = self.pC - 1\n self.A[n] = self._mps_AC(self.A[n], self.C)",
"def _update_feedback(self):\n #First read in the current voltage (power)\n #Read in numReadsPerCycle signals (arb) to average\n #TODO: allow user to select reads per signal\n currSignal = self._ai_client.get_ai_voltage(self._ai_channel, self.numReadsPerCycle, max_range=self.max_input_voltage)\n\n #Add new data to the pid\n self.pid.set_pv(np.atleast_1d(np.mean(currSignal)))\n\n #Now compute the new control value and update the AO\n self.pid.set_cv()\n self._curr_output_voltage = self._curr_output_voltage + self.pid.cv\n if self._curr_output_voltage < self.min_voltage:\n self._curr_output_voltage = self.min_voltage\n elif self._curr_output_voltage > self.max_voltage:\n self._curr_output_voltage = self.max_voltage\n\n\n #Finally updating the analog output\n\n #Do a final check to make sure that if you are in hardware control mode that the voltage control is still HIGH\n #This is to avoid the potential error if the voltage control is toggled low between the last call of _check_hardware_control\n #and update_feedback, whcih would mean that currSignal would be 0 (assuming a pulsed experiment), and causing a garbage\n #feedback which could be an issue in the next pulse.\n if (~self._under_hardware_control or self.ai_client.get_ai_voltage(self._hwc_ai_channel)[-1] > self._hwc_thresh):\n self._ao_client.set_ao_voltage(self._ao_channel, self._curr_output_voltage)",
"def auxcheck(lines, matchcond, rfac):\n\n # Compile the regular expression to match the Arrhenius\n # coefficients. This is intentionally different from the Amatch in\n # run_sens.py\n Amatch = re.compile(r'(([-+]?[0-9]+(\\.[0-9]+)?[eE][-+]?[0-9]+)|(?<![\\d\\.])([0]+\\.?[0]+)(?![\\d]))')\n\n # Loop through the lines in the input list\n for lineNum in range(len(lines)):\n line = lines[lineNum]\n\n # Check that the line matches the input matching condition. If\n # not, the line is not modified\n skip1 = matchcond.search(line)\n if skip1 is not None:\n\n # If the line matches the proper condition, find the\n # Arrhenius coefficient, multiply it by two, reconstruct\n # the line, and overwrite the original line in the input\n # list.\n Afactor = Amatch.search(line)\n x = Decimal(Afactor.group(1))\n x = Decimal(rfac) * x\n modline = line[:Afactor.start()] + str(x) + line[Afactor.end():]\n lines[lineNum] = modline\n\n # Return the list of modified lines\n return lines",
"def read_apr(self, lexclude=[], discontinuity=None, rename=None, verbose=False):\n###############################################################################\n \n import pyacs.lib.astrotime\n from pyacs.sol.gpoint import Gpoint\n\n # DEAL WITH RENAME IF PROVIDED\n \n if rename is not None:\n \n if verbose:print(\"-- Rename info provided for apr file: \", self.name)\n\n H_rename = {}\n\n # Case for a CODE rename applying for all SINEX files\n if 'all' in rename:\n \n for (code, new_code) in rename['all']:\n H_rename[code] = new_code\n \n # Case for a CODE rename applying for the current SINEX\n \n if self.name in list(rename.keys()):\n\n for (code, new_code) in rename[self.name]:\n H_rename[code] = new_code\n \n # READING APR FILE\n \n if verbose:\n print('-- Reading Globk apr file ', self.name)\n\n try:\n APR_VALUE = np.genfromtxt(self.name, comments='#', usecols=(1,2,3,4,5,6,7,8,9,10,11,12,12))\n APR_NAME = np.genfromtxt(self.name, comments='#', usecols=(0), dtype=str)\n except:\n print('!!!ERROR: could not read Globk format apr file:' , self.name)\n import sys\n sys.exit()\n \n for i in np.arange( APR_VALUE.shape[0]) :\n print('-- processing ', APR_NAME[i][:4])\n [x,y,z,sx,sy,sz,epoch, vx,vy,vz,svx,svy,svz]= APR_VALUE[i,:]\n M=Gpoint(X=x,Y=y,Z=z,\\\n SX=sx,SY=sy,SZ=sz,\\\n VX=vx,VY=vy,VZ=vz,SVX=svx,SVY=svy,SVZ=svz, \\\n epoch=epoch,code=APR_NAME[i][:4],pt='A',soln=1)\n \n self.estimates[ APR_NAME[i][:4], 1 ] = M",
"def read_log_imuaccel(ac_id, filename):\n f = open(filename, 'r')\n pattern = re.compile(\"(\\S+) \"+ac_id+\" IMU_ACCEL (\\S+) (\\S+) (\\S+)\")\n list_meas = []\n while True:\n line = f.readline().strip()\n if line == '':\n break\n m = re.match(pattern, line)\n if m:\n list_meas.append([float(m.group(1)), float(m.group(2)), float(m.group(3)), float(m.group(4))])\n return np.array(list_meas)",
"def read_adas(self):\n for name in self.files_atte:\n self.beam_atte.append(adas.ADAS21(name))\n for name in self.files_emis:\n self.beam_emis.append(adas.ADAS22(name))",
"def AXPreg(dic,organism):\n\twith open('./results/'+organism+'/logfiles/ATPreg.log','w') as out:\n\t\trlst = []\n\t\tAXP = ['ATP', 'ADP', 'AMP']\n\t\tfor r in dic.rlst:\n\t\t\tif intersection(AXP,r.innames) != [] and intersection(AXP,r.outnames) != []:\n\t\t\t\trlst.append(r)\n\t\trevcount = 0\n\t\tcontrcount = 0\n\t\tcycount = 0\n\t\tout.write('CONTROLLED AXP reactions:\\n\\n')\n\t\tfor r in rlst:\n\t\t\tif r.reversible == True:\n\t\t\t\trevcount += 1\n\t\t\tif r.elst != []:\n\t\t\t\tout.write(r.name+' ['+', '.join(map(str,[p for p in r.inpath]))+']\\t'+', '.join(map(str,r.innames))+'\\t'+', '.join(map(str,r.outnames))+'\\n'+'\\n'.join(map(str,[([c.name for c in e.cs],e.activation) for e in r.elst]))+'\\n\\n')\n\t\t\t\tcontrcount += 1\n\t\tout.write('self-controlled AXP reactions (reactions that produce AXP and are controlled by it:\\n\\n')\n\t\tfor r in rlst:\n\t\t\tif r.reversible == True:\n\t\t\t\trevcount += 1\n\t\t\tif r.elst != [] and intersection(AXP,[c.name for c in e.cs]) != []:\n\t\t\t\tout.write(r.name+' ['+', '.join(map(str,[p for p in r.inpath]))+']\\t'+', '.join(map(str,r.innames))+'\\t'+', '.join(map(str,r.outnames))+'\\n'+'\\n'.join(map(str,[([c.name for c in e.cs],e.activation) for e in r.elst]))+'\\n\\n')\n\t\tout.write('---------------------------------------------------------------------------------------\\nLOOSE Futile cycles controlled by AXP\\n\\n')\n\t\tfilterlst = dic.create_filterlst(int(max(dic.dgraph.degree(dic.dgraph.vs[:]))*0.06))\n\t\tfor r1 in rlst:\n\t\t\tfor r2 in dic.rlst:\n\t\t\t\tint1 = [c for c in intersection(r1.inels,r2.outels) if c.name not in filterlst]\n\t\t\t\tint2 = [c for c in intersection(r2.inels,r1.outels) if c.name not in filterlst]\n\t\t\t\tif len(int1) != 0 and len(int2) != 0 and r1 != r2:\n\t\t\t\t\tout.write(r1.name+' ['+', '.join(map(str,[p for p in r1.inpath]))+']\\t'+', '.join(map(str,r1.innames))+'\\t'+', '.join(map(str,r1.outnames))+'\\n')\n\t\t\t\t\tout.write(r2.name+' ['+', '.join(map(str,[p for p in r2.inpath]))+']\\t'+', '.join(map(str,r2.innames))+'\\t'+', '.join(map(str,r2.outnames))+'\\nREGULATED-AND:\\t'+str(r1.elst != [] or r2.elst != [])+'\\nREGULATED-OR:\\t'+str(r1.elst != [] and r2.elst != [])+'\\n\\n')\n\n\t\tout.write('---------------------------------------------------------------------------------------\\nSTRICT Futile cycles controlled by AXP\\n\\n')\n\t\tfor r1 in rlst:\n\t\t\tfor r2 in rlst:\n\t\t\t\tif len(intersection(r1.inels,r2.outels)) > 1 and len(intersection(r2.inels,r1.outels)) > 1 and r1 != r2:\n\t\t\t\t\tout.write(r1.name+' ['+', '.join(map(str,[p for p in r1.inpath]))+']\\t'+', '.join(map(str,r1.innames))+'\\t'+', '.join(map(str,r1.outnames))+'\\n')\n\t\t\t\t\tout.write(r2.name+' ['+', '.join(map(str,[p for p in r2.inpath]))+']\\t'+', '.join(map(str,r2.innames))+'\\t'+', '.join(map(str,r2.outnames))+'\\nREGULATED-AND:\\t'+str(r1.elst != [] or r2.elst != [])+'\\nREGULATED-OR:\\t'+str(r1.elst != [] and r2.elst != [])+'\\n\\n')\n\t\tprint 'very long one'\n\t\tout.write('---------------------------------------------------------------------------------------\\nVERY LOOSE Futile cycles controlled by AXP\\n\\n')\n\t\tfor r1 in dic.rlst:\n\t\t\tfor r2 in dic.rlst:\n\t\t\t\tint1 = [c for c in intersection(r1.inels,r2.outels) if c.name not in filterlst]\n\t\t\t\tint2 = [c for c in intersection(r2.inels,r1.outels) if c.name not in filterlst]\n\t\t\t\tif len(int1) != 0 and len(int2) != 0 and r1 != r2:\n\t\t\t\t\tcycount += 1\n\t\t\t\t\tout.write(r1.name+' ['+', '.join(map(str,[p for p in r1.inpath]))+']\\t'+', 
'.join(map(str,r1.innames))+'\\t'+', '.join(map(str,r1.outnames))+'\\n')\n\t\t\t\t\tout.write(r2.name+' ['+', '.join(map(str,[p for p in r2.inpath]))+']\\t'+', '.join(map(str,r2.innames))+'\\t'+', '.join(map(str,r2.outnames))+'\\nREGULATED-AND:\\t'+str(r1.elst != [] or r2.elst != [])+'\\nREGULATED-OR:\\t'+str(r1.elst != [] and r2.elst != [])+'\\n\\n')\n\n\t\tprint('% of REVERSIBLE AXP-converting reactions:\\t'+str(revcount)+' out of '+str(len(rlst)))\n\t\tprint('% of CONTROLLED AXP-converting reactions:\\t'+str(contrcount)+' out of '+str(len(rlst)))\n\t\tprint('# of cycles:\\t'+str(cycount))",
"def autocor(self):\n flag=0\n input=None\n level=None\n board=None\n ainps={'L0':[],'L1':[],'L2':[],'H0':[]} \n for i in self.inputs:\n print i.inputnumber.var.get()\n if(i.inputnumber.var.get() == 1):\n print i.inpnumall,\" \",rareradio.get()\n if i.inpnumall == rareradio.get():\n input=i.inpnum\n level=i.level\n board=i.board\n print 'Rare chosen:',level,input,i.board\n ainps[i.level].append(i.inpnum)\n flag=1\n #print 'ainps:',ainps \n if flag == 0:\n print \"Autocorrelation: No inputs chosen. \" \n return\n # set rare flag in c for board,mode and input\n if input==None:\n cmd=\"setRareFlag(0,0,0)\"\n else:\n mode='0'\n if level == 'H0': mode = '1'\n cmd=\"setRareFlag(\"+board+','+input+','+mode+\")\"\n output=self.vb.io.execute(cmd,log=\"yes\",applout=\"<>\") \n self.auto=Corel(self.vb,ainps)\n self.auto.autocor()",
"def maf2vcf(maf, ref):\n f = open(maf + \".aa\", 'w')\n with open(maf, 'r') as maf:\n for line in maf:\n if line.startswith(\"s\"):\n if ref in line:\n aa = line.split()\n ancallele = aa[6]\n if \"-\" in aa[4]:\n # flip to opposite base\n if aa[6] == 'A':\n ancallele = 'T'\n elif aa[6] == 'T':\n ancallele = 'A'\n elif aa[6] == 'C':\n ancallele = 'G'\n elif aa[6] == 'G':\n ancallele = 'C'\n else:\n print(\"ERROR allele not iupac\")\n else:\n pass\n line = next(maf)\n aa = line.split()\n pos = int(aa[2])\n size = int(aa[5])\n if \"-\" in aa[4]:\n pos_1 = size - pos\n else:\n pos_1 = pos\n f.write(\"{}\\t{}\\t{}\\n\".format(aa[1][3:], pos_1 + 1, ancallele))\n return(None)",
"def VACF(df,conversion = \"x\"):\n #conversion from pixels to micrometers\n if conversion == \"y\":\n df = df/1200*633\n else:\n df = df/1600*844\n #computes the velocity in one direction between the frames\n dif = pd.DataFrame()\n\n for i in range(1,len(df.T)):\n dif[i-1] = velocity(df[i-1],df[i])\n vel = []\n for i in range(len(dif)):\n vel.append(tidynamics.acf(dif.T[i]))\n\n #return the velocities in array\n return np.array(vel)",
"def cal(state, ants=0, tmo=15, waiton=ALL, subarray=DEFAULT, setAttens=True) :\n #print \"cal subarray =\", subarray\n antlist = helpers.makeList(ants)\n # This must be started first so that the atten changes don't confuse things\n multiSubarray('cal', subarray, state, antlist)\n is1cm = lofreq(subarray) < 50\n if is1cm:\n # Make sure that the last AMB psys seen by the pipeline is not\n # erroneously affected by changing the atten too quickly.\n # If we are going to the SKY, wait an extra two seconds before \n # changing the attenuation.\n if state == SKY and setAttens: wait(TIME, tmo=2.0, subarray=subarray)\n # 1cm, need to set attens\n if setAttens :\n for a in makeAntList(antlist, subarray) :\n mp = \"Control.Antenna%d.ifAttenAmb1\" %a\n try:\n atten = queryDouble(mp, retries=2)\n except:\n atten = 8\n m = \"Cannot get valid antenna IF atten on ambient\"\n m += \"for C%d from the monitor system\" %a\n print m\n commandlog(m)\n if state == SKY: \n atten -= 8\n if atten <= 0:\n if atten < 0:\n m = \"Cannot set sky atten to %.1fdB\" %atten\n m += \" for C%d; setting to 0dB\" %a \n print m\n commandlog(m)\n else:\n m = \"Attenuation is too low on C%d,\" %a\n m += \" indicating a potential low power issue\"\n print m\n commandlog(m)\n \n m = \"A possible cause is that the last time\"\n m += \" the attenuation was set with a \\n\"\n m += \"tsys(ifsetup=True) the power level was \"\n m += \"too low to use an attenuation > 8 dB.\\n\"\n m += \"You can try:\\n\"\n m += \" - Another tsys(ifsetup=True)\\n\"\n m += \" - Reporting it as a hardware error to the \"\n m += \"hardware group.\"\n print m \n atten = 0\n ifnum = 1\n antennaIFatten(atten, ifnum, invalidateTsys=False, ants=a, \n subarray=subarray)\n #print \"Setting attenation for C%d:\" %(a), atten\n return wait(CAL, antlist, tmo, waiton, subarray=subarray)",
"def reduc(self,zarange=[20,50]):\n \n # First, take out a secular gain drift for each constant elevation\n # stare. Fit P(t) to each channel in a contiguous elevation stare,\n # normalize fit to mean=1, and normalize each chan to this.\n #deg=10\n #self.removedrift(deg)\n\n # Convert P-> T RJ\n #self.P2T()\n\n # Now fit a line to P(am) in each scan and store the results.\n self.fitam(zarange)",
"def ProcessAsl(self):\n for entry in self.info:\n if self.info[entry]['type'] == 'asl':\n if self.verbose:\n print 'Processing ASL data in %s' % os.path.basename(entry)\n cmd = 'convert_file %s %s %s' % (entry, \\\n self.info[entry]['imgfile'], self.info[entry]['filetype'])\n fname = '%s%s' % \\\n (self.info[entry]['imgfile'], self.info[entry]['suffix'])\n self.CheckExec(cmd, [fname])",
"def read_log_adc_generic(ac_id, filename):\n f = open(filename, 'r')\n pattern = re.compile(\"(\\S+) \"+ac_id+\" ADC_GENERIC (\\S+) (\\S+)\")\n list_meas = []\n while True:\n line = f.readline().strip()\n if line == '':\n break\n m = re.match(pattern, line)\n if m:\n list_meas.append([float(m.group(1)), float(m.group(2)), float(m.group(3))])\n return np.array(list_meas)",
"def read_log_attitude(ac_id, filename):\n f = open(filename, 'r')\n pattern = re.compile(\"(\\S+) \"+ac_id+\" ATTITUDE (\\S+) (\\S+) (\\S+)\")\n list_meas = []\n while True:\n line = f.readline().strip()\n if line == '':\n break\n m = re.match(pattern, line)\n if m:\n list_meas.append([float(m.group(1)), float(m.group(2)), float(m.group(3)), float(m.group(4))])\n return np.array(list_meas)",
"def load_gap_free_trace(file_to_load):\n\t\n\tfilename = file_to_load; \n\texperiment_name = filename.rstrip('.abf');\n\n\tr = io.AxonIO(filename=file_to_load)\n\t#bl = r.read_block(lazy=False, cascade=True)\n\tbl = r.read_block(lazy=False)\n\t#segments are sweeps\n\n\t\n\tprint bl.segments[0].analogsignals[0].magnitude\n\t\n\t##get sampling rate\n\tsampling_rate = bl.segments[0].analogsignals[0].sampling_rate\n\tprint(sampling_rate)\n\n\t##adds channel 0 from each sweep to array \n\tprint('file has')\n\tprint(len(bl.segments))\n\tprint('sweeps')\n\tprint(len(bl.segments[0].analogsignals[0].magnitude))\n\tprint('samples')\n\tchannel_array = np.empty((len(bl.segments)+1,(len(bl.segments[0].analogsignals[0])))); \n\tprint(channel_array.shape)\n\tfor sweep in range(len(bl.segments)):\n\t\tchannel_0_sweep = [] \n\t\tfor data_point in range(len(bl.segments[sweep].analogsignals[0].magnitude)):\t\n\t\t\t#print(bl.segments[sweep].analogsignals[0].magnitude[data_point])\n\t\t\tchannel_array[sweep+1][data_point] = (bl.segments[sweep].analogsignals[0].magnitude[data_point]);\n\t\n\t\n\tprint channel_array[0][0:10]\n\t\n\n\n\t## make additional row for time\n\tsamplingrate_Hz = sampling_rate.magnitude ;\n\tsampling_interval_msec = (1000 / float(samplingrate_Hz));\n\tfor time_point in range(len(bl.segments[sweep].analogsignals[0].magnitude)):\n\t\tchannel_array[0][time_point] = (float(time_point)*sampling_interval_msec); \n\n\t## write a csv file \n\n\tnp.savetxt(experiment_name + 'abf_to_csv.csv', np.transpose(channel_array), delimiter=',', newline='\\n');\n\treturn(channel_array)",
"def parse_line(self, atline: List, list_of_lines: List, part: PART, afix: AFIX, resi: RESI) -> None:\n uvals = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]\n self.name = atline[0][:4] # Atom names are limited to 4 characters\n for n, u in enumerate(atline[6:12]):\n uvals[n] = float(u)\n self.uvals_orig = uvals[:]\n self.set_uvals(uvals)\n self._line_numbers = list_of_lines\n self.part = part\n self.afix = afix\n self.resi = resi\n self._get_part_and_occupation(atline)\n self.x, self.y, self.z = self._get_atom_coordinates(atline)\n self.xc, self.yc, self.zc = self._cell.o * Array(self.frac_coords)\n if abs(self.uvals[1]) > 0.0 and self.uvals[2] == 0.0 and self.shx.hklf: # qpeaks are always behind hklf\n self.peak_height = uvals[1]\n self.qpeak = True\n if self.shx.end: # After 'END' can only be Q-peaks!\n self.qpeak = True\n self.sfac_num = int(atline[1])\n self.shx.fvars.set_fvar_usage(self.fvar)\n self.Ucif = self.set_ucif(uvals)\n # TODO: I am still unsure if this these are correct values:\n # self.Ustar = self.Ucif * self._cell.N * self._cell.N.T\n # self.Ucart = self.Ustar * self._cell.o * self._cell.o.T\n # self.Ueq = self.set_ueq(uvals)\n # self.Uiso = self.Ueq\n # transformed_u = self.transform_u_by_symmetry(2)\n # print(self.name, [round(x, 6) for x in transformed_u], self.frac_coords)",
"def maf2vcf_mrefs(maf):\n f = open(maf + \".aa\", 'w')\n with open(maf, 'r') as maf:\n for line in maf:\n if line.startswith(\"a\"):\n ancallele = ''\n refout = ''\n line = next(maf)\n while line.startswith(\"s\"):\n if \"Wb\" in line:\n aa = line.split()\n pos = int(aa[2])\n size = int(aa[5])\n chrom = aa[1].split(\".\")[1]\n if \"-\" in aa[4]:\n if aa[6] == 'A':\n rallele = 'T'\n elif aa[6] == 'T':\n rallele = 'A'\n elif aa[6] == 'C':\n rallele = 'G'\n elif aa[6] == 'G':\n rallele = 'C'\n else:\n print(\"ERROR allele not iupac\")\n pos_1 = size - pos\n else:\n pos_1 = pos\n rallele = aa[6]\n else:\n # read in other refs\n aa = line.split()\n refout += aa[1][0]\n if \"-\" in aa[4]:\n # flip to opposite base\n if aa[6] == 'A':\n ancallele += 'T'\n elif aa[6] == 'T':\n ancallele += 'A'\n elif aa[6] == 'C':\n ancallele += 'G'\n elif aa[6] == 'G':\n ancallele += 'C'\n else:\n print(\"ERROR allele not iupac\")\n else:\n ancallele += aa[6]\n line = next(maf)\n if ancallele:\n f.write(\"{}\\t{}\\t{}\\t{}\\t{}\\n\".format(chrom, pos_1 + 1,\n rallele, ancallele,\n refout))\n else:\n pass\n return(None)",
"def M_a(self, la_dep=True):\n\n print(\"Updating a\", file=self.logfile)\n # Initialize vec_F_a\n self.vec_F_a = np.square(self.pie) + self.pie_var\n\n min = 0\n max = self.a\n\n while (self._F_a(max, la_dep=la_dep, calc=False) > 0):\n max = 2*max\n\n for it in range(40):\n if self._F_a((min+max)/2, la_dep=la_dep, calc=False) > 0:\n min = (min+max)/2\n else:\n max = (min+max)/2\n\n self.a = (min+max)/2\n\n self._propagate_a()",
"def __set_AP(self):\n\t\tif self.version == 1:\n\t\t\treturn 0\n\t\tloc = self.__get_AP_position()\n\t\tfor r in range(-2,3):\n\t\t\tfor c in range(-2,3):\n\t\t\t\tif (r == -2 or r == 2 or c == -2 or c == 2 or (r == 0 and c == 0)):\n\t\t\t\t\tself.matrix[loc+r][loc+c] = 1\n\t\t\t\telse:\n\t\t\t\t\tself.matrix[loc+r][loc+c] = 0",
"def evaluateIFsetup(ants=0, timerange=3.4, deltat=0.1,\n verbose=False, iniAtten=0) :\n antlist = makeAntList(ants)\n for a in antlist:\n if a < 16:\n print \"SZA antennas only can be used - exiting\"\n return\n tpMP = \"Ifmod.ifTotalPower\"\n attenMP = \"Ifmod.totalAtten\"\n currentPos = \"AMBIENT\"\n print \"Inserting ambient load into beam\"\n rtn = amb(ants)\n pos1ants = rtn.ready\n mplist = []\n for a in pos1ants:\n prefix = \"Sza%d.\" %(a-15)\n mplist.append([prefix+tpMP, prefix+attenMP])\n #print mplist\n if len(rtn.notready) != 0:\n print \"Ambient load not inserted for ants:\", rtn.notready\n if iniAtten != None :\n print \"Setting IF atten to %.1f\" %iniAtten\n antennaIFatten(iniAtten, 1, invalidateTsys=False, \n ants=antlist)\n wait(tmo=2.0)\n t0=time.time()\n antennaIFpower(0.3, ants)\n t1 = time.time()\n cmdtime = t1-t0\n results = []\n times = []\n nsamps = int(round(timerange/deltat))\n for i in range(nsamps):\n sleep(deltat)\n dt=time.time()-t1\n times.append(dt)\n results.append(queryMpValues(mplist))\n #print results\n print \"Command time = %.3f seconds\" %cmdtime\n for j in range(len(pos1ants)):\n aname = \"Sza%d\" %(pos1ants[j]-15)\n if verbose: print \"======== %s ========\" %aname\n for i in range(nsamps):\n r = results[i][j]\n st = \"%s: %4.1f %6.3f %4.1f\" %(aname,times[i],r[0],r[1])\n if verbose: print st\n print \"\\n==== Inferred timing & final atten =====\"\n for j in range(len(pos1ants)):\n aname = \"Sza%d\" %(pos1ants[j]-15)\n finalAtten = results[nsamps-1][j][1]\n timing = times[nsamps-1]\n for i in range(nsamps):\n index = nsamps-1-i\n atten = results[index][j][1]\n if atten != finalAtten: break\n timing = times[index]\n st = \" %s: %4.1f %4.1f\" %(aname,timing, finalAtten)\n print st\n \n #print \"Moving ambient load out of the beam\"\n pos2ants = sky(ants).ready\n # Wait for two seconds to get final psys reading\n wait(tmo=2.0)\n skyants = pos2ants\n complete = []\n for a in pos1ants :\n if (pos2ants.count(a) > 0) : complete.append(a)\n return complete",
"def read_abinit(filename='abinit.in'):\n\n from ase import Atoms, units\n\n if isinstance(filename, str):\n f = open(filename)\n else: # Assume it's a file-like object\n f = filename\n\n lines = f.readlines()\n if type(filename) == str:\n f.close()\n\n full_file = ''\n for line in lines:\n if '#' in line:\n meat, comment = line.split('#')\n else:\n meat = line\n full_file = full_file + meat + ' '\n\n full_file.strip()\n tokens = full_file.lower().split()\n\n # note that the file can not be scanned sequentially\n\n index = tokens.index(\"acell\")\n unit = 1.0\n if(tokens[index+4].lower()[:3] != 'ang'):\n unit = units.Bohr\n acell = [unit*float(tokens[index+1]),\n unit*float(tokens[index+2]),\n unit*float(tokens[index+3])]\n\n index = tokens.index(\"natom\")\n natom = int(tokens[index+1])\n\n index = tokens.index(\"ntypat\")\n ntypat = int(tokens[index+1])\n\n index = tokens.index(\"typat\")\n typat = []\n for i in range(natom):\n typat.append(int(tokens[index+1+i]))\n\n index = tokens.index(\"znucl\")\n znucl = []\n for i in range(ntypat):\n znucl.append(int(tokens[index+1+i]))\n\n index = tokens.index(\"rprim\")\n rprim = []\n for i in range(3):\n rprim.append([acell[i]*float(tokens[index+3*i+1]),\n acell[i]*float(tokens[index+3*i+2]),\n acell[i]*float(tokens[index+3*i+3])])\n\n # create a list with the atomic numbers\n numbers = []\n for i in range(natom):\n ii = typat[i] - 1\n numbers.append(znucl[ii])\n\n # now the positions of the atoms\n if \"xred\" in tokens:\n index = tokens.index(\"xred\")\n xred = []\n for i in range(natom):\n xred.append([float(tokens[index+3*i+1]),\n float(tokens[index+3*i+2]),\n float(tokens[index+3*i+3])])\n atoms = Atoms(cell=rprim, scaled_positions=xred, numbers=numbers,\n pbc=True)\n else:\n if \"xcart\" in tokens:\n index = tokens.index(\"xcart\")\n unit = units.Bohr\n elif \"xangst\" in tokens:\n unit = 1.0\n index = tokens.index(\"xangst\")\n else:\n raise IOError(\n \"No xred, xcart, or xangs keyword in abinit input file\")\n\n xangs = []\n for i in range(natom):\n xangs.append([unit*float(tokens[index+3*i+1]),\n unit*float(tokens[index+3*i+2]),\n unit*float(tokens[index+3*i+3])])\n atoms = Atoms(cell=rprim, positions=xangs, numbers=numbers, pbc=True)\n \n try:\n i = tokens.index('nsppol')\n except ValueError:\n nsppol = None\n else:\n nsppol = int(tokens[i + 1])\n\n if nsppol == 2:\n index = tokens.index('spinat')\n magmoms = [float(tokens[index + 3 * i + 3]) for i in range(natom)]\n atoms.set_initial_magnetic_moments(magmoms)\n\n return atoms",
"def _SetAnatTgts(self):\n anat_candidates = {}\n fmap_candidates = {}\n for entry in self.entry_map['anat']:\n if self.info[entry]['type'] == 'T1High':\n anat_candidates[entry] = self.info[entry]['acqtime']\n\n# Find the valid anatomical acquired nearest to fieldmap.\n tdiff_min = 1e6\n if len(self.entry_map['fmap']) > 0:\n for entry in self.entry_map['fmap']:\n anat_tgt = self. _FindNearestAnat(self.info[entry]['acqtime'])\n self.info[entry]['anat_ref'] = anat_tgt\n else:\n# No fieldmaps were collected. Find the structural nearest the\n# beginning of the EPIs.\n if len(self.entry_map['anat']) == 1:\n anat_tgt = self.entry_map['anat'][0]\n else:\n epi_start = []\n tmin = 1e6\n for anat in self.entry_map['anat']:\n if self.info[anat]['type'] != 'T1High':\n continue\n tsum1 = 0; tsum2 = 0;\n for epi in self.entry_map['epi']:\n# Difference from start of structural and first epi\n tsum1 += abs(self.info[anat]['acqtime'] - \\\n self.info[epi]['acqtime'])\n# Difference from start of structural and last epi\n tsum2 += abs(self.info[anat]['acqtime'] - \\\n (self.info[epi]['acqtime'] +\\\n self.info[epi]['TR']*self.info[epi]['tdim']))\n if tsum1 < tmin or tsum2 < tmin:\n tmin = min(tsum1, tsum2)\n anat_tgt = anat\n\n# Resolve anatomical names and links.\n self._SetAnatNames(anat_tgt)\n\n# Set appropriate attributes in the entry for each EPI.\n for epi in self.entry_map['epi']:\n if len(self.entry_map['fmap']) > 0 and not self.no_fmapcorr:\n fmap_entry = self.info[epi]['fmap_entry']\n anat_ref = self.info[fmap_entry]['anat_ref']\n self.info[epi]['anat_tgt'] = fmap_entry\n self.info[epi]['anat_matfile'] = self.info[fmap_entry]['matfile']\n if self.align_fmaps or (not self.no_align_fmaps and \\\n self._SetCatMotionFmapMats(fmap_entry, anat_ref)):\n# Concatenate motion-correction matrices with tranform from\n# fieldmap to structural. Use the registered fieldmap.\n self.info[epi]['catmats'] = True\n fmap_info = self.info[self.info[epi]['fmap_entry']]\n self.info[epi]['fmapname'] = \\\n fmap_info['imgfile_r'] + fmap_info['suffix']\n else:\n# Assume fieldmap is in register with the structural.\n self.info[epi]['catmats'] = False\n else:\n self.info[epi]['anat_tgt'] = anat_tgt\n self.info[epi]['anat_matfile'] = None\n self.info[epi]['catmats'] = False\n self.info[epi]['anat_link'] = self.info[anat_tgt]['imgfile'] + \\\n self.info[anat_tgt]['suffix']",
"def update_acm(self, epochs: int, pretrain: bool = False):\n obs = torch.tensor(self.replay_buffer.obs, dtype=torch.float32)\n next_obs = torch.tensor(self.replay_buffer.next_obs, dtype=torch.float32)\n if self.discrete:\n actions_acm = torch.tensor(self.replay_buffer.actions_acm, dtype=torch.long)\n else:\n actions_acm = torch.tensor(\n self.replay_buffer.actions_acm, dtype=torch.float32\n )\n\n acm_obs = self.acm_cat(obs, next_obs)\n new_data = TensorDataset(acm_obs, actions_acm)\n\n # TODO: add num_workers when self.num_cpu will be added to A2C\n loader = DataLoader(new_data, batch_size=self.acm_batch_size, shuffle=True)\n for e in range(epochs):\n epoch_loss = 0\n for i, (x, y) in enumerate(loader):\n loss = self.batch_update(x, y)\n epoch_loss += loss\n\n epoch_loss /= i + 1\n if self.debug_mode:\n if pretrain and self.acm_val_buffer_size:\n self.log_train_validation_loss_pretrain(e, epoch_loss)\n logger.debug(\"Epoch %d, loss = %.4f\" % (e, epoch_loss))\n\n self.acm_scheduler.step()\n self.loss[\"acm\"] = epoch_loss\n if self.acm_val_buffer_size:\n val_loss = self.calculate_validation_loss()\n self.loss[\"acm_val\"] = val_loss",
"def extract_ace_data(event, start, stop):\n#\n#--- year of starting time\n#\n atemp = re.split(':', start)\n syear = int(float(atemp[0]))\n atemp = re.split(':', stop)\n eyear = int(float(atemp[0]))\n#\n#--- convert time in Chandra Time\n#\n lstart = start\n start = time.strftime('%Y:%j:%H:%M:00', time.strptime(start, '%Y:%m:%d:%H:%M'))\n stop = time.strftime('%Y:%j:%H:%M:00', time.strptime(stop, '%Y:%m:%d:%H:%M'))\n start = int(Chandra.Time.DateTime(start).secs)\n stop = int(Chandra.Time.DateTime(stop).secs)\n#\n#--- set to data collecting period\n#\n pstart = start - 2 * aday\n period = int((stop - start) / (5 * aday)) + 1\n pstop = start + 5 * period * aday\n\n data = []\n for year in range(syear, eyear+1):\n ifile = data_dir + 'rad_data' + str(syear)\n tdata = mcf.read_data_file(ifile)\n data = data + tdata\n\n hline = 'Science Run Interruption: ' + lstart + '\\n'\n hline = hline + 'dofy electron38 electron175 protont47 proton112 '\n hline = hline + 'proton310 proton761 proton1060 aniso\\n'\n hline = hline + '-' * 100\n\n for ent in data:\n atemp = re.split('\\s+', ent)\n if atemp[0].isdigit():\n ltime = atemp[0] + ':' + atemp[1] + ':' + atemp[2] + ':' + atemp[3]\n ltime = time.strftime('%Y:%j:%H:%M:00', time.strptime(ltime, '%Y:%m:%d:%H%M'))\n stime = int(Chandra.Time.DateTime(ltime).secs)\n if (stime >= pstart) and (stime < pstop):\n hline = hline + '%3.4f\\t' % chandara_time_to_yday(stime, syear) \n hline = hline + atemp[7] + '\\t' + atemp[8] + '\\t'\n hline = hline + atemp[10] + '\\t' + atemp[11] + '\\t'\n hline = hline + atemp[12] + '\\t' + atemp[13] + '\\t'\n hline = hline + atemp[14] + '\\t' + atemp[15] + '\\n'\n\n#\n#--- print out the data\n#\n ofile = wdata_dir + event + '_dat.txt'\n\n with open(ofile, 'w') as fo:\n fo.write(hline)",
"def calibrateData(data, cal, antennas, sourceInfo, file=True, niter=None):\n # Loop over data\n for iant, dant in data.items():\n # Write results to a file\n writeOutputFile = False\n if file != False and file <> None:\n # Set file name\n writeOutputFile = True\n\n # date the output file to avoid having to parse huge files later on\n today = dt.date.today()\n dateStr = \"%i%02i%02i\" % (today.timetuple()[0], today.timetuple()[1], today.timetuple()[2]) \n \n if file == True:\n outputFileRoot = '%s_%.2d_%s.dat' % (RPNT_RESULTS, antennas[iant], dateStr)\n else:\n outputFileRoot = \"%s_%.2d_%s.dat\" % (file, antennas[iant], dateStr)\n\n # Open file\n fout = open(outputFileRoot, \"a\")\n fout.write(\"# Pointing data for antenna %d : %s\\n\" % (antennas[iant], time.asctime()))\n f=commands.freqSetup()\n fout.write(\"# Rest Frequency : %d\\n\" % f[0])\n fout.write(\"# UT : %s\\n\" % utils.getUT(timestamp=True))\n fout.write(\"# Source %s\\n\" % sourceInfo['name'])\n fout.write(\"#\\n\");\n fout.write(\"# Iter offset(az) offset(el) Amp sigma Az El\\n\");\n fout.write(\"# (arcmin) (arcmin) (Jy) (Jy) (deg) (deg)\\n\");\n\n # Get az/el\n mpAz = utils.getAntennaMp(antennas[iant]) + \".AntennaCommon.Drive.Track.actualAzimuth\"\n mpEl = utils.getAntennaMp(antennas[iant]) + \".AntennaCommon.Drive.Track.actualElevation\"\n antaz = commands.queryDouble(mpAz)\n antel = commands.queryDouble(mpEl)\n\n # Initialize\n cal[iant] = list()\n\n # Compute mean amplitude\n for d in dant:\n # Initialize\n sum = 0.0\n sumw = 0.0\n nwindows = len(d['use'])\n weights = np.zeros(nwindows)\n\n # Compute weighted average\n x = []\n for i in range(nwindows):\n if d['use'][i]:\n sum += d['amp'][i] * d['wt'][i]\n sumw += d['wt'][i]\n x.append(d['amp'][i])\n\n # Save data\n result = dict()\n if sumw > 0.0:\n # result['amp'] = sum / sumw\n x = sorted(x)\n n1 = len(x) / 2\n n2 = (len(x)-1)/ 2\n result['amp'] = 0.5 * (x[n1] + x[n2])\n result['fwhm'] = getFWHM(antennas[iant], sourceInfo['lofreq'])\n result['offaz'] = d['offaz']\n result['offel'] = d['offel']\n result['sigma'] = 1.0 / math.sqrt(sumw)\n cal[iant].append(result)\n\n # Write data\n if writeOutputFile and (niter == None or niter == d['niter']):\n fout.write(\"%6s %10.3f %10.3f %10.3f %10.3f %10.3f %10.3f\\n\" % \\\n (str(d['niter']), result['offaz'], result['offel'], result['amp'], result['sigma'], antaz, antel))\n\n # Close file\n fout.close()"
] | [
"0.5577595",
"0.5564011",
"0.54784346",
"0.5393736",
"0.5388402",
"0.534639",
"0.5306698",
"0.5298406",
"0.52770185",
"0.5204404",
"0.5174855",
"0.51437944",
"0.50771433",
"0.5069645",
"0.5057304",
"0.5054266",
"0.5053029",
"0.5044348",
"0.5043592",
"0.5039836",
"0.5017756",
"0.49919832",
"0.49765357",
"0.49650148",
"0.4946583",
"0.49417588",
"0.49378765",
"0.49285606",
"0.4895571",
"0.48846954"
] | 0.7144079 | 0 |
Takes path to netlist file and converts to data usable for circuit solving | def netlist_to_data(path):
# Open file
try:
f = open(path, 'r')
except FileNotFoundError:
print("ERR: File not found:", path)
return None
# Read file
lines = f.readlines()
f.close()
    # Strip comments and surrounding whitespace from each line
    lines = [i.split('#')[0].strip() for i in lines]
    # Locate .circuit and .end (and validate .ac placement); exit on erroneous input
start = None
end = None
for i in range(len(lines)):
if lines[i][:len(CIRCUIT)] == CIRCUIT:
if not start:
start = i+1
else:
print("ERR: Extra .circuit found")
return None
elif lines[i][:len(END)] == END:
if not start:
print("ERR: .end found before .circuit")
return None
elif not end:
end = i
break
elif lines[i][:len(AC)] == AC:
if not start:
print("ERR: .ac found before .circuit")
return None
elif not end:
                print("ERR: .ac found between .circuit and .end")
if not start:
print("ERR: No .circuit found")
return None
if not end:
print("ERR: No .end found")
return None
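    # Collect any .ac directives that appear after .end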
ac_lines = []
for i in range(end+1, len(lines)):
if lines[i][:len(AC)] == AC:
ac_lines.append(lines[i])
    # Get all lines between .circuit and .end
lines = lines[start:end]
# Parse tokens into correct fields, exit on erroneous input
    obj_list = []
name_set = set() # to detect repeated names
for line in lines:
# Omit empty lines
if line == '':
continue
# Get space separated tokens
tokens = line.split()
# Parse tokens into objects
el = token_parse(tokens)
if not el:
return None
elif el.name in name_set:
print("ERR: Repeated definition for", el.name)
return None
else:
obj_list.append(el)
return [obj_list, ac_lines] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def from_file(path, name=None, seq_types=None):\n ext = path.split(\".\")[-1]\n if name is None:\n name = path.split(\"/\")[-1].replace(f\".{ext}\", \"\")\n with open(path, \"r\") as f:\n netlist = f.read()\n if ext == \"v\":\n return verilog_to_circuit(netlist, name, seq_types)\n elif ext == \"bench\":\n return bench_to_circuit(netlist, name)\n else:\n raise ValueError(f\"extension {ext} not supported\")",
"def __init__(self, netlist_file):\n with open(netlist_file, 'r') as f:\n self.netlist = _parse_netlist(f)\n self.G = _create_graph(self.netlist)",
"def parse_netlist(netlist, netfile):\n assert isinstance(netlist, dict)\n assert isinstance(netfile, str)\n \n gate_strlist = []\n with open(netfile) as f:\n for line in f:\n # Clean up gate string so it can be formatted for netlist addition\n gate_strlist.append(line.replace(' ', '').replace('(', '').replace(')', '').strip())\n\n # Create netlist from test file\n key = 0 # key of dictionary\n for gate in gate_strlist:\n assert (isinstance(gate, str))\n # Clean up gate so it can be formatted for netlist addition\n gate = gate.split(',')\n\n # Assert that gate type is valid.\n assert (gate[TYPE] in VALID)\n\n # Format the gate\n # Keys are ascending integers starting at 0\n # Values are tuples with: gate type (string), input nodes (tuple), and output node (string)\n gate = tuple([gate[TYPE], # insert type\n tuple(gate[x] for x in range(1, len(gate) - 1)), # insert input nodes\n gate[len(gate) - 1]]) # insert output node\n\n netlist[key] = gate\n key += 1",
"def read_netlist(filename):\n # Read the netlist\n try:\n netlist = pickle.load(open(filename, \"rb\"))\n except IOError:\n sys.stdout.write(\"Netlist file not found\\n\")\n sys.exit(1)\n except (pickle.PickleError, AttributeError, EOFError, IndexError):\n sys.stdout.write(\"Netlist could not be unpickled\\n\")\n sys.exit(1)\n \n # Check the netlist contains the bare minimum of information\n if not isinstance(netlist, dict):\n sys.stdout.write(\n \"Netlist must be defined in a dictionary\\n\")\n sys.exit(1)\n \n logger.info(\"Loaded netlist with fields: {}\".format(\n \", \".join(netlist)))\n \n return netlist",
"def read_networkx_data(parsed_filename_path, networkx_path):\n with open(parsed_filename_path, 'rb') as f:\n file_name = pk.load(f)\n with open(networkx_path, 'rb') as f:\n networkx_list = pk.load(f)\n return file_name, networkx_list",
"def load_net(filepath):\n\twith open(filepath, 'r') as fh:\n\t\treturn load(file = fh)",
"def LoadFromPajek(filepath, getlabels=False):\n # 0) OPEN THE FILE AND READ THE SIZE OF THE NETWORK\n pajekfile = open(filepath, 'r')\n firstline = pajekfile.readline()\n firstline = firstline.split()\n N = int(firstline[1])\n\n # 1) READ THE LABELS OF THE NODES IF WANTED\n if getlabels:\n labels = []\n\n # Security check, make sure that labels of nodes are listed in file\n line = pajekfile.readline()\n if line.split()[0] != '1':\n pajekfile.seek(1)\n print('LoadFromPajek() warning: No labels found to read.')\n\n # If labels are in file continue reading the labels.\n else:\n # If labels are wrapped in between quotes\n try:\n idx1 = line.index('\"') + 1\n # Add the first label\n idx2 = line[idx1:].index('\"')\n label = line[idx1:idx1+idx2]\n labels.append(label)\n\n # And now read the labels for the rest of the nodes\n for i in range(1,N):\n line = pajekfile.readline()\n idx1 = line.index('\"') + 1\n idx2 = line[idx1:].index('\"')\n label = line[idx1:idx1+idx2]\n labels.append(label)\n\n # Otherwise, make a wild guess of what the label is\n except ValueError:\n # Add the first label\n label = line.split()[1]\n labels.append(label)\n\n # And now read the labels of the rest of the nodes\n for i in range(1,N):\n line = pajekfile.readline()\n label = line.split()[1]\n labels.append(label)\n\n # 2) READ THE LINKS AND CREATE THE ADJACENCY MATRIX\n # 2.1) Find out whether the network is directed or undirected\n # while loop to skip empty lines if needed or the lines of the labels\n done = False\n while not done:\n line = pajekfile.readline()\n if line[0] == '*':\n if 'Edges' in line:\n directed = False\n elif 'Arcs' in line:\n directed = True\n else:\n print('Could not find whether network is directed or undirected')\n break\n done = True\n\n # 2.2) Read the first line contining a link\n line = pajekfile.readline()\n line = line.split()\n\n # If link information is BINARY, just read the adjacency list links\n if len(line) == 2:\n # 2.3) Declare the adjacency matrix and include the first link\n adjmatrix = np.zeros((N,N), np.uint8)\n i = int(line[0]) - 1\n j = int(line[1]) - 1\n adjmatrix[i,j] = 1\n if not directed:\n adjmatrix[j,i] = 1\n\n # 2.4) Include the rest of the links\n for line in pajekfile:\n i, j = line.split()\n i = int(i) - 1\n j = int(j) - 1\n adjmatrix[i, j] = 1\n if not directed:\n adjmatrix[j, i] = 1\n\n # If the link information is WEIGHTED, read the weighted links\n elif len(line) == 3:\n # 2.3) Find whether link weights are integer or floating poing\n i, j, aij = line\n outdtype = np.int\n try:\n outdtype(aij)\n except ValueError:\n outdtype = np.float\n\n # 2.4) Declare the adjacency matrix and include the first link\n adjmatrix = np.zeros((N, N), outdtype)\n i = int(i) - 1\n j = int(j) - 1\n adjmatrix[i, j] = outdtype(aij)\n if not directed:\n adjmatrix[j, i] = outdtype(aij)\n\n # 2.5) Read the rest of the file and fill-in the adjacency matrix\n for line in pajekfile:\n i, j, aij = line.split()\n i = int(i) - 1\n j = int(j) - 1\n adjmatrix[i, j] = outdtype(aij)\n if not directed:\n adjmatrix[j, i] = adjmatrix[i, j]\n\n # 3) CLOSE FILE AND RETURN RESULTS\n pajekfile.close()\n\n if getlabels:\n return adjmatrix, labels\n else:\n return adjmatrix",
"def load_net_from_file(filename):\n\n print(\"Loading neural net from {}\".format(filename))\n with open(filename, \"r\") as fd:\n net = json.load(fd)\n\n print(\"net = {}\".format(pprint.pformat(net)))\n return net",
"def import_data(self, filename=None, rawdata=None, append=False):\n \n if filename:\n with open(filename,\"r\") as f:\n data = f.read()\n elif rawdata:\n data = rawdata\n else:\n raise Exception(\"No data given\")\n\n if not append:\n self.nodelist = []\n\n d = deserialize(data, self.consolidator)\n self.nodelist += list(d.nodes.values())\n if append:\n self.domain_obj = None #mark as outdated\n else:\n self.domain_obj = d",
"def loadNetworkFromFile(self, file):\r\n for line in open(file, 'r'):\r\n fromVertex, toVertex, capacity = map(int, line.split())\r\n self.addEdge(fromVertex, toVertex, capacity)",
"def mk_parses(listfile, corenlp_host):\n # if not listfile.endswith('.listfile'):\n # filetype = 'Co-Reference List file'\n # error = 'has incorrect file type'\n # raise FilenameException(\"Error: %s %s\" % (filetype, error))\n\n try:\n with open(listfile) as f:\n pserver = jsonrpc.ServerProxy(jsonrpc.JsonRpc20(),\n jsonrpc.TransportTcpIp(\n addr=(corenlp_host, 8080), limit=1000))\n parses = dict([(get_id(path), FileParse(path, pserver))\n for path in f.readlines()\n if path.lstrip()[0] != '#'])\n except IOError:\n stderr.write(strerror(EIO)) # stderr.write does not have newlines\n stderr.write(\"\\nERROR: Could not open list file\\n\")\n exit(EIO)\n else:\n return parses",
"def load_network(fpath):\n\twith open(fpath, \"rb\") as f:\n\t\tnetwork = pickle.load(f)\n\treturn network",
"def readNetwork(filename):\r\n\r\n fileIn = open(filename, 'r')\r\n\r\n outputList = []\r\n network = []\r\n\r\n for line in fileIn:\r\n #Placing each individual parameter of the line into a list\r\n lineList = line.replace(\"\\n\",\"\").split(\", \")\r\n #Extracting the list of direct contacts from the lineList\r\n directList = lineList[3:-2]\r\n #Deleting the copy of the direct contacts\r\n del lineList[3:-2]\r\n #Removing < and > from the direct contacts list\r\n for i in range(len(directList)):\r\n directList[i] = directList[i].strip(\"<>\")\r\n #Appending all the direct contacts to the end of the list\r\n lineList.append(directList)\r\n outputList.append(lineList)\r\n \r\n fileIn.close()\r\n\r\n for parameters in outputList:\r\n #Creating a Person object from the parameters given by each line\r\n person = Person(parameters)\r\n #Appending each Person object to the network list\r\n network.append(person)\r\n \r\n return network",
"def get_coordinates_net(net_file, net_name):\r\n pl_file = net_file.replace('.nets', '.pl')\r\n net = {}\r\n net_name_number = int(net_name.replace('n', ''))\r\n nodes_in_net_num = 0\r\n node_names = []\r\n data = []\r\n pos = 0\r\n counter = -1\r\n with open(net_file) as nf:\r\n for num, line in enumerate(nf, 0):\r\n if \"NetDegree\" in line:\r\n counter += 1\r\n if counter == net_name_number:\r\n pos = num\r\n data = line.split()\r\n nodes_in_net_num = data[2]\r\n\r\n with open(net_file) as nf:\r\n for num, line in enumerate(nf, 0):\r\n if pos < num <= pos + int(nodes_in_net_num):\r\n data = line.split()\r\n node_names.append(data[0])\r\n\r\n data.clear()\r\n with open(pl_file) as p:\r\n for num, line in enumerate(p):\r\n if num == 0 or '#' in line or line == '\\n':\r\n continue\r\n else:\r\n data.append(line.split())\r\n\r\n for i in node_names:\r\n for j in data:\r\n if i == j[0]:\r\n net[i] = [j[1]]\r\n net[i].append(j[2])\r\n\r\n return net",
"def convert_data(DataPath, labeldict):\n\n inputlist, inputnamelist = ark_parser(DataPath, 'train.ark')\n \n label = []\n assert len(inputnamelist) == len(labeldict.keys())\n\n for name in inputnamelist:\n label.append(labeldict[name])\n\n convert_label_to_int(DataPath, '/48phone_char.map', label)\n\n with open('./train_data.pkl', 'wb') as train_data:\n pickle.dump(inputlist, train_data)",
"def load_nli_file(data_path, num_par=2):\n tokenizer = tokenization.NltkTokenizer()\n dataset = tf.data.TextLineDataset(data_path)\n dataset = dataset.map(\n functools.partial(_nli_line_to_tensors, tokenizer=tokenizer),\n num_parallel_calls=num_par)\n dataset = dataset.filter(lambda x: tf.greater_equal(x[\"label\"], 0))\n return dataset",
"def load_network(file_name):\n with open(file_name) as file:\n data = json.load(file)\n\n cost_fn = getattr(sys.modules[__name__], data[\"cost_func\"])\n act_fn = getattr(sys.modules[__name__], data[\"act_func\"])\n metric = getattr(sys.modules[__name__], data[\"metric\"])\n\n network = Network([1, 1], act_func=act_fn, cost_func=cost_fn, metric=metric)\n network.layers_num = data[\"layers_num\"]\n network.weights = [np.array(w) for w in data[\"weights\"]]\n network.biases = [np.array(b) for b in data[\"biases\"]]\n\n return network",
"def load_network_for_training(file_name):\n global training_set, start_round, start_digit\n try:\n with open (file_name, 'r') as f:\n w = np.load(f)\n w_min = np.load(f)\n w_max = np.load(f)\n a_plus = np.load(f)\n a_minus = np.load(f)\n b_plus = np.load(f)\n b_minus = np.load(f)\n v_th = np.load(f)\n training_set = np.reshape(np.load(f), (TRAINING_SIZE, N))\n start_round = np.load(f)\n start_digit = np.load(f)\n\n Output.set_states({'v_th' : v_th})\n S.set_states({\n 'w' : w,\n 'w_min' : w_min, \n 'w_max' : w_max, \n 'a_plus' : a_plus, \n 'a_minus' : a_minus, \n 'b_plus' : b_plus, \n 'b_minus' : b_minus\n })\n print start_round\n print start_digit\n print v_th\n except IOError as e:\n print \"error opening file: %s\" % e.strerror\n sys.exit()",
"def load(uDir):\n import sys\n sys.path.append(uDir)\n from net_spec import spec\n \n builder = NetworkBuilder(spec)\n htm = builder.build()\n htm.start()\n \n ## restore each node state\n layers = htm.layers\n \n for l in range(len(layers) - 1):\n (r,c) = spec[l]['shape']\n\n if layers[l].node_sharing:\n state = {}\n state['coincidences'] = np.load(uDir + str(l) + \".0.0.coincidences.npy\")\n state['temporal_groups'] = [] ## !FIXME temporal groups should be saved, first\n state['PCG'] = np.load(uDir + str(l) + \".0.0.PCG.npy\")\n\n for i in range(r):\n for j in range(c):\n layers[l].pipes[i][j].send((\"set_state\", state))\n\n else:\n for i in range(r):\n for j in range(c):\n state = {}\n state['coincidences'] = np.load(uDir + str(l) + \".\" + str(i) + \".\" + str(j) + \".coincidences.npy\")\n state['temporal_groups'] = [] ## !FIXME temporal groups should be saved, first\n state['PCG'] = np.load(uDir + str(l) + \".\" + str(i) + \".\" + str(j) + \".PCG.npy\")\n layers[l].pipes[i][j].send((\"set_state\", state))\n \n ## restore also last node's state\n state = {}\n state['coincidences'] = np.load(uDir + str(len(layers) - 1) + \".0.0.coincidences.npy\")\n state['cls_prior_prob'] = np.load(uDir + str(len(layers) - 1) + \".0.0.cls_prior_prob.npy\")\n state['PCW'] = np.load(uDir + str(len(layers) - 1) + \".0.0.PCW.npy\")\n layers[-1].pipes[0][0].send((\"set_state\", state))\n\n return htm",
"def to_net(self, filename):\n if len(self.nodes1)>0:\n h = open(filename, \"w\")\n for n1,n2,s in zip(self.nodes1, self.nodes2, self.signs):\n h.write(\"%s -> %s %s\\n\" % (n1, n2, s))\n h.close()",
"def _read_network_file(in_name, in_format=\"\", directed=False):\n\n if in_format == 'edges':\n if directed:\n g = nx.read_edgelist(in_name, create_using=nx.DiGraph())\n else:\n g = nx.read_edgelist(in_name, data=False)\n elif in_format == 'gefx':\n g = nx.read_gexf(in_name)\n elif in_format == 'gml':\n g = nx.read_gml(in_name)\n elif in_format == 'graphML' or in_format == 'graphml':\n g = nx.read_graphml(in_name)\n nodesInfo = g.nodes(data=True)\n if len(nx.get_node_attributes(g,\"label\"))>0:\n node2Label = {nodeid: data[\"label\"].replace(\" \",\"_\") for (nodeid, data) in nodesInfo}\n g = nx.relabel_nodes(g, node2Label, copy=False)\n elif in_format == 'pajek':\n g = nx.read_pajek(in_name)\n elif in_format == 'ncol':\n g = nx.read_edgelist(in_name)\n else:\n raise Exception(\"UNKNOWN FORMAT \" + in_format)\n return g",
"def parse_and_map(self, local_inet_path):\n for file_name in tqdm(self.filenames):\n # TODO: Add some log while processing data\n # Reads file name from full file path\n sliced_list = file_name.split(sep='/t')[-1].split(sep='_')\n self.data_dict['path'].append(file_name)\n self.data_dict['dataset'].append(sliced_list[1])\n self.data_dict['device'].append(sliced_list[2])\n self.data_dict['wn_id'].append(sliced_list[3])\n self.data_dict['im_id'].append(sliced_list[4])\n self.data_dict['eeg_session'].append(sliced_list[5])\n self.data_dict['global_session'].append(sliced_list[6].split(sep='.')[0])\n # File name: /MindBigData_Imagenet_Insight_n00007846_6247_1_785\n # Imagenet file path: /n00007846/n00007846_6247.JPEG\n file_name = str(sliced_list[3] + '_' + sliced_list[4] + '.JPEG')\n inet_path = os.path.join(local_inet_path, sliced_list[3], file_name)\n # If copy is true, data related local ImageNet images will be copied to separate folder\n if self.copy:\n try:\n # New file paths\n new_dir_path = os.path.join(self.copy_path, sliced_list[3])\n new_inet_path = os.path.join(new_dir_path, file_name)\n # Creates recursive folders in disk\n os.makedirs(new_dir_path, exist_ok=True, mode=0o771)\n # Copies file to destination\n shutil.copy(inet_path, new_inet_path)\n # Appends new file path to list\n self.data_dict['inet_path'].append(new_inet_path)\n except Exception as e:\n # TODO: More useful exception\n print(e)\n else:\n # Append local ImageNet path to list\n self.data_dict['inet_path'].append(inet_path)",
"def load(self, name=\"\"):\n\n self.constructed = True\n if name == \"\":\n name = \"/home/unai/Escritorio/MultiNetwork/model/model\"\n\n network_descriptors = {\"Generic\": GenericDescriptor, \"Decoder\": DecoderDescriptor, \"Discrete\": DiscreteDescriptor, \"Convolution\": ConvolutionDescriptor}\n\n if not os.path.isfile(name):\n print(\"Error at loading the model\")\n return None\n\n f = open(name, \"r+\")\n\n lines = f.readlines()\n\n i = 0\n while lines[i] != \"\\n\": # Each component is stored in a line\n ident, n_inp, kind, n_hidden, layers, init, act, cond_rand, taking, producing, depth, reachable, belows = lines[i][:-1].split(\"_\")\n kwargs = {}\n if int(ident[1:]) > self.last_net:\n self.last_net = int(ident[1:])\n\n self.reachable[ident] = reachable.split(\",\")\n self.comps_below[ident] = belows.split(\",\")\n\n if \"onv\" in kind: # Not working right now\n filters, sizes, layers, strides = layers.split(\"*\")\n sizes = sizes.split(\",\")\n s = np.array([[int(sz) for sz in szs.split(\"/\")] for szs in sizes])\n desc = network_descriptors[kind](int(inp), int(outp), int(n_inp), layers.split(\",\"), filters.split(\",\"), [int(x) for x in strides.split(\",\")], s, [int(x) for x in act.split(\",\")], [int(x) for x in init.split(\",\")], kwargs)\n else:\n if len(kwargs) > 0: # Not working right now\n kwargs = kwargs.split(\"-\")\n kwargs[0] = [int(x) for x in kwargs[0].split(\".\") if len(x) > 0]\n kwargs[1] = [int(x) for x in kwargs[1].split(\".\") if len(x) > 0]\n if len(cond_rand) > 0:\n cond_rand = cond_rand.split(\"-\")\n cond_rand[0] = [int(x) for x in cond_rand[0].split(\",\") if len(x) > 0]\n cond_rand[1] = [int(x) for x in cond_rand[1].split(\",\") if len(x) > 0]\n kwargs[\"conds\"] = cond_rand\n desc = network_descriptors[kind](int(taking.split(\",\")[0]), int(producing.split(\",\")[0]), int(n_inp), int(n_hidden), [int(x) for x in layers.split(\",\") if x != \"-1\"], init_functions[[int(x) for x in init.split(\",\") if x != \"-1\"]],\n act_functions[[int(x) for x in act.split(\",\") if x != \"-1\"]], **kwargs)\n\n # print(\"ident\", ident, \"n_inp\", n_inp, \"kind\", kind, \"inp\", inp, \"outp\", outp, \"layers\", layers, \"init\", init, \"act\", act, \"taking\", taking, \"producing\", producing, \"depth\", depth, \"kwargs\", kwargs)\n net = NetworkComp(desc, InOut(size=int(taking.split(\",\")[0]), data_type=taking.split(\",\")[1]), InOut(data_type=producing.split(\",\")[1], size=int(producing.split(\",\")[0])), int(depth))\n\n self.add_net(net, ident)\n i += 1\n\n i += 1\n\n while lines[i] != \"\\n\": # Inputs\n\n ident, size, kind, depth = lines[i].split(\"_\")\n\n self.inputs[ident] = ModelComponent(None, InOut(size=int(size), data_type=kind), int(depth))\n i += 1\n\n i += 1\n\n while lines[i] != \"\\n\": # Outputs\n\n ident, size, kind, depth, belows = lines[i].split(\"_\")\n\n self.outputs[ident] = ModelComponent(InOut(size=int(size), data_type=kind), None, int(depth))\n self.comps_below[ident] = belows.split(\",\")\n i += 1\n\n i += 1\n\n while i < len(lines): # Connections\n name, inp, outp, kind, size = lines[i].split(\"_\")\n\n if int(name[1:]) > self.last_con:\n self.last_con = int(name[1:])\n\n self.connections[name] = Connection(inp, outp, InOut(kind, int(size)), name)\n i += 1\n self.update_below()",
"def load(self, name, path=None):\n if path is None:\n path = '.'\n if path[0] == '~':\n path = os.getenv(\"HOME\") + path[1:]\n\n try:\n with open(path + '/' + name, 'rb') as f:\n return pickle.load(f)\n except IOError as e:\n msg = str(e) + '\\nNeuralNetwork.load failed.'\n raise DNNetIOError(msg)",
"def load_data(filename):\n return InferenceData.from_netcdf(filename)",
"def load(filename):\n f = open(filename, \"r\")\n data = json.load(f)\n f.close()\n cost = getattr(sys.modules[__name__], data[\"cost\"])\n net = Network(data[\"sizes\"], cost=cost)\n net.weights = [np.array(w) for w in data[\"weights\"]]\n net.biases = [np.array(b) for b in data[\"biases\"]]\n return net",
"def convert_to_pybel(file_list, mol_format):\n if not mol_format:\n mol_format = \"mopout\"\n return [pybel.readfile(mol_format, name).next() for name in file_list]",
"def load_graph(input_file=None, input_list=None):\n G = nx.Graph()\n if input_file:\n with open(input_file, 'r') as file:\n for line in file.readlines():\n line = line.strip().split(\" \")\n G.add_edge(line[0], line[1])\n elif input_list:\n G.add_edges_from(input_list)\n return G",
"def read_from_file(self, filename):\n with open(filename, 'r') as f:\n for line in f.read().splitlines():\n name, neighbours, r_table = line.split('!')\n\n self.add_new(name)\n if neighbours:\n for neighbour in neighbours.split(';'):\n try:\n self.add_neighbours(name, neighbour)\n except Exception as e:\n\n pass\n if r_table:\n for network in r_table.split(';'):\n net_name, distance = network.split(':')\n\n distance = int(distance)\n self.add_network(name, net_name, distance)",
"def read_file(path):\n\tG = nx.Graph()\n\n\twith open(path, 'r') as in_file:\n\t\tfor line in in_file:\n\t\t\tcontents = line.split(\" \")\n\t\t\tu = int(contents[0])\n\t\t\tv = int(contents[1])\n\t\t\tstreet_type = int(contents[2])\n\t\t\ttime = int(contents[3])\n\t\t\tlength = int(contents[4])\n\t\t\tcost = 1/float(length)\n\t\t\t\n\t\t\tG.add_node(u)\n\t\t\tG.add_node(v)\n\t\t\tif street_type is 1:\n\t\t\t\tG.add_edge(u, v, street_type=street_type, time=time, length=length, cost=cost)\n\t\t\telse:\n\t\t\t\tG.add_edge(u, v, street_type=street_type, time=time, length=length, cost=cost)\n\t\t\t\tG.add_edge(v, u, street_type=street_type, time=time, length=length, cost=cost)\n\n\treturn G"
] | [
"0.6311136",
"0.60333127",
"0.5969786",
"0.59286124",
"0.588238",
"0.5635766",
"0.56275123",
"0.557719",
"0.55747956",
"0.5564225",
"0.55455184",
"0.5541207",
"0.5479334",
"0.54560566",
"0.54214746",
"0.5388568",
"0.53870106",
"0.53783447",
"0.5365531",
"0.5320062",
"0.5312747",
"0.529524",
"0.5264339",
"0.5252912",
"0.5252704",
"0.52223",
"0.52217686",
"0.52094585",
"0.5205591",
"0.52045625"
] | 0.74806345 | 0 |
Takes list of Element objects and returns them as dictionary of elements grouped by type | def separate_types(obj_list):
obj_dict = {
'R':[],
'L':[],
'C':[],
'V':[],
'I':[],
'E':[],
'G':[],
'H':[],
'F':[]
}
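    # Bucket each element under its single-character type key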
for obj in obj_list:
obj_dict[obj.el_type].append(obj)
return obj_dict | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def group_elements_by_property_type_and_element_type(elements, pid_data):\n # find unique groups\n #print(\"pid_data = \\n%s\\n\" % str(pid_data))\n pid_elementnum = unique2d(pid_data[:, 1:])\n\n data2 = {}\n etype_map = {\n 1 : 'CROD', 5: 'CONROD',\n 2 : 'CBEAM', 3 : 'CBAR',\n 4 : 'CSHEAR',\n 10 : 'CELAS1', 11 : 'CELAS2', 12 : 'CELAS3', 13 : 'CELAS4',\n 73 : 'CTRIA3', 144 : 'CQUAD4',\n\n 60 : 'CTETRA4', 61 : 'CTETRA10',\n 62 : 'CPENTA6', 63 : 'CPENTA15',\n 64 : 'CHEXA8', 65 : 'CHEXA20',\n }\n\n #self.model.log.debug(\"pid_elementnum = \\n%s\\n\" % str(pid_elementnum))\n for (pid, element_num) in pid_elementnum:\n if pid not in elements.property_ids:\n print('Property pid=%s does not exist' % pid)\n #continue\n i = np.where(pid_data[:, 1] == pid)[0]\n #self.model.log.debug(\"pid=%i element_num=%s Step #1=> \\n%s\\n\" % (\n #pid, element_num, pid_data[i, :]))\n j = np.where(pid_data[i, 2] == element_num)[0]\n eids = pid_data[i[j], 0]\n #self.model.log.debug(\"pid=%i element_num=%s eids=%s Step #2=> \\n%s\\n\" % (\n #pid, element_num, eids, pid_data[i[j], :]))\n element_type = etype_map[element_num]\n data2[(pid, element_type)] = eids\n return data2",
"def get_items_from_element(element):\n data = {'element': element,\n 'items': []}\n for item in element[len(element)-1]:\n item_info = {'data': item.items(),\n 'tag': item.tag,\n 'keys': item.keys()}\n data['items'].append(item_info)\n return data",
"def fields_dict(slist, type=SList):\n fields = slist.fields()\n names = fields.pop(0)\n out = collections.OrderedDict()\n for i, name in enumerate(names[:-1]):\n out[name] = type(slist.fields(i)[1:])\n out[names[-1]] = type([' '.join(f[i + 1:]) for f in fields])\n return out",
"def _create_group_map(elem_list, paired):\n # Create groups for the multilane files\n group_map = defaultdict(list)\n for elem in elem_list:\n search_elem = elem if not paired else elem[0]\n if pattern_multilane.search(search_elem):\n group = _group_for(search_elem)\n group_map[group].append(elem)\n\n # Only multifile groups are returned\n return {\n group: sorted(elems, key=lambda x: x[0] if paired else x)\n for group, elems in group_map.items()\n if len(elems) > 1\n }",
"def getElements(iface, type=IElement):\n items = {}\n for name in iface:\n attr = iface[name]\n if type.providedBy(attr):\n items[name] = attr\n return items",
"def parse_element(elem):\n return_dict = {}\n for e in elem:\n return_dict[e.tag] = e.text\n return return_dict",
"def dom2dict(element):\n keys = list(element.attributes.keys())\n values = [val.value for val in list(element.attributes.values())]\n return dict(list(zip(keys, values)))",
"def dom2dict(element):\n keys = list(element.attributes.keys())\n values = [val.value for val in list(element.attributes.values())]\n return dict(list(zip(keys, values)))",
"def elem2dict(node):\n result = {}\n\n for element in node.iterchildren():\n # Remove namespace prefix\n key = element.tag.split('}')[1] if '}' in element.tag else element.tag\n key = key[:1].lower() + key[1:]\n\n # Process element as tree element if the inner XML contains non-whitespace content\n if element.text and element.text.strip():\n value = element.text\n else:\n value = elem2dict(element)\n if key in result:\n if type(result[key]) is list:\n result[key].append(value)\n else:\n tempvalue = result[key].copy()\n result[key] = [tempvalue, value]\n else:\n result[key] = value\n return result",
"def get_repeated_children_type(self):\n result_dict = dict()\n for v in self.graph.vertices():\n parent_type = self.graph.vp.type[v]\n if parent_type not in result_dict:\n result_dict[parent_type] = set()\n\n children_types = set()\n for u in v.out_neighbors():\n child_type = self.graph.vp.type[u]\n if child_type not in children_types:\n children_types.add(child_type)\n else:\n result_dict[parent_type].add(child_type)\n return result_dict",
"def convert_element_list_to_map(element, list_attr_name,\n foreign_key_attr='name'):\n if list_attr_name in element:\n element[list_attr_name] = OrderedDict(\n [(item[foreign_key_attr], item,)\n for item in element[list_attr_name]])\n else:\n element[list_attr_name] = OrderedDict()\n return element[list_attr_name]",
"def _group_objects(list_, attr=None, key=None, default=None,\n minimum=MIN_GROUPED):\n if not bool(attr) ^ bool(key):\n raise AttributeError(\"Either an attribute or a key must be specified.\")\n\n name = \"A-Z\" if default is None else default\n groups = collections.defaultdict(list)\n\n if list_ and (minimum is None or len(list_) > minimum):\n for item in list_:\n value = getattr(item, attr) if attr is not None else item[key]\n letter = value[0].upper()\n if letter not in string.ascii_uppercase:\n groups[\"#\"].append(item)\n else:\n groups[letter].append(item)\n elif list_:\n groups[name] = list_\n\n return groups",
"def create_seqfeature_dictionary(seqfeature_list):\n\n seqfeature_type_set = set()\n seqfeature_dict = {}\n for seqfeature in seqfeature_list:\n seqfeature_type_set.add(seqfeature.type)\n for type in seqfeature_type_set:\n sublist = []\n for index in range(len(seqfeature_list)):\n seqfeature = seqfeature_list[index]\n if seqfeature.type == type:\n sublist.append(seqfeature)\n seqfeature_dict[type] = sublist\n return seqfeature_dict",
"def _extract_elements(self, tree, element_type):\n # creates a new attribute, e.g. 'self.nodes' and assigns it an\n # empty list\n setattr(self, element_type, [])\n etree_elements = get_elements(tree, element_type)\n for i, etree_element in enumerate(etree_elements):\n # create an instance of an element class (e.g. TokenNode)\n salt_element = create_class_instance(etree_element, i, self.doc_id)\n # and add it to the corresponding element type list,\n # e.g. 'self.nodes'\n getattr(self, element_type).append(salt_element)\n # In case of a 'nodes' element this is equivalent to:\n # self.nodes.append(TokenNode(etree_element, document_id))",
"def tag_dict(self):\n tag_dict = dict()\n for document in self.documents:\n for tag in document.tags:\n tag_type = tag['tag']\n tag_dict[tag_type] = tag_dict.get(tag_type, []) + [tag]\n return tag_dict",
"def _item_to_elements_parser(self, item):\n elements = {}\n\n ####### Sad solution - look for better one. #######\n items = [\"data\", \"img\", \"title\", \"link\", \"price\"]\n values = (\"item.p.string.strip()\", 'item.img[\"src\"]', 'item.img[\"alt\"]',\n '''item.find(\"a\", {\"class\":\"detailsLink\"})['href']''',\n '''item.find('strong').string.strip()''')\n for key, value in zip(items, values):\n\n # CONVERT TIME\n # if key == \"data\":\n # try:\n # print (time.strptime(eval(value), \"%d %b\"))\n # except Exception as error:\n # print (error) # time data '5 paz' does not match format '%d %b'\n\n try:\n elements.update({key:eval(value)})\n except (TypeError, AttributeError):\n elements.update({key:None})\n\n\n # print()\n # for key, val in elements.items():\n # print (key, val)\n # print()\n ###################################################\n return elements",
"def _elements_to_dict(data, position, obj_end, opts, subdocument=None):\n if type(opts.document_class) == tuple:\n result = opts.document_class[0](**opts.document_class[1]) if not subdocument else dict()\n else:\n result = opts.document_class() if not subdocument else dict()\n pos = position\n for key, value, pos in _iterate_elements(data, position, obj_end, opts):\n if key in [\"firstBatch\", \"nextBatch\"] and type(opts.document_class) == tuple:\n batches = []\n for batch in value:\n batch_document = opts.document_class[0](**opts.document_class[1])\n batch_document.update(batch)\n batches.append(batch_document)\n result[key] = batches\n else:\n result[key] = value\n if pos != obj_end:\n raise bson.InvalidBSON('bad object or element length')\n return result",
"def group_by(s, fn):\n\n dict = {}\n for item in s:\n output = fn(item)\n if output in dict.keys():\n dict[output].extend([item])\n else:\n dict[output] = [item]\n return dict",
"def by_type(environments):\n types = {}\n for env in environments:\n et = env.environmentType\n options = types.setdefault(et.id, set())\n options.add(env.id)\n return types",
"def _element_to_dict(data, position, obj_end, opts):\n element_type = data[position:position + 1]\n position += 1\n element_name, position = bson._get_c_string(data, position, opts)\n value, position = bson._ELEMENT_GETTER[element_type](data, position,\n obj_end, opts,\n element_name)\n return element_name, value, position",
"def parse(cls, el):\n if isinstance(el, list):\n for i, x in enumerate(el):\n el[i] = HashableDict.parse(x)\n elif isinstance(el, dict):\n d = HashableDict()\n for k, v in el.iteritems():\n d[k] = HashableDict.parse(v)\n return d\n return el",
"def environments_of(groups):\n types = {}\n for group in groups:\n for env in group.environments:\n et = env.environmentType\n envs = types.setdefault((et.id, et.name), set())\n envs.add((env.id, env.name))\n return types",
"def to_dict(self):\n result = {}\n\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n\n if issubclass(IndexedElement, dict):\n for key, value in super.items():\n result[key] = value\n\n if issubclass(Software, dict):\n for key, value in self.items():\n result[key] = value\n\n return result",
"def group_by(self, key):\n ret = {}\n for test in self._testset():\n if hastag(test, key):\n ret.setdefault(gettag(test, key), []).append(test)\n continue\n ret.setdefault(\"others\", []).append(test)\n for k in ret:\n ret[k] = self.__class__(ret[k])\n return ret",
"def _preprocess(*elements):\n output_dict = {}\n for idx, elem in enumerate(elements):\n uint8_img = elem['image']\n patch = data_provider.full_image_to_patch(uint8_img, patch_size, num_channels)\n label = tf.one_hot(idx, num_classes)\n output_dict[idx] = {'images': patch, 'labels': label}\n return output_dict",
"def listToDict(listElem):\n res = dict()\n for elem in listElem:\n res[elem] = 0\n return res",
"def dict_by_attr(collection, attrname, value_attrname=None):\n mapping = {}\n for item in collection:\n if callable(attrname):\n key = attrname(item)\n else:\n key = extended_getattr(item, attrname)\n if value_attrname:\n item = extended_getattr(item, value_attrname)\n mapping[key] = mapping.get(key, []) + [item]\n return mapping",
"def get_nodes_by_type(ntwrk, typ='switch'):\r\n return {k: v for el in ntwrk\r\n for k, v in el.items() if v['type'] == typ}",
"def expected_types_and_attributes(self):\n return {self.configs[\"entry_type\"]: set()}",
"def _assoc_list_to_map(lst):\n d = {}\n for run_id, metric in lst:\n d[run_id] = d[run_id] + [metric] if run_id in d else [metric]\n return d"
] | [
"0.6465859",
"0.6432415",
"0.6038449",
"0.5934554",
"0.579664",
"0.5792297",
"0.5668392",
"0.55965745",
"0.5593538",
"0.5515256",
"0.5470534",
"0.5427659",
"0.5405782",
"0.53900087",
"0.5383047",
"0.5328841",
"0.52936536",
"0.5274927",
"0.5270392",
"0.526419",
"0.52565235",
"0.52399766",
"0.5239088",
"0.5231037",
"0.5230433",
"0.5230247",
"0.52296513",
"0.5223224",
"0.52164567",
"0.5209348"
] | 0.6921661 | 0 |
Takes dictionary of objects by type and the node mapping, returns Numpy arrays representing equations governing the circuit. | def get_equation_matrix(obj_dict, node_dict):
v_src_count = len(obj_dict['V'])
v_src_map = {}
for ind in range(len(obj_dict['V'])):
v_src_map[obj_dict['V'][ind].name] = ind
    node_count = len(node_dict) - 1 # excludes GND
A = np.zeros((node_count, node_count), dtype='complex') # top left quad
B = np.zeros((node_count, v_src_count), dtype='complex') # top right quad
aux = np.zeros((v_src_count, node_count), dtype='complex') # bottom left quad
zero_quad = np.zeros((v_src_count,v_src_count), dtype='complex') # bottom right quad
const_vec_nodes = np.zeros((node_count), dtype='complex')
const_vec_aux = np.zeros((v_src_count), dtype='complex')
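    # Stamp each element into the MNA-style system: KCL node equations in the top
    # rows, one auxiliary branch-current equation per voltage source in the bottom rows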
for k in obj_dict.keys():
lst = obj_dict[k]
for obj in lst:
n1 = node_dict[obj.n1]-1
n2 = node_dict[obj.n2]-1
value = obj.value
if obj.el_type in 'RLC':
                if obj.imp is None:  # DC analysis: L behaves as a short (EPS), C as an open (INF)
if obj.el_type == 'L':
value = EPS
elif obj.el_type == 'C':
value = INF
else:
value = obj.imp + EPS
if n1 != -1 and n2 != -1:
A[n1,n1] += 1/value
A[n1,n2] += -1/value
A[n2,n1] += -1/value
A[n2,n2] += 1/value
elif n1 == -1:
A[n2,n2] += 1/value
elif n2 == -1:
A[n1,n1] += 1/value
elif obj.el_type == 'V':
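                # Independent voltage source: its branch current is an extra unknown,
                # and the aux row enforces V(n1) - V(n2) = value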
nv = v_src_map[obj.name]
if n1 != -1 and n2 != -1:
B[n1,nv] += 1
B[n2,nv] += -1
aux[nv,n1] += 1
aux[nv,n2] += -1
elif n1 == -1:
B[n2,nv] += -1
aux[nv,n2] += -1
elif n2 == -1:
B[n1,nv] += 1
aux[nv,n1] += 1
const_vec_aux[nv] += value
elif obj.el_type == 'I':
if n1 != -1 and n2 != -1:
const_vec_nodes[n1] += obj.value
const_vec_nodes[n2] += -obj.value
elif n1 == -1:
const_vec_nodes[n2] += -obj.value
elif n2 == -1:
const_vec_nodes[n1] += obj.value
elif obj.el_type in 'EGHF':
print("ERR: Controlled sources not supported yet")
return None
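    # Assemble the block matrix [[A, B], [aux, 0]] and the stacked RHS vector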
top_half = np.concatenate((A, B), axis=1)
bot_half = np.concatenate((aux, zero_quad), axis=1)
coeff_mat = np.concatenate((top_half, bot_half), axis=0)
const_vec = np.concatenate((const_vec_nodes, const_vec_aux), axis=0)
return [coeff_mat, const_vec] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def nodedict2xarraydict(nodedict):\n return {name:getXarray(node) for name,node in nodedict.items()}",
"def get_data(nodes=[]):\n\n # get nodes\n if not nodes:\n nodes = mc.ls(sl=1)\n\n # decipher if the nodes are constraints themselves or are driven by constraints\n nodes = mc.ls(nodes)\n constraints = [n for n in nodes if mc.nodeType(n) in constraint_types]\n non_con_nodes = [n for n in nodes if n not in constraints]\n constraints.extend(utils.get_constraints(non_con_nodes))\n\n data = {}\n\n for constraint in constraints:\n\n # get driven target nodes\n ntype = mc.nodeType(constraint)\n constraint_func = get_constraint_func(ntype)\n driven = mc.listConnections(constraint+'.constraintParentInverseMatrix') or []\n drivers = constraint_func(constraint, q=1, tl=1)\n\n if not ntype in constraint_types or not driven or not drivers:\n continue\n\n driven = list(set(driven))\n weight_alias_list = constraint_func(constraint, q=1, wal=1)\n\n con_data = {\n 'con_type': ntype,\n 'drivers': drivers,\n 'driven': driven,\n 'weight_list': [mc.getAttr(constraint+'.'+w) for w in weight_alias_list]\n }\n\n # Create dict entry for constrant types with upvectors\n if ntype in ['aimConstraint', 'tangentConstraint', 'normalConstraint']:\n\n aim = constraint_func(constraint, q=1, aim=1)\n upv = constraint_func(constraint, q=1, u=1)\n wupv = constraint_func(constraint, q=1, wu=1)\n wut = constraint_func(constraint, q=1, wut=1)\n wuo = constraint_func(constraint, q=1, wuo=1)\n\n if type(wuo) == list:\n wuo = wuo[0]\n\n con_data['aim'] = aim\n con_data['u'] = upv\n con_data['wu'] = wupv\n con_data['wut'] = wut\n con_data['wuo'] = wuo\n\n if mc.objExists(constraint+'.interpType'):\n con_data['interp_type'] = mc.getAttr(constraint+'.interpType')\n\n data[constraint] = con_data\n\n return data",
"def __init__(self, equation_dict):\n self.equation = equation_dict['equation']\n self.variables = equation_dict['variables']\n self.dict = equation_dict\n self.x = list(self.variables)[-1]['variable'] # The variable to solve for",
"def __init__(self, node_0, edge_0_1):\n\n # -------------------------------------------------------------------------\n # YOUR CODE GOES HERE\n #\n #self._joint = edge_0_1\n self._table = {}\n \n # calculate the intersection of h and e using the simple product rule\n for h in node_0:\n for e in edge_0_1[h]:\n self._table[(h, e)] = node_0[h] * edge_0_1[h][e]\n \n #\n # END OF YOUR CODE\n # ------------------------------------------------------------------------- ",
"def allocate(tree):\n\n\twp = tree['misc']['working precision']\t\n\tdim = tree['eqns']['ndim']\n\tnvar = tree['eqns']['qvec']['nvars']\n\tnvarst = len(tree['eqns']['qvec']['stored'])\t\n\thlod = tree['num']['deriv']['hlo']\n\thlof = tree['num']['filtr']['hlo']\n \n\thlo = tree['num']['hlo'] \n\tnx = tree['mpi']['dMpi'].nx \n\tny = tree['mpi']['dMpi'].ny \n\tnz = tree['mpi']['dMpi'].nz \n\n\tvariables = []\n\tif tree['eqns']['qvec']['solved']:\n\t\tfor v in tree['eqns']['qvec']['solved']:\n\t\t\tvariables.append(v[1])\n\n\tif tree['eqns']['qvec']['stored']:\n\t\tfor v in tree['eqns']['qvec']['stored']:\n\t\t\tvariables.append(v[1])\t\n\n\tvariables_face = {}\n\tfor dir in ['i','j','k']:\n\t\tvariables_face[dir] = []\n\t\tfor v in tree['eqns']['qvec']['bcs']['face'][dir]:\n\t\t\t\tvariables_face[dir].append(v[1])\t\t\t\n\n\tvariables_edge = {}\n\tfor dir in ['ij','jk','ik']:\n\t\tvariables_edge[dir] = []\n\t\tfor v in tree['eqns']['qvec']['bcs']['edge'][dir]:\n\t\t\t\tvariables_edge[dir].append(v[1])\t\t\n\t\n\n\tnvar_face = {'i': len(variables_face['i']),\n\t\t\t\t 'j': len(variables_face['j']),\n\t\t\t\t 'k': len(variables_face['k'])}\n\n\tnvar_edge = {'ij': len(variables_edge['ij']),\n\t\t\t\t 'jk': len(variables_edge['jk']),\n\t\t\t\t 'ik': len(variables_edge['ik'])}\t\t\t \n\n\n\n\tcoeff = []\n\tif tree['eqns']['coeff']:\n\t\tfor v in tree['eqns']['coeff']:\n\t\t\tcoeff.append(v[1])\n\n\tsizex = nx + 2*hlo\n\t\n\tif(ny == 1):\n\t\tsizey = 1\n\telse:\t\t\n\t\tsizey = ny + 2*hlo\n\t\n\tif(nz == 1) : \n\t\tsizez = 1\n\telse:\t\n\t\tsizez = nz + 2*hlo\n\t\n\n\tndimpt = sizex*sizey*sizez\n\tndimtot = ndimpt*nvar\n\n\n\t# alloc bcs fields:\n\tndimptbcs = {}\n\tndimptbcs['i'] =sizey*sizez\n\tndimptbcs['j'] =sizex*sizez\n\tndimptbcs['k'] =sizex*sizey\n\tndimptbcs['ij'] =sizez \n\tndimptbcs['jk'] =sizex \n\tndimptbcs['ik'] =sizey \n\n\tsizebcs = {}\n\tsizebcs['i'] =(sizey,sizez)\n\tsizebcs['j'] =(sizex,sizez)\n\tsizebcs['k'] =(sizex,sizey)\n\tsizebcs['ij'] =(sizez) \n\tsizebcs['jk'] =(sizex) \n\tsizebcs['ik'] =(sizey) \n\n\t# faces:\n\tnfacei = max(1,nvar_face['i']*ndimptbcs['i'])\n\tnfacej = max(1,nvar_face['j']*ndimptbcs['j'])\n\tnfacek = max(1,nvar_face['k']*ndimptbcs['k'])\n\t\n\n\t# edges:\n\tnedgeij = max(1,nvar_edge['ij']*ndimptbcs['ij'])\n\tnedgejk = max(1,nvar_edge['jk']*ndimptbcs['jk'])\n\tnedgeik = max(1,nvar_edge['ik']*ndimptbcs['ik'])\t\n\n\t# unpack bc info:\n\ttree = unpack_bcs(tree)\n\t\n\tmybc = tree['bc']['mybc']\n\tnbc = len(mybc)\n\n\t# Integers parameters to be passed to the fortran layer\n\tparam_intDim = 12 + nvar + nvarst + 1 + nbc + 6 + 6\n\tparam_int = np.empty(shape=param_intDim, dtype=np.int32, order='F') \n\tparam_int[0] = hlo\n\tparam_int[1] = nx\n\tparam_int[2] = ny\n\tparam_int[3] = nz\n\tparam_int[4] = nvar\n\tparam_int[5] = nvarst\n\tparam_int[6] = ndimtot\n\tparam_int[7] = 3 # for RK sub steps in Python\n\tparam_int[8] = ndimpt\n\tparam_int[9:9+3] = tree['libs']['cache blocking']\n\tadr = 9+3\n\tparam_int[adr:adr+nvar] = variables[0:0+nvar] # variables location (in q).\n\tadr = adr+nvar\n\tparam_int[adr:adr+nvarst] = variables[nvar:nvar+nvarst] # variables location (in qst).\n\tadr = adr+nvarst\n\tparam_int[adr] = nbc\n\tadr = adr + 1\n\tparam_int[adr:adr+nbc] = mybc\n\tadr = adr + nbc\n\tparam_int[adr:adr+6] = list(nvar_face.values()) + list(nvar_edge.values())\n\tadr = adr + 6\n\tparam_int[adr:adr+6] = [nfacei,nfacej,nfacek,nedgeij,nedgejk,nedgeik]\n\n\tif tree['eqns']['coeff']: \n\t\tncoef = len(tree['eqns']['coeff'])\n\telse:\n\t\tncoef = 0\t\n\n\t# 
Floating point parameters to be passed to the Fortran layer (3 additional floats for the metrics, uniforme grid + 1 for dt +1 for eps filter)\n\tparam_float = np.zeros(shape=ncoef+5, dtype=wp, order='F') \n\n\tparam_float[0] = cst(1.0)/tree['grid']['geom']['dx']\n\tif(dim>=2) : param_float[1] = cst(1.0)/tree['grid']['geom']['dy']\n\tif(dim == 3) : param_float[2] = cst(1.0)/tree['grid']['geom']['dz']\n\n\tparam_float[3] = tree['num']['tint']['tstep']\n\tparam_float[4] = tree['num']['filtr']['eps']\n\n\tfor i,v in enumerate(tree['eqns']['coeff']):\n\t\tparam_float[i+5] = v[1]\n\n\t# Floating point array (contiguous, aligned, actually NOT aligned yet...)\n\t\n\tnfieldbcs = sum([nfacei,nfacej,nfacek,nedgeij,nedgejk,nedgeik])\n\n\tif nvarst != 0 :\n\t\tdata = np.empty(shape=ndimtot*4 + ndimpt*nvarst + nfieldbcs, dtype=wp,order='F') # 4 --> q,q1,q2 + rhs + nvarstored\n\telse:\t\n\t\tdata = np.empty(shape=ndimtot*4 + 1 + nfieldbcs, dtype=wp,order='F') # 4 --> q,q1,q2, rhs, + 1 (address for qst in fortran layer)\n\n\t# Explicit view of data (only references here, no copy) \n\tviews = {}\n\tnvsolved = len(tree['eqns']['qvec']['solved'])\n\t\n\t# WARNING assume contiguous addresses of stored variables in data_float...\n\tif nvarst != 0:\n\t\taddrstored_beg = ndimtot*4 \n\t\taddrstored_end = addrstored_beg + ndimpt*nvarst\n\telse:\n\t\taddrstored_beg = ndimtot*4\t\n\t\taddrstored_end = addrstored_beg + 1\n\n\taddrbcfields_beg = addrstored_end\t\n\taddrbcfields_edge_beg = addrbcfields_beg + sum([nfacei,nfacej,nfacek])\n\n\tif dim == 3:\n\t\tviews['q'] = data[0:ndimpt*nvsolved].view().reshape(sizex,sizey,sizez,nvsolved, order='F')\n\t\tif nvarst !=0:\n\t\t\tviews['qstored'] = data[addrstored_beg:addrstored_end].view().reshape(sizex,sizey,sizez,nvarst, order='F')\n\t\t\n\t\tfor v in tree['eqns']['qvec']['solved']:\n\t\t\tviews[v[0]] = data[(v[1]-1)*(ndimpt):(v[1])*ndimpt].view().reshape(sizex,sizey,sizez, order='F')\n\t\tfor v in tree['eqns']['qvec']['stored']:\n\t\t\tviews[v[0]] = data[(v[1]-1)*(ndimpt)+addrstored_beg:(v[1])*ndimpt+addrstored_beg].view().reshape(sizex,sizey,sizez, order='F')\t\t\t\n\n\t\t# bc faces:\t\n\t\tshift = addrbcfields_beg\n\t\tfor dir in ['i','j','k']:\n\t\t\tfor v in tree['eqns']['qvec']['bcs']['face'][dir]:\t\t\t\t\n\t\t\t\tviews[v[0]] = data[shift:ndimptbcs[dir]+shift].view().reshape(sizebcs[dir], order='F')\t\t\t\n\t\t\t\tshift = shift + ndimptbcs[dir]\t\n\n\t\t# bc edges:\t\n\t\tshift = addrbcfields_edge_beg\n\t\tfor dir in ['ij','jk','ik']:\n\t\t\tfor v in tree['eqns']['qvec']['bcs']['edge'][dir]:\t\n\t\t\t\tviews[v[0]] = data[shift:ndimptbcs[dir]+shift].view().reshape(sizebcs[dir], order='F')\t\t\t\n\t\t\t\tshift = shift + ndimptbcs[dir]\n\telif dim == 2:\n\t\tviews['q'] = data[0:ndimpt*nvsolved].view().reshape(sizex,sizey,nvsolved, order='F')\n\t\tif nvarst !=0:\n\t\t\tviews['qstored'] = data[addrstored_beg:addrstored_end].view().reshape(sizex,sizey,nvarst, order='F')\n\t\t\n\t\tfor v in tree['eqns']['qvec']['solved']:\n\t\t\tviews[v[0]] = data[(v[1]-1)*(ndimpt):(v[1])*ndimpt].view().reshape(sizex,sizey, order='F')\t\n\t\tfor v in tree['eqns']['qvec']['stored']:\n\t\t\tviews[v[0]] = data[(v[1]-1)*(ndimpt)+addrstored_beg:(v[1])*ndimpt+addrstored_beg].view().reshape(sizex,sizey, order='F')\t\n\n\n\t\t# bc faces:\t\n\t\tshift = addrbcfields_beg\t\n\t\tfor dir in ['i','j']:\n\t\t\tfor v in tree['eqns']['qvec']['bcs']['face'][dir]:\t\t\t\t\t\n\t\t\t\tviews[v[0]] = data[shift:ndimptbcs[dir]+shift].view().reshape(sizebcs[dir], order='F')\t\t\t\n\t\t\t\tshift = shift + 
ndimptbcs[dir]\t\n\t\t\t\t\t\t\n\telse:\n\t\tviews['q'] = data[0:ndimpt*nvsolved].view().reshape(sizex,nvsolved, order='F')\n\t\tif nvarst !=0:\n\t\t\tviews['qstored'] = data[addrstored_beg:addrstored_end].view().reshape(sizex,nvarst, order='F')\n\t\t\n\t\tfor v in tree['eqns']['qvec']['solved']:\n\t\t\tviews[v[0]] = data[(v[1]-1)*(ndimpt):(v[1])*ndimpt].view().reshape(sizex, order='F')\t\t\n\t\tfor v in tree['eqns']['qvec']['stored']:\n\t\t\tviews[v[0]] = data[(v[1]-1)*(ndimpt)+addrstored_beg:(v[1])*ndimpt+addrstored_beg].view().reshape(sizex, order='F')\t\t\t\t\t\t\t\t\t\n\t\n\ttree['libs']['fort']['integers'] = param_int\n\ttree['libs']['fort']['floats'] = param_float\n\ttree['libs']['fort']['data'] = data\n\ttree['eqns']['qvec']['views'] = views\n\n\tdnamiF.init(param_int,param_float,data)\t\n\t\n\treturn tree",
"def map_name_and_data(cls, onnx_model: onnx.ModelProto):\n params = {}\n for init in onnx_model.graph.initializer:\n params[init.name] = numpy_helper.to_array(init)\n for node in onnx_model.graph.node:\n # If two zero_points are identity, one is a reference to the other\n # after optimized by onnx.\n if node.op_type == 'Identity' and len(node.input) == 1 and \\\n node.input[0] in params:\n params[node.output[0]] = copy.deepcopy(params[node.input[0]])\n if node.op_type == 'Constant':\n for attr in node.attribute:\n if attr.name == 'value':\n params[node.output[0]] = numpy_helper.to_array(attr.t)\n return params",
"def _retrieve_solution(self, m):\n result = {} # {component: {resource: production}}\n for comp in m.Components:\n prod = getattr(m, '{n}_production'.format(n=comp.name))\n result[comp.name] = {}\n for res, comp_r in m.resource_index_map[comp].items():\n result[comp.name][res] = np.fromiter((prod[comp_r, t].value for t in m.T), dtype=float, count=len(m.T))\n return result",
"def __call__(self, par_dict: dict) -> np.ndarray:",
"def physical_maps(x, y):\n assert x.shape == (3,) and y.shape == (3,)\n assert x.dtype == np.float64 and y.dtype == np.float64\n\n C = np.empty((21,21), dtype=np.float64)\n B = np.empty((2,2), dtype=np.float64)\n b = np.empty((2,), dtype=np.float64)\n _ap.ap_physical_maps(x, y, C, B, b)\n return (C, B, b)",
"def band_structure_dynamic(self, kx_range, ky_range, N_res):\n kxR = np.linspace(kx_range[0], kx_range[1], N_res)\n kyR = np.linspace(ky_range[0], ky_range[1], N_res)\n\n Nt = 3\n E_arr = np.zeros((2,N_res,N_res), float)\n \n # mesh over area in k-space\n# Kx, Ky = np.meshgrid(kx,ky)\n\n for ix, kx in enumerate(kxR):\n for iy, ky in enumerate(kyR):\n k_vec = np.array([kx,ky], float)\n\n # Floquet eigenvalues and eigenenergies\n E_arr[:,ix,iy] , Uaux = self.evolve(k_vec, Nt)\n\n #end-loop ky\n #end-loop kx\n\n return E_arr",
"def index_nodes(self):\n out = {}\n\n #avg = np.mean(list(self.rtype_vectors.values()),axis=0)\n\n\n #for name, node in self.nodes.items():\n # tmp1 = [self.rtype_vectors[rtype]\n # for rtype, dest in node.outgoing_relations] or [NULL_VEC()]\n # tmp2 = [permute_rtype_vector(self.rtype_vectors[rtype])\n # for rtype, prev in node.incoming_relations] or [NULL_VEC()]\n\n # net = tmp1 + tmp2\n\n # #out[name] = np.asarray(net).mean(axis=0)\n # #out[name] = np.asarray(net).sum(axis=0)\n # v = np.asarray(net).sum(axis=0)\n # if v.any():\n # out[name] = v/max(v)#softmax(v/max(v))\n # else:\n # out[name] = v\n\n\n #avg = np.mean(list(out.values()),axis=0)\n\n #maxm = np.max(list(out.values()),axis=0)\n\n ####normalize everything\n #for r,v in out.items():\n # if v.any():\n # #out[r] = v / sqrt(v.dot(v))\n # out[r] = softmax((v-avg)/maxm)\n\n\n\n # PCA method 0001701\n rmap = self.rtype_vectors\n data = np.zeros((len(self.nodes), JACCARD_DIMENSIONS), dtype=np.float)\n ix = 0\n for node in self.nodes.values():\n\n #compute weighted average of each relation type\n tmp = [rmap[rtype] for \n rtype, dest in node.outgoing_relations] + \\\n [permute_rtype_vector(rmap[rtype]) for \n rtype, prev in node.incoming_relations]\n\n v = np.asarray(tmp).mean(axis=0) if tmp else NULL_VEC()\n\n #normalize\n if v.any():\n data[ix] = v / sqrt(v.dot(v))\n else:\n data[ix] = v\n ix += 1\n\n #eliminate projection onto first 7 principal components\n d2 = data - PCA(data, 7)\n\n #order of nodes is preserved\n for i,v in enumerate(self.nodes):\n out[v] = softmax(d2[i])\n\n return out",
"def get_equations(self, combo=None):\n if combo is None:\n return self._equations\n else:\n return [self._equations[i] for i in self.combos[combo]]",
"def _form_computation_graph(self, idx):\n _list, _set = list, set\n if type(idx) is int:\n node_layers = [np.array([idx], dtype=np.int64)]\n elif type(idx) is list:\n node_layers = [np.array(idx, dtype=np.int64)]\n\n for _ in range(self.n_layers):\n prev = node_layers[-1]\n arr = [node for node in prev]\n arr.extend([e[0] for node in arr for e in self.nbrs_t[node]])\n arr = np.array(_list(_set(arr)), dtype=np.int64)\n node_layers.append(arr)\n node_layers.reverse()\n\n mappings = [{j: i for (i, j) in enumerate(arr)} for arr in node_layers]\n\n return node_layers, mappings",
"def oxy_dict(calib, P, K, T, S, V):\n\n \"\"\"Assumes all are arrays, or none are arrays. Need way to test for them. \"\"\"\n try:\n oxygen = []\n for P_x, K_x, T_x, S_x, V_x in zip(P, K, T, S, V):\n temp = (calib['Soc'] * (V_x + calib['offset'])\n * (1.0 + calib['A'] * T_x + calib['B'] * math.pow(T_x,2) + calib['C'] * math.pow(T_x,3) )\n * OxSol(T_x,S_x)\n * math.exp(calib['E'] * P_x / K_x)) #foo\n temp = round(temp,4)\n oxygen.append(temp)\n #Single mode.\n except:\n oxygen = (calib['Soc'] * (V + calib['offset'])\n * (1.0 + calib['A'] * T + calib['B'] * math.pow(T,2) + calib['C'] * math.pow(T,3) )\n * OxSol(T,S)\n * math.exp(calib['E'] * P / K))\n return oxygen",
"def _solve(self) -> CasADiArrayType:\n pass",
"def map(inputs, e0,e1,k):\r\n codebook = tf.cast(inputs[0][0:2 ** k], tf.float32)\r\n soft_map = tf.TensorArray(tf.float32, size=0, dynamic_size=True)\r\n for y in inputs[1]:\r\n Pxy_map = pyx(y, codebook, e0, e1)\r\n soft_map = soft_map.write(soft_map.size(), Pxy_map)\r\n\r\n soft_map = soft_map.stack()\r\n return soft_map",
"def enstrophy_static(\n omega1, # vorticity-1 component\n omega2, # vorticity-2 component\n omega3): # vorticity-3 component\n #---------------------------------------------------------------------#\n # Determining the shape of the enstrophy field #\n #---------------------------------------------------------------------#\n dim = omega1.shape\n ens = np.empty((dim[0], dim[1], dim[2]))\n #---------------------------------------------------------------------#\n # Time loop #\n #---------------------------------------------------------------------#\n term1 = np.square(omega1)\n term2 = np.square(omega2)\n term3 = np.square(omega3)\n ens = 0.5*(term1 + term2 + term3)\n\n return ens",
"def input_data(self):\n # Creating a empty matrix to store the objects data\n input_matrix = np.empty((len(self.objects), 7))\n # Reading inital values from the data file, creating the input matrix\n data = open('planets_data.txt', 'r')\n # Using loops and regular expressions to achieve the data from file\n i = 0\n legend = []\n for lines in data.read().splitlines():\n name = re.findall(r\"(^\\D+):\", lines)\n # Extracting data on the objects of interest\n # Appending the objects name to the list of legends to get the\n # correct order.\n if len(set(name) & set(self.objects)) >= 1:\n legend.append(name)\n index = lines.index(':')\n numbers = lines[index+1:]\n array = np.asarray([float(value) for value in numbers.split(',')])\n input_matrix[i, :] = array\n i += 1\n # Converting velocity unit from [AU/day] to [AU/year]\n input_matrix[:, 4:7] = input_matrix[:, 4:7]*365\n # The returned matrix are shaped to match the solver method input\n return input_matrix, legend",
"def ew(node):\n cY = {}\n gM = node.dependent(b.INPUTS | b.HIDDEN_INPUTS)\n for p in gM:\n cY[p] = []\n for c in range(p.inputs()):\n if p.input(c) == node:\n cY[p].append(c)\n\n return cY",
"def linearize(self, params, unknowns, resids):\n\n x = hash(params['x'])\n y = params['y']\n J = {}\n\n J['f_xy', 'x'] = 2.0*x - 6.0 + y\n J['f_xy', 'y'] = 2.0*y + 8.0 + x\n return J",
"def evaluate(\n self, nodes, derivatives=np.array([0, 0, 0]), modes=None, unique=False\n ):\n if modes is None:\n modes = self.modes\n if derivatives[2] != 0:\n return jnp.zeros((nodes.shape[0], modes.shape[0]))\n if not len(modes):\n return np.array([]).reshape((len(nodes), 0))\n\n r, t, z = nodes.T\n l, m, n = modes.T\n lm = modes[:, :2]\n\n if unique:\n _, ridx, routidx = np.unique(\n r, return_index=True, return_inverse=True, axis=0\n )\n _, tidx, toutidx = np.unique(\n t, return_index=True, return_inverse=True, axis=0\n )\n _, lmidx, lmoutidx = np.unique(\n lm, return_index=True, return_inverse=True, axis=0\n )\n _, midx, moutidx = np.unique(\n m, return_index=True, return_inverse=True, axis=0\n )\n r = r[ridx]\n t = t[tidx]\n lm = lm[lmidx]\n m = m[midx]\n\n # some logic here to use the fastest method, assuming that you're not using\n # \"unique\" within jit/AD since that doesn't work\n if unique and (np.max(modes[:, 0]) <= 24):\n radial_fun = zernike_radial_poly\n else:\n radial_fun = zernike_radial\n\n radial = radial_fun(r[:, np.newaxis], lm[:, 0], lm[:, 1], dr=derivatives[0])\n poloidal = fourier(t[:, np.newaxis], m, 1, derivatives[1])\n\n if unique:\n radial = radial[routidx][:, lmoutidx]\n poloidal = poloidal[toutidx][:, moutidx]\n\n return radial * poloidal",
"def nodes_from_dict(nd=None,**kwargs):\n\n if not nd:\n err_msg = \"ERROR: No nodes data provided\"\n print(err_msg)\n return 1\n \n nodes = []\n\n ####################\n #Create BUS objects#\n ####################\n busd = {}\n for i, row in nd[\"buses\"].iterrows():\n if row[\"active\"] and not pd.isnull(row[\"active\"]):\n logger.info(\"bus {} will be created\".format(row[\"label\"]))\n bus = solph.Bus(label=row[\"label\"])\n nodes.append(bus)\n busd[row[\"label\"]] = bus\n \n if row[\"excess\"] and not pd.isnull(row[\"excess\"]):\n # Automatically add Sink for curtailment (excess)\n # Add variable cost for excess cost --> minimise curtailment\n nodes.append(\n solph.Sink(\n label=row[\"label\"] + \"_excess\",\n inputs={\n busd[row[\"label\"]]:solph.Flow(\n variable_costs = row[\"excess costs\"]\n )\n },\n )\n )\n # Automatically add Source for shortage\n # Add variable cost for shortage --> minimize shortage\n if row[\"shortage\"] and not pd.isnull(row[\"shortage\"]):\n nodes.append(\n solph.Source(\n label = row[\"label\"] + \"_shortage\",\n outputs={\n busd[row[\"label\"]]:solph.Flow(\n variable_costs=row[\"shortage costs\"]\n )\n },\n )\n )\n ########################\n # Create Source objects#\n ########################\n for i, row in nd[\"commodity_sources\"].iterrows():\n if row[\"active\"] and not pd.isnull(row[\"active\"]):\n nodes.append(\n solph.Source(\n label=row[\"label\"],\n outputs={\n busd[row[\"to\"]]: solph.Flow(\n variable_costs = row[\"variable costs\"]\n )\n },\n )\n )\n ########################\n # Create Source objects with fixed time series from 'renewables' table\n ########################\n \"\"\"\n A source can represent a pv-system, a wind power plant, an import of natural gas or a slack variable to avoid creating an in-feasible model.\n While a wind power plant will have an hourly feed-in depending on the weather conditions the natural_gas import might be restricted by \n maximum value (nominal_value) and an annual limit (summed_max). As we do have to pay for imported gas we should set variable costs. \n Comparable to the demand series an fix is used to define a fixed the normalised output of a wind power plant. \n Alternatively, you might use max to allow for easy curtailment. 
The nominal_value sets the installed capacity.\n \"\"\"\n for i, row in nd[\"renewables\"].iterrows():\n if row[\"active\"] and not pd.isnull(row[\"active\"]):\n # set static outflow values\n outflow_args = {}\n\n # get time series for node and parameter\n for col in nd[\"timeseries\"].columns.values:\n if col.split(\".\")[0] == row[\"label\"]:\n outflow_args[col.split(\".\")[1]]=nd[\"timeseries\"][col]\n # outflow_args[\"fix\"]=nd[\"timeseries\"][col]\n \n # TODO add NON-CONVEX to outflow_args\n if row[\"capex\"] and not pd.isnull(row[\"capex\"]):\n # with investment mode, nominal_value must be None\n logger.info(\"Invest {} capacity\".format(row[\"label\"]))\n invest_args = {}\n if not row[\"epc_invest\"] or pd.isnull(row[\"epc_invest\"]):\n epc_invest = economics.annuity(row[\"capex\"],20,0.08)\n else:\n epc_invest=row[\"epc_invest\"]\n invest_args[\"ep_costs\"] = epc_invest\n\n if row[\"max\"] and not pd.isnull(row[\"max\"]):\n invest_args[\"maximum\"] = row[\"max\"]\n\n if row[\"min\"] and not pd.isnull(row[\"min\"]):\n invest_args[\"minimum\"]=row[\"min\"]\n\n if row[\"existing\"] and not pd.isnull(row[\"existing\"]):\n invest_args[\"existing\"]=row[\"existing\"]\n \n outflow_args[\"investment\"] = solph.Investment(**invest_args) \n else: \n outflow_args[\"nominal_value\"] = row[\"capacity\"]\n \n # create\n nodes.append(\n solph.Source(\n label=row[\"label\"],\n outputs = {\n busd[row[\"to\"]]:solph.Flow(**outflow_args)\n }\n )\n )\n #######################\n # Create Sink objects # \n #######################\n \"\"\"\n A sink is normally used to define the demand within an energy model but it can also be used to detect excesses.\n\n The example shows the electricity demand of the electricity_bus defined above.\n - 'nd['timeseries'][col]' should be sequence of normalised values\n - 'nominal_value' is the maximum demand the normalised sequence is multiplied with.\n - Giving 'nd['timeseries'][col]' as parameter 'fix' means that the demand cannot be changed by the solver. \n \n In contrast to the 'demand sink' the 'excess sink' has normally less restrictions but is open to take the whole excess.\n \"\"\"\n for i, de in nd[\"demand\"].iterrows():\n if de[\"active\"] and not pd.isnull(de[\"active\"]):\n # set static inflow values\n inflow_args = {\n \"nominal_value\":de[\"nominal value\"]\n }\n # get time series for node and parameter\n for col in nd[\"timeseries\"].columns.values:\n if col.split(\".\")[0]==de[\"label\"]:\n # inflow_args[col.split(\".\")[1]]=nd[\"timeseries\"][col]\n # TODO: veriry other key than 'fix'?????\n inflow_args[\"fix\"]=nd[\"timeseries\"][col] \n \n # Create Sink object and append to nodes\n nodes.append(\n solph.Sink(\n label=de[\"label\"],\n inputs={\n busd[de[\"from\"]]:solph.Flow(**inflow_args)\n }\n )\n )\n #############################\n # Create Transformer object #\n #############################\n \"\"\"\n An instance of the Transformer class can represent a node with multiple input and output flows such as:\n - a power plant\n - a transport line \n - or any kind of a transforming process as electrolysis, a cooling device or a heat pump. \n The efficiency has to be constant within one time step to get a linear transformation.\n You can define a different efficiency for every time step (e.g. 
the thermal powerplant efficiency according \n to the ambient temperature) but this series has to be predefined and cannot be changed within the optimisation.\n\n A condensing power plant can be defined by a transformer with one input (fuel) and one output (electricity)\n ```\n b_gas = solph.Bus(label='natural_gas')\n b_el = solph.Bus(label='electricity')\n solph.Transformer(\n label=\"pp_gas\",\n inputs={bgas: solph.Flow()},\n outputs={b_el: solph.Flow(nominal_value=10e10)},\n conversion_factors={electricity_bus: 0.58})\n ```\n\n A CHP power plant would be defined in the same manner but with two outputs:\n ```\n b_gas = solph.Bus(label='natural_gas')\n b_el = solph.Bus(label='electricity')\n b_th = solph.Bus(label='heat')\n\n solph.Transformer(\n label='pp_chp',\n inputs={b_gas: Flow()},\n outputs={b_el: Flow(nominal_value=30),\n b_th: Flow(nominal_value=40)},\n conversion_factors={b_el: 0.3, b_th: 0.4})\n ```\n A CHP power plant with 70% coal and 30% natural gas can be defined with two inputs and two outputs:\n ```\n b_gas = solph.Bus(label='natural_gas')\n b_coal = solph.Bus(label='hard_coal')\n b_el = solph.Bus(label='electricity')\n b_th = solph.Bus(label='heat')\n\n solph.Transformer(\n label='pp_chp',\n inputs={b_gas: Flow(), b_coal: Flow()},\n outputs={b_el: Flow(nominal_value=30),\n b_th: Flow(nominal_value=40)},\n conversion_factors={b_el: 0.3, b_th: 0.4,\n b_coal: 0.7, b_gas: 0.3})\n ```\n \"\"\"\n for i, row in nd[\"transformers\"].iterrows():\n if row[\"active\"] and not pd.isnull(row[\"active\"]):\n # set static inflow values\n inflow_args = {\n \"variable_costs\":row[\"variable input costs\"]\n }\n # inflow_args = {}\n outflow_args = {}\n # get time series for inflow transformer\n for col in nd[\"timeseries\"].columns.values:\n if col.split(\".\")[0]==row[\"label\"]:\n # inflow_args[col.split(\".\")[1]] = nd[\"timeseries\"][col]\n inflow_args[\"fix\"] = nd[\"timeseries\"][col]\n \n #TODO: multi inputs/outputs and add investment\n\n if row[\"capex inflow\"] and not pd.isnull(row[\"capex inflow\"]):\n logger.info(\"Invest {} inflow capacity\".format(row[\"label\"])) \n invest_args = {}\n invest_args[\"ep_costs\"] = economics.annuity(row[\"capex inflow\"],20,0.08)\n\n if row[\"max inflow\"] and not pd.isnull(row[\"max inflow\"]):\n invest_args[\"maximum\"] = row[\"max inflow\"]\n\n if row[\"min inflow\"] and not pd.isnull(row[\"min inflow\"]):\n invest_args[\"minimum\"] = row[\"min inflow\"]\n\n if row[\"existing inflow\"] and not pd.isnull(row[\"existing inflow\"]):\n invest_args[\"existing\"] = row[\"existing inflow\"]\n\n inflow_args[\"investment\"] = solph.Investment(**invest_args)\n else: \n outflow_args[\"nominal_value\"] = row[\"capacity\"] # should be specify capacity inflow or outflow\n\n # create\n nodes.append(\n solph.Transformer(\n label=row[\"label\"],\n inputs = {\n busd[row[\"from\"]]:solph.Flow(**inflow_args)\n },\n outputs={\n busd[row[\"to\"]]:solph.Flow(**outflow_args)\n },\n conversion_factors = {\n busd[row[\"to\"]]:row[\"efficiency\"]\n }\n )\n )\n ##################################\n # Create Transformer CHP objects #\n ##################################\n for i, row in nd[\"transformers_chp\"].iterrows():\n if row[\"active\"] and not pd.isnull(row[\"active\"]):\n\n inflow_args = {}\n outflow_elec_args = {}\n outflow_heat_args = {}\n\n inflow_args[\"variable_costs\"] = row[\"variable input costs\"]\n\n if row[\"capex elec\"] and not pd.isnull(row[\"capex elec\"]):\n logger.info(\"Invest {} inflow capacity\".format(row[\"label\"])) \n invest_args = {}\n 
invest_args[\"ep_costs\"] = economics.annuity(row[\"capex elec\"],20,0.08)\n if row[\"max elec\"] and not pd.isnull(row[\"max elec\"]):\n invest_args[\"maximum\"] = row[\"max elec\"]\n if row[\"min elec\"] and not pd.isnull(row[\"min elec\"]):\n invest_args[\"minimum\"] = row[\"min elec\"]\n if row[\"existing elec\"] and not pd.isnull(row[\"existing elec\"]):\n invest_args[\"existing\"] = row[\"existing elec\"]\n \n outflow_elec_args[\"investment\"] = solph.Investment(**invest_args)\n investment = solph.Investment(**invest_args)\n else:\n # inflow_args[\"nominal_value\"] = row[\"capacity_el\"]\n outflow_elec_args[\"nominal_value\"] = row[\"capacity_el\"]\n outflow_heat_args[\"nominal_value\"] = row[\"capacity_heat\"]\n\n # Create\n nodes.append(\n solph.Transformer(\n label = row[\"label\"],\n inputs ={\n busd[row[\"from\"]]:solph.Flow(**inflow_args)\n },\n outputs={\n busd[row[\"to_el\"]]:solph.Flow(**outflow_elec_args),\n busd[row[\"to_heat\"]]:solph.Flow(**outflow_heat_args)\n },\n conversion_factors={\n busd[row[\"to_el\"]]:row[\"efficiency_el\"],\n busd[row[\"to_heat\"]]:row[\"efficiency_heat\"]\n }\n )\n )\n\n ##########################\n # Create Storage objects #\n ##########################\n for i, row in nd[\"storages\"].iterrows():\n if row[\"active\"] and not pd.isnull(row[\"active\"]):\n\n inflow_args = {}\n outflow_args = {}\n\n if row[\"capex\"] and not pd.isnull(row[\"capex\"]):\n logger.info(\"Invest {} storage capacity\".format(row[\"label\"]))\n\n invest_args = {}\n invest_args[\"ep_costs\"] = economics.annuity(row[\"capex\"],20,0.08)\n if row[\"max\"] and not pd.isnull(row[\"max\"]):\n invest_args[\"maximum\"] = row[\"max\"]\n if row[\"min\"] and not pd.isnull(row[\"min\"]):\n invest_args[\"minimum\"] = row[\"min\"]\n if row[\"existing\"] and not pd.isnull(row[\"existing\"]):\n invest_args[\"existing\"] = row[\"existing\"]\n\n investment=solph.Investment(\n **invest_args\n )\n nominal_capacity=None\n \n #TODO add if row[\"capex inflow\"] and if row[\"capex outflow\"]\n #TODO read relation_capacity_inflow/outflow from excel\n \n else:\n investment = None\n nominal_capacity = row[\"nominal capacity\"] \n \n if row[\"capacity inflow\"] and row[\"capacity inflow ratio\"]:\n logger.error(\"{} is overdetermined, only capacity inflow or capacity inflow ratio shoul be set\".format(row[\"label\"]))\n return 1\n if row[\"capacity inflow\"]:\n inflow_args[\"nominal_value\"] = row[\"capacity inflow\"]\n if row[\"capacity inflow ratio\"]:\n capacity_inflow_ratio = row[\"capacity inflow ratio\"]\n else:\n capacity_inflow_ratio = None\n inflow_args[\"variable_costs\"] = row[\"variable input costs\"]\n\n \n if row[\"capacity outflow\"] and row[\"capacity outflow ratio\"]:\n logger.error(\"{} is overdetermined, only capacity outflow or capacity outflow ratio shoul be set\".format(row[\"label\"]))\n return 1\n if row[\"capacity outflow\"]:\n outflow_args[\"nominal_value\"] = row[\"capacity outflow\"]\n if row[\"capacity outflow ratio\"]:\n capacity_outflow_ratio = row[\"capacity outflow ratio\"]\n else:\n capacity_outflow_ratio = None\n\n outflow_args[\"variable_costs\"] = row[\"variable output costs\"]\n\n nodes.append(\n solph.components.GenericStorage(\n label=row[\"label\"],\n inputs = {\n busd[row[\"bus\"]]:solph.Flow(**inflow_args)\n },\n outputs = {\n busd[row[\"bus\"]]:solph.Flow(**outflow_args)\n },\n investment=investment,\n nominal_storage_capacity=nominal_capacity,\n loss_rate = row[\"capacity loss\"],\n initial_storage_level = row[\"initial capacity\"],\n 
max_storage_level=row[\"capacity max\"],\n min_storage_level=row[\"capacity min\"],\n invest_relation_input_capacity = capacity_inflow_ratio,\n invest_relation_output_capacity = capacity_outflow_ratio,\n inflow_conversion_factor = row[\"efficiency inflow\"],\n outflow_conversion_factor = row[\"efficiency outflow\"]\n )\n )\n #######################\n # Create Link objects #\n #######################\n \"\"\"\n A Link object with 1...2 inputs and 1...2 outputs\n Note: This component is experimental. Use it with care\n \"\"\"\n for i, p in nd[\"powerlines\"].iterrows():\n if p[\"active\"] and not pd.isnull(p[\"active\"]):\n bus1 = busd[p[\"bus_1\"]]\n bus2 = busd[p[\"bus_2\"]]\n nodes.append(\n solph.custom.Link(\n label = \"powerline\" + \"_\" + p[\"bus_1\"] + \"_\" + p[\"bus_2\"],\n inputs = {\n bus1:solph.Flow(),\n bus2:solph.Flow()\n },\n outputs = {\n bus1: solph.Flow(nominal_value = p[\"capacity\"]),\n bus2: solph.Flow(nominal_value=p[\"capacity\"]),\n },\n conversion_factors={\n (bus1,bus2):p[\"efficiency\"],\n (bus2,bus1):p[\"efficiency\"]\n }\n )\n ) \n return nodes",
"def solve(self):\n\n # Assign variables to each quantity being solved.\n r_lookup, lookup, num = {}, {}, 0\n for element in self.elements:\n if is_wire(element) and element is not self.ground:\n lookup[num] = element\n r_lookup[element] = num\n num += 1\n elif not is_cs(element) and element is not self.ground:\n lookup[num] = element\n r_lookup[element] = num\n num += 1\n\n # Set up the linear algebraic equation Ax=b\n A = np.zeros((num, num))\n b = np.zeros(num)\n for row, element in lookup.items():\n if is_wire(element) and element is not self.ground:\n for two_sided in element.attached:\n if is_cs(two_sided):\n if two_sided.pos is element:\n b[row] += -1 * two_sided.current\n else:\n b[row] += two_sided.current\n else:\n if two_sided.pos is element:\n flow = 1\n else:\n flow = -1\n A[row, r_lookup[two_sided]] = flow\n elif is_vs(element):\n check_connected(element)\n if element.pos is not self.ground:\n A[row, r_lookup[element.pos]] = 1\n if element.neg is not self.ground:\n A[row, r_lookup[element.neg]] = -1\n b[row] = element.voltage\n elif is_resistor(element):\n check_connected(element)\n if element.pos is not self.ground:\n A[row, r_lookup[element.pos]] = 1\n if element.neg is not self.ground:\n A[row, r_lookup[element.neg]] = -1\n A[row, r_lookup[element]] = -1 * element.resistance\n\n b = b.reshape((num, 1))\n try:\n x = np.linalg.solve(A, b)\n except np.linalg.LinAlgError:\n raise CircuitError('Insufficient information to solve circuit')\n\n # Assign values to all circuit components\n for i in range(num):\n item = lookup[i]\n if is_wire(item):\n item.potential = x[i, 0]\n elif isinstance(item, DualSided):\n item.current = x[i, 0]\n\n # Mark circuit as solved\n self.been_solved = True",
"def evaluate(\n self, nodes, derivatives=np.array([0, 0, 0]), modes=None, unique=False\n ):\n if modes is None:\n modes = self.modes\n if not len(modes):\n return np.array([]).reshape((len(nodes), 0))\n\n # TODO: avoid duplicate calculations when mixing derivatives\n r, t, z = nodes.T\n l, m, n = modes.T\n lm = modes[:, :2]\n\n if unique:\n _, ridx, routidx = np.unique(\n r, return_index=True, return_inverse=True, axis=0\n )\n _, tidx, toutidx = np.unique(\n t, return_index=True, return_inverse=True, axis=0\n )\n _, zidx, zoutidx = np.unique(\n z, return_index=True, return_inverse=True, axis=0\n )\n _, lmidx, lmoutidx = np.unique(\n lm, return_index=True, return_inverse=True, axis=0\n )\n _, midx, moutidx = np.unique(\n m, return_index=True, return_inverse=True, axis=0\n )\n _, nidx, noutidx = np.unique(\n n, return_index=True, return_inverse=True, axis=0\n )\n r = r[ridx]\n t = t[tidx]\n z = z[zidx]\n lm = lm[lmidx]\n m = m[midx]\n n = n[nidx]\n\n # some logic here to use the fastest method, assuming that you're not using\n # \"unique\" within jit/AD since that doesn't work\n if unique and (np.max(modes[:, 0]) <= 24):\n radial_fun = zernike_radial_poly\n else:\n radial_fun = zernike_radial\n\n radial = radial_fun(r[:, np.newaxis], lm[:, 0], lm[:, 1], dr=derivatives[0])\n poloidal = fourier(t[:, np.newaxis], m, dt=derivatives[1])\n toroidal = fourier(z[:, np.newaxis], n, NFP=self.NFP, dt=derivatives[2])\n if unique:\n radial = radial[routidx][:, lmoutidx]\n poloidal = poloidal[toutidx][:, moutidx]\n toroidal = toroidal[zoutidx][:, noutidx]\n\n return radial * poloidal * toroidal",
"def operator_dict(self, index, vars, **kw):\n out = defaultdict(int)\n # Freeze arg1 metadata for caching ncc matrices\n frozen_arg1_basis_meta = freeze_meta(self.args[1].meta)[-1]\n op0 = self.args[0].as_ncc_operator(frozen_arg1_basis_meta, **kw)\n op1 = self.args[1].operator_dict(index, vars, **kw)\n for var in op1:\n out[var] = op0 * op1[var]\n return out",
"def mapping(array, map):\r\n n = map.shape[0] \r\n if array.dtype == np.complex :\r\n arrayout = np.zeros((2*n),dtype=array.real.dtype)\r\n for ii in range(n):\r\n i = map[ii,0]\r\n j = map[ii,1]\r\n arrayout[ii] = array[i,j].real\r\n arrayout[ii+n] = array[i,j].imag\r\n else :\r\n arrayout = np.zeros((n),dtype=array.dtype)\r\n for ii in range(n):\r\n i = map[ii,0]\r\n j = map[ii,1]\r\n arrayout[ii] = array[i,j]\r\n return arrayout",
"def equation(self):\n mat = np.empty((self.nunknowns, self.model.neq))\n rhs = np.zeros(self.nunknowns) # Needs to be initialized to zero\n for icp in range(self.ncp):\n istart = icp * self.nlayers\n ieq = 0\n for e in self.model.elementlist:\n if e.nunknowns > 0:\n mat[istart:istart + self.nlayers, ieq:ieq + e.nunknowns] = \\\n e.potinflayers(self.xcin[icp], self.ycin[icp], self.layers, aq=self.aqin) / self.aqin.Tcol - \\\n e.potinflayers(self.xcout[icp], self.ycout[icp], self.layers, aq=self.aqout) / self.aqout.Tcol\n ieq += e.nunknowns\n else:\n rhs[istart:istart + self.nlayers] -= \\\n e.potentiallayers(self.xcin[icp], self.ycin[icp], self.layers, aq=self.aqin) / self.aqin.T - \\\n e.potentiallayers(self.xcout[icp], self.ycout[icp], self.layers, aq=self.aqout) / self.aqout.T\n return mat, rhs",
"def __init__(self, nodes: Dict[Hashable, List[List]], crs=None):\n\n for coords, _ in nodes.values():\n if len(coords) != 2:\n raise ValueError(\n 'Coordinate vertices for a gr3 type must be 2D, but got '\n f'coordinates {coords}.')\n\n self._id = list(nodes.keys())\n self._coords = np.array(\n [coords for coords, _ in nodes.values()])\n self._crs = CRS.from_user_input(crs) if crs is not None else crs\n self._values = np.array(\n [value for _, value in nodes.values()])",
"def index_rtypes(self):\n metric = self.index_metric\n out = {}\n for fnode in self.nodes.values():\n # only consider outgoing relationships because looping over\n # all object anyways, so will cover everything\n\n for (rtype, dest) in fnode.outgoing_relations:\n dnode = self.nodes[dest]\n\n # merge outgoing and attributes - distinction should not change\n # how vectors are formed\n a1 = fnode.rtypes | fnode.atypes\n b1 = dnode.rtypes | dnode.atypes\n c1 = a1 - b1\n d1 = b1 - a1\n e1 = b1 & a1\n f1 = b1 ^ a1\n g1 = b1 | a1\n\n # merge outgoing and attributes - distinction should not change\n # how vectors are formed\n #a2 = {b for a,b in fnode.outgoing_relations} | {b for a,b in fnode.attributes}\n #b2 = {b for a,b in dnode.outgoing_relations} | {b for a,b in dnode.attributes}\n #c2 = a2 - b2\n #d2 = b2 - a2\n #e2 = b2 & a2\n #f2 = b2 ^ a2\n #g2 = b2 | a2\n\n rval = out.setdefault(rtype, NULL_VEC())\n\n \"\"\"\n TODO: add similarity measure between node and prototype nodes\n\n Idea is to get a ground-truth value for the rtype by measuring\n how src --<rtype>--> dest compares to prototype transformations\n\n \n \n \"\"\"\n\n #types only\n score = np.array([metric(a1, b1),\n metric(a1, c1),#1\n metric(a1, e1),#2\n metric(a1, f1),#3\n metric(a1, g1),#4\n metric(b1, d1),#1\n metric(b1, e1),#2\n metric(b1, f1),#3\n metric(b1, g1),#4\n metric(c1, d1),\n metric(c1, f1),#5\n metric(d1, f1),#5\n metric(f1, g1),\n ],dtype=np.float)\n\n \n #types and objects\n #score = np.array([metric(a1, b1),\n # metric(a1, c1),\n # metric(a1, e1),\n # metric(a1, f1),\n # metric(a1, g1),\n # metric(b1, d1),\n # metric(b1, e1),\n # metric(b1, f1),\n # metric(b1, g1),\n # metric(c1, d1),\n # metric(c1, f1),\n # metric(c1, c2),\n # metric(c1, e2),\n # metric(d1, f1),\n # metric(d1, d2),\n # metric(d1, e2),\n # metric(f1, g1),\n # metric(a2, b2),\n # metric(a2, c2),\n # metric(a2, e2),\n # metric(a2, f2),\n # metric(a2, g2),\n # metric(b2, d2),\n # metric(b2, e2),\n # metric(b2, f2),\n # metric(b2, g2),\n # metric(c2, f2),\n # metric(d2, f2),\n # metric(f2, g2)],dtype=np.float)\n\n out[rtype] = rval + score\n\n #avg = np.mean(list(out.values()),axis=0)\n\n #maxm = np.max(list(out.values()),axis=0)\n\n\n \n #with open(\"rrw.pkl\",\"wb+\") as f:\n # pickle.dump(out, f, -1)\n\n #normalize everything\n for r,v in out.items():\n #out[r] = v / max(v)\n out[r] = v / sqrt(v.dot(v))\n #out[r] = softmax(v/maxm)\n #out[r] = softmax(v/max(v))\n #out[r] = softmax((v-avg)/maxm)\n\n #for debugging purposes\n #np.save(\"utils/vectest.npy\",np.array(list(out.values())))\n \n\n '''\n rcount = self.usage_counts\n vs1 = {}\n for rtype, vec in out.items():\n vs1[rtype] = softmax(vec/rcount[rtype])\n\n data = np.array(list(vs1.values()))\n d2 = data - PCA(data, 1)#eliminate projection onto first principal component\n\n for i,v in enumerate(vs1):#iteration order is preserved\n #rescale output\n out[v] = softmax(d2[i]/rcount[v])\n '''\n return out",
"def equation(self):\n mat = np.empty((self.nunknowns, self.model.neq))\n rhs = np.zeros(self.nunknowns) # Needs to be initialized to zero\n for icp in range(self.ncp):\n istart = icp * self.nlayers\n rhs[istart:istart + self.nlayers] = self.pc\n ieq = 0\n for e in self.model.elementlist:\n if e.nunknowns > 0:\n mat[istart:istart + self.nlayers, ieq:ieq + e.nunknowns] = \\\n e.potinflayers(self.xc[icp], self.yc[icp], self.layers)\n ieq += e.nunknowns\n else:\n rhs[istart:istart + self.nlayers] -= \\\n e.potentiallayers(self.xc[icp], self.yc[icp],\n self.layers) # Pretty cool that this works, really\n return mat, rhs"
] | [
"0.57185227",
"0.56100166",
"0.54828674",
"0.51971954",
"0.5119138",
"0.51168716",
"0.5112834",
"0.509553",
"0.50917715",
"0.5088738",
"0.50783044",
"0.49970037",
"0.4962442",
"0.49544486",
"0.4950212",
"0.49017525",
"0.4892764",
"0.48732916",
"0.4864263",
"0.4856209",
"0.48559493",
"0.48519564",
"0.48358762",
"0.48233342",
"0.4809613",
"0.48095635",
"0.48061684",
"0.48037684",
"0.4803397",
"0.47829178"
] | 0.68733394 | 0 |
Takes the solution vector and variable names, in the form of node voltages and voltage-source currents, and prints them in a user-friendly fashion | def display_sol(sol, node_list, v_src_list):
print("\n")
node_list = node_list[1:]
for i in range(len(node_list)):
print("V_" + node_list[i] + ": ", ffs(np.real(sol[i]), precision=5), '+', ffs(np.imag(sol[i]), precision=5)+'j')
for i in range(len(v_src_list)):
v = v_src_list[i]
print("I_" + v.name + ": ", ffs(np.real(sol[len(node_list)+i]), precision=5), '+', ffs(np.imag(sol[len(node_list)+i]), precision=5)+'j')
print("\n") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def printSolution(self):\n print \"----- Solution -----\"\n for feature in self.features:\n print \"Name = \" + feature.name + \" Value = \" + str(feature.value)",
"def print_solution():\n pass",
"def print_solution(self):\n print(f'Objective: {self.solution.ObjectiveValue()}')\n total_distance = 0\n total_load = 0\n max_route_distance = 0\n for vehicle_id in range(self.data['num_vehicles']):\n index = self.routingManager.Start(vehicle_id)\n plan_output = 'Route for vehicle {}:\\n'.format(vehicle_id)\n route_distance = 0\n route_load = 0\n while not self.routingManager.IsEnd(index):\n node_index = self.manager.IndexToNode(index)\n route_load += self.data['demands'][node_index]\n plan_output += ' {0} Load({1}) -> '.format(self.data['names'][node_index], route_load)\n\n previous_index = index\n index = self.solution.Value(self.routingManager.NextVar(index))\n route_distance += self.routingManager.GetArcCostForVehicle(\n previous_index, index, vehicle_id\n )\n print(route_distance)\n\n plan_output += '{0}, Load({1}) \\n '.format(self.data['names'][self.manager.IndexToNode(index)], route_load)\n\n plan_output += 'Distance of the route: {}\\n'.format(route_distance)\n plan_output += 'Load of the route: {}\\n'.format(route_load)\n\n print(plan_output)\n total_distance += route_distance\n total_load += route_load\n\n print('Total distance of all routes: {}km'.format(total_distance))\n print('Total load of all routes: {}'.format(total_load))",
"def show_variables(self):\r\n\r\n variablelist = [(x_temp,self.variables[x_temp]) for x_temp in sorted(self.variables.keys())]\r\n display.noteprint(('/C/ '+labels.VARIABLES.upper(), EOL.join([x_temp[0]+BLANK\r\n +COLON+BLANK\r\n +abridge(str(x_temp[1]),40)\r\n for x_temp in variablelist])))",
"def print_solution(self, solution_path):\n print(\"---SOLUTION---: \")\n for node in solution_path:\n node.state.plot_cube(\n \"SOLUTION: Node [\" + str(node.id) + \"] at depth \" + str(node.node_depth)\n )\n if node.last_action != None:\n print(\"Next action: \", node.last_action)\n print(\"[\" + str(node.id) + \"] \" + str(node.state.create_md5()))\n\n print(\"\\n TOTAL COST: \", solution_path[len(solution_path) - 1].cost)",
"def _debug_print_soln(self, m):\n print('*'*80)\n print('DEBUGG solution:')\n print(' objective value:', m.obj())\n for c, comp in enumerate(m.Components):\n name = comp.name\n print(' component:', c, name)\n for res, r in m.resource_index_map[comp].items():\n print(' resource:', r, res)\n for t, time_index in enumerate(m.T):\n prod = getattr(m, '{n}_production'.format(n=name))\n print(' time:', t, time_index, prod[r, time_index].value)\n print('*'*80)",
"def PrintSolution(self):\n sol = \"\"\n charMap = {\n Magnets.EMPTY: '.',\n Magnets.PLUS: '+',\n Magnets.MINUS: '-',\n }\n for row in self.Solution():\n for space in row:\n sol = sol + charMap.get(space, '?')\n sol = sol + '\\n'\n return sol",
"def print_vector(self):\n print self.x, self.y, self.z",
"def pretty_print_equation(self):\n\n for n in self.nodes:\n # Get a list of tuples, first is the v\n parents = self.adj_inv[n]\n if len(parents) == 0:\n if self.binary:\n right_side = '{0,1}'\n else:\n right_side = 'N(0, 1)'\n else:\n right_side = ' + '.join(['{:.3f}*x_{}'.format(self.weights[i, n], i)\n for i in parents])\n \n right_side.replace('+ -', '-')\n print('x_{} = {}'.format(n, right_side))",
"def print_solution(data, manager, routing, solution):\n time_dimension = routing.GetDimensionOrDie('Time')\n total_time = 0\n for vehicle_id in range(data['num_vehicles']):\n index = routing.Start(vehicle_id)\n plan_output = 'Route for vehicle {}:\\n'.format(vehicle_id)\n while not routing.IsEnd(index):\n time_var = time_dimension.CumulVar(index)\n plan_output += '{0} Time({1},{2}) -> '.format(\n manager.IndexToNode(index), solution.Min(time_var),\n solution.Max(time_var))\n index = solution.Value(routing.NextVar(index))\n time_var = time_dimension.CumulVar(index)\n plan_output += '{0} Time({1},{2})\\n'.format(manager.IndexToNode(index),\n solution.Min(time_var),\n solution.Max(time_var))\n plan_output += 'Time of the route: {}min\\n'.format(\n solution.Min(time_var))\n print(plan_output)\n total_time += solution.Min(time_var)\n print('Total time of all routes: {}min'.format(total_time))",
"def show_vdcs(self):\n for v in self.vdcs:\n print v",
"def print_solution(data, manager, routing, solution):\r\n time_dimension = routing.GetDimensionOrDie('Time')\r\n total_distance = 0\r\n total_load = 0\r\n total_time = 0\r\n for vehicle_id in range(data['num_vehicles']):\r\n index = routing.Start(vehicle_id)\r\n plan_output = 'Route for vehicle {}:\\n'.format(vehicle_id)\r\n route_distance = 0\r\n route_load = 0\r\n while not routing.IsEnd(index):\r\n node_index = manager.IndexToNode(index)\r\n route_load = route_load + data['demands'][node_index]\r\n time_var = time_dimension.CumulVar(index)\r\n plan_output += ' {0} Load({1}) Time({2},{3}) -> '.format(node_index, route_load,\r\n solution.Min(time_var), solution.Max(time_var))\r\n previous_index = index\r\n index = solution.Value(routing.NextVar(index))\r\n route_distance += routing.GetArcCostForVehicle(previous_index, index, vehicle_id)\r\n time_var = time_dimension.CumulVar(index)\r\n plan_output += ' {0} Load({1}) Time({2},{3})\\n'.format(manager.IndexToNode(index), route_load,\r\n solution.Min(time_var), solution.Max(time_var))\r\n plan_output += 'Distance of the route: {}m\\n'.format(route_distance)\r\n plan_output += 'Load of the route: {}\\n'.format(route_load)\r\n plan_output += 'Time of the route: {}min\\n'.format(solution.Min(time_var))\r\n print(plan_output)\r\n total_distance += route_distance\r\n total_load += route_load\r\n total_time += solution.Min(time_var)\r\n print('Total distance of all routes: {}m'.format(total_distance))\r\n print('Total load of all routes: {}'.format(total_load))\r\n print('Total time of all routes: {}min'.format(total_time))",
"def print_solution(solution_list) -> 'Human Readable Solution':\n\tsize = len(solution_list[0][0])\n\ttry:\n\t\tprint('Starting Node'.center(20, ' '))\n\t\tprint(''.center(20, '-'))\n\t\tfor node in solution_list:\n\t\t\t\tfor i in range(size):\n\t\t\t\t\tprint(str(node[i]).center(20, ' '))\n\t\t\t\tprint(''.center(20, '-'))\n\t\tprint('Goal Node'.center(20, ' '))\n\texcept Exception as error_msg:\n\t\tprint(\"No solution found!\")",
"def print_v(self, filename, gather=True):\n global controller\n timeStep = (controller.dao.machineTimeStep*1.0)/1000.0\n v = self.get_v(gather, compatible_output=True)\n utility_calls.check_directory_exists(filename)\n fileHandle = open(filename, \"w\")\n first_id = 0\n num_neurons = self.vertex.atoms\n dimensions = self.vertex.atoms\n fileHandle.write(\"# first_id = %d\\n\" % first_id)\n fileHandle.write(\"# n = %d\\n\" % num_neurons)\n fileHandle.write(\"# dt = %f\\n\" % timeStep)\n fileHandle.write(\"# dimensions = [%d]\\n\" % dimensions)\n fileHandle.write(\"# last_id = %d\\n\" % (num_neurons-1))\n for (neuronId, time, value) in v:\n fileHandle.write(\"%f\\t%d\\n\" % (value, neuronId))\n fileHandle.close()",
"def print(self):\n for var in self.variables:\n print(var)",
"def vprint(string):\n global verbose\n if verbose:\n print(string)",
"def show_graph(self):\n print(f'|V| = {self.V}, |E| = {self.E}')\n for n in range(1, self.V+1):\n print(f'[{n}] -> {self.adjacency_list[n]}')",
"def print_solution(manager, routing, solution):\n print('Objective: {} miles'.format(solution.ObjectiveValue()))\n index = routing.Start(0)\n plan_output = 'Route for vehicle 0:\\n'\n route_distance = 0\n while not routing.IsEnd(index):\n plan_output += ' {} ->'.format(manager.IndexToNode(index))\n previous_index = index\n index = solution.Value(routing.NextVar(index))\n route_distance += routing.GetArcCostForVehicle(previous_index, index, 0)\n plan_output += ' {}\\n'.format(manager.IndexToNode(index))\n print(plan_output)\n plan_output += 'Route distance: {}miles\\n'.format(route_distance)",
"def print_solution(data, manager, routing, assignment):\n total_distance = 0\n total_load = 0\n for vehicle_id in range(data['num_vehicles']):\n index = routing.Start(vehicle_id)\n plan_output = 'Route for vehicle {}:\\n'.format(vehicle_id)\n route_distance = 0\n route_load = 0\n while not routing.IsEnd(index):\n node_index = manager.IndexToNode(index)\n route_load += data['demands'][node_index]\n plan_output += ' {0} Load({1}) -> '.format(node_index, route_load)\n previous_index = index\n index = assignment.Value(routing.NextVar(index))\n route_distance += routing.GetArcCostForVehicle(\n previous_index, index, vehicle_id)\n plan_output += ' {0} Load({1})\\n'.format(manager.IndexToNode(index),\n route_load)\n plan_output += 'Distance of the route: {}m\\n'.format(route_distance)\n plan_output += 'Load of the route: {}\\n'.format(route_load)\n # print(plan_output)\n total_distance += route_distance\n total_load += route_load\n with open(f\"Survey/vrp-nanostores/vrp-nanostores/food_deserts/outputs/2-e/clust8/route/route_vehicle{vehicle_id}.txt\", \"w\") as file:\n file.write(plan_output)\n file.close()\n print(\"aaa\")\n print('Total cost for all routes: {}m'.format(total_distance))\n print('Total load of all routes: {}'.format(total_load))\n with open(f\"Survey/vrp-nanostores/vrp-nanostores/food_deserts/outputs/2-e/clust8/load_dist_{data['num_vehicles']}vehicles.txt\", \"w\") as file:\n out_file = \"\"\n out_file += str(total_load) + \",\" + str(total_distance)\n file.write(out_file)\n file.close() # OPEN AND ANALYZE LATER WITH PANDAS",
"def printvarindex(fname):\n cursor = eplussql.getcursor(fname)\n mtx1 = eplussql.get_varindex(cursor)\n mtx2 = [[str(item) for item in row] for row in mtx1]\n mtx3 = [','.join(row) for row in mtx2]\n for row in mtx3:\n print row",
"def print_solution(manager, routing, solution, dima):\n print('Objective: {} miles'.format(solution.ObjectiveValue()))\n index = routing.Start(0)\n plan_output = 'Route for vehicle 0:\\n'\n route_distance = 0\n while not routing.IsEnd(index):\n plan_output += ' {} ->'.format(manager.IndexToNode(index))\n previous_index = index\n index = solution.Value(routing.NextVar(index))\n route_distance += routing.GetArcCostForVehicle(previous_index, index, 0)\n plan_output += ' {}\\n'.format(manager.IndexToNode(index))\n plan_output += 'Route distance: {} miles\\n'.format(route_distance)\n print(plan_output)",
"def print_solution(manager, routing, assignment):\n print('Objective: {}'.format(assignment.ObjectiveValue()))\n index = routing.Start(0)\n plan_output = 'Route:\\n'\n route_distance = 0\n while not routing.IsEnd(index):\n plan_output += ' {} ->'.format(index)\n previous_index = index\n index = assignment.Value(routing.NextVar(index))\n route_distance += routing.GetArcCostForVehicle(previous_index, index, 0)\n plan_output += ' {}\\n'.format(manager.IndexToNode(index))\n print(plan_output)\n plan_output += 'Objective: {}m\\n'.format(route_distance)",
"def printDesignVariables(self):\n print(\"-\" * 85)\n print(\"{:>30}{:>20}{:>20}\".format(\"CSM Design Parameter\", \"Name\", \"Value\"))\n print(\"-\" * 85)\n for dvName in self.DVs:\n DV = self.DVs[dvName]\n print(f\"{DV.csmDesPmtr:>30}{DV.name:>20}{DV.value:>20}\")",
"def print_solution(manager, routing, solution):\n print('Objective: {} miles'.format(solution.ObjectiveValue()))\n index = routing.Start(0)\n plan_output = 'Route for vehicle 0:\\n'\n route_distance = 0\n while not routing.IsEnd(index):\n plan_output += ' {} ->'.format(manager.IndexToNode(index))\n previous_index = index\n index = solution.Value(routing.NextVar(index))\n route_distance += routing.GetArcCostForVehicle(previous_index, index, 0)\n plan_output += ' {}\\n'.format(manager.IndexToNode(index))\n print(plan_output)\n plan_output += 'Route distance: {}miles\\n'.format(route_distance)",
"def printPath(edgesTo,v):\r\n path = str()\r\n while v is not None:\r\n print(v) \r\n path += str(v) + ' -> ' \r\n v = edgesTo[v]\r\n print(path)",
"def print_solution(manager, routing, assignment):\n print('Objective: {}'.format(assignment.ObjectiveValue()))\n index = routing.Start(0)\n plan_output = 'Route:\\n'\n route_distance = 0\n while not routing.IsEnd(index):\n plan_output += ' {} ->'.format(manager.IndexToNode(index))\n previous_index = index\n index = assignment.Value(routing.NextVar(index))\n route_distance += routing.GetArcCostForVehicle(previous_index, index, 0)\n plan_output += ' {}\\n'.format(manager.IndexToNode(index))\n print(plan_output)\n plan_output += 'Objective: {}m\\n'.format(route_distance)",
"def see(s, v):\n print(\"---- %s -----\" % s)\n print(v)",
"def solution_to_string(self):\n solution_vector_index_format = [index+1 if elem == 1 else -index-1 for index, elem in enumerate(self.solution_vector)]\n return \" \".join(map(str, solution_vector_index_format))",
"def query_and_print_variables(md):\n\n # save x variable as dictionary with keys (s, v, t)\n x_searchers = {}\n # save beta variable as dictionary with keys (v, t)\n b_target = {}\n\n for var in md.getVars():\n my_var_name = var.varName\n my_var_value = var.x\n print('%s %g' % (my_var_name, my_var_value))\n\n if 'x' in my_var_name:\n s = int(my_var_name[2])\n v = int(my_var_name[4])\n t = int(my_var_name[6])\n\n if my_var_value >= 0.5:\n x_searchers[(s, v, t)] = 1\n else:\n x_searchers[(s, v, t)] = 0\n\n elif 'beta' in my_var_name:\n # print('%s %g' % (my_var_name, my_var_value))\n # remember b[0] is probability of capture\n v = int(my_var_name[5])\n t = int(my_var_name[7])\n b_target[v, t] = my_var_value\n\n obj = md.getObjective()\n print(obj.getValue())\n\n return x_searchers, b_target",
"def print(cls, vas):\n print(vas)"
] | [
"0.70311254",
"0.6513373",
"0.63381404",
"0.6321742",
"0.62229276",
"0.62111807",
"0.6145689",
"0.595207",
"0.59471786",
"0.5912587",
"0.5911108",
"0.58804804",
"0.58691436",
"0.5866293",
"0.57992846",
"0.57930803",
"0.57446843",
"0.5736822",
"0.5732533",
"0.57235485",
"0.57170826",
"0.5715841",
"0.57130206",
"0.56894153",
"0.56803584",
"0.5658738",
"0.56533146",
"0.5648205",
"0.5644899",
"0.56079125"
] | 0.6904997 | 1 |
If var is None, it means an error has happened (and been reported by earlier prints), so exit execution with code 1 | def EXIT_ON_NONE(var):
try:
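        # If var is an array, .any() succeeds and nothing happens; None (or any
        # non-array value) raises instead and is checked below.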
if not var.any():
pass
    except AttributeError:
        if var is None:
sys.exit(1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def exit_error(message: Optional[str] = None) -> NoReturn:\n\n\tif (message != None):\n\t\tprint(message)\n\tsys.exit(EXIT_FAILURE)",
"def error(message, code=None):\n print_error(message)\n sys.exit(code or 1)",
"def finalize_error():\n print('')\n exit(-1)",
"def check_error(err):\n if err != None:\n logging.error(err)\n sys.exit(-1)",
"def error(msg):\n print 'ERROR: %s' % msg\n sys.exit(1)",
"def error(message):\n print message\n sys.exit(2)",
"def ErrorExit(msg):\r\n print >>sys.stderr, msg\r\n sys.exit(1)",
"def error(code, message):\n sys.stderr.write(message)\n sys.exit(code)",
"def ConsoleExit(self, errorcode=200):\n pass",
"def error(error_no):\n print('--] Encountered unrecoverable ERROR [%s] ... leaving' % error_no)\n write_termination_message(error_no)\n sys.exit(0)",
"def ErrorExit(msg):\n print >>sys.stderr, msg\n sys.exit(1)",
"def fail(msg, exit_code=1):\n sys.stderr.write(\"{}\\n\".format(msg))\n sys.exit(exit_code)",
"def bail_out(exit_code=ErrorCode.NO_ERROR):\n sys.exit(exit_code.value)",
"def error():\n return None",
"def determine_exit_code(self) -> int:",
"def die(msg):\n errorPrint(msg)\n sys.exit(1)",
"def _fatal(msg):\n sys.stderr.write(msg + \"\\n\")\n sys.exit(1)",
"def error_if_null_return(retval: Any, func: Callable, args: Tuple[Any]):\n if not retval:\n raise WinError()\n return retval",
"def exit(status=None): # real signature unknown; restored from __doc__\n pass",
"def _exit(message):\n\tprint('ERROR: ' + message, file=sys.stderr)\n\tsys.exit(1)",
"def errorCheck(sh, returncode, stderr):\n\tif returncode!=0 or stderr!='':\n\t\tif config.DEBUG:\n\t\t\tmsg = \"sh code execution [%s] returned non-zero exit status [%s] and/or non-empty stdterr [%s]\" % (repr(sh), returncode, repr(stderr.strip()))\n\t\telse:\n\t\t\tmsg = \"sh code execution returned non-zero exit status and/or non-empty stdterr\"\n\t\traise Exception(msg)",
"def abort(msg=''):\n if msg:\n print >> sys.stderr, msg\n sys.exit(1)",
"def error(msg: str) -> None:\n print('ERROR: {msg}'.format(msg=msg))\n sys.exit()",
"def error_handler(self):\n if self.ctx.exit_code is not None:\n return self.ctx.exit_code",
"def exit_success(message: Optional[str] = None) -> NoReturn:\n\n\tif (message != None):\n\t\tprint(message)\n\tsys.exit(EXIT_SUCCESS)",
"def die(msg=None,rc=1):\n\n if msg:\n error(msg)\n\n sys.exit(rc)",
"def die(errmsg):\n eprint(errmsg)\n exit(1)",
"def check_exitcode(exit_code):\n if exit_code == 0:\n logging.debug(\"Exit code OK (0)\")\n else:\n raise RuntimeError(\"Bad exit code (%d)\" % exit_code)",
"def err(string, exitval):\n\tprint >> sys.stderr, string.rstrip()\n\tsys.exit(exitval)",
"def execute_failure(self, *args, **kwargs):\n return 1, \"\", None"
] | [
"0.6083959",
"0.6044611",
"0.60042363",
"0.60028774",
"0.59937257",
"0.5983206",
"0.5926563",
"0.59169286",
"0.5879745",
"0.5866192",
"0.58173686",
"0.5799356",
"0.57898295",
"0.57889676",
"0.57837886",
"0.5783207",
"0.5754361",
"0.57477415",
"0.57348764",
"0.5712263",
"0.57109964",
"0.5709496",
"0.5708857",
"0.5705119",
"0.57017046",
"0.5700466",
"0.5700328",
"0.5697657",
"0.56959134",
"0.568569"
] | 0.68557256 | 0 |
Unable to continue because the server could not fulfill the request. Most of the time this is due to a third-party request. | def not_implemented(self):
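    # 501 Not Implemented: tell the client the server cannot fulfil this request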
response.status = 501
return {'message':'server was not able to complete this request'} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _raise_performing_request_error(self, *args, **kwargs):",
"def _retry_occurred(self):",
"def _raise_http_error(self, *args, **kwargs):",
"def _raise_if_inprogress_or_timeout(self):\n if self._stack_result is None or self._recm_data is None:\n # If the response is not ready and the timeout period is over, send error 408\n if request_timed_out(self._db_result):\n error_message = 'Stack analysis request {} has timed out. Please retry ' \\\n 'with a new analysis.'.format(self.external_request_id)\n logger.error(error_message)\n raise SARBRequestTimeoutException(error_message)\n else:\n error_message = 'Analysis for request ID {} is in progress'.format(\n self.external_request_id)\n logger.warning(error_message)\n raise SARBRequestInprogressException(error_message)",
"def bad_request(self, message=None):\n return self.send_message(message, status=400)",
"def test_upload_service_unavailable(self):\n self._retryable.side_effect = requests.HTTPError('Fail')\n\n payload = dict(id=\"stub_id\", data={\"some\": \"data\"})\n resp = self.client.post(self.url, json=payload)\n\n assert resp.status_code == 500\n assert resp.get_json() == {\n 'status': 'Error',\n 'type': 'HTTPError',\n 'status_code': 500,\n 'message': \"Unable to access upload-service\"\n }",
"def resolve_failure(self):\n\t\tpass",
"def error_handler(source, prod, HEADERS):\n\n try:\n req = requests.get(source, params=prod, headers=HEADERS)\n except Timeout as e:\n print(\"\\nThe website took too long to respond. Please try after sometime.\\n\")\n sys.exit(1)\n except ConnectionError as e:\n print(\"\\nYou do not have a descent internet connection. Please check your Internet Connection and try again later.\\n\")\n sys.exit(1)\n except TooManyRedirects as e:\n print(\"\\nYour request exceeded the configured number of maximum redirections. Please try after sometime.\\n\")\n sys.exit(1)\n except Exception as e:\n print(\"\\nRequest souldn't be completed. Please try after sometime.\\n\")\n sys.exit(1)\n\n return req",
"def connectFailed(self, reason):\n\t\tself._tunnelReadyDeferred.errback(reason)",
"def retry_if_connection_error(exception):\r\n # return True\r\n return isinstance(exception, HttpError)",
"def _handle_resource_exhausted_error():\n _debug_print(\n \"Traceback that led to resource exhaustion handling: \" + traceback.format_exc()\n )\n time.sleep(3)",
"def server_fault(e):\n return \"Something went wrong, and it is our fault. Try reloading the page.\"",
"def bad_request():\n return HttpError(400)",
"def test_request_failed(self, kasserver, kasapi):\n kasapi.side_effect = zeep.exceptions.Fault(\"failed\")\n with pytest.raises(zeep.exceptions.Fault):\n kasserver._request(self.REQUEST_TYPE, self.REQUEST_PARAMS)",
"def connection_lost(self, exc):\n pass",
"def connection_failed(self, connection, error):\n assert False",
"def Take_Off_Connection_Failed(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def handle_connection_lost(self, exc: Optional[Exception]) -> None:",
"def error(self, flow: mitmproxy.http.HTTPFlow):",
"def request_failed(self, ignored):\n self._errors += 1",
"def _process_fetch_failure(self):\n logger.info('DataFetcher: No valid result is received')\n if len(self.urls_processed) == len(self.urls):\n raise NoDataReceivedFromCaster()\n for _, error_code, error_text in self._curls_failed:\n if error_code == PYCURL_TIMEOUT_ERRNO:\n raise ExceededTimeoutError(error_text)\n if self._curls_failed:\n _, _, error_text = self._curls_failed[0]\n raise UnableToConnect(error_text)\n raise NoDataReceivedFromCaster()",
"def on_request_error(self, status_code):\n log.error(\"Stream encountered HTTP error: %d\", status_code)",
"def on_timeout(self):\n logging.error(\"Streaming request timed out\")",
"def request_until_succeed(url):\n req = Request(url)\n success = False\n while success is False:\n try:\n response = urlopen(req)\n if response.getcode() == 200:\n success = True\n except Exception as e:\n print(e) \n if e.file:\n data = json.loads(e.file.read())\n if 'error' in data and 'error_subcode' in data['error'] and data['error']['error_subcode'] == 33:\n return None\n time.sleep(5)\n\n print(\"Error for URL {}: {}\".format(url, datetime.datetime.now()))\n print(\"Retrying.\")\n\n return response.read()",
"def decide_to_retry(error):\n return True",
"def handle_failure_request(self) -> HttpResponse:\n return HttpResponseNotFound()",
"def failure(self, error):\n \n self.request.response.status_int = 400\n return None",
"def on_connection_error(self):\n log.error(\"Stream connection has errored or timed out\")",
"def raise_on_error(self):\n if not self._status.success:\n cls = UrlApi.InfraHTTPError if self._infra_step else UrlApi.HTTPError\n raise cls('HTTP status (%d)' % (self.status_code,), self)",
"def handle_request_unknown(self, msg):\n\t\traise NotFound()"
] | [
"0.7025658",
"0.60970294",
"0.5991379",
"0.59823847",
"0.5957003",
"0.58494574",
"0.58302826",
"0.58297503",
"0.58086425",
"0.57881504",
"0.5785328",
"0.5778837",
"0.5773076",
"0.576584",
"0.57561743",
"0.57517314",
"0.57288325",
"0.57262796",
"0.5704582",
"0.56889284",
"0.5682813",
"0.56706375",
"0.56433153",
"0.563468",
"0.56344473",
"0.56141543",
"0.5555549",
"0.5548418",
"0.5548161",
"0.5539482"
] | 0.65417635 | 1 |
Get embedded EXIF data from image file. | def get_exif_data(fname):
ret = {}
try:
img = Image.open(StringIO.StringIO(fname))
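        # despite the parameter name, fname holds raw image bytes, hence the in-memory buffer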
if hasattr( img, '_getexif' ):
exifinfo = img._getexif()
if exifinfo != None:
for tag, value in exifinfo.items():
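                    # TAGS maps numeric EXIF tag ids to human-readable names; unknown ids keep their number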
decoded = TAGS.get(tag, tag)
ret[decoded] = value
except IOError:
print 'IOERROR ' + fname
return ret | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_exif_data(self):\n self.exif_data = piexif.load(self.img_path)\n return self.exif_data",
"def get_exif_data(fname):\n ret = {}\n try:\n img = Image.open(fname)\n if hasattr( img, '_getexif' ):\n exifinfo = img._getexif()\n if exifinfo != None:\n for tag, value in exifinfo.items():\n decoded = TAGS.get(tag, tag)\n ret[decoded] = value\n except IOError:\n print ('IOERROR ' + fname)\n return ret",
"def _exif_data(self):\n return exif.get_metadata(self._filename)",
"def get_exif_data(filename):\n ret = {}\n try:\n img = Image.open(filename)\n if hasattr( img, '_getexif' ):\n exifinfo = img._getexif()\n if exifinfo != None:\n for tag, value in exifinfo.items():\n decoded = TAGS.get(tag, tag)\n ret[decoded] = value\n except IOError:\n print 'IOERROR ' + filename\n return ret",
"def get_exif_data(fn):\n exif_data = {}\n i = Image.open(fn)\n info = i._getexif()\n if info:\n for tag, value in info.items():\n decoded = TAGS.get(tag, tag)\n if decoded == \"GPSInfo\":\n gps_data = {}\n for t in value:\n sub_decoded = GPSTAGS.get(t, t)\n gps_data[sub_decoded] = value[t]\n exif_data[decoded] = gps_data\n else:\n exif_data[decoded] = value\n return exif_data",
"def extract_exif_data(image):\n if isinstance(image,(str,unicode,file)):\n # Open image\n im=Image.open(image)\n else:\n im=image\n # Get EXIF numeric tags info\n exif_data = im._getexif()\n data={}\n if exif_data is not None:\n data={ \n ExifTags.TAGS[k]: v\n for k, v in exif_data.items()\n if k in ExifTags.TAGS\n }\n return data",
"def extract_exif_data(self, path_img):\n try:\n img = PIL.Image.open(path_img)\n self.exif = {PIL.ExifTags.TAGS[k]: v\n for k, v in img._getexif().items()\n if k in PIL.ExifTags.TAGS}\n if 'GPSInfo' in self.exif.keys():\n latitude = str(self.getCoordinate(self.exif['GPSInfo'][2], self.exif['GPSInfo'][1]))\n longitude = str(self.getCoordinate(self.exif['GPSInfo'][4], self.exif['GPSInfo'][3]))\n self.exif['GPSInfo'] = \"https://www.google.com/maps/search/?api=1&query=\" + str(latitude) + \",\" + str(\n longitude)\n return self.exif\n except:\n return None",
"def get_exif_data(image):\n exif_data = {}\n info = image._getexif()\n if info:\n for tag, value in info.items():\n decoded = TAGS.get(tag, tag)\n if decoded == \"GPSInfo\":\n gps_data = {}\n for t in value:\n sub_decoded = GPSTAGS.get(t, t)\n gps_data[sub_decoded] = value[t]\n\n exif_data[decoded] = gps_data\n else:\n exif_data[decoded] = value\n\n return exif_data",
"def get_exif(path, key, numeric=False):\n args = ['exiftool', '-' + key, path]\n if numeric:\n args.insert(1, '-n')\n output = subprocess.check_output(args).strip()\n if ':' not in output:\n raise EXIFError(\"%s has no EXIF data for %s\" % (path, key))\n return output.split(':')[1].strip()",
"def exif(filename):\n clef = ['Exif.Image.Make',\n 'Exif.Image.Model',\n 'Exif.Image.DateTime',\n 'Exif.Photo.ExposureTime',\n 'Exif.Photo.FNumber',\n 'Exif.Photo.DateTimeOriginal',\n 'Exif.Photo.DateTimeDigitized',\n 'Exif.Photo.ShutterSpeedValue',\n 'Exif.Photo.ApertureValue',\n 'Exif.Photo.ExposureBiasValue',\n 'Exif.Photo.Flash',\n 'Exif.Photo.FocalLength',\n 'Exif.Photo.ISOSpeedRatings'\n]\n data = {}\n image_exif = Exif(filename)\n image_exif.read()\n comment = image_exif.comment\n\n for i in clef:\n try:\n data[i] = image_exif.interpretedExifValue(i)\n except:\n data[i] = \"\"\n return data, comment",
"def get_exif_data(imgpath):\n exif_data = {}\n try:\n image=Image.open(imgpath)\n except:\n print(\"Failed to open \",imgpath)\n\n info = image._getexif()\n if info:\n for tag, value in info.items():\n decoded = TAGS.get(tag, tag)\n if decoded == \"GPSInfo\":\n gps_data = {}\n for t in value:\n sub_decoded = GPSTAGS.get(t, t)\n gps_data[sub_decoded] = value[t]\n\n exif_data[decoded] = gps_data\n else:\n exif_data[decoded] = value\n else:\n print(\"No GPS info in \",imgpath)\n image.close()\n return exif_data",
"def get_EXIF_features(mistery_photo, features='default', verbose=False):\n exif_data = dict()\n\n f = os.path.join(mistery_photo['dir'], mistery_photo['filename'])\n # open in binary mode\n photo = open(f, 'rb')\n # Read EXIF data\n tags = exifread.process_file(photo, details=False)\n # Extract time\n\n # Quick&Dirty to extract month\n # NEED TO BE IMPROVED TO SUPORT year\n try:\n timestamp = tags['EXIF DateTimeOriginal'].values\n d, h = timestamp.split()\n exif_data['day'] = d.split(':')[2].strip()\n exif_data['month'] = d.split(':')[1].strip()\n exif_data['year'] = d.split(':')[0].strip()\n\n exif_data['hour'] = h.split(':')[0].strip()\n exif_data['min'] = h.split(':')[1].strip()\n exif_data['sec'] = h.split(':')[2].strip()\n\n except:\n # add log\n # configuration file default photos\n if verbose:\n print('error with {}'.format(f))\n\n exif_data['year'] = None\n\n return exif_data",
"def getExifData(filePath):\n\tbad_tags = ('EXIF Tag 0x9009', 'MakerNote Tag 0x0099',\n\t\t\t\t'EXIF UserComment')\n\ttry:\n\t\twith open(filePath, 'rb') as f:\n\t\t\treturn [\"%s: %s\" % (tag, data) \n\n\t\tfor tag, data in exifread.process_file(f).iteritems() \n\t\tif tag not in bad_tags]\n\texcept OSError:\n\t\treturn",
"def get_image_data (file_path, metadata_required):\n lookup = ImageLookup()\n return lookup.lookup_by_filename(file_path, metadata_required=False)",
"def extract_exif(fname):\n\n try:\n # check if file has EXIF date, exception if not\n exif_data = fileops.get_exif_datetimeorig_tag(fname)\n\n # extract the date/time string from EXIF, exception if\n # not the proper format\n datetimestr = exif_to_datetimestr(exif_data)\n\n logging.debug(\"Found EXIF Tag %r for file %r\", datetimestr, \n os.path.basename(fname))\n\n return datetimestr\n\n except fileops.EXIFTagError:\n logging.warning(\"%r does not have a proper EXIF tag\",\n os.path.basename(fname))\n return \"\";\n\n except DateStrError:\n logging.warning(\"%r EXIF tag not the right format\",\n os.path.basename(fname))\n return \"\";",
"def fetch_exif_tags(image, s3bucket):\n\n s3client = boto3.client('s3', region_name=AWS_REGION)\n\n useful_exif_tags = [ # List of useful EXIF tags as presented in ExifRead\n 'Image Make',\n 'Image Model',\n 'Image DateTime',\n # 'Image Orientation',\n 'EXIF LensModel',\n 'EXIF ISOSpeedRatings',\n 'EXIF ExposureTime',\n 'EXIF FNumber',\n 'EXIF ExposureProgram',\n # 'EXIF ExposureMode'\n 'EXIF FocalLength',\n # 'EXIF ExifImageWidth',\n # 'EXIF ExifImageLength',\n 'GPS GPSAltitude',\n 'GPS GPSLatitude',\n 'GPS GPSLatitudeRef',\n 'GPS GPSLongitude',\n 'GPS GPSLongitudeRef',\n ]\n\n try:\n temp_file = '/tmp/tmpimage.jpg'\n\n with open(temp_file, 'wb') as data:\n s3client.download_fileobj(s3bucket, image, data)\n\n tf = open(temp_file, 'rb')\n exif_tags = exif.process_file(tf, details=False)\n\n exifs_dict = {}\n\n for tag in exif_tags.keys():\n if tag in useful_exif_tags: # Filtering whole EXIF array to select only list of useful\n\n if tag == 'Image DateTime': # Creating datetime in ISO format\n shoot_date = datetime.datetime.strptime(exif_tags[tag].printable,\n \"%Y:%m:%d %H:%M:%S\").isoformat()\n exifs_dict.update({'ShootingTime': shoot_date})\n\n elif tag.startswith('EXIF'):\n exif_tag_str = tag.lstrip('EXIF')\n exifs_dict.update({exif_tag_str.lstrip(): exif_tags[tag].printable})\n\n elif tag.startswith('GPS'):\n exif_tag_str = tag.lstrip('GPS')\n exifs_dict.update({exif_tag_str.lstrip(): exif_tags[tag].printable})\n\n else:\n exifs_dict.update({tag: exif_tags[tag].printable})\n\n return exifs_dict\n\n except Exception as e:\n print(\"EXIF tags fetching failed because of : \", e)",
"def strip_exif(self,img):\n data = list(img.getdata())\n image_without_exif = PIL.Image.new(img.mode, img.size)\n image_without_exif.putdata(data)\n return image_without_exif",
"def get_file(self):\n img_hex = self._segments['preceding']\n\n if self.has_exif:\n img_hex += self._segments['APP1'].get_segment_hex()\n\n img_hex += self._segments['succeeding']\n\n return binascii.unhexlify(img_hex)",
"def get_image_data(self):\n raise NotImplementedError(str(type(self)) + 'does not'\n 'implement get_image.')",
"def get_data_ge(logger, file):\n fp = open(file, 'rb')\n offset = 8192\n\n fp.seek(18)\n size, nframes = st.unpack('<ih',fp.read(6))\n if size != 2048:\n logger.error('GE image size unexpected: '+str(size))\n return None, 0, 0\n\n fsize = os.stat(str(fp).split(\"'\")[1]).st_size\n nframes_calc = (fsize - offset)/(2*size**2)\n\n if nframes != nframes_calc:\n logger.error('GE number frames unexpected: '+str(nframes))\n return None, 0, 0\n\n pos = offset\n fp.seek(pos)\n\n return fp, int(nframes_calc), size*size",
"def extract_data(filename, num_images, starting_id, context_factor):\n imgs = []\n for i in range(starting_id, num_images+starting_id):\n imageid = \"satImage_%.3d\" % i\n image_filename = filename + imageid + \".png\"\n if os.path.isfile(image_filename):\n print ('Loading ' + image_filename)\n img = mpimg.imread(image_filename)\n\n\n imgs.append(img)\n else:\n print ('File ' + image_filename + ' does not exist')\n\n num_images = len(imgs)\n IMG_WIDTH = int(imgs[0].shape[0]/DOWNSCALE)\n IMG_HEIGHT = int(imgs[0].shape[1]/DOWNSCALE)\n N_PATCHES_PER_IMAGE = (IMG_WIDTH/IMG_PATCH_SIZE)*(IMG_HEIGHT/IMG_PATCH_SIZE)\n\n\n img_patches = [img_crop_context(imgs[i], IMG_PATCH_SIZE, IMG_PATCH_SIZE,context_factor, sub_mean=True) for i in range(num_images)]\n data = [img_patches[i][j] for i in range(len(img_patches)) for j in range(len(img_patches[i]))]\n data = np.asarray(data)\n return data",
"def getImgContentFile(img):\n format, imgstr = img.split(';base64,')\n ext = format.split('/')[-1]\n file = ContentFile(base64.b64decode(imgstr), name='temp.' + ext)\n return file",
"def test_jpeg_exif(h, f):\n if h[6:10].lower() == 'exif':\n return 'jpeg'",
"def _getAllMeta(self):\n try:\n metadata = pyexiv2.ImageMetadata(self.imagePath)\n metadata.read()\n return metadata\n except:\n print 'error reading meta data'\n return None",
"def GetHeaders(the_file):\n\n data = exifread.process_file(the_file, 'UNDEF', False, False, False)\n return data",
"def getimg(filename):\n return np.asarray(Image.open('imgdb/'+filename))",
"def open_image_and_meta(image_bytes):\n with MemoryFile(image_bytes) as memfile:\n with memfile.open() as src:\n meta = src.meta\n arr = reshape_as_image(src.read())\n return arr, meta",
"def data_for_src(self, file_id):\n data, metadata = self.load(file_id, True)\n return \"data:image/gif;base64,%s\" % data.encode('base64')",
"def image_info(self):\n\n if not self._image_info:\n path_image_info = os.path.join(\n self._path, f\"ImageSet_{self._image['ImageSetID']}.ImageInfo\"\n )\n\n # Make sure the ImageInfo file really exists\n if not os.path.exists(path_image_info):\n self.logger.warning(\"ImageInfo path doesn't exist: %s\", path_image_info)\n return None\n\n self.logger.debug(\"Reading image data from: %s\", path_image_info)\n self._image_info = pinn_to_dict(path_image_info)\n\n return self._image_info",
"def get_metadata(files, color_space, sat_percent):\n\n\t# Read exposure time, gain and aperture from EXIF data\n\tdata = dict()\n\tdata['exp'], data['gain'], data['aperture'] = np.empty((3, len(files)))\n\n\tfor i, file in enumerate(files):\n\t\twith open(file, 'rb') as f:\n\t\t\ttags = exifread.process_file(f)\n\t\tif 'EXIF ExposureTime' in tags:\n\t\t\tdata['exp'][i] = np.float32(Fraction(tags['EXIF ExposureTime'].printable))\n\t\telif 'Image ExposureTime' in tags:\n\t\t\tdata['exp'][i] = float(Fraction(tags['Image ExposureTime'].printable))\n\t\telse:\n\t\t\traise Exception(f'Unable to read exposure time for {file}. Check EXIF data.')\n\n\t\tif 'EXIF ISOSpeedRatings' in tags:\n\t\t\tdata['gain'][i] = float(tags['EXIF ISOSpeedRatings'].printable)/100\n\t\telif 'Image ISOSpeedRatings' in tags:\n\t\t\tdata['gain'][i] = float(tags['Image ISOSpeedRatings'].printable)/100\n\t\telse:\n\t\t\traise Exception(f'Unable to read ISO. Check EXIF data for {file}.')\n\n\t\t# Aperture formula from https://www.omnicalculator.com/physics/aperture-area\n\t\tfocal_length = float(Fraction(tags['EXIF FocalLength'].printable))\n\t\tf_number = float(Fraction(tags['EXIF FNumber'].printable))\n\t\tdata['aperture'][i] = np.pi * (focal_length / 2 / f_number)**2\n\n\t# Get remaining data from rawpy\n\traw = rawpy.imread(files[0])\n\tdata['h'], data['w'] = raw.postprocess(user_flip=0).shape[:2]\n\tdata['black_level'] = np.array(raw.black_level_per_channel)\n\t# For some cameras, the provided white_level is incorrect\n\tdata['saturation_point'] = raw.white_level*sat_percent\n\tdata['color_space'] = color_space.lower()\n\n\tlogger.info(f\"Stack contains {len(files)} images of size: {data['h']}x{data['w']}\")\n\tlogger.info(f\"Exp: {data['exp']}\")\n\tlogger.info(f\"Gain: {data['gain']}\")\n\tlogger.info(f\"aperture: {data['aperture']}\")\n\tlogger.info(f\"Black-level: {data['black_level']}\")\n\tlogger.info(f\"Saturation point: {data['saturation_point']}\")\n\tlogger.info(f\"Color-space: {color_space}\")\n\n\treturn data"
] | [
"0.7258743",
"0.7192131",
"0.7121678",
"0.705259",
"0.69628465",
"0.6757472",
"0.67125493",
"0.6626685",
"0.6603185",
"0.6571405",
"0.65506035",
"0.6476884",
"0.64304584",
"0.6219957",
"0.61542845",
"0.60704327",
"0.5961148",
"0.5961018",
"0.57735795",
"0.57373935",
"0.57016903",
"0.569225",
"0.5657295",
"0.56282336",
"0.56066555",
"0.5604661",
"0.55755544",
"0.55667526",
"0.5557431",
"0.5538574"
] | 0.7227778 | 1 |
Sets the Tile as a mine. self.count attribute is changed to 9, the index of the mine image in Tile.images. Marks self.mine boolean attribute to be true | def setMine(self):
self.count = 13
self.mine = True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def enumerate_tiles(self):\n # Iterates through entire game board.\n for row in range(self.rows):\n for col in range(self.cols):\n\n # Doesn't count mines adjacent to mine tiles.\n if self.board[row][col].category == Tiles.mine:\n continue\n mines = 0\n\n # Calculates number of mines surrounding each tile.\n for i in [row-1, row, row+1]:\n for j in [col-1, col, col+1]:\n if (self.valid_tile(i, j) and self.board[i][j].category == Tiles.mine):\n mines += 1\n \n # Sets each game board tile's mine proximity number.\n self.board[row][col] = Tiles(row, col, str(mines))",
"def new_tile(self):\n # replace with your code\n pass",
"def _set_mine(self,index):\n game.get_cell(index).set_mine() #set current index as mine\n game.add_mine(index) #add index to mine_index\n\n # add its neighbor's neighbor_num \n temp_r=index/self._col_num\n temp_c=index%self._col_num\n shift=[[temp_r+dr,temp_c+dc] for dr in self.shifts for dc in self.shifts\n if [temp_r+dr,temp_c+dc]!=[temp_r,temp_c]\n and temp_r+dr in range(0,self._row_num)\n and temp_c+dc in range(0,self._col_num)]\n for s in shift:\n game.get_cell(s[0]*self._col_num+s[1]).add_neighbor()",
"def __init__minefield__(self):\n # Creates random locations of mines according to the size of the game board.\n mines = random.sample(range(0, self.rows * self.cols), self.mines)\n \n # Uses a helper method to initialize tile categories: mine or zero.\n return [[Tiles(i, j, self.create_tile(mines, i, j)) for j in range(self.cols)] for i in range(self.rows)]",
"def set_tile(self, row, col, value):\n # replace with your code\n pass",
"def new_tile(self):\r\n count = 0\r\n tot_count = self.get_grid_width() * self.get_grid_height()\r\n\r\n while count < 2 and tot_count > 0:\r\n # my_list = 4 10% of the time and a 2 90%\r\n my_list = [4] * 10 + [2] * 90\r\n new_tile = random.choice(my_list)\r\n\r\n # Selects a random number from 0 to width * height -1\r\n\r\n spot = random.randint(0, self._grid_height * self._grid_width - 1)\r\n\r\n # sets location to random selection from spot\r\n loc = [spot / self._grid_width, spot % self._grid_width]\r\n # if loc is empty ( == 0 ) sets number, else repeats process.\r\n\r\n if self._board[loc[0]][loc[1]] == 0:\r\n # sets radom selected board tile to new_tile number\r\n self._board[loc[0]][loc[1]] = new_tile\r\n count += 1\r\n tot_count -= 1",
"def new_tile(self):\n rowm, colm = self.get_ava_index()\n value = 2 if random() <= 0.90 else 4\n self.set_tile(rowm, colm, value)\n print rowm,colm,value",
"def new_tile(self):\n # replace with your code\n empty_list = []\n counter_1 = 0\n for _ in self._grid:\n counter_2 = 0\n line = _\n for blank in line:\n if blank == 0:\n blank_tile = (counter_1, counter_2)\n empty_list.append(blank_tile)\n counter_2 += 1\n else:\n counter_2 += 1\n counter_1 += 1\n #print empty_list\n \n self._tile = empty_list[random.randrange(len(empty_list))]\n \n value = [2,2,2,2,2,2,2,2,2,4]\n tile_value = value[random.randint(0,9)]\n \n self.set_tile(self._tile[0], self._tile[1], tile_value)",
"def adjust_minefield(self, row, col):\n # Iterates through the user selected 3x3 grid.\n for i in [row-1, row, row+1]:\n for j in [col-1, col, col+1]:\n\n # if the tile is valid and it contains a mine.\n if self.valid_tile(i, j) and self.board[i][j].category == Tiles.mine:\n random_i = random.randint(0, self.rows - 1)\n random_j = random.randint(0, self.cols - 1)\n\n # Searches for locations to place the mine outside of the starting position's adjacent tiles.\n while self.board[random_i][random_j].category == Tiles.mine or (abs(row-random_i) <= 1 and abs(col-random_j) <= 1):\n random_i = random.randint(0, self.rows - 1)\n random_j = random.randint(0, self.cols - 1)\n \n # Places the mine in a valid random location on the game board.\n self.board[random_i][random_j] = Tiles(random_i, random_j, Tiles.mine)\n\n # All mines removed from starting position thus set the tile to zero.\n self.board[i][j] = Tiles(i, j, Tiles.zero)",
"def set_tile(self, row, col, value):\n # replace with your code\n self._grid_2048[row][col] = value",
"def new_tile(self):\r\n # check if is zero or not\r\n new_tile_added = False\r\n # a list to 2 90% of the time and 4 10% of the time\r\n new_tile_list = [2,2,2,2,2,2,2,2,2,4]\r\n counter = 0\r\n while not new_tile_added:\r\n row_position = random.randrange(0,self.grid_height)\r\n col_position = random.randrange(0,self.grid_width)\r\n if self.grid[row_position][col_position] == 0:\r\n self.grid[row_position][col_position] = random.choice(new_tile_list)\r\n new_tile_added = True\r\n if counter > self.grid_width * self.grid_height:\r\n print 'you failed'\r\n break\r\n\r\n counter +=1",
"def replay(self):\n self.shown = False\n self.mine = False\n self.flag = False\n self.inPlay = True\n self.count = 0\n self.numFlags = 0\n self.configure(image=Tile.images[10])",
"def setTile(self, cell, tile):\n assert isinstance(cell, tuple)\n cellx, celly = cell\n\n if cellx < 0 or cellx > self.map_array.shape[0]-1 or celly < 0 or celly > self.map_array.shape[1]-1:\n return\n\n if self.tile_dict.get((cellx, celly)):\n self.canvas.delete(self.tile_dict[(cellx, celly)])\n\n if tile:\n self.map_array[cellx, celly] = tile.tid\n if tile.tid == 0.0:\n return\n map_posx, map_posy = iso(cellx * self.cell_width, celly * self.cell_height)\n image = self.main.main_tilelist.images[tile.tid]\n self.tile_dict[(cellx, celly)] = self.canvas.create_image(map_posx, map_posy, image=image, anchor=tk.N)",
"def set_tile(self, row, column, tile_number):\n \n current_tile = self.get_tile(row, column)\n \n bits_to_shift = tile_offsets[row][column]\n new_mask = tile_number << bits_to_shift\n old_mask = current_tile << bits_to_shift\n self.tiles = self.tiles ^ old_mask\n self.tiles = self.tiles ^ new_mask",
"def new_tile(self):\n \n # get random corordinates for new tile\n row = random.randint(0,self._grid_width)\n col = random.randint(0,self._grid_height)\n # keeps generating random tile corordinates for non-empty tile\n while self.get_tile(row,col) != 0:\n row = random.randint(0,self._grid_width)\n col = random.randint(0,self._grid_height)\n \n # get random index of new tile value\n freq = random.randint(0,9)\n if freq == 9:\n self.set_tile(row, col, 4)\n else:\n self.set_tile(row, col, 2)",
"def changeTile (self, posY, posX, tile=\"t\"):\r\n self.grid[posY][posX] = tile",
"def set_tile(self, row, col, value):\r\n # replace with your code\r\n self._grid_tile[row][col] = value",
"def set_tile(self, row, col, value):\n # replace with your code\n self.grid[row][col] = value",
"def __init__(self, mine_count=BOARD_DIM[\"MINE_COUNT\"], width=BOARD_DIM[\"BOARD_WIDTH\"],\n height=BOARD_DIM[\"BOARD_HEIGHT\"]):\n if height is None:\n height = width\n if mine_count > height * width:\n raise TooManyMineException\n self.height = height\n self.width = width\n self.mine_count = mine_count\n self.chessboard = [[Point(x, y) for x in range(width)] for y in range(height)]\n self.mines = [-1 for z in range(mine_count)]\n self.initialise()",
"def set_our_tile(self, x, y, value):\n\t\tif x >= 0 and x < self.w and y >= 0 and y < self.h:\n\t\t\tself.our_tiles[x][y] = value",
"def set_tile(self, row, col, value):\r\n # replace with your code\r\n self._cells[row][col] = value",
"def new_tile(self):\n zero_list = []\n zero_cell = ()\n # self._cells = [[0 for col in range(self._grid_width)] for row in range(self._grid_height)]\n for row in range(self._grid_height):\n for col in range(self._grid_width):\n if self._cells[row][col] == 0:\n zero_cell = (row, col)\n zero_list.append(zero_cell)\n if len(zero_list) > 0:\n chance = random.randrange(0,10)\n cell_idx = random.randrange(len(zero_list))\n if chance == 9:\n self._cells[zero_list[cell_idx][0]][zero_list[cell_idx][1]] = 4\n else:\n self._cells[zero_list[cell_idx][0]][zero_list[cell_idx][1]] = 2\n else:\n print(\"You lost! Better luck next time!\")",
"def set_tile(self, row, col, value):\n # replace with your code\n self._cells[row][col] = value",
"def add_mines(self):\n for x, y in sample(list(itertools.product(range(self.width), range(self.height))), self.num_mines):\n self.grid[y][x] = self.mine",
"def set_tile(self, row, col, value):\n # replace with your code\n self._grid[row][col] = value",
"def set_tile(self, row, col, value):\n # replace with your code\n self._grid[row][col] = value",
"def new_tile(self):\n # Getting the list of positions of empty tiles\n indices_list = [(i, j) for i, l in enumerate(self._grid)\n for j in xrange(len(l)) if not l[j]]\n \n # Filling the the empty tile with a 2 or a 4\n if indices_list:\n self.set_tile(*choice(indices_list),\n value = 2 if random() <.9 else 4)",
"def set_tile(self, row, col, value):\r\n self.grid[row][col] = value",
"def new(self):\n #groups for drawing\n self.moving_sprites = pg.sprite.LayeredUpdates() \n self.static_sprites = pg.sprite.LayeredUpdates()\n #other groups\n self.walls = pg.sprite.Group()\n self.teleports = pg.sprite.Group() \n self.win = pg.sprite.Group() \n self.threat = pg.sprite.Group()\n self.hearts= pg.sprite.Group()\n \n for tile_object in self.map.tmxdata.objects:\n if tile_object.name == \"player\":\n self.player = Player(self, tile_object.x, tile_object.y)\n if tile_object.name == \"monster\":\n self.monster = Monster(self, tile_object.x, tile_object.y)\n if tile_object.name == \"wall\":\n Obstacle(self, tile_object.x, tile_object.y, tile_object.width, tile_object.height)\n if tile_object.name == \"mirror\":\n Mirror(self, tile_object.x, tile_object.y, tile_object.width, tile_object.height, self.destinations)\n if tile_object.name == \"pentagram\":\n self.goal=Pentagram(self, tile_object.x, tile_object.y, tile_object.width, tile_object.height)\n\n self.camera = Camera(self.map.width, self.map.height)\n\n #static sprites\n self.flashlight=Flashlight(self, int(WIDTH/2), int(HEIGHT/2))\n self.darkness=Darkness(self, int(WIDTH/2), int(HEIGHT/2))\n if self.minimap_name != None:\n self.minimap=Minimap(self, self.minimap_name)\n for i in range(int(PLAYERHEALTH/10)):\n Heart(self, 726-37*(2-i), 20)\n self.battery= Battery(self, 726, 52)\n self.draw_debug = False\n\n self.teleport_list=[]\n for tele in self.teleports:\n self.teleport_list.append(tele)",
"def addTiles(self, rows, cols, minecount):\n for row in range(rows):\n self.tiles.append([])\n for col in range(cols):\n tile = Tile(self, row, col)\n tile.grid(row=row+1, column=col)\n self.tiles[row].append(tile)\n #left click listeners\n tile.bind('<ButtonPress-1>', self.pressTile)\n tile.bind('<ButtonRelease-1>', self.showTile)\n #middle click listeners\n tile.bind('<ButtonPress-2>', self.pressAdjTiles)\n tile.bind('<ButtonRelease-2>', self.showAdjTiles)\n #right click listeners\n tile.bind('<ButtonPress-3>', self.pressTile)\n tile.bind('<ButtonRelease-3>', self.toggleFlag)"
] | [
"0.67524004",
"0.6709142",
"0.66603965",
"0.6585605",
"0.64893264",
"0.64514196",
"0.63818496",
"0.63528097",
"0.63202614",
"0.62961346",
"0.62728107",
"0.62516195",
"0.62442756",
"0.6236275",
"0.62213844",
"0.62194306",
"0.6208039",
"0.61843634",
"0.6172285",
"0.61482275",
"0.6140236",
"0.61372274",
"0.6114426",
"0.6113802",
"0.6101054",
"0.6101054",
"0.60969263",
"0.6094662",
"0.6091975",
"0.60694903"
] | 0.7026651 | 0 |
Changes the image to the pressed button image, Tile.images[0]. Only works for Tiles that are in play (self.inPlay == True) | def buttonPress(self):
if self.inPlay and not self.shown:
self.configure(image = Tile.images[0]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_imgs(self):\n\n for b in self.gamebuttons:\n b.update_img()\n self.start_but.update_img()",
"def boutton_press(self,a,img):\r\n x,y=self.can.coords(self.button[a])\r\n self.can.delete(self.button[a])\r\n self.button[a]=self.creat_image(img,x,y)",
"def change_start_button(event):\n img_start_button_mouse_over = PhotoImage(\n file=r\"C:\\Users\\Owner\\PycharmProjects\\Module14\\buttons\\start_new_game_raised_active.png\")\n lbl_start_game.config(image=img_start_button_mouse_over)\n lbl_start_game.image = img_start_button_mouse_over\n lbl_start_game.grid(row=8, column=1, columnspan=8, pady=6)",
"def pressTile(self, event):\n clickedTile = event.widget\n if clickedTile.isInPlay(): self.changeSmile(2)\n if not clickedTile.isFlagged():\n clickedTile.buttonPress()\n if not self.minesArmed and event.num == 1:\n self.setUpBombs(event)",
"def release_click_start_button(event):\n img_start_button_release_click = PhotoImage(\n file=r\"C:\\Users\\Owner\\PycharmProjects\\Module14\\buttons\\start_new_game_raised_active.png\")\n lbl_start_game.config(image=img_start_button_release_click)\n lbl_start_game.image = img_start_button_release_click\n lbl_start_game.grid(row=8, column=1, columnspan=8, pady=6)",
"def change_back_start_button(event):\n img_start_button_mouse_over = PhotoImage(\n file=r\"C:\\Users\\Owner\\PycharmProjects\\Module14\\buttons\\start_new_game_raised_normal.png\")\n lbl_start_game.config(image=img_start_button_mouse_over)\n lbl_start_game.image = img_start_button_mouse_over\n lbl_start_game.grid(row=8, column=1, columnspan=8, pady=6)",
"def _update_image(self):\n button = self.buttons.checkedButton()\n if button is None:\n return\n\n button.click()",
"def bone(self):\n root = tkinter.Toplevel()\n button = ttk.Button(root)\n photo = tkinter.PhotoImage(file='C:/Users/shepheam/RobotTeamProject/assets/images/dog_treats.gif')\n button.image = photo\n button.grid()\n button['command'] = lambda: print('Good boy!')",
"def on_click_start_button(event):\n img_start_button_on_click = PhotoImage(\n file=r\"C:\\Users\\Owner\\PycharmProjects\\Module14\\buttons\\start_new_game_pressed_normal.png\")\n lbl_start_game.config(image=img_start_button_on_click)\n lbl_start_game.image = img_start_button_on_click\n lbl_start_game.grid(row=8, column=1, columnspan=8, pady=8) # Uses more padding b/c the image is smaller\n place_holder.destroy() # Removes the place holder",
"def __initBtnImages__(self) -> None:\n self._images = [\n pygame.Surface((self.rect.width, self.rect.height)),\n pygame.Surface((self.rect.width, self.rect.height)),\n ]\n self._images[ButtonImages.DEFAULT_IMAGE.value].fill(self.notClickedBtnColor)\n self._images[ButtonImages.CLICKING_IMAGE.value].fill(self.clickedBtnColor)\n self.textSurface = self.font.render(self.text, False, (0, 0, 0))\n self.textSurfaceDest = (self.rect.centerx - (self.textSurface.get_width() / 2),\n self.rect.centery - (self.textSurface.get_height() / 2))\n self._images[0].blit(self.textSurface, self.textSurfaceDest)\n self._images[1].blit(self.textSurface, self.textSurfaceDest)",
"def replay(self):\n self.shown = False\n self.mine = False\n self.flag = False\n self.inPlay = True\n self.count = 0\n self.numFlags = 0\n self.configure(image=Tile.images[10])",
"def boutton(self,img1,x,y):\r\n self.button.append(self.creat_image(img1,x,y))",
"def pressAdjTiles(self, event):\n clickedTile = event.widget\n if clickedTile.isInPlay(): self.changeSmile(2)\n for adjTile in self.getAdjacentTiles(clickedTile.row, clickedTile.col):\n if not adjTile.isFlagged(): adjTile.buttonPress()",
"def __init__(self,size,tilelist,buttonflag):\n\n # Initialize the screen class\n BaseScreen.__init__(self,size)\n\n # Create the list of tile objects and draw them on the screen\n self.tilelist = tilelist\n xlen = self.tilelist[0][0].image.get_width()\n ylen = self.tilelist[0][0].image.get_height()\n for x in range(0,size[0],xlen):\n for y in range(0,size[1],ylen):\n try:\n self.image.blit(self.tilelist[x // xlen][y // ylen].image,(x,y))\n self.tilelist[x // xlen][y // ylen].set_position((x,y))\n except:\n pass\n\n # Set up an empty button list and the buttonflag\n self.buttonlist = []\n self.buttonflag = buttonflag",
"def changeImageTab(self, idTag):\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(os.path.join(GG.utils.PATH_EDITOR_INTERFACE, self.activeOption + \"_back.png\"))\n self.imagesTag[self.activeOption].picture = ocempgui.draw.Image.load_image(imgPath)\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(os.path.join(GG.utils.PATH_EDITOR_INTERFACE, idTag + \"_front.png\"))\n self.imagesTag[idTag].picture = ocempgui.draw.Image.load_image(imgPath)",
"def change_button_img_to_null(self, null_img=None):\n null_img = self.null_img\n self.button1.configure(image=null_img)\n self.button2.configure(image=null_img)\n self.button3.configure(image=null_img)\n\n self.button4.configure(image=null_img)\n self.button5.configure(image=null_img)\n self.button6.configure(image=null_img)\n\n self.button7.configure(image=null_img)\n self.button8.configure(image=null_img)\n self.button9.configure(image=null_img)",
"def setBtnIcon(self):\n self.setIcon(QtGui.QIcon(self.movie.currentPixmap()))\n self.setIconSize(QtCore.QSize(self.size[0], self.size[1]))",
"def clickable(self, event):\n tile = self.canvas.find_closest(event.x, event.y)\n # check if tile is clickable, and already fill color\n if self.is_clickable(tile) and self.canvas.itemcget(tile, \"fill\") != \\\n self.color:\n self.num_clicks += 1\n cords = self.canvas.coords(tile)\n self.canvas.itemconfigure(tile, tag=\"selected\")\n self.pic.append(tkinter.PhotoImage(file=os.path.join(self.folder,\n self.new_list[\n tile[\n 0] - 1])))\n self.image_id.append(self.canvas.create_image(\n (cords[0] + cords[2]) / 2,\n (cords[1] + cords[3]) / 2,\n image=self.pic[-1]))\n self.click_tiles.append(tile)",
"def changeSmile(self, num, event=None):\n self.smileButton.configure(image=self.images[num])",
"def __init__(self, posX, posY, normal, hovered, pressed, command) :\n self.posX = posX\n self.posY = posY\n\n self.imgNormal = pygame.image.load(normal).convert_alpha()\n self.posButton = self.imgNormal.get_rect()\n self.imgPressed = pygame.image.load(pressed).convert_alpha()\n self.imgHovered = pygame.image.load(hovered).convert_alpha()\n\n self.images = (self.imgNormal, self.imgPressed, self.imgHovered)\n\n self.posButton.x = self.posX\n self.posButton.y = self.posY\n\n self.blitImage = self.imgNormal\n\n self.command = command\n\n self.buttonSize = self.imgNormal.get_size()",
"def get_sound_button_img(self):\n return sound_button_images[self.game_data.is_sound_on()][False]",
"def change_image(self):\n image_lst = [\"images/hangman01.png\",\n \"images/hangman02.png\",\n \"images/hangman03.png\",\n \"images/hangman04.png\",\n \"images/hangman05.png\",\n \"images/hangman06.png\",\n \"images/hangman07.png\"]\n self.strikes = self.strikes % len(image_lst)\n self.photo = PhotoImage(file=image_lst[self.strikes])\n self.canvas.create_image(340, 240, image=self.photo)",
"def release_click_alphabet_button(event):\n another_image = image_abc_active[abc_index]\n a_label = buttons_abc[abc_index]\n a_label.config(image=another_image)\n a_label.image = another_image\n a_label.grid(row=ALPHA_ROW, column=ALPHA_COL, pady=2)",
"def setFlag(self):\n if self.inPlay and not self.shown:\n self.flag = not(self.flag)\n image_index = 11 if self.flag else 10\n self.configure(image = Tile.images[image_index])\n return 1 if self.flag else -1\n return 0",
"def showTile(self, event):\n clicked = event.widget\n if clicked.isInPlay():\n self.changeSmile(1)\n returned = clicked.show()\n if returned == 1 and clicked.isZero():\n returned += self.cascadeShow(clicked)\n self.checkEnd(returned)",
"def __init__(self,file,size):\n\n # Initialize button class and set the picture attribute of the instance\n Button.__init__(self,1,file,(0,0),resize = size)\n self.pic = pygame.Surface(self.image.get_size())\n self.pic.blit(self.image,(0,0))\n\n # Set up the shades dictionary. The first item determines if the shade\n # is on and the second item is the surface containing the shade.\n self.shades = {}\n\n # Create blue and red shades for the tile\n self.initialize_shade('blue',(0,0,255),150)\n self.initialize_shade('red',(255,0,0),150)",
"def update(self):\n frame = str(self.image_number//10)\n if self.image_number < 30: # Not yet on the tenth frame\n self.image_number += 1\n else: # Reset back to 0\n self.image_number = 0\n\n image_location = os.path.join(\"assets\", \"player\" + frame + \".png\") # Get image path\n self.image = pygame.image.load(image_location).convert_alpha() # Load image\n\n # Keyboard events\n keys_pressed = pygame.key.get_pressed()\n if keys_pressed[pygame.K_UP]:\n self.move(0, -5)\n if keys_pressed[pygame.K_LEFT]:\n self.move(-5, 0)\n if keys_pressed[pygame.K_RIGHT]:\n self.move(5, 0)\n if keys_pressed[pygame.K_DOWN]:\n self.move(0, 5)\n\n # Mouse events\n mouse_pos = pygame.mouse.get_pos() # Get position of mouse as a tuple representing the\n # (x, y) coordinate\n\n mouse_buttons = pygame.mouse.get_pressed()\n if mouse_buttons[0]: # If left mouse pressed\n self.teleport(mouse_pos[0], mouse_pos[1])\n if mouse_buttons[2]: # If right mouse pressed\n self.teleport(mouse_pos[0], mouse_pos[1])",
"def set_sprite(self, image):\n self.current_sprite = image\n self.draw_alpha()",
"def showBtnImg(*args, **kwargs):\n\targs[0].get_image().show()",
"def update(self):\n self.imagecounter +=1\n if self.imagecounter > 7:\n self.imagecounter = 0\n self.image = pygame.image.load(self.pictures[self.imagecounter])\n self.rect = self.image.get_rect()\n self.rect.left = self.x\n self.rect.top = self.y"
] | [
"0.6658446",
"0.65949404",
"0.6556653",
"0.6481363",
"0.64126843",
"0.6371399",
"0.6321544",
"0.63166",
"0.61455536",
"0.60779876",
"0.60149246",
"0.5937855",
"0.593545",
"0.59137666",
"0.5902924",
"0.5878515",
"0.58688813",
"0.5859732",
"0.5858953",
"0.58473176",
"0.5811791",
"0.58109665",
"0.57849497",
"0.5782984",
"0.57612485",
"0.5726066",
"0.57164574",
"0.5677657",
"0.5658106",
"0.5655195"
] | 0.78256506 | 0 |
Toggles the flag on the tile if it is still unknown and in play. self.flag boolean attribute is toggled. Returns 1 if flag is toggled on and -1 if flag is toggled off. Returns 0 if flag was not toggled | def setFlag(self):
if self.inPlay and not self.shown:
self.flag = not(self.flag)
image_index = 11 if self.flag else 10
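        # Tile.images[11] is the flag icon, Tile.images[10] the plain covered tile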
self.configure(image = Tile.images[image_index])
return 1 if self.flag else -1
return 0 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def toggleFlag(self, event): \n clicked = event.widget\n if clicked.isInPlay(): self.changeSmile(1)\n value = clicked.setFlag()\n for adjTile in self.getAdjacentTiles(clicked.row, clicked.col):\n adjTile.numFlags += value\n self.numFlags += value\n self.flagLabel.configure(text=\"Flags: \"+str(self.numFlags))",
"def SetToggle(self, flag):\n\n self.up = not flag\n self.Refresh()",
"def flag(self, i, j):\n # Does not allow starting a game with a flag\n if not self.is_game_over and self.is_initialized:\n if not self.revealed[i, j]:\n self.flags[i, j] = not self.flags[i, j]\n self.flags_pts.set_data(*np.where(self.flags)[::-1])\n self.title_txt.set_text('{}/{}'.format(np.count_nonzero(self.flags), self.n_mines))\n self.refresh_canvas()",
"def toggle_flag(self, row: int, column: int):\n if self.flags is None:\n self.flags = []\n cell = self.board[row][column]\n flag_pos = row * self.columns + column\n if cell[\"f\"] == 1:\n self.flags.pop(self.flags.index(flag_pos))\n elif cell[\"f\"] == 0:\n self.flags.append(flag_pos)\n cell[\"f\"] = (cell[\"f\"] + 1) % 3\n self.save()\n return cell[\"f\"], len(self.flags)",
"def flip_tile(self, tile, flag=False):\n selected_tile = self.stack[tile]\n\n if flag == True:\n if self.flags_remaining == 0:\n raise ValueError(\"No flags left\")\n else:\n selected_tile['flag']=True\n self.flags_remaining -= 1\n\n else:\n if selected_tile['flag'] == True:\n selected_tile['flag'] == False\n self.flags_remaining += 1\n\n else:\n self.stack[tile]['flip'] = True\n self.tiles_remaining -= 1\n if selected_tile['value'] == 'bomb':\n self.end_game()\n elif selected_tile['value'] == 0:\n self.blank_tile_cascade(tile)\n self.check_win()",
"def getFlag(self, flag) -> bool:\n ...",
"def isFlagged(self):\n return self.flag",
"def _action_toggle(self, flag):\n if flag:\n return {\"toggle\": \"ON\"}\n else:\n return {\"toggle\": \"OFF\"}",
"def __toggle(self,x):\n\t\tif x == 1:\n\t\t\treturn 0\n\t\telse:\n\t\t\treturn 1",
"def get_flag(self):\n if self.flag is None:\n # Find the flag in the game objects list\n for obj in self.game_objects_list:\n if isinstance(obj, gameobjects.Flag):\n self.flag = obj\n break\n return self.flag",
"def toggle_flag(self, bit):\n\n self.fl = self.fl ^ (1 << bit)",
"def _flag():\n current_flag = _flag.flag\n _flag.flag <<= 1\n return current_flag",
"def toggle_article_flag(self, article: Article) -> None:\n article.flag = not article.flag\n with self._sqlite_connection:\n self._sqlite_connection.execute('''UPDATE articles SET flag = ? WHERE identifier = ? and feed_id = ?''', [article.flag, article.identifier, article.feed_id])",
"def check(self):\n return self.tile==\"\"",
"def flag_set(self, flag):\n if self.flags & flag != 0:\n return True\n else:\n return False",
"def flag():\n pass",
"def check_flag(self):\n return self._flag is 0 or self._flag is 16",
"def toggle_flag(self, loc: tuple[int, int]) -> None:\n if self.game_over or self.field[loc].is_naked:\n return\n\n if self.field[loc].is_flagged:\n self.field[loc].un_flag()\n self.mines_left += 1\n else:\n self.field[loc].flag()\n self.mines_left -= 1\n\n if self.auto_solving.get():\n block = Block(self.field, loc)\n useful_neighbors = {neighbor for neighbor in block.naked_neighbors\n if Block(self.field, neighbor).unknown_neighbors}\n [self.hyper_queue.remove(cell) for cell in useful_neighbors]\n self.auto_queue.add_batch(useful_neighbors,\n emphasis=self.emphasis[\"add_batch\"],\n color=\"new_auto\")\n self._auto_spark()",
"def toggle_bit(bit) -> int:\n\treturn 1 if bit == 0 else 0",
"def set_flag(self, new):\n self.flag = new",
"def flag_change(self, flag):\n\n if flag == 0:\n self.setText(\"全部暂停\")\n else:\n self.setText(\"全部继续\")\n self.flag = flag",
"def toggle(self):",
"def test(self, grid, flag):\n x = self.x+SPEED_X[flag]\n y = self.y+SPEED_Y[flag]\n return 0 <= x < self.n and 0 <= y < self.n and grid[y][x] == 1",
"def ison(self):\n return bool(self.pin.state) if self.pinishigh else not bool(self.pin.state)",
"def replay(self):\n self.shown = False\n self.mine = False\n self.flag = False\n self.inPlay = True\n self.count = 0\n self.numFlags = 0\n self.configure(image=Tile.images[10])",
"def toggle(self) -> None:",
"def toggle(self) -> None:",
"def toggle_flag_slot(self, pick):\n self._check_game_over()\n self._validate_pick(pick)\n\n self.board.toggle_flag_slot(pick)",
"def toggle(self) -> None:\n ...",
"def GetToggle(self):\n\n return not self.up"
] | [
"0.7438908",
"0.676971",
"0.62580466",
"0.6238106",
"0.6185206",
"0.6137376",
"0.60840374",
"0.6058401",
"0.6057641",
"0.5971555",
"0.59167653",
"0.5901829",
"0.58407146",
"0.58136433",
"0.5802496",
"0.5797119",
"0.57912606",
"0.57860863",
"0.56620306",
"0.5659031",
"0.56172997",
"0.5594932",
"0.55931604",
"0.5563483",
"0.5547223",
"0.5545641",
"0.5545641",
"0.5537686",
"0.5524598",
"0.55168366"
] | 0.7420534 | 1 |
A function used to set self.count; used by Board | def setCount(self, num):
self.count=num | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_count(self):\n pass # Do nothing",
"def update_count(self):\n pass",
"def __call__(self, *args):\n self.count = self.count + 1",
"def _update_count(self):\n self._count = len(self._items)",
"def add_count(self):\n self.count += 1",
"def set_count(c):\n global count\n count = c",
"def set_count(self, count):\n self._count = count",
"def __init__(self):\n self.count = 1",
"def setIterationCount(self, newIterationCount):\n \n pass",
"def __init__(self):\n self.count = 0",
"def increment_counter(self) -> None:",
"def count(self, count: int) -> None:\n self._count = count",
"def inc( self ):\n self.count += 1",
"def timesGoBy(self):\n self.wcount += 1",
"def __init__(self):\n\n self.count = 0",
"def count(self, count):\n\n self._count = count",
"def count(self, count):\n\n self._count = count",
"def count(self, count):\n\n self._count = count",
"def count(self, count):\n\n self._count = count",
"def reset_count(self):\n self.count = 0",
"def increase_count(self, number=1):\n self.count += number",
"def count(self, count: int):\n\n self._count = count",
"def increment_count(self):\n self.image_count +=1\n if self.image_count > self.max_count:\n self.image_count = self.count_start # overflow",
"def setMancount(self, cnt):\n self.__mancount=cnt",
"def count(self, val):\n raise ValueError('cannot set \\'count\\' in class KeyTracker')",
"def make_count_change():\n \"*** YOUR CODE HERE ***\"",
"def __init__(self):\n self.counter = 0",
"def inc(self):\n \n self.count += 1",
"def plant(self, xcord, ycord, g_board, count):\n for i in range(xcord, xcord + 2):\n for j in range(ycord, ycord + self.size):\n g_board[i][j] = count",
"def increment(self, count_name):\n prop_name = 'count_' + count_name\n setattr(self, prop_name, getattr(self, prop_name, 0) + 1)"
] | [
"0.71770173",
"0.71657187",
"0.69234514",
"0.6896584",
"0.6862326",
"0.68291837",
"0.67461693",
"0.67281395",
"0.6694756",
"0.66242033",
"0.6550625",
"0.6524782",
"0.65234715",
"0.6470743",
"0.64682573",
"0.64680815",
"0.64680815",
"0.64680815",
"0.64680815",
"0.6439154",
"0.6420215",
"0.6389256",
"0.63822204",
"0.63638026",
"0.6351821",
"0.6349869",
"0.62558144",
"0.6250328",
"0.6231123",
"0.61956006"
] | 0.7432971 | 0 |
Removes the self.button myLabel() that has been hiding the value/mine. Has no effect on Tiles that are already being shown. Calls self.showAround() to cascade if self.count == 0. Returns -1 if one or more of the revealed Tiles is a Mine. Returns the number of Tiles revealed by it as an Integer. | def show(self):
if not self.shown and not self.flag:
self.shown = True
self.configure(image=Tile.images[self.count])
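        # -1 signals that a mine was revealed; 1 counts a single safe tile revealed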
return -1 if self.mine else 1
return 0 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def showAdjTiles(self,event):\n clicked = event.widget\n if clicked.isInPlay():\n self.changeSmile(1)\n #if tile is Safe, reveal adjacent tiles and cascade if needed\n if clicked.isSafe():\n returned = 0\n for adjTile in self.getAdjacentTiles(clicked.row, clicked.col):\n value = adjTile.show()\n if value == 1 and adjTile.isZero():\n value += self.cascadeShow(adjTile)\n returned = -1 if (value == -1 or returned == -1) else value+returned\n self.checkEnd(returned)\n #if unsafe, return adjacent buttons to unpressed images\n else:\n for adjTile in self.getAdjacentTiles(clicked.row, clicked.col):\n if not adjTile.isFlagged() and not adjTile.isShown():\n adjTile.configure(image=Tile.images[10])",
"def showTile(self, event):\n clicked = event.widget\n if clicked.isInPlay():\n self.changeSmile(1)\n returned = clicked.show()\n if returned == 1 and clicked.isZero():\n returned += self.cascadeShow(clicked)\n self.checkEnd(returned)",
"def get_num_hidden(self) -> int:\n return 0",
"def hide_best_distance():\n global canvas, list_label_distance, data, list_position, list_best_label_distance\n if len(list_best_label_distance) != 0:\n if button_off_best_label_distance['text'] == \"Hide best distance\": # If current status is show and want to hide\n for i in range(len(list_best_label_distance)):\n list_best_label_distance[i].destroy() # Delete all label distance from canvas\n button_off_best_label_distance.configure(\n bg=button_off_color, text=\"Show best distance\") # Change status of button\n else: # If current status is hide and want to show\n # Draw new all distance\n # list_label_distance = draw_distance(data, canvas, list_position, 0.1) # Draw new all distance\n # if len(list_best_label_distance) != 0: # If best line is not none, draw best distance\n list_best_label_distance = draw_best_distance(best_line[\"path\"], data, canvas, list_position, 0.1)\n button_off_best_label_distance.configure(bg=button_on_color, text=\"Hide best distance\")",
"def hide_all_distance():\n global canvas, button_off_all_label_distance, list_label_distance, data, list_position, \\\n list_best_label_distance, button_off_best_label_distance\n if button_off_all_label_distance['text'] == \"Hide all distance\": # If current status is show and want to hide\n for i in range(len(list_label_distance)):\n list_label_distance[i].destroy() # Delete all label distance from canvas\n button_off_all_label_distance.configure(\n bg=button_off_color, text=\"Show all distance\") # Change status of button\n else: # If current status is hide and want to show\n list_label_distance = draw_distance(data, canvas, list_position, 0.1) # Draw new all distance\n # If best line is not none, draw best line\n if len(list_best_label_distance) != 0 and button_off_best_label_distance['text'] == 'Hide best distance':\n for i in range(len(list_best_label_distance)):\n list_best_label_distance[i].destroy()\n list_best_label_distance = []\n list_best_label_distance = draw_best_distance(best_line[\"path\"], data, canvas, list_position, 0.1)\n button_off_all_label_distance.configure(bg=button_on_color, text=\"Hide all distance\")",
"def toggleFlag(self, event): \n clicked = event.widget\n if clicked.isInPlay(): self.changeSmile(1)\n value = clicked.setFlag()\n for adjTile in self.getAdjacentTiles(clicked.row, clicked.col):\n adjTile.numFlags += value\n self.numFlags += value\n self.flagLabel.configure(text=\"Flags: \"+str(self.numFlags))",
"def revealBombs(self, win):\n for row in self.tiles:\n for tile in row:\n tile.inPlay = False\n if tile.isMine():\n if win:\n #flag non-flagged mines after winning\n if not tile.isFlagged():\n tile.configure(image=Tile.images[11])\n self.numFlags += 1\n else:\n #show unexploded mines after losing \n if not tile.isShown():\n tile.configure(image=Tile.images[9])\n #if incorrectly flagged, mark as such \n elif tile.isFlagged():\n tile.configure(image=Tile.images[12])",
"def show_hud(self):\n # Button positioners\n hl_slot, hr_slot = pt.make_bbox_positioners(\n y=0.02, w=0.15, h=0.063, xpad=0.02, startx=0, stopx=1\n )\n # hack make a second bbox positioner to get different sized buttons on #\n # the left\n hl_slot2, hr_slot2 = pt.make_bbox_positioners(\n y=0.02, w=0.08, h=0.05, xpad=0.015, startx=0, stopx=1\n )\n\n def next_rect(accum=[-1]):\n accum[0] += 1\n return hr_slot(accum[0])\n\n def next_rect2(accum=[-1]):\n accum[0] += 1\n return hl_slot2(accum[0])\n\n ibs = self.ibs\n name1, name2 = self.name1, self.name2\n nid1_is_known = not ibs.is_nid_unknown(self.nid1)\n nid2_is_known = not ibs.is_nid_unknown(self.nid2)\n all_nid_list = ibs.get_annot_name_rowids(self.all_aid_list)\n is_unknown = ibs.is_nid_unknown(all_nid_list)\n is_name1 = [nid == self.nid1 for nid in all_nid_list]\n is_name2 = [nid == self.nid2 for nid in all_nid_list]\n\n # option to remove all names only if at least one name exists\n if not all(is_unknown):\n unname_all_text = 'remove all names'\n self.append_button(\n unname_all_text, callback=self.unname_all, rect=next_rect()\n )\n # option to merge all into a new name if all are unknown\n if all(is_unknown) and not nid1_is_known and not nid2_is_known:\n joinnew_text = 'match all (nonjunk)\\n to a new name'\n self.append_button(\n joinnew_text, callback=self.merge_nonjunk_into_new_name, rect=next_rect()\n )\n # option dismiss all and give new names to all nonjunk images\n if any(is_unknown):\n self.append_button(\n 'mark all unknowns\\nas not matching',\n callback=self.dismiss_all,\n rect=next_rect(),\n )\n # merges all into the first name\n if nid1_is_known and not all(is_name1):\n join1_text = 'match all to name1:\\n{name1}'.format(name1=name1)\n callback = functools.partial(self.merge_all_into_nid, self.nid1)\n self.append_button(join1_text, callback=callback, rect=next_rect())\n # merges all into the seoncd name\n if name1 != name2 and nid2_is_known and not all(is_name2):\n join2_text = 'match all to name2:\\n{name2}'.format(name2=name2)\n callback = functools.partial(self.merge_all_into_nid, self.nid2)\n self.append_button(join2_text, callback=callback, rect=next_rect())\n ###\n self.append_button('close', callback=self.close_, rect=next_rect2())\n if self.qres_callback is not None:\n self.append_button('review', callback=self.review, rect=next_rect2())\n self.append_button('reset', callback=self.reset_all_names, rect=next_rect2())\n self.dbname = ibs.get_dbname()\n self.vsstr = 'qaid%d-vs-aid%d' % (self.aid1, self.aid2)\n figtitle_fmt = \"\"\"\n Match Review Interface - {dbname}\n {match_text}:\n {vsstr}\n \"\"\"\n figtitle = figtitle_fmt.format(**self.__dict__) # sexy: using obj dict as fmtkw\n pt.set_figtitle(figtitle)",
"def lives_counter(self):\n count = 15\n for row in self.board:\n for column in row:\n if column == HITSHIP:\n count -= 1\n self.lives = count\n return self.lives",
"def remaining(self):\n\t\tmines = sum(1 for _ in self.get_mines())\n\t\tmarked = sum(1 for x in range(self.width)\n\t\t\t\t\t for y in range(self.height) if self.marks[x][y] == FLAG)\n\t\treturn mines - marked",
"def n_hidden(self):\n return self.__n_hidden",
"def get_num_hidden(self):\n return self.num_hidden",
"def game_over_remove_labels(game_over):\n if game_over >= 1:\n losing_lbl_one.grid_forget() # place left leg on the grid\n if game_over >= 2:\n losing_lbl_two.grid_forget() # place right leg on the grid\n if game_over >= 3:\n losing_lbl_three.grid_forget() # place chest on the grid\n if game_over >= 4:\n losing_lbl_four.grid_forget() # place left arm on the grid\n if game_over >= 5:\n losing_lbl_five.grid_forget() # place right arm on the grid\n if game_over >= 6: # GAME OVER\n losing_lbl_six.grid_forget() # place head on the grid\n messagebox.showerror(title=\"GAME OVER\", message=\"GAME OVER\\n\"\n \"Aw shucks, maybe next time :(\")\n play_again() # Asks if they'd like to play again",
"def revealed_suits_tiles(player, tiles_34):\n return _suits_tiles_helper(\n tiles_34, lambda _tile_34_index, _tiles_34: player.number_of_revealed_tiles(_tile_34_index, _tiles_34)\n )",
"def getNumCleanedTiles(self):\n counter = 0\n for tile in self.tiles:\n if self.tiles[tile] == 'clean':\n counter += 1\n return counter",
"def get_cells_to_hide(self, level):\n level = LEVEL[level]\n bottom = level[BOTTOM]\n top = level[TOP]\n return random.randint(bottom, top)",
"def process_tile_reveal(self, tile_reveal_result):\r\n\r\n self.num_of_hidden_non_mines_tiles -= tile_reveal_result.non_mines_uncovered\r\n if tile_reveal_result.hit_mine:\r\n self.lose_game(tile_reveal_result.mine_tiles)\r\n elif self.num_of_hidden_non_mines_tiles == 0:\r\n self.win_game()",
"def total_unrealised_pnl(self):\n return self.pos_handler.total_unrealised_pnl()",
"def is_mine(self):\n return self.has_label(MINE_LABEL)",
"def getNumCleanedTiles(self):\n tilesCopy = {}\n tilesCopy = self.tiles.copy()\n numCleanTiles = 0\n \n for posTupleKey, posVal in tilesCopy.items():\n if posVal == 1:\n numCleanTiles += 1\n return numCleanTiles\n #raise NotImplementedError",
"def cascadeShow(self, tile):\n value = 0\n for adjTile in self.getAdjacentTiles(tile.row, tile.col):\n returned = adjTile.show()\n value += returned\n if returned == 1 and adjTile.isZero():\n value += self.cascadeShow(adjTile)\n return value",
"def show_or_hide_buttons(label, score):\n # record a timestamp for the detected label\n # (skipping 'no sleeve shirt' to avoid miss detection)\n now = time.time()\n if score > 0.5 and label != 'no sleeve shirt':\n label_detected_times[label] = now\n\n # show or hide label buttons\n count = 0\n for l in labels:\n is_visible = label_buttons[l].winfo_ismapped()\n if is_visible:\n count = count + 1\n is_detected = now - label_detected_times[l] < 1.0 \\\n if l in label_detected_times else False\n if count < 2 and not is_visible and is_detected:\n label_buttons[l].pack(side=tk.TOP)\n elif is_visible and not is_detected:\n label_buttons[l].pack_forget()",
"def obstacle_count(self):\n found_something = False\n count = 0\n starting_postion = self.get_heading()\n self.right(primary=60, counter=60)\n time.sleep(0.5)\n while self.get_heading() != starting_postion:\n if self.read_distance() < 250 and not found_something:\n found_something = True\n count += 1\n print (\"I found something\")\n elif self.read_distance() > 250 and found_something:\n found_something = False\n print(\"I have a clear view\")\n self.stop()\n\n print(\"I have found this many things: %d\" % count)\n return count",
"def is_won(self):\n for tile in self:\n if not tile.is_mine and tile.visibility != 1:\n return False\n return True",
"def check_open(self, n_faces):\r\n count_used = Counter([item for sublist in self.tiles\r\n for item in sublist\r\n if item in self.get_borders()])\r\n if min(count_used.values()) == n_faces:\r\n self.open = False",
"def _hide_numbers(self):\n global counter\n\n # num of attempts allow for more blocks to be removed\n attempts = self._difficulty\n\n while attempts > 0:\n # selecting random cell and rotational counterpart\n row = randint(0, 8)\n col = randint(0, 8)\n while self._grid_init[row][col] == 0:\n row = randint(0, 8)\n col = randint(0, 8)\n\n # backing up in case removal is gives multiple solutions\n backupone = self._grid_init[row][col]\n backuptwo = self._grid_init[8 - row][8 - col]\n self._grid_init[row][col] = 0\n self._grid_init[8 - row][8 - col] = 0\n\n # cloning grid to test number of solutions\n test_puzzle = []\n for r in range(0, 9):\n test_puzzle.append(self._grid_init[r][:])\n\n # counter for num solutions is set to 0\n counter = 0\n\n # check num of solutions\n self._solve_puzzle(test_puzzle)\n\n # if num of solutions is not one, replace the two blocks\n if counter != 1:\n self._grid_init[row][col] = backupone\n self._grid_init[8 - row][8 - col] = backuptwo\n attempts -= 1",
"def getNumCleanedTiles(self):\n return self.cleaned",
"def is_hidden(self):\n return self.has_label(HIDDEN_LABEL)",
"def CountButtons(self):\r\n\r\n n = 0\r\n \r\n if self.HasCaption() or self.HasCaptionLeft():\r\n if isinstance(wx.GetTopLevelParent(self.window), AuiFloatingFrame):\r\n return 1\r\n \r\n if self.HasCloseButton():\r\n n += 1\r\n if self.HasMaximizeButton():\r\n n += 1\r\n if self.HasMinimizeButton():\r\n n += 1\r\n if self.HasPinButton():\r\n n += 1\r\n\r\n return n",
"def setFlag(self):\n if self.inPlay and not self.shown:\n self.flag = not(self.flag)\n image_index = 11 if self.flag else 10\n self.configure(image = Tile.images[image_index])\n return 1 if self.flag else -1\n return 0"
] | [
"0.5823116",
"0.58139676",
"0.5568885",
"0.54501146",
"0.54450107",
"0.5411542",
"0.53524137",
"0.5317927",
"0.5260474",
"0.5220331",
"0.51799744",
"0.5176658",
"0.5174279",
"0.5155872",
"0.5153838",
"0.5134267",
"0.5128716",
"0.5122763",
"0.5103027",
"0.5050029",
"0.503795",
"0.50361353",
"0.50213933",
"0.49861616",
"0.49613652",
"0.49574175",
"0.49541304",
"0.49536374",
"0.49317527",
"0.49272957"
] | 0.63438165 | 0 |
returns self.shown which is True if the Tile is revealed | def isShown(self):
return self.shown | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def IsShown(self):\r\n\r\n return self._shown",
"def showTile(self, event):\n clicked = event.widget\n if clicked.isInPlay():\n self.changeSmile(1)\n returned = clicked.show()\n if returned == 1 and clicked.isZero():\n returned += self.cascadeShow(clicked)\n self.checkEnd(returned)",
"def show(self):\n if not self.shown and not self.flag:\n self.shown = True\n self.configure(image=Tile.images[self.count])\n return -1 if self.mine else 1\n return 0",
"def IsShown(self):\r\n \r\n return not self.HasFlag(self.optionHidden)",
"def is_visible(self):\n return self._currently_shown",
"def is_visible(self):",
"def show(self):\n if self.visible == 1 and time() - self.lastMotion > self.delay:\n self.visible = 2\n if self.visible == 2:\n self.deiconify()",
"def show(self):\r\n if self.visible == 1 and time() - self.lastMotion > self.delay:\r\n self.visible = 2\r\n if self.visible == 2:\r\n self.deiconify()",
"def show(self):\r\n if self.visible == 1 and time() - self.lastMotion > self.delay:\r\n self.visible = 2\r\n if self.visible == 2:\r\n self.deiconify()",
"def is_visible(self):\n return self.rect.x < self.screen_rect.width",
"def show( self ):\n if self.visible == 1:#ohnheiser hack and time() - self.lastMotion > self.delay:\n self.visible = 2\n if self.visible == 2:\n self.deiconify()",
"def isVisible( self ):\n layer = self.layer()\n if ( layer and not layer.isVisible() ):\n return False\n# \n# if ( self.isIsolateHidden() ):\n# return False\n# \n return self._visible",
"def visible(self):\n return self._turtle.isvisible()",
"def visible(self):\n return self._turtle.isvisible()",
"def visible(self, show):",
"def isVisible(self):\n\t\treturn True",
"def is_shown(self, request):\n return True",
"def is_visible(self):\n return self._visible",
"def is_visible(self):\n return self.window.active_panel() == self.full_name",
"def show( self ):\n if self.visible == 1 and time() - self.lastMotion > self.delay:\n self.visible = 2\n if self.visible == 2 and self.msgVar.get()!='':\n self.deiconify()",
"def is_alive(self):\r\n return self.visible",
"async def __is_final_tile(self) -> bool:\n tiles = self.__get_neutral_tiles()\n if len(tiles) == 1:\n player = self.get_current_player()\n await self.announcer.auto_reveal(player)\n await self.flip(tiles[0])\n return True",
"def is_won(self):\n for tile in self:\n if not tile.is_mine and tile.visibility != 1:\n return False\n return True",
"def visible(self):\n return self._visible",
"def visible(self):\n return self._visible",
"def is_visible(self):\n return self.container['is_visible']",
"def is_visible(self):\n return self.proto.display_type == DISPLAY_TYPE.Visible.value",
"def is_outline_shown(self):\n return self.container['is_outline_shown']",
"def isWin(self):\n\n return self.tiles == self.winCdt",
"def IsHidden(self):\r\n\r\n return self._hidden"
] | [
"0.739741",
"0.7251842",
"0.7121201",
"0.68841034",
"0.6844302",
"0.68267983",
"0.66852266",
"0.6665799",
"0.6665799",
"0.65777355",
"0.6567036",
"0.6543213",
"0.65341944",
"0.65341944",
"0.6508172",
"0.65051925",
"0.64582306",
"0.6448889",
"0.64341176",
"0.64185053",
"0.6388922",
"0.6336273",
"0.63266957",
"0.62947315",
"0.62947315",
"0.622013",
"0.62057763",
"0.6170637",
"0.61637914",
"0.6126744"
] | 0.75655967 | 0 |
returns self.mine which is True if the Tile is a mine | def isMine(self):
return self.mine | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_won(self):\n for tile in self:\n if not tile.is_mine and tile.visibility != 1:\n return False\n return True",
"def is_mine(self) -> bool:\n return self.proto.alliance == ALLIANCE.Self.value",
"def is_mine(self, coords):\n try:\n if coords[0] >= 0 and coords[1] >= 0:\n return self.grid[coords[1]][coords[0]] == self.mine\n else:\n return False\n except IndexError:\n return False",
"def is_mine(board, x, y):\n return board[x, y] == MINE",
"def isMine(self, row, col):\n return self.board[row, col] == 1",
"def is_mine(self):\n return self.has_label(MINE_LABEL)",
"def __isTileWall(self, point):\n return self.__getElementFromPairs(point) == \"-\"",
"def is_wall_marker(self):\n return self.id in WALL",
"def check(self):\n return self.tile==\"\"",
"def is_passable(self) -> bool:\n return self != Tile.Block",
"def is_passable(self, tile, pos):\n #Check superclass to see if it's passable first\n if not super().is_passable(tile, pos):\n return False\n\n #This unit can't pass these specific terrains\n ttype = tile.type\n if (tile.type == 'forest'):\n return False\n \n #The tile is passable\n return True",
"def canTile(self):\n raise RuntimeError('Not implemented')\n \n return False",
"def has_neighbor(self, tile: 'games.saloon.tile.Tile') -> bool:\n return bool(tile and tile in self.get_neighbors())",
"def __isTileInExplored(self, tile):\n for eachTile in self.explored:\n if eachTile.coordinate == tile.coordinate:\n return True\n return False",
"def is_wall(self, x, y):\r\n\r\n return self.get_bool(x, y, 'wall')",
"async def __is_final_tile(self) -> bool:\n tiles = self.__get_neutral_tiles()\n if len(tiles) == 1:\n player = self.get_current_player()\n await self.announcer.auto_reveal(player)\n await self.flip(tiles[0])\n return True",
"def __isTileGoalState(self, point):\n return point == self.goalPoint",
"def is_wall(self, x, y):\n return (x, y) in self.walls",
"def is_on_ground(self):\n return bool(self.ground_sprites())",
"def __init__minefield__(self):\n # Creates random locations of mines according to the size of the game board.\n mines = random.sample(range(0, self.rows * self.cols), self.mines)\n \n # Uses a helper method to initialize tile categories: mine or zero.\n return [[Tiles(i, j, self.create_tile(mines, i, j)) for j in range(self.cols)] for i in range(self.rows)]",
"def isPositionInRoom(self, pos):\n if pos in self.tiles:\n return True\n else:\n return False",
"def getTile(self):\n return self.tile",
"def mine(self, body: 'games.stardash.body.Body') -> bool:\n return self._run_on_server('mine', {\n 'body': body\n })",
"def create_tile(self, mines, row, col):\n if row * self.cols + col in mines:\n return Tiles.mine\n return Tiles.zero",
"def open_tile(self, y, x):\n # Find the letter index and convert into a y-coordinate.\n # Checks if it is a mine\n if [y, x] in self.mine_locations:\n # explode\n self.show_answer_board([y, x])\n print \"Boomz.\"\n return Minesweeper.IS_A_BOMB\n else:\n # strip(?)tease to the user (oh damn sexy numbers)\n self.tease_user(y, x)\n return Minesweeper.NOT_A_BOMB",
"def is_at_wall(self):\n return self.distmin < self.distmax*0.8",
"def tileOccupied(self, i, j):\n if self.tiles[i][j] == 1 or i == 0 or i == self.size[0] - 1 or j == 0 or j == self.size[1] - 1:\n return True\n for prop in self.props:\n if prop.i == i and prop.j == j:\n return True\n return False",
"def adjust_minefield(self, row, col):\n # Iterates through the user selected 3x3 grid.\n for i in [row-1, row, row+1]:\n for j in [col-1, col, col+1]:\n\n # if the tile is valid and it contains a mine.\n if self.valid_tile(i, j) and self.board[i][j].category == Tiles.mine:\n random_i = random.randint(0, self.rows - 1)\n random_j = random.randint(0, self.cols - 1)\n\n # Searches for locations to place the mine outside of the starting position's adjacent tiles.\n while self.board[random_i][random_j].category == Tiles.mine or (abs(row-random_i) <= 1 and abs(col-random_j) <= 1):\n random_i = random.randint(0, self.rows - 1)\n random_j = random.randint(0, self.cols - 1)\n \n # Places the mine in a valid random location on the game board.\n self.board[random_i][random_j] = Tiles(random_i, random_j, Tiles.mine)\n\n # All mines removed from starting position thus set the tile to zero.\n self.board[i][j] = Tiles(i, j, Tiles.zero)",
"def is_wall(self, cell):\n # pylint: disable=invalid-name\n x, y = cell\n\n return self.grid[y][x]",
"def isWin(self):\n\n return self.tiles == self.winCdt"
] | [
"0.7636543",
"0.7621583",
"0.73764515",
"0.7335211",
"0.7304841",
"0.7171517",
"0.6615718",
"0.64297485",
"0.6389095",
"0.6372669",
"0.62786186",
"0.6278285",
"0.6270997",
"0.6176634",
"0.6149281",
"0.61392736",
"0.61352473",
"0.6128118",
"0.6124146",
"0.6122007",
"0.61073",
"0.6084456",
"0.6081747",
"0.6073573",
"0.60316783",
"0.6031667",
"0.60253227",
"0.60137266",
"0.5996732",
"0.59901446"
] | 0.824674 | 0 |
returns self.inPlay which is True when the game is being played | def isInPlay(self):
return self.inPlay | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def isPlaying(self) :\n raise NotImplementedError(\"isPlaying not implemented\")",
"def playing(self): # type: () -> bool\n return xbmc.getCondVisibility(\"Player.Playing\")",
"def game_play(self):",
"def is_game_started(self):\r\n\t\treturn self._is_game_started",
"def is_playing(self):\n return self.status == \"PLAYING\"",
"def canPlay(self):\r\n return self.__canPlay",
"def is_playing(self):\n raise NotImplementedError",
"def still_playing_game(self):\n for player in self.players:\n if player.is_playing:\n return True\n return False",
"def is_playing(self):\n return self.process is not None",
"def isOpen(self):\n\t\treturn not self.endgame",
"def play_game():\n pass",
"def is_playing(self):\n command_output = self.run_command('is_playing')[0]\n return True if command_output == '1' else False",
"def _check_play_button(self, mouse_pos):\n\n # If the player clicks the play button AND the game isn't going\n if self.play_button.rect.collidepoint(mouse_pos) and not self.stats.game_active:\n\n # reset the game stats and dynamic settings\n self.stats.reset_stats()\n self.settings.initialize_dynamic_settings()\n self.stats.game_active = True\n self.sb.prep_score()\n\n # get rid of any remaining aliens and bullets.\n self.aliens.empty()\n self.bullets.empty()\n\n # recenter player\n self.ship.center_ship()\n\n # hide the mouse cursor\n pygame.mouse.set_visible(False)",
"def playing(self):\n return self._playing",
"def is_active(self):\n return self.state == self.States.playing",
"async def is_playing(self) -> bool:\n play_state = await self.get_play_state()\n return play_state == models.player.PlayState.Playing",
"def play(self):\n print('Playing game...')",
"def _check_play_button(self, mouse_pos):\n if self.play_button.rect.collidepoint(mouse_pos) and not self.stats.game_active:\n self.stats.reset_stats()\n self.settings.initialize_dynamic_settings()\n self.stats.game_active = True\n\n #Hide mouse cursor\n pygame.mouse.set_visible(False)\n\n # Get rid of any leftover aliens and bullets\n self.aliens.empty()\n self.bullets.empty()\n\n #Create a new fleet and center the ship.\n self._create_fleet()\n self.ship.center_ship()\n\n self.scoreboard.prep_score()\n self.scoreboard.prep_high_score()\n self.scoreboard.prep_ships()",
"def is_playing(self):\n if self.voice is None or self.current is None:\n return False\n\n player = self.current.player\n return not player.is_done()",
"def isPlaying(self):\n return self.getChannel().get_busy()",
"def isPlaying(self, *args):\n return _osgAnimation.BasicAnimationManager_isPlaying(self, *args)",
"def playerCanPlay(game, situation, player):\r\n return True",
"def is_playing(self):\n return self.connected_channel is not None and self.current is not None",
"def join_in_play(self):\n return self._join_in_play",
"def can_play(self) -> bool:\n purple_card = self.game.board.purple\n return (\n self.game.current_player != self\n and purple_card is not None\n and purple_card.space > len(self.game.board.yellow[self])\n )",
"def is_playing(self):\n if (self._call_player_proxy('GetStatus', None).unpack()[0])[0] == 0:\n return True\n return False",
"def _check_play_button(self, mouse_pos):\n button_clicked = self.play_button.rect.collidepoint(mouse_pos)\n if button_clicked and not self.stats.game_active:\n # Reset the game settings.\n self.settings.initialize_dynamic_settings()\n # Reset the game statistics.\n self.stats.reset_stats()\n self.stats.game_active = True\n self.sb.prep_score()\n self.sb.prep_level()\n self.sb.prep_ships()\n # Get rid of any remaining stars and bullets.\n self.stars.empty()\n self.bullets.empty()\n # Create a new galaxy and center the ship.\n self._create_galaxy()\n self.ship.center_ship()\n pygame.mouse.set_visible(False)",
"def play(self) -> bool:\n self._draw()\n\n # Allow the player to control the agent, redrawing as we go\n while True:\n should_quit, quit_all_the_way_out = self._handle_events()\n if should_quit and quit_all_the_way_out:\n return True\n elif should_quit:\n return False\n\n # Go at a reasonable FPS\n self._clock.tick(self._settings.fps)\n\n # Unblock movement now that a frame has elapsed (we only want to be able to move once per frame)\n self._moved_this_frame = False",
"def _check_play_button(self, mouse_pos): \n button_clicked = self.play_button.rect.collidepoint(mouse_pos)\n if button_clicked and not self.stats.game_active:\n # Reset game settings\n self.settings.initialize_dynamic_settings()\n\n # Reset game stats\n self.stats.reset_stats()\n self.stats.game_active = True\n self.sb.prep_score()\n self.sb.prep_level()\n self.sb.prep_ships()\n\n # Remove any remaining aliends and bullets\n self.aliens.empty() \n self.bullets.empty()\n\n # Create new fleet and center the ship\n self._create_fleet()\n self.ship.center_ship() \n\n # Hide the mouse cursor when inside of game window\n pygame.mouse.set_visible(False)",
"def play(self, state,currentplayer):\n\t\tpass"
] | [
"0.75845695",
"0.75283724",
"0.7370583",
"0.728609",
"0.72555286",
"0.7232817",
"0.71996415",
"0.71901125",
"0.7180946",
"0.7111877",
"0.70412475",
"0.70058084",
"0.7004731",
"0.69960487",
"0.69873065",
"0.6971001",
"0.69605577",
"0.6921104",
"0.69201666",
"0.6918572",
"0.6904891",
"0.68908083",
"0.68746036",
"0.6858805",
"0.6841618",
"0.6823994",
"0.68109566",
"0.67849267",
"0.6770131",
"0.6752939"
] | 0.8621334 | 0 |
clears self.frame and destroys it so it can be rebuilt | def clearFrame(self, event=None):
for widget in self.winfo_children():
widget.destroy()
del self.tiles[:] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def reset_frame(self):\n if self.frame is not None:\n self.frame.destroy()\n\n self.frame = Frame(self.root,\n width=self._width,\n height=self._height,\n bg=Game.BACKGROUND_COLOUR)\n self.frame.pack()",
"def clearScreen(self):\n self.removeFrame(self.frame1)\n self.removeFrame(self.frame2)\n self.separator.destroy()\n #Here, the app will lose the row and column configuration and does not\n #apply new configuration. Don't know why?. So that, I destroy the\n #parent (in this case, a frame), create a new frame and set it again.\n self.parent.destroy()\n mainFrame = tk.Frame(self.store[\"root\"], bg=\"#FFF\")\n self.parent = mainFrame\n self.parent.grid(column=0, row=0, sticky=\"nsew\")",
"def destroy(self):\n tk.Frame.destroy(self)",
"def clear(self):\n self._frame.clear()\n self._turtles = []\n self._gpens = []",
"def clearwin(event=None):\r\n # for child in mframe.winfo_children():\r\n # child.destroy()\r\n global mframe\r\n mframe.destroy()\r\n mframe = tkinter.Frame(main, width=800, height=600, background='pink')\r\n mframe.pack(fill=\"both\", expand=True, padx=20, pady=20)",
"def removeFrame(self, frame):\n for widget in frame.winfo_children():\n widget.destroy()\n\n frame.pack_forget()",
"def clear(self):\n self.image = None\n self.prepareGeometryChange()\n self.informViewBoundsChanged()\n self.update()",
"def clear_canvas():\n self.parent_class.canvas.delete(\"all\")",
"def __del__(self):\n try:\n self._frame._destroy()\n except:\n pass\n self._turtles = []\n self._pencils = []\n del self._frame",
"def clear(self):\n self.animation.stop()\n self.draw(0, 0, 0, 0, 0)",
"def clear_frame(self, table):\n for widget in table.winfo_children():\n widget.destroy()",
"def bye(self):\n self._frame._destroy()\n self._turtles = []\n self._gpens = []\n del self._frame",
"def destroy_view(self): \n\n self.canvas.destroy()\n self.scrollbar.destroy()\n self.header_frame.destroy()\n self.button_frame.destroy()\n self.twitter_canvas.destroy()\n self.twitter_scrollbar.destroy()",
"def reset(self):\n self.current_frame = 0",
"def _clear_drawing(self) -> None:\n self.vertices.clear()\n self.edges.clear()\n self.subplot.clear()\n self.selected_element = None\n self.pressed_elements.clear()",
"def hlpframeclear(self):\r\n \r\n self.menubar.entryconfig(\"File\", state = 'normal')\r\n self.menubar.entryconfig(\"Help\", state = 'normal')\r\n self.hlpframe.place_forget()",
"def clear(self):\r\n\t\tself.grid.fill(False)",
"def UnInit(self):\r\n\r\n if self._frame:\r\n self._frame.RemoveEventHandler(self)",
"def removeLatticeFrame(self):\n self.latticeFrame.remove()",
"def clear(self):\n try:\n # This causes stupid errors with tkagg, so just wrap it in\n # try-except for now\n self.fig.clear()\n except: pass\n self.annotators.clear()\n self.dims.clear()\n self.ph.remove(self.ID)",
"def _clear(self):\n\n self.image = Image.new(\"RGB\", (self._width, self._height), self._color)",
"def deinit(self):\n self.reset()",
"def clear(self):\n self.display(Image.new(self.mode, self.size))",
"def reset(self):\n self.clear()",
"def clear_form(self):\n self.lst_state_item = None\n self.lst_file_item = None\n self.txt_state.setText(\"\")\n self.txt_file.setText(\"\")\n self.lbl_image.setText(\"\")\n self.frm_edit.setEnabled(False)\n self.tbl_symbols.clearSelection()\n self.preview = False",
"def destroy(self):\r\n self.visible = False",
"def clear_screen(self):\r\n lst_grid = self.root.grid_slaves()\r\n for widget in lst_grid:\r\n widget.destroy()\r\n lst_pack = self.root.pack_slaves()\r\n for widget in lst_pack:\r\n widget.destroy()",
"def clear(self):\r\n self.delete(0, tkinter.END)",
"def __destroy_ui(self):\n # Remove the viewable area from Gedit's side panel\n self.__side_panel.remove_item(self.__view_port)\n\n # Empty class's properties\n self.__tree_view = None\n self.__side_panel = None\n\n self.__view_port.destroy()\n self.__view_port = None",
"def _clear_drawing(self) -> None:\n self.vertices.clear()\n self.edges.clear()\n self.subplot.clear()\n self.subplot2.clear()"
] | [
"0.7760308",
"0.7735232",
"0.76537466",
"0.7313428",
"0.7302228",
"0.7171346",
"0.7010329",
"0.700973",
"0.69976103",
"0.6972827",
"0.69609433",
"0.6927987",
"0.6858457",
"0.68578225",
"0.6823086",
"0.68147045",
"0.67939603",
"0.6778618",
"0.6772166",
"0.6730643",
"0.6709295",
"0.6706634",
"0.6696387",
"0.6682199",
"0.6663374",
"0.6652941",
"0.66502255",
"0.6646007",
"0.66433346",
"0.66349536"
] | 0.7825959 | 0 |
sets up the Frame and Labels to be used | def setUpFrame(self):
#adds labels to the Board
self.mineLabel = tk.Label(self, text="Mines: "+str(self.numMines))
self.mineLabel.grid(row=0, column=0, sticky="W", columnspan=int((self.cols-2)/2))
self.smileButton = tk.Label(self, image=self.images[1])
self.smileButton.grid(row=0, column=int((self.cols-2)/2), sticky="WE", columnspan=2)
self.flagLabel = tk.Label(self, text="Flags: "+str(self.numFlags))
self.flagLabel.grid(row=0, column=int((self.cols-2)/2)+2, sticky="E", columnspan=int((self.cols-1)/2))
#left click listeners on smileButton
self.smileButton.bind('<ButtonPress-1>', lambda event, num=0: self.changeSmile(num))
self.smileButton.bind('<ButtonRelease-1>', self.replay) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def Define_Frame(self):\n self.frame=Frame(self.master, relief=GROOVE, bd=4)\n self.frame.grid(row=0,column=1,rowspan=2,columnspan=2)\n frame_title = Label(self.frame,text=\"Stage Control\",relief=RAISED,bd=2,width=24, bg=\"light yellow\",font=(\"Times\", 16))\n frame_title.grid(row=0, column=1)\n self.encoder_text = [] # These hold the stage position as read by the encoders\n self.coo_ent = [] # These hold the coordinate entry values\n but = []\n encoder_display = []\n for i in range(3):\n self.coo_ent.append(Entry(self.frame, justify=\"center\", width=12))\n but.append(Button(self.frame, text=\"Move %s (relative)\"%self.POS_NAME[i], width=12,command=lambda axis=i:self.GUI_move(axis)))\n self.encoder_text.append(StringVar())\n encoder_display.append(Label(self.frame,textvariable=self.encoder_text[i],relief=SUNKEN,bd=1, width=20))\n self.coo_ent[i].grid(row=i+1,column=0)\n self.coo_ent[i].focus_set()\n but[i].grid(row=i+1,column=1)\n encoder_display[i].grid(row=i+1,column=2)\n self.encoder_text[i].set(\"%8s microns\"%str(self.read_pos[i]))\n zero_encoders_button = Button(self.frame, text=\"Re-Initialize Encoders\", width=20, command=self.GUI_ReInitialize_Encoders)\n zero_encoders_button.grid(row=5,column=1)\n return",
"def __init__( self ):\n \n Frame.__init__( self ) # initializes Frame instance\n \n # frame fills all available space\n self.pack( expand = YES, fill = BOTH )\n self.master.title( \"Labels\" )\n \n self.Label1 = Label( self, text = \"Label with text\" )\n \n # resize frame to accommodate Label\n self.Label1.pack()\n \n self.Label2 = Label( self,\n text = \"Labels with text and a bitmap\" )\n \n # insert Label against left side of frame\n self.Label2.pack( side = LEFT )\n \n # using default bitmap image as label\n self.Label3 = Label( self, bitmap = \"warning\" )\n self.Label3.pack( side = LEFT )",
"def create_status_robot_frame(self):\n\n self.Robot_Status_Frame = tk.Frame(master=self)\n self.Robot_Status_Frame.config(highlightthickness=1, highlightcolor=\"black\", highlightbackground=\"black\")\n self.Robot_Status_Frame.pack(side = tk.LEFT, padx = 20, pady = 10, fill = tk.BOTH)\n\n status_lbl = tk.Label(master = self.Robot_Status_Frame, text = \"ROBOT STATUS\", width = 15)\n status_lbl.pack(side = tk.TOP) \n\n self.lbl_pose_x = tk.StringVar()\n self.lbl_pose_y = tk.StringVar()\n self.lbl_angle = tk.StringVar()\n self.lbl_status = tk.StringVar()\n self.lbl_goto_x = tk.StringVar()\n self.lbl_goto_y = tk.StringVar()\n\n GUI_MAP.create_label_frame(master = self.Robot_Status_Frame, label_text = \"X: \", label_target = self.lbl_pose_x)\n GUI_MAP.create_label_frame(master = self.Robot_Status_Frame, label_text = \"Y: \", label_target = self.lbl_pose_y)\n GUI_MAP.create_label_frame(master = self.Robot_Status_Frame, label_text = \"Angle: \", label_target = self.lbl_angle)\n GUI_MAP.create_label_frame(master = self.Robot_Status_Frame, label_text = \"Status: \", label_target = self.lbl_status)\n GUI_MAP.create_label_frame(master = self.Robot_Status_Frame, label_text = \"\", label_target = None)\n GUI_MAP.create_label_frame(master = self.Robot_Status_Frame, label_text = \"next GOTO X: \", label_target = self.lbl_goto_x)\n GUI_MAP.create_label_frame(master = self.Robot_Status_Frame, label_text = \"next GOTO Y: \", label_target = self.lbl_goto_y)\n\n\n self.lbl_pose_x.set(\"N/A\")\n self.lbl_pose_y.set(\"N/A\")\n self.lbl_angle.set(\"N/A\")\n self.lbl_status.set(\"N/A\")\n self.lbl_goto_x.set(\"N/A\")\n self.lbl_goto_y.set(\"N/A\")",
"def __init__(self):\n super().__init__()\n self.geometry('{}x{}'.format(425, 185))\n self.title('PAD Tracker')\n self.frame = Frame(self)\n self.populateFields()\n self.frame.pack()",
"def _create_right_info_frame(self, headers):\n\n self.frames.append(tk.Frame(self.master))\n self.labels.append([])\n\n for column, name in enumerate(headers):\n self.frames[4].columnconfigure(column, weight=1, minsize=60)\n self.header_values[name] = []\n for row in range(5):\n if column == 9:\n self.header_values[name].append(tk.StringVar())\n else:\n self.header_values[name].append(tk.DoubleVar())\n self.header_values[name][row].set(self.default_values\n ['ri']\n [(column*5)+row])\n self.labels[2].append(tk.Label(self.frames[4],\n textvariable=self.\n header_values[name]\n [row], relief=\"ridge\"))\n self.labels[2][(column*5)+row].grid(column=column, row=row,\n sticky=\"nsew\", padx=8, pady=10)\n self.frames[4].rowconfigure(row, weight=1, minsize=50)\n self.frames[4].grid(column=2, row=1, sticky=\"ew\", padx=10, pady=10)",
"def initGUI(self):\n\n\t\t# Set main frame's location \n\t\tself.grid(row=0, column=0, sticky=\"nsew\")\n\n\t\t# Set path entry frame and its location\n\t\tself.entryFrame = Frame(self, relief = RAISED, borderwidth = 1)\n\t\tself.entryFrame.pack(fill = BOTH, expand = False)\n\t\t# Make label\n\t\tif self.message:\n\t\t\tmessageLabel = Label(self.entryFrame, text = self.message, font=(\"Bradley\", 10))\n\t\t\tmessageLabel.pack(anchor=W, padx=0, pady=0)\n\n\t\t# Set path entry and its location\n\t\tself.filePathEntry = Entry(self.entryFrame, bd = 4, width = 50)\n\t\tself.filePathEntry.pack(side = LEFT, padx=2, pady=1)",
"def __init__(self,*args, **kwargs):\n tk.Tk.__init__(self, *args, **kwargs)\n self.winfo_toplevel().title(\"ElogQP\")\n self.container = tk.Frame(self)\n self.container.pack(side=\"top\", fill=\"both\", expand=True)\n self.container.grid_rowconfigure(0, weight=1)\n self.container.grid_columnconfigure(0, weight=1)\n self.activeFrames = []\n for F in (Frames.frame_start.frame_start, Frames.frame_modules.frame_modules, Frames.frame_showError.frame_showError):\n self.createFrame(F, F.__name__)\n \n self.showFrame(\"frame_start\")",
"def build_frames(self):\n self.cntrl_frame = tk.PanedWindow(self.root)\n self.cntrl_frame.pack(side = tk.TOP, padx = 1, pady = 1, fill = tk.Y)\n self.info_frame_1 = tk.PanedWindow(self.root)\n self.info_frame_1.pack(side = tk.TOP, padx = 1, pady = 2, fill = tk.Y)",
"def _create_main_label_frame(self):\r\n # frame for storage\r\n main_label_frame = Frame(master=self)\r\n main_label_frame.pack()\r\n\r\n # canvas for circle\r\n self.canvas = Canvas(master=main_label_frame, width=100, height=100)\r\n self.canvas.pack(side=LEFT)\r\n\r\n # create circle\r\n self.circle = self.canvas.create_oval(40, 25, 90, 70, outline=\"black\", fill=\"red\", width=6)\r\n\r\n # main label\r\n Label(master=main_label_frame, text=\"Recorder\", font=FONT_BIG).pack()",
"def buildmainframe(self):\n self.mainframewidgets=[]\n for x in range(3):\n thislabel = Label(self.mainframe, text=str(x))\n thislabel.grid()\n self.mainframewidgets.append(thislabel)",
"def __init__(self):\n self.window = Tk()\n self.window.title(\"Brick Breaker\")\n self.window.attributes(\"-fullscreen\", True)\n self.window.iconbitmap(\"data/wall.ico\")\n self.window.config(background=\"light blue\")\n\n # initialization des composants\n self.frame = Frame(self.window, bg='light blue')\n self.littleFrame = Frame(self.frame, bg='light blue')\n self.littleFrame_bis = LabelFrame(self.frame, bg='light blue', text=\"USER NAME\")\n\n # creation des composants\n self.create_title()\n self.create_subtitle()\n self.create_play_button()\n self.create_quit_button()\n\n # empaquetage\n self.littleFrame_bis.pack(expand=YES, pady=30)\n self.littleFrame.pack(expand=YES, pady=50)\n self.frame.pack(expand=YES, fill=BOTH, pady=200)",
"def load(self):\n # Frame\n self.frame.grid_configure(row=1, column=1, padx=(PAD, PAD+TINY_PAD), pady=(0, PAD+CANVAS_PAD), sticky=tk.N+tk.S)\n self.frame.rowconfigure(1, weight=1)\n self.frame.rowconfigure(3, weight=1)\n # Across label\n self.across_label.config(text=\"Across\", anchor=tk.W, **settings.get(\"style:clue\"))\n self.across_label.grid(row=0, column=0, pady=(0, TINY_PAD), sticky=tk.N+tk.W)\n # Across frame\n self.across_frame.config(highlightthickness=1, highlightbackground=settings.get(\"style:border:fill\"))\n self.across_frame.grid(row=1, pady=(CANVAS_PAD, PAD), sticky=tk.N+tk.S)\n self.across_frame.rowconfigure(0, weight=1)\n # Across listbox\n self.across_listbox.config(bd=0, selectborderwidth=0, activestyle=tk.NONE, **settings.get(\"style:list\"))\n self.across_listbox.grid(row=0, column=0, sticky=tk.N+tk.S)\n self.across_listbox.config(yscrollcommand=self.across_scrollbar.set)\n # Across scrollbar\n self.across_scrollbar.config(command=self.across_listbox.yview)\n self.across_scrollbar.grid(row=0, column=1, sticky=tk.N+tk.S)\n # Down label\n self.down_label.config(text=\"Down\", anchor=tk.W, **settings.get(\"style:clue\"))\n self.down_label.grid(row=2, column=0, pady=(PAD, 0), sticky=tk.N+tk.W)\n # Down frame\n self.down_frame.config(highlightthickness=1, highlightbackground=settings.get(\"style:border:fill\"))\n self.down_frame.grid(row=3, pady=(TINY_PAD, 0), sticky=tk.N+tk.S)\n self.down_frame.rowconfigure(0, weight=1)\n # Down listbox\n self.down_listbox.config(bd=0, selectborderwidth=0, activestyle=tk.NONE, **settings.get(\"style:list\"))\n self.down_listbox.grid(row=0, column=0, sticky=tk.N+tk.S)\n self.down_listbox.config(yscrollcommand=self.down_scrollbar.set)\n # Down scrollbar\n self.down_scrollbar.config(command=self.down_listbox.yview)\n self.down_scrollbar.grid(row=0, column=1, sticky=tk.N+tk.S)",
"def load(self):\n # Frame\n self.frame.grid_configure(row=0, column=0, columnspan=4, padx=PAD, pady=(TINY_PAD, PAD), sticky=tk.W+tk.E)\n self.frame.columnconfigure(0, weight=1)\n # Crossword title\n self.title_label.config(**settings.get(\"style:title\"))\n self.title_label.grid(row=0, column=0, pady=(0, PAD), sticky=tk.W)\n # Crossword author\n self.author_label.config(**settings.get(\"style:author\"))\n self.author_label.grid(row=0, column=0, padx=TINY_PAD, pady=(0, PAD), sticky=tk.E)\n # Separator\n self.separator.config(height=SEPARATOR_HEIGHT, bg=SEPARATOR_COLOR)\n self.separator.grid(row=1, padx=TINY_PAD, sticky=tk.W+tk.E)",
"def config_frames(self):\n self.root.grid_rowconfigure(1, weight=1)\n self.root.grid_columnconfigure(1, weight=1)\n\n self.top_frame = tkinter.Frame(self.root, pady=1)\n self.top_frame.grid(row=0, columnspan=2, sticky='nsew')",
"def _create_right_name_frame(self, headers):\n\n self.frames.append(tk.LabelFrame(self.master))\n self.labels.append([])\n\n for i, name in enumerate(headers):\n self.labels[1].append(tk.Label(self.frames[1], text=name,\n relief=\"sunken\"))\n self.labels[1][i].grid(column=i, row=0, sticky=\"ew\")\n self.frames[1].columnconfigure(i, weight=1, minsize=60)\n self.frames[1].grid(column=2, row=0, sticky=\"ew\", padx=10, pady=10)",
"def __init__(self):\n EasyFrame.__init__(self, title = \"Game Time\")\n self.setSize(440, 400)\n self.cardLabel1 = self.addLabel(\"\", row = 0,\n column = 0,\n sticky = \"NSEW\")\n self.cardLabel2 = self.addLabel(\"\", row = 0,\n column = 1,\n sticky = \"NSEW\")\n self.cardLabel3 = self.addLabel(\"\", row = 0,\n column = 2,\n sticky = \"NSEW\")\n self.stateLabel = self.addLabel(\"\", row = 1, column = 0,\n sticky = \"NSEW\",\n columnspan = 2)\n self.addButton(row = 2, column = 0,\n text = \"New game\",\n command = self.newDeal)\n self.addButton(row = 2, column = 2,\n text = \"Quit\",\n command = self.quit)",
"def setupFrame(self, frame_width, frame_height):\n x, y = 0.0, 0.4\n self.x0 = int(frame_width*x)\n self.y0 = int(frame_height*y)\n self.width = 260\n self.height = 260",
"def __init__(self, frame):\n self.frame = frame\n self._configure()",
"def __init__(self, master=None):\n # Initialise variables\n tk.Frame.__init__(self)\n self.frames = []\n self.labels = []\n self.entries = []\n self.user_values = {}\n self.header_values = {}\n self.summary_values = {\"EWA\": {}, \"Ave\": {}}\n self.row_buttons = []\n self.master.title(\"LoL Team Checker\")\n\n # Please check how to code this by PEP standards\n self.default_values = {'ln': [\"Summoner Name\", \"Champion Name\"],\n 'rn': [\"Games\", \"Win Rate\", \"Kills\",\n \"Deaths\", \"Assists\", \"CS\",\n \"Towers\", \"Gold\", \"KDA\",\n \"Prediction\"],\n 'li': {\"Names\": ['{s}'.format(s=\"Summoner\"\" \")\n + str(i) for i in range(1, 6)],\n \"Champs\": ['{s}'.format(s=\"Champ \")\n + str(i) for i in range(1, 6)]\n }, 'ri': ['-' if i == 9 else '0' for i in\n range(10) for j in range(5)],\n 'rv': [tk.StringVar() if i == 9 else\n tk.DoubleVar() for i in range(10)\n for j in range(5)]}\n\n # Create Frames\n self._create_left_name_frame(self.default_values['ln'])\n self._create_right_name_frame(self.default_values['rn'])\n self._create_left_info_frame(self.default_values['ln'])\n self._create_button_frame()\n self._create_right_info_frame(self.default_values['rn'])\n self._create_mid_region_frame() # mid, top, frame created by column\n self._create_left_summary_frame()\n self._create_mid_summary_frame()\n self._create_right_summary_frame()\n# configuration, not explicitly.\n # Configure frames\n# self.master.grid()\n top = self.winfo_toplevel()\n# top.grid(0, \"ew\")\n top.columnconfigure(0, weight=1) # , minsize=100)\n top.columnconfigure(1, weight=1) # , minsize=75)\n top.columnconfigure(2, weight=1) # , minsize=100)\n# top.rowconfigure(0, weight=1)\n top.rowconfigure(1, weight=1)\n top.rowconfigure(2, weight=2)\n top.rowconfigure(3, weight=2)\n# self.columnconfigure(0, weight=1)\n# self.columnconfigure(1, weight=1)\n# self.rowconfigure(0, weight=0)\n self.grid(sticky=\"ew\")",
"def update_frame_label(self):\n count = len(self.main_frame_list)\n\n for idx in range(count): #Start, count) \n s1 = \"\"\n for i in range(16): #self.main_frame_nibble_list: # 16\n s = \"\"\n for j in range(4):\n s += str(self.main_button_bit_list[idx][i*4 + j].get_label())\n s = s[::-1]\n self.main_frame_nibble_list[idx][i].set_label(str(hex(int(s,2)))[2:].upper())\n s1 += str(self.main_frame_nibble_list[idx][i].get_label())\n s1 = s1[::-1]\n if DEBUG: print(s1[:8] + \" \" + s1[8:])\n self.main_frame_list[idx].set_label(s1[:8] + \" \" + s1[8:])",
"def create_controls(self):\n\n self.button_frame = tk.LabelFrame(self, text=\"Controls\", padx=5, pady=5)\n self.button_frame.grid(row=0, column=1, padx=5, pady=5, sticky=\"n\")\n self.load_data = tk.Button(\n self.button_frame, text=\"Load Data\", command=self.update_stats\n )\n self.load_data.grid(row=0)\n\n self.print_data = tk.Button(\n self.button_frame, text=\"Print Data\", command=self.print_raw_data,\n )\n self.print_data.grid(row=1)\n\n self.quit = tk.Button(\n self.button_frame, text=\"Quit\", fg=\"red\", command=self.master.destroy\n )\n self.quit.grid(row=2)",
"def create_frame_icons(self):\n self.text = \"{}\".format(self.name)\n self.y = self.startY - 10 if self.startY - 10 > 10 else self.startY + 10\n self.colorIndex = LABELS.index(self.name)",
"def setup(self):\n\n # push the frame for the toplevel window\n self.lumpy.pushfr(self.tl)\n self.lumpy.col([0,1])\n\n # the frame at the top contains buttons\n self.lumpy.row([0,0,1], bg='white')\n self.lumpy.bu(text='Close', command=self.close)\n self.lumpy.bu(text='Print to file:', command=self.printfile)\n self.en = self.lumpy.en(width=10, text='lumpy.ps')\n self.en.bind('<Return>', self.printfile)\n self.la = self.lumpy.la(width=40)\n self.lumpy.endrow()\n\n # the grid contains the canvas and scrollbars\n self.lumpy.gr(2)\n \n self.ca_width = 1000\n self.ca_height = 500\n self.canvas = self.ca(self.ca_width, self.ca_height, bg='white')\n\n yb = self.lumpy.sb(command=self.canvas.yview, sticky=N+S)\n xb = self.lumpy.sb(command=self.canvas.xview, orient=HORIZONTAL,\n sticky=E+W)\n self.canvas.configure(xscrollcommand=xb.set, yscrollcommand=yb.set,\n scrollregion=(0, 0, 800, 800))\n \n self.lumpy.endgr()\n self.lumpy.endcol()\n self.lumpy.popfr()\n\n # measure some sample letters to get the text height\n # and set the scale factor for the canvas accordingly\n self.canvas.clear_transforms()\n bbox = self.canvas.measure(['bdfhklgjpqy'])\n self.unit = 1.0 * bbox.height()\n transform = ScaleTransform([self.unit, self.unit])\n self.canvas.add_transform(transform)",
"def _create_left_name_frame(self, headers):\n self.frames.append(tk.LabelFrame(self.master))\n self.labels.append([])\n\n for i, name in enumerate(headers):\n self.labels[0].append(tk.Label(self.frames[0], text=name,\n relief=\"groove\"))\n self.labels[0][i].grid(column=i, row=0, sticky=\"ew\")\n self.frames[0].columnconfigure(i, weight=1, minsize=100)\n\n # For .grid one must modify their positions by referencing\n # their parents. Here: the LabelFrame is self.frames[0], and\n # in order to modify the positions, etc. of the Labels *IN*\n # the LabelFrame, one must modify the relevant coordinates in\n # the LabelFrame, not by referencing the Labels:\n # x = Label(parent, ...) {parent=LabelFrame}\n # x.grid(column_in_parent, row_in_parent)\n\n self.frames[0].grid(column=0, row=0, sticky=\"ew\", columnspan=1, padx=10, pady=10)",
"def _initialize(self):\n self._frame = ttk.Frame(master=self._root)\n self._ingredients_frame = ttk.Frame(master=self._frame)\n\n self._create_header()\n self._show_ingredient_list()\n self._create_footer()\n\n self._ingredients_frame.grid(row=1, column=1, columnspan=2)\n self._frame.grid_columnconfigure(1, weight=1, minsize=250)",
"def __init__(self, parent, *args, **kwargs):\n tk.LabelFrame.__init__(self, parent, *args, **kwargs)\n self.canvas = MainCanvas(self, bg=\"orange\")\n self.canvas.pack(side='top', fill='both', expand=True)",
"def assemble_img_frame(self):\n\n self.p2_label_img = ttk.Label(self.p2_frame_img, text=self.lang.VP_IMG_LABEL,\n font=FONT_MSG)\n self.p2_label_img.grid(row=1, column=2, padx=5, pady=0)",
"def init_grid(self):\n self.headlabel.collection = self.books\n self.headlabel.set_label_text()\n self.warnlabel.set_label_text('Welcome to the Reading Tracker 2.0!')\n self.building_grid(None, 'Author')",
"def initGUI(self):\r\n\r\n self.pack(fill=tk.BOTH, expand=True)\r\n\r\n # Figure out sizing.\r\n width = 200\r\n height = 200\r\n pad = 5\r\n fontWidth = 8\r\n bigWidth = int((width*3 + pad*6) / fontWidth)\r\n \r\n # Create option frames.\r\n self.frameOptions = tk.LabelFrame(self, text=\"Options:\",\r\n width=width, height=height)\r\n self.frameSegment = tk.LabelFrame(self, text=\"Segmentation Method:\",\r\n width=width, height=height)\r\n self.frameMeasure = tk.LabelFrame(self, text=\"Measurements:\",\r\n width=width, height=height)\r\n\r\n # Create text boxes and labels.\r\n self.labelStatus = tk.LabelFrame(self, text=\"Status:\", bd=0)\r\n self.labelResults = tk.LabelFrame(self, text=\"Results:\", bd=0)\r\n self.textStatus = ScrolledText(self.labelStatus, height=5,\r\n width=bigWidth)\r\n self.textResults = ScrolledText(self.labelResults, height=10,\r\n width=bigWidth)\r\n\r\n # Create buttons.\r\n self.buttonCalculate = tk.Button(self, text='Calculate',\r\n width=20, height=1, font=12, bd=3,\r\n command=lambda:self.prepare())\r\n self.buttonSaveAll = tk.Button(self, text='Save Session Summary',\r\n command=self.saveAll)\r\n self.buttonSelectOutFold = tk.Button(self, text='Set Output Folder',\r\n command=self.setOutputFolder)\r\n self.buttonAbout = tk.Button(self, text='About', command=self.about)\r\n\r\n # Arrange toplevel widgets.\r\n self.frameOptions.grid(row=0, column=2, padx=pad, pady=pad,\r\n sticky='NESW')\r\n self.frameSegment.grid(row=0, column=1, padx=pad, pady=pad,\r\n sticky='NESW')\r\n self.frameMeasure.grid(row=0, column=0, padx=pad, pady=pad,\r\n sticky='NESW')\r\n\r\n self.buttonCalculate.grid(row=1, column=1, \r\n padx=pad, pady=pad*3)\r\n self.buttonSelectOutFold.grid(row=1, column=0, \r\n padx=pad, pady=pad*3)\r\n self.buttonAbout.grid(row=6, column=2, sticky='e', padx=20, pady=10)\r\n\r\n self.labelStatus.grid(row=2, column=0, columnspan=3, sticky='w',\r\n padx=pad, pady=pad)\r\n self.textStatus.grid(row=3, column=0, columnspan=3)\r\n self.labelResults.grid(row=4, column=0, columnspan=3, sticky='w',\r\n padx=pad, pady=pad)\r\n self.textResults.grid(row=5, column=0, columnspan=3)\r\n self.buttonSaveAll.grid(row=6, column=1, padx=pad, pady=pad)\r\n\r\n # Variables\r\n self.outFold = None\r\n columns = [[\"\",\"\",\"\",\"\",\r\n \"Bright phase diameter\",\r\n \"\",\"\",\r\n \"Dark phase diameter\",\r\n \"\",\"\",\r\n \"Bright length\",\r\n \"\",\"\",\r\n \"Dark length\",\r\n \"\",\"\",\r\n \"Bright area\",\r\n \"\",\"\",\r\n \"Dark area\",\r\n \"\",\"\",\r\n \"Bright connected length\",\r\n \"\",\"\",\r\n \"Dark connected length\",\r\n \"\",\"\"], \r\n [\"image\",\r\n \"pixel size\",\r\n \"area frac\",\r\n \"est diam\",\r\n \"Average\",\r\n \"SD\",\r\n \"Measured\",\r\n \"Average\",\r\n \"SD\",\r\n \"Measured\",\r\n \"Average\",\r\n \"SD\",\r\n \"Measured\",\r\n \"Average\",\r\n \"SD\",\r\n \"Measured\",\r\n \"Average\",\r\n \"SD\",\r\n \"Measured\",\r\n \"Average\",\r\n \"SD\",\r\n \"Measured\",\r\n \"Average\",\r\n \"SD\",\r\n \"Measured\",\r\n \"Average\",\r\n \"SD\",\r\n \"Measured\"]]\r\n \r\n self.saveAll = np.array(columns)\r\n\r\n # Measurement options.\r\n # Variables.\r\n self.varDiameter = tk.BooleanVar()\r\n self.varLength = tk.BooleanVar()\r\n self.varArea = tk.BooleanVar()\r\n self.varSumConnectedLength = tk.BooleanVar()\r\n self.varAreaFraction = tk.BooleanVar()\r\n # Create widgets.\r\n self.checkDiameter = tk.Checkbutton(self.frameMeasure,\r\n text=\"Diameter\", variable=self.varDiameter)\r\n self.checkLength = tk.Checkbutton(self.frameMeasure,\r\n 
text=\"Length\", variable=self.varLength)\r\n self.checkArea = tk.Checkbutton(self.frameMeasure,\r\n text=\"Area\", variable=self.varArea)\r\n self.checkSumConnectedLength = tk.Checkbutton(self.frameMeasure,\r\n text=\"Connected length\", variable=self.varSumConnectedLength)\r\n self.checkAreaFraction = tk.Checkbutton(self.frameMeasure,\r\n text=\"Area fraction\", variable=self.varAreaFraction)\r\n # Pack widgets.\r\n self.checkDiameter.grid(row=0, column=0, sticky='w')\r\n self.checkLength.grid(row=1, column=0, sticky='w')\r\n self.checkArea.grid(row=2, column=0, sticky='w')\r\n self.checkSumConnectedLength.grid(row=3, column=0, sticky='w')\r\n self.checkAreaFraction.grid(row=4, column=0, sticky='w')\r\n # Check appropriate boxes.\r\n self.checkDiameter.select()\r\n self.checkLength.select()\r\n self.checkArea.select()\r\n self.checkSumConnectedLength.select()\r\n self.checkAreaFraction.select()\r\n \r\n # Segment options.\r\n # Variables.\r\n self.varSegment = tk.StringVar()\r\n # Create widgets.\r\n self.radAccurate = tk.Radiobutton(self.frameSegment,\r\n text=\"Accurate\", variable=self.varSegment, value=\"accurate\",\r\n command=self.updateOptions)\r\n self.radFast = tk.Radiobutton(self.frameSegment,\r\n text=\"Fast\", variable=self.varSegment, value=\"fast\",\r\n command=self.updateOptions)\r\n self.radManual= tk.Radiobutton(self.frameSegment,\r\n text=\"Manual\", variable=self.varSegment, value=\"manual\",\r\n command=self.updateOptions)\r\n self.radFromBinary = tk.Radiobutton(self.frameSegment,\r\n text=\"From binary\", variable=self.varSegment, value=\"binary\",\r\n command=self.updateOptions)\r\n # Pack widgets.\r\n self.radAccurate.grid(row=0, column=0, sticky='w')\r\n self.radFast.grid(row=1, column=0, sticky='w')\r\n self.radManual.grid(row=2, column=0, sticky='w')\r\n self.radFromBinary.grid(row=3, column=0, sticky='w')\r\n # Check appropriate boxes.\r\n self.radAccurate.select()\r\n\r\n # Option options.\r\n # Profiles\r\n profiles = autoSelect.profiles()\r\n # Variables.\r\n self.varShowSteps = tk.BooleanVar()\r\n self.varOutputExcel = tk.BooleanVar()\r\n self.varSavePDF = tk.BooleanVar()\r\n self.varSaveMovie = tk.BooleanVar()\r\n self.varSaveBinary = tk.BooleanVar()\r\n self.varAutoParse = tk.BooleanVar()\r\n self.varProfile = tk.StringVar()\r\n self.varProfile.set(profiles[0])\r\n # Create widgets.\r\n self.checkShowSteps = tk.Checkbutton(self.frameOptions,\r\n text=\"Show steps\", variable=self.varShowSteps)\r\n self.checkOutputExcel = tk.Checkbutton(self.frameOptions,\r\n text=\"Output to Excel\", variable=self.varOutputExcel)\r\n self.checkSavePDF = tk.Checkbutton(self.frameOptions,\r\n text=\"Save PDF\", variable=self.varSavePDF)\r\n self.checkSaveMovie = tk.Checkbutton(self.frameOptions,\r\n text=\"Save movie\", variable=self.varSaveMovie)\r\n self.checkSaveBinary = tk.Checkbutton(self.frameOptions,\r\n text=\"Save binary\", variable=self.varSaveBinary)\r\n self.checkAutoParse = tk.Checkbutton(self.frameOptions,\r\n text=\"Auto parse raw image\", variable=self.varAutoParse,\r\n command=self.updateAuto)\r\n self.optionProfile = tk.OptionMenu(self.frameOptions, self.varProfile,\r\n *profiles)\r\n self.optionProfile.config(state=tk.DISABLED)\r\n\r\n # Pack widgets.\r\n self.checkShowSteps.grid(row=0, column=0, sticky='w')\r\n self.checkOutputExcel.grid(row=1, column=0, sticky='w')\r\n self.checkSavePDF.grid(row=2, column=0, sticky='w')\r\n #self.checkSaveMovie.grid(row=3, column=0, sticky='w')\r\n self.checkSaveBinary.grid(row=4, column=0, sticky='w')\r\n 
self.checkAutoParse.grid(row=5, column=0, sticky='w')\r\n self.optionProfile.grid(row=6, column=0, sticky='w', padx=15)\r\n \r\n # Check appropriate boxes.\r\n self.checkOutputExcel.select()\r\n\r\n self.createToolTips()",
"def label_grid(self):\n\n self.pc_label.grid(row=0, sticky=\"nw\", pady=2, padx=3)\n self.sc_label.grid(row=1, sticky=\"nw\", pady=2, padx=3)\n self.avg_t_label.grid(row=2, sticky=\"nw\", pady=2, padx=3)\n self.nwt_label.grid(row=4, sticky=\"nw\", pady=2, padx=3)\n self.nw_ip_label.grid(row=5, sticky=\"nw\", pady=2, padx=3)\n self.nw_gw_label.grid(row=6, sticky=\"nw\", pady=2, padx=3)\n self.nw_sm_label.grid(row=7, sticky=\"nw\", pady=2, padx=3)\n self.nw_mca_label.grid(row=8, sticky=\"nw\", pady=2, padx=3)"
] | [
"0.7544622",
"0.75399923",
"0.7160371",
"0.700181",
"0.6993383",
"0.6987549",
"0.693042",
"0.68723804",
"0.68431395",
"0.6841213",
"0.68293494",
"0.6811473",
"0.67933005",
"0.67782164",
"0.6755658",
"0.67353123",
"0.67290145",
"0.66983646",
"0.66882557",
"0.66620207",
"0.6655562",
"0.6646496",
"0.6645624",
"0.6641553",
"0.66393024",
"0.6624608",
"0.6614281",
"0.6605923",
"0.660465",
"0.6603433"
] | 0.76427805 | 0 |
Adds all the needed tiles on the Board | def addTiles(self, rows, cols, minecount):
for row in range(rows):
self.tiles.append([])
for col in range(cols):
tile = Tile(self, row, col)
tile.grid(row=row+1, column=col)
self.tiles[row].append(tile)
#left click listeners
tile.bind('<ButtonPress-1>', self.pressTile)
tile.bind('<ButtonRelease-1>', self.showTile)
#middle click listeners
tile.bind('<ButtonPress-2>', self.pressAdjTiles)
tile.bind('<ButtonRelease-2>', self.showAdjTiles)
#right click listeners
tile.bind('<ButtonPress-3>', self.pressTile)
tile.bind('<ButtonRelease-3>', self.toggleFlag) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def populate_tiles(self):\n\n # grid format :\n # grid(x,y,z)[0]: A valid WorldTile type (i.e. WorldTile.door)\n # grid(x,y,z)[1]: A list of ASCII color or format codes for ColorIze\n # grid(x,y,z)[2]: The tile object\n\n self.t_count = 0 # Tile count, increment for each tile added\n self.build_start = time.clock()\n self.logger.info(\"[*] Starting world building script\")\n\n script_list = [\n self.build_boss_room,\n self.build_rooms,\n self.build_halls,\n self.build_doors,\n self.build_chests,\n self.build_traps,\n self.build_mobs,\n self.build_npcs\n ]\n for func in script_list:\n self.logger.debug(\"\\tRunning {}\".format(func.__name__))\n if not func():\n e_text = \"Build script failed : {}\".format(func.__name__)\n raise AssertionError(e_text)\n\n self.logger.info(\"[*] World building script completed\")\n self.logger.debug(\"\\tTiles Placed : {}\".format(self.t_count))\n build_time = time.clock()-self.build_start\n self.logger.debug(\"\\tTook {}s\".format(build_time))\n self.logger.debug(\"\\tTiles/s : {}\".format(t_count/build_time))",
"def make_board(self):\n http = urllib3.PoolManager()\n r = http.request('GET', 'http://www.cse.msu.edu/~ruppmatt/itm891/tiles.pickle')\n tiles = pickle.loads(r.data)\n self.assets = tiles\n self.gameboard = Image.new('RGBA', (64*(self.world_width+2), 64*(self.world_height+2)))\n # Laydown land\n for c in range(0,self.world_width):\n for r in range(0, self.world_height):\n x = (c+1)*64\n y = (r+1)*64\n tile_ndx = np.random.choice(len(tiles['land']))\n self.gameboard.paste(tiles['land'][tile_ndx], (x,y)) \n # Laydown water\n for c in range(0,self.world_width):\n x = (c+1)*64\n yy = (self.world_height+1)*64\n self.gameboard.paste(tiles['water']['edge_north'], (x,0))\n self.gameboard.paste(tiles['water']['edge_south'], (x, yy))\n for r in range(0,self.world_height):\n y = (r+1)*64\n xx = (self.world_width+1)*64\n self.gameboard.paste(tiles['water']['edge_west'], (0,y))\n self.gameboard.paste(tiles['water']['edge_east'], (xx,y))\n self.gameboard.paste(tiles['water']['corner_nw'], (0,0))\n self.gameboard.paste(tiles['water']['corner_sw'], (0,(self.world_height+1)*64))\n self.gameboard.paste(tiles['water']['corner_ne'], ((self.world_width+1)*64,0))\n self.gameboard.paste(tiles['water']['corner_se'], ((self.world_width+1)*64,(self.world_height+1)*64))\n \n # Some land lines\n draw = ImageDraw.Draw(self.gameboard)\n for c in range(0,self.world_width-1):\n y_1 = 64\n y_2 = 64*(self.world_height+1)\n x = (2+c)*64\n draw.line([(x,y_1),(x,y_2)], fill='white', width=1)\n for r in range(0,self.world_height-1):\n y = (2+r)*64\n x_1= 64\n x_2 = 64 * (self.world_width+1)\n draw.line([(x_1,y),(x_2,y)], fill='white', width=1)\n return",
"def test_generate_tiles(self):\n tile_list = utils.generate_tiles()\n self.assertEqual(len(tile_list), utils.TILE_COUNT)",
"def __init__(self, width, height):\n self.width =width\n self.height = height\n self.box_width = width/self._BOXES_WIDE\n print 'box width: ', self.box_width\n self.box_height = height/self._BOXES_TALL\n\n self.tiles = []\n self.changes = set()\n y = 0\n for i in range(World._BOXES_TALL):\n y += self.box_height\n x = 0\n self.tiles.append([])\n for j in range(World._BOXES_WIDE):\n x += self.box_width\n tile = Tile(self.changes, x, y, self.box_width, self.box_height)\n self.tiles[i].append(tile)",
"def test_create_tile_puzzle(self):\n p = hw.create_tile_puzzle(3, 3)\n self.assertEqual(p.get_board(), [[1,2,3],[4,5,6],[7,8,0]])\n p = hw.create_tile_puzzle(2, 4)\n self.assertEqual(p.get_board(), [[1,2,3,4],[5,6,7,0]])\n p = hw.create_tile_puzzle(1, 4)\n self.assertEqual(p.get_board(), [[1,2,3,0]])",
"def populate_board(self):\n for row in range(10):\n for col in range(10):\n coord = Coordinate(row, col)\n coord_attack = Coordinate(row, col)\n self.player_table.setItem(row, col, coord)\n self.attack_table.setItem(row, col, coord_attack)",
"def fill_with_random_tiles(self):\n for elem in [x[1] for x in self.tile_grid.values()]:\n self.view.remove(elem)\n tile_grid = {}\n # Fill the data matrix with random tile types\n while True: # Loop until we have a valid table (no imploding lines)\n for x in range(COLS_COUNT):\n for y in range(ROWS_COUNT):\n tile_type, sprite = choice(self.available_tiles), None\n tile_grid[x, y] = tile_type, sprite\n if len(self.get_same_type_lines(tile_grid)) == 0:\n break\n tile_grid = {}\n\n # Build the sprites based on the assigned tile type\n for key, value in tile_grid.items():\n tile_type, sprite = value\n sprite = self.tile_sprite(tile_type, self.to_display(key))\n tile_grid[key] = tile_type, sprite\n self.view.add(sprite)\n\n self.tile_grid = tile_grid",
"def populate_board(self):\n for key, value in self.game.white_pieces.items():\n x_pos = self.width * value.x_pos\n y_pos = self.width * value.y_pos\n img = self.load_image(\"images/\" + value.image, value.starting_position)\n self.place_image_on_canvas(x_pos, y_pos, img, \"images/\" + value.image, value.starting_position)\n for key, value in self.game.black_pieces.items():\n x_pos = self.width * value.x_pos\n y_pos = self.width * value.y_pos\n img = self.load_image(\"images/\" + value.image, value.starting_position)\n self.place_image_on_canvas(x_pos, y_pos, img, \"images/\" + value.image, value.starting_position)",
"def arrange_tiles(self, layer):\n\n # número de tiles en 'x'\n width = self.width\n arranged_tiles = layer.arranged_tiles\n\n row = -1\n\n # convierte una lista en un diccionario\n for col, tile in enumerate(layer.tiles):\n # calcula la ubicación en dos dimensiones (fila y columna) de cada tile,\n # los tiles originalmente están ordenados en línea\n col %= width\n if col == 0:\n row += 1\n\n # excluye los tiles con id 0,\n # id 0 representa un espacio vacío en el tilemap\n if tile != 0:\n arranged_tiles[(row, col)] = tile\n\n # libera la memoria ocupada por la lista de tiles\n layer.tiles = None",
"def fill_grid(self):\n\n for row_margin, row in enumerate(range(self.rows)):\n self.grid.append([])\n\n for col_margin, col in enumerate(range(self.cols)):\n x = col*self.cell_size + col_margin\n y = row*self.cell_size + row_margin\n\n rect = pygame.Rect(x, y, self.cell_size, self.cell_size)\n\n cell = Cell(row, col, rect)\n\n if row == 7 and col == 3:\n cell.root = True\n self.root = cell\n elif row == 7 and col == 16:\n cell.goal = True\n self.goal = cell\n\n self.grid[row].append(cell)",
"def get_tiles(self, numTiles, gameBag):\r\n tiles_picked = gameBag.pick_tiles(numTiles)\r\n for givenTile in tiles_picked:\r\n self.rack.append(givenTile)",
"def setup_board(self):\n\n for row in range(10):\n\n row_list = list()\n\n for column in range(9):\n\n row_list.append(None)\n\n self._board.append(row_list)",
"def enumerate_tiles(self):\n # Iterates through entire game board.\n for row in range(self.rows):\n for col in range(self.cols):\n\n # Doesn't count mines adjacent to mine tiles.\n if self.board[row][col].category == Tiles.mine:\n continue\n mines = 0\n\n # Calculates number of mines surrounding each tile.\n for i in [row-1, row, row+1]:\n for j in [col-1, col, col+1]:\n if (self.valid_tile(i, j) and self.board[i][j].category == Tiles.mine):\n mines += 1\n \n # Sets each game board tile's mine proximity number.\n self.board[row][col] = Tiles(row, col, str(mines))",
"def new_tile(self):\n # replace with your code\n empty_list = []\n counter_1 = 0\n for _ in self._grid:\n counter_2 = 0\n line = _\n for blank in line:\n if blank == 0:\n blank_tile = (counter_1, counter_2)\n empty_list.append(blank_tile)\n counter_2 += 1\n else:\n counter_2 += 1\n counter_1 += 1\n #print empty_list\n \n self._tile = empty_list[random.randrange(len(empty_list))]\n \n value = [2,2,2,2,2,2,2,2,2,4]\n tile_value = value[random.randint(0,9)]\n \n self.set_tile(self._tile[0], self._tile[1], tile_value)",
"def setup_new_board(self):\n\n logger.info(u'setup_new_board()')\n\n self.squares = [[None for j in xrange(8)] for i in xrange(8)]\n \n self.black_checkers = [ch.Checker(u'black', self) for i in xrange(12)]\n self.white_checkers = [ch.Checker(u'white', self) for i in xrange(12)]\n\n u\"\"\" Place checkers in starting squares \"\"\"\n i = 0\n for row in xrange(3):\n for column in xrange(8):\n if self.dark_square((row, column)):\n self.place_checker((row, column), self.white_checkers[i])\n i += 1\n\n i = 0\n for row in xrange(5, 8):\n for column in xrange(8):\n if self.dark_square((row, column)):\n self.place_checker((row, column), self.black_checkers[i])\n i += 1",
"def build_tiles(cls):\n\n LOGGER.debug(\"Building tiles\")\n\n for tile_id in tiledata.TILE_DATA:\n if not Tile.tile_factory(tile_id):\n LOGGER.error(\"Could not construct tile with ID %d\", tile_id)\n sys.exit(1)",
"def __init__(self):\n self.board = [[T.Tile().getColor() for x in range(6)] for y in range(6)]",
"def generate_tiles(self):\n if self.children:\n for child in self.children:\n child.generate_tiles()\n print \"Generating tile for %s using child tiles\" % self.bbox\n self.generate_tile_from_child_tiles()\n else:\n print \"Generating tile for %s using source data\" % self.bbox\n self.generate_tile_from_source()",
"def draw_puzzle():\n # Define Baseboard\n baseboard = pygame.Rect(61, 70, 498, 498) # creates a rectangle object \n\n # Draw Baseboard\n pygame.draw.rect(RENDER_WINDOW, TEXTCOLOR, baseboard)\n\n tiles = GAME_PUZZLE.puzzle # fetch game puzzle\n\n gameboard = [] # mimics the puzzle_board.puzzle\n\n # define first tile position\n start_x = 62 \n start_y = 71\n\n # build a tile for each item in the game puzzle\n for i in range(0,len(tiles)):\n row = []\n for j in range(0, len(tiles[i])):\n\n if tiles[i][j] is not None: # only draw non - blank tile\n new_tile = pygame.Rect(start_x, start_y, 164, 164) # creates a rectangle object\n\n tile_txt = TILE_FONT.render(str(tiles[i][j]), True, TEXTCOLOR) # creates font \n\n row.append(new_tile) # add tile to row in 2d list\n\n pygame.draw.rect(RENDER_WINDOW, BUTTONCOLOR, new_tile) #draw title rectangle\n\n RENDER_WINDOW.blit(tile_txt, (new_tile.x + 40, new_tile.y + 20)) # render text centered on Tile\n else:\n new_tile = pygame.Rect(start_x, start_y, 164, 164) # creates a WHITE rectangle object\n row.append(new_tile)\n pygame.draw.rect(RENDER_WINDOW, TEXTCOLOR, new_tile) #draw title rectangle\n \n \n start_x += 166\n\n gameboard.append(row)\n start_x = 62 # reset for each row\n start_y += 166\n \n # update the global Board\n global BOARD\n BOARD = gameboard",
"def new_tile(self):\n # replace with your code\n pass",
"def test_room_has_tiles(self):\n self.assertEqual(self.room.tile_set.count(), self.room.grid_size ** 2)",
"def make_board(self, ):\n for r in range(self.boardSize):\n for c in range(self.boardSize): # avoid redundant calculation by adding neighbors \"behind\" current cell\n new_cell = Cell(r, c)\n self.board[r][c] = new_cell\n if c > 0: # add left neighbor-cell\n new_cell.add_neighbor(self.board[r][c-1])\n if r > 0: # add above neighbor-cell\n new_cell.add_neighbor(self.board[r-1][c])\n if r > 0 and c < self.boardSize-1: # add right diagonal neighbor-cell\n new_cell.add_neighbor(self.board[r-1][c+1])",
"def new_tile(self):\r\n # replace with your code\r\n empty_square_lists = []\r\n for row in range(self._grid_height):\r\n for col in range(self._grid_width):\r\n if(self.get_tile(row, col) == 0):\r\n empty_square_lists.append((row, col))\r\n \r\n if len(empty_square_lists) == 0:\r\n return \"game over!\"\r\n \r\n random_cell = random.choice(empty_square_lists)\r\n random_cell_row = random_cell[0]\r\n random_cell_col = random_cell[1]\r\n \r\n values = [2] * 90 + [4] * 10\r\n value = random.choice(values)\r\n \r\n self.set_tile(random_cell_row, random_cell_col, value)",
"def reset(self):\r\n # replace with your code\r\n for row in range(0, self._grid_height):\r\n for col in range(0, self._grid_width):\r\n self._grid_tile[row][col] = 0\r\n # at this step, all cells should be available\r\n self.new_tile()\r\n self.new_tile()",
"def __init__(self):\n\n self._length = 8\n self.board = []\n self.columns = \"ABCDEFGH\"\n for colNum in range(0, self._length):\n self.board.append([])\n for rowNum in range(0, self._length):\n self.board[colNum].append(Tile(colNum, rowNum))\n\n self.board[3][3].color = \"blue\"\n self.board[3][4].color = \"red\"\n self.board[4][3].color = \"red\"\n self.board[4][4].color = \"blue\"",
"def readTiles(self):\n TileImage = Image.open(self.Filename).convert(\"RGB\")\n TileIW, TileIH = TileImage.size\n TilesetW, TilesetH = TileIW // self.TileWidth, TileIH // self.TileHeight\n\n for y in range(TilesetH):\n for x in range(TilesetW):\n box = self.TileWidth * x, self.TileHeight * y, self.TileWidth * (x+1), self.TileHeight * (y+1)\n tile = TileImage.crop(box)\n self.List.append(tile)\n\n str = tile.tostring()\n if not str in self.TileDict:\n #print(\"add tile: \", str)\n self.TileDict[str] = len(self.List) - 1\n print(\"tile count: {}, unique count: {}\".format(len(self.List),len(self.TileDict.values())))",
"def load_frame(self):\n world_map = self.data[self.time_point][\"tiles\"]\n self.tiles = []\n for x in range(self.width):\n for y in range(self.height):\n index = x + self.width * y\n tile = world_map[index]\n xpos = x * tile_size\n ypos = y * tile_size\n if tile[\"type\"] == \"Wall\":\n sprite = pyglet.sprite.Sprite(images[\"Wall\"], x=xpos, y=ypos)\n elif tile[\"type\"] == \"SnakeHead\":\n sprite = pyglet.sprite.Sprite(images[\"SnakeHead\"], x=xpos, y=ypos)\n elif tile[\"type\"] == \"SnakeBody\":\n sprite = pyglet.sprite.Sprite(images[\"SnakeBody\"], x=xpos, y=ypos)\n elif tile[\"type\"] == \"Doodah\":\n sprite = pyglet.sprite.Sprite(images[\"Doodah\"], x=xpos, y=ypos)\n elif tile[\"type\"] == \"Blank\":\n sprite = pyglet.sprite.Sprite(images[\"Blank\"], x=xpos, y=ypos)\n self.tiles.append(sprite)",
"def reset(self) -> None:\n self.map = []\n for col in range(self.width):\n self.map.append([])\n for cell in range(self.height):\n if col > 1 and col < self.width - 2:\n if cell == 0:\n # World Barrier - Top Middle\n self.map[col].append(StaticTile('wall_3', self.graphicsLibrary.get('wall_3'), (self.scaleWidth,self.scaleHeight), barrier=True))\n elif cell == self.height - 1:\n # World Barrier - Bottom Middle\n self.map[col].append(StaticTile('wall_12', self.graphicsLibrary.get('wall_12'), (self.scaleWidth,self.scaleHeight), barrier=True))\n else:\n # Playable Map Area\n if (col % 2) != 0 and (cell % 2) == 0:\n # Hard-Barrier Generation\n self.map[col].append(StaticTile('solid', self.graphicsLibrary.get('solid'), (self.scaleWidth,self.scaleHeight), barrier=True))\n elif (col,cell) in self.spawn_buffers:\n # Preserve Potential Spawn Points\n self.map[col].append(StaticTile('terrain', self.graphicsLibrary.get('terrain'), (self.scaleWidth,self.scaleHeight), barrier=False))\n elif random.randint(0, 2) == 0:\n # Soft-Barrier Generation\n self.map[col].append(DynamicTile('destructable_new', self.graphicsLibrary.get('destructable_new'), (self.scaleWidth,self.scaleHeight), destructable=\"True\", barrier=True, death_animation=self.animations_library.get('destructable_death')))\n else:\n # Fill Remaining Terrain\n self.map[col].append(StaticTile('terrain', self.graphicsLibrary.get('terrain'), (self.scaleWidth,self.scaleHeight), barrier=False))\n else:\n # World Barrier - Side Sections\n if col == 0 or col == self.width - 1:\n # Roof\n right_most_columns = False\n if col == self.width - 1:\n right_most_columns = True\n\n if cell == self.height - 1:\n self.map[col].append(StaticTile('wall_10', self.graphicsLibrary.get('wall_10'), (self.scaleWidth,self.scaleHeight), flip_x=right_most_columns, barrier=True))\n elif cell == self.height - 2:\n self.map[col].append(StaticTile('wall_1', self.graphicsLibrary.get('wall_1'), (self.scaleWidth,self.scaleHeight), flip_x=right_most_columns, barrier=True))\n elif cell == 0:\n self.map[col].append(StaticTile('wall_1', self.graphicsLibrary.get('wall_1'), (self.scaleWidth,self.scaleHeight), flip_x=right_most_columns, barrier=True))\n else:\n self.map[col].append(StaticTile('wall_5', self.graphicsLibrary.get('wall_5'), (self.scaleWidth,self.scaleHeight), flip_x=right_most_columns, barrier=True))\n elif col == 1 or col == self.width - 2:\n # Floor \n right_most_columns = False\n if col == self.width - 2:\n right_most_columns = True\n\n if cell == self.height -1:\n self.map[col].append(StaticTile('wall_11', self.graphicsLibrary.get('wall_11'), (self.scaleWidth,self.scaleHeight), flip_x=right_most_columns, barrier=True))\n elif cell == self.height - 2:\n self.map[col].append(StaticTile('wall_9', self.graphicsLibrary.get('wall_9'), (self.scaleWidth,self.scaleHeight), flip_x=right_most_columns, barrier=True))\n elif cell == 0:\n self.map[col].append(StaticTile('wall_2', self.graphicsLibrary.get('wall_2'), (self.scaleWidth,self.scaleHeight), flip_x=right_most_columns, barrier=True))\n elif cell == 1:\n self.map[col].append(StaticTile('wall_6', self.graphicsLibrary.get('wall_6'), (self.scaleWidth,self.scaleHeight), flip_x=right_most_columns, barrier=True))\n else:\n self.map[col].append(StaticTile('wall_7', self.graphicsLibrary.get('wall_7'), (self.scaleWidth,self.scaleHeight), flip_x=right_most_columns, barrier=True))\n self.map[col][cell].place_at(topleft=(self.scaleWidth * col, self.scaleHeight * cell))",
"def __init__(self, tiles):\n self.tiles = tiles",
"def set_pieces(self):\n\n for i in range(len(self._game_board)):\n\n # Row 1\n if i == 0:\n for ii in range(len(self._game_board[i])):\n if ii == 0 or ii == 8:\n self._game_board[i][ii] = Chariot(\"black\", \"BCHA\")\n self._game_board[i][ii].update_location([i, ii])\n if ii == 1 or ii == 7:\n self._game_board[i][ii] = Horse(\"black\", \" BH \")\n self._game_board[i][ii].update_location([i, ii])\n if ii == 2 or ii == 6:\n self._game_board[i][ii] = Elephant(\"black\", \" BE \")\n self._game_board[i][ii].update_location([i, ii])\n if ii == 3 or ii == 5:\n self._game_board[i][ii] = Advisor(\"black\", \" BA \")\n self._game_board[i][ii].update_location([i, ii])\n if ii == 4:\n self._game_board[i][ii] = General(\"black\", \" BG \")\n self._game_board[i][ii].update_location([i, ii])\n\n # Row 3\n if i == 2:\n for ii in range(len(self._game_board[i])):\n if ii == 1 or ii == 7:\n self._game_board[i][ii] = Cannon(\"black\", \"BCAN\")\n self._game_board[i][ii].update_location([i, ii])\n\n # Row 4\n if i == 3:\n for ii in range(len(self._game_board[i])):\n if ii % 2 == 0:\n self._game_board[i][ii] = Soldier(\"black\", \"BSOL\")\n self._game_board[i][ii].update_location([i, ii])\n\n # Row 7\n if i == 6:\n for ii in range(len(self._game_board[i])):\n if ii % 2 == 0:\n self._game_board[i][ii] = Soldier(\"red\", \"RSOL\")\n self._game_board[i][ii].update_location([i, ii])\n\n # Row 8\n if i == 7:\n for ii in range(len(self._game_board[i])):\n if ii == 1 or ii == 7:\n self._game_board[i][ii] = Cannon(\"red\", \"RCAN\")\n self._game_board[i][ii].update_location([i, ii])\n\n # Row 10\n if i == 9:\n for ii in range(len(self._game_board[i])):\n if ii == 0 or ii == 8:\n self._game_board[i][ii] = Chariot(\"red\", \"RCHA\")\n self._game_board[i][ii].update_location([i, ii])\n if ii == 1 or ii == 7:\n self._game_board[i][ii] = Horse(\"red\", \" RH \")\n self._game_board[i][ii].update_location([i, ii])\n if ii == 2 or ii == 6:\n self._game_board[i][ii] = Elephant(\"red\", \" RE \")\n self._game_board[i][ii].update_location([i, ii])\n if ii == 3 or ii == 5:\n self._game_board[i][ii] = Advisor(\"red\", \" RA \")\n self._game_board[i][ii].update_location([i, ii])\n if ii == 4:\n self._game_board[i][ii] = General(\"red\", \" RG \")\n self._game_board[i][ii].update_location([i, ii])"
] | [
"0.7357654",
"0.6758766",
"0.6640157",
"0.6556963",
"0.64719063",
"0.64707685",
"0.64605796",
"0.64567256",
"0.6433377",
"0.64320713",
"0.6412151",
"0.63864046",
"0.63160187",
"0.6314387",
"0.6299924",
"0.6276776",
"0.6269016",
"0.62673545",
"0.6259894",
"0.6252422",
"0.6234899",
"0.6224357",
"0.62113553",
"0.62097675",
"0.6200525",
"0.61812073",
"0.6178238",
"0.61780614",
"0.6169277",
"0.61636084"
] | 0.73305285 | 1 |
Changes smileButton image to self.images[num] | def changeSmile(self, num, event=None):
self.smileButton.configure(image=self.images[num]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def change_image(self):\n image_lst = [\"images/hangman01.png\",\n \"images/hangman02.png\",\n \"images/hangman03.png\",\n \"images/hangman04.png\",\n \"images/hangman05.png\",\n \"images/hangman06.png\",\n \"images/hangman07.png\"]\n self.strikes = self.strikes % len(image_lst)\n self.photo = PhotoImage(file=image_lst[self.strikes])\n self.canvas.create_image(340, 240, image=self.photo)",
"def update_imgs(self):\n\n for b in self.gamebuttons:\n b.update_img()\n self.start_but.update_img()",
"def select_sprite(self, n):\n self.img = self.sub_images[n]\n self.last_sprite = n",
"def set_num_images(self,num_images):\n for roi in self.rois:\n roi.set_num_images(num_images)\n self.num_images = num_images",
"def _update_buttons(self):\n for index, piece in enumerate(self.game_board.board):\n self.board_button_list[index].config(\n image=self.image_dict.get(f\"{piece.team_id}{piece.piece_id}\")\n )\n\n self.update()",
"def update_image(self):\n if self.updated_sprite_list:\n self.image = self.increment_sprite_index(True)\n self.updated_sprite_list = False\n self.update_count_down = self.update_frames\n self.redraw = True\n elif self.update_frames == 0:\n return\n elif self.update_count_down == 0:\n if self.sprite_index == 2:\n self.remove_action(Action.crouch_attack)\n self.image = self.increment_sprite_index()\n self.update_count_down = self.update_frames\n self.redraw = True\n else:\n self.update_count_down -= 1",
"def boutton(self,img1,x,y):\r\n self.button.append(self.creat_image(img1,x,y))",
"def change_button_img_to_x(self, row_seq):\n change_img = self.x_img_dict[row_seq]\n idx_list = [int(i)-1 for i in str(row_seq)]\n for button_idx in idx_list:\n self.button_list[button_idx].config(image=change_img)",
"def refreshImages(self):\n fileName1 = \"DECK/\" + str(self.card1) + \".gif\"\n fileName2 = \"DECK/\" + str(self.card2) + \".gif\"\n fileName3 = \"DECK/\" + str('b') + \".gif\"\n self.image1 = PhotoImage(file = fileName1)\n self.cardLabel1[\"image\"] = self.image1\n self.image2 = PhotoImage(file = fileName2)\n self.cardLabel2[\"image\"] = self.image2\n self.image3 = PhotoImage(file = fileName3)\n self.cardLabel3[\"image\"] = self.image3",
"def update(self):\n self.imagecounter +=1\n if self.imagecounter > 7:\n self.imagecounter = 0\n self.image = pygame.image.load(self.pictures[self.imagecounter])\n self.rect = self.image.get_rect()\n self.rect.left = self.x\n self.rect.top = self.y",
"def __initBtnImages__(self) -> None:\n self._images = [\n pygame.Surface((self.rect.width, self.rect.height)),\n pygame.Surface((self.rect.width, self.rect.height)),\n ]\n self._images[ButtonImages.DEFAULT_IMAGE.value].fill(self.notClickedBtnColor)\n self._images[ButtonImages.CLICKING_IMAGE.value].fill(self.clickedBtnColor)\n self.textSurface = self.font.render(self.text, False, (0, 0, 0))\n self.textSurfaceDest = (self.rect.centerx - (self.textSurface.get_width() / 2),\n self.rect.centery - (self.textSurface.get_height() / 2))\n self._images[0].blit(self.textSurface, self.textSurfaceDest)\n self._images[1].blit(self.textSurface, self.textSurfaceDest)",
"def setBtnIcon(self):\n self.setIcon(QtGui.QIcon(self.movie.currentPixmap()))\n self.setIconSize(QtCore.QSize(self.size[0], self.size[1]))",
"def change_button_img_to_o(self, row_seq):\n change_img = self.o_img_dict[row_seq]\n idx_list = [int(i)-1 for i in str(row_seq)]\n for button_idx in idx_list:\n self.button_list[button_idx].config(image=change_img)",
"def setIcon(self,icon,index=0):\n self.rb[index].setIcon(icon)",
"def setIconImage(*args):",
"def draw_board(self, board: BoardModel):\n self._picture=[]\n self._board = board\n self.delete(tk.ALL)\n\n for i in range(self._grid_size):\n for j in range(self._grid_size):\n char = self._board.get_game()[self.position_to_index((j, i), self._grid_size)]\n x1 = i * 60\n y1 = j * 60\n x2 = x1 + 60\n y2 = y1 + 60\n\n #insert the image of unexposed cell\n if char == UNEXPOSED:\n photo=tk.PhotoImage(file=\"./images/unrevealed.gif\")\n\n\n # insert the image of exposed cell\n elif char == EXPOSED:\n photo=tk.PhotoImage(file=\"./images/zero_adjacent.gif\")\n\n # insert the image of cell of pokemon\n elif char == POKEMON:\n pokemon_list=[\"./images/pokemon_sprites/charizard.gif\",\n \"./images/pokemon_sprites/cyndaquil.gif\",\n \"./images/pokemon_sprites/pikachu.gif\",\n \"./images/pokemon_sprites/psyduck.gif\",\n \"./images/pokemon_sprites/togepi.gif\"]\n a=random.choice(pokemon_list)\n photo = tk.PhotoImage(file=a)\n print(a)\n\n # insert the image of cell of digit\n elif char.isdigit():\n if char==\"1\":\n photo = tk.PhotoImage(file=\"./images/one_adjacent.gif\")\n\n elif char==\"2\":\n photo = tk.PhotoImage(file=\"./images/two_adjacent.gif\")\n\n elif char==\"3\":\n photo = tk.PhotoImage(file=\"./images/three_adjacent.gif\")\n\n elif char==\"4\":\n photo = tk.PhotoImage(file=\"./images/four_adjacent.gif\")\n\n elif char==\"5\":\n photo = tk.PhotoImage(file=\"./images/five_adjacent.gif\")\n\n elif char==\"6\":\n photo = tk.PhotoImage(file=\"./images/six_adjacent.gif\")\n\n elif char==\"7\":\n photo = tk.PhotoImage(file=\"./images/seven_adjacent.gif\")\n\n else:\n photo = tk.PhotoImage(file=\"./images/eight_adjacent.gif\")\n\n # insert the image of pokeball\n elif char == FLAG:\n photo = tk.PhotoImage(file=\"./images/pokeball.gif\")\n\n self.create_image(x1 + 60 / 2, y1 + 60 / 2, image=photo)\n self._picture.append(photo)\n\n self.bind_clicks()",
"def set_icon(self, icon_name):\n # cria o icone, passando o tamanho máximo\n icon = pygame.Surface((32, 32))\n\n # define a cor de fundo do icone, preto, ficará transparente\n icon.set_colorkey((0, 0, 0))\n\n # carrega o img do icone\n self.icon_image = pygame.image.load('images/' + str(icon_name))\n\n # mapeia os pixels da imagem carregada para o icon(Surface) gerado\n for i in range(0, 32):\n for j in range(0, 32):\n icon.set_at((i, j), self.icon_image.get_at((i, j)))\n\n # define o icon na tela do jogo\n pygame.display.set_icon(icon)",
"def _update_image(self):\n button = self.buttons.checkedButton()\n if button is None:\n return\n\n button.click()",
"def set_sprite(self, image):\n self.current_sprite = image\n self.draw_alpha()",
"def update_num_images(self,num_images):\n if (num_images != None) and (num_images != self.num_images):\n for group in self.roi_groups:\n group.set_num_images(num_images)\n\n for _ in range(num_images,len(self.copy_im_threshs)): # delete unneeded copy im data\n self.copy_im_threshs.pop()\n for _ in range(len(self.copy_im_threshs), num_images): # make new copy im data\n self.copy_im_threshs.append(None)\n\n self.next_image = 0\n self.num_images = num_images\n self.signal_status_message.emit('Set number of images to {}'.format(self.num_images))\n self.signal_next_image_num.emit(self.next_image)\n self.signal_num_images.emit(self.num_images)",
"def update_image(self):\n self.image = Image.fromarray(self.img)",
"def set_next_image(self, image):\n raise NotImplementedError",
"def update_image(self):\n if self.filenames:\n pos = self.slider.value()\n image = read_tiff(self.filenames[pos])\n self.image_item.setImage(image)",
"def place_images(self, final_list, points):\n\t\tfor i in range(8): \n # Please change this (8) into a class-level variable --KOH\n\t\t\timage_object = final_list[i]\n#\t\tif type(image_object) == 'CorrectImage':\n#\t\t\t\tself.correct = [i, points[i]]\n\t\t\timage = pygame.image.load(image_object.file_path)\n # Why can't these be stored as a property of the class --KOH\n\t\t\timagerect = image.get_rect()\n\t\t\treimage = pygame.transform.scale(image, image_object.size)\n\t\t\tself.screen.blit(reimage, points[i])",
"def config_image(self):\n group = self.make_ui_group(False,\n self.images[self.image_num].split('.')[0])\n orig_image, prev_image = self.image_num, self.image_num\n\n while True:\n action_left, action_right = (self.button_left.action(),\n self.button_right.action())\n if action_left is RichButton.HOLD:\n return self.image_num is not orig_image, False # Resume config\n if action_right is RichButton.HOLD:\n return self.image_num is not orig_image, True # Resume paint\n if action_left is RichButton.TAP:\n self.image_num = (self.image_num - 1) % len(self.images)\n elif action_right is RichButton.TAP:\n self.image_num = (self.image_num + 1) % len(self.images)\n\n if self.image_num is not prev_image:\n group.pop()\n group.append(centered_label(\n self.images[self.image_num].split('.')[0], 40, 3))\n prev_image = self.image_num",
"def SetImage(self, image, which):\r\n\r\n self._images[which] = image",
"def setImage(self, img):\n self.image = img\n self.repaint()",
"def change_button_img_to_null(self, null_img=None):\n null_img = self.null_img\n self.button1.configure(image=null_img)\n self.button2.configure(image=null_img)\n self.button3.configure(image=null_img)\n\n self.button4.configure(image=null_img)\n self.button5.configure(image=null_img)\n self.button6.configure(image=null_img)\n\n self.button7.configure(image=null_img)\n self.button8.configure(image=null_img)\n self.button9.configure(image=null_img)",
"def changeImageTab(self, idTag):\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(os.path.join(GG.utils.PATH_EDITOR_INTERFACE, self.activeOption + \"_back.png\"))\n self.imagesTag[self.activeOption].picture = ocempgui.draw.Image.load_image(imgPath)\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(os.path.join(GG.utils.PATH_EDITOR_INTERFACE, idTag + \"_front.png\"))\n self.imagesTag[idTag].picture = ocempgui.draw.Image.load_image(imgPath)",
"def __rename_images(self):\n for idx, image in enumerate(self._values):\n image.partname = '/ppt/media/image%d%s' % (idx+1, image.ext)"
] | [
"0.7132109",
"0.6939925",
"0.649007",
"0.62246907",
"0.6144239",
"0.6120769",
"0.60779893",
"0.60716736",
"0.60617447",
"0.6057991",
"0.6031894",
"0.6001782",
"0.59618104",
"0.5947578",
"0.59292924",
"0.57726",
"0.5738294",
"0.5729699",
"0.5716061",
"0.5704813",
"0.57009625",
"0.5675861",
"0.56521714",
"0.56254524",
"0.56154764",
"0.5595857",
"0.5595464",
"0.55940795",
"0.5592227",
"0.55894405"
] | 0.8161651 | 0 |
Returns a list of Tiles adjacent to self.tiles[row][col] | def getAdjacentTiles(self, row, col):
adjacency = []
if row-1 >= 0:
if col-1 >= 0: adjacency.append(self.tiles[row-1][col-1])
if col+1 < self.cols: adjacency.append(self.tiles[row-1][col+1])
adjacency.append(self.tiles[row-1][col])
if col-1 >= 0: adjacency.append(self.tiles[row][col-1])
if col+1 < self.cols: adjacency.append(self.tiles[row][col+1])
if row+1 < self.rows:
if col-1 >= 0: adjacency.append(self.tiles[row+1][col-1])
if col+1 < self.cols: adjacency.append(self.tiles[row+1][col+1])
adjacency.append(self.tiles[row+1][col])
return adjacency | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def adjacent_tiles(self,tile,pattern):\n\n # Initialize the list of tiles to return\n adj_tiles = []\n\n # Find the row and column of the input tile\n for i in self.tilelist:\n for j in i:\n if j == tile:\n row = self.tilelist.index(i)\n column = self.tilelist[row].index(j)\n\n # Define functions for the 2 distinct patterns\n def plus_sign(self,row,column):\n nonlocal adj_tiles\n if row - 1 >= 0:\n adj_tiles += [self.tilelist[row - 1][column]]\n if row + 1 != len(self.tilelist):\n adj_tiles += [self.tilelist[row + 1][column]]\n if column - 1 >= 0 :\n adj_tiles += [self.tilelist[row][column - 1]]\n if column + 1 != len(self.tilelist[row]):\n adj_tiles += [self.tilelist[row][column + 1]]\n\n def diagonal(self,row,column):\n nonlocal adj_tiles\n if column - 1 >= 0:\n if row - 1 >= 0:\n adj_tiles += [self.tilelist[row - 1][column - 1]]\n if row + 1 != len(self.tilelist):\n adj_tiles += [self.tilelist[row + 1][column - 1]]\n if column + 1 != len(self.tilelist[row]):\n if row - 1 >= 0:\n adj_tiles += [self.tilelist[row - 1][column + 1]]\n if row + 1 != len(self.tilelist):\n adj_tiles += [self.tilelist[row + 1][column + 1]]\n\n # Return the tiles that form a plus sign with the given input tile\n if pattern == 'p':\n plus_sign(self,row,column)\n\n # Return the tiles touching the four corners of the input tile\n elif pattern == 'x':\n diagonal(self,row,column)\n\n # Return all of the tiles surrounding the input tile\n elif pattern == 'b':\n plus_sign(self,row,column)\n diagonal(self,row,column)\n\n return adj_tiles",
"def get_adjacent(self):\n\n def get(incell, loc):\n x = incell.x + loc[0]\n y = incell.y + loc[1]\n if x >= incell.field.width or x < 0:\n return None\n if y >= incell.field.height or y < 0:\n return None\n return incell.field.board[x][y]\n\n touching = [self.above(), self.below(), self.right(), self.left()]\n corner_deltas = [[-1, -1], [-1, 1], [1, -1], [1, 1]]\n touching += [get(self, delt) for delt in corner_deltas]\n return [x for x in touching if x]",
"def adjacent(self):\n x, y = self.opentile\n return (x - 1, y), (x + 1, y), (x, y - 1), (x, y + 1) # left, right, up, down",
"def neighbors(self):\n \n # find 0 - blank square\n \n x0 = None\n y0 = None\n \n for i in range(4):\n for j in range(4):\n if self.get_tile(i,j) == 0:\n y0 = i\n x0 = j\n\n if x0 == None or y0 == None:\n return []\n \n neighbor_list = []\n \n # move 0 to the right\n if x0 < 3:\n new_position = Position(self.tiles)\n temp = new_position.get_tile(y0,x0+1)\n new_position.set_tile(y0,x0+1,0)\n new_position.set_tile(y0,x0,temp)\n new_position.directiontomoveto = 'r'\n neighbor_list.append(new_position)\n # move 0 to the left\n if x0 > 0:\n new_position = Position(self.tiles)\n temp = new_position.get_tile(y0,x0-1)\n new_position.set_tile(y0,x0-1,0)\n new_position.set_tile(y0,x0,temp)\n new_position.directiontomoveto = 'l'\n neighbor_list.append(new_position)\n # move 0 up\n if y0 > 0:\n new_position = Position(self.tiles)\n temp = new_position.get_tile(y0-1,x0)\n new_position.set_tile(y0-1,x0,0)\n new_position.set_tile(y0,x0,temp)\n new_position.directiontomoveto = 'u'\n neighbor_list.append(new_position)\n # move 0 down\n if y0 < 3:\n new_position = Position(self.tiles)\n temp = new_position.get_tile(y0+1,x0)\n new_position.set_tile(y0+1,x0,0)\n new_position.set_tile(y0,x0,temp)\n new_position.directiontomoveto = 'd'\n neighbor_list.append(new_position)\n \n return neighbor_list",
"def get_neighbours(self, grid):\n\t\tfor diff in ((-1, 0), (1, 0), (0, -1), (0, 1)):\n\t\t\tres = Vector((self.row, self.col)) + diff\n\t\t\tif res[0] >= 0 and res[1] >= 0 and res[0] < len(grid) and res[1] < len(grid[0]):\n\t\t\t\tyield grid[res[0]][res[1]]",
"def get_neighbours(self, row, col):\n neighbour_location_diffs = [(-1, -1),\n ( 0, -1),\n ( 1, -1),\n ( 1, 0),\n ( 1, 1),\n ( 0, 1),\n (-1, 1),\n (-1, 0)]\n neighbours = []\n for diff in neighbour_location_diffs:\n if (row + diff[0] >= 0 and\n row + diff[0] < self.height and\n col + diff[1] >= 0 and\n col + diff[1] < self.width):\n neighbours.append(self.cells[row + diff[0]][col + diff[1]])\n return neighbours",
"def get_neighbours(self, cell: Position) -> Iterable[Position]:\n x, y = cell\n\n return [\n (x - 1, y - 1), (x, y - 1), (x + 1, y - 1),\n (x - 1, y), (x + 1, y),\n (x - 1, y + 1), (x, y + 1), (x + 1, y + 1),\n ]",
"def iter_grid_tiles(self):\n all_points = self.grid[0].union(self.grid[1], self.grid[2], {self.position})\n min_x = min(p.x for p in all_points)\n min_y = min(p.y for p in all_points)\n\n if min_x < 0:\n xoffset = -min_x\n elif min_x == 0:\n xoffset = 0\n elif min_x > 0:\n xoffset = min_x\n if min_y < 0:\n yoffset = -min_y\n elif min_y == 0:\n yoffset = 0\n elif min_y > 0:\n yoffset = min_y\n origin = Point(0 + xoffset, 0 + yoffset)\n position = Point(self.position.x + xoffset, self.position.y + yoffset)\n for tile_type in (0, 1, 2):\n for point in self.grid[tile_type]:\n newpoint = Point(point.x + xoffset, point.y + yoffset)\n if newpoint not in (origin, position):\n yield newpoint.x, newpoint.y, tile_type\n yield origin.x, origin.y , 4\n yield position.x, position.y, 3",
"def get_tiles(self):\n\n tiles = []\n for x in range(self.position[0],\n self.position[0] + CAR_LENGTH if self.is_horizontal else self.position[0] + CAR_WIDTH):\n for y in range(self.position[1],\n self.position[1] + CAR_WIDTH if self.is_horizontal else self.position[1] + CAR_LENGTH):\n tiles.append((x, y))\n\n return tiles",
"def get_neighbors(self) -> List['games.saloon.tile.Tile']:\n neighbors = []\n\n for direction in Tile.directions:\n neighbor = getattr(self, \"tile_\" + direction.lower())\n if neighbor:\n neighbors.append(neighbor)\n\n return neighbors",
"def get_all_neighbor_coords(tiles):\n return [add(tile, neighbor) for tile in tiles for neighbor in NEIGHBORS]",
"def findNeighbours(self):\n neighbours = []\n\n for i in range(self.xCoordinate - 1, self.xCoordinate + 2):\n for j in range(self.yCoordinate - 1, self.yCoordinate + 2):\n if (not (i == self.xCoordinate and j == self.yCoordinate)) and (0 <= i <= 394 and 0 <= j <= 499):\n neighbours.append(PixelPosition(i, j))\n\n return neighbours",
"def getNeighbors(self, row, col):\n neighbors = []\n for deltaRow in range(-1, 2):\n for deltaCol in range(-1, 2):\n if not (deltaRow == 0 and deltaCol == 0) and self.inBoard(row + deltaRow, col + deltaCol):\n neighbors += [(row + deltaRow, col + deltaCol)]\n return neighbors",
"def get_cells(self):\n self.list = [self.origin]\n\n for i in range(1, self.size):\n if(self.direction ==self.direction.RIGHT):\n self.list.append((self.origin[0], self.origin[1]+i))\n elif(self.direction ==self.direction.DOWN):\n self.list.append((self.origin[0]-i, self.origin[1]))\n\n return self.list",
"def neighbors(self, cell):\n x = cell.x\n y = cell.y\n for new_x, new_y in [(x, y - 1), (x, y + 1), (x - 1, y), (x + 1, y)]:\n neighbor = self[new_x, new_y]\n if neighbor is not None:\n yield neighbor",
"def eight_neighbors(self, row, col):\n ans = []\n if row > 0:\n ans.append((row - 1, col))\n if row < self._grid_height - 1:\n ans.append((row + 1, col))\n if col > 0:\n ans.append((row, col - 1))\n if col < self._grid_width - 1:\n ans.append((row, col + 1))\n if (row > 0) and (col > 0):\n ans.append((row - 1, col - 1))\n if (row > 0) and (col < self._grid_width - 1):\n ans.append((row - 1, col + 1))\n if (row < self._grid_height - 1) and (col > 0):\n ans.append((row + 1, col - 1))\n if (row < self._grid_height - 1) and (col < self._grid_width - 1):\n ans.append((row + 1, col + 1))\n return ans",
"def get_neighbors(self):\n return list(map(self.game.square, [self.position - self.game.rules[\"row_len\"], self.position + 1, self.position + self.game.rules[\"row_len\"], self.position - 1]))",
"def enumerate_tiles(self):\n # Iterates through entire game board.\n for row in range(self.rows):\n for col in range(self.cols):\n\n # Doesn't count mines adjacent to mine tiles.\n if self.board[row][col].category == Tiles.mine:\n continue\n mines = 0\n\n # Calculates number of mines surrounding each tile.\n for i in [row-1, row, row+1]:\n for j in [col-1, col, col+1]:\n if (self.valid_tile(i, j) and self.board[i][j].category == Tiles.mine):\n mines += 1\n \n # Sets each game board tile's mine proximity number.\n self.board[row][col] = Tiles(row, col, str(mines))",
"def find_open_tiles(self, arena, units):\r\n tiles = []\r\n for x, y in [(self.x+1, self.y), (self.x, self.y+1), (self.x-1, self.y), (self.x, self.y-1)]:\r\n if arena[x][y] == '.':\r\n tiles.append((x, y))\r\n return tiles",
"def cell_neighbours(self, x, y):\n if self.maze_map[y][x]:\n return set()\n neighbours = set()\n for (direction, ((i, j), dummy)) in MazeGraph.DIRECTIONS.items():\n xi, yj = (x + i) % self.width, (y + j) % self.height\n if not self.maze_map[yj][xi]:\n neighbours.add((direction, (xi, yj)))\n return neighbours",
"def findImmediateNeighbours(self):\n immediateNeighbours = []\n\n if self.xCoordinate - 1 > 0:\n immediateNeighbours.append(PixelPosition(self.xCoordinate - 1, self.yCoordinate))\n\n if self.xCoordinate + 1 < 395:\n immediateNeighbours.append(PixelPosition(self.xCoordinate + 1, self.yCoordinate))\n\n if self.yCoordinate + 1 < 500:\n immediateNeighbours.append(PixelPosition(self.xCoordinate, self.yCoordinate + 1))\n\n if self.yCoordinate - 1 > 0:\n immediateNeighbours.append(PixelPosition(self.xCoordinate, self.yCoordinate - 1))\n\n return immediateNeighbours",
"def find_tiles(self):\n lat1, lat2 = self.bbox.south, self.bbox.north\n lon1, lon2 = self.bbox.west, self.bbox.east\n # convert to geographic bounding box\n minlat, minlon = min(lat1, lat2), min(lon1, lon2)\n maxlat, maxlon = max(lat1, lat2), max(lon1, lon2)\n\n # convert to tile-space bounding box\n _, xmin, ymin = self.mercator(maxlat, minlon, self.zoom)\n _, xmax, ymax = self.mercator(minlat, maxlon, self.zoom)\n\n # generate a list of tiles\n xs, ys = range(xmin, xmax + 1), range(ymin, ymax + 1)\n tile_list = [(self.zoom, x, y) for (y, x) in product(ys, xs)]\n\n return tile_list",
"def generate_nearby_cells(self):\n for y in range(len(self.island_map)):\n for x in range(len(self.island_map[y])):\n list_of_nearby_cells = []\n\n if y != 0:\n self.generate_cell_above(x, y, list_of_nearby_cells)\n\n if x != 0:\n self.generate_cell_left(x, y, list_of_nearby_cells)\n\n if y != len(self.island_map)-1:\n self.generate_cell_below(x, y, list_of_nearby_cells)\n\n if x != len(self.island_map[y])-1:\n self.generate_cell_right(x, y, list_of_nearby_cells)\n\n self.island_map[y][x].nearby_cells = list_of_nearby_cells",
"def traversed_list(self, start_cell, direction):\n lst = []\n if direction == UP or direction == DOWN:\n for step in range(self._grid_height):\n row = start_cell[0] + step * OFFSETS[direction][0]\n col = start_cell[1] + step * OFFSETS[direction][1]\n lst.append(self.get_tile(row, col))\n else:\n for step in range(self._grid_width):\n row = start_cell[0] + step * OFFSETS[direction][0]\n col = start_cell[1] + step * OFFSETS[direction][1]\n lst.append(self.get_tile(row, col))\n return lst",
"def get_neighbours(self, coords):\n\n\t dxdy = [(-1,-2),(0,-2),(1,-2),(-2,-1),(-1,-1),(0,-1),(1,-1),(2,-1),\n\t (-2,0),(-1,0),(1,0),(2,0),(-2,1),(-1,1),(0,1),(1,1),(2,1),\n\t (-1,2),(0,2),(1,2),(0,0)]\n\t neighbours = []\n\t for dx, dy in dxdy:\n\t neighbour_coords = coords[0] + dx, coords[1] + dy\n\t if not (0 <= neighbour_coords[0] < self.nx and\n\t 0 <= neighbour_coords[1] < self.ny):\n\t # We're off the grid: no neighbours here.\n\t continue\n\t neighbour_cell = self.cells[neighbour_coords]\n\t if neighbour_cell is not None:\n\t # This cell is occupied: store this index of the contained point.\n\t neighbours.append(neighbour_cell)\n\t return neighbours",
"def get_further_neighbours(self, cell):\n\t\tneighs = self.get_neighbours(cell)\n\t\ti, j = cell.find_id()\n\t\tneighbours = []\n\t\tfor neigh in neighs:\n\t\t\tx, y = neigh.find_id()\n\t\t\tif abs(x-i)+abs(y-j) > 1 or abs(x-i)+abs(y-j) == 0: \n\t\t\t\tneighbours.append(self.space[y,x])\n\t\treturn neighbours",
"def list_tiles_covering_land(self):\n\n land_tiles = Equi7Grid._static_data[self.core.tag][\"coverland\"][\n self.core.tiletype]\n return list(land_tiles)",
"def neighbors(self, include_water=False):\n cells = []\n for delta in DIRECTION_DELTAS.values():\n row = self.location[ROW_INDEX] + delta[ROW_INDEX]\n col = self.location[COL_INDEX] + delta[COL_INDEX]\n if (col >= 0) and (col < self.world.get_dim()[0]) and (row >= 0) and (row < self.world.get_dim()[1]):\n cell = self.world.get_cell(row, col)\n if include_water or cell.get_water_level() == 0:\n cells.append(cell)\n return cells",
"def all_chebyshev_neighbours(self):\n return [Point(self.x - 1, self.y - 1),\n Point(self.x - 1, self.y),\n Point(self.x - 1, self.y + 1),\n Point(self.x, self.y - 1),\n Point(self.x, self.y + 1),\n Point(self.x + 1, self.y - 1),\n Point(self.x + 1, self.y),\n Point(self.x + 1, self.y + 1)]",
"def get_neighbours(x, y, board):\n return [get_left(x, y, board), get_upper(x, y, board), get_right(x, y, board), get_lower(x, y, board)]"
] | [
"0.72973114",
"0.71875554",
"0.7148474",
"0.71376336",
"0.705874",
"0.6981794",
"0.69214076",
"0.68788856",
"0.68238246",
"0.68225366",
"0.67235833",
"0.6686271",
"0.6669075",
"0.6657436",
"0.6635436",
"0.6622865",
"0.65752494",
"0.6510529",
"0.6490984",
"0.6426009",
"0.6418801",
"0.641845",
"0.6415256",
"0.6405736",
"0.6397115",
"0.6375212",
"0.63437825",
"0.63291955",
"0.63129056",
"0.6296409"
] | 0.80199665 | 0 |
Changes the image on the clicked Tile accordingly i.e. if Tile not isFlagged() Changes the Smiley button's image so it animates on clicks also calls setUpBombs() if this is the first left click of the game | def pressTile(self, event):
clickedTile = event.widget
if clickedTile.isInPlay(): self.changeSmile(2)
if not clickedTile.isFlagged():
clickedTile.buttonPress()
if not self.minesArmed and event.num == 1:
self.setUpBombs(event) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def buttonPress(self):\n if self.inPlay and not self.shown:\n self.configure(image = Tile.images[0])",
"def update_image(self):\n if self.updated_sprite_list:\n self.image = self.increment_sprite_index(True)\n self.updated_sprite_list = False\n self.update_count_down = self.update_frames\n self.redraw = True\n elif self.update_frames == 0:\n return\n elif self.update_count_down == 0:\n if self.sprite_index == 2:\n self.remove_action(Action.crouch_attack)\n self.image = self.increment_sprite_index()\n self.update_count_down = self.update_frames\n self.redraw = True\n else:\n self.update_count_down -= 1",
"def pressAdjTiles(self, event):\n clickedTile = event.widget\n if clickedTile.isInPlay(): self.changeSmile(2)\n for adjTile in self.getAdjacentTiles(clickedTile.row, clickedTile.col):\n if not adjTile.isFlagged(): adjTile.buttonPress()",
"def update_imgs(self):\n\n for b in self.gamebuttons:\n b.update_img()\n self.start_but.update_img()",
"def toggleFlag(self, event): \n clicked = event.widget\n if clicked.isInPlay(): self.changeSmile(1)\n value = clicked.setFlag()\n for adjTile in self.getAdjacentTiles(clicked.row, clicked.col):\n adjTile.numFlags += value\n self.numFlags += value\n self.flagLabel.configure(text=\"Flags: \"+str(self.numFlags))",
"def revealBombs(self, win):\n for row in self.tiles:\n for tile in row:\n tile.inPlay = False\n if tile.isMine():\n if win:\n #flag non-flagged mines after winning\n if not tile.isFlagged():\n tile.configure(image=Tile.images[11])\n self.numFlags += 1\n else:\n #show unexploded mines after losing \n if not tile.isShown():\n tile.configure(image=Tile.images[9])\n #if incorrectly flagged, mark as such \n elif tile.isFlagged():\n tile.configure(image=Tile.images[12])",
"def changeSmile(self, num, event=None):\n self.smileButton.configure(image=self.images[num])",
"def change_image(self):\n image_lst = [\"images/hangman01.png\",\n \"images/hangman02.png\",\n \"images/hangman03.png\",\n \"images/hangman04.png\",\n \"images/hangman05.png\",\n \"images/hangman06.png\",\n \"images/hangman07.png\"]\n self.strikes = self.strikes % len(image_lst)\n self.photo = PhotoImage(file=image_lst[self.strikes])\n self.canvas.create_image(340, 240, image=self.photo)",
"def showTile(self, event):\n clicked = event.widget\n if clicked.isInPlay():\n self.changeSmile(1)\n returned = clicked.show()\n if returned == 1 and clicked.isZero():\n returned += self.cascadeShow(clicked)\n self.checkEnd(returned)",
"def setFlag(self):\n if self.inPlay and not self.shown:\n self.flag = not(self.flag)\n image_index = 11 if self.flag else 10\n self.configure(image = Tile.images[image_index])\n return 1 if self.flag else -1\n return 0",
"def replay(self, event=None):\n #reset relevant variables\n self.numChecked = 0\n self.numFlags = 0\n self.minesArmed = False\n self.startTime = None\n\n #reset labels\n self.mineLabel.configure(text=\"Mines: \"+str(self.numMines))\n self.smileButton.configure(image=self.images[1])\n self.flagLabel.configure(text=\"Flags: \"+str(self.numFlags))\n\n #reset all tiles\n for row in self.tiles:\n for tile in row:\n tile.replay()",
"def set_eaten(self):\n self.state['return'] = True\n self.state['blue'] = False\n self.tile = (self.get_nearest_row(), self.get_nearest_col())\n self.return_path = Ghost.find_path(self.internal_map, self.tile, self.return_tile)\n self.direction = self.get_dir_from_path()\n self.image = self.score_font.render('200', True, (255, 255, 255))\n self.eaten_time = time.get_ticks()",
"def clickable(self, event):\n tile = self.canvas.find_closest(event.x, event.y)\n # check if tile is clickable, and already fill color\n if self.is_clickable(tile) and self.canvas.itemcget(tile, \"fill\") != \\\n self.color:\n self.num_clicks += 1\n cords = self.canvas.coords(tile)\n self.canvas.itemconfigure(tile, tag=\"selected\")\n self.pic.append(tkinter.PhotoImage(file=os.path.join(self.folder,\n self.new_list[\n tile[\n 0] - 1])))\n self.image_id.append(self.canvas.create_image(\n (cords[0] + cords[2]) / 2,\n (cords[1] + cords[3]) / 2,\n image=self.pic[-1]))\n self.click_tiles.append(tile)",
"def replay(self):\n self.shown = False\n self.mine = False\n self.flag = False\n self.inPlay = True\n self.count = 0\n self.numFlags = 0\n self.configure(image=Tile.images[10])",
"def grid_clicked(self, x, y):\n if self.__game.get_tile(x, y) is MarkerType.NONE:\n player = self.__game.get_player()\n next_player = self.__game.get_next_player()\n\n # Next move the positions are swapped\n self.__infobar.update_info(next_player, player)\n\n self.__tilegrid.set_tile_marker(x, y, player)\n self.__tilegrid.set_tile_color(x, y, Color.DARK_TONE)\n\n state, winner, loser, win_tiles = self.__game.make_move(x, y)\n # Display winner info if found\n if state is GameState.WINNER:\n self.__infobar.show_results(state, winner, loser)\n self.__tilegrid.highlight_tiles(win_tiles)\n self.__buttonbar.set_disabled(False)\n elif state is GameState.TIE:\n self.__infobar.show_results(state, None, None)\n self.__buttonbar.set_disabled(False)\n\n # Play sound according to the player\n if player is MarkerType.CROSS:\n winsound.PlaySound(\"sound/click_x.wav\", winsound.SND_ASYNC)\n else:\n winsound.PlaySound(\"sound/click_o.wav\", winsound.SND_ASYNC)\n else:\n self.__tilegrid.set_tile_color(x, y, Color.FAIL_COLOR)",
"def update_reset_button(self):\r\n if self.board.hovered_tiles and self.is_left_mouse_down:\r\n self.reset_button.draw_uhoh()\r\n else:\r\n self.reset_button.draw_smiley()",
"def update(self):\r\n if self.opportunity or 'key' in inventory:\r\n self.image = pygame.transform.scale(pygame.image.load_extended(\"images/greenPortal.png\").convert_alpha(),\r\n (50, 75))\r\n self.image.set_colorkey((255, 255, 255))\r\n elif not self.opportunity:\r\n self.image = pygame.transform.scale(pygame.image.load_extended(\"images/redPortal.png\").convert_alpha(),\r\n (50, 75))\r\n self.image.set_colorkey((255, 255, 255))",
"def handle_left_click(self):\n if not self.game_in_progress:\n return\n if self.first_click:\n self.first_click = False\n self.timer.start(1000)\n sender = self.sender()\n row = 0\n col = 0\n for row in range(self.rows):\n for col in range(self.cols):\n if self.button_array[row][col] == sender:\n break\n else:\n continue\n break\n # print 'Received left click:', row, ',', col\n celllist = self.board.opencell(row, col)\n if celllist == []:\n return\n for cell in celllist:\n row = cell[0]\n col = cell[1]\n cell_property = self.board.getcellproperty(row, col)\n if cell_property == CellProperty.Empty:\n self.button_array[row][col].setIcon(QtGui.QIcon(\"icons/OpenedSquare.png\"))\n elif cell_property == CellProperty.Mine:\n # Game over\n for row in range(self.rows):\n for col in range(self.cols):\n cell_property = self.board.getcellproperty(row, col)\n if cell_property == CellProperty.Mine:\n self.button_array[row][col].setIcon(QtGui.QIcon(\"icons/mine.ico\"))\n self.status_button.setIcon(QtGui.QIcon(\"icons/smiley3.ico\"))\n self.game_in_progress = False\n self.timer.stop()\n return\n elif cell_property == CellProperty.MineCountOne:\n self.button_array[row][col].setIcon(QtGui.QIcon(\"icons/1.png\"))\n elif cell_property == CellProperty.MineCountTwo:\n self.button_array[row][col].setIcon(QtGui.QIcon(\"icons/2.png\"))\n elif cell_property == CellProperty.MineCountThree:\n self.button_array[row][col].setIcon(QtGui.QIcon(\"icons/3.png\"))\n elif cell_property == CellProperty.MineCountFour:\n self.button_array[row][col].setIcon(QtGui.QIcon(\"icons/4.png\"))\n elif cell_property == CellProperty.MineCountFive:\n self.button_array[row][col].setIcon(QtGui.QIcon(\"icons/5.png\"))\n elif cell_property == CellProperty.MineCountSix:\n self.button_array[row][col].setIcon(QtGui.QIcon(\"icons/6.png\"))\n elif cell_property == CellProperty.MineCountSeven:\n self.button_array[row][col].setIcon(QtGui.QIcon(\"icons/7.png\"))\n elif cell_property == CellProperty.MineCountEight:\n self.button_array[row][col].setIcon(QtGui.QIcon(\"icons/8.png\"))\n\n game_status = self.board.continuegame()\n print 'Game Status:', game_status\n if game_status == GameStatus.GameWon:\n self.timer.stop()\n self.game_in_progress = False\n player_name = QtGui.QInputDialog.getText(self, \"Name Please !!\",\\\n \"Enter your name for leader board:\")\n # TODO: Replace 1 with the time taken by the end user.\n LeaderBoard.insertnewscore(CURRENT_GAME_LEVEL, player_name[0], self.time)\n self.status_button.setIcon(QtGui.QIcon(\"icons/smiley.ico\"))\n print \"You have won the game\"",
"def blitme(self):\n if not self.toggle_death:\n self.screen.blit(self.image, self.rect)\n else:\n self.screen.blit(self.score_image, self.score_rect)",
"def blit_me(self):\n self.game_over_img.blit_me()\n self.retry_button.blit_me()",
"def reveal(self, i, j):\n if not self.is_game_over:\n if not self.flags[i, j]:\n # Game is initialized after first click in order to prevent\n # the first click being straight over a mine\n if not self.is_initialized:\n self.initialize(i, j)\n\n self.update_revealed(i, j)\n self.revealed_img.set_data(self.revealed)\n self.flags_pts.set_data(*np.where(self.flags)[::-1])\n self.refresh_canvas()\n\n if np.count_nonzero(self.revealed) == self.n_not_mines:\n self.game_over(True)",
"def change_start_button(event):\n img_start_button_mouse_over = PhotoImage(\n file=r\"C:\\Users\\Owner\\PycharmProjects\\Module14\\buttons\\start_new_game_raised_active.png\")\n lbl_start_game.config(image=img_start_button_mouse_over)\n lbl_start_game.image = img_start_button_mouse_over\n lbl_start_game.grid(row=8, column=1, columnspan=8, pady=6)",
"def update(self, new_state):\n\n if self.update_animation:\n self.canvas.delete(self.agent)\n row, col = new_state\n x1 = col * self.GRID_ROW_HEIGHT\n y1 = row * self.GRID_ROW_HEIGHT\n self.agent = self.canvas.create_image(x1 + self.GRID_ROW_HEIGHT / 2, y1 + self.GRID_ROW_HEIGHT / 2,\n image=self.penguin)",
"def update(self):\n if self.value:\n self.image = self.rect2 \n else:\n self.image = self.rect1",
"def bone(self):\n root = tkinter.Toplevel()\n button = ttk.Button(root)\n photo = tkinter.PhotoImage(file='C:/Users/shepheam/RobotTeamProject/assets/images/dog_treats.gif')\n button.image = photo\n button.grid()\n button['command'] = lambda: print('Good boy!')",
"def click_cell(self, event):\n if (self.world_setable):\n x, y = event.x, event.y\n row = y / self.cell_size\n col = x / self.cell_size\n if ((row in range(self.cell_row)) and\n (col in range(self.cell_col))):\n status_now = not self.world_status.now[row, col]\n if (status_now):\n color = self.color_alive\n else:\n color = self.color_dead\n item_id = self.world[row, col]\n self.canvas.itemconfig(item_id, fill=color)\n self.world_status.now[row, col] = status_now\n self.world_status.next = self.world_status.now.copy()\n self.init_world = self.world_status.now.copy()",
"def _update_image(self):\n button = self.buttons.checkedButton()\n if button is None:\n return\n\n button.click()",
"def showAdjTiles(self,event):\n clicked = event.widget\n if clicked.isInPlay():\n self.changeSmile(1)\n #if tile is Safe, reveal adjacent tiles and cascade if needed\n if clicked.isSafe():\n returned = 0\n for adjTile in self.getAdjacentTiles(clicked.row, clicked.col):\n value = adjTile.show()\n if value == 1 and adjTile.isZero():\n value += self.cascadeShow(adjTile)\n returned = -1 if (value == -1 or returned == -1) else value+returned\n self.checkEnd(returned)\n #if unsafe, return adjacent buttons to unpressed images\n else:\n for adjTile in self.getAdjacentTiles(clicked.row, clicked.col):\n if not adjTile.isFlagged() and not adjTile.isShown():\n adjTile.configure(image=Tile.images[10])",
"def change_back_start_button(event):\n img_start_button_mouse_over = PhotoImage(\n file=r\"C:\\Users\\Owner\\PycharmProjects\\Module14\\buttons\\start_new_game_raised_normal.png\")\n lbl_start_game.config(image=img_start_button_mouse_over)\n lbl_start_game.image = img_start_button_mouse_over\n lbl_start_game.grid(row=8, column=1, columnspan=8, pady=6)",
"def ballchange(self):\r\n self.picture+=1\r\n self.image=pong2.bballs[self.picture]\r\n if self.image==pong2.zeus:\r\n wow=games.Message(value=\"YOU NEED TO GET A LIFE!!!\", size=75, color=color.white, left=5, top=5, lifetime=10*games.screen.fps, after_death=None, is_collideable=False)\r\n games.screen.add(wow)"
] | [
"0.66302764",
"0.62137914",
"0.62118137",
"0.62015975",
"0.61229986",
"0.6108913",
"0.6084819",
"0.60612625",
"0.60522854",
"0.60503805",
"0.6035717",
"0.6031627",
"0.60065144",
"0.6004971",
"0.5978538",
"0.5958254",
"0.5944373",
"0.5938037",
"0.59312075",
"0.5902276",
"0.5889518",
"0.5888267",
"0.588544",
"0.5881061",
"0.586779",
"0.58517843",
"0.5842759",
"0.58179504",
"0.5815789",
"0.5779331"
] | 0.72606826 | 0 |
Calls setFlag() on the Tile that was right clicked Updates the self.numFlags counter accordingly and updates label on the board Reverts Smiley button's image to default smile to animate on clicks | def toggleFlag(self, event):
clicked = event.widget
if clicked.isInPlay(): self.changeSmile(1)
value = clicked.setFlag()
for adjTile in self.getAdjacentTiles(clicked.row, clicked.col):
adjTile.numFlags += value
self.numFlags += value
self.flagLabel.configure(text="Flags: "+str(self.numFlags)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def pressTile(self, event):\n clickedTile = event.widget\n if clickedTile.isInPlay(): self.changeSmile(2)\n if not clickedTile.isFlagged():\n clickedTile.buttonPress()\n if not self.minesArmed and event.num == 1:\n self.setUpBombs(event)",
"def flag(self, i, j):\n # Does not allow starting a game with a flag\n if not self.is_game_over and self.is_initialized:\n if not self.revealed[i, j]:\n self.flags[i, j] = not self.flags[i, j]\n self.flags_pts.set_data(*np.where(self.flags)[::-1])\n self.title_txt.set_text('{}/{}'.format(np.count_nonzero(self.flags), self.n_mines))\n self.refresh_canvas()",
"def replay(self, event=None):\n #reset relevant variables\n self.numChecked = 0\n self.numFlags = 0\n self.minesArmed = False\n self.startTime = None\n\n #reset labels\n self.mineLabel.configure(text=\"Mines: \"+str(self.numMines))\n self.smileButton.configure(image=self.images[1])\n self.flagLabel.configure(text=\"Flags: \"+str(self.numFlags))\n\n #reset all tiles\n for row in self.tiles:\n for tile in row:\n tile.replay()",
"def setFlag(self):\n if self.inPlay and not self.shown:\n self.flag = not(self.flag)\n image_index = 11 if self.flag else 10\n self.configure(image = Tile.images[image_index])\n return 1 if self.flag else -1\n return 0",
"def pressAdjTiles(self, event):\n clickedTile = event.widget\n if clickedTile.isInPlay(): self.changeSmile(2)\n for adjTile in self.getAdjacentTiles(clickedTile.row, clickedTile.col):\n if not adjTile.isFlagged(): adjTile.buttonPress()",
"def handle_right_click(self):\n if not self.game_in_progress:\n return\n if self.first_click:\n self.first_click = False\n self.timer.start(1000)\n sender = self.sender()\n row = 0\n col = 0\n for row in range(self.rows):\n for col in range(self.cols):\n if self.button_array[row][col] == sender:\n break\n else:\n continue\n break\n # print 'Received right click:', row, ',', col\n status = self.board.getcellstatus(row, col)\n if status == CellStatus.Opened:\n return\n elif status == CellStatus.Closed:\n self.remainingminecount = self.remainingminecount - 1\n self.mines_lcd.display(str(self.remainingminecount))\n self.board.setcellstatus(row, col, CellStatus.MarkedAsMine)\n self.button_array[row][col].setIcon(QtGui.QIcon(\"icons/Flag.png\"))\n elif status == CellStatus.MarkedAsMine:\n self.remainingminecount = self.remainingminecount + 1\n self.mines_lcd.display(str(self.remainingminecount))\n self.board.setcellstatus(row, col, CellStatus.MarkedAsSuspectedMine)\n self.button_array[row][col].setIcon(QtGui.QIcon(\"icons/questionmark.png\"))\n elif status == CellStatus.MarkedAsSuspectedMine:\n self.board.setcellstatus(row, col, CellStatus.Closed)\n self.button_array[row][col].setIcon(QtGui.QIcon(\"icons/unopenedsquare.png\"))",
"def replay(self):\n self.shown = False\n self.mine = False\n self.flag = False\n self.inPlay = True\n self.count = 0\n self.numFlags = 0\n self.configure(image=Tile.images[10])",
"def showTile(self, event):\n clicked = event.widget\n if clicked.isInPlay():\n self.changeSmile(1)\n returned = clicked.show()\n if returned == 1 and clicked.isZero():\n returned += self.cascadeShow(clicked)\n self.checkEnd(returned)",
"def flag_cell(self, event):\n if self.mineboard.gamestate is None:\n x = (event.x-2) // CELLWIDTH\n y = (event.y-2) // CELLWIDTH\n self.mineboard.flag_cell(y, x)\n self.update_cells()\n mines_rem = self.mineboard.minecount - self.mineboard.flagcount\n # updates the mines_left label\n if mines_rem == 1:\n self.mines_left.set(f\"{mines_rem} mine left\")\n else:\n self.mines_left.set(f\"{mines_rem} mines left\")",
"def update_reset_button(self):\r\n if self.board.hovered_tiles and self.is_left_mouse_down:\r\n self.reset_button.draw_uhoh()\r\n else:\r\n self.reset_button.draw_smiley()",
"def showAdjTiles(self,event):\n clicked = event.widget\n if clicked.isInPlay():\n self.changeSmile(1)\n #if tile is Safe, reveal adjacent tiles and cascade if needed\n if clicked.isSafe():\n returned = 0\n for adjTile in self.getAdjacentTiles(clicked.row, clicked.col):\n value = adjTile.show()\n if value == 1 and adjTile.isZero():\n value += self.cascadeShow(adjTile)\n returned = -1 if (value == -1 or returned == -1) else value+returned\n self.checkEnd(returned)\n #if unsafe, return adjacent buttons to unpressed images\n else:\n for adjTile in self.getAdjacentTiles(clicked.row, clicked.col):\n if not adjTile.isFlagged() and not adjTile.isShown():\n adjTile.configure(image=Tile.images[10])",
"def handle_left_click(self):\n if not self.game_in_progress:\n return\n if self.first_click:\n self.first_click = False\n self.timer.start(1000)\n sender = self.sender()\n row = 0\n col = 0\n for row in range(self.rows):\n for col in range(self.cols):\n if self.button_array[row][col] == sender:\n break\n else:\n continue\n break\n # print 'Received left click:', row, ',', col\n celllist = self.board.opencell(row, col)\n if celllist == []:\n return\n for cell in celllist:\n row = cell[0]\n col = cell[1]\n cell_property = self.board.getcellproperty(row, col)\n if cell_property == CellProperty.Empty:\n self.button_array[row][col].setIcon(QtGui.QIcon(\"icons/OpenedSquare.png\"))\n elif cell_property == CellProperty.Mine:\n # Game over\n for row in range(self.rows):\n for col in range(self.cols):\n cell_property = self.board.getcellproperty(row, col)\n if cell_property == CellProperty.Mine:\n self.button_array[row][col].setIcon(QtGui.QIcon(\"icons/mine.ico\"))\n self.status_button.setIcon(QtGui.QIcon(\"icons/smiley3.ico\"))\n self.game_in_progress = False\n self.timer.stop()\n return\n elif cell_property == CellProperty.MineCountOne:\n self.button_array[row][col].setIcon(QtGui.QIcon(\"icons/1.png\"))\n elif cell_property == CellProperty.MineCountTwo:\n self.button_array[row][col].setIcon(QtGui.QIcon(\"icons/2.png\"))\n elif cell_property == CellProperty.MineCountThree:\n self.button_array[row][col].setIcon(QtGui.QIcon(\"icons/3.png\"))\n elif cell_property == CellProperty.MineCountFour:\n self.button_array[row][col].setIcon(QtGui.QIcon(\"icons/4.png\"))\n elif cell_property == CellProperty.MineCountFive:\n self.button_array[row][col].setIcon(QtGui.QIcon(\"icons/5.png\"))\n elif cell_property == CellProperty.MineCountSix:\n self.button_array[row][col].setIcon(QtGui.QIcon(\"icons/6.png\"))\n elif cell_property == CellProperty.MineCountSeven:\n self.button_array[row][col].setIcon(QtGui.QIcon(\"icons/7.png\"))\n elif cell_property == CellProperty.MineCountEight:\n self.button_array[row][col].setIcon(QtGui.QIcon(\"icons/8.png\"))\n\n game_status = self.board.continuegame()\n print 'Game Status:', game_status\n if game_status == GameStatus.GameWon:\n self.timer.stop()\n self.game_in_progress = False\n player_name = QtGui.QInputDialog.getText(self, \"Name Please !!\",\\\n \"Enter your name for leader board:\")\n # TODO: Replace 1 with the time taken by the end user.\n LeaderBoard.insertnewscore(CURRENT_GAME_LEVEL, player_name[0], self.time)\n self.status_button.setIcon(QtGui.QIcon(\"icons/smiley.ico\"))\n print \"You have won the game\"",
"def flag(self, y, x):\n if self.table_state[y][x] == '-':\n self.table_state[y][x] = Minesweeper.FLAG\n Minesweeper.print_table(self.table_state)",
"def on_toggle_click(self, change):\n change = change[\"new\"]\n if change == \"Good\":\n self.set_mask_good()\n elif change == \"Bad\":\n self.set_mask_bad()\n elif change == \"Continuum\":\n self.set_mask_continuum()\n elif change == \"Line\":\n self.set_mask_line()",
"def reveal(self, i, j):\n if not self.is_game_over:\n if not self.flags[i, j]:\n # Game is initialized after first click in order to prevent\n # the first click being straight over a mine\n if not self.is_initialized:\n self.initialize(i, j)\n\n self.update_revealed(i, j)\n self.revealed_img.set_data(self.revealed)\n self.flags_pts.set_data(*np.where(self.flags)[::-1])\n self.refresh_canvas()\n\n if np.count_nonzero(self.revealed) == self.n_not_mines:\n self.game_over(True)",
"def changeTile (self, posY, posX, tile=\"t\"):\r\n self.grid[posY][posX] = tile",
"def buttonPress(self):\n if self.inPlay and not self.shown:\n self.configure(image = Tile.images[0])",
"def changeSmile(self, num, event=None):\n self.smileButton.configure(image=self.images[num])",
"def _right_click(self, event):\n\n position = event.x, event.y\n cell_position = self._game.grid.pixel_to_cell(position)\n\n removed_tower = self._game.remove(cell_position)\n self._coins += removed_tower.get_value() * 0.8\n\n #updates coins string var to display coins\n self._status_bar.set_coins(self._coins)\n\n #update availability for tower views\n for tower, view in self._tower_views:\n if self._coins < tower.get_value():\n view.set_available(False)\n else: \n view.set_available(True)",
"def flagCell(self, row, col):\n self.flagged[row, col] = 1",
"def set_eaten(self):\n self.state['return'] = True\n self.state['blue'] = False\n self.tile = (self.get_nearest_row(), self.get_nearest_col())\n self.return_path = Ghost.find_path(self.internal_map, self.tile, self.return_tile)\n self.direction = self.get_dir_from_path()\n self.image = self.score_font.render('200', True, (255, 255, 255))\n self.eaten_time = time.get_ticks()",
"def buildIcon(self):\n\n cmds.iconTextCheckBox(image=self.icon, style=\"iconOnly\", label=self.name, height=200, width=200, dragCallback=lambda *x: self.iconDrag(*x),\n ann=\"Middle mouse click and drag to perspective view.\")",
"def grid_clicked(self, x, y):\n if self.__game.get_tile(x, y) is MarkerType.NONE:\n player = self.__game.get_player()\n next_player = self.__game.get_next_player()\n\n # Next move the positions are swapped\n self.__infobar.update_info(next_player, player)\n\n self.__tilegrid.set_tile_marker(x, y, player)\n self.__tilegrid.set_tile_color(x, y, Color.DARK_TONE)\n\n state, winner, loser, win_tiles = self.__game.make_move(x, y)\n # Display winner info if found\n if state is GameState.WINNER:\n self.__infobar.show_results(state, winner, loser)\n self.__tilegrid.highlight_tiles(win_tiles)\n self.__buttonbar.set_disabled(False)\n elif state is GameState.TIE:\n self.__infobar.show_results(state, None, None)\n self.__buttonbar.set_disabled(False)\n\n # Play sound according to the player\n if player is MarkerType.CROSS:\n winsound.PlaySound(\"sound/click_x.wav\", winsound.SND_ASYNC)\n else:\n winsound.PlaySound(\"sound/click_o.wav\", winsound.SND_ASYNC)\n else:\n self.__tilegrid.set_tile_color(x, y, Color.FAIL_COLOR)",
"def shortcut_click(self, event):\r\n\r\n tile = self.board.get_event_tile(event.pos)\r\n\r\n if not self.is_new_game and not self.is_game_over and tile is not None:\r\n self.update_reset_button()\r\n tile_reveal_result = self.board.left_click_up(tile, is_shortcut_click=True)\r\n self.process_tile_reveal(tile_reveal_result)",
"def __init__(self, rows, cols, mines):\n tk.Tk.__init__(self)\n \n #load all needed images into Tile.images\n for i in range(14):\n Tile.images.append(tk.PhotoImage(file = \"images/tile-\"+str(i)+\".gif\"))\n \n self.menu = tk.Menu(self)\n self.configure(menu=self.menu)\n self.title(\"Minesweeper\")\n self.myBoard = Board(rows, cols, mines, self)\n self.menuVar = tk.IntVar(self)\n self.menuVar.set(1)\n self.checkVar = tk.IntVar(self)\n self.checkVar.set(1)\n self.gamemenu = tk.Menu(self.menu, tearoff = False)\n self.menu.add_cascade(label=\"Game\", menu=self.gamemenu)\n self.gamemenu.add_command(label=\"New Game\", command=self.myBoard.replay)\n self.gamemenu.add_separator()\n self.gamemenu.add_radiobutton(variable = self.menuVar, value=1, label=\"Beginner\", command=lambda: self.resize(8,8,10))\n self.gamemenu.add_radiobutton(variable = self.menuVar, value=2, label=\"Intermediate\", command=lambda: self.resize(16,16,40))\n self.gamemenu.add_radiobutton(variable = self.menuVar, value=3, label=\"Expert\", command=lambda: self.resize(16,30,99))\n self.gamemenu.add_separator()\n self.gamemenu.add_checkbutton(variable = self.checkVar, onvalue=4, offvalue=0, label=\"Custom\", command= self.options)\n self.gamemenu.add_separator()\n self.gamemenu.add_command(label=\"Exit\", command=self.exitGame)\n windowWidth = str(20*cols+40)\n windowHeight = str(20*rows+60)\n self.protocol(\"WM_DELETE_WINDOW\", self.exitGame)\n self.minsize(windowWidth, windowHeight)\n self.maxsize(windowWidth, windowHeight)\n self.geometry(windowWidth+'x'+windowHeight)\n self.mainloop()",
"def __init__(self,size,tilelist,buttonflag):\n\n # Initialize the screen class\n BaseScreen.__init__(self,size)\n\n # Create the list of tile objects and draw them on the screen\n self.tilelist = tilelist\n xlen = self.tilelist[0][0].image.get_width()\n ylen = self.tilelist[0][0].image.get_height()\n for x in range(0,size[0],xlen):\n for y in range(0,size[1],ylen):\n try:\n self.image.blit(self.tilelist[x // xlen][y // ylen].image,(x,y))\n self.tilelist[x // xlen][y // ylen].set_position((x,y))\n except:\n pass\n\n # Set up an empty button list and the buttonflag\n self.buttonlist = []\n self.buttonflag = buttonflag",
"def switch(self, tile):\n self.tiles[self.tiles.index(tile)], self.opentile, self.prev = self.opentile, tile, self.opentile\n self.nb_move += 1",
"def new_tile(self):\n # replace with your code\n pass",
"def click_cell(self, event):\n if (self.world_setable):\n x, y = event.x, event.y\n row = y / self.cell_size\n col = x / self.cell_size\n if ((row in range(self.cell_row)) and\n (col in range(self.cell_col))):\n status_now = not self.world_status.now[row, col]\n if (status_now):\n color = self.color_alive\n else:\n color = self.color_dead\n item_id = self.world[row, col]\n self.canvas.itemconfig(item_id, fill=color)\n self.world_status.now[row, col] = status_now\n self.world_status.next = self.world_status.now.copy()\n self.init_world = self.world_status.now.copy()",
"def right_mouse_down_handler(self, event):\r\n\r\n self.is_right_mouse_down = True\r\n\r\n tile = self.board.get_event_tile(event.pos)\r\n if not self.is_new_game and not self.is_game_over and tile is not None:\r\n if not self.is_left_mouse_down:\r\n change_in_unflagged_mines = tile.toggle_flag()\r\n self.mine_counter.update(change_in_unflagged_mines)\r\n self.board.update_tile_hover(tile, self.is_left_mouse_down, self.is_right_mouse_down)"
] | [
"0.6642763",
"0.6635976",
"0.6471024",
"0.6449346",
"0.64119965",
"0.63664156",
"0.6306046",
"0.62559724",
"0.6244577",
"0.62244415",
"0.61597365",
"0.61555517",
"0.6153825",
"0.6059769",
"0.6005872",
"0.589099",
"0.58645517",
"0.58640367",
"0.58401346",
"0.5822118",
"0.5819733",
"0.5770579",
"0.57393277",
"0.5701726",
"0.57004005",
"0.5698711",
"0.5686647",
"0.56536233",
"0.56445843",
"0.5623273"
] | 0.7922562 | 0 |
Changes the image of each adjacent Tile to the pressed state, but only if that Tile is not flagged. Changes the Smiley button's image to animate the click. | def pressAdjTiles(self, event):
clickedTile = event.widget
if clickedTile.isInPlay(): self.changeSmile(2)
for adjTile in self.getAdjacentTiles(clickedTile.row, clickedTile.col):
if not adjTile.isFlagged(): adjTile.buttonPress() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def showAdjTiles(self,event):\n clicked = event.widget\n if clicked.isInPlay():\n self.changeSmile(1)\n #if tile is Safe, reveal adjacent tiles and cascade if needed\n if clicked.isSafe():\n returned = 0\n for adjTile in self.getAdjacentTiles(clicked.row, clicked.col):\n value = adjTile.show()\n if value == 1 and adjTile.isZero():\n value += self.cascadeShow(adjTile)\n returned = -1 if (value == -1 or returned == -1) else value+returned\n self.checkEnd(returned)\n #if unsafe, return adjacent buttons to unpressed images\n else:\n for adjTile in self.getAdjacentTiles(clicked.row, clicked.col):\n if not adjTile.isFlagged() and not adjTile.isShown():\n adjTile.configure(image=Tile.images[10])",
"def clickable(self, event):\n tile = self.canvas.find_closest(event.x, event.y)\n # check if tile is clickable, and already fill color\n if self.is_clickable(tile) and self.canvas.itemcget(tile, \"fill\") != \\\n self.color:\n self.num_clicks += 1\n cords = self.canvas.coords(tile)\n self.canvas.itemconfigure(tile, tag=\"selected\")\n self.pic.append(tkinter.PhotoImage(file=os.path.join(self.folder,\n self.new_list[\n tile[\n 0] - 1])))\n self.image_id.append(self.canvas.create_image(\n (cords[0] + cords[2]) / 2,\n (cords[1] + cords[3]) / 2,\n image=self.pic[-1]))\n self.click_tiles.append(tile)",
"def pressTile(self, event):\n clickedTile = event.widget\n if clickedTile.isInPlay(): self.changeSmile(2)\n if not clickedTile.isFlagged():\n clickedTile.buttonPress()\n if not self.minesArmed and event.num == 1:\n self.setUpBombs(event)",
"def showTile(self, event):\n clicked = event.widget\n if clicked.isInPlay():\n self.changeSmile(1)\n returned = clicked.show()\n if returned == 1 and clicked.isZero():\n returned += self.cascadeShow(clicked)\n self.checkEnd(returned)",
"def delaround(i, j):\r\n global game_over, last_destroy\r\n # If the clicked tile is already revealed, do nothing.\r\n if tiles_cover[i, j].get_size() == (0, 0):\r\n return\r\n if tiles_cover[i, j] == tile_img_list[11]:\r\n return\r\n if tiles_cover[i, j] != tile_img_list[11]:\r\n tiles_cover[i, j] = pyg.Surface((0, 0)) # Reveal clicked tile\r\n last_destroy = (i, j)\r\n if tiles[i, j] == 9: # If mine is under clicked tile\r\n game_over = 1\r\n # If the current tile is blank, check all adjacent tiles\r\n cycle = [(i+dx, j+dy) for dx, dy in around]\r\n # Cycles through surrounding tiles\r\n for x, y in cycle:\r\n if show_destroy:\r\n pyg.event.pump()\r\n # If x or y coordinates are off the grid, skip this loop\r\n if x >= size_h or y >= size_w or x < 0 or y < 0:\r\n continue\r\n # If the current tile is already uncovered, skip loop\r\n if tiles_cover[x, y].get_size() == (0, 0):\r\n continue\r\n if tiles_cover[x, y] == tile_img_list[11]:\r\n continue\r\n # If clicked tile is a number tile, uncover it\r\n if tiles[i, j] == 0 and tiles[x, y] in range(1, 9):\r\n tiles_cover[x, y] = pyg.Surface((0, 0))\r\n last_destroy = (i, j)\r\n if show_destroy:\r\n draw_img()\r\n draw_cover()\r\n update()\r\n # If clicked tile is blank, call function at the tile\r\n elif tiles[x, y] == 0: # abs(x-i)+abs(y-j) != 2\r\n if show_destroy:\r\n draw_img()\r\n draw_cover()\r\n update()\r\n delaround(x, y)",
"def is_clickable(self, tile):\n return \"match\" not in self.canvas.gettags(tile) and \\\n \"selected\" not in self.canvas.gettags(tile) and \\\n len(self.canvas.find_withtag(\"selected\")) < 2",
"def toggleFlag(self, event): \n clicked = event.widget\n if clicked.isInPlay(): self.changeSmile(1)\n value = clicked.setFlag()\n for adjTile in self.getAdjacentTiles(clicked.row, clicked.col):\n adjTile.numFlags += value\n self.numFlags += value\n self.flagLabel.configure(text=\"Flags: \"+str(self.numFlags))",
"def grid_clicked(self, x, y):\n if self.__game.get_tile(x, y) is MarkerType.NONE:\n player = self.__game.get_player()\n next_player = self.__game.get_next_player()\n\n # Next move the positions are swapped\n self.__infobar.update_info(next_player, player)\n\n self.__tilegrid.set_tile_marker(x, y, player)\n self.__tilegrid.set_tile_color(x, y, Color.DARK_TONE)\n\n state, winner, loser, win_tiles = self.__game.make_move(x, y)\n # Display winner info if found\n if state is GameState.WINNER:\n self.__infobar.show_results(state, winner, loser)\n self.__tilegrid.highlight_tiles(win_tiles)\n self.__buttonbar.set_disabled(False)\n elif state is GameState.TIE:\n self.__infobar.show_results(state, None, None)\n self.__buttonbar.set_disabled(False)\n\n # Play sound according to the player\n if player is MarkerType.CROSS:\n winsound.PlaySound(\"sound/click_x.wav\", winsound.SND_ASYNC)\n else:\n winsound.PlaySound(\"sound/click_o.wav\", winsound.SND_ASYNC)\n else:\n self.__tilegrid.set_tile_color(x, y, Color.FAIL_COLOR)",
"def changeSmile(self, num, event=None):\n self.smileButton.configure(image=self.images[num])",
"def clicked(self, x_pos, y_pos):\n img = self.tower_imgs\n if self.x - img.get_width() // 2 + self.width >= x_pos >= self.x - img.get_width() // 2:\n if self.y + self.height - img.get_height() // 2 >= y_pos >= self.y - img.get_height() // 2:\n return True\n return False",
"def click(self, X, Y):\n tower_image = self.tower_images[self.level - 1]\n\n if X <= self.x + tower_image.get_width() // 2 - 2 * self.extra_padding and X >= self.x - tower_image.get_width() // 2 + self.extra_padding // 2:\n if self.name in TowerConstants.MAGIC_TOWER_NAMES or self.name in TowerConstants.SUP_TOWER_NAMES:\n if Y <= self.y + self.height // 2 - (2 * self.extra_padding) and Y >= self.y - self.height // 2 + (2 * self.extra_padding):\n return True\n else:\n if Y <= self.y + self.height // 2 - (4 * self.extra_padding) and Y >= self.y - self.height // 2 + (2 * self.extra_padding):\n return True\n return False",
"def click(self, X, Y):\n img = self.tower_imgs\n if self.x - img.get_width() // 2 + self.width >= X >= self.x - img.get_width() // 2:\n if self.y + self.height - img.get_height() // 2 >= Y >= self.y - img.get_height() // 2:\n return True\n return False",
"def reveal(self, i, j):\n if not self.is_game_over:\n if not self.flags[i, j]:\n # Game is initialized after first click in order to prevent\n # the first click being straight over a mine\n if not self.is_initialized:\n self.initialize(i, j)\n\n self.update_revealed(i, j)\n self.revealed_img.set_data(self.revealed)\n self.flags_pts.set_data(*np.where(self.flags)[::-1])\n self.refresh_canvas()\n\n if np.count_nonzero(self.revealed) == self.n_not_mines:\n self.game_over(True)",
"def on_click(self, e: ti.template()):\n for i, j in ti.ndrange(self.nx, self.ny):\n if self.inside(self.Vector2(i / self.nx, j / self.ny),\n self.Vector2(e.pos[0], e.pos[1]), 0.03):\n self.T[i, j] = 1",
"def switch(self, tile):\n self.tiles[self.tiles.index(tile)], self.opentile, self.prev = self.opentile, tile, self.opentile\n self.nb_move += 1",
"def handle_left_click(self):\n if not self.game_in_progress:\n return\n if self.first_click:\n self.first_click = False\n self.timer.start(1000)\n sender = self.sender()\n row = 0\n col = 0\n for row in range(self.rows):\n for col in range(self.cols):\n if self.button_array[row][col] == sender:\n break\n else:\n continue\n break\n # print 'Received left click:', row, ',', col\n celllist = self.board.opencell(row, col)\n if celllist == []:\n return\n for cell in celllist:\n row = cell[0]\n col = cell[1]\n cell_property = self.board.getcellproperty(row, col)\n if cell_property == CellProperty.Empty:\n self.button_array[row][col].setIcon(QtGui.QIcon(\"icons/OpenedSquare.png\"))\n elif cell_property == CellProperty.Mine:\n # Game over\n for row in range(self.rows):\n for col in range(self.cols):\n cell_property = self.board.getcellproperty(row, col)\n if cell_property == CellProperty.Mine:\n self.button_array[row][col].setIcon(QtGui.QIcon(\"icons/mine.ico\"))\n self.status_button.setIcon(QtGui.QIcon(\"icons/smiley3.ico\"))\n self.game_in_progress = False\n self.timer.stop()\n return\n elif cell_property == CellProperty.MineCountOne:\n self.button_array[row][col].setIcon(QtGui.QIcon(\"icons/1.png\"))\n elif cell_property == CellProperty.MineCountTwo:\n self.button_array[row][col].setIcon(QtGui.QIcon(\"icons/2.png\"))\n elif cell_property == CellProperty.MineCountThree:\n self.button_array[row][col].setIcon(QtGui.QIcon(\"icons/3.png\"))\n elif cell_property == CellProperty.MineCountFour:\n self.button_array[row][col].setIcon(QtGui.QIcon(\"icons/4.png\"))\n elif cell_property == CellProperty.MineCountFive:\n self.button_array[row][col].setIcon(QtGui.QIcon(\"icons/5.png\"))\n elif cell_property == CellProperty.MineCountSix:\n self.button_array[row][col].setIcon(QtGui.QIcon(\"icons/6.png\"))\n elif cell_property == CellProperty.MineCountSeven:\n self.button_array[row][col].setIcon(QtGui.QIcon(\"icons/7.png\"))\n elif cell_property == CellProperty.MineCountEight:\n self.button_array[row][col].setIcon(QtGui.QIcon(\"icons/8.png\"))\n\n game_status = self.board.continuegame()\n print 'Game Status:', game_status\n if game_status == GameStatus.GameWon:\n self.timer.stop()\n self.game_in_progress = False\n player_name = QtGui.QInputDialog.getText(self, \"Name Please !!\",\\\n \"Enter your name for leader board:\")\n # TODO: Replace 1 with the time taken by the end user.\n LeaderBoard.insertnewscore(CURRENT_GAME_LEVEL, player_name[0], self.time)\n self.status_button.setIcon(QtGui.QIcon(\"icons/smiley.ico\"))\n print \"You have won the game\"",
"def update_reset_button(self):\r\n if self.board.hovered_tiles and self.is_left_mouse_down:\r\n self.reset_button.draw_uhoh()\r\n else:\r\n self.reset_button.draw_smiley()",
"def click_img(self, target_img):\n pos = imagesearch_loop(target_img, timesample=0.5)\n if pos[0] == -1:\n print(\"No image found\")\n else:\n self.click(pos)",
"def _onclick(self,event):\r\n if self.NumCells > 0:\r\n ShapeMask = np.shape(self.Mask)\r\n # get coorinates at selected location in image coordinates\r\n if event.xdata == None or event.ydata == None:\r\n return\r\n xcoor = min(max(int(event.xdata),0),ShapeMask[1])\r\n ycoor = min(max(int(event.ydata),0),ShapeMask[0])\r\n \r\n # search for the mask coresponding to the selected cell\r\n for EachCell in range(self.NumCells):\r\n if self.Mask[ycoor,xcoor,EachCell]:\r\n self.SelectedCellIndex = EachCell\r\n break\r\n \r\n # highlight selected cell\r\n if self.SelectedCellIndex not in self.selected_ML_Index:\r\n # Get the selected cell's contour coordinates and mask patch\r\n self.contour_verts, self.Cell_patch = self.get_cell_polygon(self.Mask[:,:,self.SelectedCellIndex])\r\n \r\n self.Matdisplay_Figure_axis.add_patch(self.Cell_patch)\r\n self.Matdisplay_Canvas.draw()\r\n \r\n self.selected_ML_Index.append(self.SelectedCellIndex)\r\n self.selected_cells_infor_dict['cell{}_verts'.format(str(self.SelectedCellIndex))] = self.contour_verts\r\n else:\r\n # If click on the same cell\r\n self.Cell_patch.remove()\r\n self.Matdisplay_Canvas.draw()\r\n self.selected_ML_Index.remove(self.SelectedCellIndex)\r\n self.selected_cells_infor_dict.pop('cell{}_verts'.format(str(self.SelectedCellIndex)))",
"def revealBombs(self, win):\n for row in self.tiles:\n for tile in row:\n tile.inPlay = False\n if tile.isMine():\n if win:\n #flag non-flagged mines after winning\n if not tile.isFlagged():\n tile.configure(image=Tile.images[11])\n self.numFlags += 1\n else:\n #show unexploded mines after losing \n if not tile.isShown():\n tile.configure(image=Tile.images[9])\n #if incorrectly flagged, mark as such \n elif tile.isFlagged():\n tile.configure(image=Tile.images[12])",
"def set_eaten(self):\n self.state['return'] = True\n self.state['blue'] = False\n self.tile = (self.get_nearest_row(), self.get_nearest_col())\n self.return_path = Ghost.find_path(self.internal_map, self.tile, self.return_tile)\n self.direction = self.get_dir_from_path()\n self.image = self.score_font.render('200', True, (255, 255, 255))\n self.eaten_time = time.get_ticks()",
"def buttonPress(self):\n if self.inPlay and not self.shown:\n self.configure(image = Tile.images[0])",
"def boutton_press(self,a,img):\r\n x,y=self.can.coords(self.button[a])\r\n self.can.delete(self.button[a])\r\n self.button[a]=self.creat_image(img,x,y)",
"def click_callback(self, event):\n # print(\"clicked at \", event.x+self.offset_x, event.y+self.offset_y)\n # x = string.ascii_lowercase[math.ceil((event.x + self.offset_x) / self.width) - 1]\n # y = (math.ceil((event.y + self.offset_y) / self.width) - 9) * -1\n self.clear_moves_on_canvas()\n\n x = math.ceil((event.x + self.offset_x) / self.width) - 1\n y = math.ceil((event.y + self.offset_y) / self.width) - 1\n\n if 0 <= x < 8 and 0 <= y < 8:\n board_value = self.game.board[x][y]\n if self.moving:\n # check if second click isn't on another piece\n if board_value != \"\" and board_value[0] == self.game.current_player_color:\n self.calculate_moves_for_moving_piece(x, y)\n else:\n self.move_piece(x, y) # method moves moving_piece\n self.moving = False\n else:\n self.calculate_moves_for_moving_piece(x, y) # method sets moving_piece",
"def update_image(self):\n if self.updated_sprite_list:\n self.image = self.increment_sprite_index(True)\n self.updated_sprite_list = False\n self.update_count_down = self.update_frames\n self.redraw = True\n elif self.update_frames == 0:\n return\n elif self.update_count_down == 0:\n if self.sprite_index == 2:\n self.remove_action(Action.crouch_attack)\n self.image = self.increment_sprite_index()\n self.update_count_down = self.update_frames\n self.redraw = True\n else:\n self.update_count_down -= 1",
"def _update_image(self):\n button = self.buttons.checkedButton()\n if button is None:\n return\n\n button.click()",
"def choose_mine(self,index):\n cell=game.get_cell(index)\n update_stack={'type':'continue'}\n \n if cell.isMine():\n self._flipAll(update_stack) #clicked on a mine\n else:\n self._flip(update_stack,index) #clicked on a safe cell\n\n return update_stack",
"def isPieceClicked(self):\r\n if self.clickedPiece is None:\r\n return False\r\n return True",
"def click(self, event):\n x, y = self.canvas.invert([event.x, event.y])\n i, j = int(floor(x)), int(floor(y))\n patch = self.get_cell(i, j)\n if patch and patch.state == \"green\":\n cluster = self.get_cluster(patch)\n self.show_cluster(cluster)",
"def handle_right_click(self):\n if not self.game_in_progress:\n return\n if self.first_click:\n self.first_click = False\n self.timer.start(1000)\n sender = self.sender()\n row = 0\n col = 0\n for row in range(self.rows):\n for col in range(self.cols):\n if self.button_array[row][col] == sender:\n break\n else:\n continue\n break\n # print 'Received right click:', row, ',', col\n status = self.board.getcellstatus(row, col)\n if status == CellStatus.Opened:\n return\n elif status == CellStatus.Closed:\n self.remainingminecount = self.remainingminecount - 1\n self.mines_lcd.display(str(self.remainingminecount))\n self.board.setcellstatus(row, col, CellStatus.MarkedAsMine)\n self.button_array[row][col].setIcon(QtGui.QIcon(\"icons/Flag.png\"))\n elif status == CellStatus.MarkedAsMine:\n self.remainingminecount = self.remainingminecount + 1\n self.mines_lcd.display(str(self.remainingminecount))\n self.board.setcellstatus(row, col, CellStatus.MarkedAsSuspectedMine)\n self.button_array[row][col].setIcon(QtGui.QIcon(\"icons/questionmark.png\"))\n elif status == CellStatus.MarkedAsSuspectedMine:\n self.board.setcellstatus(row, col, CellStatus.Closed)\n self.button_array[row][col].setIcon(QtGui.QIcon(\"icons/unopenedsquare.png\"))"
] | [
"0.71017325",
"0.648041",
"0.64698595",
"0.62327427",
"0.6209187",
"0.6125583",
"0.60775197",
"0.597811",
"0.5892851",
"0.5829729",
"0.5796278",
"0.57661337",
"0.57153255",
"0.5674777",
"0.566453",
"0.5633023",
"0.56284046",
"0.5595467",
"0.5584798",
"0.5573565",
"0.55453783",
"0.55080265",
"0.5419047",
"0.5403632",
"0.5371344",
"0.5331167",
"0.5312016",
"0.530582",
"0.5304293",
"0.5304041"
] | 0.7350363 | 0 |
Calls show() on every tile adjacent to the given tile; continues the cascade if a newly shown tile is zero; returns the total number of tiles newly revealed. | def cascadeShow(self, tile):
value = 0
for adjTile in self.getAdjacentTiles(tile.row, tile.col):
returned = adjTile.show()
value += returned
if returned == 1 and adjTile.isZero():
value += self.cascadeShow(adjTile)
return value | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def showAdjTiles(self,event):\n clicked = event.widget\n if clicked.isInPlay():\n self.changeSmile(1)\n #if tile is Safe, reveal adjacent tiles and cascade if needed\n if clicked.isSafe():\n returned = 0\n for adjTile in self.getAdjacentTiles(clicked.row, clicked.col):\n value = adjTile.show()\n if value == 1 and adjTile.isZero():\n value += self.cascadeShow(adjTile)\n returned = -1 if (value == -1 or returned == -1) else value+returned\n self.checkEnd(returned)\n #if unsafe, return adjacent buttons to unpressed images\n else:\n for adjTile in self.getAdjacentTiles(clicked.row, clicked.col):\n if not adjTile.isFlagged() and not adjTile.isShown():\n adjTile.configure(image=Tile.images[10])",
"def show(self):\n if not self.shown and not self.flag:\n self.shown = True\n self.configure(image=Tile.images[self.count])\n return -1 if self.mine else 1\n return 0",
"def showTile(self, event):\n clicked = event.widget\n if clicked.isInPlay():\n self.changeSmile(1)\n returned = clicked.show()\n if returned == 1 and clicked.isZero():\n returned += self.cascadeShow(clicked)\n self.checkEnd(returned)",
"def num_tiles(self):\n return self.num_row_tiles * self.num_col_tiles",
"def revealBombs(self, win):\n for row in self.tiles:\n for tile in row:\n tile.inPlay = False\n if tile.isMine():\n if win:\n #flag non-flagged mines after winning\n if not tile.isFlagged():\n tile.configure(image=Tile.images[11])\n self.numFlags += 1\n else:\n #show unexploded mines after losing \n if not tile.isShown():\n tile.configure(image=Tile.images[9])\n #if incorrectly flagged, mark as such \n elif tile.isFlagged():\n tile.configure(image=Tile.images[12])",
"def display(self):\n for row in self.tile_rows:\n print(row)",
"def draw(self, screen):\n for i in range(self.tiles_len):\n x, y = self.tilepos[i]\n screen.blit(self.images[i], (x, y))\n self.draw_text(screen, \"Moves : \" + str(self.nb_move), 40, 500, 10, 255, 255, 255, False)",
"def no_of_misplaced_tiles(state):\r\n h1 = 0\r\n goal_state = [[0], [1, 2, 3], [4, 5, 6], [7, 8, 9]]\r\n for y in range(len(goal_state)):\r\n for x in range(len(goal_state[y])):\r\n if state[y][x] != goal_state[y][x]:\r\n h1 += 1\r\n return h1",
"def show_possibles(self):\n for row in range(self.board_size):\n for col in range(self.board_size):\n poss = list(self.possibles[row][col])\n if poss:\n teil = qbwrdd.Tile(poss, self.board.scene)\n teil.cell = \"poss\"\n cell = row * self.board_size + col\n pos_x, pos_y = self.board.cells[cell].x(), self.board.cells[cell].y()\n if col % 3 > 0:\n pos_x += 2\n self.poss_tiles[row][col] = teil\n teil.draw_tile_at(pos_x, pos_y)",
"def __FreeTiles(self, grid, log=False):\n\n x_pos, _ = np.where(grid == 0)\n return len(x_pos)",
"def num_black_neighbors(tile, tiles):\n return sum([tiles[add(tile, step)] for step in NEIGHBORS])",
"def display(self):\n count = 0\n self.displays[0].start() # call only once to support shift chain\n for d in self.displays:\n d.output(self.data[count])\n count += 1\n self.displays[0].latch() # call only once to support shift chain",
"def update_tile(tile, color, tiles):\n if color == BLACK:\n return num_black_neighbors(tile, tiles) in [1, 2]\n if color == WHITE:\n return num_black_neighbors(tile, tiles) == 2",
"def getNumTiles(self):\n return len(list(product(list(range(self.width+1))[1:], list(range(self.height+1))[1:])))",
"def __handle_view_tile(self, gamestate_component):",
"def enumerate_tiles(self):\n # Iterates through entire game board.\n for row in range(self.rows):\n for col in range(self.cols):\n\n # Doesn't count mines adjacent to mine tiles.\n if self.board[row][col].category == Tiles.mine:\n continue\n mines = 0\n\n # Calculates number of mines surrounding each tile.\n for i in [row-1, row, row+1]:\n for j in [col-1, col, col+1]:\n if (self.valid_tile(i, j) and self.board[i][j].category == Tiles.mine):\n mines += 1\n \n # Sets each game board tile's mine proximity number.\n self.board[row][col] = Tiles(row, col, str(mines))",
"def delaround(i, j):\r\n global game_over, last_destroy\r\n # If the clicked tile is already revealed, do nothing.\r\n if tiles_cover[i, j].get_size() == (0, 0):\r\n return\r\n if tiles_cover[i, j] == tile_img_list[11]:\r\n return\r\n if tiles_cover[i, j] != tile_img_list[11]:\r\n tiles_cover[i, j] = pyg.Surface((0, 0)) # Reveal clicked tile\r\n last_destroy = (i, j)\r\n if tiles[i, j] == 9: # If mine is under clicked tile\r\n game_over = 1\r\n # If the current tile is blank, check all adjacent tiles\r\n cycle = [(i+dx, j+dy) for dx, dy in around]\r\n # Cycles through surrounding tiles\r\n for x, y in cycle:\r\n if show_destroy:\r\n pyg.event.pump()\r\n # If x or y coordinates are off the grid, skip this loop\r\n if x >= size_h or y >= size_w or x < 0 or y < 0:\r\n continue\r\n # If the current tile is already uncovered, skip loop\r\n if tiles_cover[x, y].get_size() == (0, 0):\r\n continue\r\n if tiles_cover[x, y] == tile_img_list[11]:\r\n continue\r\n # If clicked tile is a number tile, uncover it\r\n if tiles[i, j] == 0 and tiles[x, y] in range(1, 9):\r\n tiles_cover[x, y] = pyg.Surface((0, 0))\r\n last_destroy = (i, j)\r\n if show_destroy:\r\n draw_img()\r\n draw_cover()\r\n update()\r\n # If clicked tile is blank, call function at the tile\r\n elif tiles[x, y] == 0: # abs(x-i)+abs(y-j) != 2\r\n if show_destroy:\r\n draw_img()\r\n draw_cover()\r\n update()\r\n delaround(x, y)",
"def new_tile(self):\r\n # replace with your code\r\n # complete search ....\r\n non_zero_count = 0;\r\n for row in range(self._grid_height):\r\n for col in range(self._grid_width):\r\n if self._grid_tile[row][col] == 0:\r\n non_zero_count += 1\r\n random_choice = random.randrange(0, non_zero_count)\r\n count = 0\r\n # another search ....\r\n generated_new_tile = False\r\n for row in range(self._grid_height):\r\n for col in range(self._grid_width):\r\n if generated_new_tile == False and self._grid_tile[row][col] == 0:\r\n if count != random_choice:\r\n count += 1 \r\n else:\r\n if random.randrange(0,100) < 10:\r\n self.set_tile(row, col ,4)\r\n else:\r\n self.set_tile(row, col ,2)\r\n generated_new_tile = True",
"def main():\n row, col, island = make_matrix()\n print(count_island(row, col, island))",
"def part1(mem):\n return len(paint_panels(mem, 0))",
"def revealed_suits_tiles(player, tiles_34):\n return _suits_tiles_helper(\n tiles_34, lambda _tile_34_index, _tiles_34: player.number_of_revealed_tiles(_tile_34_index, _tiles_34)\n )",
"def check_open(self, n_faces):\r\n count_used = Counter([item for sublist in self.tiles\r\n for item in sublist\r\n if item in self.get_borders()])\r\n if min(count_used.values()) == n_faces:\r\n self.open = False",
"def no_occupied_visible_seats(index_i, index_j, tile_arrangement):\n\n occupied_seats = 0\n\n # Right, Left, Down and Up\n try:\n value = 1\n while True:\n occupancy = get_tile_type(index_i + value, index_j, tile_arrangement)\n if occupancy == \"Occupied\":\n occupied_seats += 1\n break\n elif occupancy == \"Free\":\n break\n else:\n value += 1\n except IndexError:\n pass\n\n try:\n value = 1\n while True:\n occupancy = get_tile_type(index_i - value, index_j, tile_arrangement)\n if occupancy == \"Occupied\":\n occupied_seats += 1\n break\n elif occupancy == \"Free\":\n break\n else:\n value += 1\n except IndexError:\n pass\n\n try:\n value = 1\n while True:\n occupancy = get_tile_type(index_i, index_j + value, tile_arrangement)\n if occupancy == \"Occupied\":\n occupied_seats += 1\n break\n elif occupancy == \"Free\":\n break\n else:\n value += 1\n except IndexError:\n pass\n\n try:\n value = 1\n while True:\n occupancy = get_tile_type(index_i, index_j - value, tile_arrangement)\n if occupancy == \"Occupied\":\n occupied_seats += 1\n break\n elif occupancy == \"Free\":\n break\n else:\n value += 1\n except IndexError:\n pass\n\n # Diagonals (Right-Down, Left-Down, Right-Up, Left-Up)\n try:\n value = 1\n while True:\n occupancy = get_tile_type(index_i + value, index_j + value, tile_arrangement)\n if occupancy == \"Occupied\":\n occupied_seats += 1\n break\n elif occupancy == \"Free\":\n break\n else:\n value += 1\n except IndexError:\n pass\n\n try:\n value = 1\n while True:\n occupancy = get_tile_type(index_i - value, index_j + value, tile_arrangement)\n if occupancy == \"Occupied\":\n occupied_seats += 1\n break\n elif occupancy == \"Free\":\n break\n else:\n value += 1\n except IndexError:\n pass\n\n try:\n value = 1\n while True:\n occupancy = get_tile_type(index_i + value, index_j - value, tile_arrangement)\n if occupancy == \"Occupied\":\n occupied_seats += 1\n break\n elif occupancy == \"Free\":\n break\n else:\n value += 1\n except IndexError:\n pass\n\n try:\n value = 1\n while True:\n occupancy = get_tile_type(index_i - value, index_j - value, tile_arrangement)\n if occupancy == \"Occupied\":\n occupied_seats += 1\n break\n elif occupancy == \"Free\":\n break\n else:\n value += 1\n except IndexError:\n pass\n\n return occupied_seats",
"def getNumTiles(self):\n return self.height * self.width",
"def getNumTiles(self):\n\t\treturn self.numTiles",
"def __len__(self) -> int:\n return len(self._tiles)",
"def finish(self):\n\t\tself.planet.tiles[self.y][self.x].set_occupant() # set occupant to the initial tile\n\t\tnum_tile = self.planet.width * self.planet.height\n\t\tsum = 0\n\t\tfor y in range(self.planet.height): # get the number of the tiles which have been explored\n\t\t\tfor x in range(self.planet.width):\n\t\t\t\ttile = self.planet.tiles[y][x]\n\t\t\t\tif tile.occupant == 1:\n\t\t\t\t\tsum += 1\n\t\tpercent = int((sum/num_tile)*100)\n\t\tprint(\"You explored {}% of {}\".format(percent, self.planet.name))",
"def use_displaced_heur(self):\r\n\t\tdisplacedTiles = 0\r\n\r\n\t\tfor row in range(self.n):\r\n\t\t\tfor col in range(self.n):\r\n\t\t\t\tif self.board[row][col] != BoardClass.goal[row][col]:\r\n\t\t\t\t\tdisplacedTiles += 1\r\n\r\n\t\tself.heuristic = displacedTiles",
"def getNumTiles(self):\n return self.w * self.h",
"def map_displayer(stage, player,\n stage_tiles, TILES, special_tiles, default_tile):\n color.write(\"=============================================\\n\",\"BUILTIN\") # Hard seperation to show that a new turn has begun\n # Setup variables\n x = 1\n y = stage[1]\n player_x = player[0]\n player_y = player[1]\n\n while y > 0:\n while x < stage[0]+1:\n if x == player_x and y == player_y:\n color.write(TILES.get(\"player\", \"X\"), \"hit\")\n\n elif (\"{0},{1}\".format(x, y) in stage_tiles\n and \"{0},{1}\".format(x, y) in special_tiles):\n if (stage_tiles[\"{0},{1}\".format(x, y)] == \"npc\"\n or stage_tiles[\"{0},{1}\".format(x, y)] == \"sign\"):\n tile = stage_tiles.get(\"{0},{1}\".format(x, y), default_tile)\n color.write(TILES[tile], \"KEYWORD\")\n \n else:\n tile = stage_tiles.get(\"{0},{1}\".format(x, y), default_tile)\n color.write(TILES[tile], \"STRING\")\n\n elif \"{0},{1}\".format(x, y) in stage_tiles:\n if (stage_tiles[\"{0},{1}\".format(x, y)] == \"rock\"\n or stage_tiles[\"{0},{1}\".format(x, y)] == \"mountain\"):\n tile = stage_tiles.get(\"{0},{1}\".format(x, y), default_tile)\n color.write(TILES[tile], \"stderr\")\n\n else:\n tile = stage_tiles.get(\"{0},{1}\".format(x, y), default_tile)\n color.write(TILES[tile], \"stdout\")\n\n elif \"{0},{1}\".format(x,y) in special_tiles:\n if (special_tiles[\"{0},{1}\".format(x, y)] == \"dark_water\"):\n tile = stage_tiles.get(\"{0},{1}\".format(x, y), default_tile)\n color.write(TILES[tile],\"stdin\") \n else:\n print(TILES[default_tile], end='')\n x += 1\n print(\" \",end='')\n print(\"\")\n y -= 1\n x = 1"
] | [
"0.714055",
"0.7036135",
"0.6740042",
"0.5885403",
"0.576607",
"0.57298493",
"0.5698092",
"0.56953084",
"0.56464475",
"0.5641887",
"0.56332046",
"0.5593228",
"0.55830514",
"0.55693036",
"0.5568158",
"0.5554808",
"0.5553236",
"0.55482566",
"0.5500011",
"0.54978377",
"0.5483443",
"0.54781795",
"0.54764426",
"0.5455631",
"0.5441625",
"0.54285413",
"0.5416541",
"0.5416254",
"0.54046994",
"0.5397837"
] | 0.70521516 | 1 |
Calls show() on the clicked Tile if applicable. Reverts the Smiley button's image to the default smile to animate the click. | def showTile(self, event):
clicked = event.widget
if clicked.isInPlay():
self.changeSmile(1)
returned = clicked.show()
if returned == 1 and clicked.isZero():
returned += self.cascadeShow(clicked)
self.checkEnd(returned) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def showAdjTiles(self,event):\n clicked = event.widget\n if clicked.isInPlay():\n self.changeSmile(1)\n #if tile is Safe, reveal adjacent tiles and cascade if needed\n if clicked.isSafe():\n returned = 0\n for adjTile in self.getAdjacentTiles(clicked.row, clicked.col):\n value = adjTile.show()\n if value == 1 and adjTile.isZero():\n value += self.cascadeShow(adjTile)\n returned = -1 if (value == -1 or returned == -1) else value+returned\n self.checkEnd(returned)\n #if unsafe, return adjacent buttons to unpressed images\n else:\n for adjTile in self.getAdjacentTiles(clicked.row, clicked.col):\n if not adjTile.isFlagged() and not adjTile.isShown():\n adjTile.configure(image=Tile.images[10])",
"def buttonPress(self):\n if self.inPlay and not self.shown:\n self.configure(image = Tile.images[0])",
"def changeSmile(self, num, event=None):\n self.smileButton.configure(image=self.images[num])",
"def pressTile(self, event):\n clickedTile = event.widget\n if clickedTile.isInPlay(): self.changeSmile(2)\n if not clickedTile.isFlagged():\n clickedTile.buttonPress()\n if not self.minesArmed and event.num == 1:\n self.setUpBombs(event)",
"def replay(self):\n self.shown = False\n self.mine = False\n self.flag = False\n self.inPlay = True\n self.count = 0\n self.numFlags = 0\n self.configure(image=Tile.images[10])",
"def new_tile(self):\n # replace with your code\n pass",
"def revealBombs(self, win):\n for row in self.tiles:\n for tile in row:\n tile.inPlay = False\n if tile.isMine():\n if win:\n #flag non-flagged mines after winning\n if not tile.isFlagged():\n tile.configure(image=Tile.images[11])\n self.numFlags += 1\n else:\n #show unexploded mines after losing \n if not tile.isShown():\n tile.configure(image=Tile.images[9])\n #if incorrectly flagged, mark as such \n elif tile.isFlagged():\n tile.configure(image=Tile.images[12])",
"def update_reset_button(self):\r\n if self.board.hovered_tiles and self.is_left_mouse_down:\r\n self.reset_button.draw_uhoh()\r\n else:\r\n self.reset_button.draw_smiley()",
"def replay(self, event=None):\n #reset relevant variables\n self.numChecked = 0\n self.numFlags = 0\n self.minesArmed = False\n self.startTime = None\n\n #reset labels\n self.mineLabel.configure(text=\"Mines: \"+str(self.numMines))\n self.smileButton.configure(image=self.images[1])\n self.flagLabel.configure(text=\"Flags: \"+str(self.numFlags))\n\n #reset all tiles\n for row in self.tiles:\n for tile in row:\n tile.replay()",
"def pressAdjTiles(self, event):\n clickedTile = event.widget\n if clickedTile.isInPlay(): self.changeSmile(2)\n for adjTile in self.getAdjacentTiles(clickedTile.row, clickedTile.col):\n if not adjTile.isFlagged(): adjTile.buttonPress()",
"def show(self):\n if not self.shown and not self.flag:\n self.shown = True\n self.configure(image=Tile.images[self.count])\n return -1 if self.mine else 1\n return 0",
"def __on_tile_matching_started(self):\n\n self.progress_window.show_tile_matching_animation()",
"def iconify(self):\n if self.active:\n self.master.withdraw()\n self.active = False",
"def showBtnImg(*args, **kwargs):\n\targs[0].get_image().show()",
"def set_eaten(self):\n self.state['return'] = True\n self.state['blue'] = False\n self.tile = (self.get_nearest_row(), self.get_nearest_col())\n self.return_path = Ghost.find_path(self.internal_map, self.tile, self.return_tile)\n self.direction = self.get_dir_from_path()\n self.image = self.score_font.render('200', True, (255, 255, 255))\n self.eaten_time = time.get_ticks()",
"def reveal(self, i, j):\n if not self.is_game_over:\n if not self.flags[i, j]:\n # Game is initialized after first click in order to prevent\n # the first click being straight over a mine\n if not self.is_initialized:\n self.initialize(i, j)\n\n self.update_revealed(i, j)\n self.revealed_img.set_data(self.revealed)\n self.flags_pts.set_data(*np.where(self.flags)[::-1])\n self.refresh_canvas()\n\n if np.count_nonzero(self.revealed) == self.n_not_mines:\n self.game_over(True)",
"def setBtnIcon(self):\n self.setIcon(QtGui.QIcon(self.movie.currentPixmap()))\n self.setIconSize(QtCore.QSize(self.size[0], self.size[1]))",
"def setIcon(self, icon, alpha=False):\n try:\n pygame.display.set_icon(icon)\n except TypeError:\n icon = self.newObject(icon, alpha)\n pygame.display.set_icon(icon)",
"def show( self ):\n if self.visible == 1:#ohnheiser hack and time() - self.lastMotion > self.delay:\n self.visible = 2\n if self.visible == 2:\n self.deiconify()",
"def grid_clicked(self, x, y):\n if self.__game.get_tile(x, y) is MarkerType.NONE:\n player = self.__game.get_player()\n next_player = self.__game.get_next_player()\n\n # Next move the positions are swapped\n self.__infobar.update_info(next_player, player)\n\n self.__tilegrid.set_tile_marker(x, y, player)\n self.__tilegrid.set_tile_color(x, y, Color.DARK_TONE)\n\n state, winner, loser, win_tiles = self.__game.make_move(x, y)\n # Display winner info if found\n if state is GameState.WINNER:\n self.__infobar.show_results(state, winner, loser)\n self.__tilegrid.highlight_tiles(win_tiles)\n self.__buttonbar.set_disabled(False)\n elif state is GameState.TIE:\n self.__infobar.show_results(state, None, None)\n self.__buttonbar.set_disabled(False)\n\n # Play sound according to the player\n if player is MarkerType.CROSS:\n winsound.PlaySound(\"sound/click_x.wav\", winsound.SND_ASYNC)\n else:\n winsound.PlaySound(\"sound/click_o.wav\", winsound.SND_ASYNC)\n else:\n self.__tilegrid.set_tile_color(x, y, Color.FAIL_COLOR)",
"def icon(self):",
"def __handle_view_tile(self, gamestate_component):",
"def delaround(i, j):\r\n global game_over, last_destroy\r\n # If the clicked tile is already revealed, do nothing.\r\n if tiles_cover[i, j].get_size() == (0, 0):\r\n return\r\n if tiles_cover[i, j] == tile_img_list[11]:\r\n return\r\n if tiles_cover[i, j] != tile_img_list[11]:\r\n tiles_cover[i, j] = pyg.Surface((0, 0)) # Reveal clicked tile\r\n last_destroy = (i, j)\r\n if tiles[i, j] == 9: # If mine is under clicked tile\r\n game_over = 1\r\n # If the current tile is blank, check all adjacent tiles\r\n cycle = [(i+dx, j+dy) for dx, dy in around]\r\n # Cycles through surrounding tiles\r\n for x, y in cycle:\r\n if show_destroy:\r\n pyg.event.pump()\r\n # If x or y coordinates are off the grid, skip this loop\r\n if x >= size_h or y >= size_w or x < 0 or y < 0:\r\n continue\r\n # If the current tile is already uncovered, skip loop\r\n if tiles_cover[x, y].get_size() == (0, 0):\r\n continue\r\n if tiles_cover[x, y] == tile_img_list[11]:\r\n continue\r\n # If clicked tile is a number tile, uncover it\r\n if tiles[i, j] == 0 and tiles[x, y] in range(1, 9):\r\n tiles_cover[x, y] = pyg.Surface((0, 0))\r\n last_destroy = (i, j)\r\n if show_destroy:\r\n draw_img()\r\n draw_cover()\r\n update()\r\n # If clicked tile is blank, call function at the tile\r\n elif tiles[x, y] == 0: # abs(x-i)+abs(y-j) != 2\r\n if show_destroy:\r\n draw_img()\r\n draw_cover()\r\n update()\r\n delaround(x, y)",
"def toggleFlag(self, event): \n clicked = event.widget\n if clicked.isInPlay(): self.changeSmile(1)\n value = clicked.setFlag()\n for adjTile in self.getAdjacentTiles(clicked.row, clicked.col):\n adjTile.numFlags += value\n self.numFlags += value\n self.flagLabel.configure(text=\"Flags: \"+str(self.numFlags))",
"def update_icon(self, _widget, _callback_data):\n\t\t\n\t\tprint \"in update_icon for \", self.name\n\t\tself.icon = self.__window.get_icon()\n\t\tself.icon.save(self.imgpath, \"png\")\n\t\tif not self.pile is None:\n\t\t\tself.pile.update_child_icon(self)\n\t\treturn",
"def setIconImage(*args):",
"def display(self):\n\t\tself.imgDisplay.set_from_pixbuf(self.getVisible())\n\t\tgc.collect()",
"def blit_me(self):\n self.game_over_img.blit_me()\n self.retry_button.blit_me()",
"def draw_tile(self, tile):\n raise NotImplemented()",
"def for_tests_only():\n root = tk.Tk()\n panel = tk.Frame(root)\n panel.pack(expand=tk.YES, fill=tk.BOTH)\n lbl = ShowGif(panel)\n lbl.place(bordermode='outside', x=135, y=500)\n lbl.show('..\\\\PicUi\\\\100x100.gif')\n root.mainloop()"
] | [
"0.70960873",
"0.702084",
"0.6491204",
"0.64384776",
"0.6199169",
"0.5966352",
"0.5956875",
"0.5948234",
"0.59471923",
"0.59469694",
"0.5903998",
"0.5832787",
"0.57578105",
"0.5723929",
"0.5723466",
"0.5685981",
"0.5650508",
"0.5596597",
"0.55943763",
"0.55925274",
"0.552771",
"0.5518731",
"0.5503096",
"0.54961336",
"0.5487563",
"0.5479362",
"0.54713136",
"0.54693097",
"0.5443515",
"0.543385"
] | 0.7800496 | 0 |
calls showAround() on clicked Tile if applicable | def showAdjTiles(self,event):
clicked = event.widget
if clicked.isInPlay():
self.changeSmile(1)
#if tile is Safe, reveal adjacent tiles and cascade if needed
if clicked.isSafe():
returned = 0
for adjTile in self.getAdjacentTiles(clicked.row, clicked.col):
value = adjTile.show()
if value == 1 and adjTile.isZero():
value += self.cascadeShow(adjTile)
returned = -1 if (value == -1 or returned == -1) else value+returned
self.checkEnd(returned)
#if unsafe, return adjacent buttons to unpressed images
else:
for adjTile in self.getAdjacentTiles(clicked.row, clicked.col):
if not adjTile.isFlagged() and not adjTile.isShown():
adjTile.configure(image=Tile.images[10]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def showTile(self, event):\n clicked = event.widget\n if clicked.isInPlay():\n self.changeSmile(1)\n returned = clicked.show()\n if returned == 1 and clicked.isZero():\n returned += self.cascadeShow(clicked)\n self.checkEnd(returned)",
"def __handle_view_tile(self, gamestate_component):",
"def delaround(i, j):\r\n global game_over, last_destroy\r\n # If the clicked tile is already revealed, do nothing.\r\n if tiles_cover[i, j].get_size() == (0, 0):\r\n return\r\n if tiles_cover[i, j] == tile_img_list[11]:\r\n return\r\n if tiles_cover[i, j] != tile_img_list[11]:\r\n tiles_cover[i, j] = pyg.Surface((0, 0)) # Reveal clicked tile\r\n last_destroy = (i, j)\r\n if tiles[i, j] == 9: # If mine is under clicked tile\r\n game_over = 1\r\n # If the current tile is blank, check all adjacent tiles\r\n cycle = [(i+dx, j+dy) for dx, dy in around]\r\n # Cycles through surrounding tiles\r\n for x, y in cycle:\r\n if show_destroy:\r\n pyg.event.pump()\r\n # If x or y coordinates are off the grid, skip this loop\r\n if x >= size_h or y >= size_w or x < 0 or y < 0:\r\n continue\r\n # If the current tile is already uncovered, skip loop\r\n if tiles_cover[x, y].get_size() == (0, 0):\r\n continue\r\n if tiles_cover[x, y] == tile_img_list[11]:\r\n continue\r\n # If clicked tile is a number tile, uncover it\r\n if tiles[i, j] == 0 and tiles[x, y] in range(1, 9):\r\n tiles_cover[x, y] = pyg.Surface((0, 0))\r\n last_destroy = (i, j)\r\n if show_destroy:\r\n draw_img()\r\n draw_cover()\r\n update()\r\n # If clicked tile is blank, call function at the tile\r\n elif tiles[x, y] == 0: # abs(x-i)+abs(y-j) != 2\r\n if show_destroy:\r\n draw_img()\r\n draw_cover()\r\n update()\r\n delaround(x, y)",
"def draw(self):\n if context.click():\n self.place()",
"def clickable(self, event):\n tile = self.canvas.find_closest(event.x, event.y)\n # check if tile is clickable, and already fill color\n if self.is_clickable(tile) and self.canvas.itemcget(tile, \"fill\") != \\\n self.color:\n self.num_clicks += 1\n cords = self.canvas.coords(tile)\n self.canvas.itemconfigure(tile, tag=\"selected\")\n self.pic.append(tkinter.PhotoImage(file=os.path.join(self.folder,\n self.new_list[\n tile[\n 0] - 1])))\n self.image_id.append(self.canvas.create_image(\n (cords[0] + cords[2]) / 2,\n (cords[1] + cords[3]) / 2,\n image=self.pic[-1]))\n self.click_tiles.append(tile)",
"def new_tile(self):\n # replace with your code\n pass",
"def pressAdjTiles(self, event):\n clickedTile = event.widget\n if clickedTile.isInPlay(): self.changeSmile(2)\n for adjTile in self.getAdjacentTiles(clickedTile.row, clickedTile.col):\n if not adjTile.isFlagged(): adjTile.buttonPress()",
"def pressTile(self, event):\n clickedTile = event.widget\n if clickedTile.isInPlay(): self.changeSmile(2)\n if not clickedTile.isFlagged():\n clickedTile.buttonPress()\n if not self.minesArmed and event.num == 1:\n self.setUpBombs(event)",
"def __on_tile_matching_started(self):\n\n self.progress_window.show_tile_matching_animation()",
"def grid_clicked(self, x, y):\n if self.__game.get_tile(x, y) is MarkerType.NONE:\n player = self.__game.get_player()\n next_player = self.__game.get_next_player()\n\n # Next move the positions are swapped\n self.__infobar.update_info(next_player, player)\n\n self.__tilegrid.set_tile_marker(x, y, player)\n self.__tilegrid.set_tile_color(x, y, Color.DARK_TONE)\n\n state, winner, loser, win_tiles = self.__game.make_move(x, y)\n # Display winner info if found\n if state is GameState.WINNER:\n self.__infobar.show_results(state, winner, loser)\n self.__tilegrid.highlight_tiles(win_tiles)\n self.__buttonbar.set_disabled(False)\n elif state is GameState.TIE:\n self.__infobar.show_results(state, None, None)\n self.__buttonbar.set_disabled(False)\n\n # Play sound according to the player\n if player is MarkerType.CROSS:\n winsound.PlaySound(\"sound/click_x.wav\", winsound.SND_ASYNC)\n else:\n winsound.PlaySound(\"sound/click_o.wav\", winsound.SND_ASYNC)\n else:\n self.__tilegrid.set_tile_color(x, y, Color.FAIL_COLOR)",
"def draw_tile(self, tile):\n raise NotImplemented()",
"def buttonPress(self):\n if self.inPlay and not self.shown:\n self.configure(image = Tile.images[0])",
"def handle(self, event):\n\n if event == FL_PUSH:\n if Fl.event_button1():\n if self.gamewin.placing >= 0:\n self.gamewin.place_boat()\n \n else:\n self.gamewin.tile_clicked(self)\n return 1\n \n if event == FL_DRAG:\n return 0\n \n return super().handle(event)",
"def on_click(self, e: ti.template()):\n for i, j in ti.ndrange(self.nx, self.ny):\n if self.inside(self.Vector2(i / self.nx, j / self.ny),\n self.Vector2(e.pos[0], e.pos[1]), 0.03):\n self.T[i, j] = 1",
"def show(self):\n if not self.shown and not self.flag:\n self.shown = True\n self.configure(image=Tile.images[self.count])\n return -1 if self.mine else 1\n return 0",
"def query_image_tile(self, coord):",
"def clicked(self, x_pos, y_pos):\n img = self.tower_imgs\n if self.x - img.get_width() // 2 + self.width >= x_pos >= self.x - img.get_width() // 2:\n if self.y + self.height - img.get_height() // 2 >= y_pos >= self.y - img.get_height() // 2:\n return True\n return False",
"def place_object(self, thing):\n color = [i * 255 for i in thing.color.rgb]\n size = (20, 20)\n if thing.name == \"luna\":\n size = (5, 5)\n if self.is_visible(thing.position, max(size)):\n position = self.get_position(thing.position, size)\n pygame.draw.ellipse(self.screen, color, (position, size))",
"def _left_click(self, event):\n #retrieve position to place tower\n if self._current_tower is None:\n return\n\n position = event.x, event.y\n cell_position = self._game.grid.pixel_to_cell(position)\n \n #if the event position already has a tower, show the upgrades for it\n if cell_position in self._game.towers:\n\n tower = self._game.towers[cell_position]\n\n\n #hide all upgrade_controls\n for t in self._upgrade_controls:\n self._upgrade_controls[t].pack_forget()\n\n \n #initiate the upgrade control if it doesn't already exist\n if tower not in self._upgrade_controls:\n upgrade_control = UpgradeControl(self._right_frame, tower, self)\n self._upgrade_controls[tower] = upgrade_control\n upgrade_control.pack(expand=True)\n\n else:\n #pack the one with the tower\n tower = self._game.towers[cell_position]\n upgrade_control = self._upgrade_controls[tower]\n upgrade_control.pack(expand=True)\n upgrade_control.check_status()\n\n\n #Task 1.2 (Tower placement): Attempt to place the tower being previewed\n legal, grid_path = self._game.attempt_placement(position)\n\n if legal and (self._current_tower.get_value() <= self._coins):\n self._coins -= self._current_tower.get_value() \n self._status_bar.set_coins(self._coins)\n\n #refresh view upon placing a tower\n\n if self._game.place(cell_position, tower_type=self._current_tower.__class__):\n #delete preview after placing\n self._view.delete(\"shadow\", \"range\", \"path\")\n for tower_type, shop_tower_view in self._tower_views:\n if tower_type.base_cost > self._coins:\n shop_tower_view.set_available(False)\n self.refresh_view()\n self._step()",
"def click(self, X, Y):\n tower_image = self.tower_images[self.level - 1]\n\n if X <= self.x + tower_image.get_width() // 2 - 2 * self.extra_padding and X >= self.x - tower_image.get_width() // 2 + self.extra_padding // 2:\n if self.name in TowerConstants.MAGIC_TOWER_NAMES or self.name in TowerConstants.SUP_TOWER_NAMES:\n if Y <= self.y + self.height // 2 - (2 * self.extra_padding) and Y >= self.y - self.height // 2 + (2 * self.extra_padding):\n return True\n else:\n if Y <= self.y + self.height // 2 - (4 * self.extra_padding) and Y >= self.y - self.height // 2 + (2 * self.extra_padding):\n return True\n return False",
"def is_clickable(self, tile):\n return \"match\" not in self.canvas.gettags(tile) and \\\n \"selected\" not in self.canvas.gettags(tile) and \\\n len(self.canvas.find_withtag(\"selected\")) < 2",
"def area(self, tileID):\n pass",
"def click_callback(self, event):\n # print(\"clicked at \", event.x+self.offset_x, event.y+self.offset_y)\n # x = string.ascii_lowercase[math.ceil((event.x + self.offset_x) / self.width) - 1]\n # y = (math.ceil((event.y + self.offset_y) / self.width) - 9) * -1\n self.clear_moves_on_canvas()\n\n x = math.ceil((event.x + self.offset_x) / self.width) - 1\n y = math.ceil((event.y + self.offset_y) / self.width) - 1\n\n if 0 <= x < 8 and 0 <= y < 8:\n board_value = self.game.board[x][y]\n if self.moving:\n # check if second click isn't on another piece\n if board_value != \"\" and board_value[0] == self.game.current_player_color:\n self.calculate_moves_for_moving_piece(x, y)\n else:\n self.move_piece(x, y) # method moves moving_piece\n self.moving = False\n else:\n self.calculate_moves_for_moving_piece(x, y) # method sets moving_piece",
"def update_map(self, screenshot=None):\n # Get the visible tiles\n nearby = self.game_map[\n (self.player_position[0] - 10): (self.player_position[0] + 11),\n (self.player_position[1] - 10): (self.player_position[1] + 11)\n ]\n\n # Clear NPCs in the nearby as they may have moved\n nearby[nearby == self.TILES.WEAPON_SHOPKEEPER.value] = self.TILES.UNKNOWN.value\n nearby[nearby == self.TILES.BLACKSMITH.value] = self.TILES.UNKNOWN.value\n\n # Take screenshot and isolate the gamplay region\n if screenshot is None:\n screenshot = utils.take_screenshot()\n play = screenshot[8:344, 8:344]\n\n # Loop through all unknown tiles in the nearby\n for i, j in zip(*np.where(nearby == self.TILES.UNKNOWN.value)):\n # Scale up the dimensions\n tile_x = i * self.TILE_DIM\n tile_y = j * self.TILE_DIM\n\n # The center cell is always the player\n if i == 10 and j == 10:\n tile_x = self.player_position[0] + int(tile_x / 16) - 10\n tile_y = self.player_position[1] + int(tile_y / 16) - 10\n self.game_map[(tile_x, tile_y)] = self.TILES.PLAYER.value\n continue\n\n # Slice the tile from the play region\n tile = play[tile_y:tile_y + self.TILE_DIM,\n tile_x:tile_x + self.TILE_DIM]\n\n tile_x = self.player_position[0] + int(tile_x / 16) - 10\n tile_y = self.player_position[1] + int(tile_y / 16) - 10\n\n # Go through all tile types looking for a high confidence match\n template = None\n for potential_template in self.templates:\n if np.allclose(potential_template[0], tile, 1, 1):\n template = potential_template\n break\n\n # No match, assume it is inaccessible\n if template is None:\n self.game_map[(tile_x, tile_y)] = self.TILES.INACCESSIBLE.value\n continue\n\n # By default, mark tile as inaccessible\n label = None\n\n # Mark as mineable\n if re.search(r'rock', template[1], re.M | re.I):\n label = self.TILES.MOUNTAIN.value\n elif re.search(r'door', template[1], re.M | re.I):\n label = self.TILES.DOOR.value\n elif re.search(r'gravel', template[1], re.M | re.I):\n label = self.TILES.GRAVEL.value\n elif re.search(r'shopkeeper', template[1], re.M | re.I):\n label = self.TILES.WEAPON_SHOPKEEPER.value\n elif re.search(r'blacksmith', template[1], re.M | re.I):\n label = self.TILES.BLACKSMITH.value\n elif re.search(r'guard', template[1], re.M | re.I):\n label = self.TILES.INACCESSIBLE.value\n elif re.search(r'inaccessible', template[1], re.M | re.I):\n label = self.TILES.INACCESSIBLE.value\n elif re.search(r'accessible', template[1], re.M | re.I):\n label = self.TILES.ACCESSIBLE.value\n\n # Calculate coordinates of tile in the map relative to the player\n self.game_map[(tile_x, tile_y)] = label\n\n # Go through all tiles in the gameplay region to find the mountains\n for i, j in zip(*np.where(nearby == self.TILES.MOUNTAIN.value)):\n # Get the tile to the left of the mountain\n tile_left = nearby[(i-1, j)]\n\n # Only allow mountains to be minable if they are beside gravel\n if not tile_left == self.TILES.GRAVEL.value:\n nearby[(i, j)] = self.TILES.INACCESSIBLE.value\n\n # Save the game map to disk\n np.savetxt('map.txt', self.game_map, fmt='%d')",
"def click(self, event):\n x, y = self.canvas.invert([event.x, event.y])\n i, j = int(floor(x)), int(floor(y))\n patch = self.get_cell(i, j)\n if patch and patch.state == \"green\":\n cluster = self.get_cluster(patch)\n self.show_cluster(cluster)",
"def open_tile(self, y, x):\n # Find the letter index and convert into a y-coordinate.\n # Checks if it is a mine\n if [y, x] in self.mine_locations:\n # explode\n self.show_answer_board([y, x])\n print \"Boomz.\"\n return Minesweeper.IS_A_BOMB\n else:\n # strip(?)tease to the user (oh damn sexy numbers)\n self.tease_user(y, x)\n return Minesweeper.NOT_A_BOMB",
"def click(self, X, Y):\n img = self.tower_imgs\n if self.x - img.get_width() // 2 + self.width >= X >= self.x - img.get_width() // 2:\n if self.y + self.height - img.get_height() // 2 >= Y >= self.y - img.get_height() // 2:\n return True\n return False",
"def drawScreen(self, player, AI, maze):\n self.showScreen(maze, self.bananaIcon)\n player.drawCreature(self.screen, self.TILE_SIZE)\n AI.drawCreature(self.screen, self.TILE_SIZE)\n if self.popup:\n self.ItemPickedUpPopUp()\n pygame.display.update()",
"def tile_clicked(position):\n\n # retrieve tile index\n for i in range(0, len(BOARD)):\n for j in range(0, len(BOARD[i])):\n if BOARD[i][j].collidepoint(position):\n return [i, j]\n \n return False",
"def HitTest(self, point, theCtrl, flags=0, level=0):\r\n \r\n # for a hidden root node, don't evaluate it, but do evaluate children\r\n if not (level == 0 and theCtrl.HasAGWFlag(TR_HIDE_ROOT)):\r\n \r\n # evaluate the item\r\n h = theCtrl.GetLineHeight(self)\r\n \r\n if point.y > self._y and point.y < self._y + h:\r\n \r\n y_mid = self._y + h/2\r\n\r\n if point.y < y_mid:\r\n flags |= TREE_HITTEST_ONITEMUPPERPART\r\n else:\r\n flags |= TREE_HITTEST_ONITEMLOWERPART\r\n\r\n xCross = self._x - theCtrl.GetSpacing()\r\n\r\n if wx.Platform == \"__WXMAC__\":\r\n # according to the drawing code the triangels are drawn\r\n # at -4 , -4 from the position up to +10/+10 max\r\n if point.x > xCross-4 and point.x < xCross+10 and point.y > y_mid-4 and \\\r\n point.y < y_mid+10 and self.HasPlus() and theCtrl.HasButtons():\r\n\r\n flags |= TREE_HITTEST_ONITEMBUTTON\r\n return self, flags\r\n else:\r\n # 5 is the size of the plus sign\r\n if point.x > xCross-6 and point.x < xCross+6 and point.y > y_mid-6 and \\\r\n point.y < y_mid+6 and self.HasPlus() and theCtrl.HasButtons():\r\n\r\n flags |= TREE_HITTEST_ONITEMBUTTON\r\n return self, flags\r\n\r\n if point.x >= self._x and point.x <= self._x + self._width:\r\n\r\n image_w = -1\r\n wcheck = 0\r\n\r\n # assuming every image (normal and selected) has the same size!\r\n if self.GetImage() != _NO_IMAGE and theCtrl._imageListNormal:\r\n image_w, image_h = theCtrl._imageListNormal.GetSize(self.GetImage())\r\n\r\n if self.GetCheckedImage() is not None:\r\n wcheck, hcheck = theCtrl._imageListCheck.GetSize(self.GetCheckedImage())\r\n\r\n if wcheck and point.x <= self._x + wcheck + 1:\r\n flags |= TREE_HITTEST_ONITEMCHECKICON\r\n return self, flags\r\n\r\n if image_w != -1 and point.x <= self._x + wcheck + image_w + 1:\r\n flags |= TREE_HITTEST_ONITEMICON\r\n else:\r\n flags |= TREE_HITTEST_ONITEMLABEL\r\n\r\n return self, flags\r\n\r\n if point.x < self._x:\r\n if theCtrl.HasAGWFlag(TR_FULL_ROW_HIGHLIGHT):\r\n flags |= TREE_HITTEST_ONITEM\r\n else:\r\n flags |= TREE_HITTEST_ONITEMINDENT\r\n if point.x > self._x + self._width:\r\n if theCtrl.HasAGWFlag(TR_FULL_ROW_HIGHLIGHT):\r\n flags |= TREE_HITTEST_ONITEM\r\n else:\r\n flags |= TREE_HITTEST_ONITEMRIGHT\r\n \r\n return self, flags\r\n \r\n # if children are expanded, fall through to evaluate them\r\n if self._isCollapsed:\r\n return None, 0\r\n \r\n # evaluate children\r\n for child in self._children:\r\n res, flags = child.HitTest(point, theCtrl, flags, level + 1)\r\n if res != None:\r\n return res, flags\r\n\r\n return None, 0"
] | [
"0.74901944",
"0.63370013",
"0.6282459",
"0.60075176",
"0.5984413",
"0.58725184",
"0.5865513",
"0.5808913",
"0.57776076",
"0.57593286",
"0.57503843",
"0.57025725",
"0.5689865",
"0.5677591",
"0.5671587",
"0.56301147",
"0.5627339",
"0.5608963",
"0.5573536",
"0.55536336",
"0.5535506",
"0.5518618",
"0.5452951",
"0.5419526",
"0.5415975",
"0.53856486",
"0.53774154",
"0.5371551",
"0.5356196",
"0.5340003"
] | 0.67923003 | 1 |
If win == True, flags the unflagged mines; otherwise it reveals the unrevealed mines and marks the incorrect flags | def revealBombs(self, win):
for row in self.tiles:
for tile in row:
tile.inPlay = False
if tile.isMine():
if win:
#flag non-flagged mines after winning
if not tile.isFlagged():
tile.configure(image=Tile.images[11])
self.numFlags += 1
else:
#show unexploded mines after losing
if not tile.isShown():
tile.configure(image=Tile.images[9])
#if incorrectly flagged, mark as such
elif tile.isFlagged():
tile.configure(image=Tile.images[12]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_revealed(self, i, j):\n if not self.revealed[i, j]:\n # If not revealed cell\n if self.mines_count[i, j] < 0:\n # If wrong guess, games is over\n self.wrong = ~self.mines & self.flags\n self.wrong[i, j] = True\n self.game_over()\n else:\n # If guess is correct\n self.revealed[i, j] = True\n if self.mines_count[i, j] == 0:\n # Recursively looks for contiguous cells without mines\n for _i, _j in self.get_ij_neighbors(i, j):\n if self.mines_count[_i, _j] >= 0 and not self.revealed[_i, _j]:\n self.flags[_i, _j] = False\n self.update_revealed(_i, _j)\n elif self.mines_count[i, j] > 0:\n # The line below only makes sense when it's in the middle of the\n # recursion. For instance, a cell is flagged, but it is part of a\n # big blob that's going to be revealed. The game doesn't punish\n # the player in this scenario. This behavior has been copied\n # from gnome-mines\n self.flags[i, j] = False\n # Reveals mine count\n self.mines_count_txt[i, j].set_visible(True)\n elif self.mines_count[i, j] == self.count_neighbor_flags(i, j):\n # If cell that's already revealed is clicked and the number of\n # neighboring flags is the same as the number of neighboring\n # mines, then the hidden neighbor cells are recursevely\n # revealed. Evidently, if any flag guess is wrong, the game is\n # over.\n for _i, _j in self.get_ij_neighbors(i, j):\n if not self.flags[_i, _j] and not self.revealed[_i, _j]:\n self.update_revealed(_i, _j)",
"def flag(self, i, j):\n # Does not allow starting a game with a flag\n if not self.is_game_over and self.is_initialized:\n if not self.revealed[i, j]:\n self.flags[i, j] = not self.flags[i, j]\n self.flags_pts.set_data(*np.where(self.flags)[::-1])\n self.title_txt.set_text('{}/{}'.format(np.count_nonzero(self.flags), self.n_mines))\n self.refresh_canvas()",
"def game_over(self, win=False):\n self.is_game_over = True\n\n if win:\n self.flags_pts.set_data(*np.where(self.mines)[::-1]) # shows mines marked with flags\n self.title_txt.set_text('You win! Press F2 to start a new game')\n else:\n self.wrong_img.set_data(self.wrong) # wrong guesses\n self.mines_pts = self.ax.plot(self.jj[self.mines & ~self.flags],\n self.ii[self.mines & ~self.flags],\n 'kX', ms=10) # shows mines\n self.title_txt.set_text('You lose! Press F2 to start a new game')\n\n self.refresh_canvas()",
"def check_win(game, pokemon_locations):\n #traverse game\n for i in range(len(game)):\n if game[i] == FLAG:#flagged\n if i not in pokemon_locations:\n return False\n elif game[i] == '~':\n return False\n return True",
"def check_win():\r\n for mark in markers:\r\n if loc[0] == mark and loc[1] == mark and loc[2] == mark:\r\n return True\r\n if loc[0] == mark and loc[3] == mark and loc[6] == mark:\r\n return True\r\n if loc[0] == mark and loc[4] == mark and loc[8] == mark:\r\n return True\r\n if loc[1] == mark and loc[4] == mark and loc[7] == mark:\r\n return True\r\n if loc[2] == mark and loc[4] == mark and loc[6] == mark:\r\n return True\r\n if loc[2] == mark and loc[5] == mark and loc[8] == mark:\r\n return True\r\n if loc[3] == mark and loc[4] == mark and loc[5] == mark:\r\n return True\r\n if loc[6] == mark and loc[7] == mark and loc[8] == mark:\r\n return True\r\n else:\r\n return False",
"def flag_cell(self, event):\n if self.mineboard.gamestate is None:\n x = (event.x-2) // CELLWIDTH\n y = (event.y-2) // CELLWIDTH\n self.mineboard.flag_cell(y, x)\n self.update_cells()\n mines_rem = self.mineboard.minecount - self.mineboard.flagcount\n # updates the mines_left label\n if mines_rem == 1:\n self.mines_left.set(f\"{mines_rem} mine left\")\n else:\n self.mines_left.set(f\"{mines_rem} mines left\")",
"def hint(self):\n\t\tif not self.game:\n\t\t\treturn\n\t\tzeros = [(x,y) for x in range(self.width) for y in range(self.height)\n\t\t if self.game.marks[x][y] == CLOSED and\n\t\t not self.game.mines[x][y] and\n\t\t self.game.count_neighbor_mines(x, y) == 0]\n\t\tif zeros:\n\t\t\tx, y = random.choice(zeros)\n\t\t\tself.game.reveal(x, y, True)\n\t\t\tself.draw_field()",
"def won(self):\n return self.mines_found == self.mines",
"def won(self):\n return self.mines_found == self.mines",
"def won(self):\n return self.mines_found == self.mines",
"def won(self):\n return self.mines_found == self.mines",
"def won(self):\n return self.mines_found == self.mines",
"def won(self):\n return self.mines_found == self.mines",
"def mark_mines(self, cells):\r\n for cell in cells:\r\n row, col = cell\r\n self.mine_field[row][col] = 'x'\r\n self.mines_left -= 1\r\n return",
"def play_best_guess(self, game):\n\n\n # create a list of cells\n cells = [game.board[i][j]\n for i in xrange(game.rows)\n for j in xrange(game.cols)]\n\n first_cell = cells[0]\n game.reveal_cell(first_cell.row, first_cell.col)\n\n # draw updated board and pause for a second\n game.draw_board()\n if PAUSE == True:\n time.sleep(1)\n\n\n total_flagged = 0\n while not game.lost_game and not game.won_game:\n\n # remember if we've made a move in the while loop\n # so we know whether to make a random move later on\n made_move = False\n\n # look through all revealed cells for any with a number of neighboring mines.\n # if the cell has the same number of unrevealed neighbors as the cell's\n # number of neighboring mines, all the unrevealed neighbors must be mines.\n revealed_numbered_cells = [c for c in cells if c.revealed and (not c.flagged) and (c.neighbors > 0)]\n while revealed_numbered_cells:\n cell = revealed_numbered_cells.pop()\n # cell may have been marked flagged after revealed_numbered_cells was compiled\n if not cell.flagged:\n neighbor_cells = ms.Minesweeper.get_neighbors(cell.row, cell.col, game.board)\n flagged_neighbors = [n for n in neighbor_cells if n.flagged]\n number_remaining_mines = cell.neighbors - len(flagged_neighbors)\n unknown_neighbors = [n for n in neighbor_cells if not n.flagged and not n.revealed]\n if number_remaining_mines > 0 and len(unknown_neighbors) == number_remaining_mines:\n # flag every neighbor\n for c in unknown_neighbors:\n if total_flagged < game.mines:\n total_flagged += 1\n game.flag_cell(c.row, c.col)\n if (game.test_did_win()):\n game.game_over()\n game.draw_board()\n if PAUSE == True:\n time.sleep(1)\n made_move = True\n\n # we may have won with the flag above so test whether we're still playing\n # before further calculations\n if not game.lost_game and not game.won_game:\n # loop through all unrevealed, unflagged cells and see if we know it's safe to reveal\n for c in cells:\n if not c.revealed and not c.flagged and self.is_cell_safe(c, game.board):\n game.reveal_cell(c.row, c.col)\n if (game.test_did_win()):\n game.game_over()\n game.draw_board()\n if PAUSE == True:\n time.sleep(1)\n made_move = True\n\n # assume we've made our best guesses and now have to guess randomly\n # this will prevent us from looping forever if no obvious moves are available\n if not made_move:\n unrevealed = [c for c in cells if not c.revealed and not c.flagged]\n if len(unrevealed) > 0:\n cell = random.choice(unrevealed)\n game.reveal_cell(cell.row, cell.col)\n if (game.test_did_win()):\n game.game_over()\n game.draw_board()\n if PAUSE == True:\n time.sleep(3)",
"def is_win(my_board):\n return np.count_nonzero(my_board == CLOSED) == NUM_MINES",
"def check_win(self):\n for pos in self.win_set:\n # s would be all 1 if all positions of a winning move is fulfilled\n # otherwise 1s and 0s\n s = set([self.grid[p] for p in pos])\n if len(s) == 1 and (0 not in s):\n return True\n return False",
"def toggle_flag(self, loc: tuple[int, int]) -> None:\n if self.game_over or self.field[loc].is_naked:\n return\n\n if self.field[loc].is_flagged:\n self.field[loc].un_flag()\n self.mines_left += 1\n else:\n self.field[loc].flag()\n self.mines_left -= 1\n\n if self.auto_solving.get():\n block = Block(self.field, loc)\n useful_neighbors = {neighbor for neighbor in block.naked_neighbors\n if Block(self.field, neighbor).unknown_neighbors}\n [self.hyper_queue.remove(cell) for cell in useful_neighbors]\n self.auto_queue.add_batch(useful_neighbors,\n emphasis=self.emphasis[\"add_batch\"],\n color=\"new_auto\")\n self._auto_spark()",
"def gameWon(self):\n \n wins = [ threeInARow( self.squares[0], self.squares[1], self.squares[2] ),\n threeInARow( self.squares[3], self.squares[4], self.squares[5] ),\n threeInARow( self.squares[6], self.squares[7], self.squares[8] ),\n threeInARow( self.squares[0], self.squares[3], self.squares[6] ),\n threeInARow( self.squares[1], self.squares[4], self.squares[7] ),\n threeInARow( self.squares[2], self.squares[5], self.squares[8] ),\n threeInARow( self.squares[0], self.squares[4], self.squares[8] ),\n threeInARow( self.squares[2], self.squares[4], self.squares[6] ) ]\n \n return any(wins)",
"def reveal_mines(self, row, col):\n for loc in self.mineboard.mine_places:\n if loc != [row, col]:\n i, j = loc[0], loc[1]\n if self.mineboard.gameboard[i][j] == 'F':\n continue\n self.canvas.delete(self.cells[i][j])\n self.canvas.create_image(\n 2+j*CELLWIDTH, 2+i*CELLWIDTH, image=MINE, anchor='nw')",
"def check_win(self):\n for pos in self.win_set:\n s = set([self.grid[p] for p in pos])\n if len(s) == 1 and (0 not in s):\n return True\n return False",
"def check_won (grid):\r\n w=False\r\n for row in range(4):\r\n for col in range(4):\r\n if grid[row][col]>=32:\r\n w=True\r\n break\r\n return w",
"def win_game(self):\n\n def horizontal_win():\n \"\"\"Return whether there is horizontal win\"\"\"\n\n for i in range(0, board_size):\n if set(self.board[i]) == set([o_symbol]) or set(self.board[i]) == set([x_symbol]):\n print \"horizontal win\"\n return True\n\n def vertical_win():\n \"\"\"Return whether there is vertical win\"\"\"\n\n vert_set = set()\n for i in range(0, board_size):\n for j in range(0, board_size):\n vert_set.add(self.board[j][i])\n if vert_set == set([o_symbol]) or vert_set == set([x_symbol]):\n print \"vertical win\"\n return True \n vert_set = set()\n\n def diagonal_win():\n \"\"\"Return whether there is diagonal win\"\"\"\n\n diagonal_set = set()\n for i in range(0, board_size):\n diagonal_set.add(self.board[i][i]) \n\n if diagonal_set == set([o_symbol]) or diagonal_set == set([x_symbol]):\n print \"diagonal win 1\"\n return True\n \n diagonal_set = set()\n for i in range(0, board_size):\n diagonal_set.add(self.board[i][board_size - 1 - i])\n\n if diagonal_set == set([o_symbol]) or diagonal_set == set([x_symbol]):\n print \"diagonal win 2\"\n return True\n\n if horizontal_win() or vertical_win() or diagonal_win():\n print \"You have won.\"\n return True",
"def win():\r\n\r\n\tglobal turn, tile1, tile2, tile3, tile4, tile5, tile6, tile7, tile8, tile9, move1, move2, player1, player2\r\n\r\n\twin1 = tile1==tile2==tile3==1 or tile1==tile2==tile3==2\r\n\twin2 = tile4==tile5==tile6==1 or tile4==tile5==tile6==2\r\n\twin3 = tile7==tile8==tile9==1 or tile7==tile8==tile9==2\r\n\twin4 = tile1==tile4==tile7==1 or tile1==tile4==tile7==2\r\n\twin5 = tile2==tile5==tile8==1 or tile2==tile5==tile8==2\r\n\twin6 = tile3==tile6==tile9==1 or tile3==tile6==tile9==2\r\n\twin7 = tile1==tile5==tile9==1 or tile1==tile5==tile9==2\r\n\twin8 = tile3==tile5==tile7==1 or tile3==tile5==tile7==2\r\n\r\n\twin = win1 or win2 or win3 or win4 or win5 or win6 or win7 or win8\r\n\treturn win",
"def toggleFlag(self, event): \n clicked = event.widget\n if clicked.isInPlay(): self.changeSmile(1)\n value = clicked.setFlag()\n for adjTile in self.getAdjacentTiles(clicked.row, clicked.col):\n adjTile.numFlags += value\n self.numFlags += value\n self.flagLabel.configure(text=\"Flags: \"+str(self.numFlags))",
"def check_win(self):\n return UNEXPOSED not in self.get_game() and self.get_game().count(FLAG) == len(self.get_pokemon_location)",
"def is_won(self):\n for tile in self:\n if not tile.is_mine and tile.visibility != 1:\n return False\n return True",
"def mark_mine(self, cell):\n if cell in self.cells:\n self.mines.add(cell)\n self.cells.remove(cell)\n self.count -= 1",
"def game_on(self):\n doc = self.documentation\n return (self.draw.accepted or doc[len(doc)-1].accepted) and (self.board.stones_set < self.board.max_nr_stones) and (self.board.score[opponent(self.draw.player)] > 0)",
"def reveal(self, i, j):\n if not self.is_game_over:\n if not self.flags[i, j]:\n # Game is initialized after first click in order to prevent\n # the first click being straight over a mine\n if not self.is_initialized:\n self.initialize(i, j)\n\n self.update_revealed(i, j)\n self.revealed_img.set_data(self.revealed)\n self.flags_pts.set_data(*np.where(self.flags)[::-1])\n self.refresh_canvas()\n\n if np.count_nonzero(self.revealed) == self.n_not_mines:\n self.game_over(True)"
] | [
"0.6455586",
"0.6382103",
"0.6161107",
"0.6144557",
"0.6141855",
"0.6128536",
"0.59943545",
"0.59508157",
"0.59508157",
"0.59508157",
"0.59508157",
"0.59508157",
"0.59508157",
"0.59415877",
"0.5753732",
"0.57524306",
"0.5717041",
"0.5663946",
"0.5627639",
"0.5537732",
"0.553722",
"0.55174124",
"0.54986787",
"0.5486317",
"0.54860157",
"0.5485424",
"0.5481574",
"0.5450109",
"0.5432576",
"0.5411198"
] | 0.70450246 | 0 |
Calculates game duration based on self.startTime and time.time(); calls revealBombs() to reveal/flag remaining bombs; prints the given message and elapsed time in a readable format in a pop-up messagebox | def endGame(self, msg, win):
elapsedTime = time.time() - self.startTime
readableTime = str(int((elapsedTime / 60) / 60))
readableTime += ":" + str(int(elapsedTime / 60))
readableTime += ":" + str(elapsedTime % 60)[0:6]
msg +="Time: " + readableTime
self.revealBombs(win)
self.flagLabel.configure(text="Flags: "+str(self.numFlags))
messagebox.showinfo('Game Over', msg) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def display_time(self, time):\n pygame.draw.rect(self.screen, self.font_fgcolor, self.time_rect)\n self.screen.blit(self.small_font.render(\"Elapsed time: %.0f s\" % time, -1, (0, 0, 0)), (5, 720))\n pygame.display.update(self.time_rect)\n return",
"def timer_update(self):\n if self.mineboard.gamestate is not None:\n return\n time_so_far = round(time.time()-self.start_time)\n if time_so_far == 1:\n self.now.set(f\"Time so far: {time_so_far} second\")\n else:\n self.now.set(f\"Time so far: {time_so_far} seconds\")\n self.after(1000, self.timer_update) # calls this function every second",
"def message(self):\n if self.display_time:\n return \"Time: {}\".format(int(self.physics_engine.time_since_start()%self.time_cycle_secs))",
"def time(self):\n\n self.timing = True\n self.scramble()\n\n self.disp = False",
"def GAME_TIME_ADVANCE(dt):",
"def print_stats(self):\n self.clear_top()\n font2 = pygame.font.SysFont('comicsans', 40, True) # creates new font object\n minutes, seconds = divmod(self.current_time - self.start, 60) # calculation\n minutes, seconds = round(minutes), round(seconds) # rounds numbers\n if seconds == 60:\n seconds = 0\n # Draw text onto the screen\n text = font2.render('Attempts: ' + str(self.recursions), 1, (0, 0, 0))\n if len(str(seconds)) == 1:\n seconds = '0' + str(seconds)\n text2 = font2.render(' Time: 0{}:{}'.format(minutes, seconds),\n 1, (0, 0, 0))\n self.screen.blit(text, (20, 20))\n self.screen.blit(text2, (480, 20))\n pygame.display.update((0, 0, 720, 800))",
"def check_time(start, message):\n\n logger.info(\" {} -> took {}\".format(message, clock() - start))",
"def print_time_elapsed(self):\r\n stop_time = time.time()\r\n elapsed_time = stop_time - self.start_time\r\n print(f\"-- time elapsed: {elapsed_time:.5f} s\", flush=True)",
"def run_timer():\n \n start_time = time.time()\n print(start_time)\n stopper = input(\"Press enter to stop\")\n end_time = time.time()\n print(\"You have finished collecting the blocks!\")\n duration = int(end_time - start_time)\n if duration > 25:\n print(\"You were too slow collecting the blocks, better luck next time\")\n else: \n print(\"Good job speedy, you collected all the blocks before time ran out!\")",
"def __get_elapsed__(self):\n elapsed = (self.__end_time - self.__start_time)\n unit = \"seconds\"\n if elapsed >= 3600:\n unit = \"minutes\"\n hours = elapsed / 3600\n minutes = hours % 60\n hours = floor(hours)\n print(self.name, \"took\", str(hours), \"hours and\", \"{0:.2f}\".format(minutes), unit, \"to complete\")\n elif elapsed >= 60:\n minutes = floor(elapsed / 60)\n seconds = elapsed % 60\n print(self.name, \"took\", str(minutes), \"minutes and\", \"{0:.2f}\".format(seconds), unit, \"to complete\")\n else:\n print(self.name, \"took\", \"{0:.2f}\".format(elapsed), unit, \"to complete\")",
"def remaining_ms():",
"async def _time(self, ctx):\n try:\n await self.bot.say('@{0}:'.format(ctx.message.author.name) + '\\nDate is: **' + time.strftime(\"%A, %B %d, %Y\") + '**' + '\\nTime is: **' + time.strftime(\"%I:%M:%S %p\") + '**')\n except Exception as e:\n await self.bot.say(code.format(type(e).__name__ + ': ' + str(e)))",
"def main(self):\n\t\t\tfactor = 0.0\n\t\t\tif self.time.time() > (self.startTime + self.showTime + self.displayTime + self.hideTime):\n\t\t\t\tself.end()\n\t\t\telif self.time.time() > (self.startTime + self.showTime + self.displayTime):\n\t\t\t\t#Hide animation will occur here.\n\t\t\t\tfactor = (self.hideTime - (self.time.time() - (self.startTime + self.showTime + self.displayTime))) / self.hideTime\n\t\t\t\tself.frame.colors = [(0.2, 0.2, 0.2, (0.8 * factor)) for i in range(4)]\n\t\t\t\tself.frame._update_position(self.frame._base_size, [0.7, (1.0 - (0.2 * factor))])\n\t\t\t\tself.display._update_position(self.display._base_size, self.display._base_pos)\n\t\t\t\tself.display.text = self.text\n\t\t\t\n\t\t\telif self.time.time() > (self.startTime + self.showTime):\n\t\t\t\tif self.frame._base_pos != [0.7, 0.8]:\n\t\t\t\t\tself.frame._update_position(self.frame._base_size, [0.7, 0.8])\n\t\t\t\t\tself.display._update_position(self.display._base_size, self.display._base_pos)\n\t\t\t\t\tself.display.text = self.text\n\t\t\t\n\t\t\telse:\n\t\t\t\t#play show animation.\n\t\t\t\tfactor = (self.time.time() - self.startTime) / self.showTime\n\t\t\t\tself.frame.colors = [(0.2, 0.2, 0.2, (0.8 * factor)) for i in range(4)]\n\t\t\t\tself.frame._update_position(self.frame._base_size, [0.7, (1.0 - (0.2 * factor))])\n\t\t\t\tself.display._update_position(self.display._base_size, self.display._base_pos)\n\t\t\t\tself.display.text = self.text",
"def update(self):\n \n # If the countdown timer has not yet hit 0\n if self.__time > 0:\n \n # Displays the grace period time in seconds\n countdown_message = str(self.__time)\n self.image = self.__font1.render(countdown_message, 1, (255, 255, 255))\n self.rect = self.image.get_rect()\n \n # Message is positioned in the center of the screen near the top\n self.rect.centerx, self.rect.centery = 400, 30\n \n # If the countdown timer has hit 0\n elif self.__time <= 0:\n \n # Notifies the Player that the Minotaur has escaped from his contained area\n release_message = \"he has been released\"\n self.image = self.__font2.render(release_message, 1, (255, 255, 255))\n self.rect = self.image.get_rect()\n \n # Message is positioned in the center of the screen near the top\n self.rect.centerx, self.rect.centery = 400, 30",
"def startgame(BNE):\n nextstep = time.time() + 1 # value to make enemies move automatically\n flag = 0 # bomb is not there on the board\n B.score = 0\n while True:\n inp = input_to()\n if inp == 'q':\n break\n if inp == 'w':\n BNE = B.moveup(BNE, 1)\n os.system('clear') # clears screen or refreshes\n printboard(BNE)\n if inp == 's':\n BNE = B.movedown(BNE, 1)\n os.system('clear')\n printboard(BNE)\n if inp == 'a':\n BNE = B.moveleft(BNE, 1)\n os.system('clear')\n printboard(BNE)\n if inp == 'd':\n BNE = B.moveright(BNE, 1)\n os.system('clear')\n printboard(BNE)\n if flag != 1: # bomb is not placed\n if inp == 'b':\n boom = Bomb(B.x_pos, B.y_pos, 1)\n if flag == 0:\n bomb_start = time.time()\n BNE = boom.placebomb(BNE)\n flag = 1\n os.system('clear')\n printboard(BNE)\n if flag == 1: # bomb is there on board\n if time.time() - bomb_start > 3: # if it has been more than 3 sec\n BNE = boom.explode(BNE, EMB, B) # after placing bomb\n flag = 0\n os.system('clear')\n printboard(BNE)\n time.sleep(0.3) # shows board with e's for 0.3 sec\n os.system('clear')\n printboard1()\n if time.time() >= nextstep:\n nextstep = nextstep + 1\n for i in EMB.enemies:\n BNE = i.randommove(BNE, B)\n os.system('clear')\n printboard(BNE)",
"def _countdown(self):\n self._game.deleteBall()\n self._game.draw()\n # reset paddle speed\n self._game.updatePaddle(self.input)\n if ZERO_SECS <= self.time < ONE_SEC:\n self._mssg = (GLabel(text='3', x=GAME_WIDTH/2, y=GAME_HEIGHT/2,\n font_size=COUNTDOWN_FONT_SIZE))\n if ONE_SEC <= self.time < TWO_SECS:\n self._mssg = (GLabel(text='2', x=GAME_WIDTH/2, y=GAME_HEIGHT/2,\n font_size=COUNTDOWN_FONT_SIZE))\n if TWO_SECS <= self.time < THREE_SECS:\n self._mssg = (GLabel(text='1', x=GAME_WIDTH/2, y=GAME_HEIGHT/2,\n font_size=COUNTDOWN_FONT_SIZE))\n if self.time >= THREE_SECS:\n self._mssg = None\n self._game.serveBall()\n self._state = STATE_ACTIVE\n self._points_mssg = (GLabel(text='Points: 0', x=POINTS_X, y=POINTS_Y, font_size=24))",
"async def time(self, ctx):\n global time_msg\n if timer > 0:\n if time_msg:\n await time_msg.delete()\n time_msg = None\n minutes = timer // 60\n seconds = timer % 60 if timer % 60 > 9 else '0' + str(timer % 60)\n time_msg = await ctx.send(embed=make_time_embed('work'))\n else:\n # await ctx.send(\"No timer active.\")\n await send_msg(ctx, \"❌\", \"No Timer Active\", color='error')\n await ctx.message.delete()",
"def send_time_length_info(self):\n min_rounds = self.min_num_turns\n wiz_time = sec_to_min_pretty(self.wizard_time_out)\n app_time = sec_to_min_pretty(self.apprentice_time_out)\n for agent in self.agents:\n message = f'This conversation continues for at least {min_rounds} rounds.\\n'\n t = wiz_time if _is_wiz(agent) else app_time\n message += (\n f'In your turn, please send your message within {t} minutes. '\n 'Otherwise you may be disqualified. '\n )\n if not _is_wiz(agent):\n message += (\n f'Note that you might have to wait up to {wiz_time} '\n 'mintes to receive a response from the other person.'\n )\n agent.observe(\n {\n 'id': constants.COORDINATOR_AGENT,\n 'text': message,\n 'episode_done': False,\n }\n )",
"def clock( current_time ):\n global D\n number_of_seconds_since_start = int(current_time - D.start_time)\n if D.last_time_printed < number_of_seconds_since_start:\n print \"[Brains] [State:\", D.STATE, \"] time is\", \\\n number_of_seconds_since_start, \"seconds since starting...\"\n D.last_time_printed = number_of_seconds_since_start",
"def printTime(self, beginTime, endTime, text):\n methodName = \"printTime\"\n elapsedTime = (endTime - beginTime)/1000\n etm, ets = divmod(elapsedTime,60)\n eth, etm = divmod(etm,60) \n TR.info(methodName,\"Elapsed time (hh:mm:ss): %d:%02d:%02d for %s\" % (eth,etm,ets,text))",
"def finish(self, secs=False):\n self.timeFinish = pygame.time.get_ticks()()\n elapsedTime = self.timeFinish - self.timeStart\n if secs:\n return elapsedTime / 1000\n else:\n return elapsedTime",
"def time(self) -> float:\n return self.state.game_loop / 22.4 # / (1/1.4) * (1/16)",
"def on_draw(self):\n\n # Start the render. This must happen before any drawing\n # commands. We do NOT need an stop render command.\n arcade.start_render()\n\n # Calculate minutes\n minutes = int(self.total_time) // 60\n\n # Calculate seconds by using a modulus (remainder)\n seconds = int(self.total_time) % 60\n\n # Figure out our output\n output = \"Time minutes:\" + format(minutes) + \" seconds:\" + format(seconds) + \" \"\n\n # See if the output is the same as last frame. If not, generate a new\n # text object.\n if not self.timer_text or self.timer_text.text != output:\n self.timer_text = arcade.create_text(output, arcade.color.BLACK, 30)\n\n # Output the timer text.\n arcade.render_text(self.timer_text, 300, 300)",
"def __get_elapsed__(self):\n elapsed = (self.__end_time - self.__start_time)\n unit = \"seconds\"\n if elapsed >= 3600:\n unit = \"minutes\"\n hours = elapsed / 3600\n minutes = hours % 60\n hours = floor(hours)\n print(\"{} {} took {} hours and {:.2f} {} to complete\".format(self.__get_timestamp__(), self.name, hours, minutes, unit))\n elif elapsed >= 60:\n minutes = floor(elapsed / 60)\n seconds = elapsed % 60\n print(\"{} {} took {} minutes and {:.2f} {} to complete\".format(self.__get_timestamp__(), self.name, minutes, seconds, unit))\n else:\n print(\"{} {} took {:.2f} {} to complete\".format(self.__get_timestamp__(), self.name, elapsed, unit))",
"def check_timer():\n end = time.time()\n time_elapsed = end - target_time[0]\n durationMSG = fg.cyan + f\"Scans Completed for {args.target} in: \" + fg.rs\n print(durationMSG, display_time(time_elapsed))",
"def time_thread(self):\n while self.time > 0:\n t.sleep(1)\n self.time -= 1\n self.end_round(\"Time is up\")",
"def time(self, start_time):\n \n TIME_LIST.append((time.time() - start_time))\n print(\"--- %s seconds ---\" % (time.time() - start_time))",
"def stopwatch(message):\n t0 = time.time()\n try:\n yield\n finally:\n t1 = time.time()\n print('Total elapsed time for %s: %f s' % (message, t1 - t0))",
"def stopwatch(message):\r\n t0 = time.time()\r\n try:\r\n yield\r\n finally:\r\n t1 = time.time()\r\n print('Total elapsed time for %s: %.3f' % (message, t1 - t0))",
"def time(self):\n return pygame.time.get_ticks() - self.start_time"
] | [
"0.63711405",
"0.62700844",
"0.62045985",
"0.6160741",
"0.6118818",
"0.606114",
"0.600875",
"0.59369403",
"0.5876301",
"0.5856027",
"0.5779341",
"0.5773944",
"0.5736033",
"0.5725558",
"0.5696176",
"0.56574124",
"0.56115764",
"0.56063986",
"0.55996895",
"0.5567945",
"0.55638397",
"0.55551904",
"0.55547893",
"0.5549033",
"0.5530699",
"0.54944825",
"0.54713017",
"0.54693174",
"0.5462217",
"0.54615754"
] | 0.66032505 | 0 |
Unchecks the menu's custom game checkbox appropriately and resizes self.myBoard | def resize(self, rows, cols, mines):
if self.menuVar.get() != 4: self.checkVar.set(0)
self.myBoard.resize(rows, cols, mines) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self, rows, cols, mines):\n tk.Tk.__init__(self)\n \n #load all needed images into Tile.images\n for i in range(14):\n Tile.images.append(tk.PhotoImage(file = \"images/tile-\"+str(i)+\".gif\"))\n \n self.menu = tk.Menu(self)\n self.configure(menu=self.menu)\n self.title(\"Minesweeper\")\n self.myBoard = Board(rows, cols, mines, self)\n self.menuVar = tk.IntVar(self)\n self.menuVar.set(1)\n self.checkVar = tk.IntVar(self)\n self.checkVar.set(1)\n self.gamemenu = tk.Menu(self.menu, tearoff = False)\n self.menu.add_cascade(label=\"Game\", menu=self.gamemenu)\n self.gamemenu.add_command(label=\"New Game\", command=self.myBoard.replay)\n self.gamemenu.add_separator()\n self.gamemenu.add_radiobutton(variable = self.menuVar, value=1, label=\"Beginner\", command=lambda: self.resize(8,8,10))\n self.gamemenu.add_radiobutton(variable = self.menuVar, value=2, label=\"Intermediate\", command=lambda: self.resize(16,16,40))\n self.gamemenu.add_radiobutton(variable = self.menuVar, value=3, label=\"Expert\", command=lambda: self.resize(16,30,99))\n self.gamemenu.add_separator()\n self.gamemenu.add_checkbutton(variable = self.checkVar, onvalue=4, offvalue=0, label=\"Custom\", command= self.options)\n self.gamemenu.add_separator()\n self.gamemenu.add_command(label=\"Exit\", command=self.exitGame)\n windowWidth = str(20*cols+40)\n windowHeight = str(20*rows+60)\n self.protocol(\"WM_DELETE_WINDOW\", self.exitGame)\n self.minsize(windowWidth, windowHeight)\n self.maxsize(windowWidth, windowHeight)\n self.geometry(windowWidth+'x'+windowHeight)\n self.mainloop()",
"def remove_checks(self):\n for checkbox in self.checkboxes:\n checkbox.setChecked(False)\n mw.checked_stats = []\n mw.bonuses = {'Charisma': 2}",
"def updateCheck(self):\n if (self.checkStatus1.get() == True):\n self.master.configure(background='#f5f5f0')\n self.checkStatus2.set(False)\n self.checkStatus3.set(False)\n\n elif (self.checkStatus2.get() == True):\n self.master.configure(background='#ff99ff')\n self.checkStatus3.set(False)\n self.checkStatus1.set(False)\n elif (self.checkStatus3.get() == True):\n self.master.configure(background='#00ff00')\n self.checkStatus1.set(False)\n self.checkStatus2.set(False)",
"def erase(self):\n\tself.state={}\n\tself.display(update_board=0)",
"def main_board_maintenance(self,x_cor,y_cor):\r\n\t\r\n\t\tfor event in pygame.event.get(): \r\n\r\n\t\t\tif event.type == pygame.QUIT:\r\n\t\t\t\tpygame.display.quit()\r\n\t\t\t\tpygame.quit()\r\n\t\t\t\tquit()\r\n\t\t\t\r\n\t\t\tif event.type == pygame.MOUSEBUTTONDOWN:\r\n\r\n\t\t\t\tx_adjusted,y_adjusted = Helping_Class.convert_coordinate(x_cor,y_cor,from_where ='board')\r\n\t\t\t\t#print(x_adjusted/80,y_adjusted/80)\r\n\r\n\t\t\t\tif self.selected_from_selection_bar :\r\n\t\t\t\t\t#print('inside selection bar selection option')\r\n\t\t\t\t\tx_adjusted,y_adjusted = Helping_Class.convert_coordinate(x_cor,y_cor,from_where ='board')\r\n\t\t\t\t\ttemp_game_state = CP.game_data()\r\n\t\t\t\t\ttemp_game_state = copy.deepcopy(self.game_state)\r\n\t\t\t\t\tdata_convert = CP.Conversion_of_postion_name(self.selected_piece,Helping_Class.selection_bar_reverse_mapping[self.selected_piece] ,(x_adjusted,y_adjusted))\r\n\t\t\t\t\ttemp_game_state.update(data_convert.piece, int(data_convert.i_pos_ani()), int(data_convert.f_pos_ani()))\r\n\t\t\t\t\ttemp_game_state.active_color = not temp_game_state.active_color\r\n\t\t\t\t\tfen = temp_game_state.generate_fen()\r\n\t\t\t\t\tboard2 = chess.Board(fen=fen)\r\n\t\t\t\t\tprint(board2)\r\n\t\t\t\t\tprint(fen)\r\n\t\t\t\t\tprint('board2.is_check()',board2.is_check())\r\n\t\t\t\t\t\r\n\t\t\t\t\t#now we need to place the piece on board\r\n\t\t\t\t\tif self.game_state.board[Helping_Class.convert_my_coordinate_to_anirudh(x_adjusted,y_adjusted)] == None:\r\n\t\t\t\t\t\t#print(self.selected_position)\r\n\t\t\t\t\t\tif not board2.is_check():\r\n\t\t\t\t\t\t\tif self._check_valid_position_(x_adjusted,y_adjusted):\r\n\t\t\t\t\t\t\t\tself.place_piece_on_board_from_selection_bar(x_adjusted,y_adjusted)\r\n\t\t\t\t\t\t\t\t#rajan's\r\n\t\t\t\t\t\t\t\t#print(self.selected_piece)\r\n\t\t\t\t\t\t\t\t#print(self.selected_position)\r\n\t\t\t\t\t\t\t\tdata_convert = CP.Conversion_of_postion_name(self.selected_piece,self.selected_position ,(x_adjusted,y_adjusted))\r\n\t\t\t\t\t\t\t\tself.game_state.update(data_convert.piece, int(data_convert.i_pos_ani()), int(data_convert.f_pos_ani()))\r\n\t\t\t\t\t\t\t\tself.selected_piece = None\r\n\t\t\t\t\t\t\t\tself.selected_position = None\r\n\r\n\t\t\t\t\t\t\t\tself.computer_turn =True\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\tpass\r\n\t\t\t\t\t#board position is filled then nothing to do\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\t#if his piece change selection\r\n\t\t\t\t\t\tself.selected_from_selection_bar =False\r\n\t\t\t\t\t\tself.selected_from_board = True\r\n\t\t\t\t\t\tself.selected_piece = self.anirudh_2_pritish[self.game_state.board[Helping_Class.convert_my_coordinate_to_anirudh(x_adjusted,y_adjusted)]]\r\n\t\t\t\t\t\tself.selected_position = (x_adjusted,y_adjusted)\r\n\r\n\r\n\t\t\t\telif self.selected_from_board:\r\n\t\t\t\t\t#print('inside selection bar board option')\r\n\t\t\t\t\tx_adjusted,y_adjusted = Helping_Class.convert_coordinate(x_cor,y_cor,from_where ='board')\r\n\t\t\t\t\t\r\n\t\t\t\t\tomega = True\r\n\t\t\t\t\tif self.selected_position:\r\n\t\t\t\t\t\tif self.selected_position == (x_adjusted,y_adjusted):\r\n\t\t\t\t\t\t\tomega = False\r\n\t\t\t\t\t#print(self.selected_position,(x_adjusted,y_adjusted))\r\n\t\t\t\t\tif omega:\r\n\t\t\t\t\t\tmove = self._check_valid_move_(x_adjusted,y_adjusted)\r\n\t\t\t\t\t\tprint(move)\r\n\t\t\t\t\tif omega:\r\n\t\t\t\t\t\tif move:\r\n\t\t\t\t\t\t\tself.computer_turn = True\r\n\t\t\t\t\t\t\t#if move contains x then we have update state of captured piece\r\n\t\t\t\t\t\t\t#else just update selected 
piece\r\n\t\t\t\t\t\t\t#print(\"correct move\")\r\n\t\t\t\t\t\t\tself.capture_piece_update_board_or_place_piece(move,x_adjusted,y_adjusted)\r\n\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t#select the piece\r\n\t\t\t\t\t\t\tif self.game_state.board[Helping_Class.convert_my_coordinate_to_anirudh(x_adjusted,y_adjusted)]:\r\n\t\t\t\t\t\t\t\tself.selected_piece = self.anirudh_2_pritish[self.game_state.board[Helping_Class.convert_my_coordinate_to_anirudh(x_adjusted,y_adjusted)]]\r\n\t\t\t\t\t\t\t\tself.selected_position = (x_adjusted,y_adjusted)\r\n\t\t\t\t\t\t\t\tself.selected_from_board = True\r\n\t\t\t\t\t\r\n\t\t\t\telse:\r\n\t\t\t\t\t\r\n\t\t\t\t\tx_adjusted,y_adjusted = Helping_Class.convert_coordinate(x_cor,y_cor,from_where ='board')\r\n\t\t\t\t\tif self.game_state.board[Helping_Class.convert_my_coordinate_to_anirudh(x_adjusted,y_adjusted)]:\r\n\t\t\t\t\t\t#select the piece\r\n\t\t\t\t\t\tif self.whose_move == 'white':\r\n\t\t\t\t\t\t\tif 'W' in self.anirudh_2_pritish[self.game_state.board[Helping_Class.convert_my_coordinate_to_anirudh(x_adjusted,y_adjusted)]]:\r\n\t\t\t\t\t\t\t\tself.selected_piece = self.anirudh_2_pritish[self.game_state.board[Helping_Class.convert_my_coordinate_to_anirudh(x_adjusted,y_adjusted)]]\r\n\t\t\t\t\t\t\t\tself.selected_from_board = True\r\n\t\t\t\t\t\t\t\tself.selected_position = (x_adjusted,y_adjusted)\r\n\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t#nothing to do\r\n\t\t\t\t\t\t\t\tpass\r\n\t\t\t\t\t\telif self.whose_move == 'black':\r\n\t\t\t\t\t\t\tif 'B' in self.anirudh_2_pritish[self.game_state.board[Helping_Class.convert_my_coordinate_to_anirudh(x_adjusted,y_adjusted)]]:\r\n\t\t\t\t\t\t\t\tself.selected_piece = self.anirudh_2_pritish[self.game_state.board[Helping_Class.convert_my_coordinate_to_anirudh(x_adjusted,y_adjusted)]]\r\n\t\t\t\t\t\t\t\tself.selected_from_board = True\r\n\t\t\t\t\t\t\t\tself.selected_position = (x_adjusted,y_adjusted)\r\n\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t#nothing to do\r\n\t\t\t\t\t\t\t\tpass\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\t#it is none means nothing is their so nothing to do\r\n\t\t\t\t\t\tpass\r\n\t\t\t\t\t\r\n\t\t\t\r\n\r\n\t\t\telse:\r\n\t\t\t\t#print(\"not_pressed\")\r\n\t\t\t\tpass",
"def clear_board(self, event):\n for row in range(self._dim):\n for col in range(self._dim):\n self._board[row][col] = False\n self.model_refresh()",
"def uncheckMyWorkBox(self):\n self.util.waitForElementToBePresent(self.element.my_work_checkbox)\n checkbox = self.util.driver.find_element_by_xpath(self.element.my_work_checkbox)\n if checkbox.is_selected():\n self.util.clickOn(self.element.my_work_checkbox)",
"def resize(self, rows, cols, minecount, event=None):\n self.clearFrame()\n #reset relevant instance variables\n self.rows = rows\n self.cols = cols\n self.numMines = minecount\n self.numChecked = 0\n self.numFlags = 0\n self.minesArmed = False\n self.startTime = None\n\n #re-add all elements on the board\n self.setUpFrame()\n self.addTiles(rows,cols,minecount)\n\n #resize window to fit the new board size\n windowWidth = str(20*cols+40)\n windowHeight = str(20*rows+60)\n self.parent.minsize(windowWidth, windowHeight)\n self.parent.maxsize(windowWidth, windowHeight)\n self.parent.geometry(windowWidth+'x'+windowHeight)",
"def onClickCheckbutton(self):\r\n self.app.unbind()\r\n mask = []\r\n for val in self.intvars:\r\n mask.append(val.get())\r\n # Recreate fNIRS Channels with channel mask\r\n self.app.reconfigureChannels(self.app.dataPath,mask)\r\n self.app.bindHotkeys()",
"def reset_board(self):\n self.board = [0, 0, 0, 0, 0, 0, 0, 0, 0]\n self.turn = 0\n\n self.change_button_img_to_null()\n\n #self.Score_Label.grid(row=0,column=1, ipadx=32)\n\n self.player_highlight()\n self.change_button_state('normal')\n self.update_score()",
"def deSelect(self):\n for i in range(len(self.__controlsChecks)):\n self.__controlsChecks[i].setChecked(False)",
"def options(self):\n self.checkVar.set(self.menuVar.get())\n #create window then set window size & title\n self.optionsWindow = tk.Toplevel(self)\n self.optionsWindow.grab_set()\n self.optionsWindow.title(\"Options\")\n windowWidth = \"225\"\n windowHeight = \"175\"\n self.optionsWindow.minsize(windowWidth, windowHeight)\n self.optionsWindow.maxsize(windowWidth, windowHeight)\n self.optionsWindow.geometry(windowWidth+'x'+windowHeight)\n \n #creates the frame and self.optionVar\n frame = tk.Frame(self.optionsWindow)\n frame.pack()\n self.optionVar = tk.IntVar(self)\n self.optionVar.set(self.menuVar.get())\n\n #add the choices as radio buttons to the frame\n choices = [\n (\"Beginner\"+\"\\n8 X 8\"+\"\\n10 Mines\", 1),\n (\"Intermediate\"+\"\\n16 X 16\"+\"\\n40 Mines\", 2),\n (\"Expert\"+\"\\n16 X 30\"+\"\\n99 Mines\", 3),\n (\"Custom\", 4)\n ]\n for text, value in choices:\n button = tk.Radiobutton(frame, text=text, value=value, variable=self.optionVar, justify=\"left\", command=self.entryToggle)\n row, col, colspan = value-1, 0, 1\n if value is 4:row, col, colspan = 0, 1, 2\n button.grid(row=row, column=col, columnspan=colspan, sticky=\"W\")\n \n #add the text entry options for the custom game\n frame2 = tk.Frame(frame)\n frame2.grid(row=1, column=1, sticky=\"N\")\n\n rowLabel = tk.Label(frame2, text=\"Height: \", justify=\"left\")\n rowLabel.grid(row=0, column=0)\n colLabel = tk.Label(frame2, text=\"Width: \", justify=\"left\")\n colLabel.grid(row=1, column=0)\n minLabel = tk.Label(frame2, text=\"Mines: \", justify=\"left\")\n minLabel.grid(row=2, column=0)\n\n self.entry = []\n for i in range(3):\n self.entry.append(tk.Entry(frame2,width=10))\n self.entry[i].grid(row=i, column=1)\n self.entryToggle()\n \n #add the submit button to handle options given in the window\n submit = tk.Button(frame, text=\"Play\", command=self.optionSet)\n submit.grid(row=2, column=1, sticky=\"WE\")",
"def check_button(self, button):\n if button == 1:\n if int(self.var1.get()) == 1:\n self.size_two.deselect()\n self.size_three.deselect()\n self.size_four.deselect()\n if button == 2:\n if int(self.var2.get()) == 1:\n self.size_one.deselect()\n self.size_three.deselect()\n self.size_four.deselect()\n if button == 3:\n if int(self.var3.get()) == 1:\n self.size_two.deselect()\n self.size_one.deselect()\n self.size_four.deselect()\n if button == 4:\n if int(self.var4.get()) == 1:\n self.size_two.deselect()\n self.size_three.deselect()\n self.size_one.deselect()\n if button == 5:\n if int(self.var_yes.get()) == 1:\n self.no_box.deselect()\n if button == 6:\n if int(self.var_no.get()) == 1:\n self.yes_box.deselect()",
"def reset(self, event):\n #Resets the current puzzle\n self.w.delete('all') #Deletes all widgets/components \n self.resetnums() #Call restnums() to reset self.movelist\n\n #Destroys all buttons on GUI\n #self.buttonlist.append(self.lbl)\n for i in range(len(self.buttonlist)):\n self.buttonlist[i].destroy()\n\n self.create_widgets(self.counter) #Calls the create_widgets() to redisplay all widgets and buttons\n self.lbl2[\"text\"] = \"\" #Clears any text (e.g. instructions or check) if there is any.",
"def exitMenu(self, fpsclock, screen):\n screen.fill((0, 0, 0))\n self.rect = pygame.Rect(0, 0, self.gs[0] * (self.ts + self.ms) + self.ms,\n self.gs[1] * (self.ts + self.ms) + self.ms)\n self.pic = pygame.transform.smoothscale(pygame.image.load('blurredImage.png'), self.rect.size)\n screen.blit(self.pic, self.rect)\n if self.isWin():\n self.draw_text(screen, \"You won !\", 50, 250, 80, 0, 0, 0, True)\n self.draw_text(screen, \"Congratulations !\", 50, 250, 160, 0, 0, 0, True)\n else:\n self.draw_text(screen, \"You lost !\", 50, 250, 80, 0, 0, 0, True)\n self.draw_text(screen, \"Better luck next time !\", 50, 250, 160, 0, 0, 0, True)\n self.draw_text(screen, \"Moves : \" + str(self.nb_move), 40, 500, 10, 255, 255, 255, False)\n self.draw_text(screen, \"Shortcuts\", 40, 500, 40, 255, 255, 255, False)\n self.draw_text(screen, \"Restart : y\", 40, 500, 70, 255, 255, 255, False)\n self.draw_text(screen, \"Quit : n\", 40, 500, 100, 255, 255, 255, False)\n\n pygame.display.flip()\n while True:\n fpsclock.tick(FPS)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.exit()\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_y:\n self.reset()\n return False\n if event.key == pygame.K_n:\n self.exit()",
"def reset_board(self):\n\n for y in range(Settings.SIZE_Y):\n for x in range(Settings.SIZE_X):\n self.__tilegrid.set_tile_marker(x, y, MarkerType.NONE)\n self.__tilegrid.set_tile_color(x, y, Color.MID_TONE)\n\n if self.__game.get_state() == GameState.WINNER:\n winner = self.__game.get_winner()\n loser = self.__game.get_loser()\n self.__game.reset(loser)\n self.__infobar.update_info(loser, winner)\n\n elif self.__game.get_state() == GameState.TIE:\n self.__game.reset(MarkerType.CROSS)\n self.__infobar.update_info(MarkerType.CROSS, MarkerType.CIRCLE)\n\n else:\n # Should never happen, since button is disabled while playing.\n raise PermissionError(\n \"Method reset_board was called while game hasn't ended.\")",
"def resetBoard(self):\n pass",
"def button_reset(self): \n self.button_1 = arcade.Sprite(settings.button, .7, 0, 0, 0, 0, 50,\n 570)\n self.button_2 = arcade.Sprite(settings.button, .7, 0, 0, 0, 0, 50,\n 75)\n self.button_3 = arcade.Sprite(settings.button, .7, 0, 0, 0, 0, 750,\n 570)\n self.button_4 = arcade.Sprite(settings.button, .7, 0, 0, 0, 0, 750,\n 75)\n a = [1, 2, 3, 4]\n for i in a:\n self.puzzle.remove_value(i)",
"def _remove_walls(current: GridCell, choice: GridCell):\n if choice.x > current.x:\n current.walls[1] = False\n choice.walls[0] = False\n elif choice.x < current.x:\n current.walls[0] = False\n choice.walls[1] = False\n elif choice.y > current.y:\n current.walls[3] = False\n choice.walls[2] = False\n elif choice.y < current.y:\n current.walls[2] = False\n choice.walls[3] = False",
"def reset_board(self):\n cell_list = self.get_cells()\n for current_cell in cell_list:\n current_cell.set_cell_state(0) # remove player ownership of cell",
"def delete_checkbox(self):\n wanna_delete = mb.askyesno(\n \"Warning\",\n f'Delete checkbox for column \"{self.text_value}\"?',\n icon=\"warning\"\n )\n if wanna_delete:\n self.handle_modified()\n conf[\"cols_all\"].remove(self.text_value)\n try:\n conf[\"cols_selected\"].remove(self.text_value)\n except ValueError:\n pass\n mb.showinfo(\"Info\", \"This checkbox will not bother you anymore\")\n self.box_frame.grid_remove()\n self.parent.focus_set()\n else:\n self.parent.focus_set()",
"def disable(self): \n self.feed_button.config(state=\"disabled\")\n self.eat_button.config(state=\"disabled\") \n for t in range(self.player.game.trait_limit): \n self.add_trait_buttons[t].config(state=\"disabled\") \n self.add_population_button.config(state=\"disabled\")\n self.add_body_size_button.config(state=\"disabled\")",
"def clear_board(cls):\n # Set the board dimensions\n cls.board = [[cls.empty for x in range(cls.size)] for y in range(cls.size)]\n \n # Set allowed positions the user may provide\n cls.positions = [str(x) for x in range(1, cls.size**2 + 1)]\n\n cls.current_player = 'X'\n\n cls.display_board()\n\n cls.prompt_player()",
"def selection_board_maintenance(self,x_cor,y_cor):\t\t\r\n\t\tfor event in pygame.event.get():\r\n\t\t\tif event.type == pygame.QUIT:\r\n\t\t\t\tpygame.display.quit()\r\n\t\t\t\tpygame.quit()\r\n\t\t\t\tquit() \r\n\r\n\t\t\tif event.type == pygame.MOUSEBUTTONDOWN:\r\n\t\t\t\t#print(\"mouse is pressed\")\r\n\t\t\t\t#everything begins here\r\n\t\t\t\tx_adjusted,y_adjusted,who_is_clicked,piece = Helping_Class.convert_coordinate(x_cor,\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t y_cor,\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t 'selection_bar')\r\n\t\t\t\t#print(who_is_clicked)\r\n\t\t\t\tif (self.selected_from_selection_bar + self.selected_from_board):\r\n\t\t\t\t\t#print(\"inside selected item one\")\r\n\t\t\t\t\tif Helping_Class._check_if_clicked_on_his_own_piece_(self.whose_move,who_is_clicked):\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t\tif self.pieces[piece].availability:\r\n\t\t\t\t\t\t\tself.selected_from_board = False\r\n\t\t\t\t\t\t\tself.selected_from_selection_bar = True\r\n\r\n\t\t\t\t\t\t\t#update \r\n\t\t\t\t\t\t\tself.selected_piece = piece\r\n\t\t\t\t\t\t\tself.selected_position =Helping_Class.selection_bar_reverse_mapping[piece]\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\t#nothing to do\r\n\t\t\t\t\t\tpass\r\n\t\t\t\telse:\r\n\t\t\t\t\t#print(\"nothing is selected\")\r\n\t\t\t\t\t#check if clicked on his piece change then select it\r\n\t\t\t\t\tif Helping_Class._check_if_clicked_on_his_own_piece_(self.whose_move,who_is_clicked):\r\n\r\n\t\t\t\t\t\tif self.pieces[piece].availability:\r\n\t\t\t\t\t\t\tself.selected_from_selection_bar = True\r\n\r\n\t\t\t\t\t\t\t#update \r\n\t\t\t\t\t\t\tself.selected_piece = piece\r\n\t\t\t\t\t\t\tself.selected_position =(x_adjusted,y_adjusted)\r\n\t\t\t\t\t\t\t#print(self.selected_piece,self.selected_position,self.selected_from_selection_bar)\r\n\r\n\t\t\t\t\t\t\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\t#nothing to do\r\n\t\t\t\t\t\tpass\r\n\t\t\t\t\t\t\r\n\t\t\t\t\r\n\t\t\telse:\r\n\t\t\t\t#color change\r\n\t\t\t\t#who_is_clicked is dummy variable as no click has occurred\r\n\t\t\t\tx_adjusted,y_adjusted,who_is_clicked,piece = Helping_Class.convert_coordinate(x_cor,\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t y_cor,\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t 'selection_bar')\r\n\r\n\t\t\t\tself.blit_piece = [(x_adjusted,y_adjusted),piece]",
"def shrink(self):\r\n # first we need to decide how to shrink\r\n choice = rand.choice([0, 1, 2, 3])\r\n # now do it\r\n if ((choice == 0) and (self.xspan > mparam.min_s_xspan)):\r\n # delete first row\r\n self.cells = np.delete(self.cells, (0), axis=0) \r\n elif ((choice == 1) and (self.xspan > mparam.min_s_xspan)):\r\n # delete last row\r\n self.cells = np.delete(self.cells, (-1), axis=0) \r\n elif ((choice == 2) and (self.yspan > mparam.min_s_yspan)):\r\n # delete first column\r\n self.cells = np.delete(self.cells, (0), axis=1) \r\n elif ((choice == 3) and (self.yspan > mparam.min_s_yspan)):\r\n # delete last column\r\n self.cells = np.delete(self.cells, (-1), axis=1) \r\n # now let's update xspan and yspan to the new size\r\n self.xspan = self.cells.shape[0]\r\n self.yspan = self.cells.shape[1]\r\n #\r",
"def Fixed(self):\r\n \r\n return self.SetFlag(self.optionResizable, False)",
"def resetgrid(self):\n self.remainingminecount = self.minecount\n self.board.reset()\n self.button_array = [[QtGui.QPushButton() \\\n for col in range(self.cols)] for row in range(self.rows)]\n self.game_in_progress = True\n self.first_click = True\n for row in range(self.rows):\n for col in range(self.cols):\n self.button_array[row][col].setFixedSize(self.cell_size, self.cell_size)\n self.button_array[row][col].setIcon(QtGui.QIcon(\"icons/unopenedsquare.png\"))\n self.button_array[row][col].setIconSize(QtCore.QSize(self.cell_size,\\\n self.cell_size))\n self.button_array[row][col].clicked.connect(self.handle_left_click)\n self.button_array[row][col].setContextMenuPolicy(QtCore.Qt.CustomContextMenu)\n self.button_array[row][col].customContextMenuRequested.connect(\\\n self.handle_right_click)\n self.cell_grid_layout.addWidget(self.button_array[row][col], row, col)\n self.mines_lcd.display(str(self.remainingminecount))\n self.status_button.setIcon(QtGui.QIcon(\"icons/smiley1.ico\"))\n self.time = 0\n self.time_lcd.display(self.time)",
"def create_checkboxes(self):\n self.create_y_crop_box()",
"def ToggleOff(self):\n for s in self.sensors:\n self.gSetpt[s.GetID()].Disable()\n\n self.top_sizer.Layout()\n print(\"Toggle off\")",
"def clear_board(self):\n pygame.draw.rect(self.display, self.white, pygame.Rect(0, 0, self.window_x, self.window_y))\n self.draw_grid()"
] | [
"0.67420113",
"0.5735204",
"0.56506914",
"0.55636704",
"0.55593777",
"0.5487704",
"0.54679674",
"0.542365",
"0.5423146",
"0.5422084",
"0.5379945",
"0.53320664",
"0.5271477",
"0.52714485",
"0.52524525",
"0.5247694",
"0.52458036",
"0.5245706",
"0.5222096",
"0.51987946",
"0.5181694",
"0.5175004",
"0.51670325",
"0.51647097",
"0.5163904",
"0.51487964",
"0.51441956",
"0.5139294",
"0.5124892",
"0.5124374"
] | 0.67196774 | 1 |
Handles the custom game options window button click and resizes the board according to the given specifications. If a Custom game was chosen but the specifications are invalid, a popup message notifies the user without closing the options window | def optionSet(self):
choice = self.optionVar.get()
#if custom game is chosen
if choice == 4:
msg = "Invalid Input!"
valid = True
nums = []
#make sure all inputs are integers
for i in range(3):
try:
value = int(self.entry[i].get())
nums.append(value)
except ValueError:
valid = False
if i == 0: msg += "\nHeight "
elif i == 1: msg += "\nWidth "
elif i == 2: msg += "\nMines "
msg += "input must be an integer."
#check for other invalid inputs
#(negative input, not wide enough, too many mines)
if valid:
if nums[0]<=0 or nums[1]<=0 or nums[2]<=0:
valid = False
msg += "\nInputs must be integers greater than zero"
elif nums[1] < 8 :
valid = False
msg += "\nMinimum width allowed is 8"
if nums[0]*nums[1] <= nums[2]:
valid = False
msg += "\nToo many mines to fit on the board!"
#start game according to specs if input was valid
if valid:
self.menuVar.set(choice)
self.checkVar.set(4)
self.resize(nums[0],nums[1],nums[2])
self.optionsWindow.destroy()
#otherwise popup error and keep options window open
else:
messagebox.showinfo('Custom Game Error', msg)
#start game according to difficulty chosen
else:
self.menuVar.set(choice)
if choice == 1: self.resize(8,8,10)
elif choice == 2: self.resize(16,16,40)
else: self.resize(16,30,99)
self.optionsWindow.destroy() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def options(self):\n self.checkVar.set(self.menuVar.get())\n #create window then set window size & title\n self.optionsWindow = tk.Toplevel(self)\n self.optionsWindow.grab_set()\n self.optionsWindow.title(\"Options\")\n windowWidth = \"225\"\n windowHeight = \"175\"\n self.optionsWindow.minsize(windowWidth, windowHeight)\n self.optionsWindow.maxsize(windowWidth, windowHeight)\n self.optionsWindow.geometry(windowWidth+'x'+windowHeight)\n \n #creates the frame and self.optionVar\n frame = tk.Frame(self.optionsWindow)\n frame.pack()\n self.optionVar = tk.IntVar(self)\n self.optionVar.set(self.menuVar.get())\n\n #add the choices as radio buttons to the frame\n choices = [\n (\"Beginner\"+\"\\n8 X 8\"+\"\\n10 Mines\", 1),\n (\"Intermediate\"+\"\\n16 X 16\"+\"\\n40 Mines\", 2),\n (\"Expert\"+\"\\n16 X 30\"+\"\\n99 Mines\", 3),\n (\"Custom\", 4)\n ]\n for text, value in choices:\n button = tk.Radiobutton(frame, text=text, value=value, variable=self.optionVar, justify=\"left\", command=self.entryToggle)\n row, col, colspan = value-1, 0, 1\n if value is 4:row, col, colspan = 0, 1, 2\n button.grid(row=row, column=col, columnspan=colspan, sticky=\"W\")\n \n #add the text entry options for the custom game\n frame2 = tk.Frame(frame)\n frame2.grid(row=1, column=1, sticky=\"N\")\n\n rowLabel = tk.Label(frame2, text=\"Height: \", justify=\"left\")\n rowLabel.grid(row=0, column=0)\n colLabel = tk.Label(frame2, text=\"Width: \", justify=\"left\")\n colLabel.grid(row=1, column=0)\n minLabel = tk.Label(frame2, text=\"Mines: \", justify=\"left\")\n minLabel.grid(row=2, column=0)\n\n self.entry = []\n for i in range(3):\n self.entry.append(tk.Entry(frame2,width=10))\n self.entry[i].grid(row=i, column=1)\n self.entryToggle()\n \n #add the submit button to handle options given in the window\n submit = tk.Button(frame, text=\"Play\", command=self.optionSet)\n submit.grid(row=2, column=1, sticky=\"WE\")",
"def opt_dialog(self, event):\n dialog = options.OptionsDialog(self, self.options)\n dialog.ShowModal()\n \n self.reconfigure()\n self.info_panel.Layout()\n self.main_panel.Layout()",
"def create_options(self, n):\n f = Frame(n)\n f.pack(side=TOP, fill=BOTH, expand=Y)\n options = Labelframe(f, text='Options')\n options.pack(side=TOP, fill=BOTH, expand=Y)\n Grid.columnconfigure(options, 0, weight=1)\n Grid.columnconfigure(options, 1, weight=1)\n\n population = Button(\n options, text='Population Cap', command=self.set_pop_cap)\n create_tooltip(population, 'Maximum population in your fort')\n population.grid(column=0, row=0, sticky=\"nsew\")\n self.controls['popcap'] = population\n invaders = Button(\n options, text='Invaders',\n command=lambda: self.cycle_option('invaders'))\n create_tooltip(\n invaders, 'Toggles whether invaders (goblins, etc.) show up')\n invaders.grid(column=1, row=0, sticky=\"nsew\")\n self.controls['invaders'] = invaders\n childcap = Button(\n options, text='Child Cap', command=self.set_child_cap)\n create_tooltip(childcap, 'Maximum children in your fort')\n childcap.grid(column=0, row=1, sticky=\"nsew\")\n self.controls['childcap'] = childcap\n caveins = Button(\n options, text='Cave-ins',\n command=lambda: self.cycle_option('caveins'))\n create_tooltip(\n caveins,\n 'Toggles whether unsupported bits of terrain will collapse')\n caveins.grid(column=1, row=1, sticky=\"nsew\")\n self.controls['caveins'] = caveins\n temperature = Button(\n options, text='Temperature',\n command=lambda: self.cycle_option('temperature'))\n create_tooltip(\n temperature,\n 'Toggles whether things will burn, melt, freeze, etc.')\n temperature.grid(column=0, row=2, sticky=\"nsew\")\n self.controls['temperature'] = temperature\n liquid_depth = Button(\n options, text='Liquid Depth',\n command=lambda: self.cycle_option('liquidDepth'))\n create_tooltip(\n liquid_depth,\n 'Displays the depth of liquids with numbers 1-7')\n liquid_depth.grid(column=1, row=2, sticky=\"nsew\")\n self.controls['liquidDepth'] = liquid_depth\n weather = Button(\n options, text='Weather',\n command=lambda: self.cycle_option('weather'))\n create_tooltip(weather, 'Rain, snow, etc.')\n weather.grid(column=0, row=3, sticky=\"nsew\")\n self.controls['weather'] = weather\n varied_ground = Button(\n options, text='Varied Ground',\n command=lambda: self.cycle_option('variedGround'))\n create_tooltip(\n varied_ground,\n 'If ground tiles use a variety of punctuation, or only periods')\n varied_ground.grid(column=1, row=3, sticky=\"nsew\")\n self.controls['variedGround'] = varied_ground\n starting_labors = Button(\n options, text='Starting Labors',\n command=lambda: self.cycle_option('laborLists'))\n create_tooltip(\n starting_labors, 'Which labors are enabled by default:'\n 'by skill level of dwarves, by their unit type, or none')\n starting_labors.grid(column=0, row=4, columnspan=2, sticky=\"nsew\")\n self.controls['laborLists'] = starting_labors\n\n modifications = Labelframe(f, text='Modifications', width=192)\n modifications.pack(side=TOP, expand=Y, anchor=\"w\")\n Grid.columnconfigure(modifications, 0, weight=1)\n Grid.columnconfigure(modifications, 1, weight=1)\n\n aquifers = Button(\n modifications, text='Aquifers',\n command=lambda: self.cycle_option('aquifers'))\n create_tooltip(\n aquifers, 'Whether newly created worlds will have Aquifers in them '\n '(Infinite sources of underground water, but may flood your fort')\n aquifers.grid(column=0, row=0, sticky=\"nsew\")\n self.controls['aquifers'] = aquifers\n\n keybindings = Labelframe(f, text='Key Bindings')\n keybindings.pack(side=BOTTOM, fill=BOTH, expand=Y, anchor=\"s\")\n Grid.columnconfigure(keybindings, 0, weight=2)\n 
Grid.columnconfigure(keybindings, 1, weight=1)\n Grid.columnconfigure(keybindings, 2, weight=1)\n\n keybinding_files = Listbox(\n keybindings, height=4, listvariable=self.keybinds,\n activestyle='dotbox')\n keybinding_files.grid(column=0, row=0, rowspan=2, sticky=\"nsew\")\n\n load_keyb = Button(\n keybindings, text='Load',\n command=lambda: self.load_keybinds(keybinding_files))\n load_keyb.grid(column=1, row=0)\n create_tooltip(load_keyb, 'Load selected keybindings')\n refresh_keyb = Button(\n keybindings, text='Refresh', command=self.read_keybinds)\n create_tooltip(refresh_keyb, 'Refresh keybinding list')\n refresh_keyb.grid(column=2, row=0)\n save_keyb = Button(keybindings, text='Save', command=self.save_keybinds)\n create_tooltip(save_keyb, 'Save your current keybindings')\n save_keyb.grid(column=1, row=1)\n delete_keyb = Button(\n keybindings, text='Delete',\n command=lambda: self.delete_keybinds(keybinding_files))\n create_tooltip(delete_keyb, 'Delete selected keybinding')\n delete_keyb.grid(column=2, row=1)\n return f",
"def __init__(self, rows, cols, mines):\n tk.Tk.__init__(self)\n \n #load all needed images into Tile.images\n for i in range(14):\n Tile.images.append(tk.PhotoImage(file = \"images/tile-\"+str(i)+\".gif\"))\n \n self.menu = tk.Menu(self)\n self.configure(menu=self.menu)\n self.title(\"Minesweeper\")\n self.myBoard = Board(rows, cols, mines, self)\n self.menuVar = tk.IntVar(self)\n self.menuVar.set(1)\n self.checkVar = tk.IntVar(self)\n self.checkVar.set(1)\n self.gamemenu = tk.Menu(self.menu, tearoff = False)\n self.menu.add_cascade(label=\"Game\", menu=self.gamemenu)\n self.gamemenu.add_command(label=\"New Game\", command=self.myBoard.replay)\n self.gamemenu.add_separator()\n self.gamemenu.add_radiobutton(variable = self.menuVar, value=1, label=\"Beginner\", command=lambda: self.resize(8,8,10))\n self.gamemenu.add_radiobutton(variable = self.menuVar, value=2, label=\"Intermediate\", command=lambda: self.resize(16,16,40))\n self.gamemenu.add_radiobutton(variable = self.menuVar, value=3, label=\"Expert\", command=lambda: self.resize(16,30,99))\n self.gamemenu.add_separator()\n self.gamemenu.add_checkbutton(variable = self.checkVar, onvalue=4, offvalue=0, label=\"Custom\", command= self.options)\n self.gamemenu.add_separator()\n self.gamemenu.add_command(label=\"Exit\", command=self.exitGame)\n windowWidth = str(20*cols+40)\n windowHeight = str(20*rows+60)\n self.protocol(\"WM_DELETE_WINDOW\", self.exitGame)\n self.minsize(windowWidth, windowHeight)\n self.maxsize(windowWidth, windowHeight)\n self.geometry(windowWidth+'x'+windowHeight)\n self.mainloop()",
"def applyOptions(self, options_file):\n self.settings.res_width = self.res_width.text()\n self.settings.res_height = self.res_height.text()\n \n self.settings.writeSettingsToFile(options_file)\n self.button_apply.setEnabled(False)\n\n self.parent.resize(int(self.settings.res_width),int(self.settings.res_height))",
"def run_game():\n mainBoard = get_new_board()\n resetBoard(mainBoard)\n showHints = False\n\n turn = random.choice(['computer', 'player'])\n\n # Draw the starting board and ask the player what color they want.\n draw_board(mainBoard)\n\n playerTile, computer_tile = enter_player_tile()\n # Make the Surface and Rect objects for the \"New Game\" and \"Hints\" buttons\n\n newGameSurf = FONT.render('New Game', True, TEXTCOLOR, TEXTBGCOLOR2)\n newGameRect = newGameSurf.get_rect()\n newGameRect.topright = (WINDOWWIDTH - 8, 10)\n\n hintsSurf = FONT.render('Hints', True, TEXTCOLOR, TEXTBGCOLOR2)\n hintsRect = hintsSurf.get_rect()\n hintsRect.topright = (WINDOWWIDTH - 8, 40)\n\n while True: # main game loop\n # Keep looping for player and computer's turns.\n if turn == 'player':\n # Player's turn:\n if get_valid_moves(mainBoard, playerTile) == []:\n # If it's the player's turn but they\n # can't move, then end the game.\n break\n\n movexy = None\n\n while movexy == None:\n # Keep looping until the player clicks on a valid space.\n # Determine which board data structure to use for display.\n if showHints:\n boardToDraw = get_board_with_valid_moves(mainBoard, playerTile)\n else:\n boardToDraw = mainBoard\n\n check_for_quit()\n for event in pygame.event.get(): # event handling loop\n if event.type == MOUSEBUTTONUP:\n # Handle mouse click events\n mousex, mousey = event.pos\n if newGameRect.collide_point((mousex, mousey)):\n # Start a new game\n return True\n elif hintsRect.collide_point((mousex, mousey)):\n # Toggle hints mode\n showHints = not showHints\n # movexy is set to a two-item tuple XY coordinate, or None value\n movexy = get_space_clicked(mousex, mousey)\n\n if movexy != None and not isValidMove(mainBoard, playerTile, movexy[0], movexy[1]):\n movexy = None\n\n # Draw the game board.\n draw_board(boardToDraw)\n draw_info(boardToDraw, playerTile, computer_tile, turn)\n\n # Draw the \"New Game\" and \"Hints\" buttons.\n DISPLAYSURF.blit(newGameSurf, newGameRect)\n DISPLAYSURF.blit(hintsSurf, hintsRect)\n\n MAINCLOCK.tick(FPS)\n pygame.display.update()\n\n # Make the move and end the turn.\n make_move(mainBoard, playerTile, movexy[0], movexy[1], True)\n if get_valid_moves(mainBoard, computer_tile) != []:\n # Only set for the computer's turn if it can make a move.\n turn = 'computer'\n else:\n # Computer's turn:\n if get_valid_moves(mainBoard, computer_tile) == []:\n # If it was set to be the computer's turn but\n # they can't move, then end the game.\n break\n\n # Draw the board.\n draw_board(mainBoard)\n draw_info(mainBoard, playerTile, computer_tile, turn)\n\n # Draw the \"New Game\" and \"Hints\" buttons.\n DISPLAYSURF.blit(newGameSurf, newGameRect)\n DISPLAYSURF.blit(hintsSurf, hintsRect)\n\n # Make it look like the computer is thinking by pausing a bit.\n pauseUntil = time.time() + random.randint(5, 15) * 0.1\n\n while time.time() < pauseUntil:\n pygame.display.update()\n\n # Make the move and end the turn.\n x, y = get_computer_move(mainBoard, computer_tile)\n make_move(mainBoard, computer_tile, x, y, True)\n\n if get_valid_moves(mainBoard, playerTile) != []:\n # Only set for the player's turn if they can make a move.\n turn = 'player'\n\n # Display the final score.\n draw_board(mainBoard)\n scores = get_score_of_board(mainBoard)\n # Determine the text of the message to display.\n\n if scores[playerTile] > scores[computer_tile]:\n text = 'You beat the computer by %s points! Congratulations!' 
% \\\n (scores[playerTile] - scores[computer_tile])\n elif scores[playerTile] < scores[computer_tile]:\n text = 'You lost. The computer beat you by %s points.' % \\\n (scores[computer_tile] - scores[playerTile])\n else:\n text = 'The game was a tie!'\n\n textSurf = FONT.render(text, True, TEXTCOLOR, TEXTBGCOLOR1)\n textRect = textSurf.get_rect()\n textRect.center = (int(WINDOWWIDTH / 2), int(WINDOWHEIGHT / 2))\n DISPLAYSURF.blit(textSurf, textRect)\n\n # Display the \"Play again?\" text with Yes and No buttons.\n text2Surf = BIGFONT.render('Play again?', True, TEXTCOLOR, TEXTBGCOLOR1)\n text2Rect = text2Surf.get_rect()\n text2Rect.center = (int(WINDOWWIDTH / 2), int(WINDOWHEIGHT / 2) + 50)\n\n # Make \"Yes\" button.\n yesSurf = BIGFONT.render('Yes', True, TEXTCOLOR, TEXTBGCOLOR1)\n yesRect = yesSurf.get_rect()\n yesRect.center = (int(WINDOWWIDTH / 2) - 60, int(WINDOWHEIGHT / 2) + 90)\n\n # Make \"No\" button.\n noSurf = BIGFONT.render('No', True, TEXTCOLOR, TEXTBGCOLOR1)\n noRect = noSurf.get_rect()\n noRect.center = (int(WINDOWWIDTH / 2) + 60, int(WINDOWHEIGHT / 2) + 90)\n\n while True:\n # Process events until the user clicks on Yes or No.\n check_for_quit()\n\n for event in pygame.event.get(): # event handling loop\n if event.type == MOUSEBUTTONUP:\n mousex, mousey = event.pos\n\n if yesRect.collide_point((mousex, mousey)):\n return True\n\n elif noRect.collide_point((mousex, mousey)):\n return False\n\n DISPLAYSURF.blit(textSurf, textRect)\n DISPLAYSURF.blit(text2Surf, text2Rect)\n DISPLAYSURF.blit(yesSurf, yesRect)\n DISPLAYSURF.blit(noSurf, noRect)\n\n pygame.display.update()\n MAINCLOCK.tick(FPS)",
"def optionsWindow():\n\t# create the main interface\n\tif cmds.window(kSetupOptionsWindow, q=True, ex=True):\n\t\tcmds.deleteUI(kSetupOptionsWindow)\n\tmainWindow = cmds.window(kSetupOptionsWindow, title='%s Options'%kToolName, menuBar=True, wh=(545,350))\n\t\n\t# build the menu bar\n\tcmds.menu(label='Help')\n\tamui.helpMenuItem(kToolName, __file__)\n\tamui.aboutMenuItem(kToolName, kVersionNumber, kVersionDate)\n\t\n\tmainForm = cmds.formLayout(nd=100)\n\t\n\t# build the section to get information about the new twist joints\n\tif_suffixName = cmds.textFieldGrp(text='_Twist', label='Suffix of New Twist Joints:')\n\tif_numberTwistJoints = cmds.intSliderGrp(v=3, min=1, max=10, fmn=1, fmx=100, label='Number of Twist Joints:', field=True)\n\t\n\t# position the input fields for the twist joints\n\tcmds.formLayout(mainForm, edit=True, attachForm=[(if_suffixName, 'left', 30), (if_suffixName, 'top', 5)], attachNone=[(if_suffixName, 'right'), (if_suffixName, 'bottom')])\n\tcmds.formLayout(mainForm, edit=True, attachForm=[(if_numberTwistJoints, 'left', 30)], attachNone=[(if_numberTwistJoints, 'right'), (if_numberTwistJoints, 'bottom')], attachControl=[(if_numberTwistJoints, 'top', 5, if_suffixName)])\n\t\n\t# build the section to get information for the hip constraint\n\tconstraintFrame = eval('cmds.frameLayout(collapsable=True, label=\"Hip Constraint Options:\" %s)'%amui.__frameAlignCenter__)\n\tconstraintForm = cmds.formLayout(nd=100)\n\t\n\t# attempt to guess what the pelvis is if there is a selection when the GUI is created\n\tpelvisText = 'CenterRoot'\n\tsel = cmds.ls(sl=True, l=True, type='transform')\n\tif sel and len(sel) > 0: # BUG: in Maya 8.5, a selection of length 0 returns None rather than an empty list\n\t\ttry:\n\t\t\thip = cmds.listRelatives(sel[0], p=True, f=True) # just use the first knee in the selection\n\t\t\tpelvis = cmds.listRelatives(hip[0], p=True, f=True)\n\t\t\tpelvisText = pelvis[0]\n\t\texcept: pass\n\t\t\n\tif_pelvis = cmds.textFieldGrp(label='Pelvis Object:', tx=pelvisText)\n\tif_hipAimAxis = cmds.floatFieldGrp(v1=1, v2=0, v3=0, nf=3, pre=4, label='Hip Aim Axis:')\n\tif_hipFrontAxis = cmds.floatFieldGrp(v1=0, v2=0, v3=1, nf=3, pre=4, label='Hip Front Axis:')\n\tif_pelvisAimAxis = cmds.floatFieldGrp(v1=0, v2=1, v3=0, nf=3, pre=4, label='Pelvis Aim Axis:')\n\tif_pelvisFrontAxis = cmds.floatFieldGrp(v1=0, v2=0, v3=1, nf=3, pre=4, label='Pelvis Front Axis:')\n\t\n\t# position the input fields for the hip constraint\n\tcmds.formLayout(constraintForm, edit=True, attachForm=[(if_pelvis, 'left', 30), (if_pelvis, 'top', 5)], attachNone=[(if_pelvis, 'right'), (if_pelvis, 'bottom')])\n\tcmds.formLayout(constraintForm, edit=True, attachForm=[(if_hipAimAxis, 'left', 30)], attachNone=[(if_hipAimAxis, 'right'), (if_hipAimAxis, 'bottom')], attachControl=[(if_hipAimAxis, 'top', 5, if_pelvis)])\n\tcmds.formLayout(constraintForm, edit=True, attachForm=[(if_hipFrontAxis, 'left', 30)], attachNone=[(if_hipFrontAxis, 'right'), (if_hipFrontAxis, 'bottom')], attachControl=[(if_hipFrontAxis, 'top', 5, if_hipAimAxis)])\n\tcmds.formLayout(constraintForm, edit=True, attachForm=[(if_pelvisAimAxis, 'left', 30)], attachNone=[(if_pelvisAimAxis, 'right'), (if_pelvisAimAxis, 'bottom')], attachControl=[(if_pelvisAimAxis, 'top', 5, if_hipFrontAxis)])\n\tcmds.formLayout(constraintForm, edit=True, attachForm=[(if_pelvisFrontAxis, 'left', 30)], attachNone=[(if_pelvisFrontAxis, 'right'), (if_pelvisFrontAxis, 'bottom')], attachControl=[(if_pelvisFrontAxis, 'top', 5, 
if_pelvisAimAxis)])\n\t\n\tcmds.setParent('..') # go up to constraintForm\n\tcmds.setParent('..') # go up to mainForm\n\t\n\t# position the frame for the hip constraint\n\tcmds.formLayout(mainForm, edit=True, attachPosition=[(constraintFrame, 'left', -1, 0), (constraintFrame, 'right', -1, 100)], attachControl=[(constraintFrame, 'top', 5, if_numberTwistJoints)], attachNone=[(constraintFrame, 'bottom')])\n\t\n\t# create the buttons to execute the script\n\tcmd_create='amTools.rigging.hipSetup.doOptions (\"%s\", \"%s\", \"%s\", \"%s\", \"%s\", \"%s\", \"%s\")'%(\n\t\tif_suffixName, \n\t\tif_numberTwistJoints, \n\t\tif_pelvis, \n\t\tif_hipAimAxis, \n\t\tif_hipFrontAxis, \n\t\tif_pelvisAimAxis, \n\t\tif_pelvisFrontAxis)\n\tutils.ui.threeButtonLayout(mainForm, mainWindow, cmd_create)\n\t\n\tcmds.showWindow(mainWindow)",
"def OnOkButton(self,event):\n debug.debug_info(\"avatarSizeDialog.OnOkButton()\")\n size = self.sizeSlider.GetValue()\n\n # generate the avatars of the neighbors with the size value\n self.resizeAvatars(size)\n\n # save the new value in the conf file\n configuration.writeConfParameterValue(\"avatarSize\", size)\n\n # close the dialog box\n self.Close(FALSE)",
"def options(self):\n opt = self.main_window.toplevel()\n cur_l = tkinter.Scale(opt, length=200, label=\"Number of lines:\",\n orient=tkinter.HORIZONTAL, from_=1, to=12,\n command=self.update_nb_rows)\n cur_l.set(self.game.n_row) # initial position of the cursor\n cur_l.pack()\n cur_h = tkinter.Scale(opt, length=200, label=\"Number of columns:\",\n orient=tkinter.HORIZONTAL, from_=1, to=12,\n command=self.update_nb_cols)\n cur_h.set(self.game.n_col)\n cur_h.pack()",
"def end_game_dialog(self):\n retry_button = pygame.Rect((self.settings[\"screen_width\"] / 2 - 75, 170), (150, 25))\n play_again = self.font.render('Play Again!', True, (255, 250, 106))\n\n quit_button = pygame.Rect((self.settings[\"screen_width\"] / 2 - 75, 250), (150, 25))\n quit_text = self.font.render('Quit!', True, (255, 250, 106))\n while True:\n self.clock.tick(10)\n self.draw_grid(0.4) # darken shade for every block\n pygame.draw.rect(self.surface, (0, 0, 0), retry_button)\n pygame.draw.rect(self.surface, (0, 0, 0), quit_button)\n self.screen.blit(self.surface, (0, 0))\n\n current_score = self.font.render(\"Score {0}\".format(self.score), True, (6, 255, 43))\n best_score = self.font.render(\"Max Score {0}\".format(self.maxScore), True, (6, 255, 43))\n\n self.screen.blit(current_score, (self.settings[\"screen_width\"] / 4 - 50, 100))\n self.screen.blit(best_score, (self.settings[\"screen_width\"] / 2 + 50, 100))\n\n self.screen.blit(play_again, (retry_button.left +\n (retry_button.width / 2 - play_again.get_width() / 2),\n retry_button.top +\n (retry_button.height / 2 - play_again.get_height() / 2))\n )\n\n self.screen.blit(quit_text, (quit_button.left +\n (quit_button.width / 2 - quit_text.get_width() / 2),\n quit_button.top +\n (quit_button.height / 2 - quit_text.get_height() / 2))\n )\n pygame.display.update()\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n return 0\n if event.type == pygame.MOUSEBUTTONDOWN:\n mouse_pos = event.pos\n if retry_button.collidepoint(mouse_pos):\n return 1\n if quit_button.collidepoint(mouse_pos):\n return 0",
"def on_click_options_button(event):\n img_options_button_on_click = PhotoImage(\n file=r\"C:\\Users\\Owner\\PycharmProjects\\Module14\\buttons\\options_pressed_normal.png\")\n lbl_options.config(image=img_options_button_on_click)\n lbl_options.image = img_options_button_on_click\n lbl_options.grid(row=16, column=1, columnspan=8, pady=8) # Uses more padding b/c the image is smaller\n under_construction() # Calls the under construction button",
"def game():\n dictCapitals = countries.dictCountries\n country = random.choice(list(dictCapitals.keys()))\n\n window = Tk()\n window.title(\"Do you know your capitals?\")\n window.geometry(\"500x250\")\n\n asktext = \"%s) What is the capital of %s?\" % (qno, country)\n question = tkinter.Label(window, text=asktext)\n question.place(x=5, y=5)\n\n def result(txt):\n \"\"\"\n Compares the answer input by the user and the correct answer. If the\n number of questions asked is less than 10, it asks another question.\n \"\"\"\n global wrong, qno\n if txt == dictCapitals[country]:\n qno += 1\n if qno <= 10:\n restart()\n else:\n msg = messagebox.showinfo('Congratulations', \"Your Final Score is %s\" % score)\n window.quit()\n window.destroy()\n\n else:\n msg = messagebox.showinfo('NO', \"WRONG! Try again.\")\n wrong += 1\n\n def restart():\n \"\"\"\n Asks another question when called.\n \"\"\"\n msg = messagebox.showinfo('YES!', \"You're Right\")\n window.destroy()\n game()\n\n def choose_correct():\n \"\"\"\n Chooses a random number to be the correct answer, so that the correct\n asnwer is not always e.g. the second choice.\n \"\"\"\n val = [0, 1, 2, 3]\n correct = random.choice(val)\n return correct\n\n def score():\n \"\"\"\n Determines the score of the user.\n \"\"\"\n factor = 10\n current = (qno - wrong - 1) * factor\n return current\n\n score = score()\n scoretext = \"Current Score = %s\" % score\n scoreboard = tkinter.Label(window, text=scoretext)\n scoreboard.place(x=300, y=225)\n\n Atxt = 0\n Btxt = 0\n Ctxt = 0\n Dtxt = 0\n buttons = [Atxt, Btxt, Ctxt, Dtxt]\n correct = choose_correct()\n buttons[correct] = dictCapitals[country]\n\n if buttons[0] != buttons[correct]:\n buttons[0] = random.choice(list(dictCapitals.values()))\n if buttons[1] != buttons[correct]:\n buttons[1] = random.choice(list(dictCapitals.values()))\n if buttons[2] != buttons[correct]:\n buttons[2] = random.choice(list(dictCapitals.values()))\n if buttons[3] != buttons[correct]:\n buttons[3] = random.choice(list(dictCapitals.values()))\n\n Atxt = buttons[0]\n Btxt = buttons[1]\n Ctxt = buttons[2]\n Dtxt = buttons[3]\n\n A = Button(window, text=Atxt, command=lambda: result(Atxt))\n A.place(x=50, y=50)\n\n B = Button(window, text=Btxt, command=lambda: result(Btxt))\n B.place(x=50, y=100)\n\n C = Button(window, text=Ctxt, command=lambda: result(Ctxt))\n C.place(x=50, y=150)\n\n D = Button(window, text=Dtxt, command=lambda: result(Dtxt))\n D.place(x=50, y=200)\n\n window.mainloop()",
"def load_game(self):\n # Show error message if any of the toggles are not picked \n if not self.diff_choice or not self.game_choice or not self.match_style:\n content = Button(text=\"Dismiss\")\n error = Popup(title=\"Select one of each option\", content=content, size_hint=(.6, .3))\n content.bind(on_press=error.dismiss)\n error.open()\n return\n \n # load game settings and swap screens\n game_screen = self.manager.get_screen(self.game_choice)\n game_screen.load_settings(self.diff_choice, self.match_style)\n self.manager.transition = SlideTransition(direction=\"left\")\n self.manager.current = self.game_choice",
"def options():\r\n pygame.display.flip()\r\n\r\n click = False\r\n waiting = True\r\n while waiting:\r\n # sets the game_over background\r\n const.WINDOW.blit(const.OPTIONS_SCREEN, (0, 0))\r\n\r\n # get the mouse cursor position\r\n x, y = pygame.mouse.get_pos()\r\n\r\n # creates the buttons\r\n back_button = pygame.Rect(242, 892, 325, 54) # back to main menu\r\n\r\n # if click on play button, then starts the game\r\n if back_button.collidepoint((x, y)):\r\n if click:\r\n return # problem: it doesn't restart the game\r\n\r\n # draws the buttons\r\n pygame.draw.rect(const.WINDOW, const.DARK_GREY, back_button, 1)\r\n\r\n click = False\r\n for event in pygame.event.get():\r\n if event.type == QUIT:\r\n pygame.quit()\r\n sys.exit()\r\n if event.type == KEYDOWN:\r\n if event.key == K_ESCAPE:\r\n pygame.quit()\r\n sys.exit()\r\n if event.type == pygame.MOUSEBUTTONDOWN:\r\n if event.button == 1:\r\n click = True\r\n\r\n pygame.display.update()\r\n const.CLOCK.tick(30)",
"def shotWinUI(*args):\n### ---------- should check for current project\n if cmds.window(\"shotWin\", exists = True):\n cmds.deleteUI(\"shotWin\")\n\n widgets[\"win\"] = cmds.window(\"shotWin\", t= \"Charlex Shot Manager\", w=1000, h=560, s=False)\n widgets[\"mainCLO\"] = cmds.columnLayout(w=1000, h=560)\n\n #######################\n #top bar layout\n #######################\n\n #rowlayout\n widgets[\"bannerFLO\"] = cmds.formLayout(w=1000, h=50, bgc=(.300,.3,.300))\n widgets[\"bannerImage\"] = cmds.image(image=\"{0}/banner_shotWin.png\".format(pi.images))\n widgets[\"spotImage\"] = cmds.iconTextButton(style=\"iconOnly\", image = \"{0}/defaultSpotImage.jpg\".format(pi.images), w=50, h=50, ann=ann[\"spotIcon\"], c=changeSpotIcon)\n widgets[\"projectText\"] = cmds.text(l=\"Project Name: Spot Name\", font = \"boldLabelFont\")\n widgets[\"sceneText\"] = cmds.text(l=\"Current Scene\") \n widgets[\"projectButton\"] = cmds.button(l=\"Change Job\", w = 100, h= 40, bgc= (.5,.5,.5), ann = ann[\"proj\"], c=setProject)\n widgets[\"refreshButton\"] = cmds.button(l=\"Refresh\", w = 60, h= 40, bgc= (.2,.2,.2), c = populateWindow)\n widgets[\"exploreButton\"] = cmds.button(l=\"Explore\\nReference\", w = 60, h= 40, bgc= (.7,.5,.3), c=exploreReference)\n\n cmds.formLayout(widgets[\"bannerFLO\"], e=True, af = [(widgets[\"bannerImage\"], \"top\", 0),\n (widgets[\"bannerImage\"], \"left\", 0),\n (widgets[\"projectText\"], \"left\", 400),\n (widgets[\"projectText\"], \"top\", 5),\n (widgets[\"sceneText\"], \"top\", 25),\n (widgets[\"spotImage\"], \"left\", 335), \n (widgets[\"sceneText\"], \"left\", 400),\n (widgets[\"projectButton\"], \"left\", 740),\n (widgets[\"projectButton\"], \"top\", 5),\n (widgets[\"refreshButton\"], \"left\", 850),\n (widgets[\"refreshButton\"], \"top\", 5),\n (widgets[\"exploreButton\"], \"left\", 920),\n (widgets[\"exploreButton\"], \"top\", 5), \n ])\n\n ######################\n #bottom layout\n ########################\n cmds.setParent(widgets[\"mainCLO\"])\n widgets[\"lowFLO\"] = cmds.formLayout()\n widgets[\"lowTLO\"] = cmds.tabLayout(bgc = (.2, .2, .2 ))\n\n ################\n #shots tab\n ################\n cmds.setParent(widgets[\"lowTLO\"])\n widgets[\"shotsFLO\"] = cmds.formLayout(\"Shots - Anim, Light and FX\",w=1000, h=500, bgc = (.4,.4,.4))\n \n ##############\n #shot asset List layout\n ###############\n widgets[\"shotAssListCLO\"] = cmds.columnLayout(w=240, bgc = (.5, .5,.5))\n widgets[\"shotAssListFLO\"] = cmds.formLayout(w=240, h= 500)\n widgets[\"shotAssListTSL\"] = cmds.textScrollList(w=240, h=465, ams=True) \n\n widgets[\"shotAssListTitleText\"] = cmds.text(l=\"Referenced Assets In Current Scene\", font = \"boldLabelFont\", al=\"center\", ann=ann[\"reffedAssets\"])\n\n cmds.formLayout(widgets[\"shotAssListFLO\"], e=True, af = [\n (widgets[\"shotAssListTSL\"], \"top\", 35),\n (widgets[\"shotAssListTSL\"], \"left\", 0),\n \n (widgets[\"shotAssListTitleText\"], \"top\", 5),\n (widgets[\"shotAssListTitleText\"], \"left\", 20),\n ])\n\n ##############\n #shot List layout\n ###############\n cmds.setParent(widgets[\"shotsFLO\"])\n widgets[\"shotListCLO\"] = cmds.columnLayout(w=130, bgc = (.5, .5, .5))\n widgets[\"shotListFLO\"] = cmds.formLayout(w=130, h= 500)\n widgets[\"shotListTSL\"] = cmds.textScrollList(w=130, h=460)\n widgets[\"shotListTitleText\"] = cmds.text(l=\"Shot List\", font = \"boldLabelFont\", ann=ann[\"shotList\"])\n widgets[\"shotListCharText\"] = cmds.text(l=\"Shots\")\n\n cmds.formLayout(widgets[\"shotListFLO\"], e=True, af = [\n 
(widgets[\"shotListTSL\"], \"top\", 40), \n (widgets[\"shotListTSL\"], \"left\", 0),\n (widgets[\"shotListTitleText\"], \"top\", 5),\n (widgets[\"shotListTitleText\"], \"left\", 30),\n (widgets[\"shotListCharText\"], \"top\", 25),\n (widgets[\"shotListCharText\"], \"left\", 5),\n ])\n\n ##############\n #shot Status layout\n ############### \n cmds.setParent(widgets[\"shotsFLO\"])\n widgets[\"shotInfoAssListTLO\"] = cmds.tabLayout(w=200, h=500)\n widgets[\"shotInfoFLO\"] = cmds.formLayout(\"ShotInfo\", w=200, h=500, bgc= (.5, .5, .5))\n widgets[\"shotInfoTitleText\"] = cmds.text(l=\"Shot Information\", font = \"boldLabelFont\")\n widgets[\"shotInfoNameText\"] = cmds.text(l=\"<Shot Name>\", font = \"boldLabelFont\", al=\"center\", w=200)\n widgets[\"shotInfoVariantText\"] = cmds.text(l=\"<Var Name>\", font = \"boldLabelFont\", al=\"center\", w=200) \n widgets[\"shotInfoPic\"] = cmds.image(image = \"{0}/kitten-photo-632-3.jpg\".format(pi.images), w= 154, h=154)\n widgets[\"shotAnnCB\"] = cmds.checkBox(l=\"Tooltips popups?\", value=tooltips, changeCommand=tooltipSet)\n\n cmds.formLayout(widgets[\"shotInfoFLO\"], e=True, af =[\n (widgets[\"shotInfoNameText\"], \"top\", 60),\n (widgets[\"shotInfoNameText\"], \"left\", 0),\n (widgets[\"shotInfoVariantText\"], \"top\", 80),\n (widgets[\"shotInfoVariantText\"], \"left\", 0), \n (widgets[\"shotInfoPic\"], \"top\", 110),\n (widgets[\"shotInfoPic\"], \"left\", 23),\n (widgets[\"shotInfoTitleText\"], \"top\", 5),\n (widgets[\"shotInfoTitleText\"], \"left\", 35),\n (widgets[\"shotAnnCB\"], \"top\", 420),\n (widgets[\"shotAnnCB\"], \"left\", 50), \n ])\n\n cmds.setParent(widgets[\"shotInfoAssListTLO\"])\n widgets[\"shotAssRigListTLO\"] = cmds.tabLayout(\"ProjAssets\", w=200, h=500) \n widgets[\"shotAssRigCharListCLO\"] = cmds.columnLayout(\"Chars\", w=200, h=500)\n widgets[\"shotAssRigCharListTSL\"] = cmds.textScrollList(w=200, h=450) \n cmds.setParent(widgets[\"shotAssRigListTLO\"])\n widgets[\"shotAssRigPropListCLO\"] = cmds.columnLayout(\"Props\", w=200, h=500)\n widgets[\"shotAssRigPropListTSL\"] = cmds.textScrollList(w=200, h=450) \n cmds.setParent(widgets[\"shotAssRigListTLO\"])\n widgets[\"shotAssRigSetListCLO\"] = cmds.columnLayout(\"Sets\", w=200, h=500)\n widgets[\"shotAssRigSetListTSL\"] = cmds.textScrollList(w=200, h=450) \n cmds.setParent(widgets[\"shotAssRigListTLO\"])\n widgets[\"shotAnmMstListCLO\"] = cmds.columnLayout(\"Anm\", w=200, h=500)\n widgets[\"shotAnmMstListTSL\"] = cmds.textScrollList(w=200, h=450) \n ###############\n #Shot Action layout\n ################\n cmds.setParent(widgets[\"shotsFLO\"])\n widgets[\"shotActionFLO\"] = cmds.formLayout(w=150, h=500, bgc =(.5, .5, .5))\n widgets[\"shotActionRefAssBut\"] = cmds.button(l=\"-> Ref Asset In ->\", w=130, h=20, bgc = (.7,.7,.7), c=referenceAsset, ann=ann[\"refAsset\"]) \n widgets[\"shotActionReplaceBut\"] = cmds.button(l=\"-> Replace Reference ->\", w=130, h=20, en=True, bgc = (.7,.7,.7), ann=ann[\"replace\"], c=replaceReference)\n widgets[\"shotActionRefMultBut\"] = cmds.button(l=\"-> Ref Multiple ->\", w=100, h=20, en=True, bgc = (.7,.7,.7), ann=ann[\"refMult\"], c=referenceMultiple)\n widgets[\"shotActionRefMultIFG\"] = cmds.intFieldGrp(w=20, v1=1)\n widgets[\"shotActionReloadBut\"] = cmds.button(l=\"Reload Reference ->\", w=130, h=20, bgc = (.7,.7,.7), c=reloadReference, ann=ann[\"reload\"])\n widgets[\"shotActionUnloadBut\"] = cmds.button(l=\"Unload Reference ->\", w=130, h=20, bgc = (.7,.7,.7), c=unloadReference, ann=ann[\"unload\"])\n widgets[\"shotActionRemoveBut\"] = 
cmds.button(l=\"Remove Reference ->\", w=130, h=20, bgc = (.7,.7,.7), c=removeReference, ann=ann[\"remove\"])\n widgets[\"shotActionQIncrBut\"] = cmds.button(l=\"Quick Increment\", w=130, h=20, en=True, bgc = (.7,.7,.7), c=quickIncrement, ann=ann[\"qkIncr\"])\n widgets[\"shotActionNewShotBut\"] = cmds.button(l=\"Create new shot\", en=True, w=130, h=20, bgc = (.7,.7,.7), c=createNewShot, ann=ann[\"crtShot\"]) \n widgets[\"shotActionTitle\"] = cmds.text(l=\"Shot Actions\", font = \"boldLabelFont\")\n\n # create an embedded tab layout for each type of button!\n widgets[\"shotActionTypeTLO\"] = cmds.tabLayout(\"Specific Actions\", w=150, h=180, bgc=(.2,.2,.2))\n\n widgets[\"shotActionTypeAnmSLO\"] = cmds.scrollLayout(\"Anm\", w=150, h=180, verticalScrollBarThickness=5) \n widgets[\"shotActionTypeAnmFLO\"] = cmds.formLayout(w=150,h=240, bgc=(.4, .45, .4))\n widgets[\"shotActionExpAnimBut\"] = cmds.button(l=\"Export Anim ->\", w=130, h=20, en=True, bgc=(.7,.7,.7), c=exportAnimation, ann=ann[\"expAnim\"])\n widgets[\"shotActionImpAnimBut\"] = cmds.button(l=\"Import Anim ->\", w=130, h=20, en=True, bgc=(.7,.7,.7), c=importAnimation, ann=ann[\"impAnim\"])\n widgets[\"shotActionRefToBut\"] = cmds.button(l=\"-> Reference To\", w=130, h=20, en=True, bgc=(.7,.7,.7), c=referenceTo, ann=ann[\"refTo\"])\n widgets[\"shotActionCtrlMkBut\"] = cmds.button(l=\"Ctrl On Selection\", w=130, h=20, en=True, bgc=(.7,.7,.7), c=controlMaker, ann=ann[\"ctrlMk\"])\n\n cmds.setParent(widgets[\"shotActionTypeTLO\"])\n widgets[\"shotActionTypeLgtSLO\"] = cmds.scrollLayout(\"Lgt\", w=150, h=180, verticalScrollBarThickness=5) \n widgets[\"shotActionTypeLgtFLO\"] = cmds.formLayout(w=150,h=240, bgc=(.4, .4, .45))\n widgets[\"shotActionGenericBut\"] = cmds.button(l=\"Render Setup\", w=130, h=20, en=True, bgc = (.7,.7,.7), c=renderSetup, ann=ann[\"rendGen\"])\n\n widgets[\"shotActionMtlBut\"] = cmds.button(l=\"-> Apply Mtl To Sel ->\", w=130, h=20, en=False, bgc = (.7,.7,.7), ann=ann[\"mtlApply\"])\n\n cmds.setParent(widgets[\"shotActionTypeTLO\"])\n widgets[\"shotActionTypeFxSLO\"] = cmds.scrollLayout(\"Fx\", w=150, h=240, verticalScrollBarThickness=5) \n widgets[\"shotActionTypeFxFLO\"] = cmds.formLayout(w=150,h=180, bgc=(.45, .4, .4))\n \n\n#---------------- add any fx buttons here and then postion them below \n\n cmds.formLayout(widgets[\"shotActionTypeLgtFLO\"], e=True, af = [\n (widgets[\"shotActionGenericBut\"], \"top\", 10),\n (widgets[\"shotActionGenericBut\"], \"left\", 2),\n (widgets[\"shotActionMtlBut\"], \"top\", 40),\n (widgets[\"shotActionMtlBut\"], \"left\", 2) \n ])\n\n cmds.formLayout(widgets[\"shotActionTypeAnmFLO\"], e=True, af = [\n (widgets[\"shotActionExpAnimBut\"], \"top\", 10),\n (widgets[\"shotActionExpAnimBut\"], \"left\", 2),\n (widgets[\"shotActionImpAnimBut\"], \"top\", 40),\n (widgets[\"shotActionImpAnimBut\"], \"left\", 2),\n (widgets[\"shotActionRefToBut\"], \"top\", 70),\n (widgets[\"shotActionRefToBut\"], \"left\", 2),\n (widgets[\"shotActionCtrlMkBut\"], \"top\", 100),\n (widgets[\"shotActionCtrlMkBut\"], \"left\", 2) \n ])\n\n cmds.formLayout(widgets[\"shotActionFLO\"], e=True, af = [\n (widgets[\"shotActionTitle\"], \"top\", 5),\n (widgets[\"shotActionTitle\"], \"left\", 35),\n (widgets[\"shotActionRefAssBut\"], \"top\", 30),\n (widgets[\"shotActionRefAssBut\"], \"left\", 10),\n (widgets[\"shotActionRefMultBut\"], \"top\", 60),\n (widgets[\"shotActionRefMultBut\"], \"left\", 10),\n (widgets[\"shotActionRefMultIFG\"], \"top\", 60),\n (widgets[\"shotActionRefMultIFG\"], \"left\", 110),\n 
(widgets[\"shotActionReloadBut\"], \"top\", 90),\n (widgets[\"shotActionReloadBut\"], \"left\", 10),\n (widgets[\"shotActionUnloadBut\"], \"top\", 120),\n (widgets[\"shotActionUnloadBut\"], \"left\", 10),\n (widgets[\"shotActionRemoveBut\"], \"top\", 150),\n (widgets[\"shotActionRemoveBut\"], \"left\", 10),\n (widgets[\"shotActionReplaceBut\"], \"top\", 180),\n (widgets[\"shotActionReplaceBut\"], \"left\", 10),\n (widgets[\"shotActionQIncrBut\"], \"top\", 210),\n (widgets[\"shotActionQIncrBut\"], \"left\", 10),\n (widgets[\"shotActionTypeTLO\"], \"top\", 270),\n (widgets[\"shotActionTypeTLO\"], \"left\", 0), \n (widgets[\"shotActionNewShotBut\"], \"top\", 470),\n (widgets[\"shotActionNewShotBut\"], \"left\", 10), \n ])\n\n ###############\n #Shot anmLgt tab layout\n ################\n cmds.setParent(widgets[\"shotsFLO\"])\n widgets[\"anmLgtFLO\"] = cmds.formLayout(w=250, h=500, bgc = (.4, .4, .4))\n widgets[\"anmLgtTLO\"] = cmds.tabLayout(w=250, h=500, bgc = (.4,.4,.4), changeCommand = varTabChange)\n ###############\n #shot anm tab layout\n ###############\n widgets[\"anmTabCLO\"] = cmds.columnLayout(\"ANM\", w=250, bgc = (.4, .45, .4))\n #################\n #anm info frame and column layouts\n ################# \n cmds.separator(h=5)\n widgets[\"anmVariationsTSL\"] = cmds.textScrollList(w=250, h=90)\n widgets[\"anmLastWSTFG\"] = cmds.textFieldGrp(l=\"Latest WS: \", w=250, cw = [(1, 70), (2,170)], cal = [(1,\"left\"), (2, \"left\")],ed=False)\n widgets[\"anmLastMasterTFG\"] = cmds.textFieldGrp(l=\"Master: \", w=250, cw = [(1, 70), (2,170)], cal = [(1,\"left\"), (2, \"left\")],ed=False)\n cmds.separator(h=5)\n\n #################\n #anm 'workshop' frame and column layouts\n #################\n cmds.setParent(widgets[\"anmTabCLO\"])\n widgets[\"anmWSFLO\"] = cmds.frameLayout(\"Animation Workshop\", w=250, h=165, bgc= (.3, .3, .3))\n widgets[\"anmWSFoLO\"] = cmds.formLayout(w=250, h=165, bgc = (.4,.45,.4))\n\n widgets[\"anmWSOpenBut\"] = cmds.button(l=\"Open Latest\\nAnim\\nWorkshop\", w=70, h=50, en=False, bgc = (.4,.5,.8), ann=ann[\"openWS\"])\n widgets[\"anmWSIncrBut\"] = cmds.button(l=\"Increment Anim Workshop\", w=160, h=50, en=True, bgc = (.7,.6,.4), ann=ann[\"incrWS\"], c = partial(incrementWorkshop, \"anm\"))\n widgets[\"anmWSPrevBut\"] = cmds.button(l=\"Previous Anim Workshops\", w=160, bgc = (.7,.7,.7), en=False, ann=ann[\"prevWS\"])\n widgets[\"anmWSInfoBut\"] = cmds.button(l=\"WS Info\", w=70, bgc = (.7, .7, .7), en=False, ann=ann[\"WSInfo\"]) \n widgets[\"anmWSNewVarBut\"] = cmds.button(l=\"Create New Variant\", w=160, h=30, bgc = (.2,.2,.2), c=partial(createVariant, \"anm\"), ann=ann[\"crtVariant\"])\n widgets[\"anmVarIconBut\"] = cmds.button(l=\"Create Var\\nIcon\", w=70, h=30, bgc = (.7,.7,.7), en=False, c=createShotIcon, ann=ann[\"crtIcon\"]) \n\n cmds.formLayout(widgets[\"anmWSFoLO\"], e=True, af = [\n (widgets[\"anmWSOpenBut\"], \"left\", 5),\n (widgets[\"anmWSOpenBut\"], \"top\", 10),\n (widgets[\"anmWSIncrBut\"], \"left\", 80),\n (widgets[\"anmWSIncrBut\"], \"top\", 10),\n (widgets[\"anmWSInfoBut\"], \"left\", 5),\n (widgets[\"anmWSInfoBut\"], \"top\", 65),\n (widgets[\"anmWSPrevBut\"], \"left\", 80),\n (widgets[\"anmWSPrevBut\"], \"top\", 65),\n (widgets[\"anmWSNewVarBut\"], \"left\", 5),\n (widgets[\"anmWSNewVarBut\"], \"top\", 105),\n (widgets[\"anmVarIconBut\"], \"left\", 170),\n (widgets[\"anmVarIconBut\"], \"top\", 105), \n ])\n #################\n #anm 'master' frame and column layouts\n #################\n cmds.setParent(widgets[\"anmTabCLO\"])\n 
widgets[\"anmMstFLO\"] = cmds.frameLayout(\"Animation Master\", w=250, h=200, bgc= (.3, .3, .3))\n widgets[\"anmMstFoLO\"] = cmds.formLayout(w=250, h=200, bgc = (.4,.45,.4))\n widgets[\"anmMstOpenBut\"] = cmds.button(l=\"Open Anim\\nMaster\", w=70, h=50, en=False, bgc = (.5,.7,.5), ann=ann[\"openMst\"])\n widgets[\"anmMstIncrBut\"] = cmds.button(l=\"Publish Anim Master\\n(Import Refs)\", w=160, h=50, en=False, bgc = (.7,.5,.5), ann=ann[\"pubRefMst\"])\n widgets[\"anmMstBgIncrBut\"] = cmds.button(l=\"BG Publish Anim Master (Import Refs)\", w=235, en=False, bgc = (.3,.3,.3), ann=ann[\"pubBGMst\"])\n widgets[\"anmMstPrevBut\"] = cmds.button(l=\"Previous Anim Masters\", w=160, en=False, bgc = (.7,.7,.7), ann=ann[\"prevMst\"])\n widgets[\"anmMstInfoBut\"] = cmds.button(l=\"Mst Info\", w=70, bgc = (.7, .7, .7), en=False, ann=ann[\"MstInfo\"])\n\n\n \n cmds.formLayout(widgets[\"anmMstFoLO\"], e=True, af = [\n (widgets[\"anmMstOpenBut\"], \"left\", 5),\n (widgets[\"anmMstOpenBut\"], \"top\", 10),\n (widgets[\"anmMstIncrBut\"], \"left\", 80),\n (widgets[\"anmMstIncrBut\"], \"top\", 10),\n (widgets[\"anmMstBgIncrBut\"], \"left\", 5),\n (widgets[\"anmMstBgIncrBut\"], \"top\", 65), \n (widgets[\"anmMstInfoBut\"], \"left\", 5),\n (widgets[\"anmMstInfoBut\"], \"top\", 95), \n (widgets[\"anmMstPrevBut\"], \"left\", 80),\n (widgets[\"anmMstPrevBut\"], \"top\", 95), \n \n ])\n ###############\n #shot Lgt tab layout\n ################ \n cmds.setParent(widgets[\"anmLgtTLO\"]) \n widgets[\"lgtTabCLO\"] = cmds.columnLayout(\"LGT\", w=250, bgc = (.4,.4,.45))\n #################\n #lgt info frame and column layouts\n ################# \n cmds.separator(h=5)\n widgets[\"lgtVariationsTSL\"] = cmds.textScrollList(w=250, h=90)\n widgets[\"lgtLastWSTFG\"] = cmds.textFieldGrp(l=\"Latest WS: \", w=250, cw = [(1, 70), (2,170)], cal = [(1,\"left\"), (2, \"left\")],ed=False)\n widgets[\"lgtLastMasterTFG\"] = cmds.textFieldGrp(l=\"Master: \", w=250, cw = [(1, 70), (2,170)], cal = [(1,\"left\"), (2, \"left\")],ed=False) \n cmds.separator(h=5)\n #################\n #lgt 'workshop' frame and column layouts\n #################\n cmds.setParent(widgets[\"lgtTabCLO\"])\n widgets[\"lgtWSFLO\"] = cmds.frameLayout(\"Lighting Workshop\", w=250, h=165, bgc= (.3, .3, .3))\n widgets[\"lgtWSFoLO\"] = cmds.formLayout(w=250, h=165, bgc = (.4,.4,.45))\n\n widgets[\"lgtWSOpenBut\"] = cmds.button(l=\"Open Latest\\nLight\\nWorkshop\", w=70, h=50, en=False, bgc = (.4,.5,.8), ann=ann[\"openWS\"])\n widgets[\"lgtWSIncrBut\"] = cmds.button(l=\"Increment Light Workshop\", w=160, h=50, en=True, bgc = (.7,.6,.4), c = partial(incrementWorkshop, \"lgt\"), ann=ann[\"incrWS\"])\n widgets[\"lgtWSInfoBut\"] = cmds.button(l=\"WS Info\", w=70, bgc = (.7, .7, .7), en=False, ann=ann[\"WSInfo\"])\n widgets[\"lgtWSPrevBut\"] = cmds.button(l=\"Previous Light Workshops\", w=160, en=False, bgc = (.7,.7,.7), ann=ann[\"prevWS\"])\n widgets[\"lgtWSNewVarBut\"] = cmds.button(l=\"Create New Variant\", w=160, h=30, bgc = (.2,.2,.2), c=partial(createVariant, \"lgt\"), ann=ann[\"crtVariant\"]) \n widgets[\"lgtVarIconBut\"] = cmds.button(l=\"Create Var\\nIcon\", w=70, h=30, en=False, bgc = (.7,.7,.7), c=createShotIcon, ann=ann[\"crtIcon\"])\n\n cmds.formLayout(widgets[\"lgtWSFoLO\"], e=True, af = [\n (widgets[\"lgtWSOpenBut\"], \"left\", 5),\n (widgets[\"lgtWSOpenBut\"], \"top\", 10),\n (widgets[\"lgtWSIncrBut\"], \"left\", 80),\n (widgets[\"lgtWSIncrBut\"], \"top\", 10),\n (widgets[\"lgtWSInfoBut\"], \"left\", 5),\n (widgets[\"lgtWSInfoBut\"], \"top\", 65),\n 
(widgets[\"lgtWSPrevBut\"], \"left\", 80),\n (widgets[\"lgtWSPrevBut\"], \"top\", 65),\n (widgets[\"lgtWSNewVarBut\"], \"left\", 5),\n (widgets[\"lgtWSNewVarBut\"], \"top\", 105),\n (widgets[\"lgtVarIconBut\"], \"left\", 170),\n (widgets[\"lgtVarIconBut\"], \"top\", 105), \n ]) \n #################\n #lgt 'master' frame and column layouts\n #################\n cmds.setParent(widgets[\"lgtTabCLO\"])\n widgets[\"lgtMstFLO\"] = cmds.frameLayout(\"Lighting Master\", w=250, h=200, bgc= (.3, .3, .3))\n widgets[\"lgtMstFoLO\"] = cmds.formLayout(w=250, h=200, bgc = (.4,.4,.45))\n widgets[\"lgtMstOpenBut\"] = cmds.button(l=\"Open\\nLight Master\", w=70, h=50, en=True, bgc = (.5,.7,.5), c=partial(openShotMaster, \"lgt\"), ann=ann[\"openMst\"])\n widgets[\"lgtMstIncrBut\"] = cmds.button(l=\"Publish Light Master\\n(Keep Refs)\", w=160, h=50, en=False, bgc = (.7,.5,.5), ann=ann[\"pubRefMst\"])\n widgets[\"lgtMstInfoBut\"] = cmds.button(l=\"Mst Info\", w=70, bgc = (.7, .7, .7), en=False, ann=ann[\"MstInfo\"]) \n widgets[\"lgtMstPrevBut\"] = cmds.button(l=\"Previous Light Masters\", w=160, en=False, bgc = (.7,.7,.7), ann=ann[\"prevMst\"])\n widgets[\"lgtMstBgIncrBut\"] = cmds.button(l=\" BG Publish Light Master (Import Refs)\", w=235, en=False, bgc = (.3,.3,.3), ann=ann[\"pubBGMst\"]) \n\n cmds.formLayout(widgets[\"lgtMstFoLO\"], e=True, af = [\n (widgets[\"lgtMstOpenBut\"], \"left\", 5),\n (widgets[\"lgtMstOpenBut\"], \"top\", 10),\n (widgets[\"lgtMstIncrBut\"], \"left\", 80),\n (widgets[\"lgtMstIncrBut\"], \"top\", 10),\n (widgets[\"lgtMstBgIncrBut\"], \"left\", 5),\n (widgets[\"lgtMstBgIncrBut\"], \"top\", 65), \n (widgets[\"lgtMstInfoBut\"], \"left\", 5),\n (widgets[\"lgtMstInfoBut\"], \"top\", 95),\n (widgets[\"lgtMstPrevBut\"], \"left\", 80),\n (widgets[\"lgtMstPrevBut\"], \"top\", 95),\n \n ]) \n\n ###############\n #shot anm tab layout\n ###############\n cmds.setParent(widgets[\"anmLgtTLO\"])\n widgets[\"fxTabCLO\"] = cmds.columnLayout(\"FX\", w=250, bgc = (.45, .4, .4))\n #################\n #fx info frame and column layouts\n ################# \n cmds.separator(h=5)\n widgets[\"fxVariationsTSL\"] = cmds.textScrollList(w=250, h=90)\n widgets[\"fxLastWSTFG\"] = cmds.textFieldGrp(l=\"Latest WS: \", w=250, cw = [(1, 70), (2,170)], cal = [(1,\"left\"), (2, \"left\")],ed=False)\n widgets[\"fxLastMasterTFG\"] = cmds.textFieldGrp(l=\"Master: \", w=250, cw = [(1, 70), (2,170)], cal = [(1,\"left\"), (2, \"left\")],ed=False) \n cmds.separator(h=5)\n #################\n #lgt 'workshop' frame and column layouts\n #################\n cmds.setParent(widgets[\"fxTabCLO\"])\n widgets[\"fxWSFLO\"] = cmds.frameLayout(\"FX Workshop\", w=250, h=165, bgc= (.3, .3, .3))\n widgets[\"fxWSFoLO\"] = cmds.formLayout(w=250, h=165, bgc = (.45,.4,.4))\n\n widgets[\"fxWSOpenBut\"] = cmds.button(l=\"Open Latest\\nFX\\nWorkshop\", w=70, h=50, en=False, bgc = (.4,.5,.8), ann=ann[\"openWS\"])\n widgets[\"fxWSIncrBut\"] = cmds.button(l=\"Increment FX Workshop\", w=160, h=50, en=True, bgc = (.7,.6,.4), c = partial(incrementWorkshop, \"fx\"), ann=ann[\"incrWS\"])\n widgets[\"fxWSInfoBut\"] = cmds.button(l=\"WS Info\", w=70, bgc = (.7, .7, .7), en=False, ann=ann[\"WSInfo\"]) \n widgets[\"fxWSPrevBut\"] = cmds.button(l=\"Previous FX Workshops\", w=160, en=False, bgc = (.7,.7,.7), ann=ann[\"prevWS\"])\n widgets[\"fxWSNewVarBut\"] = cmds.button(l=\"Create New Variant\", w=160, h=30, bgc = (.2,.2,.2), c=partial(createVariant, \"fx\"), ann=ann[\"crtVariant\"])\n widgets[\"fxVarIconBut\"] = cmds.button(l=\"Create Var\\nIcon\", w=70, h=30, 
en=False, bgc = (.7,.7,.7), c=createShotIcon, ann=ann[\"crtIcon\"]) \n \n cmds.formLayout(widgets[\"fxWSFoLO\"], e=True, af = [\n (widgets[\"fxWSOpenBut\"], \"left\", 5),\n (widgets[\"fxWSOpenBut\"], \"top\", 10),\n (widgets[\"fxWSIncrBut\"], \"left\", 80),\n (widgets[\"fxWSIncrBut\"], \"top\", 10),\n (widgets[\"fxWSInfoBut\"], \"left\", 5),\n (widgets[\"fxWSInfoBut\"], \"top\", 65),\n (widgets[\"fxWSPrevBut\"], \"left\", 80),\n (widgets[\"fxWSPrevBut\"], \"top\", 65),\n (widgets[\"fxWSNewVarBut\"], \"left\", 5),\n (widgets[\"fxWSNewVarBut\"], \"top\", 105),\n (widgets[\"fxVarIconBut\"], \"left\", 170),\n (widgets[\"fxVarIconBut\"], \"top\", 105), \n ]) \n #################\n #lgt 'master' frame and column layouts\n #################\n cmds.setParent(widgets[\"fxTabCLO\"])\n widgets[\"fxMstFLO\"] = cmds.frameLayout(\"FX Master\", w=250, h=200, bgc= (.3, .3, .3))\n widgets[\"fxMstFoLO\"] = cmds.formLayout(w=250, h=200, bgc = (.45,.4,.4))\n widgets[\"fxMstOpenBut\"] = cmds.button(l=\"Open\\nFX Master\", w=70, h=50, en=False, bgc = (.5,.7,.5), ann=ann[\"openMst\"])\n widgets[\"fxMstIncrBut\"] = cmds.button(l=\"Publish FX Master\\n(Import Refs)\", w=160, h=50, en=False, bgc = (.7,.5,.5), ann=ann[\"pubRefMst\"])\n widgets[\"fxMstInfoBut\"] = cmds.button(l=\"Mst Info\", w=70, bgc = (.7, .7, .7), en=False, ann=ann[\"MstInfo\"]) \n widgets[\"fxMstPrevBut\"] = cmds.button(l=\"Previous FX Masters\", w=160, en=False, bgc = (.7,.7,.7), ann=ann[\"prevMst\"])\n widgets[\"fxMstBgIncrBut\"] = cmds.button(l=\" BG Publish FX Master (Import Refs)\", w=235, en=False, bgc = (.3,.3,.3), ann=ann[\"pubBGMst\"]) \n\n cmds.formLayout(widgets[\"fxMstFoLO\"], e=True, af = [\n (widgets[\"fxMstOpenBut\"], \"left\", 5),\n (widgets[\"fxMstOpenBut\"], \"top\", 10),\n (widgets[\"fxMstIncrBut\"], \"left\", 80),\n (widgets[\"fxMstIncrBut\"], \"top\", 10),\n (widgets[\"fxMstBgIncrBut\"], \"left\", 5),\n (widgets[\"fxMstBgIncrBut\"], \"top\", 65), \n (widgets[\"fxMstInfoBut\"], \"left\", 5),\n (widgets[\"fxMstInfoBut\"], \"top\", 95),\n (widgets[\"fxMstPrevBut\"], \"left\", 80),\n (widgets[\"fxMstPrevBut\"], \"top\", 95),\n \n ]) \n\n\n cmds.setParent(widgets[\"anmLgtFLO\"])\n widgets[\"anmLgtTitleText\"] = cmds.text(l=\"Variant Files\", font = \"boldLabelFont\", ann=ann[\"varFile\"]) \n\n cmds.formLayout(widgets[\"anmLgtFLO\"], e=True, af = [(widgets[\"anmLgtTitleText\"], \"top\", 5), (widgets[\"anmLgtTitleText\"], \"left\", 135)])\n\n ###################\n # - -- Shot Tab form setup\n ##################\n cmds.formLayout(widgets[\"shotsFLO\"], e=True, af = [\n (widgets[\"shotListCLO\"], \"left\", 0),\n (widgets[\"shotListCLO\"], \"top\", 0),\n (widgets[\"anmLgtFLO\"], \"left\", 134),\n (widgets[\"anmLgtFLO\"], \"top\", 0), \n (widgets[\"shotInfoAssListTLO\"], \"left\", 387),\n (widgets[\"shotInfoAssListTLO\"], \"top\", 0),\n (widgets[\"shotActionFLO\"], \"top\", 0),\n (widgets[\"shotActionFLO\"], \"left\", 594),\n (widgets[\"shotAssListCLO\"], \"top\", 0),\n (widgets[\"shotAssListCLO\"], \"left\", 752)\n ])\n\n ################\n #Misc tab\n ################\n cmds.setParent(widgets[\"lowTLO\"])\n widgets[\"miscFLO\"] = cmds.formLayout(\"Other Shot Tools\",width=1000, height=500, backgroundColor = (.4,.4,.4))\n\n widgets[\"animationTLO\"] = cmds.tabLayout(width=500, height=250, backgroundColor = (.3, .35, .3))\n widgets[\"animationRCLO\"] = cmds.rowColumnLayout(\"animation\", numberOfColumns = 4, columnSpacing=[(1, 0), (2,5), (3,5), (4,5)], rowSpacing=[1,5])\n\n cmds.setParent(widgets[\"miscFLO\"])\n widgets[\"lightingTLO\"] 
= cmds.tabLayout(width=500, height=250, backgroundColor = (.3, .32, .35))\n widgets[\"lightingRCLO\"] = cmds.rowColumnLayout(\"lighting\", numberOfColumns = 4, columnSpacing=[(1, 0), (2,5), (3,5), (4,5)], rowSpacing=[1,5]) \n\n cmds.setParent(widgets[\"miscFLO\"])\n widgets[\"fxTLO\"] = cmds.tabLayout(width=500, height=250, backgroundColor = (.35, .3, .3))\n widgets[\"fxRCLO\"] = cmds.rowColumnLayout(\"fx\", numberOfColumns = 4, columnSpacing=[(1, 0), (2,5), (3,5), (4,5)], rowSpacing=[1,5])\n\n cmds.setParent(widgets[\"miscFLO\"])\n widgets[\"charlexTLO\"] = cmds.tabLayout(width=500, height=250, backgroundColor = (.55, .55, .55))\n widgets[\"charlexRCLO\"] = cmds.rowColumnLayout(\"charlex_general\", numberOfColumns = 4, columnSpacing=[(1, 0), (2,5), (3,5), (4,5)], rowSpacing=[1,5])\n\n cmds.formLayout(widgets[\"miscFLO\"], e=True, af=[\n (widgets[\"charlexTLO\"], \"top\", 0),\n (widgets[\"charlexTLO\"], \"left\", 0),\n (widgets[\"animationTLO\"], \"top\", 0),\n (widgets[\"animationTLO\"], \"left\", 500),\n (widgets[\"lightingTLO\"], \"top\", 250),\n (widgets[\"lightingTLO\"], \"left\", 0),\n (widgets[\"fxTLO\"], \"top\", 250),\n (widgets[\"fxTLO\"], \"left\", 500) \n ])\n\n # get the dictionary of scripts, calls and annotations from the database\n dbPath =os.path.join(os.getenv(\"MAYA_ROOT\"), \"scripts\", \"chrlx_pipe\", \"chrlxScriptList.json\")\n with open(dbPath, \"r\") as f:\n scriptList = json.load(f)\n\n # populate the row column layouts with buttons and funcs from the database\n btl.buttonsToLayout(widgets[\"animationRCLO\"], scriptList[\"shot\"][\"animation\"], width=117, height=40, color=(.38, .3, .38))\n btl.buttonsToLayout(widgets[\"lightingRCLO\"], scriptList[\"shot\"][\"lighting\"], width=117, height=40, color=(.37,.34, .3))\n btl.buttonsToLayout(widgets[\"fxRCLO\"], scriptList[\"shot\"][\"fx\"], width=117, height=40, color=(.35, .3, .3))\n btl.buttonsToLayout(widgets[\"charlexRCLO\"], scriptList[\"shot\"][\"charlex\"], width=117, height=40, color=(.3, .3, .3))\n\n # widgets[\"miscCLO\"] = cmds.columnLayout(\"Other Pipeline Tools\",w=1000, h=500, bgc = (.4,.4,.4))\n # cmds.text(l=\"------ANIM STUFF-------\")\n # cmds.text(l=\"TODO - export cam(s) for nuke, etc\")\n # cmds.text(l=\"TODO - create a new prop from selected geo (propify)\") \n # cmds.text(l=\"TODO - blasting, rendering stuff?\")\n # cmds.text(l=\"TODO - export data (text file of scene locations?)\")\n # cmds.text(l=\"TODO - create render cam? Should this be in the main anim increment? (probably both)\")\n\n # cmds.text(l=\"------LGT STUFF--------\")\n # cmds.text(l=\"TODO - set up current scene for maxwell, arnold\")\n # cmds.text(l=\"TODO - convert an external image to icon (char or project)\")\n # cmds.text(l=\"TODO - revert ['ROLL BACK'] to master version? (replaces master and grabs that workshop\")\n # cmds.text(l=\"TODO - function to add your folder to the WIP folder in this project - save current to WIP folder\")\n # cmds.text(l=\"TODO - explore various frame (render) folders in explorer\")\n # cmds.text(l=\"TODO - various preset light setups/rigs? \")\n\n\n ######################\n #show window\n ######################\n cmds.window(widgets[\"win\"], e=True, w=1000, h=580)\n cmds.showWindow(widgets[\"win\"])\n\n #start us off\n populateWindow()",
"def __init__(self, parent, state, position = wx.DefaultPosition):\n ##Set up data.\n self.state = state\n modeName = MODE_LIST[self.state.GetSurface(\"Mode\")]\n wx.Dialog.__init__(self, parent, -1, \"%s Mode Settings\" %(modeName),\n pos = position,\n style = wx.DEFAULT_FRAME_STYLE ^ (wx.RESIZE_BORDER | \n wx.MINIMIZE_BOX |\n wx.MAXIMIZE_BOX)\n | wx.TAB_TRAVERSAL)\n ##Jconf pull-down menu.\n \n self.lblStBox1 = wx.StaticBox(self, -1, \"Programs to launch\" )\n ##Name Server checkbox.\n self.cbNameServer = wx.CheckBox(self, -1, \"Name Server\", wx.DefaultPosition, wx.DefaultSize, 0 )\n self.cbNameServer.SetToolTip(wx.ToolTip(\"Run Name Server at Launch\"))\n ##Conductor checkbox.\n self.cbConductor = wx.CheckBox(self, -1, \"Conductor\", wx.DefaultPosition, wx.DefaultSize, 0 )\n self.cbConductor.SetToolTip(wx.ToolTip(\"Run Conductor at Launch\"))\n ##Xplorer checkbox.\n self.cbXplorer = wx.CheckBox(self, -1, \"Xplorer\", wx.DefaultPosition, wx.DefaultSize, 0 )\n self.cbXplorer.SetToolTip(wx.ToolTip(\"Run Xplorer at Launch\"))\n ##Desktop checkbox.\n self.cbDesktop = wx.CheckBox(self, -1, \"Desktop Mode\", wx.DefaultPosition, wx.DefaultSize, 0 )\n self.cbDesktop.SetToolTip(wx.ToolTip(\"Set Desktop Mode for\" +\n \" Conductor and Xplorer\"))\n \n self.lblStBox2 = wx.StaticBox(self, -1, \"Xplorer Configuration\" )\n ##Xplorer Type radio box.\n self.rbXplorer = wx.RadioBox(self, -1, \"Mode\",\n wx.DefaultPosition, wx.DefaultSize,\n RADIO_XPLORER_LIST, 1, wx.RA_SPECIFY_ROWS)\n self.rbXplorer.SetToolTip(wx.ToolTip(\"Which Xplorer format do you\" +\n \" want to launch?\"))\n ##Cluster button.\n self.bCluster = wx.Button(self, -1, \"Cluster Settings\", wx.DefaultPosition, wx.DefaultSize, 0 )\n self.bCluster.SetToolTip(wx.ToolTip(\"Set the computers and extra\" +\n \" variables in the cluster.\"))\n ##Configuration Choice\n self.chJconf = wx.Choice(self, -1, wx.DefaultPosition, [150,-1])\n self.chJconf.SetToolTip(wx.ToolTip(\"Choose Xplorer's configuration.\"))\n ##Edit Jconf button.\n self.bEditJconf = wx.Button(self, -1, \"Edit Configuration List\", wx.DefaultPosition, wx.DefaultSize, 0 )\n self.bEditJconf.SetToolTip(wx.ToolTip(\"Edit the list of Xplorer\" +\n \" configurations.\")) \n #OK and Cancel button\n if windows:\n self.bOk = wx.Button( self, wx.ID_OK, \"OK\", wx.DefaultPosition, wx.DefaultSize, 0 )\n else:\n self.bOk = wx.Button( self, wx.ID_SAVE, \"Save\", wx.DefaultPosition, wx.DefaultSize, 0 )\n self.bCancel = wx.Button( self, wx.ID_CANCEL, \"Cancel\", wx.DefaultPosition, wx.DefaultSize, 0 )\n \n ##Bind events.\n self.Bind(wx.EVT_LISTBOX, self.Refresh, self.chJconf)\n self.Bind(wx.EVT_CHECKBOX, self.Refresh, self.cbXplorer)\n self.Bind(wx.EVT_RADIOBOX, self.Refresh, self.rbXplorer)\n self.Bind(wx.EVT_CHECKBOX, self.Refresh, self.cbConductor)\n self.Bind(wx.EVT_CHECKBOX, self.Refresh, self.cbDesktop)\n \"\"\"\n self.Bind(wx.EVT_LISTBOX, self.UpdateData, self.chJconf)\n self.Bind(wx.EVT_CHECKBOX, self.UpdateData, self.cbXplorer)\n self.Bind(wx.EVT_RADIOBOX, self.UpdateData, self.rbXplorer)\n self.Bind(wx.EVT_CHECKBOX, self.UpdateData, self.cbConductor)\n self.Bind(wx.EVT_CHECKBOX, self.UpdateData, self.cbDesktop)\n \"\"\"\n self.Bind(wx.EVT_CLOSE, self.OnClose)\n if windows:\n self.Bind(wx.EVT_BUTTON, self.OnOk, id = wx.ID_OK)\n else:\n self.Bind(wx.EVT_BUTTON, self.OnOk, id = wx.ID_SAVE)\n self.Bind(wx.EVT_BUTTON, self.EditJconf, self.bEditJconf)\n self.Bind(wx.EVT_BUTTON, self.EditCluster, self.bCluster)\n \n ##Set sizers.\n vSizerMain = wx.BoxSizer( wx.VERTICAL )\n vSizer1 = 
wx.BoxSizer( wx.VERTICAL )\n svSizer1 = wx.StaticBoxSizer( self.lblStBox1, wx.VERTICAL )\n svSizer1.Add( self.cbNameServer, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5 )\n hSizer1 = wx.BoxSizer( wx.HORIZONTAL )\n hSizer1.Add( self.cbConductor, 0, wx.ALIGN_CENTER|wx.ALL, 5 )\n spacer1 = wx.StaticText(self, -1, \" \", wx.DefaultPosition, wx.DefaultSize, 0 )\n hSizer1.Add( spacer1, 0, wx.ALIGN_CENTER, 5 )\n hSizer1.Add( self.cbDesktop, 0, wx.ALIGN_CENTER|wx.ALL, 5 )\n svSizer1.Add( hSizer1, 0, wx.ALIGN_CENTER_VERTICAL, 5 )\n svSizer1.Add( self.cbXplorer, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5 )\n vSizer1.Add( svSizer1, 0, wx.GROW|wx.ALIGN_CENTER_VERTICAL|wx.TOP, 5 )\n spacer2 = wx.StaticText(self, -1, \"\", wx.DefaultPosition, [10,10], 0 )\n vSizer1.Add( spacer2, 0, wx.ALIGN_CENTER, 5 )\n svSizer2 = wx.StaticBoxSizer( self.lblStBox2, wx.VERTICAL )\n hSizer2 = wx.BoxSizer( wx.HORIZONTAL )\n hSizer2.Add( self.rbXplorer, 0, wx.ALIGN_CENTER|wx.ALL, 5 )\n hSizer2.Add( self.bCluster, 0, wx.ALIGN_CENTER|wx.LEFT|wx.RIGHT|wx.TOP, 5 )\n svSizer2.Add( hSizer2, 0, wx.ALIGN_CENTER_VERTICAL, 5 )\n hSizer3 = wx.BoxSizer( wx.HORIZONTAL )\n hSizer3.Add( self.chJconf, 0, wx.ALIGN_CENTER|wx.ALL, 5 )\n hSizer3.Add( self.bEditJconf, 0, wx.ALIGN_CENTER|wx.ALL, 5 )\n svSizer2.Add( hSizer3, 0, wx.ALIGN_CENTER, 5 )\n vSizer1.Add( svSizer2, 0, wx.GROW|wx.ALIGN_CENTER_VERTICAL, 5 )\n hSizer4 = wx.BoxSizer( wx.HORIZONTAL )\n if windows:\n hSizer4.Add( self.bOk, 0, wx.ALIGN_CENTER|wx.ALL, 5 ) \n hSizer4.Add( self.bCancel, 0, wx.ALIGN_CENTER|wx.LEFT|wx.TOP|wx.BOTTOM, 5 )\n else: \n hSizer4.Add( self.bCancel, 0, wx.ALIGN_CENTER|wx.ALL, 5 ) \n hSizer4.Add( self.bOk, 0, wx.ALIGN_CENTER|wx.LEFT|wx.TOP|wx.BOTTOM, 5 )\n vSizer1.Add( hSizer4, 0, wx.ALIGN_RIGHT|wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.TOP, 5 )\n vSizerMain.Add( vSizer1, 0, wx.ALIGN_CENTER|wx.ALL, 5 ) \n \n vSizerMain.SetSizeHints(self)\n self.SetSizer(vSizerMain)\n #self.CenterOnParent(wx.BOTH)\n ##Set the background color.\n #Style(self)\n if not CLUSTER_ENABLED:\n self.bCluster.Hide()\n ##Set up OK button.\n ##Update Display\n self.React()",
"def __button2(self):\n contract_change = StringVar()\n decision = [\"Yes. I would like to change\",\"No. 6 months would suffice\"]\n contract_change.set(\"Default Contract Length is 6 Months. Change?\")\n contract_change_options = OptionMenu(self.root, contract_change, *decision)\n contract_change_options.config(width=45,bd=0,bg=\"snow\")\n contract_change_options.place(x=300,y= 455)\n\n choices = [\"3\", \"6\", \"12\", \"24\", \"Custom\"]\n duration_change = StringVar()\n duration_change.set(\"Select your duration:\")\n duration_change_option = OptionMenu(self.root,duration_change,*choices)\n duration_change_option.config(width=45,bd=0,bg=\"snow\",state=\"disabled\")\n duration_change_option.place(x=300,y=480)\n\n sign_button = self.button_maker(self.root)\n sign_button.config(text=\"SIGN\",state=\"disabled\")\n sign_button.place(x=300, y=555, width=200, height=50)\n\n def selection(*args):\n if duration_change.get() == \"Custom\":\n duration = simpledialog.askinteger(\"Duration\",\"Enter Duration in Months: \")\n if duration:\n self.duration = duration\n self.button_destroyed = True\n sign_button.config(state=\"active\",command=lambda :[(self.__sign_contract2()),self.__messageBox2(),self.root.destroy(),self.root2.deiconify()])\n duration_change_option.config(state=\"disabled\")\n contract_change_options.config(state=\"disabled\")\n else:\n self.duration = int(duration_change.get())\n self.button_destroyed = True\n sign_button.config(state=\"active\",command=lambda : [(self.__sign_contract2()),self.__messageBox2(),self.root.destroy(),self.root2.deiconify()])\n\n\n def my_show(*args):\n if \"Yes\" in contract_change.get():\n duration_change_option.config(state=\"active\")\n self.default = False\n sign_button.config(state=\"disabled\")\n else:\n duration_change_option.config(state=\"disabled\")\n self.default = True\n self.button_destroyed = True\n sign_button.config(state=\"active\",command=lambda:[(self.__sign_contract2()),self.__messageBox2(),self.root.destroy(),self.root2.deiconify()])\n\n\n contract_change.trace('w',my_show)\n duration_change.trace('w',selection)",
"def choose_option(friendly,enemy,opt1=\"Fight\",opt2=\"Bag\",opt3=\"Pokemon\",opt4 = \"Run\"):\n background_color = blit_background()[1]\n blit_friendly(friendly)\n blit_enemy(enemy)\n blit_health(friendly,enemy)\n pygame.display.update()\n pause(friendly,enemy,3) #to stop the click from 1st menu selecting option in second\n mouse_pos = 0,0\n while True:\n event_check(False, friendly,enemy)\n blit_background()\n opt_1 = pygame.draw.rect(screen,((background_color)),(60,540,300,70))\n blit_text(opt1,(70,545))\n opt_3 = pygame.draw.rect(screen,(background_color),(60,615,300,70))\n blit_text(opt2,(70,620))\n opt_2 = pygame.draw.rect(screen,(background_color),(360,540,300,70))\n blit_text(opt3,(370,545))\n opt_4 = pygame.draw.rect(screen,(background_color),(360,615,300,70))\n blit_text(opt4,(370,620))\n mouse_pos = get_click()\n blit_friendly(friendly)\n blit_enemy(enemy)\n blit_health(friendly,enemy)\n blit_text(\"What will you do?\",(800,580))\n pygame.display.update()\n if opt_1.collidepoint(mouse_pos):\n option = 1\n break\n elif opt_2.collidepoint(mouse_pos):\n option = 2\n break\n elif opt_3.collidepoint(mouse_pos):\n option = 3\n break\n elif opt_4.collidepoint(mouse_pos):\n option = 4\n break\n pygame.display.update()\n return option",
"def __init__(self, board):\r\n\r\n super(ViaStitchingDialog, self).__init__(None)\r\n self.viagroupname = None\r\n self.SetTitle(_(u\"{0} v{1}\").format(__plugin_name__, __version__))\r\n self.Bind(wx.EVT_CLOSE, self.onCloseWindow)\r\n self.m_btnCancel.Bind(wx.EVT_BUTTON, self.onCloseWindow)\r\n self.m_btnOk.Bind(wx.EVT_BUTTON, self.onProcessAction)\r\n self.m_btnClear.Bind(wx.EVT_BUTTON, self.onClearAction)\r\n self.board = board\r\n self.randomize = False\r\n self.pcb_group = None\r\n self.clearance = 0\r\n self.board_edges = []\r\n self.config_layer = 0\r\n self.config_textbox = None\r\n self.area = None\r\n self.net = None\r\n self.config = {}\r\n\r\n self.getConfigLayer()\r\n\r\n for d in pcbnew.GetBoard().GetDrawings():\r\n if d.GetLayerName() == 'Edge.Cuts':\r\n self.board_edges.append(d)\r\n if d.GetLayerName() == __plugin_config_layer_name__:\r\n try:\r\n new_config = json.loads(d.GetText())\r\n if __plugin_name__ in new_config.keys():\r\n self.config_textbox = d\r\n self.config = new_config\r\n except (JSONDecodeError, AttributeError):\r\n pass\r\n\r\n\r\n # Use the same unit set int PCBNEW\r\n self.ToUserUnit = None\r\n self.FromUserUnit = None\r\n units_mode = pcbnew.GetUserUnits()\r\n if units_mode == -1:\r\n wx.MessageBox(_(u\"Not a valid frame\"))\r\n self.Destroy()\r\n return\r\n\r\n # Check for selected area\r\n if not self.GetAreaConfig():\r\n wx.MessageBox(_(u\"Please select a valid area\"))\r\n self.Destroy()\r\n return\r\n\r\n # Populate nets checkbox\r\n self.PopulateNets()\r\n\r\n self.ToUserUnit = GUI_defaults[\"to_units\"][units_mode]\r\n self.FromUserUnit = GUI_defaults[\"from_units\"][units_mode]\r\n self.m_lblUnit1.SetLabel(_(GUI_defaults[\"unit_labels\"][units_mode]))\r\n self.m_lblUnit2.SetLabel(_(GUI_defaults[\"unit_labels\"][units_mode]))\r\n\r\n defaults = self.config.get(self.area.GetZoneName(), {})\r\n self.viagroupname = __viagroupname_base__ + self.area.GetZoneName()\r\n\r\n # Search trough groups\r\n for group in self.board.Groups():\r\n if group.GetName() == self.viagroupname:\r\n self.pcb_group = group\r\n\r\n self.m_txtVSpacing.SetValue(defaults.get(\"VSpacing\", GUI_defaults[\"spacing\"][units_mode]))\r\n self.m_txtHSpacing.SetValue(defaults.get(\"HSpacing\", GUI_defaults[\"spacing\"][units_mode]))\r\n self.m_txtClearance.SetValue(defaults.get(\"Clearance\", \"0\"))\r\n self.m_chkRandomize.SetValue(defaults.get(\"Randomize\", False))\r\n\r\n # Get default Vias dimensions\r\n via_dim_list = self.board.GetViasDimensionsList()\r\n\r\n if via_dim_list:\r\n via_dims = via_dim_list.pop()\r\n else:\r\n wx.MessageBox(_(u\"Please set via drill/size in board\"))\r\n self.Destroy()\r\n\r\n self.m_txtViaSize.SetValue(\"%.6f\" % self.ToUserUnit(via_dims.m_Diameter))\r\n self.m_txtViaDrillSize.SetValue(\"%.6f\" % self.ToUserUnit(via_dims.m_Drill))\r\n via_dim_list.push_back(via_dims)\r\n self.overlappings = None",
"def game_help(self):\n QtGui.QMessageBox.about(self, \"How to Play game\",\n \"<b>How to Play</b><br>\"\n \"The rules in Minesweeper are simple:<br><br>\"\n \"<b>1.</b> Uncover a mine and that's end of game <br>\"\n \"<b>2.</b> Uncover empty cell and \"\n \"it opens surrounding empty cells too<br>\"\n \"<b>3.</b> Uncover a number \"\n \"and it tells you how many mines are hidden in\"\n \"surrounding 8 cells.<br>\"\n \"<b>4.</b> Use this information to \"\n \"deduce which squares are safe to click.<br>\"\n \"<b>5.</b> Uncover all cells and \"\n \"mark cells with mine to win the game <br><br>\"\n\n \"<b>Hints</b> <br>\"\n \"<b>1.Mark as Mine </b> <br>\"\n \" If you suspect that cell as mine, \"\n \"right click twice to put a question mark.<br>\"\n \"<b>2.Study surrounding cells </b><br>\"\n \" Study all neighbour cells before opening any cell\"\n \"to make sure whether its mine or not.<br><br>\"\n \"Enjoy the game :) <br>\")",
"def menuItem(*args):\n\toptionsWindow()",
"def resize(self, rows, cols, minecount, event=None):\n self.clearFrame()\n #reset relevant instance variables\n self.rows = rows\n self.cols = cols\n self.numMines = minecount\n self.numChecked = 0\n self.numFlags = 0\n self.minesArmed = False\n self.startTime = None\n\n #re-add all elements on the board\n self.setUpFrame()\n self.addTiles(rows,cols,minecount)\n\n #resize window to fit the new board size\n windowWidth = str(20*cols+40)\n windowHeight = str(20*rows+60)\n self.parent.minsize(windowWidth, windowHeight)\n self.parent.maxsize(windowWidth, windowHeight)\n self.parent.geometry(windowWidth+'x'+windowHeight)",
"def _onoptions(self):\n\n dlg = OptionsDialog(self)\n\n if dlg.exec_():\n self._storeoptions(dlg)",
"def setupPopUp(self):\n\n for driver in self.drivers:\n if self.sender() != self.drivers[driver]['uiSetup']:\n continue\n\n # calculate geometry\n geometry = self.pos().x(), self.pos().y(), self.height(), self.width()\n # get all available frameworks\n framework = self.drivers[driver]['class'].run.keys()\n # selecting the device type\n deviceType = self.drivers[driver]['deviceType']\n\n self.popupUi = DevicePopup(geometry=geometry,\n driver=driver,\n deviceType=deviceType,\n framework=framework,\n data=self.driversData)\n # memorizing the driver we have to update\n self.popupUi.exec_()\n if self.popupUi.returnValues.get('close', 'cancel') == 'cancel':\n # when cancel nothing happens\n return False\n else:\n # when ok, we have to further work\n break\n\n # check if copy are made. if so, than restart all drivers related\n if self.popupUi.returnValues.get('copyIndi', False):\n for driver in self.drivers:\n if not self.drivers[driver]['class'].framework == 'indi':\n continue\n self.dispatch(driverName=driver)\n elif self.popupUi.returnValues.get('copyAlpaca', False):\n for driver in self.drivers:\n if not self.drivers[driver]['class'].framework == 'alpaca':\n continue\n self.dispatch(driverName=driver)\n else:\n # if we choose a driver and it's available, we select it from drop down\n if self.popupUi.returnValues.get('framework', '') == 'indi':\n index = self.findIndexValue(self.drivers[driver]['uiDropDown'], 'indi')\n self.drivers[driver]['uiDropDown'].setCurrentIndex(index)\n self.dispatch(driverName=driver)\n\n return True",
"def draw_options(self, window, font, xpos, ypos):\n\n for i, option in enumerate(self.options):\n if not self.is_selected_option(xpos, ypos, i):\n option_label = font.render(option[0], 1, self.option_colour1)\n window.blit(\n option_label,\n (\n self.x + (self.width // 2 - option_label.get_width() // 2),\n self.y\n + (self.height // 2 - option_label.get_height() // 2)\n + self.height * i\n + self.height,\n ),\n )\n\n else:\n # Generated first so it's width can be used in the background placement calculations\n option_label = font.render(option[0], 1, self.option_colour2)\n\n # x defined in that way so the background is centered under the origianl dropdown button\n # width is multiplied by 1.1 to give each option a bit more background on it's sides\n pygame.draw.rect(\n window,\n self.option_colour1,\n (\n self.x\n + (self.width // 2 - option_label.get_width() * 1.1 // 2), # x\n self.y + self.height + i * self.height, # y\n option_label.get_width() * 1.1, # width\n self.height,\n ), # height\n 0,\n )\n\n window.blit(\n option_label,\n (\n self.x + (self.width // 2 - option_label.get_width() // 2),\n self.y\n + (self.height // 2 - option_label.get_height() // 2)\n + self.height * i\n + self.height,\n ),\n )",
"def win_popup(self):\n content = BoxLayout(orientation='vertical')\n message_label = Label(text=self.win_message)\n button_layer = BoxLayout(orientation='horizontal')\n dismiss_button = Button(text='QUIT', size_hint=(1, 1))\n next_button = Button(id='next', text='NEXT ROUND', size_hint=(1, 1))\n button_layer.add_widget(dismiss_button)\n button_layer.add_widget(next_button)\n content.add_widget(message_label)\n content.add_widget(button_layer)\n popup = Popup(title=self.winner,\n content=content, size_hint=(0.3, 0.25))\n dismiss_button.bind(on_release=(lambda a: self.exit_game()),\n on_press=popup.dismiss)\n next_button.bind(on_release=(lambda a: self.next_round()),\n on_press=popup.dismiss)\n popup.open()",
"def setwinsize(self, rows, cols):",
"def _update_visual(self):\n\n # Check if the options are opened\n if self._is_opened:\n\n self._option_border.set_color(self.border_press)\n\n else:\n\n self._option_border.set_color((0, 0, 0, 0))",
"def game_main():\n # global variables that will be used in other functions\n global GAME_CLOCK, RENDER_WINDOW, GAME_PUZZLE, BOARD, MOVE_COUNT_BOX, MOVE_COUNT, PUZZLE_COPY, RESET_BTN, CHECK_BTN, NEW_BTN, K_VAL, SOLVED, RESULT, RND_TOG,N_MODE, R_BTN, N_BTN\n \n #Quickly Solvable Games\n #These all are solvable in less than 15 moves\n #I used these to keep the processing time lower\n quick_games = [[[4,1,3],[None, 2, 5], [7, 8, 6]],\n [[4,1,3],[2, None, 5], [7, 8, 6]],\n [[4,1,3],[2, 8, 5], [7, None, 6]],\n [[4,1,None],[2, 8, 3], [7, 6, 5]]]\n\n random_mode = False # toggle random mode\n \n GAME_CLOCK = pygame.time.Clock() # clock will assist with screen updates\n\n RENDER_WINDOW = pygame.display.set_mode((WIN_WIDTH, WIN_HEIGHT)) # set render window function \n\n puzzle_select = random.randint(0, 3) # generate a random number between 0 and 3\n \n GAME_PUZZLE = generate_new_puzzle() # generate new puzzle for the game \n\n # set toggle mode\n if random_mode is True:\n RND_TOG = 'X'\n N_MODE = ''\n else:\n RND_TOG = ''\n N_MODE = 'X'\n GAME_PUZZLE.puzzle = quick_games[random.randint(0, 3)] # pick a quick solve puzzle \n\n PUZZLE_COPY = copy.deepcopy(GAME_PUZZLE) # make a copy of the puzzle for resetting\n\n K_VAL = '' # set k value text to nothing\n\n SOLVED = '' # set solved text to nothing\n\n MOVE_COUNT = '0' # initialize move count\n\n run_game = True # establish case for game loop\n\n # MAIN GAME LOOP\n while run_game: \n \n\n # Draw Game Screen and GUI\n # ============\n draw_game() \n\n # Main Event Handler Loop\n # =======================\n for event in pygame.event.get(): # check for user interaction\n\n # check if user is exiting game\n if event.type == pygame.QUIT:\n pygame.quit() # deactivate Pygame Libraries (undoes init())\n sys.exit() # terminate program\n\n # Mouse click even listener\n if event.type == MOUSEBUTTONDOWN:\n\n position = pygame.mouse.get_pos() # mouse position\n tile_index = tile_clicked(position) # gets tile index if clicked\n \n # NUMBER TILE CLICKED\n if tile_index:\n \n # get blank position\n blank_position = GAME_PUZZLE.get_blank_pos() \n\n # if the tile clicked was not the blank tile\n if tile_index != blank_position:\n move_direction = get_move_type(tile_index, blank_position) # determine move direction\n\n GAME_PUZZLE.make_move(move_direction) # make move\n MOVE_COUNT = str(int(MOVE_COUNT) + 1)\n draw_puzzle() # render update\n \n # RESET BUTTON CLICKED\n if RESET_BTN.collidepoint(position):\n\n # Reset Puzzle\n GAME_PUZZLE = copy.deepcopy(PUZZLE_COPY)\n\n # Reset Game Values\n MOVE_COUNT = '0'\n SOLVED = ''\n K_VAL = ''\n\n # Render Update \n draw_puzzle() \n\n # NEW GAME BUTTON CLICKED\n if NEW_BTN.collidepoint(position):\n\n if random_mode is True:\n # Generate NEW\n GAME_PUZZLE = generate_new_puzzle()\n else:\n # pick a quick solve puzzle\n GAME_PUZZLE.puzzle = quick_games[random.randint(0, 3)] \n\n # make a copy of the puzzle for resetting\n PUZZLE_COPY = copy.deepcopy(GAME_PUZZLE)\n\n # Reset Game Values\n MOVE_COUNT = '0'\n SOLVED = ''\n K_VAL = ''\n\n # Render Update \n draw_puzzle() \n \n # CHECK BUTTON WAS CLICKED\n if CHECK_BTN.collidepoint(position):\n \n result = None # holds the result of the outcome\n moves = 0\n\n # check for a k - value\n if K_VAL != '':\n k = int(K_VAL) # transform to integer\n\n outcome = vpuz.build_move_tree(GAME_PUZZLE, k) # determine if solvable in k moves\n \n if outcome[0] is True: # Game Was Solved \n MOVE_COUNT= str(outcome[3].generation) # set number of moves\n SOLVED = ','.join(vpuz.get_solving_moves(outcome[3])) 
# join returned list into comma separated string\n result = 'Solvable! Winning Moves: ' + SOLVED\n SOLVED = result\n elif outcome[1] is True:\n SOLVED = 'Unsolvable in ' + K_VAL + ' moves...' # not solvable in k moves\n \n # Random mode was enabled\n if R_BTN.collidepoint(position):\n if random_mode is True:\n RND_TOG = ''\n N_MODE = 'X'\n random_mode = False\n else:\n RND_TOG = 'X'\n N_MODE = ''\n random_mode = True\n\n # Normal mode was enabled\n if N_BTN.collidepoint(position):\n if random_mode is True:\n RND_TOG = ''\n N_MODE = 'X'\n random_mode = False\n else:\n RND_TOG = 'X'\n N_MODE = ''\n random_mode = True\n \n \n # Key Pressed Event Listener\n if event.type == pygame.KEYDOWN:\n\n #backspace\n if event.key == pygame.K_BACKSPACE:\n K_VAL = K_VAL[:-1] # subtract one character from end\n elif event.key == pygame.K_DELETE:\n K_VAL = '' # delete number \n else:\n K_VAL += event.unicode # otherwise enter number\n\n\n pygame.display.set_caption(\"Eight Puzzle: By Joseph Polaski\")\n pygame.display.flip()\n GAME_CLOCK.tick(30) # limit to 30 Frames per second",
"def confirm(self):\n\t\t# TODO: write the current control scheme to config.ini\n\t\tdefault_controls = self.default_controls()\n\t\tconfig = ConfigParser.RawConfigParser()\n\t\tconfig.add_section('controls')\n\t\tconfig.add_section('default_controls')\n\t\tfor i in xrange(len(CONTROLS_OPTIONS) - 2): \n\t\t\tconfig.set('controls', CONTROLS_OPTIONS[i], self.control_map[i])\n\t\t\tconfig.set('default_controls', CONTROLS_OPTIONS[i], default_controls[i] )\n\t\twith open('config.ini', 'wb') as configfile: config.write(configfile)\n\t\tself.player.current_level.screen_manager.switch_to_options_screen(self.player)"
] | [
"0.6645034",
"0.6050015",
"0.59368014",
"0.58010167",
"0.57743067",
"0.571248",
"0.5672358",
"0.56640756",
"0.5635978",
"0.5627065",
"0.5618419",
"0.56161207",
"0.55233985",
"0.5507695",
"0.54934543",
"0.54858404",
"0.54275507",
"0.5421459",
"0.54163617",
"0.5387098",
"0.53785855",
"0.5365017",
"0.53416693",
"0.5335712",
"0.533234",
"0.5321121",
"0.5317057",
"0.5308953",
"0.53051686",
"0.52883387"
] | 0.7077302 | 0 |