query (string) | document (string) | metadata (dict) | negatives (sequence of 30) | negative_scores (sequence of 30) | document_score (string) | document_rank (string, 2 classes) |
---|---|---|---|---|---|---|
Process response from authority API and return a list of human-readable addresses for any matches. Raise GeoAuthorityError with an appropriate message otherwise. | def parse_location_response(self, location_json):
street_addresses = []
try:
if location_json['status'] == 'ZERO_RESULTS':
return []
elif location_json['status'] != 'OK':
self.logger.error("Unexpected response status: %s", location_json['status'])
raise GeoAuthorityError("Unexpected response body (status)")
for result_json in location_json['results']:
if 'formatted_address' in result_json:
street_addresses.append(result_json['formatted_address'])
except (KeyError, TypeError):
raise GeoAuthorityError("Unexpected response body (structure)")
return street_addresses | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def addresses(self):\n if 'Ward Matters' in self.topics or 'City Matters' in self.topics:\n stname_pattern = \"(\\S*[a-z]\\S*\\s){1,4}?\"\n sttype_pattern = \"(ave|blvd|cres|ct|dr|hwy|ln|pkwy|pl|plz|rd|row|sq|st|ter|way)\"\n st_pattern = stname_pattern + sttype_pattern\n\n addr_pattern = \"(\\d(\\d|-)*\\s%s)\" %st_pattern\n intersec_pattern = exp = \"((?<=\\sat\\s)%s\\s?and\\s?%s)\" %(st_pattern, st_pattern)\n\n pattern = \"(%s|%s)\" %(addr_pattern, intersec_pattern)\n\n matches = re.findall(pattern, self.description, re.IGNORECASE)\n\n addresses = [m[0] for m in matches]\n return addresses\n\n return []",
"def test_list_address(self):\n\n data = [\n dict(\n id=self.address.id,\n address_line1='random address 1',\n address_line2='',\n postal_code='RAN DOM',\n city='random city',\n state_province=dict(\n iso_code=self.random_state_province.iso_code,\n name=self.random_state_province.name,\n ),\n country=dict(\n iso_code=self.random_country.iso_code,\n name=self.random_country.name,\n ),\n ),\n ]\n\n self.client.force_authenticate(user=self.user)\n\n response = self.client.get(reverse('location:addresses'))\n\n self.assertEqual(json.loads(response.content)['results'], data)\n self.assertEqual(json.loads(response.content)['count'], 1)\n self.assertEqual(response.status_code, status.HTTP_200_OK)",
"def parse_address_from_geocoding_response(geocoded_data: dict) -> str:\n return geocoded_data[\n 'response'][\n 'GeoObjectCollection'][\n 'featureMember'][0][\n 'GeoObject'][\n 'metaDataProperty'][\n 'GeocoderMetaData'][\n 'text']",
"def parse_elevations_response(elevations_response):\n return [result[\"elevation\"] for result in elevations_response]",
"def suggestions(self, input, borough_code=None):\n parsed = parser.address(input)\n if borough_code:\n parsed['BOROUGH_CODE'] = borough_code\n self.similiar_names = []\n self.results = []\n if parsed['PHN'] and parsed['STREET']:\n if not parsed['BOROUGH_CODE'] and not parsed['ZIP']:\n # iterate borocodes\n for x in range(1, 6):\n self._geocode(phn=parsed['PHN'], street=parsed['STREET'], borough_code=x)\n # try address with borough code if present\n elif parsed['BOROUGH_CODE']:\n self._geocode(phn=parsed['PHN'], street=parsed['STREET'], borough_code=parsed['BOROUGH_CODE'])\n # try address with zip code if present\n elif parsed['ZIP']:\n self._geocode(phn=parsed['PHN'], street=parsed['STREET'], zip=parsed['ZIP'])\n # validate and retrieve any addresses\n if len(self.similiar_names):\n for name in self.similiar_names:\n self._geocode(phn=parsed['PHN'], street=name['street'], borough_code=name['borough_code'])\n if None in self.results:\n self.results = list(filter(lambda v: v is not None, self.results))\n\n return self.results",
"def validate(self, params, address_input_data):\n processed_address_list = []\n # check avoids redundancy for combined 'forward geocode and validate' \n # option as API does both by default\n if self.__is_address_list_processed:\n processed_address_list = address_input_data\n else:\n request_list = self.__prepare_smarty_request_list(address_input_data)\n processed_address_list = self.__process_smarty_request_list(request_list,\n address_input_data )\n self.__is_address_list_processed = True\n print(f'< {self.num_addresses_processed} addresses processed >')\n return processed_address_list",
"def parse_response(response):\n # Use the first result\n res = response['results'][0]\n\n # Store attributes\n geodata = dict()\n geodata['lat'] = res['geometry']['location']['lat']\n geodata['lng'] = res['geometry']['location']['lng']\n geodata['address'] = res['formatted_address']\n\n for output in res['address_components']:\n if output['types'][0] == 'postal_town':\n geodata['postal_town'] = output['long_name']\n elif output['types'][0] == 'administrative_area_level_2':\n geodata['administrative_area_level_2'] = output['long_name']\n elif output['types'][0] == 'administrative_area_level_1':\n geodata['administrative_area_level_1'] = output['long_name']\n elif output['types'][0] == 'country':\n geodata['country'] = output['long_name']\n elif output['types'][0] == 'route':\n geodata['route'] = output['long_name']\n else:\n continue\n\n return geodata",
"def get_locations(response_args, f_users):\n\n with open(f_users) as f:\n users = f.readlines()\n user_locations = {}\n for user in users:\n status = requests.get(\"https://api.intra.42.fr/v2/users/\" + user.strip() + \"/locations?\" + \"&\".join(response_args))\n connection_status = check_connection_status(status)\n if connection_status:\n response = status.json()\n if response:\n print user.strip() + ' is at computer: ' + response[0]['host']\n else:\n print user.strip() + \" is not logged onto a computer.\"\n else:\n print user.strip() + \" is an invalid user.\"",
"def build_addresses(self):\n \n from ambry.geo.geocoders import DstkGeocoder\n\n facilities = self.partitions.find(table='facilities')\n\n def address_gen():\n for row in facilities.query(\"SELECT * FROM facilities\"):\n address = \"{}, {}, {} {}\".format(row['dba_address1'], row['dba_city'], 'CA', row['dba_zip_code'])\n yield (address, row)\n\n dstk_service = self.config.service('dstk')\n \n dstk_gc = DstkGeocoder(dstk_service, address_gen())\n \n p = self.partitions.find_or_new(table='facilities_addresses')\n p.clean()\n \n lr = self.init_log_rate(500)\n \n with p.inserter() as ins:\n for i, (k, r, inp_row) in enumerate(dstk_gc.geocode()):\n lr(\"Addresses \"+str(i))\n r['facilities_id'] = inp_row['id']\n ins.insert(r)",
"def test_retrieve_l_organization_locations(self):\n pass",
"def extract_ip(self, authorities, additionals, hostname):\n addresses = []\n remaining = []\n for ns in [ans for ans in authorities if ans.type_ == Type.NS]:\n #TODO check if this ns could be useful\n ns_name = ns.rdata.data\n found = False\n for answer in [ans for ans in additionals if ans.type_ == Type.A]:\n if answer.name == ns_name:\n addresses.append(answer.rdata.data)\n if not found:\n remaining.append(ns)\n return addresses, remaining",
"def lookup(addr, num, street, city, code, geo_dict, failure_set):\n try:\n address_url = \"https://geocoding.geo.census.gov/geocoder/locations/address?\" + \\\n \"street=\" + str(num) + \"+\" + street.replace(\" \", \"+\") + \"&city=\" + city + \"&zip=\" + \\\n str(code) + \"&benchmark=9&format=json\"\n geo_data = json.load(req.urlopen(address_url).decode('utf-8'))['result']\n except Exception:\n try:\n address_url = \"https://geocoding.geo.census.gov/geocoder/locations/address?\" + \\\n \"street=\" + str(num) + \"+\" + street.replace(\" \", \"+\") + \"&city=\" + city + \"&zip=\" + \\\n str(code) + \"&benchmark=9&format=json\"\n geo_data = json.loads(req.urlopen(address_url).read().decode('utf-8'))['result']\n except Exception as e:\n print(e, addr)\n failure_set.add(addr)\n return None\n if len(geo_data['addressMatches']) == 0:\n print(addr, ': Failure')\n failure_set.add(addr)\n return None\n print(addr, ': Success')\n location = geo_data['addressMatches'][0]['coordinates']\n latlong = ','.join([str(location['y']), str(location['x'])])\n geo_dict[addr] = latlong\n return tuple(float(geo) for geo in latlong.split(','))",
"def extract_data_from_google_response(geocoding_response):\n root = ET.fromstring(geocoding_response)\n for result in root.findall('result'):\n data = result.find('formatted_address').text\n if data != '':\n return data\n return 'Dirección desconocida'",
"def get_addrs(self) -> List[Multiaddr]:",
"def get_location(coordinates):\n location_info = gmaps.reverse_geocode(latlng=coordinates)\n location_list = list()\n for location in location_info:\n if \"locality\" in location[\"types\"]:\n return location[\"formatted_address\"]\n # location_list.append(location[\"formatted_address\"])\n # return location_list",
"def __getAddresses(parsed: BeautifulSoup) -> list:\n\n # Addresses container\n address_divs = parsed.find_all('div', class_='mailer')\n\n # Building RegEx for phone number\n # The following RegEx extracts phone numbers in the following formats:\n # 1. (###) ###-####\n # 2. ###-###-####\n # 3. ##########\n phone_number_regex = re.compile(\n r'(\\(\\d{3}\\) \\d{3}-\\d{4}|\\d{3}-\\d{3}-\\d{4}|\\d{10})')\n\n # List for final addresses\n addresses = list()\n\n for address in address_divs:\n # Create dict for address\n address_parsed = dict()\n # Split text by newline\n address_items = address.text.split('\\n')\n # Removing leading and trailing spaces\n address_items = [i.strip() for i in address_items]\n\n # Variable to store street address\n street_address = ''\n\n # Iterate through each line\n for idx, address_item in enumerate(address_items):\n # First line is address type\n if idx == 0:\n address_parsed['type'] = address_item\n continue\n\n # Check if line has phone number\n phone_matches = phone_number_regex.findall(address_item)\n if len(phone_matches) == 1:\n # Stripping non-digit characters from phone number\n phone_number = re.sub('[^0-9]', '', phone_matches[0])\n address_parsed['phone'] = phone_number\n continue\n \n # If no number, add to address line\n street_address += address_item.strip() + ' '\n \n # Adding street address to parsed address\n address_parsed['street_address'] = street_address.strip()\n\n # Adding parsed address to addresses master list\n addresses += [address_parsed]\n\n return addresses",
"def _parse_location(self, response):\n if \"1700 S. Wentworth\" in response.text:\n return {\n \"address\": \"1700 S. Wentworth Avenue, Chicago, Illinois\",\n \"name\": \"Leonard M. Louie Fieldhouse\",\n }\n elif \"Zoom\" in response.text:\n return {\n \"address\": \"\",\n \"name\": \"Zoom\",\n }\n else:\n raise ValueError(\"Meeting address has changed\")",
"def collect_results(name: str) -> dict:\n full_response = {}\n\n target_name = dns.name.from_text(name)\n\n # lookup CNAME\n response = lookup(target_name, dns.rdatatype.CNAME)\n cnames = []\n if response is not None:\n for answers in response.answer:\n for answer in answers:\n cnames.append({\"name\": answer, \"alias\": name})\n\n # lookup A\n response = lookup(target_name, dns.rdatatype.A)\n arecords = []\n\n if response is not None:\n for answers in response.answer:\n a_name = answers.name\n for answer in answers:\n if answer.rdtype == 1: # A record\n arecords.append({\"name\": a_name, \"address\": str(answer)})\n\n # lookup AAAA\n response = lookup(target_name, dns.rdatatype.AAAA)\n aaaarecords = []\n\n if response is not None:\n for answers in response.answer:\n aaaa_name = answers.name\n for answer in answers:\n if answer.rdtype == 28: # AAAA record\n aaaarecords.append({\"name\": aaaa_name, \"address\": str(answer)})\n\n # lookup MX\n response = lookup(target_name, dns.rdatatype.MX)\n mxrecords = []\n if response is not None:\n for answers in response.answer:\n mx_name = answers.name\n for answer in answers:\n if answer.rdtype == 15: # MX record\n mxrecords.append({\"name\": mx_name,\n \"preference\": answer.preference,\n \"exchange\": str(answer.exchange)})\n\n full_response[\"CNAME\"] = cnames\n full_response[\"A\"] = arecords\n full_response[\"AAAA\"] = aaaarecords\n full_response[\"MX\"] = mxrecords\n\n return full_response",
"def test_autocomplete_locations_urls(self):\n r = self.base_check_request(\"get\", \"autocomplete/locations/\")\n self.assertIsInstance(r, list)\n self.assertEqual(len(r), 10, \"Invalid default count\")\n\n ac_keys = ['ancestors', 'id', 'is_region', 'name', 'prepositional_name',\n 'slug', 'text_for_apartments_search',\n 'text_for_complexes_search', 'type_name']\n # ac_keys_full = ac_keys + [\"metro_stations\"]\n for ac in r:\n # check response objects structure\n self.assertListEqual(sorted(list(ac.keys())), ac_keys)\n\n # check response types\n # self.check_list_item_keys(ac[\"ancestors\"], ac_keys_full)\n self.assertIsInstance(ac['id'], int)\n self.assertIsInstance(ac['is_region'], bool)\n self.assertIsInstance(ac['name'], str)\n self.assertIsInstance(ac['prepositional_name'], str)\n self.assertIsInstance(ac['slug'], str)\n self.assertIsInstance(ac['text_for_apartments_search'], (str, type(None)))\n self.assertIsInstance(ac['text_for_complexes_search'], (str, type(None)))\n self.assertIsInstance(ac['type_name'], str)",
"def geocode(postcode):\n key = current_app.config.get(\"OS_PLACES_API_KEY\")\n formatted_addresses = FormattedAddressLookup(key=key).by_postcode(postcode)\n response = [{\"formatted_address\": address} for address in formatted_addresses if address]\n return Response(json.dumps(response), mimetype=\"application/json\")",
"def _handle_tracker_contact(self, response):\n peers = response['peers']\n self._try_peers(peers)",
"def process_response_data(self, response):\n response = response.replace('false', \"'false'\")\n response = response.replace('true', \"'true'\")\n response = eval(response)\n locations = response[\"locationSearchResponse\"][\"locations\"]\n\n atms = []\n branches = []\n\n for loc in locations:\n loc_type = loc[\"apiStructType\"]\n\n if loc_type==\"atm\":\n atm_dict = loc[\"atm\"]\n atm = self.get_item_details(atm_dict, self.atm_headers)\n self.ATMS[atm[0]] = atm\n\n elif loc_type==\"brc\":\n branch_dict = loc[\"brc\"]\n brc = self.get_item_details(branch_dict, self.branch_headers)\n self.BRANCHES[brc[0]] = brc",
"def _parse_location(self, response):\n loc_parts = [\n re.sub(r\"\\s+\", \" \", part).strip()\n for part in response.css(\n \"#contact-info .right-col-content .content *::text\"\n ).extract()\n if part.strip()\n ]\n return {\n \"name\": loc_parts[3],\n \"address\": \" \".join(loc_parts[4:]).replace(\" ,\", \",\").strip(),\n }",
"def get_supervisor_addresses(self) -> List[str]:",
"def get_addresses_by_account(account):\n try:\n stdout = subprocess.check_output([\"litecoin-cli\", \"getaddressesbyaccount\", account])\n addresses = json.loads(stdout.decode())\n except:\n sys.exit(1)\n\n return addresses",
"def collect_results(name: str) -> dict:\n full_response = {}\n target_name = dns.name.from_text(name)\n # lookup CNAME\n response = lookup(target_name, dns.rdatatype.CNAME)\n cnames = []\n for answers in response.answer:\n for answer in answers:\n cnames.append({\"name\": answer, \"alias\": name})\n # lookup A\n response = lookup(target_name, dns.rdatatype.A)\n arecords = []\n for answers in response.answer:\n a_name = answers.name\n for answer in answers:\n if answer.rdtype == 1: # A record\n arecords.append({\"name\": a_name, \"address\": str(answer)})\n # lookup AAAA\n response = lookup(target_name, dns.rdatatype.AAAA)\n aaaarecords = []\n for answers in response.answer:\n aaaa_name = answers.name\n for answer in answers:\n if answer.rdtype == 28: # AAAA record\n aaaarecords.append({\"name\": aaaa_name, \"address\": str(answer)})\n # lookup MX\n response = lookup(target_name, dns.rdatatype.MX)\n mxrecords = []\n for answers in response.answer:\n mx_name = answers.name\n for answer in answers:\n if answer.rdtype == 15: # MX record\n mxrecords.append({\"name\": mx_name,\n \"preference\": answer.preference,\n \"exchange\": str(answer.exchange)})\n\n full_response[\"CNAME\"] = cnames\n full_response[\"A\"] = arecords\n full_response[\"AAAA\"] = aaaarecords\n full_response[\"MX\"] = mxrecords\n\n return full_response",
"def geocoding(address, API_KEY=API_KEY, GEOCODE_API_URL=GEOCODE_API_URL):\n # define the parameters of the search\n params = {\n 'address': '{}'.format(address),\n 'key': API_KEY\n }\n\n # Do the request and get the response data\n response = requests.get(GEOCODE_API_URL, params=params)\n response = response.json()\n\n geodata = parse_response(response)\n return geodata",
"def _parse_location(self, response):\n location_block = response.css(\".row.mt-4 > .col-12\")[0]\n location_items = location_block.css(\"p *::text\").extract()\n addr_items = [\n i.strip() for i in location_items if \"Building\" not in i and i.strip()\n ]\n name_items = [\n i.strip() for i in location_items if \"Building\" in i and i.strip()\n ]\n return {\n \"address\": \" \".join(addr_items),\n \"name\": \" \".join(name_items),\n }",
"def _set_search_addresses(self):\n if self._report_data and self._report_data['details']:\n for detail in self._report_data['details']:\n if detail.get('ownerGroups'):\n for group in detail['ownerGroups']:\n for owner in group['owners']:\n Report._format_address(owner['address'])\n if detail.get('location') and 'address' in detail['location']:\n Report._format_address(detail['location']['address'])\n if detail.get('notes'):\n for note in detail['notes']:\n if note.get('contactAddress'):\n Report._format_address(note['contactAddress'])\n elif note.get('givingNoticeParty') and note['givingNoticeParty'].get('address'):\n Report._format_address(note['givingNoticeParty']['address'])",
"def geocoding(address):\n AUTH = json.loads(open(\"auth.json\", \"r\").read())\n\n r = requests.get(f\"https://maps.googleapis.com/maps/api/geocode/json\", params={\n \"address\": address,\n \"key\": AUTH[\"GMAP_API\"]\n })\n\n if r.status_code == 200:\n r = r.json()\n results = r[\"results\"]\n if len(results) < 1:\n log.error(\"No result geocoding for %s\", address)\n return (-1, -1)\n\n result = results[0]\n proper_address = result[\"formatted_address\"]\n loc = result[\"geometry\"][\"location\"]\n lat = loc[\"lat\"]\n lng = loc[\"lng\"]\n\n return (proper_address, lat, lng)\n\n else:\n log.error(\"Error in Geocoding %s\", address)\n return (-1, -1)"
] | [
"0.5419127",
"0.52974874",
"0.5250207",
"0.5239093",
"0.520853",
"0.5162842",
"0.51276225",
"0.5108789",
"0.5096379",
"0.50797266",
"0.5039929",
"0.50187445",
"0.5002884",
"0.49940777",
"0.4963809",
"0.49536124",
"0.4948754",
"0.4948175",
"0.49268144",
"0.49164474",
"0.49149558",
"0.49106547",
"0.48927516",
"0.4875097",
"0.48665375",
"0.48662752",
"0.48614737",
"0.48604047",
"0.48561856",
"0.48543516"
] | 0.6365182 | 0 |
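The `metadata` cell above declares a single triplet objective over (`query`, `document`, `negatives`). As a minimal sketch of what that structure implies, the snippet below expands one record into anchor/positive/negative triplets; the `record` layout and the `make_triplets` helper are assumptions made for illustration, not tooling shipped with this dataset.

```python
# Hypothetical sketch: expand one record of this dump into (anchor, positive,
# negative) triplets, following the "triplet" objective declared in `metadata`.
# The `record` dict and `make_triplets` helper are illustrative only; this is
# not an official loader for the dataset.

def make_triplets(record):
    """Yield one (anchor, positive, negative) tuple per listed negative."""
    anchor = record["query"]              # natural-language docstring
    positive = record["document"]         # the matching code snippet
    for negative in record["negatives"]:  # the 30 non-matching snippets
        yield anchor, positive, negative

# Toy usage with a heavily truncated record:
record = {
    "query": "Process response from authority API ...",
    "document": "def parse_location_response(self, location_json): ...",
    "negatives": ["def addresses(self): ...", "def test_list_address(self): ..."],
}
print(len(list(make_triplets(record))))  # one triplet per negative -> 2
```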
Creates the three final Libraries. | def createIntermediateLibraries(LISTPRELIBRARIES, DICOLIBRARIES, CONFIG, DICOFINALCLASSIF):
#### Parse all the intermediate libraries files
for preLibrary in LISTPRELIBRARIES:
#### Retrieve the final classification name of the ET from the file name
finalClassification = os.path.basename(preLibrary).split(".fasta")[0]
#### Read and store the fasta sequences of the prelibraries
sequences=readInput.readFasta(preLibrary)
#### Parse all the sequences
for id in sequences:
#### Check the finalClassification of the sequences is in the ID
if finalClassification.lower() in id.lower():
DICOFINALCLASSIF[id]=finalClassification
applyFiltersForIntermediate(id, sequences, finalClassification, CONFIG, DICOLIBRARIES) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def createFinalLibraries(INTERMEDIATELIBRARIES, DICOLIBRARIES):\n\t#### Parse all the intermediate libraries files\n\tfor file in INTERMEDIATELIBRARIES:\n\t\tfileName = os.path.basename(file).split(\".fasta\")[0]\n\t\t#### Read and store the fasta sequences of the prelibraries\n\t\tsequences=readInput.readFasta(file)\n\t\t#### Save the three finals libraries\n\t\tsave.saveLibraries(sequences, DICOLIBRARIES)",
"def makeLibrary(self):\n #------------------------------------------ Instance for the output file\n outputFile = open(\"%s/%s\" % (self.sceneryPath,self.libTxtFileName),\"w\")\n #------------------------------------------------------ write the header\n for line in self.header:\n outputFile.write(\"%s\\n\" % (line))\n #------------------------------------------------- Loop over all folders\n packageContent = os.walk(self.sceneryPath)\n for folder in packageContent:\n for fileName in folder[2]:\n fileType = fileName.split(\".\")[-1]\n if fileType in self.objectTypes:\n realPath = folder[0][len(self.sceneryPath)+1:].replace(\"\\\\\",\"/\")\n filePath = \"%s/%s\" % (realPath,fileName)\n print filePath\n outputFile.write(\"EXPORT %s%s %s%s\\n\" % (self.libPrefix,filePath,self.realPathPrefix,filePath))\n outputFile.close()",
"def _shared_library_in_2steps(env):\r\n\r\n if not config.shared_library_1st in env['BUILDERS']:\r\n # The 1st builder\r\n shlinkcom_name = config.shared_library_1st + \"COM\"\r\n env[shlinkcom_name] = \"${TEMPFILE('$AR /DEF $ARFLAGS /OUT:$TARGET $SOURCES')}\"\r\n ar_action = SCons.Action.Action(\"$\" + shlinkcom_name, \"building '$TARGET' from '$SOURCE'\")\r\n emitter_name = config.shared_library_1st + \"EMITTER\"\r\n env[emitter_name] = [__lib_export_emitter]\r\n env[\"BUILDERS\"][config.shared_library_1st] = SCons.Builder.Builder(\r\n action=ar_action,\r\n emitter=\"$\" + emitter_name,\r\n prefix=\"$LIBPREFIX\",\r\n suffix=\"$LIBSUFFIX\",\r\n src_suffix=\"$SHOBJSUFFIX\",\r\n src_builder=\"SharedObject\")\r\n\r\n if not config.shared_library_2nd in env['BUILDERS']:\r\n # The 2nd builder\r\n emitter_name = config.shared_library_2nd + \"EMITTER\"\r\n env[emitter_name] = [__win32_lib_emitter]\r\n env[\"BUILDERS\"][config.shared_library_2nd] = SCons.Builder.Builder(\r\n action=[SCons.Defaults.SharedCheck,\r\n SCons.Action.Action(\"$SHLINKCOM\", \"building '$TARGET' from '$SOURCE'\")],\r\n emitter=\"$\" + emitter_name,\r\n prefix=\"$SHLIBPREFIX\",\r\n suffix=\"$SHLIBSUFFIX\",\r\n target_scanner=SCons.Scanner.Prog.ProgramScanner(),\r\n src_suffix=\"$SHOBJSUFFIX\",\r\n src_builder=\"SharedObject\")",
"def library_dirs(self):",
"def linking_library_dirs(self):",
"def create_libs(desc: dict, pins: list, output_dir: str, verbose: bool = False):\n db = desc.copy()\n db[\"block_name\"] = desc[\"name_of_the_cell\"]\n db[\"area\"] = db.pop(\"block_area_(um2)\")\n db[\"pins\"] = pins\n db[\"types\"] = [pin for pin in pins if pin.width > 1]\n lib_paths = []\n for corner, condition in desc.get(\"corners\", {}).items():\n db[\"library\"] = \"%s_%s_%sV_%sC\" % (\n desc.get(\"name_of_the_cell\"),\n corner,\n (\"%.2f\" % condition.get(\"voltage\")).replace('.', '_'),\n str(condition.get(\"temperature\")).replace('-', 'm')\n )\n db[\"corner_name\"] = corner\n db[\"corner\"] = condition\n if verbose:\n print(db)\n # create directory if does not exist\n os.makedirs(output_dir, exist_ok=True)\n # generate lib file\n template_file = os.path.join(os.path.dirname(__file__), \"./template_ana.lib.mako\")\n _tmp = Template(filename=template_file)\n lib_path = os.path.join(output_dir, \"%s.lib\" % db[\"library\"])\n with open(lib_path, \"w+\") as fp:\n fp.write(_tmp.render_unicode(**db))\n lib_paths.append(lib_path)\n return lib_paths",
"def genLibData(self):\n import mush\n tsMain = string.Template(mush.libGenMain)\n tsIfAltId = string.Template(mush.libGenIfAltId)\n #--Data Records\n for id in ('lib_action','lib_actionCount'):\n glob = self.getRecord('GLOB',id,Glob)\n (glob.type, glob.value) = ('s',0)\n glob.setChanged()\n setAllCode = 'begin lib_setAllGS\\n'\n setNoneCode = 'begin lib_setNoneGS\\n'\n for libId in self.libList:\n (srcId,altId) = self.libMap[libId]\n srcBook = self.srcBooks.get(srcId)[0]\n if not srcBook:\n print '%s: Missing source: %s' % (libId,srcId)\n continue\n #--Global\n glob = self.getRecord('GLOB',libId+'G',Glob)\n (glob.type, glob.value) = ('s',0)\n glob.setChanged()\n #--Script\n scriptId = libId+'LS'\n script = self.getRecord('SCPT',scriptId,Scpt)\n scriptCode = tsMain.substitute(\n libId=libId, srcId=srcId, ifAltId=(\n (altId and tsIfAltId.substitute(libId=libId,altId=altId)) or ''))\n script.setCode(scriptCode)\n script.setChanged()\n #--Book\n srcBook.load(unpack=True)\n book = self.getRecord('BOOK',libId,Book)\n book.model = srcBook.model\n book.title = srcBook.title\n book.icon = srcBook.icon\n book.text = srcBook.text\n book.script = scriptId\n book.setChanged()\n #--Set Scripts\n setAllCode += 'set %sG to 1\\n' % (libId,)\n setNoneCode += 'set %sG to 0\\n' % (libId,)\n #--Set scripts\n for id,code in (('lib_setAllGS',setAllCode),('lib_setNoneGS',setNoneCode)):\n code += ';--Done\\nstopScript %s\\nend\\n' % (id,)\n script = self.getRecord('SCPT',id,Scpt)\n script.setCode(code)\n script.setChanged()",
"def initialize_libraries(experiment, ln):\n # Move into the folder to do the intial calculations in\n folder = \"initial_library\" + str(ln)\n os.chdir(folder) \n # Create a time stamp for beginning the calculations\n experiment[\"Summary\"] = \"Library \" + str(ln) + \" Initialization\\n\"\n experiment[\"Summary\"] += \"Started\" + SHARING.time_stamp()\n # Find the proper number of coordinates to consider\n N = len(experiment[\"Movements\"][ln])/2\n # Go through each antigen\n for mol in experiment[0]:\n # Apply the proper rotation\n for cn in range(N):\n # Create a generic vector of zeros of the appropriate length\n vector = [0.0] * N\n # Place a value of 1.0 in the correct location in the vector\n vector[cn] = 1.0\n # Find the angle to rotate the antigens by\n angle = experiment[\"Movements\"][ln][N+cn]\n # Rotate each of the antigens by the appropriate angle\n rmatrix = MOLECULES.calculate_rmatrix(angle, vector)\n MOLECULES.rotate(mol, rmatrix)\n # Translate each antigen by the appropriate amount\n MOLECULES.move(mol, experiment[\"Movements\"][ln][:N], '+')\n # Update the reference folder with these updated coordinates\n SHARING.output_Current(experiment, \"./Current/\") \n # Load the canonical structures\n canonicals = IPRO_FUNCTIONS.load_canonicals(experiment)\n cdrs = list(canonicals.keys())\n cdrs.sort()\n # Load the clashes\n clashes = IPRO_FUNCTIONS.load_clashes(experiment, cdrs) \n # Load the C++ scores\n raw_scores = IPRO_FUNCTIONS.load_scores(experiment[\"Folder\"])\n # Look for alternate solutions using integer cuts\n goOn = True\n # Store the solutions in a list\n solutions = [experiment[\"Scores\"][ln-1]]\n # Keep searching for alternate solutions until the quality of the result is\n # worse\n while goOn:\n # Resolve the MILP using integer cuts\n if useCPLEX:\n #solution = CPLEX.optcdr_canonicals(canonicals, clashes, \\\n # raw_scores[ln], solutions)\n pass\n else:\n solution = GAMS.optcdr_canonicals(canonicals, clashes, \\\n raw_scores[ln], solutions)\n # If the solution found has an equal objective value to the first, store\n # it and re-run the MILP\n if solution[\"Score\"] == experiment[\"Scores\"][ln-1][1][\"Score\"]:\n solutions.append([experiment[\"Scores\"][ln-1][0], solution])\n # Otherwise, break out of the loop and analyze the results\n else:\n goOn = False\n # Update the library based on the most members for the cluster\n best = 0\n # Skip this if there is only one solution after applying the integer cuts\n if len(solutions) > 1:\n # Load the clusters\n cdrs = list(canonicals.keys())\n cdrs.sort()\n clusters = load_clusters(experiment, cdrs)\n # Initialize the variables to store the solution with the most cluster\n # members\n best = None\n amount = 0\n # Go through the solutions\n for i, solution in enumerate(solutions):\n # Store the total number of members throughout the CDRs\n total = 0\n # Go through the CDRs\n for j, cdr in enumerate(cdrs):\n # Extract the number of members from the \"clusters\" dictionary \n members = clusters[cdr][solution[1][j+1]][\"Members\"]\n # 30 is the number where the permitted amino acids change from\n # \"of the same type\" to \"only those observed\" at each position\n if members > 30:\n members = 30\n # Add the number of members to the total for this solution\n total += members\n # If applicable, update the \"best\" solution found and its\n # corresponding total number of members\n if total > amount:\n best = i\n amount = total\n # Update the library based on the most structures\n experiment[\"Scores\"][ln-1] = 
solutions[best]\n # If the set of canonical structures has changed, update the referenced\n # values\n if best != 0:\n SHARING.output_scores(experiment, experiment[\"Folder\"] + \"Current/\", ln)\n # Copy the necessary files\n SHARING.copy_standard_files(experiment, solv = True) \n # Generate the antibody structures\n build_antibodies(experiment, canonicals, ln) \n # Go back to the home directory\n os.chdir(\"../\")\n # Try to create a new folder to handle the IPRO affinity maturation\n folder = \"library\" + str(ln)\n try:\n os.mkdir(folder)\n # If the folder already exists, delete it and make a new one. This is the\n # proper procedure since the library should only be there if the\n # initialization has already finished\n except OSError:\n os.system(\"rm -rf \" + folder)\n os.mkdir(folder)\n # Create a new Experiment class object to handle the IPRO affinity maturation\n make_IPRO_experiment(experiment, folder)\n # Delete the initialization folder\n os.system(\"rm -rf initial_\" + folder) \n # Update the summary file\n # Create a summary file\n experiment[\"Summary\"] += \"Ended\" + SHARING.time_stamp()\n name = SHARING.summary_name(SHARING.get_current())\n f = open(name, \"a\")\n f.write(experiment[\"Summary\"])\n f.close()",
"def build_assets():\n\n # templates\n template = open(os.path.join(BASE_PATH, 'AssetLibrary.as.template'), 'r').read()\n\n embed_templates = {\n 'image': \"[Embed(source='%(asset_path)s')] private var %(asset_class_name)s:Class;\\n\",\n 'mp3': \"[Embed(source='%(asset_path)s')] private var %(asset_class_name)s:Class;\\n\", \n 'xml': \"[Embed(source='%(asset_path)s', mimeType=\\\"application/octet-stream\\\")] private var %(asset_class_name)s:Class;\\n\"\n }\n \n library_element_template = \"'%(asset_id)s': %(asset_class_name)s\"\n\n # load+parse asset xml\n complete_asset_embed_code = \"\"\n complete_asset_data_code = \"\"\n asset_dom = minidom.parse(ASSET_XML_FILE)\n \n asset_nodes = list(asset_dom.getElementsByTagName('asset'))\n \n for asset_node in asset_nodes:\n asset_attrs = dict(asset_node.attributes.items())\n asset_embed_code = embed_templates[asset_attrs['type']] % {\n 'asset_class_name': asset_attrs['name'],\n 'asset_path': ASSET_BASE + asset_attrs['file']\n }\n\n complete_asset_embed_code += asset_embed_code\n \n asset_data_code = library_element_template % {\n 'asset_id': asset_attrs['name'],\n 'asset_class_name': asset_attrs['name']\n }\n\n complete_asset_data_code += asset_data_code\n\n if asset_nodes.index(asset_node) == len(asset_nodes) - 1:\n complete_asset_data_code += \"\\n\"\n else:\n complete_asset_data_code += \",\\n\"\n \n output = template % {\n 'asset_embeds': complete_asset_embed_code,\n 'asset_data': complete_asset_data_code\n }\n \n # render\n output_f = open(os.path.join(BASE_PATH, 'AssetLibrary.as'), 'w')\n output_f.write(output)",
"def test_project_with_dependencies(self):\n self.make_project()\n # 'test_library.zip' is not currently compiled for diorite.\n self.project.app_platforms = \"aplite,basalt,chalk\"\n self.project.save()\n tempdir = tempfile.mkdtemp()\n try:\n # Extract a premade library to a temporary directory\n ZipFile(LIBRARY_PATH).extractall(tempdir)\n lib_path = os.path.join(tempdir, 'libname')\n\n # Include the library in the code and package.json\n self.add_file(\"main.c\", DEPENDENCY_MAIN)\n self.project.set_dependencies({\n 'libname': lib_path\n })\n\n # Compile and check\n self.compile()\n self.check_compile_success(num_platforms=3)\n finally:\n shutil.rmtree(tempdir)",
"def make_productions3(self):\n self.make_productions2()\n for prod in self.make_productions_preterminals():\n self.productions.add(prod)",
"def save_libraries(self, a, lib):\n logging.debug(\"in save libraries\")\n self.libraries.append(lib)\n self.produce(\"library\", lib)",
"def _copy_bins():\n # STEP 1: If we're performing a build from a copied source tree,\n # copy the generated python files into the package\n\n _clean_bins()\n\n py_z3_build_dir = os.path.join(BUILD_DIR, 'python', 'z3')\n root_z3_dir = os.path.join(ROOT_DIR, 'z3')\n shutil.copy(os.path.join(py_z3_build_dir, 'z3core.py'), root_z3_dir)\n shutil.copy(os.path.join(py_z3_build_dir, 'z3consts.py'), root_z3_dir)\n\n # STEP 2: Copy the shared library, the executable and the headers\n\n os.mkdir(LIBS_DIR)\n os.mkdir(BINS_DIR)\n os.mkdir(HEADERS_DIR)\n shutil.copy(os.path.join(BUILD_DIR, LIBRARY_FILE), LIBS_DIR)\n shutil.copy(os.path.join(BUILD_DIR, EXECUTABLE_FILE), BINS_DIR)\n path1 = glob.glob(os.path.join(BUILD_DIR, \"msvcp*\"))\n path2 = glob.glob(os.path.join(BUILD_DIR, \"vcomp*\"))\n path3 = glob.glob(os.path.join(BUILD_DIR, \"vcrun*\"))\n for filepath in path1 + path2 + path3:\n shutil.copy(filepath, LIBS_DIR)\n\n for header_dir in HEADER_DIRS:\n for fname in os.listdir(header_dir):\n if not fname.endswith('.h'):\n continue\n shutil.copy(os.path.join(header_dir, fname), os.path.join(HEADERS_DIR, fname))\n\n # This hack lets z3 installed libs link on M1 macs; it is a hack, not a proper fix\n # @TODO: Linked issue: https://github.com/Z3Prover/z3/issues/5926\n major_minor = '.'.join(_z3_version().split('.')[:2])\n link_name = None\n if BUILD_PLATFORM in ('win32', 'cygwin', 'win'):\n pass # TODO: When windows VMs work on M1, fill this in\n elif BUILD_PLATFORM in ('darwin', 'osx'):\n split = LIBRARY_FILE.split('.')\n link_name = split[0] + '.' + major_minor + '.' + split[1]\n else:\n link_name = LIBRARY_FILE + '.' + major_minor\n if link_name:\n os.symlink(LIBRARY_FILE, os.path.join(LIBS_DIR, link_name), True)",
"def add_library(self):\n library = self.new_section('The Library')\n books = self.wiki('the-library')._soup(class_='boxbook')\n template = (\n '<div class=\"book-title\">{}</div>'\n '<div class=\"book-description\">{}</div>')\n for b in books:\n title = b.find(class_='booktitle').string\n description = b.find(class_='boxleft')('div')[0].text.strip()\n excerpts = [self.wiki.site + a['href']\n for a in b.find(class_='boxright')('a')]\n if title == 'The Journal of Aframos Longjourney':\n links = self.wiki(excerpts[1])._soup.select('#page-content a')\n links = [\n 'http://wanderers-library.wikidot.com/' +\n l['href'].split('/')[-1] for l in links]\n excerpts = [excerpts[0]] + links\n book = self.add_page(\n title, template.format(title, description), library)\n for url in excerpts:\n self.add_url(url, book)",
"def create_data_base():\n\n\tscript_files = []\n\tjson_files = []\n\t\n\t# get script files list\n\tfor file in os.listdir(\"learned_objects_scripts/\"):\n\t\tif file.endswith(\".script\"):\n\t\t\tscript_files.append(file)\n\n\t# get json files list\n\tfor file in os.listdir(\"object_models/\"):\n\t\tif file.endswith(\".json\"):\n\t\t\tjson_files.append(file)\n\t\n\t# create json file for new objects\n\tmodel_created = False\n\tfor file in script_files:\n\t\tif \"{}.json\".format(file[:-7]) not in json_files:\n\t\t\twith open(\"object_models/{}.json\".format(file[:-7]), 'w') as outfile:\n\t\t\t\tobj_model = object_script_to_model(\"learned_objects_scripts/\" + file)\n\t\t\t\tjson.dump(obj_model, outfile)\n\t\t\t\tmodel_created = True\n\t\t\t\tprint(\"model created for\", file)\n\tif not model_created:\n\t\tprint(\"data base is already up to date\")",
"def gather() -> None:\n # pylint: disable=too-many-locals\n\n # First off, clear out any existing output.\n existing_dirs = [\n os.path.join('src/external', d) for d in os.listdir('src/external')\n if d.startswith('python-') and d != 'python-notes.txt'\n ]\n existing_dirs += [\n os.path.join('assets/src', d) for d in os.listdir('assets/src')\n if d.startswith('pylib-')\n ]\n for existing_dir in existing_dirs:\n efrotools.run('rm -rf \"' + existing_dir + '\"')\n\n for buildtype in ['debug', 'release']:\n debug = buildtype == 'debug'\n bsuffix = '_debug' if buildtype == 'debug' else ''\n bsuffix2 = '-debug' if buildtype == 'debug' else ''\n\n libname = 'python' + PYTHON_VERSION_MAJOR + ('dm' if debug else 'm')\n\n bases = {\n 'mac':\n f'build/python_apple_mac{bsuffix}/build/macOS',\n 'ios':\n f'build/python_apple_ios{bsuffix}/build/iOS',\n 'tvos':\n f'build/python_apple_tvos{bsuffix}/build/tvOS',\n 'android_arm':\n f'build/python_android_arm{bsuffix}/build/sysroot',\n 'android_arm64':\n f'build/python_android_arm64{bsuffix}/build/sysroot',\n 'android_x86':\n f'build/python_android_x86{bsuffix}/build/sysroot',\n 'android_x86_64':\n f'build/python_android_x86_64{bsuffix}/build/sysroot'\n }\n\n # Note: only need pylib for the first in each group.\n builds: List[Dict[str, Any]] = [{\n 'name':\n 'macos',\n 'group':\n 'apple',\n 'headers':\n bases['mac'] + '/Support/Python/Headers',\n 'libs': [\n bases['mac'] + '/Support/Python/libPython.a',\n bases['mac'] + '/Support/OpenSSL/libOpenSSL.a',\n bases['mac'] + '/Support/XZ/libxz.a'\n ],\n 'pylib':\n (bases['mac'] + '/python/lib/python' + PYTHON_VERSION_MAJOR),\n }, {\n 'name':\n 'ios',\n 'group':\n 'apple',\n 'headers':\n bases['ios'] + '/Support/Python/Headers',\n 'libs': [\n bases['ios'] + '/Support/Python/libPython.a',\n bases['ios'] + '/Support/OpenSSL/libOpenSSL.a',\n bases['ios'] + '/Support/XZ/libxz.a'\n ],\n }, {\n 'name':\n 'tvos',\n 'group':\n 'apple',\n 'headers':\n bases['tvos'] + '/Support/Python/Headers',\n 'libs': [\n bases['tvos'] + '/Support/Python/libPython.a',\n bases['tvos'] + '/Support/OpenSSL/libOpenSSL.a',\n bases['tvos'] + '/Support/XZ/libxz.a'\n ],\n }, {\n 'name':\n 'android_arm',\n 'group':\n 'android',\n 'headers':\n bases['android_arm'] + f'/usr/include/{libname}',\n 'libs': [\n bases['android_arm'] + f'/usr/lib/lib{libname}.a',\n bases['android_arm'] + '/usr/lib/libssl.a',\n bases['android_arm'] + '/usr/lib/libcrypto.a',\n bases['android_arm'] + '/usr/lib/liblzma.a',\n bases['android_arm'] + '/usr/lib/libsqlite3.a'\n ],\n 'libinst':\n 'android_armeabi-v7a',\n 'pylib': (bases['android_arm'] + '/usr/lib/python' +\n PYTHON_VERSION_MAJOR),\n }, {\n 'name': 'android_arm64',\n 'group': 'android',\n 'headers': bases['android_arm64'] + f'/usr/include/{libname}',\n 'libs': [\n bases['android_arm64'] + f'/usr/lib/lib{libname}.a',\n bases['android_arm64'] + '/usr/lib/libssl.a',\n bases['android_arm64'] + '/usr/lib/libcrypto.a',\n bases['android_arm64'] + '/usr/lib/liblzma.a',\n bases['android_arm64'] + '/usr/lib/libsqlite3.a'\n ],\n 'libinst': 'android_arm64-v8a',\n }, {\n 'name': 'android_x86',\n 'group': 'android',\n 'headers': bases['android_x86'] + f'/usr/include/{libname}',\n 'libs': [\n bases['android_x86'] + f'/usr/lib/lib{libname}.a',\n bases['android_x86'] + '/usr/lib/libssl.a',\n bases['android_x86'] + '/usr/lib/libcrypto.a',\n bases['android_x86'] + '/usr/lib/liblzma.a',\n bases['android_x86'] + '/usr/lib/libsqlite3.a'\n ],\n 'libinst': 'android_x86',\n }, {\n 'name': 'android_x86_64',\n 'group': 'android',\n 'headers': 
bases['android_x86_64'] + f'/usr/include/{libname}',\n 'libs': [\n bases['android_x86_64'] + f'/usr/lib/lib{libname}.a',\n bases['android_x86_64'] + '/usr/lib/libssl.a',\n bases['android_x86_64'] + '/usr/lib/libcrypto.a',\n bases['android_x86_64'] + '/usr/lib/liblzma.a',\n bases['android_x86_64'] + '/usr/lib/libsqlite3.a'\n ],\n 'libinst': 'android_x86_64',\n }]\n\n for build in builds:\n\n grp = build['group']\n builddir = f'src/external/python-{grp}{bsuffix2}'\n header_dst = os.path.join(builddir, 'include')\n lib_dst = os.path.join(builddir, 'lib')\n assets_src_dst = f'assets/src/pylib-{grp}'\n\n # Do some setup only once per group.\n if not os.path.exists(builddir):\n efrotools.run('mkdir -p \"' + builddir + '\"')\n efrotools.run('mkdir -p \"' + lib_dst + '\"')\n\n # Only pull modules into game assets on release pass.\n if not debug:\n # Copy system modules into the src assets\n # dir for this group.\n efrotools.run('mkdir -p \"' + assets_src_dst + '\"')\n efrotools.run(\n 'rsync --recursive --include \"*.py\"'\n ' --exclude __pycache__ --include \"*/\" --exclude \"*\" \"'\n + build['pylib'] + '/\" \"' + assets_src_dst + '\"')\n\n # Prune a bunch of modules we don't need to cut\n # down on size.\n prune = [\n 'config-*', 'idlelib', 'lib-dynload', 'lib2to3',\n 'multiprocessing', 'pydoc_data', 'site-packages',\n 'ensurepip', 'tkinter', 'wsgiref', 'distutils',\n 'turtle.py', 'turtledemo', 'test', 'sqlite3/test',\n 'unittest', 'dbm', 'venv', 'ctypes/test', 'imaplib.py',\n '_sysconfigdata_*'\n ]\n efrotools.run('cd \"' + assets_src_dst + '\" && rm -rf ' +\n ' '.join(prune))\n\n # Some minor filtering to system scripts:\n # on iOS/tvOS, addusersitepackages() leads to a crash\n # due to _sysconfigdata_dm_ios_darwin module not existing,\n # so let's skip that.\n fname = f'{assets_src_dst}/site.py'\n txt = efrotools.readfile(fname)\n txt = efrotools.replace_one(\n txt,\n ' known_paths = addusersitepackages(known_paths)',\n ' # efro tweak: this craps out on ios/tvos.\\n'\n ' # (and we don\\'t use it anyway)\\n'\n ' # known_paths = addusersitepackages(known_paths)')\n efrotools.writefile(fname, txt)\n\n # Copy in a base set of headers (everything in a group should\n # be using the same headers)\n efrotools.run(f'cp -r \"{build[\"headers\"]}\" \"{header_dst}\"')\n\n # Clear whatever pyconfigs came across; we'll build our own\n # universal one below.\n efrotools.run('rm ' + header_dst + '/pyconfig*')\n\n # Write a master pyconfig header that reroutes to each\n # platform's actual header.\n with open(header_dst + '/pyconfig.h', 'w') as hfile:\n hfile.write(\n '#if BA_OSTYPE_MACOS\\n'\n '#include \"pyconfig-macos.h\"\\n\\n'\n '#elif BA_OSTYPE_IOS\\n'\n '#include \"pyconfig-ios.h\"\\n\\n'\n '#elif BA_OSTYPE_TVOS\\n'\n '#include \"pyconfig-tvos.h\"\\n\\n'\n '#elif BA_OSTYPE_ANDROID and defined(__arm__)\\n'\n '#include \"pyconfig-android_arm.h\"\\n\\n'\n '#elif BA_OSTYPE_ANDROID and defined(__aarch64__)\\n'\n '#include \"pyconfig-android_arm64.h\"\\n\\n'\n '#elif BA_OSTYPE_ANDROID and defined(__i386__)\\n'\n '#include \"pyconfig-android_x86.h\"\\n\\n'\n '#elif BA_OSTYPE_ANDROID and defined(__x86_64__)\\n'\n '#include \"pyconfig-android_x86_64.h\"\\n\\n'\n '#else\\n'\n '#error unknown platform\\n\\n'\n '#endif\\n')\n\n # Now copy each build's config headers in with unique names.\n cfgs = [\n f for f in os.listdir(build['headers'])\n if f.startswith('pyconfig')\n ]\n\n # Copy config headers to their filtered names.\n for cfg in cfgs:\n out = cfg.replace('pyconfig', 'pyconfig-' + build['name'])\n if cfg == 
'pyconfig.h':\n\n # For platform's root pyconfig.h we need to filter\n # contents too (those headers can themselves include\n # others; ios for instance points to a arm64 and a\n # x86_64 variant).\n contents = efrotools.readfile(build['headers'] + '/' + cfg)\n contents = contents.replace('pyconfig',\n 'pyconfig-' + build['name'])\n efrotools.writefile(header_dst + '/' + out, contents)\n else:\n # other configs we just rename\n efrotools.run('cp \"' + build['headers'] + '/' + cfg +\n '\" \"' + header_dst + '/' + out + '\"')\n\n # Copy in libs. If the lib gave a specific install name,\n # use that; otherwise use name.\n targetdir = lib_dst + '/' + build.get('libinst', build['name'])\n efrotools.run('rm -rf \"' + targetdir + '\"')\n efrotools.run('mkdir -p \"' + targetdir + '\"')\n for lib in build['libs']:\n efrotools.run('cp \"' + lib + '\" \"' + targetdir + '\"')\n\n print('Great success!')",
"def test_index_libraries(self):\n result1 = self._create_library(slug=\"test-lib-index-1\", title=\"Title 1\", description=\"Description\")\n result2 = self._create_library(slug=\"test-lib-index-2\", title=\"Title 2\", description=\"Description\")\n\n for result in [result1, result2]:\n library_key = LibraryLocatorV2.from_string(result['id'])\n response = ContentLibraryIndexer.get_items([library_key])[0]\n\n assert response['id'] == result['id']\n assert response['title'] == result['title']\n assert response['description'] == result['description']\n assert response['uuid'] == result['bundle_uuid']\n assert response['num_blocks'] == 0\n assert response['version'] == result['version']\n assert response['last_published'] is None\n assert response['has_unpublished_changes'] is False\n assert response['has_unpublished_deletes'] is False",
"def makeProjects(self, *versions):\n baseDirectory = FilePath(self.mktemp())\n baseDirectory.createDirectory()\n for version in versions:\n self.makeProject(version, baseDirectory)\n return baseDirectory",
"def create_init_files(self, app_label, model_names, models):\n model_name_slugs = [\"%s_views\" % (self.camel_to_slug(model_name)) for model_name in model_names]\n model_names_dict = {self.camel_to_slug(model.__name__): self.camel_to_slug(self.model_name_plural(model)) for\n model in models}\n for folder_name in [\"views\", \"urls\"]:\n file_path = \"%s/%s/__init__.py\" % (app_label, folder_name)\n template_path = \"django_baker/__init__%s\" % folder_name\n self.create_file_from_template(file_path, template_path, {\"app_label\": app_label,\n \"model_name_slugs\": model_name_slugs,\n \"model_names_dict\": model_names_dict\n })",
"def mk_rg3(self):\n pass",
"def main():\n # Create / clean output dir\n if os.path.isdir(OUT_DIR):\n shutil.rmtree(OUT_DIR)\n os.mkdir(OUT_DIR)\n\n # Write all assets to the directory\n for fname, bb in create_assets().items():\n filename = os.path.join(OUT_DIR, fname)\n dirname = os.path.dirname(filename)\n if not os.path.isdir(dirname):\n os.makedirs(dirname)\n with open(filename, \"wb\") as f:\n f.write(bb)",
"def __init__(self):\n self._ll = LowLevelLibs()\n self._lib = self._ll.phe",
"def __init__(self, lib_dir, dist_dir, windows_exe_files=[],\n console_exe_files=[], service_exe_files=[],\n comserver_files=[], lib_files=[]):\n self.lib_dir = lib_dir\n self.dist_dir = dist_dir\n if not self.dist_dir[-1] in \"\\\\/\":\n self.dist_dir += \"\\\\\"\n self.name = AppName\n self.lname = AppName.lower()\n self.version = AppVersion\n self.windows_exe_files = [self.chop(p) for p in windows_exe_files]\n self.console_exe_files = [self.chop(p) for p in console_exe_files]\n self.service_exe_files = [self.chop(p) for p in service_exe_files]\n self.comserver_files = [self.chop(p) for p in comserver_files]\n self.lib_files = [self.chop(p) for p in lib_files]\n self.icon = os.path.abspath(r'doc\\icon\\favicon.ico')",
"def _setup(self):\n mkdir_p(self.output_folder)\n if self.symlink_dir:\n mkdir_p(self.symlink_dir)\n try:\n selected_versions = self._resolve_dependencies()\n if selected_versions:\n self._write_lock(selected_versions)\n print('\\n\\nVersions Selected for downloading:\\n')\n print('\\t' + '\\n\\t'.join(['{}: {}'.format(req, ver) for req, ver in selected_versions.items()]) + '\\n')\n for pkg_name, version in selected_versions.items():\n pkg_metadata = self._get_metadata(pkg_name)\n version_metadata = pkg_metadata.get('versions', dict()).get(str(version), dict())\n self._download_package(version_metadata)\n except (RequirementMatchError, DependencyError) as e:\n print(e.message)\n return self.created()",
"def get_libraries(self, archs: list[str]):\n libraries = self.ScopedLibraryDict.get(\"common\", []).copy()\n\n for arch in archs:\n libraries + self.ScopedLibraryDict.get(arch, []).copy()\n return list(set(libraries))",
"def makeProjects(self, *versions):\n baseDirectory = FilePath(self.mktemp())\n for version in versions:\n self.makeProject(version, baseDirectory)\n return baseDirectory",
"def create_aiida_project_environment(self):\n try:\n self.create_folder_structure()\n self.build_python_environment()\n self.install_packages_from_index()\n except Exception:\n self.exit_on_exception()\n raise\n self.create_spec_entry()",
"def create_packages(self):\n if not self.rewrite:\n # The extra package structure is only required for vendored code used via import rewrites.\n return\n\n for index, _ in enumerate(self._subpath_components):\n relpath = _PACKAGE_COMPONENTS + self._subpath_components[: index + 1] + [\"__init__.py\"]\n touch(os.path.join(self.ROOT, *relpath))",
"def setup_lib(CLIB):\n # {{ SETUP_LIB }}",
"def gen_library(self):\n newlibrary = self.newlibrary\n whelpers.add_all_helpers(newlibrary.symtab)\n\n self.function_index = newlibrary.function_index\n self.class_map = newlibrary.class_map\n\n self.instantiate_all_classes(newlibrary.wrap_namespace)\n self.update_templated_typemaps(newlibrary.wrap_namespace)\n self.gen_namespace(newlibrary.wrap_namespace)"
] | [
"0.7787591",
"0.61216825",
"0.59788126",
"0.5914733",
"0.5853105",
"0.5761492",
"0.5729343",
"0.57070595",
"0.5634071",
"0.5578847",
"0.5557914",
"0.55127",
"0.5486333",
"0.5457092",
"0.5417137",
"0.54008245",
"0.5397091",
"0.53696746",
"0.53682125",
"0.5365185",
"0.53581166",
"0.53523415",
"0.5310362",
"0.5295752",
"0.5293841",
"0.52729946",
"0.52695954",
"0.5262353",
"0.52619714",
"0.52426034"
] | 0.6650628 | 1 |
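The `negative_scores` list pairs up positionally with `negatives`, while `document_score` is the score of the positive `document`; in the row above exactly one negative (0.7787591) out-scores the positive (0.6650628), which lines up with its `document_rank` of 1. The sketch below shows one assumed way to use those scores for hard-negative selection; the helper name, record layout, and "highest score = hardest" rule are illustrative choices, not something the dump prescribes.

```python
# Hypothetical sketch: pick the k highest-scoring negatives from a record,
# treating "highest retrieval score" as a stand-in notion of "hardest".
# Field names mirror the columns above; the selection rule is an assumption.

def hardest_negatives(record, k=5):
    """Return the k negatives with the highest scores."""
    scored = zip(record["negatives"], map(float, record["negative_scores"]))
    ranked = sorted(scored, key=lambda pair: pair[1], reverse=True)
    return [negative for negative, _ in ranked[:k]]

# Toy usage with values shaped like the row above:
record = {
    "negatives": ["def a(): ...", "def b(): ...", "def c(): ..."],
    "negative_scores": ["0.7787591", "0.61216825", "0.59788126"],
}
print(hardest_negatives(record, k=2))  # ['def a(): ...', 'def b(): ...']
```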
Apply cdhit-est to all the intermediate libraries. | def applyCDHIT(INTERMEDIATELIBRARIES):
#### Apply cd-hit-est for all the intermediate library
for file in INTERMEDIATELIBRARIES:
fileName = os.path.basename(file).split(".fasta")[0]
os.chdir("classification_result/intermediateLibraries/")
subprocess.call('cdhit-est -aS 0.9 -c 0.9 -g 1 -r 1 -i {input}.fasta -o {output}.fasta_tmp'.format(input=fileName, output=fileName), shell=True)
subprocess.call("mv {input}.fasta_tmp {output}.fasta".format(input=fileName, output=fileName), shell=True)
os.chdir("../..") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_c_extensions_import():\n import storm_analysis.dbscan.dbscan_c\n \n import storm_analysis.fista.fista_fft_c\n \n import storm_analysis.frc.frc_c\n \n import storm_analysis.L1H.homotopy_imagea_c\n\n import storm_analysis.rolling_ball_bgr.rolling_ball_lib_c\n\n import storm_analysis.sa_library.cs_decon_utilities_c\n import storm_analysis.sa_library.dao_fit_c\n import storm_analysis.sa_library.grid_c\n import storm_analysis.sa_library.ia_utilities_c\n import storm_analysis.sa_library.matched_filter_c\n\n import storm_analysis.sa_utilities.fitz_c\n\n import storm_analysis.simulator.pf_math_c\n import storm_analysis.simulator.draw_gaussians_c\n \n import storm_analysis.spliner.cubic_spline_c\n import storm_analysis.spliner.cubic_fit_c",
"def createIntermediateLibraries(LISTPRELIBRARIES, DICOLIBRARIES, CONFIG, DICOFINALCLASSIF):\n\t#### Parse all the intermediate libraries files\n\tfor preLibrary in LISTPRELIBRARIES:\n\t\t#### Retrieve the final classification name of the ET from the file name\n\t\tfinalClassification = os.path.basename(preLibrary).split(\".fasta\")[0]\n\t\t#### Read and store the fasta sequences of the prelibraries\n\t\tsequences=readInput.readFasta(preLibrary)\n\t\t#### Parse all the sequences\n\t\tfor id in sequences:\n\t\t\t#### Check the finalClassification of the sequences is in the ID\n\t\t\tif finalClassification.lower() in id.lower():\n\t\t\t\tDICOFINALCLASSIF[id]=finalClassification\n\t\t\t\tapplyFiltersForIntermediate(id, sequences, finalClassification, CONFIG, DICOLIBRARIES)",
"def suite():\n\n testSuite = common.unittest.TestSuite()\n\n cdatafuncs = [niclassdata] # non-indexing data tests\n cdatafuncs.append(iclassdata) # indexing data tests\n\n heavy = common.heavy\n # Choose which tests to run in classes with autogenerated tests.\n if heavy:\n autoprefix = 'test' # all tests\n else:\n autoprefix = 'test_l' # only light tests\n\n niter = 1\n for i in range(niter):\n # Tests on query data.\n for cdatafunc in cdatafuncs:\n for cdata in cdatafunc():\n class_ = eval(cdata[0])\n if heavy or not class_.heavy:\n suite_ = common.unittest.makeSuite(class_,\n prefix=autoprefix)\n testSuite.addTest(suite_)\n # Tests on query usage.\n testSuite.addTest(common.unittest.makeSuite(ScalarTableUsageTestCase))\n testSuite.addTest(common.unittest.makeSuite(MDTableUsageTestCase))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage1))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage2))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage3))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage4))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage5))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage6))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage7))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage8))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage9))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage10))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage11))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage12))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage13))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage14))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage15))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage16))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage17))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage18))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage19))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage20))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage21))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage22))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage23))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage24))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage25))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage26))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage27))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage28))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage29))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage30))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage31))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage32))\n\n return testSuite",
"def compute_dependency_specs(cls, kwargs=None, payload=None):\n for spec in super(ImportJarsMixin, cls).compute_dependency_specs(kwargs, payload):\n yield spec\n\n imported_jar_library_specs = cls.imported_jar_library_specs(kwargs=kwargs, payload=payload)\n for spec in imported_jar_library_specs:\n yield spec",
"def initialize_libraries(experiment, ln):\n # Move into the folder to do the intial calculations in\n folder = \"initial_library\" + str(ln)\n os.chdir(folder) \n # Create a time stamp for beginning the calculations\n experiment[\"Summary\"] = \"Library \" + str(ln) + \" Initialization\\n\"\n experiment[\"Summary\"] += \"Started\" + SHARING.time_stamp()\n # Find the proper number of coordinates to consider\n N = len(experiment[\"Movements\"][ln])/2\n # Go through each antigen\n for mol in experiment[0]:\n # Apply the proper rotation\n for cn in range(N):\n # Create a generic vector of zeros of the appropriate length\n vector = [0.0] * N\n # Place a value of 1.0 in the correct location in the vector\n vector[cn] = 1.0\n # Find the angle to rotate the antigens by\n angle = experiment[\"Movements\"][ln][N+cn]\n # Rotate each of the antigens by the appropriate angle\n rmatrix = MOLECULES.calculate_rmatrix(angle, vector)\n MOLECULES.rotate(mol, rmatrix)\n # Translate each antigen by the appropriate amount\n MOLECULES.move(mol, experiment[\"Movements\"][ln][:N], '+')\n # Update the reference folder with these updated coordinates\n SHARING.output_Current(experiment, \"./Current/\") \n # Load the canonical structures\n canonicals = IPRO_FUNCTIONS.load_canonicals(experiment)\n cdrs = list(canonicals.keys())\n cdrs.sort()\n # Load the clashes\n clashes = IPRO_FUNCTIONS.load_clashes(experiment, cdrs) \n # Load the C++ scores\n raw_scores = IPRO_FUNCTIONS.load_scores(experiment[\"Folder\"])\n # Look for alternate solutions using integer cuts\n goOn = True\n # Store the solutions in a list\n solutions = [experiment[\"Scores\"][ln-1]]\n # Keep searching for alternate solutions until the quality of the result is\n # worse\n while goOn:\n # Resolve the MILP using integer cuts\n if useCPLEX:\n #solution = CPLEX.optcdr_canonicals(canonicals, clashes, \\\n # raw_scores[ln], solutions)\n pass\n else:\n solution = GAMS.optcdr_canonicals(canonicals, clashes, \\\n raw_scores[ln], solutions)\n # If the solution found has an equal objective value to the first, store\n # it and re-run the MILP\n if solution[\"Score\"] == experiment[\"Scores\"][ln-1][1][\"Score\"]:\n solutions.append([experiment[\"Scores\"][ln-1][0], solution])\n # Otherwise, break out of the loop and analyze the results\n else:\n goOn = False\n # Update the library based on the most members for the cluster\n best = 0\n # Skip this if there is only one solution after applying the integer cuts\n if len(solutions) > 1:\n # Load the clusters\n cdrs = list(canonicals.keys())\n cdrs.sort()\n clusters = load_clusters(experiment, cdrs)\n # Initialize the variables to store the solution with the most cluster\n # members\n best = None\n amount = 0\n # Go through the solutions\n for i, solution in enumerate(solutions):\n # Store the total number of members throughout the CDRs\n total = 0\n # Go through the CDRs\n for j, cdr in enumerate(cdrs):\n # Extract the number of members from the \"clusters\" dictionary \n members = clusters[cdr][solution[1][j+1]][\"Members\"]\n # 30 is the number where the permitted amino acids change from\n # \"of the same type\" to \"only those observed\" at each position\n if members > 30:\n members = 30\n # Add the number of members to the total for this solution\n total += members\n # If applicable, update the \"best\" solution found and its\n # corresponding total number of members\n if total > amount:\n best = i\n amount = total\n # Update the library based on the most structures\n experiment[\"Scores\"][ln-1] = 
solutions[best]\n # If the set of canonical structures has changed, update the referenced\n # values\n if best != 0:\n SHARING.output_scores(experiment, experiment[\"Folder\"] + \"Current/\", ln)\n # Copy the necessary files\n SHARING.copy_standard_files(experiment, solv = True) \n # Generate the antibody structures\n build_antibodies(experiment, canonicals, ln) \n # Go back to the home directory\n os.chdir(\"../\")\n # Try to create a new folder to handle the IPRO affinity maturation\n folder = \"library\" + str(ln)\n try:\n os.mkdir(folder)\n # If the folder already exists, delete it and make a new one. This is the\n # proper procedure since the library should only be there if the\n # initialization has already finished\n except OSError:\n os.system(\"rm -rf \" + folder)\n os.mkdir(folder)\n # Create a new Experiment class object to handle the IPRO affinity maturation\n make_IPRO_experiment(experiment, folder)\n # Delete the initialization folder\n os.system(\"rm -rf initial_\" + folder) \n # Update the summary file\n # Create a summary file\n experiment[\"Summary\"] += \"Ended\" + SHARING.time_stamp()\n name = SHARING.summary_name(SHARING.get_current())\n f = open(name, \"a\")\n f.write(experiment[\"Summary\"])\n f.close()",
"def run_combined(self):\n self.runtest_autokey()\n self.runtest_mediaresource()\n self.runtest_composite_slug()\n self.runtest_all_types()\n self.runtest_complex_types()\n self.runtest_only_key()\n self.runtest_compound_key()\n self.runtest_simple_select()\n self.runtest_paging()\n self.runtest_nav_o2o()\n self.runtest_nav_o2o_1()\n self.runtest_nav_zo2o()\n self.runtest_nav_zo2o_f()\n self.runtest_nav_zo2o_b()\n self.runtest_nav_many2o()\n self.runtest_nav_many2o_f()\n self.runtest_nav_many2o_b()\n self.runtest_nav_many2zo()\n self.runtest_nav_many2zo_f()\n self.runtest_nav_many2zo_b()\n self.runtest_nav_many2zo_r()\n self.runtest_nav_many2zo_rf()\n self.runtest_nav_many2zo_rb()\n self.runtest_nav_many2many()\n self.runtest_nav_many2many_1()\n self.runtest_nav_many2many_r()\n self.runtest_nav_many2many_r1()",
"def main():\n argp = argparse.ArgumentParser(prog='-mshlibs', description=('Print the '\n 'complete list of shared libraries used by the specified binary '\n 'file(s), (optionally including all child dependencies)'))\n argp.add_argument('file', nargs='+', help='file(s) to report on')\n argp.add_argument('-a', '--all', action=\"store_true\", help=(\n \"recursively resolve all sub-dependencies\"))\n args = argp.parse_args()\n\n if args.all:\n deps = reduce(lambda a, b: a|b,\n [all_libraries_used(f) for f in args.file])\n else:\n deps = reduce(lambda a, b: set(a)|set(b),\n [libraries_used(f) for f in args.file])\n\n for path in sorted(deps):\n print path",
"def test_1_make(self):\n #We can compile all these modules together into a single shared library.\n writer = self.writers.values()[0]\n self.code = writer.make(remake=True, dependencies=self.dependencies)\n self.assertEqual(self.code, 0)",
"def run_feature_extraction_tests():\n test_feature_extraction()\n test_distributed_feature_extraction()\n test_multimodel_feature_extraction()\n test_distributed_multimodel_feature_extraction()",
"def createFinalLibraries(INTERMEDIATELIBRARIES, DICOLIBRARIES):\n\t#### Parse all the intermediate libraries files\n\tfor file in INTERMEDIATELIBRARIES:\n\t\tfileName = os.path.basename(file).split(\".fasta\")[0]\n\t\t#### Read and store the fasta sequences of the prelibraries\n\t\tsequences=readInput.readFasta(file)\n\t\t#### Save the three finals libraries\n\t\tsave.saveLibraries(sequences, DICOLIBRARIES)",
"def setup_for_compilation_testcase(self):\n os.chdir(self.tmp_work)\n\n for container in self.containers:\n self._setup_single_directory_for_compilation(container.directory)\n # Run any necessary pre_commands\n self._run_pre_commands(container.directory)",
"def linking_library_dirs(self):",
"def helper_test_cccc(standardized_output: dict, output: dict):\n\n tot_loc = 0\n tot_cloc = 0\n\n for file in standardized_output[\"files\"]:\n for function in file[\"functions\"]:\n tot_loc += function[\"LOC\"]\n tot_cloc += function[\"CLOC\"]\n\n output[\"LOC\"] = tot_loc\n output[\"CLOC\"] = tot_cloc\n output[\"classes\"] = standardized_output[\"classes\"]\n output[\"files\"] = standardized_output[\"files\"]\n\n for module in output[\"classes\"]:\n WMC = 0\n n_func = 0\n module_name = module[\"class name\"]\n for file in output[\"files\"]:\n for func in file[\"functions\"]:\n if \"class name\" in func and func[\"class name\"] == module_name:\n WMC += func[\"CC\"]\n n_func += 1\n module[\"WMC\"] = WMC\n module[\"no. functions\"] = n_func",
"def targets():\n return [\n # C++\n CppDistribTest(\"linux\", \"x64\", \"debian10\", \"cmake\", presubmit=True),\n CppDistribTest(\n \"linux\", \"x64\", \"debian10\", \"cmake_as_submodule\", presubmit=True\n ),\n CppDistribTest(\n \"linux\",\n \"x64\",\n \"debian10\",\n \"cmake_as_externalproject\",\n presubmit=True,\n ),\n CppDistribTest(\n \"linux\", \"x64\", \"debian10\", \"cmake_fetchcontent\", presubmit=True\n ),\n CppDistribTest(\n \"linux\", \"x64\", \"debian10\", \"cmake_module_install\", presubmit=True\n ),\n CppDistribTest(\n \"linux\", \"x64\", \"debian10\", \"cmake_pkgconfig\", presubmit=True\n ),\n CppDistribTest(\n \"linux\",\n \"x64\",\n \"debian10_aarch64_cross\",\n \"cmake_aarch64_cross\",\n presubmit=True,\n ),\n CppDistribTest(\"windows\", \"x86\", testcase=\"cmake\", presubmit=True),\n CppDistribTest(\n \"windows\",\n \"x86\",\n testcase=\"cmake_as_externalproject\",\n presubmit=True,\n ),\n # C#\n CSharpDistribTest(\n \"linux\", \"x64\", \"debian10\", use_dotnet_cli=True, presubmit=True\n ),\n CSharpDistribTest(\"linux\", \"x64\", \"ubuntu1604\", use_dotnet_cli=True),\n CSharpDistribTest(\n \"linux\", \"x64\", \"alpine\", use_dotnet_cli=True, presubmit=True\n ),\n CSharpDistribTest(\n \"linux\", \"x64\", \"dotnet31\", use_dotnet_cli=True, presubmit=True\n ),\n CSharpDistribTest(\n \"linux\", \"x64\", \"dotnet5\", use_dotnet_cli=True, presubmit=True\n ),\n CSharpDistribTest(\"macos\", \"x64\", use_dotnet_cli=True, presubmit=True),\n CSharpDistribTest(\"windows\", \"x86\", presubmit=True),\n CSharpDistribTest(\"windows\", \"x64\", presubmit=True),\n # Python\n PythonDistribTest(\"linux\", \"x64\", \"buster\", presubmit=True),\n PythonDistribTest(\"linux\", \"x86\", \"buster\", presubmit=True),\n PythonDistribTest(\"linux\", \"x64\", \"fedora34\"),\n PythonDistribTest(\"linux\", \"x64\", \"arch\"),\n PythonDistribTest(\"linux\", \"x64\", \"alpine\"),\n PythonDistribTest(\"linux\", \"x64\", \"ubuntu2004\"),\n PythonDistribTest(\n \"linux\", \"aarch64\", \"python38_buster\", presubmit=True\n ),\n PythonDistribTest(\n \"linux\", \"x64\", \"alpine3.7\", source=True, presubmit=True\n ),\n PythonDistribTest(\n \"linux\", \"x64\", \"buster\", source=True, presubmit=True\n ),\n PythonDistribTest(\n \"linux\", \"x86\", \"buster\", source=True, presubmit=True\n ),\n PythonDistribTest(\"linux\", \"x64\", \"fedora34\", source=True),\n PythonDistribTest(\"linux\", \"x64\", \"arch\", source=True),\n PythonDistribTest(\"linux\", \"x64\", \"ubuntu2004\", source=True),\n # Ruby\n RubyDistribTest(\n \"linux\",\n \"x64\",\n \"debian10\",\n ruby_version=\"ruby_2_7\",\n source=True,\n presubmit=True,\n ),\n RubyDistribTest(\n \"linux\", \"x64\", \"debian10\", ruby_version=\"ruby_3_0\", presubmit=True\n ),\n RubyDistribTest(\"linux\", \"x64\", \"centos7\"),\n RubyDistribTest(\"linux\", \"x64\", \"ubuntu1604\"),\n RubyDistribTest(\"linux\", \"x64\", \"ubuntu1804\", presubmit=True),\n # PHP7\n PHP7DistribTest(\"linux\", \"x64\", \"debian10\", presubmit=True),\n PHP7DistribTest(\"macos\", \"x64\", presubmit=True),\n ]",
"def gather() -> None:\n # pylint: disable=too-many-locals\n\n # First off, clear out any existing output.\n existing_dirs = [\n os.path.join('src/external', d) for d in os.listdir('src/external')\n if d.startswith('python-') and d != 'python-notes.txt'\n ]\n existing_dirs += [\n os.path.join('assets/src', d) for d in os.listdir('assets/src')\n if d.startswith('pylib-')\n ]\n for existing_dir in existing_dirs:\n efrotools.run('rm -rf \"' + existing_dir + '\"')\n\n for buildtype in ['debug', 'release']:\n debug = buildtype == 'debug'\n bsuffix = '_debug' if buildtype == 'debug' else ''\n bsuffix2 = '-debug' if buildtype == 'debug' else ''\n\n libname = 'python' + PYTHON_VERSION_MAJOR + ('dm' if debug else 'm')\n\n bases = {\n 'mac':\n f'build/python_apple_mac{bsuffix}/build/macOS',\n 'ios':\n f'build/python_apple_ios{bsuffix}/build/iOS',\n 'tvos':\n f'build/python_apple_tvos{bsuffix}/build/tvOS',\n 'android_arm':\n f'build/python_android_arm{bsuffix}/build/sysroot',\n 'android_arm64':\n f'build/python_android_arm64{bsuffix}/build/sysroot',\n 'android_x86':\n f'build/python_android_x86{bsuffix}/build/sysroot',\n 'android_x86_64':\n f'build/python_android_x86_64{bsuffix}/build/sysroot'\n }\n\n # Note: only need pylib for the first in each group.\n builds: List[Dict[str, Any]] = [{\n 'name':\n 'macos',\n 'group':\n 'apple',\n 'headers':\n bases['mac'] + '/Support/Python/Headers',\n 'libs': [\n bases['mac'] + '/Support/Python/libPython.a',\n bases['mac'] + '/Support/OpenSSL/libOpenSSL.a',\n bases['mac'] + '/Support/XZ/libxz.a'\n ],\n 'pylib':\n (bases['mac'] + '/python/lib/python' + PYTHON_VERSION_MAJOR),\n }, {\n 'name':\n 'ios',\n 'group':\n 'apple',\n 'headers':\n bases['ios'] + '/Support/Python/Headers',\n 'libs': [\n bases['ios'] + '/Support/Python/libPython.a',\n bases['ios'] + '/Support/OpenSSL/libOpenSSL.a',\n bases['ios'] + '/Support/XZ/libxz.a'\n ],\n }, {\n 'name':\n 'tvos',\n 'group':\n 'apple',\n 'headers':\n bases['tvos'] + '/Support/Python/Headers',\n 'libs': [\n bases['tvos'] + '/Support/Python/libPython.a',\n bases['tvos'] + '/Support/OpenSSL/libOpenSSL.a',\n bases['tvos'] + '/Support/XZ/libxz.a'\n ],\n }, {\n 'name':\n 'android_arm',\n 'group':\n 'android',\n 'headers':\n bases['android_arm'] + f'/usr/include/{libname}',\n 'libs': [\n bases['android_arm'] + f'/usr/lib/lib{libname}.a',\n bases['android_arm'] + '/usr/lib/libssl.a',\n bases['android_arm'] + '/usr/lib/libcrypto.a',\n bases['android_arm'] + '/usr/lib/liblzma.a',\n bases['android_arm'] + '/usr/lib/libsqlite3.a'\n ],\n 'libinst':\n 'android_armeabi-v7a',\n 'pylib': (bases['android_arm'] + '/usr/lib/python' +\n PYTHON_VERSION_MAJOR),\n }, {\n 'name': 'android_arm64',\n 'group': 'android',\n 'headers': bases['android_arm64'] + f'/usr/include/{libname}',\n 'libs': [\n bases['android_arm64'] + f'/usr/lib/lib{libname}.a',\n bases['android_arm64'] + '/usr/lib/libssl.a',\n bases['android_arm64'] + '/usr/lib/libcrypto.a',\n bases['android_arm64'] + '/usr/lib/liblzma.a',\n bases['android_arm64'] + '/usr/lib/libsqlite3.a'\n ],\n 'libinst': 'android_arm64-v8a',\n }, {\n 'name': 'android_x86',\n 'group': 'android',\n 'headers': bases['android_x86'] + f'/usr/include/{libname}',\n 'libs': [\n bases['android_x86'] + f'/usr/lib/lib{libname}.a',\n bases['android_x86'] + '/usr/lib/libssl.a',\n bases['android_x86'] + '/usr/lib/libcrypto.a',\n bases['android_x86'] + '/usr/lib/liblzma.a',\n bases['android_x86'] + '/usr/lib/libsqlite3.a'\n ],\n 'libinst': 'android_x86',\n }, {\n 'name': 'android_x86_64',\n 'group': 'android',\n 'headers': 
bases['android_x86_64'] + f'/usr/include/{libname}',\n 'libs': [\n bases['android_x86_64'] + f'/usr/lib/lib{libname}.a',\n bases['android_x86_64'] + '/usr/lib/libssl.a',\n bases['android_x86_64'] + '/usr/lib/libcrypto.a',\n bases['android_x86_64'] + '/usr/lib/liblzma.a',\n bases['android_x86_64'] + '/usr/lib/libsqlite3.a'\n ],\n 'libinst': 'android_x86_64',\n }]\n\n for build in builds:\n\n grp = build['group']\n builddir = f'src/external/python-{grp}{bsuffix2}'\n header_dst = os.path.join(builddir, 'include')\n lib_dst = os.path.join(builddir, 'lib')\n assets_src_dst = f'assets/src/pylib-{grp}'\n\n # Do some setup only once per group.\n if not os.path.exists(builddir):\n efrotools.run('mkdir -p \"' + builddir + '\"')\n efrotools.run('mkdir -p \"' + lib_dst + '\"')\n\n # Only pull modules into game assets on release pass.\n if not debug:\n # Copy system modules into the src assets\n # dir for this group.\n efrotools.run('mkdir -p \"' + assets_src_dst + '\"')\n efrotools.run(\n 'rsync --recursive --include \"*.py\"'\n ' --exclude __pycache__ --include \"*/\" --exclude \"*\" \"'\n + build['pylib'] + '/\" \"' + assets_src_dst + '\"')\n\n # Prune a bunch of modules we don't need to cut\n # down on size.\n prune = [\n 'config-*', 'idlelib', 'lib-dynload', 'lib2to3',\n 'multiprocessing', 'pydoc_data', 'site-packages',\n 'ensurepip', 'tkinter', 'wsgiref', 'distutils',\n 'turtle.py', 'turtledemo', 'test', 'sqlite3/test',\n 'unittest', 'dbm', 'venv', 'ctypes/test', 'imaplib.py',\n '_sysconfigdata_*'\n ]\n efrotools.run('cd \"' + assets_src_dst + '\" && rm -rf ' +\n ' '.join(prune))\n\n # Some minor filtering to system scripts:\n # on iOS/tvOS, addusersitepackages() leads to a crash\n # due to _sysconfigdata_dm_ios_darwin module not existing,\n # so let's skip that.\n fname = f'{assets_src_dst}/site.py'\n txt = efrotools.readfile(fname)\n txt = efrotools.replace_one(\n txt,\n ' known_paths = addusersitepackages(known_paths)',\n ' # efro tweak: this craps out on ios/tvos.\\n'\n ' # (and we don\\'t use it anyway)\\n'\n ' # known_paths = addusersitepackages(known_paths)')\n efrotools.writefile(fname, txt)\n\n # Copy in a base set of headers (everything in a group should\n # be using the same headers)\n efrotools.run(f'cp -r \"{build[\"headers\"]}\" \"{header_dst}\"')\n\n # Clear whatever pyconfigs came across; we'll build our own\n # universal one below.\n efrotools.run('rm ' + header_dst + '/pyconfig*')\n\n # Write a master pyconfig header that reroutes to each\n # platform's actual header.\n with open(header_dst + '/pyconfig.h', 'w') as hfile:\n hfile.write(\n '#if BA_OSTYPE_MACOS\\n'\n '#include \"pyconfig-macos.h\"\\n\\n'\n '#elif BA_OSTYPE_IOS\\n'\n '#include \"pyconfig-ios.h\"\\n\\n'\n '#elif BA_OSTYPE_TVOS\\n'\n '#include \"pyconfig-tvos.h\"\\n\\n'\n '#elif BA_OSTYPE_ANDROID and defined(__arm__)\\n'\n '#include \"pyconfig-android_arm.h\"\\n\\n'\n '#elif BA_OSTYPE_ANDROID and defined(__aarch64__)\\n'\n '#include \"pyconfig-android_arm64.h\"\\n\\n'\n '#elif BA_OSTYPE_ANDROID and defined(__i386__)\\n'\n '#include \"pyconfig-android_x86.h\"\\n\\n'\n '#elif BA_OSTYPE_ANDROID and defined(__x86_64__)\\n'\n '#include \"pyconfig-android_x86_64.h\"\\n\\n'\n '#else\\n'\n '#error unknown platform\\n\\n'\n '#endif\\n')\n\n # Now copy each build's config headers in with unique names.\n cfgs = [\n f for f in os.listdir(build['headers'])\n if f.startswith('pyconfig')\n ]\n\n # Copy config headers to their filtered names.\n for cfg in cfgs:\n out = cfg.replace('pyconfig', 'pyconfig-' + build['name'])\n if cfg == 
'pyconfig.h':\n\n # For platform's root pyconfig.h we need to filter\n # contents too (those headers can themselves include\n # others; ios for instance points to a arm64 and a\n # x86_64 variant).\n contents = efrotools.readfile(build['headers'] + '/' + cfg)\n contents = contents.replace('pyconfig',\n 'pyconfig-' + build['name'])\n efrotools.writefile(header_dst + '/' + out, contents)\n else:\n # other configs we just rename\n efrotools.run('cp \"' + build['headers'] + '/' + cfg +\n '\" \"' + header_dst + '/' + out + '\"')\n\n # Copy in libs. If the lib gave a specific install name,\n # use that; otherwise use name.\n targetdir = lib_dst + '/' + build.get('libinst', build['name'])\n efrotools.run('rm -rf \"' + targetdir + '\"')\n efrotools.run('mkdir -p \"' + targetdir + '\"')\n for lib in build['libs']:\n efrotools.run('cp \"' + lib + '\" \"' + targetdir + '\"')\n\n print('Great success!')",
"def ci(session):\n session.install('-rrequirements-dev.txt')\n session.install('-e', '.')\n run_sphinx(session)\n run_yapf(session, True)\n run_all_linters(session)\n run_pytest_units(session)\n run_pytest_integrations(session)",
"def run(self):\n self._make_lib_file_symbolic_links()\n self._copy_each_include_files_to_include_dir()\n self._make_dep_lib_file_sym_links_and_copy_include_files()\n self.setup_py.add_patchs_to_build_without_pkg_config(\n self.rpm.lib_dir, self.rpm.include_dir\n )\n self.setup_py.apply_and_save()\n self._build_and_install()",
"def test_core_modules(testing_config):\n cache_dir = Path(testing_config.src_cache_root, \".conda-build\", \"pickled.cb\")\n perl_version = testing_config.variant.get(\n \"perl\", get_default_variant(testing_config)[\"perl\"]\n )\n core_modules = get_core_modules_for_this_perl_version(perl_version, str(cache_dir))\n assert \"Config\" in core_modules\n assert \"Module::Build\" not in core_modules",
"def library_dirs(self):",
"def labs(lab_sources, headers, deps):\n return [compile(src, headers) for src in lab_sources]",
"def test_project_with_dependencies(self):\n self.make_project()\n # 'test_library.zip' is not currently compiled for diorite.\n self.project.app_platforms = \"aplite,basalt,chalk\"\n self.project.save()\n tempdir = tempfile.mkdtemp()\n try:\n # Extract a premade library to a temporary directory\n ZipFile(LIBRARY_PATH).extractall(tempdir)\n lib_path = os.path.join(tempdir, 'libname')\n\n # Include the library in the code and package.json\n self.add_file(\"main.c\", DEPENDENCY_MAIN)\n self.project.set_dependencies({\n 'libname': lib_path\n })\n\n # Compile and check\n self.compile()\n self.check_compile_success(num_platforms=3)\n finally:\n shutil.rmtree(tempdir)",
"def helper_cccc(standardized_output: dict):\n\n for module in standardized_output[\"classes\"]:\n WMC = 0\n n_func = 0\n module_name = module[\"class name\"]\n for file in standardized_output[\"files\"]:\n for func in file[\"functions\"]:\n if \"class name\" in func and func[\"class name\"] == module_name:\n WMC += func[\"CC\"]\n n_func += 1\n module[\"WMC\"] = WMC\n module[\"no. functions\"] = n_func",
"def import_all():\n import theory",
"def main():\n run_test_summary1a()\n run_test_summary1c()\n run_test_summary1c()",
"def test_classes():\n\n # If we are in a source folder and these tests aren't installed as a\n # package, we want to load asn1crypto from this source folder\n tests_dir = os.path.dirname(os.path.abspath(__file__))\n\n asn1crypto = None\n if os.path.basename(tests_dir) == 'tests':\n asn1crypto = _import_from(\n 'asn1crypto',\n os.path.join(tests_dir, '..')\n )\n if asn1crypto is None:\n import asn1crypto\n\n if asn1crypto.__version__ != __version__:\n raise AssertionError(\n ('asn1crypto_tests version %s can not be run with ' % __version__) +\n ('asn1crypto version %s' % asn1crypto.__version__)\n )\n\n from .test_algos import AlgoTests\n from .test_cms import CMSTests\n from .test_crl import CRLTests\n from .test_csr import CSRTests\n from .test_init import InitTests\n from .test_keys import KeysTests\n from .test_ocsp import OCSPTests\n from .test_pem import PEMTests\n from .test_pkcs12 import PKCS12Tests\n from .test_tsp import TSPTests\n from .test_x509 import X509Tests\n from .test_util import UtilTests\n from .test_parser import ParserTests\n from .test_core import CoreTests\n\n return [\n AlgoTests,\n CMSTests,\n CRLTests,\n CSRTests,\n InitTests,\n KeysTests,\n OCSPTests,\n PEMTests,\n PKCS12Tests,\n TSPTests,\n UtilTests,\n ParserTests,\n X509Tests,\n CoreTests\n ]",
"def test_classes():\n\n # If we are in a source folder and these tests aren't installed as a\n # package, we want to load asn1crypto from this source folder\n tests_dir = os.path.dirname(os.path.abspath(__file__))\n\n asn1crypto = None\n if os.path.basename(tests_dir) == 'tests':\n asn1crypto = _import_from(\n 'asn1crypto',\n os.path.join(tests_dir, '..')\n )\n if asn1crypto is None:\n import asn1crypto\n\n if asn1crypto.__version__ != __version__:\n raise AssertionError(\n ('asn1crypto_tests version %s can not be run with ' % __version__) +\n ('asn1crypto version %s' % asn1crypto.__version__)\n )\n\n from .test_algos import AlgoTests\n from .test_cms import CMSTests\n from .test_crl import CRLTests\n from .test_csr import CSRTests\n from .test_init import InitTests\n from .test_keys import KeysTests\n from .test_ocsp import OCSPTests\n from .test_pem import PEMTests\n from .test_pkcs12 import PKCS12Tests\n from .test_tsp import TSPTests\n from .test_x509 import X509Tests\n from .test_util import UtilTests\n from .test_parser import ParserTests\n from .test_core import CoreTests\n\n return [\n AlgoTests,\n CMSTests,\n CRLTests,\n CSRTests,\n InitTests,\n KeysTests,\n OCSPTests,\n PEMTests,\n PKCS12Tests,\n TSPTests,\n UtilTests,\n ParserTests,\n X509Tests,\n CoreTests\n ]",
"def test_collect_integration_dependencies(self, module_repo):\n expected_result = {\n (\"HelloWorld\", True),\n (\"Claroty\", True),\n (\"EWS\", True),\n (\"CrisisManagement\", True),\n (\"CommonTypes\", True),\n }\n\n test_input = [\n {\n \"Dummy Integration\": {\n \"name\": \"Dummy Integration\",\n \"fromversion\": \"5.0.0\",\n \"pack\": \"dummy_pack\",\n \"classifiers\": \"HelloWorld\",\n \"mappers\": [\"Claroty-mapper\", \"EWS v2-mapper\"],\n \"incident_types\": \"HR Ticket\",\n \"indicator_fields\": \"CommonTypes\",\n }\n }\n ]\n\n found_result = PackDependencies._collect_integrations_dependencies(\n pack_integrations=test_input,\n id_set=module_repo.id_set.read_json_as_dict(),\n )\n\n assert set(found_result) == set(expected_result)",
"def test_arm_c_lib(self):\n mock_target = mock.MagicMock()\n mock_target.core = \"Cortex-M4\"\n mock_target.supported_c_libs = {\"arm\": [\"small\"]}\n mock_target.c_lib = \"sMALL\"\n del mock_target.default_lib\n mock_target.default_toolchain = \"ARM\"\n mock_target.supported_toolchains = [\"ARM\", \"uARM\", \"ARMC5\", \"ARMC6\"]\n arm_std_obj = ARM_STD(mock_target)\n arm_micro_obj = ARM_MICRO(mock_target)\n\n mock_target.default_toolchain = \"ARMC6\"\n arm_c6_obj = ARMC6(mock_target)\n\n self.assertIn(\"-D__MICROLIB\", arm_std_obj.flags[\"common\"])\n self.assertIn(\"-D__MICROLIB\", arm_micro_obj.flags[\"common\"])\n self.assertIn(\"-D__MICROLIB\", arm_c6_obj.flags[\"common\"])\n\n self.assertIn(\"--library_type=microlib\", arm_std_obj.flags[\"ld\"])\n self.assertIn(\"--library_type=microlib\", arm_micro_obj.flags[\"ld\"])\n self.assertIn(\"--library_type=microlib\", arm_c6_obj.flags[\"ld\"]) \n self.assertIn(\"--library_type=microlib\", arm_c6_obj.flags[\"asm\"])",
"def make_all():\n\n if not MASTER.exists():\n os.makedirs(MASTER)\n members = inspect.getmembers(sys.modules[__name__])\n members = [f for f in members if 'test_' in f[0]]\n for member in members:\n print('Running %s...' % member[0], end='')\n member[1](master=True)\n print('done!')",
"def make_all():\n\n if not MASTER.exists():\n os.makedirs(MASTER)\n members = inspect.getmembers(sys.modules[__name__])\n members = [f for f in members if 'test_' in f[0]]\n for member in members:\n print('Running %s...' % member[0], end='')\n member[1](master=True)\n print('done!')"
] | [
"0.5385712",
"0.53443336",
"0.51123476",
"0.51066065",
"0.5053206",
"0.504615",
"0.5035862",
"0.50291365",
"0.50230294",
"0.50175893",
"0.4983539",
"0.49827704",
"0.4977544",
"0.49708322",
"0.49618056",
"0.49440145",
"0.4931897",
"0.49209505",
"0.49121943",
"0.48950914",
"0.48858225",
"0.488245",
"0.48782206",
"0.48636824",
"0.4860275",
"0.4860275",
"0.48589674",
"0.48588789",
"0.48486173",
"0.48486173"
] | 0.72149426 | 0 |
Creates the three final Libraries. | def createFinalLibraries(INTERMEDIATELIBRARIES, DICOLIBRARIES):
#### Parse all the intermediate libraries files
for file in INTERMEDIATELIBRARIES:
fileName = os.path.basename(file).split(".fasta")[0]
#### Read and store the fasta sequences of the prelibraries
sequences=readInput.readFasta(file)
#### Save the three finals libraries
save.saveLibraries(sequences, DICOLIBRARIES) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def createIntermediateLibraries(LISTPRELIBRARIES, DICOLIBRARIES, CONFIG, DICOFINALCLASSIF):\n\t#### Parse all the intermediate libraries files\n\tfor preLibrary in LISTPRELIBRARIES:\n\t\t#### Retrieve the final classification name of the ET from the file name\n\t\tfinalClassification = os.path.basename(preLibrary).split(\".fasta\")[0]\n\t\t#### Read and store the fasta sequences of the prelibraries\n\t\tsequences=readInput.readFasta(preLibrary)\n\t\t#### Parse all the sequences\n\t\tfor id in sequences:\n\t\t\t#### Check the finalClassification of the sequences is in the ID\n\t\t\tif finalClassification.lower() in id.lower():\n\t\t\t\tDICOFINALCLASSIF[id]=finalClassification\n\t\t\t\tapplyFiltersForIntermediate(id, sequences, finalClassification, CONFIG, DICOLIBRARIES)",
"def makeLibrary(self):\n #------------------------------------------ Instance for the output file\n outputFile = open(\"%s/%s\" % (self.sceneryPath,self.libTxtFileName),\"w\")\n #------------------------------------------------------ write the header\n for line in self.header:\n outputFile.write(\"%s\\n\" % (line))\n #------------------------------------------------- Loop over all folders\n packageContent = os.walk(self.sceneryPath)\n for folder in packageContent:\n for fileName in folder[2]:\n fileType = fileName.split(\".\")[-1]\n if fileType in self.objectTypes:\n realPath = folder[0][len(self.sceneryPath)+1:].replace(\"\\\\\",\"/\")\n filePath = \"%s/%s\" % (realPath,fileName)\n print filePath\n outputFile.write(\"EXPORT %s%s %s%s\\n\" % (self.libPrefix,filePath,self.realPathPrefix,filePath))\n outputFile.close()",
"def _shared_library_in_2steps(env):\r\n\r\n if not config.shared_library_1st in env['BUILDERS']:\r\n # The 1st builder\r\n shlinkcom_name = config.shared_library_1st + \"COM\"\r\n env[shlinkcom_name] = \"${TEMPFILE('$AR /DEF $ARFLAGS /OUT:$TARGET $SOURCES')}\"\r\n ar_action = SCons.Action.Action(\"$\" + shlinkcom_name, \"building '$TARGET' from '$SOURCE'\")\r\n emitter_name = config.shared_library_1st + \"EMITTER\"\r\n env[emitter_name] = [__lib_export_emitter]\r\n env[\"BUILDERS\"][config.shared_library_1st] = SCons.Builder.Builder(\r\n action=ar_action,\r\n emitter=\"$\" + emitter_name,\r\n prefix=\"$LIBPREFIX\",\r\n suffix=\"$LIBSUFFIX\",\r\n src_suffix=\"$SHOBJSUFFIX\",\r\n src_builder=\"SharedObject\")\r\n\r\n if not config.shared_library_2nd in env['BUILDERS']:\r\n # The 2nd builder\r\n emitter_name = config.shared_library_2nd + \"EMITTER\"\r\n env[emitter_name] = [__win32_lib_emitter]\r\n env[\"BUILDERS\"][config.shared_library_2nd] = SCons.Builder.Builder(\r\n action=[SCons.Defaults.SharedCheck,\r\n SCons.Action.Action(\"$SHLINKCOM\", \"building '$TARGET' from '$SOURCE'\")],\r\n emitter=\"$\" + emitter_name,\r\n prefix=\"$SHLIBPREFIX\",\r\n suffix=\"$SHLIBSUFFIX\",\r\n target_scanner=SCons.Scanner.Prog.ProgramScanner(),\r\n src_suffix=\"$SHOBJSUFFIX\",\r\n src_builder=\"SharedObject\")",
"def library_dirs(self):",
"def linking_library_dirs(self):",
"def create_libs(desc: dict, pins: list, output_dir: str, verbose: bool = False):\n db = desc.copy()\n db[\"block_name\"] = desc[\"name_of_the_cell\"]\n db[\"area\"] = db.pop(\"block_area_(um2)\")\n db[\"pins\"] = pins\n db[\"types\"] = [pin for pin in pins if pin.width > 1]\n lib_paths = []\n for corner, condition in desc.get(\"corners\", {}).items():\n db[\"library\"] = \"%s_%s_%sV_%sC\" % (\n desc.get(\"name_of_the_cell\"),\n corner,\n (\"%.2f\" % condition.get(\"voltage\")).replace('.', '_'),\n str(condition.get(\"temperature\")).replace('-', 'm')\n )\n db[\"corner_name\"] = corner\n db[\"corner\"] = condition\n if verbose:\n print(db)\n # create directory if does not exist\n os.makedirs(output_dir, exist_ok=True)\n # generate lib file\n template_file = os.path.join(os.path.dirname(__file__), \"./template_ana.lib.mako\")\n _tmp = Template(filename=template_file)\n lib_path = os.path.join(output_dir, \"%s.lib\" % db[\"library\"])\n with open(lib_path, \"w+\") as fp:\n fp.write(_tmp.render_unicode(**db))\n lib_paths.append(lib_path)\n return lib_paths",
"def genLibData(self):\n import mush\n tsMain = string.Template(mush.libGenMain)\n tsIfAltId = string.Template(mush.libGenIfAltId)\n #--Data Records\n for id in ('lib_action','lib_actionCount'):\n glob = self.getRecord('GLOB',id,Glob)\n (glob.type, glob.value) = ('s',0)\n glob.setChanged()\n setAllCode = 'begin lib_setAllGS\\n'\n setNoneCode = 'begin lib_setNoneGS\\n'\n for libId in self.libList:\n (srcId,altId) = self.libMap[libId]\n srcBook = self.srcBooks.get(srcId)[0]\n if not srcBook:\n print '%s: Missing source: %s' % (libId,srcId)\n continue\n #--Global\n glob = self.getRecord('GLOB',libId+'G',Glob)\n (glob.type, glob.value) = ('s',0)\n glob.setChanged()\n #--Script\n scriptId = libId+'LS'\n script = self.getRecord('SCPT',scriptId,Scpt)\n scriptCode = tsMain.substitute(\n libId=libId, srcId=srcId, ifAltId=(\n (altId and tsIfAltId.substitute(libId=libId,altId=altId)) or ''))\n script.setCode(scriptCode)\n script.setChanged()\n #--Book\n srcBook.load(unpack=True)\n book = self.getRecord('BOOK',libId,Book)\n book.model = srcBook.model\n book.title = srcBook.title\n book.icon = srcBook.icon\n book.text = srcBook.text\n book.script = scriptId\n book.setChanged()\n #--Set Scripts\n setAllCode += 'set %sG to 1\\n' % (libId,)\n setNoneCode += 'set %sG to 0\\n' % (libId,)\n #--Set scripts\n for id,code in (('lib_setAllGS',setAllCode),('lib_setNoneGS',setNoneCode)):\n code += ';--Done\\nstopScript %s\\nend\\n' % (id,)\n script = self.getRecord('SCPT',id,Scpt)\n script.setCode(code)\n script.setChanged()",
"def initialize_libraries(experiment, ln):\n # Move into the folder to do the intial calculations in\n folder = \"initial_library\" + str(ln)\n os.chdir(folder) \n # Create a time stamp for beginning the calculations\n experiment[\"Summary\"] = \"Library \" + str(ln) + \" Initialization\\n\"\n experiment[\"Summary\"] += \"Started\" + SHARING.time_stamp()\n # Find the proper number of coordinates to consider\n N = len(experiment[\"Movements\"][ln])/2\n # Go through each antigen\n for mol in experiment[0]:\n # Apply the proper rotation\n for cn in range(N):\n # Create a generic vector of zeros of the appropriate length\n vector = [0.0] * N\n # Place a value of 1.0 in the correct location in the vector\n vector[cn] = 1.0\n # Find the angle to rotate the antigens by\n angle = experiment[\"Movements\"][ln][N+cn]\n # Rotate each of the antigens by the appropriate angle\n rmatrix = MOLECULES.calculate_rmatrix(angle, vector)\n MOLECULES.rotate(mol, rmatrix)\n # Translate each antigen by the appropriate amount\n MOLECULES.move(mol, experiment[\"Movements\"][ln][:N], '+')\n # Update the reference folder with these updated coordinates\n SHARING.output_Current(experiment, \"./Current/\") \n # Load the canonical structures\n canonicals = IPRO_FUNCTIONS.load_canonicals(experiment)\n cdrs = list(canonicals.keys())\n cdrs.sort()\n # Load the clashes\n clashes = IPRO_FUNCTIONS.load_clashes(experiment, cdrs) \n # Load the C++ scores\n raw_scores = IPRO_FUNCTIONS.load_scores(experiment[\"Folder\"])\n # Look for alternate solutions using integer cuts\n goOn = True\n # Store the solutions in a list\n solutions = [experiment[\"Scores\"][ln-1]]\n # Keep searching for alternate solutions until the quality of the result is\n # worse\n while goOn:\n # Resolve the MILP using integer cuts\n if useCPLEX:\n #solution = CPLEX.optcdr_canonicals(canonicals, clashes, \\\n # raw_scores[ln], solutions)\n pass\n else:\n solution = GAMS.optcdr_canonicals(canonicals, clashes, \\\n raw_scores[ln], solutions)\n # If the solution found has an equal objective value to the first, store\n # it and re-run the MILP\n if solution[\"Score\"] == experiment[\"Scores\"][ln-1][1][\"Score\"]:\n solutions.append([experiment[\"Scores\"][ln-1][0], solution])\n # Otherwise, break out of the loop and analyze the results\n else:\n goOn = False\n # Update the library based on the most members for the cluster\n best = 0\n # Skip this if there is only one solution after applying the integer cuts\n if len(solutions) > 1:\n # Load the clusters\n cdrs = list(canonicals.keys())\n cdrs.sort()\n clusters = load_clusters(experiment, cdrs)\n # Initialize the variables to store the solution with the most cluster\n # members\n best = None\n amount = 0\n # Go through the solutions\n for i, solution in enumerate(solutions):\n # Store the total number of members throughout the CDRs\n total = 0\n # Go through the CDRs\n for j, cdr in enumerate(cdrs):\n # Extract the number of members from the \"clusters\" dictionary \n members = clusters[cdr][solution[1][j+1]][\"Members\"]\n # 30 is the number where the permitted amino acids change from\n # \"of the same type\" to \"only those observed\" at each position\n if members > 30:\n members = 30\n # Add the number of members to the total for this solution\n total += members\n # If applicable, update the \"best\" solution found and its\n # corresponding total number of members\n if total > amount:\n best = i\n amount = total\n # Update the library based on the most structures\n experiment[\"Scores\"][ln-1] = 
solutions[best]\n # If the set of canonical structures has changed, update the referenced\n # values\n if best != 0:\n SHARING.output_scores(experiment, experiment[\"Folder\"] + \"Current/\", ln)\n # Copy the necessary files\n SHARING.copy_standard_files(experiment, solv = True) \n # Generate the antibody structures\n build_antibodies(experiment, canonicals, ln) \n # Go back to the home directory\n os.chdir(\"../\")\n # Try to create a new folder to handle the IPRO affinity maturation\n folder = \"library\" + str(ln)\n try:\n os.mkdir(folder)\n # If the folder already exists, delete it and make a new one. This is the\n # proper procedure since the library should only be there if the\n # initialization has already finished\n except OSError:\n os.system(\"rm -rf \" + folder)\n os.mkdir(folder)\n # Create a new Experiment class object to handle the IPRO affinity maturation\n make_IPRO_experiment(experiment, folder)\n # Delete the initialization folder\n os.system(\"rm -rf initial_\" + folder) \n # Update the summary file\n # Create a summary file\n experiment[\"Summary\"] += \"Ended\" + SHARING.time_stamp()\n name = SHARING.summary_name(SHARING.get_current())\n f = open(name, \"a\")\n f.write(experiment[\"Summary\"])\n f.close()",
"def build_assets():\n\n # templates\n template = open(os.path.join(BASE_PATH, 'AssetLibrary.as.template'), 'r').read()\n\n embed_templates = {\n 'image': \"[Embed(source='%(asset_path)s')] private var %(asset_class_name)s:Class;\\n\",\n 'mp3': \"[Embed(source='%(asset_path)s')] private var %(asset_class_name)s:Class;\\n\", \n 'xml': \"[Embed(source='%(asset_path)s', mimeType=\\\"application/octet-stream\\\")] private var %(asset_class_name)s:Class;\\n\"\n }\n \n library_element_template = \"'%(asset_id)s': %(asset_class_name)s\"\n\n # load+parse asset xml\n complete_asset_embed_code = \"\"\n complete_asset_data_code = \"\"\n asset_dom = minidom.parse(ASSET_XML_FILE)\n \n asset_nodes = list(asset_dom.getElementsByTagName('asset'))\n \n for asset_node in asset_nodes:\n asset_attrs = dict(asset_node.attributes.items())\n asset_embed_code = embed_templates[asset_attrs['type']] % {\n 'asset_class_name': asset_attrs['name'],\n 'asset_path': ASSET_BASE + asset_attrs['file']\n }\n\n complete_asset_embed_code += asset_embed_code\n \n asset_data_code = library_element_template % {\n 'asset_id': asset_attrs['name'],\n 'asset_class_name': asset_attrs['name']\n }\n\n complete_asset_data_code += asset_data_code\n\n if asset_nodes.index(asset_node) == len(asset_nodes) - 1:\n complete_asset_data_code += \"\\n\"\n else:\n complete_asset_data_code += \",\\n\"\n \n output = template % {\n 'asset_embeds': complete_asset_embed_code,\n 'asset_data': complete_asset_data_code\n }\n \n # render\n output_f = open(os.path.join(BASE_PATH, 'AssetLibrary.as'), 'w')\n output_f.write(output)",
"def test_project_with_dependencies(self):\n self.make_project()\n # 'test_library.zip' is not currently compiled for diorite.\n self.project.app_platforms = \"aplite,basalt,chalk\"\n self.project.save()\n tempdir = tempfile.mkdtemp()\n try:\n # Extract a premade library to a temporary directory\n ZipFile(LIBRARY_PATH).extractall(tempdir)\n lib_path = os.path.join(tempdir, 'libname')\n\n # Include the library in the code and package.json\n self.add_file(\"main.c\", DEPENDENCY_MAIN)\n self.project.set_dependencies({\n 'libname': lib_path\n })\n\n # Compile and check\n self.compile()\n self.check_compile_success(num_platforms=3)\n finally:\n shutil.rmtree(tempdir)",
"def make_productions3(self):\n self.make_productions2()\n for prod in self.make_productions_preterminals():\n self.productions.add(prod)",
"def save_libraries(self, a, lib):\n logging.debug(\"in save libraries\")\n self.libraries.append(lib)\n self.produce(\"library\", lib)",
"def _copy_bins():\n # STEP 1: If we're performing a build from a copied source tree,\n # copy the generated python files into the package\n\n _clean_bins()\n\n py_z3_build_dir = os.path.join(BUILD_DIR, 'python', 'z3')\n root_z3_dir = os.path.join(ROOT_DIR, 'z3')\n shutil.copy(os.path.join(py_z3_build_dir, 'z3core.py'), root_z3_dir)\n shutil.copy(os.path.join(py_z3_build_dir, 'z3consts.py'), root_z3_dir)\n\n # STEP 2: Copy the shared library, the executable and the headers\n\n os.mkdir(LIBS_DIR)\n os.mkdir(BINS_DIR)\n os.mkdir(HEADERS_DIR)\n shutil.copy(os.path.join(BUILD_DIR, LIBRARY_FILE), LIBS_DIR)\n shutil.copy(os.path.join(BUILD_DIR, EXECUTABLE_FILE), BINS_DIR)\n path1 = glob.glob(os.path.join(BUILD_DIR, \"msvcp*\"))\n path2 = glob.glob(os.path.join(BUILD_DIR, \"vcomp*\"))\n path3 = glob.glob(os.path.join(BUILD_DIR, \"vcrun*\"))\n for filepath in path1 + path2 + path3:\n shutil.copy(filepath, LIBS_DIR)\n\n for header_dir in HEADER_DIRS:\n for fname in os.listdir(header_dir):\n if not fname.endswith('.h'):\n continue\n shutil.copy(os.path.join(header_dir, fname), os.path.join(HEADERS_DIR, fname))\n\n # This hack lets z3 installed libs link on M1 macs; it is a hack, not a proper fix\n # @TODO: Linked issue: https://github.com/Z3Prover/z3/issues/5926\n major_minor = '.'.join(_z3_version().split('.')[:2])\n link_name = None\n if BUILD_PLATFORM in ('win32', 'cygwin', 'win'):\n pass # TODO: When windows VMs work on M1, fill this in\n elif BUILD_PLATFORM in ('darwin', 'osx'):\n split = LIBRARY_FILE.split('.')\n link_name = split[0] + '.' + major_minor + '.' + split[1]\n else:\n link_name = LIBRARY_FILE + '.' + major_minor\n if link_name:\n os.symlink(LIBRARY_FILE, os.path.join(LIBS_DIR, link_name), True)",
"def add_library(self):\n library = self.new_section('The Library')\n books = self.wiki('the-library')._soup(class_='boxbook')\n template = (\n '<div class=\"book-title\">{}</div>'\n '<div class=\"book-description\">{}</div>')\n for b in books:\n title = b.find(class_='booktitle').string\n description = b.find(class_='boxleft')('div')[0].text.strip()\n excerpts = [self.wiki.site + a['href']\n for a in b.find(class_='boxright')('a')]\n if title == 'The Journal of Aframos Longjourney':\n links = self.wiki(excerpts[1])._soup.select('#page-content a')\n links = [\n 'http://wanderers-library.wikidot.com/' +\n l['href'].split('/')[-1] for l in links]\n excerpts = [excerpts[0]] + links\n book = self.add_page(\n title, template.format(title, description), library)\n for url in excerpts:\n self.add_url(url, book)",
"def create_data_base():\n\n\tscript_files = []\n\tjson_files = []\n\t\n\t# get script files list\n\tfor file in os.listdir(\"learned_objects_scripts/\"):\n\t\tif file.endswith(\".script\"):\n\t\t\tscript_files.append(file)\n\n\t# get json files list\n\tfor file in os.listdir(\"object_models/\"):\n\t\tif file.endswith(\".json\"):\n\t\t\tjson_files.append(file)\n\t\n\t# create json file for new objects\n\tmodel_created = False\n\tfor file in script_files:\n\t\tif \"{}.json\".format(file[:-7]) not in json_files:\n\t\t\twith open(\"object_models/{}.json\".format(file[:-7]), 'w') as outfile:\n\t\t\t\tobj_model = object_script_to_model(\"learned_objects_scripts/\" + file)\n\t\t\t\tjson.dump(obj_model, outfile)\n\t\t\t\tmodel_created = True\n\t\t\t\tprint(\"model created for\", file)\n\tif not model_created:\n\t\tprint(\"data base is already up to date\")",
"def gather() -> None:\n # pylint: disable=too-many-locals\n\n # First off, clear out any existing output.\n existing_dirs = [\n os.path.join('src/external', d) for d in os.listdir('src/external')\n if d.startswith('python-') and d != 'python-notes.txt'\n ]\n existing_dirs += [\n os.path.join('assets/src', d) for d in os.listdir('assets/src')\n if d.startswith('pylib-')\n ]\n for existing_dir in existing_dirs:\n efrotools.run('rm -rf \"' + existing_dir + '\"')\n\n for buildtype in ['debug', 'release']:\n debug = buildtype == 'debug'\n bsuffix = '_debug' if buildtype == 'debug' else ''\n bsuffix2 = '-debug' if buildtype == 'debug' else ''\n\n libname = 'python' + PYTHON_VERSION_MAJOR + ('dm' if debug else 'm')\n\n bases = {\n 'mac':\n f'build/python_apple_mac{bsuffix}/build/macOS',\n 'ios':\n f'build/python_apple_ios{bsuffix}/build/iOS',\n 'tvos':\n f'build/python_apple_tvos{bsuffix}/build/tvOS',\n 'android_arm':\n f'build/python_android_arm{bsuffix}/build/sysroot',\n 'android_arm64':\n f'build/python_android_arm64{bsuffix}/build/sysroot',\n 'android_x86':\n f'build/python_android_x86{bsuffix}/build/sysroot',\n 'android_x86_64':\n f'build/python_android_x86_64{bsuffix}/build/sysroot'\n }\n\n # Note: only need pylib for the first in each group.\n builds: List[Dict[str, Any]] = [{\n 'name':\n 'macos',\n 'group':\n 'apple',\n 'headers':\n bases['mac'] + '/Support/Python/Headers',\n 'libs': [\n bases['mac'] + '/Support/Python/libPython.a',\n bases['mac'] + '/Support/OpenSSL/libOpenSSL.a',\n bases['mac'] + '/Support/XZ/libxz.a'\n ],\n 'pylib':\n (bases['mac'] + '/python/lib/python' + PYTHON_VERSION_MAJOR),\n }, {\n 'name':\n 'ios',\n 'group':\n 'apple',\n 'headers':\n bases['ios'] + '/Support/Python/Headers',\n 'libs': [\n bases['ios'] + '/Support/Python/libPython.a',\n bases['ios'] + '/Support/OpenSSL/libOpenSSL.a',\n bases['ios'] + '/Support/XZ/libxz.a'\n ],\n }, {\n 'name':\n 'tvos',\n 'group':\n 'apple',\n 'headers':\n bases['tvos'] + '/Support/Python/Headers',\n 'libs': [\n bases['tvos'] + '/Support/Python/libPython.a',\n bases['tvos'] + '/Support/OpenSSL/libOpenSSL.a',\n bases['tvos'] + '/Support/XZ/libxz.a'\n ],\n }, {\n 'name':\n 'android_arm',\n 'group':\n 'android',\n 'headers':\n bases['android_arm'] + f'/usr/include/{libname}',\n 'libs': [\n bases['android_arm'] + f'/usr/lib/lib{libname}.a',\n bases['android_arm'] + '/usr/lib/libssl.a',\n bases['android_arm'] + '/usr/lib/libcrypto.a',\n bases['android_arm'] + '/usr/lib/liblzma.a',\n bases['android_arm'] + '/usr/lib/libsqlite3.a'\n ],\n 'libinst':\n 'android_armeabi-v7a',\n 'pylib': (bases['android_arm'] + '/usr/lib/python' +\n PYTHON_VERSION_MAJOR),\n }, {\n 'name': 'android_arm64',\n 'group': 'android',\n 'headers': bases['android_arm64'] + f'/usr/include/{libname}',\n 'libs': [\n bases['android_arm64'] + f'/usr/lib/lib{libname}.a',\n bases['android_arm64'] + '/usr/lib/libssl.a',\n bases['android_arm64'] + '/usr/lib/libcrypto.a',\n bases['android_arm64'] + '/usr/lib/liblzma.a',\n bases['android_arm64'] + '/usr/lib/libsqlite3.a'\n ],\n 'libinst': 'android_arm64-v8a',\n }, {\n 'name': 'android_x86',\n 'group': 'android',\n 'headers': bases['android_x86'] + f'/usr/include/{libname}',\n 'libs': [\n bases['android_x86'] + f'/usr/lib/lib{libname}.a',\n bases['android_x86'] + '/usr/lib/libssl.a',\n bases['android_x86'] + '/usr/lib/libcrypto.a',\n bases['android_x86'] + '/usr/lib/liblzma.a',\n bases['android_x86'] + '/usr/lib/libsqlite3.a'\n ],\n 'libinst': 'android_x86',\n }, {\n 'name': 'android_x86_64',\n 'group': 'android',\n 'headers': 
bases['android_x86_64'] + f'/usr/include/{libname}',\n 'libs': [\n bases['android_x86_64'] + f'/usr/lib/lib{libname}.a',\n bases['android_x86_64'] + '/usr/lib/libssl.a',\n bases['android_x86_64'] + '/usr/lib/libcrypto.a',\n bases['android_x86_64'] + '/usr/lib/liblzma.a',\n bases['android_x86_64'] + '/usr/lib/libsqlite3.a'\n ],\n 'libinst': 'android_x86_64',\n }]\n\n for build in builds:\n\n grp = build['group']\n builddir = f'src/external/python-{grp}{bsuffix2}'\n header_dst = os.path.join(builddir, 'include')\n lib_dst = os.path.join(builddir, 'lib')\n assets_src_dst = f'assets/src/pylib-{grp}'\n\n # Do some setup only once per group.\n if not os.path.exists(builddir):\n efrotools.run('mkdir -p \"' + builddir + '\"')\n efrotools.run('mkdir -p \"' + lib_dst + '\"')\n\n # Only pull modules into game assets on release pass.\n if not debug:\n # Copy system modules into the src assets\n # dir for this group.\n efrotools.run('mkdir -p \"' + assets_src_dst + '\"')\n efrotools.run(\n 'rsync --recursive --include \"*.py\"'\n ' --exclude __pycache__ --include \"*/\" --exclude \"*\" \"'\n + build['pylib'] + '/\" \"' + assets_src_dst + '\"')\n\n # Prune a bunch of modules we don't need to cut\n # down on size.\n prune = [\n 'config-*', 'idlelib', 'lib-dynload', 'lib2to3',\n 'multiprocessing', 'pydoc_data', 'site-packages',\n 'ensurepip', 'tkinter', 'wsgiref', 'distutils',\n 'turtle.py', 'turtledemo', 'test', 'sqlite3/test',\n 'unittest', 'dbm', 'venv', 'ctypes/test', 'imaplib.py',\n '_sysconfigdata_*'\n ]\n efrotools.run('cd \"' + assets_src_dst + '\" && rm -rf ' +\n ' '.join(prune))\n\n # Some minor filtering to system scripts:\n # on iOS/tvOS, addusersitepackages() leads to a crash\n # due to _sysconfigdata_dm_ios_darwin module not existing,\n # so let's skip that.\n fname = f'{assets_src_dst}/site.py'\n txt = efrotools.readfile(fname)\n txt = efrotools.replace_one(\n txt,\n ' known_paths = addusersitepackages(known_paths)',\n ' # efro tweak: this craps out on ios/tvos.\\n'\n ' # (and we don\\'t use it anyway)\\n'\n ' # known_paths = addusersitepackages(known_paths)')\n efrotools.writefile(fname, txt)\n\n # Copy in a base set of headers (everything in a group should\n # be using the same headers)\n efrotools.run(f'cp -r \"{build[\"headers\"]}\" \"{header_dst}\"')\n\n # Clear whatever pyconfigs came across; we'll build our own\n # universal one below.\n efrotools.run('rm ' + header_dst + '/pyconfig*')\n\n # Write a master pyconfig header that reroutes to each\n # platform's actual header.\n with open(header_dst + '/pyconfig.h', 'w') as hfile:\n hfile.write(\n '#if BA_OSTYPE_MACOS\\n'\n '#include \"pyconfig-macos.h\"\\n\\n'\n '#elif BA_OSTYPE_IOS\\n'\n '#include \"pyconfig-ios.h\"\\n\\n'\n '#elif BA_OSTYPE_TVOS\\n'\n '#include \"pyconfig-tvos.h\"\\n\\n'\n '#elif BA_OSTYPE_ANDROID and defined(__arm__)\\n'\n '#include \"pyconfig-android_arm.h\"\\n\\n'\n '#elif BA_OSTYPE_ANDROID and defined(__aarch64__)\\n'\n '#include \"pyconfig-android_arm64.h\"\\n\\n'\n '#elif BA_OSTYPE_ANDROID and defined(__i386__)\\n'\n '#include \"pyconfig-android_x86.h\"\\n\\n'\n '#elif BA_OSTYPE_ANDROID and defined(__x86_64__)\\n'\n '#include \"pyconfig-android_x86_64.h\"\\n\\n'\n '#else\\n'\n '#error unknown platform\\n\\n'\n '#endif\\n')\n\n # Now copy each build's config headers in with unique names.\n cfgs = [\n f for f in os.listdir(build['headers'])\n if f.startswith('pyconfig')\n ]\n\n # Copy config headers to their filtered names.\n for cfg in cfgs:\n out = cfg.replace('pyconfig', 'pyconfig-' + build['name'])\n if cfg == 
'pyconfig.h':\n\n # For platform's root pyconfig.h we need to filter\n # contents too (those headers can themselves include\n # others; ios for instance points to a arm64 and a\n # x86_64 variant).\n contents = efrotools.readfile(build['headers'] + '/' + cfg)\n contents = contents.replace('pyconfig',\n 'pyconfig-' + build['name'])\n efrotools.writefile(header_dst + '/' + out, contents)\n else:\n # other configs we just rename\n efrotools.run('cp \"' + build['headers'] + '/' + cfg +\n '\" \"' + header_dst + '/' + out + '\"')\n\n # Copy in libs. If the lib gave a specific install name,\n # use that; otherwise use name.\n targetdir = lib_dst + '/' + build.get('libinst', build['name'])\n efrotools.run('rm -rf \"' + targetdir + '\"')\n efrotools.run('mkdir -p \"' + targetdir + '\"')\n for lib in build['libs']:\n efrotools.run('cp \"' + lib + '\" \"' + targetdir + '\"')\n\n print('Great success!')",
"def test_index_libraries(self):\n result1 = self._create_library(slug=\"test-lib-index-1\", title=\"Title 1\", description=\"Description\")\n result2 = self._create_library(slug=\"test-lib-index-2\", title=\"Title 2\", description=\"Description\")\n\n for result in [result1, result2]:\n library_key = LibraryLocatorV2.from_string(result['id'])\n response = ContentLibraryIndexer.get_items([library_key])[0]\n\n assert response['id'] == result['id']\n assert response['title'] == result['title']\n assert response['description'] == result['description']\n assert response['uuid'] == result['bundle_uuid']\n assert response['num_blocks'] == 0\n assert response['version'] == result['version']\n assert response['last_published'] is None\n assert response['has_unpublished_changes'] is False\n assert response['has_unpublished_deletes'] is False",
"def makeProjects(self, *versions):\n baseDirectory = FilePath(self.mktemp())\n baseDirectory.createDirectory()\n for version in versions:\n self.makeProject(version, baseDirectory)\n return baseDirectory",
"def create_init_files(self, app_label, model_names, models):\n model_name_slugs = [\"%s_views\" % (self.camel_to_slug(model_name)) for model_name in model_names]\n model_names_dict = {self.camel_to_slug(model.__name__): self.camel_to_slug(self.model_name_plural(model)) for\n model in models}\n for folder_name in [\"views\", \"urls\"]:\n file_path = \"%s/%s/__init__.py\" % (app_label, folder_name)\n template_path = \"django_baker/__init__%s\" % folder_name\n self.create_file_from_template(file_path, template_path, {\"app_label\": app_label,\n \"model_name_slugs\": model_name_slugs,\n \"model_names_dict\": model_names_dict\n })",
"def mk_rg3(self):\n pass",
"def main():\n # Create / clean output dir\n if os.path.isdir(OUT_DIR):\n shutil.rmtree(OUT_DIR)\n os.mkdir(OUT_DIR)\n\n # Write all assets to the directory\n for fname, bb in create_assets().items():\n filename = os.path.join(OUT_DIR, fname)\n dirname = os.path.dirname(filename)\n if not os.path.isdir(dirname):\n os.makedirs(dirname)\n with open(filename, \"wb\") as f:\n f.write(bb)",
"def __init__(self):\n self._ll = LowLevelLibs()\n self._lib = self._ll.phe",
"def __init__(self, lib_dir, dist_dir, windows_exe_files=[],\n console_exe_files=[], service_exe_files=[],\n comserver_files=[], lib_files=[]):\n self.lib_dir = lib_dir\n self.dist_dir = dist_dir\n if not self.dist_dir[-1] in \"\\\\/\":\n self.dist_dir += \"\\\\\"\n self.name = AppName\n self.lname = AppName.lower()\n self.version = AppVersion\n self.windows_exe_files = [self.chop(p) for p in windows_exe_files]\n self.console_exe_files = [self.chop(p) for p in console_exe_files]\n self.service_exe_files = [self.chop(p) for p in service_exe_files]\n self.comserver_files = [self.chop(p) for p in comserver_files]\n self.lib_files = [self.chop(p) for p in lib_files]\n self.icon = os.path.abspath(r'doc\\icon\\favicon.ico')",
"def _setup(self):\n mkdir_p(self.output_folder)\n if self.symlink_dir:\n mkdir_p(self.symlink_dir)\n try:\n selected_versions = self._resolve_dependencies()\n if selected_versions:\n self._write_lock(selected_versions)\n print('\\n\\nVersions Selected for downloading:\\n')\n print('\\t' + '\\n\\t'.join(['{}: {}'.format(req, ver) for req, ver in selected_versions.items()]) + '\\n')\n for pkg_name, version in selected_versions.items():\n pkg_metadata = self._get_metadata(pkg_name)\n version_metadata = pkg_metadata.get('versions', dict()).get(str(version), dict())\n self._download_package(version_metadata)\n except (RequirementMatchError, DependencyError) as e:\n print(e.message)\n return self.created()",
"def get_libraries(self, archs: list[str]):\n libraries = self.ScopedLibraryDict.get(\"common\", []).copy()\n\n for arch in archs:\n libraries + self.ScopedLibraryDict.get(arch, []).copy()\n return list(set(libraries))",
"def makeProjects(self, *versions):\n baseDirectory = FilePath(self.mktemp())\n for version in versions:\n self.makeProject(version, baseDirectory)\n return baseDirectory",
"def create_aiida_project_environment(self):\n try:\n self.create_folder_structure()\n self.build_python_environment()\n self.install_packages_from_index()\n except Exception:\n self.exit_on_exception()\n raise\n self.create_spec_entry()",
"def create_packages(self):\n if not self.rewrite:\n # The extra package structure is only required for vendored code used via import rewrites.\n return\n\n for index, _ in enumerate(self._subpath_components):\n relpath = _PACKAGE_COMPONENTS + self._subpath_components[: index + 1] + [\"__init__.py\"]\n touch(os.path.join(self.ROOT, *relpath))",
"def setup_lib(CLIB):\n # {{ SETUP_LIB }}",
"def gen_library(self):\n newlibrary = self.newlibrary\n whelpers.add_all_helpers(newlibrary.symtab)\n\n self.function_index = newlibrary.function_index\n self.class_map = newlibrary.class_map\n\n self.instantiate_all_classes(newlibrary.wrap_namespace)\n self.update_templated_typemaps(newlibrary.wrap_namespace)\n self.gen_namespace(newlibrary.wrap_namespace)"
] | [
"0.6650628",
"0.61216825",
"0.59788126",
"0.5914733",
"0.5853105",
"0.5761492",
"0.5729343",
"0.57070595",
"0.5634071",
"0.5578847",
"0.5557914",
"0.55127",
"0.5486333",
"0.5457092",
"0.5417137",
"0.54008245",
"0.5397091",
"0.53696746",
"0.53682125",
"0.5365185",
"0.53581166",
"0.53523415",
"0.5310362",
"0.5295752",
"0.5293841",
"0.52729946",
"0.52695954",
"0.5262353",
"0.52619714",
"0.52426034"
] | 0.7787591 | 0 |
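A minimal usage sketch for the createFinalLibraries row above, with the project's readInput and save modules replaced by stand-in stubs so the call runs on its own; the stub behaviour and file names are assumptions for illustration, not part of the dataset.

import os
import types

# Stand-in stubs for the project's readInput and save modules (assumed behaviour).
readInput = types.SimpleNamespace(
    readFasta=lambda path: {os.path.basename(path) + "_seq1": "ATGC"})
save = types.SimpleNamespace(
    saveLibraries=lambda sequences, libraries: libraries.update(sequences))

def createFinalLibraries(INTERMEDIATELIBRARIES, DICOLIBRARIES):
    # Mirrors the row's document: read each intermediate FASTA, then save it.
    for file in INTERMEDIATELIBRARIES:
        sequences = readInput.readFasta(file)
        save.saveLibraries(sequences, DICOLIBRARIES)

libraries = {}
createFinalLibraries(["libA.fasta", "libB.fasta"], libraries)
print(libraries)  # {'libA.fasta_seq1': 'ATGC', 'libB.fasta_seq1': 'ATGC'}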
Return df with all entries that match df.query(pattern) removed | def remove(df, pattern):
return df[~df.index.isin(df.query(pattern).index)] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def prune(df, regex_list):\n for regex_pattern in regex_list:\n df = df[~df.case_action.str.contains(regex_pattern)]\n return df",
"def clean(df):",
"def filter_same_helix(df):\n helixpattern = re.compile(r\"\"\"^(..)\\w+\\s+\\1\"\"\")#For detecting same-helix contacts, the ones like 1.22x22 1.54x54\n helixfilter = df['Position'].str.contains(helixpattern)\n df = df[~helixfilter]\n return(df)",
"def filter_processes(ps_df: pd.DataFrame, user_query: str) -> pd.DataFrame:\n user_query = user_query.strip('\\'')\n result_df = ps_df.query(user_query)\n return result_df",
"def pattern_search(pattern, dataset, column):\n # Filter\n dataset = dataset[dataset[column].str.contains(pattern, regex=True)]\n # Reset index\n dataset = dataset.reset_index(drop=True)\n # Return\n return dataset",
"def minus_df(df):\n return df[df[\"content\"].str.contains(\"(^|[^0-9])\\-[0-9]+\")]",
"def filter(df, predicate):\n if not df:\n return []\n\n return [row for row in df if predicate(row)]",
"def _filter_df(adjmat, df, verbose=3):\n remcols = df.columns[~np.isin(df.columns.values, adjmat.columns.values)].values\n if len(remcols)>0:\n if verbose>=3: print('[bnlearn] >Removing columns from dataframe to make consistent with DAG [%s]' %(remcols))\n df.drop(labels=remcols, axis=1, inplace=True)\n return df",
"def _filter_df(adjmat, df, verbose=3):\n remcols = df.columns[~np.isin(df.columns.values, adjmat.columns.values)].values\n if len(remcols)>0:\n if verbose>=3: print('[bnlearn] >Removing columns from dataframe to make consistent with DAG [%s]' %(remcols))\n df.drop(labels=remcols, axis=1, inplace=True)\n return df",
"def _clean_dataset(df: pd.DataFrame) -> pd.DataFrame:\n df = df.loc[:, ~df.columns.str.contains(\"^Unnamed\")]\n df = df.dropna()\n return df",
"def trimDf(df):\n cols = set(df.columns)\n\n cols.remove('exclamationCount') # bug in our feature extraction code\n cols.remove('price') # considered only free apps\n cols.remove('appName') # removing appNames\n\n # return df[list(cols)]\n\n\n\n return df[list(('revSent', 'appLabel'))]",
"def get_subtable(df, col, val) -> pd.DataFrame:\r\n return df[df[col] == val].drop(columns=col)",
"def remove_other_elements(data):\n charset = ['F','l','B','r','I','i','M','g','L','b','a','e','K','V','d','R','Z','G','A','Y','u']\n x = []\n for i in range(data.shape[0]):\n for j in range(len(data.iloc[i,1])):\n if data.iloc[i,1][j] in charset:\n x.append(i)\n break\n df = data[(True^data['Index'].isin(x))]\n df.reset_index(drop=True, inplace=True)\n return df",
"def clean(self):\n self.df = _data.prune(self.df, [REGEX_PATTERN_GCI, REGEX_PATTERN_DB_ID])\n self.df, _ = _data.remove_totally_failed_tests(self.df)\n self.is_cleaned = True",
"def filter_blacklist(df, path):\n f = open(path)\n patterns = [e.strip() for e in f.readlines()]\n f.close()\n\n def run_filter(x):\n for pat in patterns:\n if fnmatch.fnmatch(x, pat):\n return True\n return False\n\n index = df[df[PROD_NM].apply(run_filter)].index\n df.drop(columns=[PROD_NM], index=index, inplace=True)",
"def drop_irrelevant_practices(df):\n\n is_relevant = df.groupby(\"practice\").value.any()\n return df[df.practice.isin(is_relevant[is_relevant == True].index)]",
"def _remove_non_informative_rows(self, df, threshold):\n df_tmp = pd.DataFrame()\n n_features = len(df.columns)\n # calculating ratio of rows that have more than \"ratio\" missing values\n df_tmp['ratio'] = df.apply(lambda row: row.isnull().sum()/n_features, axis='columns')\n\n # kick too noisy rows\n return df[df_tmp['ratio'] <= threshold]",
"def cleanDf(df, badaa=None):\n return df.loc[[isvalidpeptide(s, badaa) for s in df.seq]]",
"def remove_rows_with_non_english_movies(df):\n df = df[df['original_language'] == 'en']\n return df",
"def clean_rows_cat_values(df: pd.DataFrame, col: str, values: list) -> pd.DataFrame:\n\n # create mask to filter df with rows that have\n # the indicated values in the indicated column\n index = df.columns.get_loc(col)\n mask = [df.iloc[row, index] not in values for row in range(len(df))]\n\n # print original dataframe shape\n print(f\"Shape of the original dataframe: \\n{df.shape}\\n\")\n\n # filter df\n df = df.iloc[mask]\n df.reset_index(drop=True, inplace=True)\n print(\n f\"Shape after removing rows with values equal to\\n{values}\\nin column '{col}'':\"\n )\n print(df.shape, \"\\n\")\n\n return df",
"def select_feats(df):\n cols = list(df)\n for col in cols:\n if col not in config[\"feats\"] and col != \"label\":\n df = df.drop(columns=col)\n return df",
"def clean_unknown(df, db, index_col='id'):\n ind = []\n for i, gene_id in enumerate(df[index_col]):\n try:\n db[gene_id]\n ind.append(i)\n except gffutils.FeatureNotFoundError:\n continue\n return df.ix[ind, :]",
"def filter_cols(df):\n comm_keys = list( set(df.keys()) & set(KEYS_FOR_ML) )\n filt_col_df = df.copy()[comm_keys]\n\n return filt_col_df",
"def remove_rows_without_feature(df, feature):\n return df[np.isfinite(df[feature])]",
"def deletingNaNs(df):\n # start_ time.time()\n df_old = df.copy()\n df.dropna(axis=1, how='any', inplace=True)\n for key in df_old:\n if str(key) not in df:\n print('Deleted ', key)\n # end_time time.time()\n #print('Time to run deletingNaNs: ', end_time - start_time)\n return df",
"def extract_relevant_rows(df, column_name, column_value, not_equal=False):\n\n if not_equal:\n return df.loc[df[column_name] != column_value]\n\n return df.loc[df[column_name] == column_value]",
"def filter_input(input_df, target_df):\n # input_df = input_df.reindex(target_df.index, copy=False)\n data_df = pd.concat((input_df, target_df), join=\"inner\", copy=False, axis=1)\n return data_df",
"def delete_matches(self, pattern):\n with self.connect() as c:\n cur = c.cursor()\n cur.execute(self.create_query(\"DELETE\", pattern))",
"def unmatching_driver_id(df):\n\ttemp = df[df['driver_id_bkg'].notnull()]\n\torder_ids = temp[temp['driver_id_bkg'] != temp['driver_id_pnt']]['order_id'].values\n\treturn df[~df['order_id'].isin(order_ids)]",
"def drop_transafers(df):\n return df.filter(~(df.valor == 0))"
] | [
"0.67580104",
"0.61284876",
"0.59954125",
"0.5946802",
"0.5877959",
"0.5857522",
"0.5824286",
"0.57481945",
"0.57481945",
"0.5692505",
"0.56695396",
"0.5544663",
"0.55146563",
"0.5502974",
"0.54838765",
"0.54560864",
"0.54314256",
"0.5424039",
"0.5391558",
"0.53590816",
"0.5323305",
"0.53208524",
"0.5313805",
"0.5310662",
"0.53092474",
"0.5287976",
"0.52514154",
"0.5244534",
"0.52442133",
"0.5229888"
] | 0.7941567 | 0 |
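A minimal usage sketch for the `remove(df, pattern)` helper documented in the row above; only the function body is taken from the row, while the DataFrame columns, values, and the query string are illustrative assumptions.

```python
import pandas as pd

def remove(df, pattern):
    # Return df with all entries matching df.query(pattern) removed.
    return df[~df.index.isin(df.query(pattern).index)]

# Illustrative frame: column names and values are assumptions.
df = pd.DataFrame({"age": [12, 25, 37], "name": ["a", "b", "c"]})
print(remove(df, "age > 20"))  # keeps only the row with age == 12
```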
Split design by subexperiment (1, 2, 3) and create SubExperiment objects | def create_subexperiments(self):
subexperiments = {}
for label, df in self.design.groupby(level=0):
subexperiments[label] = SubExperiment(label, df.loc[label], self.root)
return subexperiments | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_expand_experiments():\n template_script = get_template_script()\n experiment_systems = utils.CombinatorialLeaf(['explicit-system', 'implicit-system', 'hydration-system'])\n template_script['experiments']['system'] = experiment_systems\n\n exp_builder = ExperimentBuilder(script=template_script, job_id=1, n_jobs=2)\n experiments = list(exp_builder._expand_experiments())\n assert len(experiments) == 2\n\n exp_builder = ExperimentBuilder(script=template_script, job_id=2, n_jobs=2)\n experiments = list(exp_builder._expand_experiments())\n assert len(experiments) == 1",
"def set_sub_models(self):\r\n sub_data_grid = [0.1 * i for i in range(1, 10)]\r\n self.sub_models = [\r\n copy.deepcopy(self.model_) for _ in range(len(sub_data_grid))]\r\n # fit sub-models to subset of data\r\n for key, data_size in enumerate(sub_data_grid):\r\n X, _, y, _ = train_test_split(\r\n self.X_train, self.y_train.ravel(),\r\n train_size=data_size,\r\n random_state=self.seeds.get('l_curve_seed', 22))\r\n self.sub_models[key].fit(X, y)",
"def get_subensemble_parts(self, num_parts):\n parts = []\n\n decoder_parts = self.get_subensemble_decoder(num_parts, \"X\")\n\n encoder_length = len(self.encoders)\n bias_length = len(self.bias)\n\n # create the specified number of subensembles\n for e_num in range(1, num_parts + 1):\n e_size = encoder_length / num_parts\n b_size = bias_length / num_parts\n\n encoder_part = self.encoders[e_size * (e_num - 1):e_size * e_num]\n bias_part = self.bias[b_size * (e_num - 1):b_size * e_num]\n\n parts.append((encoder_part, decoder_parts[e_num - 1], bias_part))\n\n return parts",
"def splits(cls, exts, fields, root='/Users/yingliu/PycharmProjects/Seq2SeqSemantic/data/',\n train='train2', validation='val2', test='test2', **kwargs):\n return super(SPDataset, cls).splits(\n exts, fields, path=root,\n train = train, validation = validation, test = test, **kwargs)",
"def _build_experiment_embedded_list():\n pass",
"def split_test_data():\n outputvis = ROOT_DIR + 'test_imaging/test_split_1eb.ms'\n targ = TARGETS['NGC1333IRAS4A']\n spw = '{0}:236~276'.format(SPWS[targ.name]['NH3_11'].spw_id)\n split(\n vis=get_vis_name(targ),\n outputvis=outputvis,\n field=targ.name,\n spw=spw,\n )",
"def create_sub_mdps(self, level):\n\n mdps_copy = set(self.mdps[level-1].copy())\n mdps = set()\n upper_level_exits = {}\n\n # Full depth-first search to group MDPs into MERs\n while len(mdps_copy) > 0:\n curr_mdp = random.choice(tuple(mdps_copy))\n mer, exits = set(), set()\n # Group curr_mdp with neighbors to form a MER and find exits\n self.dfs(mdps_copy, curr_mdp, level, mer, exits)\n # Choose a state var that is representative of the new MER\n state_var = next(iter(mer)).state_var[1:]\n # Create a new upper level MDP and set its properties\n mdp = MDP(level=level, state_var=state_var)\n mdp.mer = frozenset(mer)\n\n upper_level_exits[mdp] = exits\n for _mdp in mer:\n mdp.primitive_states.update(_mdp.primitive_states)\n mdps.add(mdp)\n\n self.mdps[level] = mdps\n\n # Add MDP Exits/Actions\n for mdp in self.mdps[level]:\n mdp.exits = set()\n # Generate new exits (mdp at level, Exit at level-1, target mdp at level)\n for s_mdp, exit, n_mdp in upper_level_exits[mdp]:\n neighbor_mdp = n_mdp.get_upper_mdp(self.mdps) \n mdp.exits.add(Exit(mdp, Exit(s_mdp, exit, n_mdp), neighbor_mdp))",
"def setup_experiment(testruns, droplist=\"\"):\n ex = Experiment()\n ex.addSoluFile(ALL_SOLU)\n\n regexlist = []\n for x in droplist.split(\",\"):\n # defaultvalue, if empty we don't want to exclude everything\n if x == \"\":\n continue\n try:\n y = re.compile(x)\n regexlist.append(y)\n except:\n pass\n\n excluded_inst = []\n # get data\n for t in testruns:\n # update representation\n additional_data = {\"RubberbandId\": get_rbid_representation(t, \"extended\")}\n\n # collect data and pass to ipet\n ipettestrun = TestRun()\n tr_raw_data = t.get_data(add_data=additional_data)\n\n tr_data = {}\n for i in tr_raw_data.keys():\n for r in regexlist:\n if r.match(i):\n excluded_inst.append(i)\n break\n else:\n tr_data[i] = tr_raw_data[i]\n\n ipettestrun.data = pd.DataFrame(tr_data).T\n\n ex.testruns.append(ipettestrun)\n return ex, excluded_inst",
"def split_dataset(dataset, eval_proportion, shuffle=False):\n split_sizes = [1. - eval_proportion, eval_proportion]\n split_frames = []\n split_demos = []\n num_demos = dataset.get_num_demos()\n split_num_demos = [int(fraction * num_demos) for fraction in split_sizes]\n split_num_demos[0] += num_demos - sum(split_num_demos)\n num_instances = len(dataset)\n demos = list(range(num_demos))\n if shuffle:\n np.random.shuffle(demos)\n start_idx = 0\n for split_idx in range(len(split_sizes)):\n if split_sizes[split_idx] == 0:\n split_frames.append(None)\n continue\n split_frames.append([])\n split_demos.append(range(start_idx, start_idx + split_num_demos[split_idx]))\n for demo_idx in split_demos[split_idx]:\n demo_slice = dataset.get_demo_frame_idxs(demos[demo_idx])\n split_frames[split_idx].extend(\n list(range(demo_slice.start, demo_slice.stop)))\n start_idx += split_num_demos[split_idx]\n # Check if the split indices are unique\n assert len(set(split_frames[split_idx])) == len(split_frames[split_idx])\n\n if eval_proportion > 0:\n # Check that splits do not intersect\n for split_idx in range(len(split_frames)):\n for split_idx2 in range(split_idx + 1, len(split_frames)):\n assert len(set(split_frames[split_idx]).intersection(split_frames[split_idx2])) == 0\n assert sum([len(s) for s in split_frames]) == num_instances\n\n split_datasets = [Subset(dataset, split) if split is not None else None for split in split_frames]\n return split_datasets",
"def subanalyses(self, subject_id, data):\n raise NotImplementedError('not implemented in abstract class')",
"def build_experiments(self):\n\n # width=500, height=350, pos_x= 2.0, pos_y=0.0, pos_z= 1.4, angle=-30.0\n cameraRGB = Camera('Camera', PostProcessing='SceneFinal')\n cameraRGB.set_image_size(500, 350)\n cameraRGB.set_position(2.0, 0.0, 1.4)\n cameraRGB.set_rotation(-30.0, 0.0, 0.)\n cameraRGB.set(FOV=100)\n\n camera = Camera('CameraSem', PostProcessing='SemanticSegmentation')\n camera.set_image_size(320, 180)\n camera.set_position(2.0, 0.0, 1.4)\n camera.set_rotation(-30.0, 0.0, 0.)\n camera.set(FOV=100)\n\n if self._city_name == 'Town01':\n poses_tasks = self._poses_town01()\n vehicles_tasks = []\n pedestrians_tasks = []\n for i in range(len(poses_tasks)):\n vehicles_tasks.append(0)\n pedestrians_tasks.append(0)\n\n experiment_vector = []\n\n for weather in self.weathers:\n\n for iteration in range(len(poses_tasks)):\n poses = poses_tasks[iteration]\n vehicles = vehicles_tasks[iteration]\n pedestrians = pedestrians_tasks[iteration]\n\n conditions = CarlaSettings()\n conditions.set(\n SendNonPlayerAgentsInfo=True,\n NumberOfVehicles=vehicles,\n NumberOfPedestrians=pedestrians,\n WeatherId=weather,\n QualityLevel=1\n )\n\n conditions.set(SynchronousMode=True)\n conditions.set(DisableTwoWheeledVehicles=True)\n\n conditions.add_sensor(camera)\n conditions.add_sensor(cameraRGB)\n\n experiment = Experiment()\n experiment.set(\n Conditions=conditions,\n Poses=poses,\n Task=iteration,\n Repetitions=1\n )\n\n experiment_vector.append(experiment)\n\n return experiment_vector",
"def setUp(self):\n self.splits = (2,3,4)",
"def three_experiments(two_experiments, one_experiment):",
"def _split(self, split, randomise=False, **kwargs):\r\n # Copy split to prevent modifying outside arguments\r\n split = split.copy()\r\n # Compute total\r\n total = sum(split.values())\r\n # If split contains floats, convert to integers\r\n if isinstance(total, float):\r\n assert_msg = 'Not enough data! ' \\\r\n + f'Split requires a total of {total*100}%. ' \\\r\n + 'Split should not exceed 100%.'\r\n assert total <= 1, assert_msg\r\n # Add 'rest' subset if not all data is used in split\r\n if total < 1:\r\n split['rest'] = 1 - total\r\n split = self._float_split_to_int(split)\r\n total = sum(split.values())\r\n # Create subsets based off integer values\r\n if isinstance(total, int):\r\n assert_msg = 'Not enough data! ' \\\r\n + f'Split requires a total of {total} data entries ' \\\r\n + f'but only {len(self.data)} are available.'\r\n assert total <= len(self.data), assert_msg\r\n # Add 'rest' subset if not all data is used in split\r\n if total < len(self.data):\r\n split['rest'] = len(self.data) - total\r\n # Create subsets\r\n index = 0\r\n for name, length in split.items():\r\n subset_name = f'{self.name}.{name}'\r\n subset_data = self.data[index:index + length]\r\n subset_seed = self.seed\r\n if self.seed is not None:\r\n subset_seed += sum([ord(c) for c in name]) + length\r\n subset = self._make_subset(subset_name,\r\n subset_data,\r\n randomise=randomise,\r\n seed=subset_seed,\r\n **kwargs\r\n )\r\n setattr(self, name, subset)\r\n index += length\r\n # Replace data with references to subsets\r\n self.data = []\r\n for name in split.keys():\r\n self.data.append(getattr(self, name, None))\r\n # Indicate that this is a superset\r\n self.is_superset = True",
"def make_multiinstantiate(self, special_properties, name, parameters):\n PARAM_SUBSCRIPT = \"_p\"\n self._model_namespace[\"ct_populationname\"] = name+\"Multi\"\n multi_ct = lems.ComponentType(self._model_namespace[\"ct_populationname\"], extends=BASE_POPULATION)\n structure = lems.Structure()\n multi_ins = lems.MultiInstantiate(component_type=name,\n number=\"N\")\n param_dict = {}\n # number of neruons\n multi_ct.add(lems.Parameter(name=\"N\", dimension=\"none\"))\n # other parameters\n for sp in special_properties:\n if special_properties[sp] is None:\n multi_ct.add(lems.Parameter(name=sp+PARAM_SUBSCRIPT, dimension=self._all_params_unit[sp]))\n multi_ins.add(lems.Assign(property=sp, value=sp+PARAM_SUBSCRIPT))\n param_dict[sp] = parameters[sp]\n else:\n # multi_ct.add(lems.Parameter(name=sp, dimension=self._all_params_unit[sp]))\n # check if there are some units in equations\n equation = special_properties[sp]\n # add spaces around brackets to prevent mismatching\n equation = re.sub(\"\\(\", \" ( \", equation)\n equation = re.sub(\"\\)\", \" ) \", equation)\n for i in get_identifiers(equation):\n # iterator is a special case\n if i == \"i\":\n regexp_noletter = \"[^a-zA-Z0-9]\"\n equation = re.sub(\"{re}i{re}\".format(re=regexp_noletter),\n \" {} \".format(INDEX), equation)\n # here it's assumed that we don't use Netwton in neuron models\n elif i in name_to_unit and i != \"N\":\n const_i = i+'const'\n multi_ct.add(lems.Constant(name=const_i, symbol=const_i,\n dimension=self._all_params_unit[sp], value=\"1\"+i))\n equation = re.sub(i, const_i, equation)\n multi_ins.add(lems.Assign(property=sp, value=equation))\n structure.add(multi_ins)\n multi_ct.structure = structure\n self._model.add(multi_ct)\n param_dict = dict([(k+\"_p\", v) for k, v in param_dict.items()])\n param_dict[\"N\"] = self._nr_of_neurons\n self._model_namespace[\"populationname\"] = self._model_namespace[\"ct_populationname\"] + \"pop\"\n self._model_namespace[\"networkname\"] = self._model_namespace[\"ct_populationname\"] + \"Net\"\n self.add_population(self._model_namespace[\"networkname\"],\n self._model_namespace[\"populationname\"],\n self._model_namespace[\"ct_populationname\"],\n **param_dict)",
"def get_subensemble_decoder(self, num_parts, origin_name, func=None):\n parts = []\n\n # TODO do not require an Origin to be created just to compute decoder\n if origin_name not in self.origin:\n # create the origin in order to compute a decoder\n self.add_origin(origin_name, func)\n # print \"name \" + self.name + \" decoder: \" + str(self.origin[origin_name].decoder)\n\n decoder = self.origin[origin_name].decoder\n decoder_length = len(decoder)\n\n # create the specified number of decoders\n for e_num in range(1, num_parts + 1):\n d_size = decoder_length / num_parts\n decoder_part = decoder[d_size * (e_num - 1):d_size * e_num]\n\n parts.append(decoder_part)\n\n return parts",
"def generate_submissons_all_steps():\n\n\n data_en = read_json_file(\"Test_Data/test-en.json\")\n data_pr = read_json_file(\"Test_Data/test-pr.json\")\n data_es = read_json_file(\"Test_Data/test-es.json\")\n res_en = generate_embeddings_sentence_test_data(data_en, \"Test_Data/embd-en.pkl\")\n res_es = generate_embeddings_sentence_test_data(data_es, \"Test_Data/embd-es.pkl\")\n res_pr = generate_embeddings_sentence_test_data(data_pr, \"Test_Data/embd-pr.pkl\")\n model = load_model(\"model_doc\")\n make_submission(res_es, model, \"submission-es\")\n make_submission(res_pr, model, \"submission-pr\")\n make_submission(res_en, model, \"submission-en\")\n exit()",
"def decode(self):\n # Extract all the experiments\n\n # Map of imageset/scan pairs\n imagesets = {}\n\n # For every experiment, use the given input to create\n # a sensible experiment.\n el = ExperimentList()\n for eobj in self._obj[\"experiment\"]:\n\n # Get the models\n identifier = eobj.get(\"identifier\", \"\")\n beam = self._lookup_model(\"beam\", eobj)\n detector = self._lookup_model(\"detector\", eobj)\n goniometer = self._lookup_model(\"goniometer\", eobj)\n scan = self._lookup_model(\"scan\", eobj)\n crystal = self._lookup_model(\"crystal\", eobj)\n profile = self._lookup_model(\"profile\", eobj)\n scaling_model = self._lookup_model(\"scaling_model\", eobj)\n\n key = (eobj.get(\"imageset\"), eobj.get(\"scan\"))\n\n imageset = None\n try:\n imageset = imagesets[key] # type: ImageSet\n except KeyError:\n # This imageset hasn't been loaded yet - create it\n imageset_data = self._lookup_model(\"imageset\", eobj)\n\n # Create the imageset from the input data\n if imageset_data is not None:\n if \"params\" in imageset_data:\n format_kwargs = imageset_data[\"params\"]\n else:\n format_kwargs = {}\n\n # Load the external lookup data\n mask_filename, mask = self._load_pickle_path(imageset_data, \"mask\")\n gain_filename, gain = self._load_pickle_path(imageset_data, \"gain\")\n pedestal_filename, pedestal = self._load_pickle_path(\n imageset_data, \"pedestal\"\n )\n dx_filename, dx = self._load_pickle_path(imageset_data, \"dx\")\n dy_filename, dy = self._load_pickle_path(imageset_data, \"dy\")\n\n if imageset_data[\"__id__\"] == \"ImageSet\":\n imageset = self._make_stills(\n imageset_data, format_kwargs=format_kwargs\n )\n elif imageset_data[\"__id__\"] == \"ImageGrid\":\n imageset = self._make_grid(\n imageset_data, format_kwargs=format_kwargs\n )\n elif (\n imageset_data[\"__id__\"] == \"ImageSequence\"\n or imageset_data[\"__id__\"] == \"ImageSweep\"\n ):\n imageset = self._make_sequence(\n imageset_data,\n beam=beam,\n detector=detector,\n goniometer=goniometer,\n scan=scan,\n format_kwargs=format_kwargs,\n )\n elif imageset_data[\"__id__\"] == \"MemImageSet\":\n imageset = self._make_mem_imageset(imageset_data)\n else:\n raise RuntimeError(\"Unknown imageset type\")\n\n if imageset is not None:\n # Set the external lookup\n if mask is None:\n mask = ImageBool()\n else:\n mask = ImageBool(mask)\n if gain is None:\n gain = ImageDouble()\n else:\n gain = ImageDouble(gain)\n if pedestal is None:\n pedestal = ImageDouble()\n else:\n pedestal = ImageDouble(pedestal)\n if dx is None:\n dx = ImageDouble()\n else:\n dx = ImageDouble(dx)\n if dy is None:\n dy = ImageDouble()\n else:\n dy = ImageDouble(dy)\n\n if not imageset.external_lookup.mask.data.empty():\n if not mask.empty():\n mask = tuple(m.data() for m in mask)\n for m1, m2 in zip(\n mask, imageset.external_lookup.mask.data\n ):\n m1 &= m2.data()\n imageset.external_lookup.mask.data = ImageBool(mask)\n else:\n imageset.external_lookup.mask.data = mask\n imageset.external_lookup.mask.filename = mask_filename\n imageset.external_lookup.gain.data = gain\n imageset.external_lookup.gain.filename = gain_filename\n imageset.external_lookup.pedestal.data = pedestal\n imageset.external_lookup.pedestal.filename = pedestal_filename\n imageset.external_lookup.dx.data = dx\n imageset.external_lookup.dx.filename = dx_filename\n imageset.external_lookup.dy.data = dy\n imageset.external_lookup.dy.filename = dy_filename\n\n # Update the imageset models\n if isinstance(imageset, ImageSequence):\n imageset.set_beam(beam)\n 
imageset.set_detector(detector)\n imageset.set_goniometer(goniometer)\n imageset.set_scan(scan)\n elif isinstance(imageset, (ImageSet, ImageGrid)):\n for i in range(len(imageset)):\n imageset.set_beam(beam, i)\n imageset.set_detector(detector, i)\n imageset.set_goniometer(goniometer, i)\n imageset.set_scan(scan, i)\n\n imageset.update_detector_px_mm_data()\n\n # Add the imageset to the dict - even if empty - as this will\n # prevent a duplicated attempt at reconstruction\n imagesets[key] = imageset\n\n # Append the experiment\n el.append(\n Experiment(\n imageset=imageset,\n beam=beam,\n detector=detector,\n goniometer=goniometer,\n scan=scan,\n crystal=crystal,\n profile=profile,\n scaling_model=scaling_model,\n identifier=identifier,\n )\n )\n\n # Return the experiment list\n return el",
"def train_data_split(self, selected_sr, selected_ss):\n \"\"\"\n Arguments:\n selected_sr: ordinal number of the selected split ratio\n selected_ss: ordinal number of split shift\n \"\"\"\n assert selected_sr < len(self.split_ratios),\\\n \"The total number of possible split ratios is: %d\"\\\n % len(self.split_ratios)\n\n max_shifts = 100 / self.split_ratios[selected_sr][-1]\n\n assert selected_ss < max_shifts,\\\n \"The total number of split shifts is: %d\" % max_shifts\n\n self.empty_split()\n\n n = float(self.n_train) / max_shifts\n self.n_develop = int(self.split_ratios[selected_sr][0] /\n (100 / max_shifts) * n)\n\n self.n_valid = int(self.split_ratios[selected_sr][1] /\n (100 / max_shifts) * n)\n\n self.n_eval = self.n_train - self.n_develop - self.n_valid\n\n for i in range(self.n_develop):\n self.development_subjects.\\\n append(self.training_subjects[(selected_ss * self.n_eval + i) %\n self.n_train])\n\n for i in range(self.n_valid):\n self.validation_subjects.\\\n append(self.training_subjects[(selected_ss * self.n_eval +\n self.n_develop + i) %\n self.n_train])\n\n for i in range(self.n_eval):\n self.evaluation_subjects.\\\n append(self.training_subjects[(selected_ss * self.n_eval +\n self.n_develop +\n self.n_valid + i) %\n self.n_train])",
"def __init__(self, transform, num_subbursts=1, parts_subset=None, \n pct_subset=None, balance=True, random_start_points=False, seed1=4, seed2=33):\n self.dp = DataPipeline(\"\",\"\")\n self.metadata = self.dp.get_metadata()\n if parts_subset:\n self.metadata = self.metadata[self.metadata.part.isin(parts_subset)]\n if pct_subset:\n self.metadata = self.metadata.sample(frac=pct_subset, random_state=seed1)\n if balance:\n reals = self.metadata[self.metadata.label=='REAL']\n self.num_reals = len(reals)\n fakes = self.metadata[self.metadata.label=='FAKE']\n fakes = fakes.sample(n=self.num_reals, random_state=seed2)\n self.metadata = pd.concat([reals,fakes])\n\n self.num_subbursts = num_subbursts\n self.transform = transform\n self.padding = PADDING_CROP\n self.random_start_points = random_start_points",
"def make_data_splits(samples, params, RESULTSDIR, num_experiments):\n # TODO: Switch to .mat from .pickle so that these lists are easier to read\n # and change.\n\n partition = {}\n if params[\"load_valid\"] is None:\n # Set random seed if included in params\n if params[\"data_split_seed\"] is not None:\n np.random.seed(params[\"data_split_seed\"])\n\n all_inds = np.arange(len(samples))\n\n # extract random inds from each set for validation\n v = params[\"num_validation_per_exp\"]\n valid_inds = []\n if params[\"valid_exp\"] is not None and params[\"num_validation_per_exp\"] > 0:\n all_valid_inds = []\n for e in params[\"valid_exp\"]:\n tinds = [\n i for i in range(len(samples)) if int(samples[i].split(\"_\")[0]) == e\n ]\n all_valid_inds = all_valid_inds + tinds\n valid_inds = valid_inds + list(\n np.random.choice(tinds, (v,), replace=False)\n )\n valid_inds = list(np.sort(valid_inds))\n\n train_inds = list(set(all_inds) - set(all_valid_inds))#[i for i in all_inds if i not in all_valid_inds]\n elif params[\"num_validation_per_exp\"] > 0: # if 0, do not perform validation\n for e in range(num_experiments):\n tinds = [\n i for i in range(len(samples)) if int(samples[i].split(\"_\")[0]) == e\n ]\n valid_inds = valid_inds + list(\n np.random.choice(tinds, (v,), replace=False)\n )\n valid_inds = list(np.sort(valid_inds))\n\n train_inds = [i for i in all_inds if i not in valid_inds]\n elif params[\"valid_exp\"] is not None:\n raise Exception(\"Need to set num_validation_per_exp in using valid_exp\")\n else:\n train_inds = all_inds\n\n assert (set(valid_inds) & set(train_inds)) == set()\n\n train_samples = samples[train_inds]\n train_inds = []\n if params[\"valid_exp\"] is not None:\n train_expts = [f for f in range(num_experiments) if f not in params[\"valid_exp\"]]\n else:\n train_expts = np.arange(num_experiments)\n\n print(\"TRAIN EXPTS: {}\".format(train_expts))\n\n if params[\"num_train_per_exp\"] is not None:\n # Then sample randomly without replacement from training sampleIDs\n for e in train_expts:\n tinds = [\n i for i in range(len(train_samples)) if int(train_samples[i].split(\"_\")[0]) == e\n ]\n print(e)\n print(len(tinds))\n train_inds = train_inds + list(\n np.random.choice(tinds, (params[\"num_train_per_exp\"],), replace=False)\n )\n train_inds = list(np.sort(train_inds))\n else:\n train_inds = np.arange(len(train_samples))\n\n \n\n partition[\"valid_sampleIDs\"] = samples[valid_inds]\n partition[\"train_sampleIDs\"] = train_samples[train_inds]\n\n # Save train/val inds\n with open(os.path.join(RESULTSDIR, \"val_samples.pickle\"), \"wb\") as f:\n cPickle.dump(partition[\"valid_sampleIDs\"], f)\n\n with open(os.path.join(RESULTSDIR, \"train_samples.pickle\"), \"wb\") as f:\n cPickle.dump(partition[\"train_sampleIDs\"], f)\n else:\n # Load validation samples from elsewhere\n with open(os.path.join(params[\"load_valid\"], \"val_samples.pickle\"), \"rb\",) as f:\n partition[\"valid_sampleIDs\"] = cPickle.load(f)\n partition[\"train_sampleIDs\"] = [\n f for f in samples if f not in partition[\"valid_sampleIDs\"]\n ]\n\n # Reset any seeding so that future batch shuffling, etc. are not tied to this seed\n if params[\"data_split_seed\"] is not None:\n np.random.seed()\n\n return partition",
"def _create_examples_split(self, lines, set_type):\n examples = []\n \n for (i, line) in enumerate(lines):\n a_label = int(line[\"label\"])\n q_type = line[\"type\"]\n if a_label == 0 and q_type != \"qLookup\":\n #print(\"discontinue\")\n continue\n sentence_number = 0\n premise_text = line[\"premise\"]\n the_id = int(line[\"id\"])\n modified_premise_text = re.sub(self.stage_name_pattern,\"\",premise_text)\n modified_premise_text = re.sub(self.w_patterns,\"\",modified_premise_text)\n hypothesis_text = line[\"hypothesis\"]\n hypothesis_text = re.sub(self.w_patterns,\"\",hypothesis_text)\n \n\n sentences = modified_premise_text.split('.')\n\n for j, sentence in enumerate(sentences):\n guid = \"\" + str(sentence_number) + \"\\t\" + str(i) + \"\\t\" + str(len(sentences)) + \"\\t\" + str(a_label)\n text_a = sentence\n text_b = hypothesis_text\n label = a_label\n sentence_number += 1\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n #print(\"16th sentence::\",sentences[16])\n\n return examples",
"def generate_complex_catalog(stem: str = '') -> cat.Catalog:\n group_a = generators.generate_sample_model(cat.Group, True)\n group_a.id = f'{stem}a'\n group_a.controls = generate_control_list(group_a.id, 4)\n part = generators.generate_sample_model(common.Part)\n part.id = f'{stem}a-1_smt'\n part.parts = None\n group_a.controls[0].parts[0].id = f'{stem}_part_with_subpart'\n group_a.controls[0].parts[0].parts = [part]\n group_b = generators.generate_sample_model(cat.Group, True)\n group_b.id = f'{stem}b'\n group_b.controls = generate_control_list(group_b.id, 3)\n group_b.controls[2].controls = generate_control_list(f'{group_b.id}-2', 3)\n group_ba = generators.generate_sample_model(cat.Group, True)\n group_ba.id = f'{stem}ba'\n group_ba.controls = generate_control_list(group_ba.id, 2)\n group_b.groups = [group_ba]\n\n catalog = generators.generate_sample_model(cat.Catalog, True)\n catalog.controls = generate_control_list(f'{stem}cat', 3)\n catalog.params = generate_param_list(f'{stem}parm', 3)\n\n test_control = generators.generate_sample_model(cat.Control, False)\n test_control.id = f'{stem}test-1'\n test_control.params = [common.Parameter(id=f'{test_control.id}_prm_1', values=['Default', 'Values'])]\n test_control.parts = [\n common.Part(\n id=f'{test_control.id}-stmt', prose='The prose with {{ insert: param, test-1_prm_1 }}', name='statement'\n )\n ]\n catalog.controls.append(test_control)\n catalog.groups = [group_a, group_b]\n\n return catalog",
"def create_new_individuals(design, problem, pop_size=None):\n\n if pop_size is None:\n pop_size_options = [50, 105, 120, 126, 132, 112, 156, 90, 275]\n pop_size = pop_size_options[problem.num_of_objectives - 2]\n\n if design == \"RandomDesign\":\n lower_limits = np.asarray(problem.get_variable_lower_bounds())\n upper_limits = np.asarray(problem.get_variable_upper_bounds())\n individuals = np.random.random((pop_size, problem.n_of_variables))\n # Scaling\n individuals = individuals * (upper_limits - lower_limits) + lower_limits\n\n return individuals\n\n elif design == \"LHSDesign\":\n lower_limits = np.asarray(problem.get_variable_lower_bounds())\n upper_limits = np.asarray(problem.get_variable_upper_bounds())\n individuals = lhs(problem.n_of_variables, samples=pop_size)\n # Scaling\n individuals = individuals * (upper_limits - lower_limits) + lower_limits\n\n return individuals\n\n elif design == \"EvoNN\":\n\n \"\"\"Create a population of neural networks for the EvoNN algorithm.\n\n Individuals are 2d arrays representing the weight matrices of the NNs.\n One extra row is added for bias.\n\n \"\"\"\n\n w_low = problem.params[\"w_low\"]\n w_high = problem.params[\"w_high\"]\n in_nodes = problem.num_of_variables\n num_nodes = problem.params[\"num_nodes\"]\n prob_omit = problem.params[\"prob_omit\"]\n\n individuals = np.random.uniform(\n w_low, w_high, size=(pop_size, in_nodes, num_nodes)\n )\n\n # Randomly set some weights to zero\n zeros = np.random.choice(\n np.arange(individuals.size), ceil(individuals.size * prob_omit)\n )\n individuals.ravel()[zeros] = 0\n\n # Set bias\n individuals = np.insert(individuals, 0, 1, axis=1)\n\n return individuals\n\n elif design == \"EvoDN2\":\n \"\"\"Create a population of deep neural networks (DNNs) for the EvoDN2 algorithm.\n\n Each individual is a list of subnets, and each subnet contains a random amount\n of layers and\n nodes per layer. The subnets are evolved via evolutionary algorithms, and they\n converge\n on the final linear layer of the DNN.\n \"\"\"\n\n individuals = []\n for i in range(problem.params[\"pop_size\"]):\n nets = []\n for j in range(problem.params[\"num_subnets\"]):\n\n layers = []\n num_layers = np.random.randint(1, problem.params[\"max_layers\"])\n in_nodes = len(problem.subsets[j])\n\n for k in range(num_layers):\n out_nodes = random.randint(2, problem.params[\"max_nodes\"])\n net = np.random.uniform(\n problem.params[\"w_low\"],\n problem.params[\"w_high\"],\n size=(in_nodes, out_nodes),\n )\n # Randomly set some weights to zero\n zeros = np.random.choice(\n np.arange(net.size),\n ceil(net.size * problem.params[\"prob_omit\"]),\n )\n net.ravel()[zeros] = 0\n\n # Add bias\n net = np.insert(net, 0, 1, axis=0)\n in_nodes = out_nodes\n layers.append(net)\n\n nets.append(layers)\n\n individuals.append(nets)\n\n return individuals\n\n elif design == \"BioGP\":\n return problem.create_individuals()",
"def gen_submodels(self, model, options):\n for submodel in options:\n model.submodels.create(id=submodel)",
"def new_multi_experiments_from_components(self, dataset_params,\n input_params,\n architecture, model_params,\n train_params=None):\n # Assuming they're all list\n components = [dataset_params, input_params, architecture, model_params,\n train_params if train_params else [None]]\n args_list = product(*components)\n\n inserted_ids = []\n for args in args_list:\n inserted_ids.append(\n self.new_multi_experiments_from_components(*args))\n return inserted_ids",
"def three_experiments_branch_same_name_trials(\n three_experiments_branch_same_name, orionstate, storage\n):\n exp1 = experiment_builder.build(name=\"test_single_exp\", version=1, storage=storage)\n exp2 = experiment_builder.build(name=\"test_single_exp\", version=2, storage=storage)\n exp3 = experiment_builder.build(\n name=\"test_single_exp_child\", version=1, storage=storage\n )\n\n x = {\"name\": \"/x\", \"type\": \"real\"}\n y = {\"name\": \"/y\", \"type\": \"real\"}\n z = {\"name\": \"/z\", \"type\": \"real\"}\n x_value = 0.0\n for status in Trial.allowed_stati:\n x[\"value\"] = x_value + 0.1 # To avoid duplicates\n y[\"value\"] = x_value * 10\n z[\"value\"] = x_value * 100\n trial1 = Trial(experiment=exp1.id, params=[x], status=status)\n trial2 = Trial(experiment=exp2.id, params=[x, y], status=status)\n trial3 = Trial(experiment=exp3.id, params=[x, y, z], status=status)\n # Add a child to a trial from exp1\n child = trial1.branch(params={\"/x\": 1})\n orionstate.database.write(\"trials\", trial1.to_dict())\n orionstate.database.write(\"trials\", trial2.to_dict())\n orionstate.database.write(\"trials\", trial3.to_dict())\n orionstate.database.write(\"trials\", child.to_dict())\n x_value += 1\n # exp1 should have 12 trials (including child trials)\n # exp2 and exp3 should have 6 trials each\n\n # Add some algo data for exp1\n orionstate.database.read_and_write(\n collection_name=\"algo\",\n query={\"experiment\": exp1.id},\n data={\n \"state\": pickle.dumps(\n {\"my_algo_state\": \"some_data\", \"my_other_state_data\": \"some_other_data\"}\n )\n },\n )",
"def split(self, fractions=[0.8, 0.2]):\n\n if sum(fractions) > 1.0 or sum(fractions) <= 0:\n raise ValueError(\"the sum of fractions argument should be between 0 and 1\")\n\n # random indices\n idx = np.arange(self.n_samples)\n np.random.shuffle(idx)\n\n # insert zero\n fractions.insert(0, 0)\n\n # gte limits of the subsets\n limits = (np.cumsum(fractions) * self.n_samples).astype(np.int32)\n\n subsets = []\n # create output dataset\n for i in range(len(fractions) - 1):\n subsets.append(\n Dataset(self.inputs[idx[limits[i]:limits[i + 1]]], self.targets[idx[limits[i]:limits[i + 1]]]))\n\n return subsets",
"def init_subgroups(metadata):\n dictionary = {'dataframe': metadata}\n subgroups = [dictionary]\n\n return subgroups",
"def experiment(worker):\n if worker == 0:\n logging.info('Start.')\n\n surv = np.zeros((NUM_GEN, NUM_CRS, 2), dtype=float)\n # order = np.zeros((NUM_GEN, NUM_CRS, NUM_G), dtype=float) # time + orders\n ctrl = np.zeros((NUM_GEN, NUM_LIN, 2), dtype=float)\n\n # Initiation of lineages\n ancestor = Generation(species=SPECIES, N=NUM_INDV, environment=ENVIRONMENT)\n ancestor.natural_selection()\n lineages = [deepcopy(ancestor) for _ in range(NUM_LIN)]\n\n if worker == 0:\n logging.info('Lineages are initiated.')\n\n # BUG: Need to run analysis for the ancestral population!\n\n # Evolution\n for t in range(NUM_GEN):\n for i in range(NUM_LIN):\n lineages[i] = lineages[i].next_generation()\n lineages[i].natural_selection()\n\n # Control group of survival percentage\n population = deepcopy(lineages[i])\n offspring = population.next_generation(num=NUM_HYB)\n offspring.natural_selection()\n ctrl[t, i] = [t+1, offspring.survival_rate()]\n\n # Hybrids\n for j in range(NUM_CRS):\n idx_1, idx_2 = sample(range(len(lineages)), 2)\n lin_1, lin_2 = deepcopy(lineages[idx_1]), deepcopy(lineages[idx_2])\n hybrids = lin_1.hybrids(lin_2, num=NUM_HYB, env=ENVIRONMENT)\n hybrids.natural_selection()\n\n # Survival percentage\n surv[t, j] = [t+1, hybrids.survival_rate()]\n\n # Order of incompatibilities\n counting = {_ord: 0 for _ord in range(1, NUM_G)}\n inviable = [hybrids.members[i]\\\n for i, survived in enumerate(hybrids.survival)\\\n if not survived]\n for indv in inviable:\n for l, n in indv.incompatibility(**ENVIRONMENT).items():\n counting[l-1] += n\n order[t, j] = [t+1] + [counting[_ord]/float(NUM_HYB)\\\n for _ord in range(1, NUM_G)]\n\n if worker == 0:\n logging.info('Generation {} is done.'.format(t+1))\n\n return (surv.reshape((NUM_GEN*NUM_CRS, 2)).tolist(),\\\n # order.reshape((NUM_GEN*NUM_CRS, NUM_G)).tolist(),\\\n ctrl.reshape((NUM_GEN*NUM_LIN, 2)).tolist())"
] | [
"0.573789",
"0.57144195",
"0.56194884",
"0.5434938",
"0.5363377",
"0.53393525",
"0.529092",
"0.5277947",
"0.5253223",
"0.5229658",
"0.5225851",
"0.5225829",
"0.5144398",
"0.51441",
"0.5116659",
"0.5102679",
"0.5095015",
"0.5081049",
"0.5081003",
"0.50729406",
"0.5070303",
"0.5065203",
"0.5049927",
"0.5024096",
"0.50216424",
"0.5003964",
"0.5003254",
"0.49998808",
"0.49918142",
"0.49752128"
] | 0.7028728 | 0 |
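The sub-experiment row above hinges on grouping a two-level design index by its first level and then dropping that level with `df.loc[label]`. The sketch below reproduces just that pattern; the design table and the stand-in `SubExperiment` class are assumptions, since neither is defined in the snippet.

```python
import pandas as pd

class SubExperiment:
    # Stand-in for the real class, which the snippet does not show.
    def __init__(self, label, design, root):
        self.label, self.design, self.root = label, design, root

# Assumed two-level design index: (sub-experiment label, sample).
design = pd.DataFrame(
    {"treatment": ["Baseline", "TGFb", "Baseline", "Control"]},
    index=pd.MultiIndex.from_tuples(
        [(1, "s1"), (1, "s2"), (2, "s1"), (2, "s2")],
        names=["sub_experiment", "sample"],
    ),
)

subexperiments = {}
for label, df in design.groupby(level=0):
    # df.loc[label] drops the outer level, leaving a per-sub-experiment frame.
    subexperiments[label] = SubExperiment(label, df.loc[label], root=".")

print(sorted(subexperiments))          # [1, 2]
print(subexperiments[1].design.index)  # Index(['s1', 's2'], name='sample')
```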
Get baseline data without treatment time courses | def baseline(self):
return self.data[self.data['treatment'] == 'Baseline'] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def emissions_baseline(self):\n baseline = DataFrame(columns=[\"CO2\", \"NOx\", \"PM10\", \"PM2.5\", \"SO2\"])\n baseline = baseline.append(year_1(self.plant.emissions()))\n baseline = baseline.append(year_1(self.plant.fuel_reseller().emissions()))\n baseline = baseline.append(year_1(self.farmer.emissions_exante))\n baseline.loc[\"Total\"] = baseline.sum()\n baseline.loc[\"Total_plant\"] = baseline.iloc[0]\n baseline.loc[\"Total_transport\"] = baseline.iloc[1]\n baseline.loc[\"Total_field\"] = baseline.iloc[2]\n return baseline",
"def baseline(self) -> List[PredictionsDatapoints]:\n return self._baseline",
"def organise_baseline_data(self):\n self.baseline_data = {}\n for injkey in self.data_sets.keys():\n data = {}\n baseline_result = self.data_sets[injkey].pop('full_syst_baseline')\n datakey = baseline_result.keys()[0]\n baseline_data = self.systtest_fit_extract(\n fit_data=baseline_result[datakey],\n datakey=datakey,\n labels=self.labels[injkey]['full_syst_baseline'].dict\n )\n self.baseline_data[injkey] = baseline_data",
"def baselineFrames(self):\n frames=[]\n for tag,T1,T2 in [x for x in self.tags if x[0]=='baseline']:\n for i,timePoint in enumerate(self.conf['times']):\n if timePoint>=T1*60 and timePoint<=T2*60:\n frames.append(i)\n return frames\n else:\n return [0]",
"def baseline(x_data, y_data, stra = \"uniform\"):\r\n x_train, x_test, y_train, y_test = train_test_split(x_data, y_data, test_size=0.2)\r\n dummy = DummyClassifier(strategy= stra)\r\n dummy.fit(x_train, y_train)\r\n y_pred = dummy.predict(x_test)\r\n accu = accuracy_score(y_test, y_pred)\r\n return accu",
"def __getBaselineList(self):\n\n # cumulative baseline selections do not reflect on the msselectedindices()\n if self._msTool is None:\n self.__selectMS()\n\n \n # If there are any previous antenna selections, use it\n if self._arg['antenna'] != '':\n baselineSelection = {'baseline':self._arg['antenna']}\n try:\n self._msTool.msselect(baselineSelection, onlyparse=False)\n # IMPORTANT: msselectedindices() will always say there are auto-correlation\n # baselines, even when there aren't. In the MMS case, the SubMS creation will\n # issue a MSSelectionNullSelection and not be created. \n baselinelist = self._msTool.msselectedindices()['baselines']\n except:\n baselinelist = []\n else:\n md = msmdtool()\n md.open(self._arg['vis'])\n baselines = md.baselines()\n md.close()\n import numpy as np\n baselinelist = np.vstack(np.where(np.triu(baselines))).T \n \n\n return baselinelist.tolist()",
"def getDataForLoadComparisons(self):\n\n\t\t# Variables\n\t\tload_data = self.getLoadData() \n\t\tvalues = [] \n\t\tinner_dict = {}\n\t\touter_dict = {}\n\t\tfinal_data = []\n\t\tyesterday = self.helper.getYesterday()\n\t\tkey = self.helper.getYear() + self.helper.getMonth() + self.helper.getDay() + \"-loadData\"\n\t\tdata = load_data[yesterday[0]][int(yesterday[1])][int(yesterday[2])]\n\t\tdates = (['12:00 AM','1:00 AM','2:00 AM','3:00 AM','4:00 AM','5:00 AM',\n\t\t\t'6:00 AM','7:00 AM','8:00 AM','9:00 AM','10:00 AM','11:00 AM',\n\t\t\t'12:00 PM','1:00 PM','2:00 PM','3:00 PM','4:00 PM','5:00 PM',\n\t\t\t'6:00 PM','7:00 PM','8:00 PM','9:00 PM','10:00 PM','11:00 PM'])\n\n\t\t# Populating values array\n\t\tfor i in range(0,len(data)):\n\t\t\tinner_dict['label'] = dates[i]\n\t\t\tinner_dict['value'] = data[i]\n\t\t\tvalues.append(inner_dict)\n\t\t\tinner_dict = {}\n\n\t\t# Populating the final_data array and returning it\n\t\touter_dict['key'] = key\n\t\touter_dict['values'] = values\n\t\tfinal_data.append(outer_dict)\n\n\t\treturn final_data",
"def calculate_ddct(self, baseline_time=0):\n\n if baseline_time == 0:\n baseline = self.baseline_data['dct'][0]\n\n elif baseline_time == 96:\n baseline = self.baseline_data['dct'][96]\n\n elif baseline_time == 'average':\n baseline = (self.baseline_data['dct'][0] + self.baseline_data['dct'][96]) / 2\n\n control =self.treatment_data.query('treatment == \"Control\"')['dct']\n tgf = self.treatment_data.query('treatment == \"TGFb\"')['dct']\n control.index = control.index.droplevel(1)\n tgf.index = tgf.index.droplevel(1)\n baseline.index = baseline.index.droplevel(1)\n\n time = control.columns\n\n tgf = pandas.concat([tgf[i]/baseline for i in tgf.columns], axis=1)\n control = pandas.concat([control[i]/baseline for i in control.columns], axis=1)\n\n control.columns = time\n tgf.columns = time\n\n control = pandas.DataFrame(control.stack())\n tgf = pandas.DataFrame(tgf.stack())\n control['treatment'] = 'Control'\n tgf['treatment'] = 'TGFb'\n df = pandas.concat([control, tgf]).reset_index()\n df.rename(columns={0: 'ddct'})\n return df",
"def test_get_derived_metric_history(self):\n pass",
"def get_iaq_baseline(self) -> List[int]:\n # name, command, signals, delay\n return self._run_profile((\"iaq_get_baseline\", [0x20, 0x15], 2, 0.01))",
"def data_preprocessing(dataset):\r\n df = pd.read_csv(dataset)\r\n df.head()\r\n df.describe()\r\n df.isnull().sum()\r\n df= df.drop(['instant'], axis=1)\r\n df['dteday'] = pd.to_datetime(df['dteday'].apply(str) + ' ' + df['hr'].apply(str) + ':00:00')\r\n return df",
"def get_timecourse_data(self, fragment, number):\n timepoints = self.get_timepoints()\n timecourse = []\n for time in timepoints:\n if self.mdvtc[time].has_data(fragment, number):\n ratio, stdev, use = self.mdvtc[time].get_data(fragment, number)\n timecourse.append(ratio)\n return timecourse",
"def baseline(records, baseline_samples=40):\n if not len(records):\n return\n samples_per_record = len(records[0]['data'])\n\n # Array for looking up last baseline seen in channel\n # We only care about the channels in this set of records; a single .max()\n # is worth avoiding the hassle of passing n_channels around\n last_bl_in = np.zeros(records['channel'].max() + 1, dtype=np.int16)\n\n for d_i, d in enumerate(records):\n\n # Compute the baseline if we're the first record of the pulse,\n # otherwise take the last baseline we've seen in the channel\n if d.record_i == 0:\n bl = last_bl_in[d.channel] = d.data[:baseline_samples].mean()\n else:\n bl = last_bl_in[d.channel]\n\n # Subtract baseline from all data samples in the record\n # (any additional zeros should be kept at zero)\n last = min(samples_per_record,\n d.pulse_length - d.record_i * samples_per_record)\n d.data[:last] = int(bl) - d.data[:last]\n d.baseline = bl",
"def calc_baseline(signal):\n ssds = np.zeros((3))\n\n cur_lp = np.copy(signal)\n iterations = 0\n while True:\n # Decompose 1 level\n lp, hp = pywt.dwt(cur_lp, \"db4\")\n\n # Shift and calculate the energy of detail/high pass coefficient\n ssds = np.concatenate(([np.sum(hp ** 2)], ssds[:-1]))\n\n # Check if we are in the local minimum of energy function of high-pass signal\n if ssds[2] > ssds[1] and ssds[1] < ssds[0]:\n break\n\n cur_lp = lp[:]\n iterations += 1\n\n # Reconstruct the baseline from this level low pass signal up to the original length\n baseline = cur_lp[:]\n for _ in range(iterations):\n baseline = pywt.idwt(baseline, np.zeros((len(baseline))), \"db4\")\n\n return baseline[: len(signal)]",
"def baseline(data):\n weights = weighting(data)\n return np.inner(weights,data['clicks'])/weights.sum()",
"def graph_baseline(evictiondata, weeks):\r\n base_evictions_per_week = {}\r\n for index, row in evictiondata.iterrows():\r\n if row['week_date'] not in base_evictions_per_week.keys():\r\n base_evictions_per_week[row['week_date']] = row['filings_avg']\r\n elif row['GEOID'] != 'sealed':\r\n base_evictions_per_week[row['week_date']] += row['filings_avg']\r\n base_evictions_filed = []\r\n for week in weeks:\r\n base_evictions_filed.append(base_evictions_per_week[week])\r\n\r\n plt.figure(figsize=(50, 10))\r\n plt.plot(weeks, base_evictions_filed, color='orange')\r\n plt.title('Base Evictions filed by the week')\r\n plt.xlabel('Date')\r\n plt.ylabel('Evictions filed')\r\n plt.show()\r\n return base_evictions_filed",
"def _load_timecourses(self):\n # load the timecourses file\n tcsf = fetch_one_file(self.ica_dir, self._tcs_fname)\n tcs = niimg.load_img(tcsf).get_data()\n return tcs",
"def test_no_base_date(self):\n data = self._data()\n data.pop('base_date')\n steps = [{'dateTime': '2012-06-07', 'value': '10'}]\n TimeSeriesData.objects.create(\n user=self.user,\n resource_type=TimeSeriesDataType.objects.get(\n category=TimeSeriesDataType.activities, resource='steps'),\n date=steps[0]['dateTime'],\n value=steps[0]['value']\n )\n response = self._mock_utility(response=steps, get_kwargs=data)\n self._check_response(response, 100, steps)",
"def process_for_baseline(dat):\n records = []\n count = 0\n for record in dat:\n # Context\n context = record['context'].replace(\"''\", '\" ').replace(\"``\", '\" ') # Replace non-standard quotation marks\n context_tokens = text2tokens(context)\n spans = convert_idx(context, context_tokens) # (token_start_char_idx, token_end_char_idx)\n \n # Question\n ques = record['question'].replace(\"''\", '\" ').replace(\"``\", '\" ') \n ques_tokens = text2tokens(ques)\n \n # Answers\n y1, y2 = charIdx_to_tokenIdx(spans, record['answers']['text'], record['answers']['answer_start']) \n\n res_dict = {\"context_tokens\": context_tokens,\n \"ques_tokens\": ques_tokens,\n \"answer\": record['answers']['text'], \n \"y1s\": y1, \n \"y2s\": y2,\n \"id\": count,\n 'pubId': record['PubID'],\n \"group\": record['group']}\n records.append(res_dict)\n count += 1 \n \n return records",
"def baseline_statistics(self, **_):\n raise NotImplementedError(\"{} doesn't support statistics.\".format(__class__.__name__))",
"def all_data(self):\n return pd.concat([self.historic_data, self.dayahead_data])",
"def _read_baseline(self, path):\n base_rmsd = dict()\n fin = open(path,'r')\n for line in fin:\n if line == '\\s' or line == '' or line == '\\n':\n continue\n k, v = line.split()\n base_rmsd[k.strip()] = float(v.strip())\n return base_rmsd",
"def _load_timecourses(self):\n # load the timecourses file\n tcsf = fetch_one_file(self.ica_dir, self._tcs_fname, pat_type='re.match')\n tcs = niimg.load_img(tcsf).get_data()\n return tcs",
"def run_baseline_simulation(self):\n n_days_base = 1 # Only consider 1 day simulation, self.n_days_base\n sim_time = 24*3600 # one day in seconds\n \n print(\"Running day-ahead baseline simulation ...\") \n print(\"Running baseline right away charging strategy ...\")\n baseline_soc, baseline_std_soc, baseline_power, baseline_cycles, baseline_Tin, baseline_std_Tin, baseline_Tin_max, baseline_Tin_min = self.run_baseline_right_away(n_days_base, sim_time)\n \n print(\"Exported baseline soc, Temperatures, power and HVAC cycles ...\")\n \n base_path = dirname(abspath(__file__))\n path = join(base_path,'data')\n \n # Already saved inside the right away function\n # baseline_soc.to_csv(join(path, r'SOC_baseline.csv'), index = False)\n # baseline_power.to_csv(join(path, r'power_baseline.csv'), index = False)\n # baseline_Tin.to_csv(join(path, r'Tin_baseline.csv'), index = False)\n # baseline_Tin_max.to_csv(join(path, r'Tin_max_baseline.csv'), index = False)\n # baseline_Tin_min.to_csv(join(path, r'Tin_min_baseline.csv'), index = False)\n print(\"Exported\")",
"def baseline(spectra):\n\n return spectra - np.mean(spectra, axis=0)",
"def _baseline_value(self):\n t = self['primary']\n return np.median(t.data[:int(10e-3/t.dt)])",
"def read_ct_data(train_start, train_count, eval_start, eval_count):\n data = pd.read_csv('/opt/train.csv')\n\n # Dropping the id column\n data.drop(['ID_code'], axis=1, inplace=True)\n\n data = data.values\n return (data[train_start:train_start + train_count],\n data[eval_start:eval_start + eval_count])",
"def data(self):\n if self._data.empty:\n self._data = super().data\n\n max_data_year = self._data[\"year\"].max()\n\n # If the date range of the data doesn't line up with the year filters\n # for train/test data, we risk getting empty data sets\n if self.max_year != max_data_year:\n max_year_diff = pd.to_timedelta(\n [(YEAR_IN_DAYS * (self.max_year - max_data_year))]\n * len(self._data),\n unit=\"days\",\n )\n\n self._data.loc[:, \"date\"] = self._data[\"date\"] + max_year_diff\n self._data.loc[:, \"year\"] = self._data[\"date\"].dt.year\n self._data.set_index(\n [\"team\", \"year\", \"round_number\"], drop=False, inplace=True\n )\n\n return self._data",
"def background_subtract_data(data):\n bgsub_data = data.copy()\n bax_concs = data.columns.levels[0]\n lipo_concs = data.columns.levels[1]\n\n for bax_conc in data.columns.levels[0]:\n timecourses = data.xs(bax_conc, axis=1, level='Bax')\n bg = timecourses[0.]\n for lipo_conc in lipo_concs:\n bgsub_tc = timecourses[lipo_conc] - bg\n bgsub_data[(bax_conc, lipo_conc)] = bgsub_tc\n\n return bgsub_data",
"def calc_base_year_data(base_year_vehicles_df):\n pass"
] | [
"0.661995",
"0.5938596",
"0.5920818",
"0.5673383",
"0.56499016",
"0.5554714",
"0.5551037",
"0.5503436",
"0.5422899",
"0.5406511",
"0.53813374",
"0.53646106",
"0.5344754",
"0.53437316",
"0.533326",
"0.5273145",
"0.52495307",
"0.5244175",
"0.5234559",
"0.5222947",
"0.5214892",
"0.5192904",
"0.51905257",
"0.51727",
"0.5168285",
"0.5108798",
"0.5105418",
"0.51014227",
"0.5100567",
"0.50957274"
] | 0.61777115 | 1 |
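The baseline row above is a one-line boolean-mask filter on a `treatment` column; a self-contained version of the same filter, with invented measurements, might look like this.

```python
import pandas as pd

# Assumed measurements table; column names and values are illustrative.
data = pd.DataFrame({
    "treatment": ["Baseline", "Control", "TGFb", "Baseline"],
    "value": [1.0, 1.2, 2.5, 0.9],
})

def baseline(data):
    # Keep only rows recorded before any treatment time course started.
    return data[data["treatment"] == "Baseline"]

print(baseline(data))  # two rows, values 1.0 and 0.9
```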
Creates a millisecond-based timestamp of UTC now. | def get_utc_now_timestamp() -> int:
return int(datetime.datetime.now(datetime.timezone.utc).timestamp() * 1000) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def Now():\n ut = (datetime.datetime.utcnow() - _EPOCH).total_seconds() / 86400.0\n return Time(ut)",
"def now() -> datetime:\n now = datetime.now(tz=timezone.utc)\n return now.replace(microsecond=now.microsecond - now.microsecond % 1000)",
"def now_timestamp(unit: TimeUnit = TimeUnit.SECONDS) -> float:\n return TimeHelper.to_timestamp(TimeHelper.now(), unit)",
"def utcnow_ts():\r\n return calendar.timegm(utcnow().timetuple())",
"def _create_timestamp():\n return (datetime.utcnow() - datetime(1970,1,1)).total_seconds()",
"def now_s():\n return calendar.timegm(now_dt().utctimetuple())",
"def getNowMilliseconds():\n return (datetime.datetime.utcnow() - Common.epoch_).total_seconds() * 1000.0",
"def get_now_utc(no_microseconds=True):\n if no_microseconds:\n return pytz.utc.localize(datetime.datetime.utcnow()).replace(\n microsecond=0\n )\n else:\n return pytz.utc.localize(datetime.datetime.utcnow())",
"def get_now_utc(no_microseconds=True):\n if no_microseconds:\n return pytz.utc.localize(datetime.datetime.utcnow()).replace(\n microsecond=0\n )\n else:\n return pytz.utc.localize(datetime.datetime.utcnow())",
"def now():\n return utcfromtimestamp(time.time())",
"def utc_millisecond_timestamp():\n return __date_to_millisecond_ts(utc())",
"def local_timestamp():\n # type: () -> int\n now = datetime.utcnow()\n timestamp_in_seconds = calendar.timegm(now.timetuple()) + (now.microsecond / 1e6)\n timestamp_in_milliseconds = int(timestamp_in_seconds * 1000)\n return timestamp_in_milliseconds",
"def nowUTC():\n return datetime.datetime.now(pytz.utc)",
"def _nowms():\n return int(time.time() * 1000)",
"def default_timestamp():\n date = datetime.datetime.now().replace(microsecond=0)\n return date",
"def current_time_millis():\n return int(round(time.time() * 1000))",
"def now():\n return int(datetime.datetime.now().strftime(\"%s\")) * 1000",
"def timestamp(millis=False):\n return int(round(time.time() * (millis and 1000 or 1)))",
"def tstamp_now(self):\n return self.tstamp(datetime.datetime.now(tz))",
"def get_now_hour_utc(no_microseconds=True):\n if no_microseconds:\n return datetime.datetime.utcnow().replace(microsecond=0).time()\n else:\n return datetime.datetime.utcnow().time()",
"def time_now():\n ts = datetime.datetime.now().timetuple()\n return '{wday} {day} {month} {year} {hour}:{minute:0>2d}:{second:0>2d} UTC'.format(\n year=ts.tm_year, month=calendar.month_name[ts.tm_mon],\n day=ts.tm_mday, wday=calendar.day_name[ts.tm_wday],\n hour=ts.tm_hour, minute=ts.tm_min, second=ts.tm_sec)",
"def utcnow(cls):\n t = _time.time()\n return cls.utcfromtimestamp(t)",
"def FromNowUTC(cls):\n t = pytime.time()\n utcTime = pytime.gmtime(t)\n return cls.FromStructTime(utcTime).WithZone(zDirection=0)",
"def get_now_hour_utc(no_microseconds=True):\n if no_microseconds:\n return datetime.time.utcnow().replace(microsecond=0).time()\n else:\n return datetime.time.utcnow().time()",
"def curTimeMs():\n\treturn int((datetime.utcnow() - datetime(1970,1,1)).total_seconds() * 1000)",
"def get_current_unix_timestamp_ms():\r\n return int(datetime.timestamp(datetime.now())) * 1000",
"def get_current_timestamp():\n return int(round(time.time() * 1e3))",
"def _get_now():\n return datetime.now(tz=timezone.utc)",
"def get_now():\n return dt.datetime.now(dt.timezone.utc)",
"def now():\n return datetime.datetime.now(pytz.utc)"
] | [
"0.7358585",
"0.73144686",
"0.73085713",
"0.7295754",
"0.72463673",
"0.719578",
"0.71388984",
"0.71132016",
"0.71132016",
"0.7087358",
"0.70538455",
"0.70202845",
"0.69747436",
"0.69600034",
"0.6942168",
"0.6916312",
"0.69006324",
"0.68955755",
"0.68503773",
"0.68242997",
"0.6822988",
"0.6818244",
"0.6805908",
"0.67947847",
"0.6773551",
"0.6769527",
"0.6767089",
"0.67489755",
"0.6718834",
"0.67101693"
] | 0.7725665 | 0 |
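For the UTC-millisecond row above, one quick sanity check is to round-trip the integer back through `fromtimestamp`; the function body is copied from the row, while the check around it is an assumption.

```python
import datetime

def get_utc_now_timestamp() -> int:
    # Millisecond-based timestamp of UTC now.
    return int(datetime.datetime.now(datetime.timezone.utc).timestamp() * 1000)

ms = get_utc_now_timestamp()
# Convert the integer milliseconds back into an aware UTC datetime.
roundtrip = datetime.datetime.fromtimestamp(ms / 1000, tz=datetime.timezone.utc)
print(ms, roundtrip.isoformat())
```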
Usable only when g_wc was used to find pr_wv | def get_cnt_wv_list(g_wn, g_wc, g_sql_i, pr_sql_i, mode):
cnt_list =[]
for b, g_wc1 in enumerate(g_wc):
pr_wn1 = len(pr_sql_i[b]["conds"])
g_wn1 = g_wn[b]
# Now sorting.
# Sort based wc sequence.
if mode == 'test':
idx1 = argsort(array(g_wc1))
elif mode == 'train':
idx1 = list( range( g_wn1) )
else:
raise ValueError
if g_wn1 != pr_wn1:
cnt_list.append(0)
continue
else:
flag = True
for i_wn, idx11 in enumerate(idx1):
g_wvi_str11 = str(g_sql_i[b]["conds"][idx11][2]).lower()
pr_wvi_str11 = str(pr_sql_i[b]["conds"][i_wn][2]).lower()
# print(g_wvi_str11)
# print(pr_wvi_str11)
# print(g_wvi_str11==pr_wvi_str11)
if g_wvi_str11 != pr_wvi_str11:
flag = False
# print(g_wv1, g_wv11)
# print(pr_wv1, pr_wv11)
# input('')
break
if flag:
cnt_list.append(1)
else:
cnt_list.append(0)
return cnt_list | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _compute_wo(self):\n self.uom_id = self.wo_id.uom_id.id\n self.final_product_id = self.wo_id.final_product_id.id\n if not self.supplier_id:\n self.service_ids = [x.product_id.id for x in self.wo_id.consumed_service_ids if x.product_id]\n else:\n service_ids = [x.product_id.id for x in self.wo_id.consumed_service_ids if x.product_id]\n if service_ids:\n product_supp_rcs = self.env['product.supplierinfo'].search([('partner_id', '=', self.supplier_id.id), ('product_id', 'in', service_ids)])\n if product_supp_rcs:\n product_ids = [x.product_id.id for x in product_supp_rcs if x.product_id]\n else:\n product_ids = []\n else:\n product_ids = [] \n \n self.service_ids = product_ids\n \n # Permet de savoir si l'OT suivant est de sous-traitance et qu'il y a qu'un seul OT suivant\n no_direct_fp = True\n if self.wo_id.next_wo_ids and len(self.wo_id.next_wo_ids) == 1 and self.wo_id.next_wo_ids[0].is_subcontracting and self.wo_id.next_wo_ids[0].consumed_service_ids:\n no_direct_fp = False\n \n self.no_direct_fp = no_direct_fp",
"def convert_pr_wvi_to_string(pr_wvi, nlu_t, nlu_wp_t, wp_to_wh_index, nlu):\r\n pr_wv_str_wp = [] # word-piece version\r\n pr_wv_str = []\r\n for b, pr_wvi1 in enumerate(pr_wvi):\r\n pr_wv_str_wp1 = []\r\n pr_wv_str1 = []\r\n wp_to_wh_index1 = wp_to_wh_index[b]\r\n nlu_wp_t1 = nlu_wp_t[b]\r\n nlu_t1 = nlu_t[b]\r\n\r\n for i_wn, pr_wvi11 in enumerate(pr_wvi1):\r\n st_idx, ed_idx = pr_wvi11\r\n\r\n # Ad-hoc modification of ed_idx to deal with wp-tokenization effect.\r\n # e.g.) to convert \"butler cc (\" ->\"butler cc (ks)\" (dev set 1st question).\r\n pr_wv_str_wp11 = nlu_wp_t1[st_idx:ed_idx+1]\r\n pr_wv_str_wp1.append(pr_wv_str_wp11)\r\n\r\n st_wh_idx = wp_to_wh_index1[st_idx]\r\n ed_wh_idx = wp_to_wh_index1[ed_idx]\r\n pr_wv_str11 = nlu_t1[st_wh_idx:ed_wh_idx+1]\r\n\r\n pr_wv_str1.append(pr_wv_str11)\r\n\r\n pr_wv_str_wp.append(pr_wv_str_wp1)\r\n pr_wv_str.append(pr_wv_str1)\r\n\r\n return pr_wv_str, pr_wv_str_wp",
"def unknown(self, w):\n # WORK HERE!!",
"def _compute_wo(self):\n self.uom_id = self.wo_id.uom_id.id\n self.final_product_id = self.wo_id.final_product_id.id\n self.service_ids = [x.product_id.id for x in self.wo_id.consumed_service_ids if x.product_id]",
"def get_wm_ws_Gx_bot(self):\n # BASICALLY SETS self.Gm1_bot, self.dGm1_dS_bot, self.Gt1_bot, self.dGt1_dS_bot \n z_u_r = self.grid_dict['z_u_r']\n z_u_w = self.grid_dict['z_u_w']\n [Ly,N] = self.b.shape\n #---> j-loop\n for j in range(Ly): \n self.kbl[j] = N # initialize search\n #-> end j-loop\n\n #--> k-loop\n for k in range(N-1,0,-1):\n k_w = k\n k_r = k-1\n # --> j loop \n for j in range(Ly):\n if z_u_r[j,k_r] - z_u_w[j,0] > self.hbbl[j]:\n self.kbl[j] = k_w\n\n #--> end k\n # --> end j\n\n\n '''\n Compute nondimenisonal shape function coefficeints Gx() by\n matching values and vertical derivatives of interior mixing\n coefficients at hbbl (sigma=1)\n '''\n\n self.Gm1_bot = np.zeros([Ly])\n self.dGm1_dS_bot = np.zeros([Ly])\n self.Gt1_bot = np.zeros([Ly])\n self.dGt1_dS_bot = np.zeros([Ly]) \n self.Av_bl_bot = np.zeros([Ly])\n self.dAv_bl_bot = np.zeros([Ly]) \n self.cff_up_bot = np.zeros([Ly])\n self.cff_dn_bot = np.zeros([Ly])\n\n\n\n\n\n self.wm_bot = np.zeros([Ly])\n self.ws_bot = np.zeros([Ly]) \n\n # CALCULATE ustar for the bottom based on bototm velocities\n \n \n \n # CALCULATE r_D\n self.r_D = TTTW_func.get_r_D(self.u,self.v,self.Zob,self.grid_dict) \n u = self.u\n v_upts = TTTW_func.v2u(self.v)\n \n ubar = np.mean(u,axis=1)\n vbar = np.mean(v_upts,axis=1)\n\n # --> j loop\n for j in range(Ly):\n # turbulent velocity sclaes with buoyancy effects neglected\n if self.CD_SWITCH:\n # DEPTH AVERAGED APPROACH\n uref = u[j,0]\n vref = v_upts[j,0]\n ustar2 = self.C_D * (uref**2 + vref**2)\n else:\n ustar2 = self.r_D[j] * np.sqrt(u[j,0]**2 + v_upts[j,0]**2)\n wm = self.vonKar * np.sqrt(ustar2)\n ws = wm\n\n self.wm_bot[j] = wm\n self.ws_bot[j] = ws\n \n k_w = self.kbl[j] \n z_bl = z_u_w[j,0] + self.hbbl[j]\n\n if z_bl < z_u_w[j,k_w-1]:\n k_w = k_w-1\n\n cff = 1. / (z_u_w[j,k_w] - z_u_w[j,k_w-1])\n cff_up = cff * (z_bl - z_u_w[j,k_w])\n cff_dn = cff * (z_u_w[j,k_w] - z_bl)\n \n Av_bl = cff_up * self.Kv_old[j,k_w] + cff_dn * self.Kv_old[j,k_w-1]\n dAv_bl = cff * ( self.Kv_old[j,k_w] - self.Kv_old[j,k_w-1])\n self.Av_bl_bot[j] = Av_bl\n self.dAv_bl_bot[j] = dAv_bl\n\n\n self.Gm1_bot[j] = Av_bl / (self.hbbl[j] * wm + self.eps)\n self.dGm1_dS_bot[j] = np.min([0,-dAv_bl/(ws+self.eps)])\n\n At_bl = cff_up * self.Kt_old[j,k_w] + cff_dn * self.Kt_old[j,k_w-1]\n dAt_bl = cff * ( self.Kt_old[j,k_w] - self.Kt_old[j,k_w-1])\n self.Gt1_bot[j] = At_bl / (self.hbbl[j] * ws + self.eps)\n self.dGt1_dS_bot[j] = np.min([0,-dAt_bl/(ws+self.eps)])",
"def grounding_words(self, w):\n story_visual_words = [x for x in self.visual_words if x in self.vocab]\n visual_word_ids = [self.vocab_ids[x] for x in story_visual_words]\n visual_similarities = self.sigma_A[self.vocab_ids[w]][visual_word_ids]\n return sorted(zip(story_visual_words, visual_similarities), key = lambda x : -x[1])",
"def wisdom(search_me, source, pdfurl, userid):\n ### source needs to be name of data source (\"arxiv\", \"google scholar\", \"doaj\")\n search_me = search_me.strip()\n # check if pdfurl has been found before\n pdf = db_summaries.find_one({\"url\": pdfurl})\n if pdf:\n text = pdf.get('text')\n summary = pdf.get('summary')\n topics = pdf.get('topics')\n # update in db if data is 1 days or older\n last_updated = datetime.utcnow() - pdf.get(\"last_updated\")\n last_updated_diff = last_updated.days\n if last_updated_diff > 1:\n search_term = db_search_terms.find_one({\"value\": search_me.lower()})\n search_id = search_term.get(\"_id\")\n data = {\"search_id\": search_id,\n \"url\": pdfurl, \"source\": source, \"text\": text,\n \"summary\": summary, \"topics\": topics, \"last_updated\": datetime.utcnow()}\n db_summaries.update({\"url\": pdfurl}, {\"$set\": data})\n else:\n pass\n else:\n text = wisdomaiengine.pdfdocumentextracter(pdfurl)\n summary = wisdomaiengine.summarisepdfdocument(text)\n topics = wisdomaiengine.wordcloud(search_me, text)\n if topics is None:\n topics = ['No Topics Found']\n # write data to arxiv collection\n search_term = db_search_terms.find_one({\"value\": search_me.lower()})\n search_id = search_term.get(\"_id\")\n data = {\"search_id\": search_id,\n \"url\": pdfurl,\n \"source\": source,\n \"text\": text,\n \"summary\": summary,\n \"topics\": topics,\n \"last_updated\": datetime.utcnow()}\n x = db_summaries.insert(data, check_keys=False)\n # return json\n summaryjson = jsonify(wisdomtopics=topics, wisdomsummary=summary)\n return summaryjson",
"def get_cnt_wvi_list(g_wn, g_wc, g_wvi, pr_wvi, mode):\r\n cnt_list =[]\r\n for b, g_wvi1 in enumerate(g_wvi):\r\n g_wc1 = g_wc[b]\r\n pr_wvi1 = pr_wvi[b]\r\n pr_wn1 = len(pr_wvi1)\r\n g_wn1 = g_wn[b]\r\n\r\n # Now sorting.\r\n # Sort based wc sequence.\r\n if mode == 'test':\r\n idx1 = argsort(array(g_wc1))\r\n elif mode == 'train':\r\n idx1 = list( range( g_wn1) )\r\n else:\r\n raise ValueError\r\n\r\n if g_wn1 != pr_wn1:\r\n cnt_list.append(0)\r\n continue\r\n else:\r\n flag = True\r\n for i_wn, idx11 in enumerate(idx1):\r\n g_wvi11 = g_wvi1[idx11]\r\n pr_wvi11 = pr_wvi1[i_wn]\r\n if g_wvi11 != pr_wvi11:\r\n flag = False\r\n # print(g_wv1, g_wv11)\r\n # print(pr_wv1, pr_wv11)\r\n # input('')\r\n break\r\n if flag:\r\n cnt_list.append(1)\r\n else:\r\n cnt_list.append(0)\r\n\r\n return cnt_list",
"def get_wotd():\n\treturn wotd",
"def _get_wred(self):\n return self.__wred",
"def _get_wred(self):\n return self.__wred",
"def _get_wred(self):\n return self.__wred",
"def svn_info_t_has_wc_info_get(svn_info_t_self): # real signature unknown; restored from __doc__\n pass",
"def get_warc(self):\n raise NotImplementedError()",
"def wing(self):\n return",
"def is_wcw(status):\n test_text = ' '.join(status['text'].lower().split()) # Remove capital letters and excessive whitespace/linebreaks\n usernames = ['just_to_say_bot', 'thisisjustbot', 'Dcd200S', 'willslostplum', 'sosweetbot', 'JustToSayBot', 'thatisjustplums', \\\n\t\t 'EatenBot', 'the_niche_bot', 'KristenCostel10', 'litabottal', 'pythonnina', 'alatest5', 'LisaRob96585017','Stilson28400122', \\\n\t\t 'JohnDun40217560','Cordelia28', 'Rick63556459', 'botsnthings', 'timbot301', 'Rachel53001595', 'NicholasMillma6', 'ThisIsJustTo1'\\\n\t\t'MayISay4', 'breakfast_plum', 'BotBot53368932'] # Block screen_names of known parody accounts\n if status['user']['screen_name'] not in usernames and all(u not in status['text'] for u in usernames) and 'Cheap Bots, Done Quick!' not in status['source']:\n if 'which you were probably' in test_text: # Capture parodies of the form\n return True\n elif 'plums' in test_text and 'icebox' in test_text: # Capture parodies of the content\n return True\n elif 'plum' in test_text and 'icebox' in test_text: # Capture singular 'plum'\n return True\n elif 'plums' in test_text and 'ice box' in test_text: #Capture 'ice box' with a space\n return True\n elif 'plum' in test_text and 'ice box' in test_text: \n return True\n elif 'William Carlos Williams'.lower() in test_text and 'plums' in test_text: #Capture mentions of WCW\n return True\n elif 'William Carlos Williams'.lower() in test_text and 'plum' in test_text:\n return True\n elif 'this is just to say' in test_text and 'that were in' in test_text: # Get only relevant instances of \"this is just to say\"\n return True\n elif 'this is just to say' in test_text and 'forgive me' in test_text:\n return True\n elif 'this is just to say' in test_text and 'and so' in test_text:\n return True\n elif 'so sweet and so cold' in test_text and 'the arms of the ocean' not in test_text: # Get 'so sweet and so cold' tweets that aren't quoting Florence and the Machine\n return True\n else:\n return False\n else:\n return False",
"def get_WS(w2v):\n # get set of MAX_NGRAM-grams in text\n lines = open(INFNAME_FORMAT.format(\"train\")).readlines() \\\n + open(INFNAME_FORMAT.format(\"test\")).readlines()\n raw = [process_line(l) for l in lines ]\n ngrams_in_data = set()\n for words in raw:\n for ngram in tweet_to_ngrams(words):\n ngrams_in_data.add(ngram)\n\n # load sentiment features from model\n clf_pipe = pickle.load(open(CLF_FNAME, 'rb')) # model\n\n vect = clf_pipe.best_estimator_.named_steps['vect']\n clf = clf_pipe.best_estimator_.named_steps['clf']\n\n features_to_sent_idx = vect.vocabulary_ # map from model features to sentiment index\n # currently, sentiment = 2 * (count_pos / (count_pos + count_neg)) - 1\n sentiments = clf.feature_count_[1,:] / np.sum(clf.feature_count_, axis=0) # in [0,1]\n sentiments = 2 * sentiments - 1 # rescale to [-1,1]\n\n features_to_sent = {feat: sentiments[idx] for (feat,idx) in features_to_sent_idx.items()}\n\n # build WS and ngram_idx_map for each MAX_NGRAM-gram in the text\n k = len(next(iter(w2v.values()))) # dimension of embedding\n WS = np.zeros(shape=(len(ngrams_in_data) + 1, k + MAX_NGRAM), dtype='float32')\n ngram_idx_map = {}\n\n index = 1 # first row is left 0, for padding in the cnn. This is also neutral sentiment.\n # For Vader Sentiment analysis\n# vader_analyzer = SentimentIntensityAnalyzer()\n\n\n for ngram in ngrams_in_data:\n ngram_idx_map[ngram] = index\n\n # set word embedding, note that unknown words already randomized in load_embedding \n words = ngram.split(' ')\n WS[index,:k] = w2v[words[-1]] # embedding of last word\n\n # set sentiment embedding\n for n in range(MAX_NGRAM): # for 1, 2, ... length ngrams\n sub_ngram = ' '.join(words[-1 - n:]) \n\n # Naive Bayes Sentiment feature --------------------------------\n sent = features_to_sent.get(sub_ngram, 0.0) # default to neutral 0\n # --------------------------------------------------------------\n\n# # TextBlob sentiment feature -----------------------------------\n# sent = TextBlob(sub_ngram).sentiment.polarity\n# # --------------------------------------------------------------\n\n# # Vader sentiment feature -------------------------------------\n# sent = vader_analyzer.polarity_scores(sub_ngram)['compound']\n# # -------------------------------------------------------------\n WS[index,k+n] = sent\n\n index += 1\n\n return WS, ngram_idx_map",
"def combined_wcs(self) -> BaseHighLevelWCS:",
"def get_cnt_sw_list(g_sc, g_sa, g_wn, g_wc, g_wo, g_wvi,\r\n pr_sc, pr_sa, pr_wn, pr_wc, pr_wo, pr_wvi,\r\n g_sql_i, pr_sql_i,\r\n mode):\r\n cnt_sc = get_cnt_sc_list(g_sc, pr_sc)\r\n cnt_sa = get_cnt_sc_list(g_sa, pr_sa)\r\n cnt_wn = get_cnt_sc_list(g_wn, pr_wn)\r\n cnt_wc = get_cnt_wc_list(g_wc, pr_wc)\r\n cnt_wo = get_cnt_wo_list(g_wn, g_wc, g_wo, pr_wc, pr_wo, mode)\r\n if pr_wvi:\r\n cnt_wvi = get_cnt_wvi_list(g_wn, g_wc, g_wvi, pr_wvi, mode)\r\n else:\r\n cnt_wvi = [0]*len(cnt_sc)\r\n cnt_wv = get_cnt_wv_list(g_wn, g_wc, g_sql_i, pr_sql_i, mode) # compare using wv-str which presented in original data.\r\n\r\n\r\n return cnt_sc, cnt_sa, cnt_wn, cnt_wc, cnt_wo, cnt_wvi, cnt_wv",
"def handlehtmlsearch_wok(querystring, keywordstring, searchlimit, cache, smartconstrain):\n mpsearch, keywords, constraints = handlehtmlsearch_mp(querystring, keywordstring, cache, smartconstrain)\n\n with open(os.path.join(os.getcwd(), 'resources', 'json','wokRecord.json'), 'rt') as record:\n try:\n wlist = json.load(record)\n except ValueError:\n wlist = {}\n\n searchtotal = 0\n for search in mpsearch:\n searchtotal += len(search)\n\n wokresults = []\n i = 0\n\n for search in mpsearch:\n for n in search:\n iterinput = []\n for m in n['unit_cell_formula'].keys():\n iterinput.append(m + str(int(n['unit_cell_formula'][m])))\n\n iterlist = list(itertools.permutations(iterinput))\n\n searchparam = 'topic:' + n['pretty_formula'] + ' or topic:' + n['full_formula']\n\n for term in iterlist:\n searchparam += ' or topic:' + ''.join(term)\n\n i += 1\n if n['material_id'] in wlist.keys() and cache:\n print('loading wok for ' + n['full_formula'] + ' (' + str(i) + '/' + str(searchtotal) + ')')\n wokresults.append(wlist[n['material_id']])\n else:\n print('searching wok for ' + n['full_formula'] + ' (' + str(i) + '/' + str(searchtotal) + ')')\n try:\n searchdata = searchWoKTools.getsearchdata(searchparam, searchlimit)\n except:\n with open('wokRecord.json', 'wt') as record:\n json.dump(wlist, record)\n raise\n\n searchdata[0].update(n)\n wlist[n['material_id']] = searchdata\n wokresults.append(searchdata)\n\n mpsearch, wokresults = searchWoKTools.removeconstrainedwok(mpsearch, wokresults, constraints)\n\n with open(os.path.join(os.getcwd(), 'resources', 'json','wokRecord.json'), 'wt') as record:\n json.dump(wlist, record)\n\n keyresults = []\n for search in wokresults:\n keyresults.append(searchWoKTools.getkeylist(search, keywords))\n\n return keywords, mpsearch, wokresults, keyresults",
"def pwgrwlfilter(self):\n return None",
"def treat_page(self):\n # let's define some basic variables\n urtext = self.current_page.text\n urlang = self.current_page.site.code\n urtitle = self.current_page.title()\n urcat = []\n eng_site = pywikibot.Site('en')\n eng_title = ''\n \n interDict = {}\n try:\n site = pywikibot.Site('ur', 'wikipedia')\n urpage = pywikibot.Page(site, urtitle)\n langlst = urpage.iterlanglinks()\n\n \n for i in langlst:\n lang = str(i.site).split(':')[1]\n interDict[lang] = i.title\n \n eng_title = interDict['en']\n except:\n pywikibot.output(u'\\03{lightred}Unable to fetch interwiki links!\\03{default}')\n return False\n \n site = pywikibot.Site('en', 'wikipedia')\n enpage = pywikibot.Page(site, eng_title)\n\n wikitext = enpage.get() \n wikicode = mwp.parse(wikitext)\n\n # Extracting sfn templates and converting them in REF tags\n sfnlist = []\n for template in wikicode.filter_templates():\n if template.name in ('sfn', 'sfn'):\n sfnlist.append(template)\n templ_rep = '<ref>' + str(template) + '</ref>'\n wikicode.replace(template , templ_rep)\n\n alltags = wikicode.filter_tags() \n reftags = {}\n \n def search(myDict, search1):\n for key, value in myDict.items():\n if search1 in value: \n return key \n \n i=1\n for tag in alltags:\n if tag.tag=='ref':\n if tag.attributes == []: # check if attributes list is empty\n refval='NoRefName' # Reference has no name so assigning \"NoRefName\"\n else:\n name = tag.attributes[0]\n refval = name.value\n \n if tag.contents is None:\n #conval = search(reftags,refval)\n #reftags[i] = (refval,reftags[conval][1])\n pass\n else: \n reftags[i] = (refval,tag.contents)\n i += 1\n\n dlinks = {}\n for k,v in reftags.items():\n dkey = 'و' + str(k) + 'و'\n dlinks[dkey] = '<ref>' + str(v[1]) + '</ref>'\n\n urtext = urpage.text\n for r in tuple(dlinks.items()):\n urtext = urtext.replace(*r)\n\n # newln = '\\n'\n # Using noreferences to add Reference template if not present\n self.norefbot = noreferences.NoReferencesBot(None)\n if self.norefbot.lacksReferences(urtext):\n urtext = self.norefbot.addReferences(urtext)\n else:\n urpage.text = urtext + '\\n'\n\n print(urpage.text)\n \n # save the page \n urpage.save(summary=self.summary, minor=False)\n #self.put_current(urpage.text, summary=self.summary)",
"def CC_wdw(self):\n # Setup param\n loc = 'TSdata'\n if 'single' == self.newParam['survey_type']:\n TS_len = dt.utilities.DB_pd_data_load(self.Database, loc).shape[0]\n elif 'multiple' == self.newParam['survey_type']:\n TS_group = dt.utilities.DB_group_names(self.Database, group_name = loc)[0]\n TS_len = dt.utilities.DB_pd_data_load(self.Database, loc+'/'+TS_group).shape[0]\n\n param = self.newParam\n\n # Assign TS processing length to end_wdws if given\n if param['end_wdws']:\n TS_sig_len = param['end_wdws']\n else:\n TS_sig_len = TS_len\n\n ERROR_MESSAGE = 'The length of a TS signal to be processed is', TS_sig_len, \\\n 'which is < end of the last window'\n\n # Calculate wdwPos for overlapping windows of ww_ol if wdwPos is False\n if param['wdwPos'][0] is False:\n # Error checks\n if TS_sig_len < self.newParam['ww'][0]:\n raise Warning(ERROR_MESSAGE)\n\n wdwStep = np.floor(param['ww'][0] *\n (100 - param['ww_ol']) / 100)\n\n if self.verbose: print('* Length fo TSdata', TS_len)\n\n max_wdwPos = TS_sig_len - param['ww'][0] + 1\n wdwStarts = np.arange(0 + param['sta_wdws'], max_wdwPos, wdwStep).astype(int)\n\n if self.verbose: print('* The step in window potions is %s sample points' % wdwStep)\n if self.verbose: print('* The max window postions is %s sample points'% max_wdwPos)\n\n param['wdwPos'] = [ [wdw_start, wdw_start + param['ww'][0]] for\n wdw_start in wdwStarts ]\n\n # Only update wdwPos structure if not already done so\n elif np.array(param['wdwPos'][0]).shape == ():\n param['wdwPos'] = [ [wdw_start, wdw_start + ww] for wdw_start,ww in\n zip(param['wdwPos'], param['ww'])]\n\n self.newParam['wdwPos'] = param['wdwPos']",
"def fToc_Wtf(Vc1,Vc2,Vc3,Vk,Vrw,Ck,Dc1,Dc2,Dc3,Dk,Dw):\n\tGDen=fOrmGDen(Vc1,Vc2,Vc3,Vk,Vrw,Dc1,Dc2,Dc3,Dk,Dw)\n\tTocwf=Vk*Ck*Dk/GDen\n\treturn Tocwf",
"def buildWPriorTerm(self):\r\n\r\n # self.w_prior.shape == (minibatch size,)\r\n self.w_prior = 0.5*T.sum(1 + T.log(self.qwgy_var) - self.qwgy_mu**2-self.qwgy_var, axis=1)\r\n\r\n self.w_prior_modif = - T.maximum(self.hyper['treshold_w_prior'], -self.w_prior)",
"def wkt(self): # -> str:\n ...",
"def _new_wos_dict():\n wos_dict = {\n 'DI': None,\n 'TI': None,\n 'PY': None,\n 'SO': None,\n 'UT': None,\n 'DE': None,\n }\n\n return wos_dict",
"def wing_geom_eval(self, cpacs):\n\n log.info(\"---------- Analysing wing geometry ----------\")\n\n # Opening tixi and tigl\n tixi = cpacs.tixi\n tigl = cpacs.tigl\n\n # Counting wing number without symmetry --------------------------------------\n w_nb = tixi.getNamedChildrenCount(WINGS_XPATH, \"wing\")\n\n # INITIALIZATION 1 -----------------------------------------------------------\n self.w_nb = w_nb\n self.wing_nb = w_nb\n wing_plt_area_xz = []\n wing_plt_area_yz = []\n wingUID = []\n\n # Counting sections and segments----------------------------------------------\n b = 0\n for i in range(1, w_nb + 1):\n double = 1\n self.wing_sym.append(tigl.wingGetSymmetry(i))\n if self.wing_sym[i - 1] != 0:\n double = 2 # To consider the real amount of wing\n # when they are defined using symmetry\n self.wing_nb += 1\n self.wing_sec_nb.append(tigl.wingGetSectionCount(i))\n self.wing_seg_nb.append(tigl.wingGetSegmentCount(i))\n self.wing_vol.append(tigl.wingGetVolume(i) * double)\n\n self.wing_plt_area.append(tigl.wingGetReferenceArea(i, 1) * double) # x-y plane\n wing_plt_area_xz.append(tigl.wingGetReferenceArea(i, 2) * double) # x-z plane\n wing_plt_area_yz.append(tigl.wingGetReferenceArea(i, 3) * double) # y-z plane\n\n self.wing_tot_vol = self.wing_tot_vol + self.wing_vol[i - 1]\n wingUID.append(tigl.wingGetUID(i))\n self.wing_span.append(tigl.wingGetSpan(wingUID[i - 1]))\n a = np.amax(self.wing_span)\n # Evaluating the index that corresponds to the main wing\n if a > b:\n self.main_wing_index = i\n b = a\n\n # Checking segment and section connection and reordering them\n (\n self.wing_sec_nb,\n _,\n seg_sec,\n _,\n ) = self.wing_check_segment_connection(wing_plt_area_xz, wing_plt_area_yz, tigl)\n\n # INITIALIZATION 2 -----------------------------------------------------------\n\n max_wing_sec_nb = np.amax(self.wing_sec_nb)\n max_wing_seg_nb = np.amax(self.wing_seg_nb)\n wing_center_section_point = np.zeros((max_wing_sec_nb, w_nb, 3))\n self.wing_center_seg_point = np.zeros((max_wing_seg_nb, self.wing_nb, 3))\n self.wing_seg_vol = np.zeros((max_wing_seg_nb, w_nb))\n self.wing_fuel_seg_vol = np.zeros((max_wing_seg_nb, w_nb))\n self.wing_fuel_vol = 0\n self.wing_mac = np.zeros((4, w_nb))\n self.wing_sec_thickness = np.zeros((max_wing_sec_nb + 1, w_nb))\n\n # WING ANALYSIS --------------------------------------------------------------\n\n # Main wing plantform area\n self.wing_plt_area_main = self.wing_plt_area[self.main_wing_index - 1]\n\n # Wing: MAC,chords,thickness,span,plantform area ------------------------------\n\n for i in range(1, w_nb + 1):\n mac = tigl.wingGetMAC(wingUID[i - 1])\n wpx, wpy, wpz = tigl.wingGetChordPoint(i, 1, 0.0, 0.0)\n wpx2, wpy2, wpz2 = tigl.wingGetChordPoint(i, 1, 0.0, 1.0)\n self.wing_max_chord.append(\n np.sqrt((wpx2 - wpx) ** 2 + (wpy2 - wpy) ** 2 + (wpz2 - wpz) ** 2)\n )\n wpx, wpy, wpz = tigl.wingGetChordPoint(i, self.wing_seg_nb[i - 1], 1.0, 0.0)\n wpx2, wpy2, wpz2 = tigl.wingGetChordPoint(i, self.wing_seg_nb[i - 1], 1.0, 1.0)\n self.wing_min_chord.append(\n np.sqrt((wpx2 - wpx) ** 2 + (wpy2 - wpy) ** 2 + (wpz2 - wpz) ** 2)\n )\n for k in range(1, 5):\n self.wing_mac[k - 1][i - 1] = mac[k - 1]\n for jj in range(1, self.wing_seg_nb[i - 1] + 1):\n j = int(seg_sec[jj - 1, i - 1, 2])\n cle = tigl.wingGetChordPoint(i, j, 0.0, 0.0)\n self.wing_seg_vol[j - 1][i - 1] = tigl.wingGetSegmentVolume(i, j)\n lp = tigl.wingGetLowerPoint(i, j, 0.0, 0.0)\n up = tigl.wingGetUpperPoint(i, j, 0.0, 0.0)\n if np.all(cle == lp):\n L = 0.25\n else:\n L = 0.75\n if np.all(cle == 
up):\n U = 0.25\n else:\n U = 0.75\n wplx, wply, wplz = tigl.wingGetLowerPoint(i, j, 0.0, L)\n wpux, wpuy, wpuz = tigl.wingGetUpperPoint(i, j, 0.0, U)\n wing_center_section_point[j - 1][i - 1][0] = (wplx + wpux) / 2\n wing_center_section_point[j - 1][i - 1][1] = (wply + wpuy) / 2\n wing_center_section_point[j - 1][i - 1][2] = (wplz + wpuz) / 2\n self.wing_sec_thickness[j - 1][i - 1] = np.sqrt(\n (wpux - wplx) ** 2 + (wpuy - wply) ** 2 + (wpuz - wplz) ** 2\n )\n j = int(seg_sec[self.wing_seg_nb[i - 1] - 1, i - 1, 2])\n wplx, wply, wplz = tigl.wingGetLowerPoint(i, self.wing_seg_nb[i - 1], 1.0, L)\n wpux, wpuy, wpuz = tigl.wingGetUpperPoint(i, self.wing_seg_nb[i - 1], 1.0, U)\n self.wing_sec_thickness[j][i - 1] = np.sqrt(\n (wpux - wplx) ** 2 + (wpuy - wply) ** 2 + (wpuz - wplz) ** 2\n )\n wing_center_section_point[self.wing_seg_nb[i - 1]][i - 1][0] = (wplx + wpux) / 2\n wing_center_section_point[self.wing_seg_nb[i - 1]][i - 1][1] = (wply + wpuy) / 2\n wing_center_section_point[self.wing_seg_nb[i - 1]][i - 1][2] = (wplz + wpuz) / 2\n self.wing_sec_mean_thick.append(\n np.mean(self.wing_sec_thickness[0 : self.wing_seg_nb[i - 1] + 1, i - 1])\n )\n # Evaluating wing fuel tank volume in the main wings\n if abs(round(self.wing_plt_area[i - 1], 3) - self.wing_plt_area_main) < 0.001:\n tp_ratio = self.wing_min_chord[i - 1] / self.wing_max_chord[i - 1]\n ratio = round(tp_ratio * self.wing_plt_area[i - 1] / 100, 1)\n if ratio >= 1.0:\n self.wing_fuel_vol = round(self.wing_vol[i - 1] * 0.8, 2)\n elif ratio >= 0.5:\n self.wing_fuel_vol = round(self.wing_vol[i - 1] * 0.72, 2)\n else:\n self.wing_fuel_vol = round(self.wing_vol[i - 1] * 0.5, 2)\n for j in seg_sec[:, i - 1, 2]:\n if j == 0.0:\n break\n self.wing_fuel_seg_vol[int(j) - 1][i - 1] = round(\n (self.wing_seg_vol[int(j) - 1][i - 1] / (sum(self.wing_vol)))\n * self.wing_fuel_vol,\n 2,\n )\n if (\n self.wing_plt_area[i - 1] > wing_plt_area_xz[i - 1]\n and self.wing_plt_area[i - 1] > wing_plt_area_yz[i - 1]\n ):\n self.is_horiz.append(True)\n if self.wing_sym[i - 1] != 0:\n self.is_horiz.append(True)\n else:\n self.is_horiz.append(False)\n if self.wing_sym[i - 1] != 0:\n self.is_horiz.append(False)\n\n # Wing segment length evaluating function\n self.get_wing_segment_length(wing_center_section_point)\n\n # Evaluating the point at the center of each segment, the center\n # is placed at 1/4 of the chord, symmetry is considered.\n\n a = 0\n c = False\n for i in range(1, int(self.wing_nb) + 1):\n if c:\n c = False\n continue\n for jj in range(1, self.wing_seg_nb[i - a - 1] + 1):\n j = int(seg_sec[jj - 1, i - a - 1, 2])\n self.wing_center_seg_point[j - 1][i - 1][0] = (\n wing_center_section_point[j - 1][i - a - 1][0]\n + wing_center_section_point[j][i - a - 1][0]\n ) / 2\n self.wing_center_seg_point[j - 1][i - 1][1] = (\n wing_center_section_point[j - 1][i - a - 1][1]\n + wing_center_section_point[j][i - a - 1][1]\n ) / 2\n self.wing_center_seg_point[j - 1][i - 1][2] = (\n wing_center_section_point[j - 1][i - a - 1][2]\n + wing_center_section_point[j][i - a - 1][2]\n ) / 2\n if self.wing_sym[i - 1 - a] != 0:\n if self.wing_sym[i - 1 - a] == 1:\n symy = 1\n symx = 1\n symz = -1\n if self.wing_sym[i - 1 - a] == 2:\n symy = -1\n symx = 1\n symz = 1\n if self.wing_sym[i - 1 - a] == 3:\n symy = 1\n symx = -1\n symz = 1\n self.wing_center_seg_point[:, i, 0] = (\n self.wing_center_seg_point[:, i - 1, 0] * symx\n )\n self.wing_center_seg_point[:, i, 1] = (\n self.wing_center_seg_point[:, i - 1, 1] * symy\n )\n self.wing_center_seg_point[:, i, 2] = (\n 
self.wing_center_seg_point[:, i - 1, 2] * symz\n )\n c = True\n a += 1\n\n self.w_seg_sec = seg_sec\n\n self.wing_area = round(self.wing_plt_area_main, 3)\n\n log.info(\"---------------------------------------------\")\n log.info(\"--------------- Wing Results ----------------\")\n log.info(f\"Number of Wings [-]: {self.wing_nb}\")\n log.info(f\"Wing symmetry plane [-]: {self.wing_sym}\")\n log.info(f\"Number of wing sections (not counting symmetry) [-]: {self.wing_sec_nb}\")\n log.info(f\"Number of wing segments (not counting symmetry) [-]: {self.wing_seg_nb}\")\n log.info(f\"Wing Span [m]: {self.wing_span}\")\n log.info(f\"Wing MAC length [m]: {self.wing_mac[0,]}\")\n log.info(f\"Wing max chord length [m]: {self.wing_max_chord}\")\n log.info(f\"Wing min chord length [m]: {self.wing_min_chord}\")\n log.info(f\"Main wing plantform area [m^2]: {self.wing_plt_area_main}\")\n log.info(f\"Wings plantform area [m^2]: {self.wing_plt_area}\")\n log.info(f\"Volume of each wing [m^3]: {self.wing_vol}\")\n log.info(f\"Total wing volume [m^3]: {self.wing_tot_vol}\")\n log.info(f\"Wing volume for fuel storage [m^3]: {self.wing_fuel_vol}\")\n log.info(\"---------------------------------------------\")",
"def get_wo_mthly_smry(self, workorder_browse):\n wo_summary_data = []\n wo_check_dict = {}\n no = 0\n if workorder_browse:\n for work_rec in workorder_browse:\n if work_rec.state and work_rec.state == \"done\":\n no += 1\n identification = \"\"\n repair_line_data = \"\"\n if work_rec.vehicle_id:\n identification += work_rec.vehicle_id.name\n if work_rec.vehicle_id.f_brand_id:\n identification += \" \" + work_rec.vehicle_id.f_brand_id.name\n if work_rec.vehicle_id.model_id:\n identification += \" \" + work_rec.vehicle_id.model_id.name\n for repaire_line in work_rec.repair_line_ids:\n if repaire_line.complete is True:\n if (\n repaire_line.repair_type_id\n and repaire_line.repair_type_id.name\n ):\n repair_line_data += (\n repaire_line.repair_type_id.name + \", \"\n )\n if work_rec.parts_ids:\n for parts_line in work_rec.parts_ids:\n if work_rec.id in wo_check_dict.keys():\n parts_data = {\n \"no\": -1,\n \"location\": \"\",\n \"type\": \"\",\n \"wo\": \"\",\n \"identification\": \"\",\n \"vin\": \"\",\n \"plate_no\": \"\",\n \"work_performed\": \"\",\n \"part\": parts_line.product_id\n and parts_line.product_id.default_code\n or \"\",\n \"qty\": parts_line.qty or 0.0,\n \"uom\": parts_line.product_uom\n and parts_line.product_uom.name\n or \"\",\n }\n wo_summary_data.append(parts_data)\n else:\n wo_check_dict[work_rec.id] = work_rec.id\n parts_data = {\n \"no\": no,\n \"location\": work_rec.team_id\n and work_rec.team_id.name\n or \"\",\n \"type\": work_rec.main_type or \"\",\n \"wo\": work_rec.name or \"\",\n \"identification\": identification or \"\",\n \"vin\": work_rec.vehicle_id\n and work_rec.vehicle_id.vin_sn\n or \"\",\n \"plate_no\": work_rec.vehicle_id\n and work_rec.vehicle_id.license_plate\n or \"\",\n \"work_performed\": repair_line_data\n and repair_line_data[:-2]\n or \"\",\n \"part\": parts_line.product_id\n and parts_line.product_id.default_code\n or \"\",\n \"qty\": parts_line.qty or 0.0,\n \"uom\": parts_line.product_uom\n and parts_line.product_uom.name\n or \"\",\n }\n wo_summary_data.append(parts_data)\n else:\n parts_data = {\n \"no\": no,\n \"location\": work_rec.team_id\n and work_rec.team_id.name\n or \"\",\n \"type\": work_rec.main_type or \"\",\n \"wo\": work_rec.name or \"\",\n \"identification\": identification or \"\",\n \"vin\": work_rec.vehicle_id\n and work_rec.vehicle_id.vin_sn\n or \"\",\n \"plate_no\": work_rec.vehicle_id\n and work_rec.vehicle_id.license_plate\n or \"\",\n \"work_performed\": repair_line_data\n and repair_line_data[:-2]\n or \"\",\n \"vehicle_make\": \"\",\n \"qty\": \"\",\n \"uom\": \"\",\n }\n wo_summary_data.append(parts_data)\n if not wo_summary_data:\n msg = _(\n \"Warning! \\n\\\n No data Available for selected work order.\"\n )\n raise UserError(msg)\n return wo_summary_data",
"def analyzeWeights(self, occludedPortions):\r\n\t\treturn None"
] | [
"0.5585714",
"0.54342604",
"0.5361827",
"0.5358429",
"0.5239973",
"0.51996017",
"0.51747024",
"0.50968176",
"0.50909096",
"0.5046365",
"0.5046365",
"0.5046365",
"0.5030739",
"0.50290895",
"0.50091815",
"0.5000712",
"0.49765882",
"0.49565095",
"0.4948263",
"0.4933547",
"0.49296203",
"0.4927025",
"0.49204654",
"0.48807576",
"0.48656607",
"0.48189992",
"0.47896227",
"0.47831023",
"0.4776721",
"0.47629702"
] | 0.57827145 | 0 |
get master monitor pid | def get_pid(ssh):
pid_file_path = data_dir.MM_PID_DIR+"master_monitord.pid" #get the file path of master_monitord.pid
cmd = "sudo cat %s" % pid_file_path #build the cat command
s_stdin, s_stdout, s_stderr = ssh.exec_command(cmd) #run the command via ssh
return s_stdout.read()
#pid, error = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE).communicate() #run the command
#if error == None:
# return int(pid)
#return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_host_master_id(self):\r\n return self._handler.get_host_master_id()",
"def get_pid(self):\n if self.status():\n file = open(os.path.join(self.data_dir, 'postmaster.pid'))\n pid = int(file.readline())\n return pid\n else:\n return None",
"def master_id(self):\r\n return self._arm.master_id",
"def get_sync_master_port(self):\n self.sync_master_port = None\n pos = None\n sm_port_text = \"Starting syncmaster on port\"\n sw_text = \"syncworker up and running\"\n worker_count = 0\n logging.info(\"detecting sync master port\")\n while worker_count < 3 and self.is_instance_running():\n progress(\"%\")\n lfs = self.get_log_file()\n npos = lfs.find(sw_text, pos)\n if npos >= 0:\n worker_count += 1\n pos = npos + len(sw_text)\n else:\n time.sleep(1)\n lfs = self.get_log_file()\n pos = lfs.find(sm_port_text)\n pos = lfs.find(sm_port_text, pos + len(sm_port_text))\n pos = lfs.find(sm_port_text, pos + len(sm_port_text))\n if pos >= 0:\n pos = pos + len(sm_port_text) + 1\n self.sync_master_port = int(lfs[pos : pos + 4])\n return self.sync_master_port",
"def pid():\n return 0x0204",
"def pid():\n return 0x0204",
"def getPID(self):\r\n self._update('getPID')\r\n return self.supervisord.options.get_pid()",
"def _get_pid(self):\n ps_txt = six.ensure_str(self.controller.run(\n args=[\"ps\", \"ww\", \"-u\"+str(os.getuid())]\n ).stdout.getvalue()).strip()\n lines = ps_txt.split(\"\\n\")[1:]\n\n for line in lines:\n if line.find(\"ceph-{0} -i {1}\".format(self.daemon_type, self.daemon_id)) != -1:\n log.info(\"Found ps line for daemon: {0}\".format(line))\n return int(line.split()[0])\n log.info(\"No match for {0} {1}: {2}\".format(\n self.daemon_type, self.daemon_id, ps_txt\n ))\n return None",
"def pid(self):\n\t\treturn self.__pid",
"def pid(self):",
"def pid(self):\n return self._get_process_id()",
"def master_port(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"master_port\")",
"def master_port(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"master_port\")",
"def masterPort(self):\r\n return self._masterPort",
"def get_process_pid(robot_name):\n\n try:\n result = check_output(['pgrep', 'x{0}'.format(robot_name)])\n return int(result.strip())\n except:\n return None",
"def master_instance_name(self) -> str:\n return pulumi.get(self, \"master_instance_name\")",
"def pid(self):\n return self.__pid",
"def pid(self):\n return self._pid",
"def pid(self):\n return self._pid",
"def pid(self):\n return self._pid",
"def master_host(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"master_host\")",
"def master_host(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"master_host\")",
"def master_host(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"master_host\")",
"def pid(self):\n return self._process.pid",
"def master_instance_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"master_instance_name\")",
"def ppid(self):",
"def getmypid():\n raise NotImplementedError()",
"def pid(self):\n return self._query_status()['pid']",
"def series_master_id(self):\n if \"seriesMasterId\" in self._prop_dict:\n return self._prop_dict[\"seriesMasterId\"]\n else:\n return None",
"def get_PID(self):\n return self.PID"
] | [
"0.7282652",
"0.67779243",
"0.6716466",
"0.6713761",
"0.66976947",
"0.66976947",
"0.66508067",
"0.65205395",
"0.6470929",
"0.642866",
"0.64118284",
"0.6406304",
"0.6406304",
"0.63952607",
"0.6325486",
"0.6319568",
"0.63080573",
"0.62192404",
"0.62192404",
"0.62192404",
"0.6196616",
"0.6196616",
"0.6196616",
"0.6192385",
"0.61922514",
"0.61791617",
"0.6175827",
"0.6161805",
"0.6142123",
"0.6103421"
] | 0.690566 | 1 |
Fetch an HTML page from url and store in store_path | def get_page_and_store(url, cache_path=None):
page = urllib2.urlopen(url).read()
if cache_path is not None:
open(cache_path, 'w').write(page)
return page | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fetchUrl(self, url):\n self.driver.get(url)\n html = self.driver.page_source\n return html",
"def processUrl(self, url: str) -> dict:\n site = self.sf.urlFQDN(url)\n cookies = None\n\n # Filter out certain file types (if user chooses to)\n if list(filter(lambda ext: url.lower().split('?')[0].endswith('.' + ext.lower()), self.opts['filterfiles'])):\n # self.debug(f\"Ignoring URL with filtered file extension: {link}\")\n return None\n\n if site in self.siteCookies:\n self.debug(f\"Restoring cookies for {site}: {self.siteCookies[site]}\")\n cookies = self.siteCookies[site]\n\n # Fetch the contents of the supplied URL\n fetched = self.sf.fetchUrl(\n url,\n cookies=cookies,\n timeout=self.opts['_fetchtimeout'],\n useragent=self.opts['_useragent'],\n sizeLimit=10000000,\n verify=False\n )\n self.fetchedPages[url] = True\n\n if not fetched:\n return None\n\n # Track cookies a site has sent, then send the back in subsquent requests\n if self.opts['usecookies'] and fetched['headers'] is not None:\n if fetched['headers'].get('Set-Cookie'):\n self.siteCookies[site] = fetched['headers'].get('Set-Cookie')\n self.debug(f\"Saving cookies for {site}: {self.siteCookies[site]}\")\n\n if url not in self.urlEvents:\n # TODO: be more descriptive\n self.error(\"Something strange happened - shouldn't get here: url not in self.urlEvents\")\n self.urlEvents[url] = None\n\n # Notify modules about the content obtained\n self.contentNotify(url, fetched, self.urlEvents[url])\n\n real_url = fetched['realurl']\n if real_url and real_url != url:\n # self.debug(f\"Redirect of {url} to {real_url}\")\n # Store the content for the redirect so that it isn't fetched again\n self.fetchedPages[real_url] = True\n # Notify modules about the new link\n self.urlEvents[real_url] = self.linkNotify(real_url, self.urlEvents[url])\n url = real_url # override the URL if we had a redirect\n\n data = fetched['content']\n\n if not data:\n return None\n\n if isinstance(data, bytes):\n data = data.decode('utf-8', errors='replace')\n\n # Extract links from the content\n links = SpiderFootHelpers.extractLinksFromHtml(\n url,\n data,\n self.getTarget().getNames()\n )\n\n if not links:\n self.debug(f\"No links found at {url}\")\n return None\n\n # Notify modules about the links found\n # Aside from the first URL, this will be the first time a new\n # URL is spotted.\n for link in links:\n if not self.opts['reportduplicates']:\n if link in self.urlEvents:\n continue\n # Supply the SpiderFootEvent of the parent URL as the parent\n self.urlEvents[link] = self.linkNotify(link, self.urlEvents[url])\n\n self.debug(f\"Links found from parsing: {links.keys()}\")\n return links",
"def fetch(self, url):\r\n fname = os.path.join(self._cachedir, self._formatter(url))\r\n if not os.path.exists(fname):\r\n time.sleep(self._sleep)\r\n html = urllib.urlopen(url).read()\r\n with codecs.open(fname, 'w', 'utf-8') as f:\r\n soup = BeautifulSoup(html)\r\n f.write(unicode(soup))\r\n return fname",
"def fetch_and_save(cls, url, path):\n content = cls.fetch_with_retry(url)\n if not content:\n return False\n # print(\"Saving {}\".format(os.path.basename(path)))\n with open(path, \"wb\") as file:\n file.write(content)\n return content",
"def load_page(url):\n parameters = {'User-Agent': \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) \\\n Chrome/69.0.3497.100 Safari/537.36\"}\n response = requests.get(url, params=parameters)\n\n # Abort if server is responding with error\n if not response.status_code == 200:\n print(\"Server stopped responding. Execution aborted.\")\n sys.exit(1)\n\n content = response.content.decode(response.encoding)\n\n # Save page to a file for debugging\n # with open(self.lastpage_path, 'w') as output_file:\n # output_file.write(content)\n\n return content",
"def fetch_and_parse(url, filepath):\n print \"fetch %s\" % (url)\n page = urllib.urlopen(url).read()\n # @todo: add gzip support\n\n parsed_html = BeautifulSoup(page)\n res = parsed_html.head.find('title').text\n\n try:\n f = codecs.open(filepath, \"w\", \"utf-8\")\n f.write(res)\n f.close()\n except:\n logging.error(\"Something went wrong with writing %s\", filepath)\n raise\n\n return {\"url\": url, \"result\": res}",
"def _extract_html(self, url):\n self.response = requests.get(url, timeout=5)\n self.html = BeautifulSoup(self.response.content, \"lxml\") if self.response.ok else None\n # return self.html",
"def scrape_page(url):\n cached_page = cache.get(url)\n\n if cached_page:\n return html.fromstring(cached_page)\n else:\n page = get(url)\n\n cache.set(url, page.text)\n\n return html.fromstring(page.text)",
"def get_page(self):\n self.browser.get(self.url)",
"def get_page_contents(url, cache_dir, verbose,use_cache=True):\n\t#url=f\"https://www.mcgill.ca/study/2020-2021/courses/search?page={page_num}\"\n\tfname = hashlib.sha1(url.encode('utf-8')).hexdigest() # hash the url so we can save the cache\n\tfull_fname = osp.join(cache_dir, fname)\n\tif osp.exists(full_fname) and use_cache: #if the page has already been cached...\n\t\tif(verbose):\n\t\t\tprint(f\"Loading {url} from cache\")\n\t\tcontents = open(full_fname, 'r').read()\n\t\t\n\telse:\n\t\tif(verbose):\n\t\t\tprint(f\"Loading {url} from source\")\n\t\tr = requests.get(url)\n\t\tcontents = r.text\n\t\twith open(full_fname, 'w') as f: # write the cache\n\t\t\tf.write(contents)\n\treturn contents, full_fname # return the full hashed fname so we can use it...",
"def _handle_get_request(self):\n docroot = self._get_config_value('main', 'staticdocumentroot')\n local_path = sanitize_path(self.path)\n path = docroot + local_path\n try:\n # actually try deliver the requested file - First we try to send\n # every static content\n requested_file = open(path)\n text = requested_file.read()\n requested_file.close()\n except IOError:\n try:\n parsed_path = urlparse(self.path)\n params = dict([p.split('=') for p in parsed_path[4].split('&')])\n if params['addurl']:\n tmp = self._insert_url_to_db(params['addurl'])\n if tmp and tmp < 0:\n self._send_database_problem()\n return\n blocked = self._db.is_hash_blocked(tmp)\n if blocked:\n self._send_blocked_page(blocked[3])\n return\n elif tmp:\n self._send_return_page(tmp)\n return\n else:\n # There was a general issue with URL\n self._send_homepage('''<p class=\"warning\">Please check your input.</p>''')\n return\n else:\n # There was a general issue with URL\n self._send_homepage('''<p class=\"warning\">Please check your input.</p>''')\n return\n except YuDatabaseError:\n self._send_database_problem()\n return\n except:\n if self.path in ('/', '/URLRequest'):\n self._send_homepage()\n return\n elif self.path.startswith('/stats') or self.path.endswith('+'):\n if self.path == '/stats':\n # Doing general statistics here\n # Let's hope this page is not getting to popular ....\n # Create a new stats objekt which is fetching data in background\n self._show_general_stats()\n return\n else:\n # Check whether we do have the + or the stats kind of URL\n if self.path.endswith('+'):\n # Well I guess this is the proof you can write\n # real ugly code in Python too.\n try:\n if self.path.startswith('/show/'):\n request_path = self.path[6:]\n elif self.path.startswith('/s/'):\n request_path = self.path[3:]\n elif self.path.startswith('/stats/'):\n request_path = self.path[7:]\n else:\n request_path = self.path[1:]\n self._show_link_stats(request_path[:request_path.rfind('+')])\n return\n except Exception, e:\n # Oopps. Something went wrong. Most likely\n # a malformed link\n # TODO raise a (yet to be written) FileNotFoundException\n self._logger.error(\n u'An exception occurred: %s' % unicode(e), exc_info=True)\n self._send_404()\n return\n else:\n # Trying to understand for which link we shall print\n # out stats.\n splitted = self.path[1:].split('/')\n try:\n self._show_link_stats(splitted[1])\n return\n except IndexError:\n # Something went wrong. 
Most likely there was a\n # malformed URL for accessing the stats.\n self._send_404()\n return\n # Any other page\n else:\n # First check, whether we want to have a real redirect\n # or just an info\n request_path = self.path\n if self.path.startswith('/show/'):\n request_path = self.path[5:]\n show = True\n elif self.path.startswith('/s/'):\n request_path = self.path[2:]\n show = True\n else:\n show = False\n # Assuming, if there is anything else than an\n # alphanumeric character after the starting /, it's\n # not a valid hash at all\n if request_path[1:].isalnum():\n try:\n result = self._db.get_link_from_db(request_path[1:])\n blocked = self._db.is_hash_blocked(request_path[1:])\n except YuDatabaseError:\n self._send_database_problem()\n return\n if result and blocked == None:\n if show == True:\n template_filename = self._get_config_template('showpage')\n url = \"/\" + request_path[1:]\n new_url = '<p><a href=\"%(url)s\">%(result)s</a></p>' % \\\n {'result': result, 'url': url}\n stats = self._db.get_statistics_for_hash(request_path[1:])\n text = read_template(\n template_filename,\n title=SERVER_NAME,\n header=SERVER_NAME,\n msg=new_url,\n stat=stats,\n statspage=\"/stats/\" + request_path[1:])\n else:\n self._db.add_logentry_to_database(request_path[1:])\n self._send_301(result)\n return\n elif blocked:\n self._send_blocked_page(blocked[3])\n return\n else:\n self._send_404()\n return\n else:\n self._send_404()\n return\n self._send_response(text, 200)",
"def get_page(url):\n # todo need some error checking\n\n r = requests.get(url)\n\n if r.status_code != 200:\n log_date = datetime.now().strftime(\"%Y-%m-%d %H%M%S\")\n filename = f'{log_date} response.html'\n with open(filename, 'w+') as f:\n f.write(r.text)\n logging.critical('get_page failed with status {}. See file {}.'.format(\n r.status_code,\n filename\n ))\n r.raise_for_status()\n\n return r",
"def get(self, url):\n self.browser.get(url)",
"def fetch_save(url):\n\n name = url.split(\"/\")[-1]\n response = requests.get(url, stream=True)\n if response.status_code == 200:\n with open(f\"{DATA_PATH}/{name}\", \"wb\") as f:\n f.write(response.raw.read())\n else:\n logging.info(f\"Failed {url} download\")",
"def retrieve_html(self, input_url, domain_folder_name, data_type, file_name):\n print \"retrieve_html: RETRIEVING HTML CODE FOR PAGE:\", input_url\n try:\n from_path = \"%s%s%s%s\" % (self.main_path, domain_folder_name, data_type, file_name)\n print \"retrieve_html: HTML CODE RETRIEVED LOCALY\\npath:%s\" % from_path\n with io.open(from_path, \"r\", encoding='utf-8') as f:\n content = f.read()\n bs_object = BS(content, 'html.parser')\n return bs_object\n \n except IOError:\n print \"retrieve_html: RETRIEVING HTML CODE ONLINE\"\n\n # time_to_sleep = 2\n # print \"SLEEPING FOR %d s.................\" % time_to_sleep\n # time.sleep(time_to_sleep)\n try:\n response = urllib2.urlopen(input_url)\n content = response.read()\n except:\n print \"retrieve_html: FAILED TO RETRIEVE HTML ONLINE, INCREASING failed_retrieving_html_counter\"\n content = \"<HTML><Retrieval_Error>\"\n self.failed_retrieving_html_counter += 1\n \n\n # for always proper utf-8 encoding\n bs_object = BS(content, 'html.parser')\n bs_content = bs_object.prettify('utf-8')\n u_content = unicode(bs_content, 'utf-8')\n #/\n\n to_path = \"%s%s%s%s\" % (self.main_path, domain_folder_name, data_type, file_name)\n print \"retrieve_html: WRITING RETRIEVED HTML_CODE TO FILE\\npath:%s\" % to_path\n with io.open(to_path, \"w\", encoding='utf-8') as f:\n f.write(u_content)\n\n # print \"html WRITTEN to %s.txt\" % file_name\n return bs_object",
"def fetch_document(self, url: str) -> bytes:\n self.html_document = b''\n try:\n response = requests.get(url, headers=self.headers)\n response.raise_for_status()\n self.html_document = response.content\n logger.info('web page {0} fetched with status code: {1}'.format(url, response.status_code))\n return self.html_document\n except requests.exceptions.RequestException:\n logger.exception('Exception raised in Scraper.fetch_document()')\n raise",
"def load_page(url):\n try:\n response = urllib2.urlopen(url)\n html = response.read()\n\n if response.code == 200:\n body_text = html\n return html\n return \"\"\n except Exception:\n return \"\"",
"def getHTML(url): \n return urlopen(url)",
"def get_page(self, url):\n \"\"\" @param url: Url we want to crawl\"\"\"\n \"\"\" @type url: String \"\"\"\n \"\"\"@return the page\"\"\"\n try:\n u = urlopen(url)\n html = u.read().decode('utf-8')\n # except Exception as e:\n # logging.exception(e)\n finally:\n print(\"Closing\")\n u.close()\n return html",
"def _setContentFromUrl(self, url):\n urlgrabber = UrlGrabber(url)\n self._htmlContent = urlgrabber.get()",
"def do_get(self, url):\n self.driver.get(url)",
"def request_html_page(self):\n try:\n response = requests.get('http://www.indeed.com/jobs?', params=self.payload)\n except:\n print \"got error for \", self.payload\n self.page = response.content",
"def load(self, url):\n pass",
"def load(self, url):\n pass",
"def store_cache(base_url, data, path=\"logs/\"):\n\n # Convert URL to filename and write html content into that file\n url_filename = url_to_filename(base_url)\n filename = f\"{path}CACHE-{url_filename}.html\"\n f = open(filename, \"w+\")\n f.write(data)\n f.close()",
"async def fetch_page(self, url: str) -> PageRaw:\n\n raise NotImplementedError()",
"def test_store(self):\n self.selenium.get('{}/store'.format(self.live_server_url))",
"def retrieve_html(url):\n req = urllib2.Request(url)\n req.add_header('User-Agent', 'Just-Crawling 0.1')\n request = None\n status = 0\n try:\n logger.info(\"Crawling %s\" % url)\n request = urllib2.urlopen(req)\n except urllib2.URLError as e:\n logger.error(\"Exception at url: %s\\n%s\" % (url, e))\n except urllib2.HTTPError as e:\n status = e.code\n except:\n return\n if status == 0:\n status = 200\n\n try:\n data = request.read()\n except:\n return\n\n return str(data)",
"def retrieve_content(self, url):\n page = requests.get(url)\n content = page.content\n return content",
"def fetchContent(self):\n print 'fetching page by its path: '+ self.path\n uri = '%s?path=%s' % (self.client.MakeContentFeedUri(), self.path)\n # get the content feed\n feed = self.client.GetContentFeed(uri=uri)\n # take out the content\n self.entry = feed.get_webpages()[0]"
] | [
"0.63946897",
"0.62761647",
"0.62544036",
"0.6150826",
"0.61108017",
"0.6092718",
"0.60736644",
"0.6055679",
"0.60108894",
"0.5974012",
"0.59731984",
"0.58925676",
"0.5850709",
"0.5833227",
"0.58093405",
"0.5793539",
"0.57740086",
"0.5772137",
"0.57652396",
"0.57540506",
"0.5751512",
"0.5748829",
"0.5748687",
"0.5748687",
"0.57467896",
"0.5726141",
"0.57172745",
"0.56776005",
"0.5673095",
"0.5664517"
] | 0.68053216 | 0 |
Return list of urls of infobox pages | def get_infobox_urls(mapping_page):
pattern = re.compile('index\.php/Mapping_en:Infobox_[-\w\./]+')
return pattern.findall(mapping_page) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getURLs():",
"def list(self, request ):\n\t\tinUrl = request.query_params.get('url', None )\n\t\t#if inUrl is None:\n\t\t#\tinUrl = 'https://google.com'\n\t\tserializer = PageInfoSerializer( instance = PageInfo(url=inUrl), many=False )\n\t\treturn Response( serializer.data )",
"def get_urls():\r\n return []",
"def __url_list(self, page):\n url_list = []\n for tag_a in page.find_all('a'):\n href = str(tag_a.get('href'))\n if self.__verify(href):\n url = parse.quote(self.__add_main_site(href), '/:#')\n url_list.append(url)\n return url_list",
"def urls(self) -> list[str]:\r\n ...",
"def get_all_page(url: str) -> list:\n url_book = get_url_book(url)\n return url_book",
"def get_urls(root):\n urls = []\n classes = \"|\".join([\"msl_organisation_list\", \"view-uclu-societies-directory\",\n \"atoz-container\", \"listsocieties\", \"block-og-menu\"])\n\n req = requests.get(root, headers) # , cookies=cookies)\n soup = BeautifulSoup(req.content, 'html.parser')\n main = soup.find(['div', 'ul', 'section'], class_=re.compile(classes))\n\n for a in main.find_all('a', href=True):\n url = a['href']\n if url.startswith(\"/\"):\n urls.append(domain + url)\n\n if url.startswith(\"https://society.tedu.edu\"):\n urls.append(url)\n\n urls = list(dict.fromkeys(urls))\n return urls",
"def getInformationPages(self):\n mtool = getToolByName(self.context, \"portal_membership\")\n if mtool.checkPermission(\"Manage portal\", self.context) == True:\n omit_edit_link = False\n else:\n omit_edit_link = True\n \n catalog = getToolByName(self.context, \"portal_catalog\")\n brains = catalog.searchResults(\n path = \"/\".join(self.context.getPhysicalPath()),\n portal_type = \"InformationPage\",\n sort_on = \"getObjPositionInParent\",\n )\n \n result = []\n for page in brains:\n result.append({\n \"id\" : page.getId,\n \"title\" : page.Title,\n \"description\" : page.Description,\n \"omit_edit_link\" : omit_edit_link,\n \"url\" : page.getURL(),\n \"edit_url\" : \"%s/edit\" % page.getURL(),\n \"download_url\" : \"%s/at_download/file\" % page.getURL(),\n })\n\n return result",
"def ListUrlEntries(self):\n return [WprUrlEntry(request, self._http_archive[request])\n for request in self._http_archive.get_requests()]",
"def enumerate_profiles(inhandle, page):\n html = inhandle.read()\n soup = BeautifulSoup(html, 'html.parser')\n \n urls = [ node.find('a')['href'] for node in soup.findAll('h1', {'class':'entry-title'})]\n return urls",
"def urls(self):\n return self._list_urls()",
"def get_links() -> list:\n headers = {\n \"User-Agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36\",\n \"Accept\": \"text/html\",\n \"Accept-Encoding\": \"gzip, deflate\",\n }\n p = re.compile(r'\\d+.html')\n base_url = 'http://stateoftheunion.onetwothree.net/texts/'\n essay_url = base_url + 'index.html'\n res = requests.get(essay_url, headers=headers)\n soup = BeautifulSoup(res.content, 'html')\n links = soup.find_all('a')\n sotu_links = {link.text: base_url + link.get('href', '') for link in links if re.match(p, link.get('href', ''))}\n return sotu_links",
"def get_page_urls(self, html_page):\n soup = BeautifulSoup(html_page, 'html.parser')\n links = [link.get('href') for link in soup.find_all('a') if link.get('href') != None]\n return(links)",
"def getlinks(url):\n page = Linkfetcher(url)\n page.linkfetch()\n for i, url in enumerate(page):\n print(\"%d ==> %s\" % (i, url))",
"def get_urls(db):\n return db.meta.find_one({'name':\"urls\"})['urls']",
"def __aux_search(self, url, page_limit):\n info = list()\n count = 1\n while True:\n try:\n print(\"[+] Getting page {} result\".format(count))\n if page_limit >= count:\n jdata, response = get_response(url, apikey=self.apikey, params=self.params)\n count += 1\n if jdata and 'data' in jdata:\n info += jdata['data']\n if response and jdata.get('links', {}).get('next', '') != response.url:\n url = jdata['links']['next']\n else:\n break\n else:\n break\n except Exception as e:\n print(e)\n count += 1\n if page_limit >= count:\n break\n\n return info",
"def URLs(self, default=[{}]):\n tmp = self.data.get('urls', default)\n return [HEP.URLObject(i) for i in tmp]",
"def URLs(self, default=[{}]):\n tmp = self.data.get('metadata', {}).get('urls', default)\n return [HEP.URLObject(i) for i in tmp]",
"def get_urls(num):\n url = \"https://books.google.at/books?id=77cdBQAAQBAJ&lpg=PP1&dq=%E5%82%85%E4%BD%A9%E6%A6%AE&pg=PA{}&jscmd=click3&vq=%E5%82%85%E4%BD%A9%E6%A6%AE\".format(num)\n res = requests.get(url)\n res_text = json.loads(res.text)\n pages = res_text[\"page\"]\n\n result = {}\n for p in pages:\n if 'src' in p:\n page_num = p['pid']\n page_src = p['src'] \n result[page_num] = page_src\n return result",
"def get_links_from_url(url):\n return [get_base(url)]",
"def get_urls(self, **kwargs):\n pass # pragma: no cover",
"def uri(self):\n if not self.parallel:\n return [self.get_url()]\n else:\n self.Chunker = Chunker(\n {\"box\": self.BOX}, chunks=self.chunks, chunksize=self.chunks_maxsize\n )\n boxes = self.Chunker.fit_transform()\n urls = []\n for box in boxes:\n urls.append(Fetch_box(box=box, ds=self.dataset_id).get_url())\n return urls",
"def list_urls(self, prefix: str = \"\", etl_name: str = None) -> Iterable[str]:",
"def extract_urls(genome):\n itemid = genome.get('metadata').get('identifier')\n urls = set([url for url in genome['urls'] if 'archive.org' not in url])\n db_urls_found(itemid, urls)",
"def getLinks(self):\n return self.pageLinks",
"def get_urls(self):\n urls = []\n params = ['<{}>'.format(x) for x in self.args]\n args_length = len(self.args) - len(self.defaults)\n for i in range(len(self.defaults) + 1):\n index = -i if i > args_length else None\n urls.append(self.get_url(params[:index]))\n return urls",
"def get_url_pages():\n url = \"https://swapi.co/api/people/\"\n pages_url = []\n \n while True:\n \n pages_url.append(url)\n \n r = requests.get(url)\n \n assert r.status_code == 200, \"There was a problem connecting with SWAPI.\"\n \n url = r.json()[\"next\"] # If there are more pages to check, this will update the URL accordingly.\n \n if url is None: # If there are no more pages to check, this finishes the function.\n \n print(\"\\n\")\n print(\"- - - All URLs were successfully retrieved. - - -\")\n \n return pages_url\n break\n \n print(\"Getting URL from page\", url[-1], \"...\")",
"def get_resource_urls():\n base_url = 'http://developer.pardot.com/'\n pattern = re.compile(\n r'(?ims)\\<a [^>]*?href=\"(kb/api-version-3/[^>]*?/)\"[^>]*?\\>'\n r'[^<]*?\\</a\\>')\n response = requests.get(base_url)\n return [\n '%s/%s' % (base_url, url) for url in pattern.findall(response.text)]",
"def get_product_urls(self, page):\n return self.__url_list(page)",
"def search_urls():\n r = req('GET', SUB_API + 'search/urls', params=apply_search_filters())\n urls = []\n for url in demisto.get(r.json(), 'data.items'):\n urls.append({\n 'Result': demisto.get(url, 'result'),\n 'Details': demisto.get(url, 'details')\n })\n demisto.results({\n 'Type': entryTypes['note'],\n 'EntryContext': {'ThreatGrid.URLs': urls},\n 'HumanReadable': tableToMarkdown('ThreatGrid - URL Search', urls, ['Result', 'Details']),\n 'ContentsFormat': formats['json'],\n 'Contents': r.json()\n })"
] | [
"0.707302",
"0.68563527",
"0.6789511",
"0.67732596",
"0.66887313",
"0.6289862",
"0.62286484",
"0.6220867",
"0.6209303",
"0.6187336",
"0.6129483",
"0.6121491",
"0.6111746",
"0.61112785",
"0.60911447",
"0.60744447",
"0.60727453",
"0.60689384",
"0.6068492",
"0.60557705",
"0.6024493",
"0.6004732",
"0.5959769",
"0.59491026",
"0.594242",
"0.5936052",
"0.59247273",
"0.5899065",
"0.5865389",
"0.58647174"
] | 0.76755244 | 0 |
Return the class of the infobox, given the HTML DBpedia infobox_page. The class is in CamelCase (possibly with a colon and space), exactly as it appears in the infobox_page | def get_class(infobox_page):
pattern = re.compile('OntologyClass:[-\w: ]+')
wiki_class = pattern.findall(infobox_page)
if len(wiki_class) == 0:
return None
else:
return wiki_class[0].replace('OntologyClass:', '') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def parse_page_type_get_infobox(\n html: Tag) -> Tuple[PageType, Optional[Dict[str, Tag]]]:\n infoboxes = html.find_all('table', class_='infobox')\n if len(infoboxes) == 1:\n infobox_dict = parse_infobox(infoboxes[0])\n # Check if movie\n image_caption = infobox_dict.get('_image_caption', '')\n if 'theatrical release poster' in image_caption.lower():\n return PageType.MOVIE, infobox_dict\n\n # Check if actor\n if 'Occupation' in infobox_dict:\n occupation = infobox_dict['Occupation']\n if occupation(text=re.compile('(Actor|actor|Actress|actress)')):\n return PageType.ACTOR, infobox_dict\n\n return PageType.OTHER, None",
"def get_infobox_class_pairs(from_cache=True):\n infobox_urls = []\n infobox_class_pairs = []\n\n for i, mapping_url in enumerate(MAPPINGS_URLS):\n cache_path = HTML_CACHE_PATH_PREFIX + 'main_mapping_en_' + str(i+1) + '.html'\n\n if from_cache:\n mapping_page = open(cache_path, 'r').read()\n else:\n mapping_page = get_page_and_store(mapping_url, cache_path)\n\n infobox_urls += get_infobox_urls(mapping_page)\n\n for i, infobox_url in enumerate(infobox_urls):\n full_url = URL_PREFIX + infobox_url\n infobox = infobox_parser.get_class(infobox_url.split(':')[1]).replace('wikipedia-', '')\n cache_path = HTML_CACHE_PATH_PREFIX + 'infobox-' + infobox + '.html'\n\n #print '(%d/%d) %s' % (i+1, len(infobox_urls), infobox)\n\n if from_cache:\n infobox_page = open(cache_path, 'r').read()\n else:\n infobox_page = get_page_and_store(URL_PREFIX + infobox_url, cache_path)\n\n if infobox == 'football-biography': # temporary solution\n infobox_class_pairs.append((infobox, 'SoccerPlayer'))\n else:\n infobox_class_pairs.append((infobox, get_class(infobox_page)))\n\n return infobox_class_pairs",
"def getInfoBox(url: str):\n # Runing the get request\n r = requests.get(url=url)\n if r.ok:\n # If there exist an information box, extract and store it in infobox\n try:\n # converting the request content into a bs object\n soup = BeautifulSoup(r.content, \"lxml\")\n # Accessing the information box using the tag \"table\", and class of \"infobox vevent\".\n # These tag and class was found after inspecting the webpage.\n infobox = soup.find(\"table\", class_=\"infobox vevent\")\n # Removing the superscript and span tags (to clean up the texts).\n for tag in infobox.find_all([\"sup\", \"span\"]):\n tag.decompose()\n except Exception as e:\n infobox = None\n return infobox\n else:\n return r.status_code",
"def _lookup_class(r, widget):\n\n page_cols = current.s3db.get_config(r.tablename, \"profile_cols\")\n if not page_cols:\n page_cols = 2\n widget_cols = widget.get(\"colspan\", 1)\n span = int(12 / page_cols) * widget_cols\n\n # Default (=foundation)\n return \"profile-widget medium-%s columns\" % span",
"def get_infobox_info(soup):\n infobox = soup.find('table', {'class': 'infobox'})\n unwanted = ['Episode Transcript', 'BMI Work No.', 'International versions']\n\n boldtags = [t for t in infobox.findAll('b') if t.text not in unwanted]\n\n _str = ''\n for tag in boldtags:\n _str += tag.text + ': ' + scrapekit.fix_camelcase(tag.findNext('td').text, ',')\n\n return _str",
"def is_infobox(self, name):\n name = name.strip()\n if name.lower().startswith('infobox'):\n return True\n if name == 'Armors_(NEW)':\n return True\n if name == 'All_inclusive_infobox_2020':\n return True\n if name.lower() == 'item':\n return True\n return False",
"def getTitle(infobox):\n\n title = infobox.find(\"th\", class_=\"infobox-above summary\").text\n\n return title",
"def EnrolledClasses(self,html): \n classes = []\n soup = BeautifulSoup(html)\n for element in soup.find_all(\"input\"):\n if element[\"name\"] == \"TITLE\" and element[\"value\"]:\n classes.append(element.get(\"value\"))\n return classes",
"def get_class_name(self):\n\n if \"class\" in self._root.attrib:\n return self._root.attrib['class']\n else:\n return self._root.tag",
"def insert_class_markers(soup):\r\n\r\n # look for class name in a div like <div class=\"title\">Namespace::MyClass Class Reference</div>\r\n title_div = soup.find(\"div\", \"title\")\r\n if not title_div:\r\n raise ValueError(\"The following div was not found : <div class='title'>...<div>\")\r\n\r\n # titlediv.get_text() --> \"Namespace::MyClass Class Reference\"\r\n fullclassname = title_div.get_text().split()[0]\r\n classname = fullclassname.split(\"::\")[-1]\r\n\r\n # look for the contents div\r\n contents_div = soup.find( \"div\", \"contents\" )\r\n if not contents_div:\r\n raise ValueError(\"The following div was not found : <div class='contents'>...<div>\")\r\n \r\n # insert Qt Creator markers around the brief paragraph\r\n brief_p = contents_div.p\r\n brief_p.insert_before(class_brief_start(classname))\r\n brief_p.insert_after(class_brief_end(classname))",
"def _parse_classification(self, item):\n full_name = item.css('td[headers=Name]::text').extract_first()\n\n if \"Metra\" in full_name and \"Board Meeting\" in full_name:\n return BOARD\n elif \"Citizens Advisory\" in full_name:\n return ADVISORY_COMMITTEE\n elif \"Committee Meeting\" in full_name:\n return COMMITTEE\n else:\n return NOT_CLASSIFIED",
"def get_infobox_urls(mapping_page):\n pattern = re.compile('index\\.php/Mapping_en:Infobox_[-\\w\\./]+')\n return pattern.findall(mapping_page)",
"def css_class(self):\n css_type = self.widget_type\n css_title = normalizer.normalize(self.data.title)\n return ('faceted-checkboxtree-widget '\n 'faceted-{0}-widget section-{1}').format(css_type, css_title)",
"def css_class(self):\n css_type = self.widget_type\n css_title = normalizer.normalize(self.data.title)\n return ('faceted-checkboxtree-widget '\n 'faceted-{0}-widget section-{1}').format(css_type, css_title)",
"def _html_class_str_from_tag(self, tag):\r\n if \"html-classes\" not in self.extras:\r\n return \"\"\r\n try:\r\n html_classes_from_tag = self.extras[\"html-classes\"]\r\n except TypeError:\r\n return \"\"\r\n else:\r\n if tag in html_classes_from_tag:\r\n return ' class=\"%s\"' % html_classes_from_tag[tag]\r\n return \"\"",
"def get_classes(html):\n # elements = html.find_all(\"span\", \"code\")\n # titles = html.find_all(\"span\", \"title\")\n # classes = []\n # for i in range(len(elements)):\n # item = elements[i]\n # tit = titles[i]\n # classes += [(item.text.replace('\\xa0', ' '), tit.text.replace('\\xa0', ' '))]\n # return classes",
"def get_class(summary):\n\n if re.search(r\"unlikely\", summary.lower()) > 0:\n var_class = \"2\"\n elif re.search(r\"not\\sclinically\\simportant|benign|polymorphism\", summary.lower()) > 0:\n var_class = \"1\"\n elif re.search(r\"no\\sevidence|no\\sapparent\\sevidence|normal|neg|-ve|no\\smut|no\\sother\\svariants\", summary.lower()) > 0:\n var_class = \"N\"\n elif re.search(r\"likely\\spathogenic|consistent\", summary.lower()) > 0:\n var_class = \"4\"\n elif re.search(r\"pathogenic|out-of-frame\", summary.lower()) > 0:\n var_class = \"5\"\n elif re.search(r\"uv|uncertain|missense\\svariant|unclassified|unknown|variant|in-frame|heterozygous\\s(deletion|duplication)\", summary.lower()) > 0:\n var_class = \"3\"\n elif re.search(r\"pathogenic|confirm|frameshift|nonsense|splice\\ssite\\smutation|deletion|splicesite|mutation\",\n summary.lower()) > 0:\n var_class = \"5\"\n elif re.search(r\"missense\", summary.lower()) > 0:\n var_class = \"3\"\n else:\n var_class = \"U\"\n\n return var_class",
"def extract_classes(soup):\r\n select = soup.find('select', id='dnn_ctr11396_TimeTableView_ClassesList')\r\n return {option['value']: option.text for option in select.findChildren('option')}",
"def check_page_type(html) -> None:\n\ttemp_lst = html.findAll('div', {'class': '_1HBR'})\n\tif temp_lst:\n\t\tpage_type_dict['general'] = True\n\t\tpage_type_dict['detail'] = False\n\telse:\n\t\tpage_type_dict['general'] = False\n\t\tpage_type_dict['detail'] = True",
"def html_class(self):\n return '{0} {1}'.format(\n self.primary_html_class, ' '.join(self.html_classes)\n )",
"def css_class(self, request) -> str:\n url = self.href(self.url, request)\n if isinstance(url, str):\n url = url.split(\"#\")[0]\n return self.ACTIVE_ITEM_CSS_CLASS if request.path_info == url else \"\"",
"def page_name(self):\n page = self.full_name\n if self.overload and self.overload.overload_id:\n page += f'-{self.overload.overload_id}'\n if (self.documenter.objtype == 'class' and\n not sys.platform.startswith('linux')):\n # On macOS and Windows, the filesystem is case-insensitive. To avoid name\n # conflicts between e.g. the class `tensorstore.Context.Spec` and the\n # method `tensorstore.Context.spec`, add a `-class` suffix to classes.\n page = f'{page}-class'\n return page",
"def class_name(name: str) -> str:\n return text.pascal_case(utils.safe_snake(name, \"type\"))",
"def html_class(cls):\n return ' '.join(cls.html_classes)",
"def parse_infobox(infobox: Tag) -> Dict[str, Tag]:\n entries = list(filter(lambda e: isinstance(e, Tag), infobox.tbody.contents))\n entry_dict = {}\n # First find image caption:\n index = 0\n while index < len(entries):\n entry = entries[index]\n links = entry.find_all('a', class_='image')\n found = False\n if len(links) == 1:\n img_link: Tag = links[0]\n for sibling in img_link.next_siblings:\n if sibling.name == 'div' and sibling.string is not None:\n entry_dict['_image_caption'] = sibling.string.strip()\n found = True\n break\n else:\n tds = entry.find_all('td')\n # If we have already reached td, then that means there's no image\n if tds:\n break\n index += 1\n if found:\n break\n\n # Find entries\n while index < len(entries):\n entry = entries[index]\n if entry.find_all('th') and entry.find_all('td'):\n if entry.th.string is not None:\n key = entry.th.string.strip().replace(u'\\xa0', u' ')\n entry_dict[key] = entry.td\n elif entry.th.div is not None and entry.th.div.string is not None:\n key = entry.th.div.string.strip().replace(u'\\xa0', u' ')\n entry_dict[key] = entry.td\n elif entry.th.a is not None and entry.th.a.string is not None:\n key = entry.th.a.string.strip().replace(u'\\xa0', u' ')\n entry_dict[key] = entry.td\n index += 1\n\n return entry_dict",
"def XPGetWidgetClassFunc(inWidgetClass):\n pass",
"def get_page_type(cls) -> str:\n return cls.provided_class.item_key",
"def infobox_parsing(self):\n infobox_count=0\n templates = self.code.filter_templates()\n for temp in templates:\n json_list=[]\n if \"Infobox\" in temp.name:\n try:\n self.revision_page_folder_path=os.path.join(self.rd_folder_path_infobox,self.page_folder)\n if not os.path.exists(self.revision_page_folder_path):\n os.mkdir(self.revision_page_folder_path)\n infobox_folder=remove_markup(str(temp.name))\n infobox_folder=infobox_folder.strip()\n infobox_folder= re.sub('[^a-zA-Z0-9\\n\\.]', ' ', (str(infobox_folder)).lower())\n revision_infobox_folder_path=os.path.join(self.revision_page_folder_path,infobox_folder)\n if not os.path.exists(revision_infobox_folder_path):\n os.mkdir(revision_infobox_folder_path)\n json_list.append(str(temp))\n json.dump(json_list, open(os.path.join(revision_infobox_folder_path, self.revision_id_parent + '_' + self.revision_id_current + \".json\"), \"w\"))\n print('Infobox caption: ', infobox_folder)\n infobox_count=infobox_count+1\n except Exception as e:\n print('Infobox Exception: ', str(e))\n return infobox_count",
"def fidclass(numero,classn):\r\n found=False\r\n# print numero\r\n for cle, valeur in classn.items():\r\n\r\n if valeur == numero:\r\n found=True\r\n return cle\r\n if not found:\r\n return 'unknown'",
"def corbaname_to_classname(item):\n rules = {\n ccReg.FT_STATEMENTITEM._v : 'bankstatement'\n }\n return rules.get(item._v, item._n[3:].lower())"
] | [
"0.6260306",
"0.5738476",
"0.53829914",
"0.5331526",
"0.5307376",
"0.5158402",
"0.50680715",
"0.50504017",
"0.49873435",
"0.49439368",
"0.49332327",
"0.48963484",
"0.48814535",
"0.48814535",
"0.48280013",
"0.48041573",
"0.4792899",
"0.47661728",
"0.47534335",
"0.4744808",
"0.47442484",
"0.47186026",
"0.4701831",
"0.46974328",
"0.46803385",
"0.4675114",
"0.46731433",
"0.4655018",
"0.46503899",
"0.46398523"
] | 0.69977725 | 0 |
Return pairs of (infobox, class); infobox format is lower case with hyphen (e.g. 'aflplayer2'), class format is as returned by get_class. | def get_infobox_class_pairs(from_cache=True):
infobox_urls = []
infobox_class_pairs = []
for i, mapping_url in enumerate(MAPPINGS_URLS):
cache_path = HTML_CACHE_PATH_PREFIX + 'main_mapping_en_' + str(i+1) + '.html'
if from_cache:
mapping_page = open(cache_path, 'r').read()
else:
mapping_page = get_page_and_store(mapping_url, cache_path)
infobox_urls += get_infobox_urls(mapping_page)
for i, infobox_url in enumerate(infobox_urls):
full_url = URL_PREFIX + infobox_url
infobox = infobox_parser.get_class(infobox_url.split(':')[1]).replace('wikipedia-', '')
cache_path = HTML_CACHE_PATH_PREFIX + 'infobox-' + infobox + '.html'
#print '(%d/%d) %s' % (i+1, len(infobox_urls), infobox)
if from_cache:
infobox_page = open(cache_path, 'r').read()
else:
infobox_page = get_page_and_store(URL_PREFIX + infobox_url, cache_path)
if infobox == 'football-biography': # temporary solution
infobox_class_pairs.append((infobox, 'SoccerPlayer'))
else:
infobox_class_pairs.append((infobox, get_class(infobox_page)))
return infobox_class_pairs | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_classes(self):\n out_classes = ()\n classes = super(NamedEntityRecognizerModel, self).get_classes()\n\n for c in classes:\n out_classes += (c[:2],)\n\n return ((self.outside_class, self.outside_class_display),) + out_classes",
"def convert_from_cls_format(cls_boxes, cls_segms, cls_keyps):\n box_list = [b for b in cls_boxes if len(b) > 0]\n if len(box_list) > 0:\n boxes = np.concatenate(box_list)\n else:\n boxes = None\n if cls_segms is not None:\n segms = [s for slist in cls_segms for s in slist]\n else:\n segms = None\n if cls_keyps is not None:\n keyps = [k for klist in cls_keyps for k in klist]\n else:\n keyps = None\n classes = []\n for j in range(len(cls_boxes)):\n classes += [j] * len(cls_boxes[j])\n return boxes, segms, keyps, classes",
"def convert_from_cls_format(cls_boxes, cls_segms, cls_keyps):\n box_list = [b for b in cls_boxes if len(b) > 0]\n if len(box_list) > 0:\n boxes = np.concatenate(box_list)\n else:\n boxes = None\n if cls_segms is not None:\n segms = [s for slist in cls_segms for s in slist]\n else:\n segms = None\n if cls_keyps is not None:\n keyps = [k for klist in cls_keyps for k in klist]\n else:\n keyps = None\n classes = []\n for j in range(len(cls_boxes)):\n classes += [j] * len(cls_boxes[j])\n return boxes, segms, keyps, classes",
"def convert_from_cls_format(cls_boxes, cls_segms, cls_keyps):\n box_list = [b for b in cls_boxes if len(b) > 0]\n if len(box_list) > 0:\n boxes = np.concatenate(box_list)\n else:\n boxes = None\n if cls_segms is not None:\n segms = [s for slist in cls_segms for s in slist]\n else:\n segms = None\n if cls_keyps is not None:\n keyps = [k for klist in cls_keyps for k in klist]\n else:\n keyps = None\n classes = []\n for j in range(len(cls_boxes)):\n classes += [j] * len(cls_boxes[j])\n return boxes, segms, keyps, classes",
"def get_pt_box_info(box_info, pt_obj):\n if type(box_info) is dict:\n pt_box_info = {}\n for elem in box_info:\n box_list, frame_list = zip(*box_info[elem]['list'])\n center_box_list = get_box_center(box_list)\n pt_box_list = pt_obj.get_pred_transform(center_box_list)\n center_box_l = pt_obj.get_inverse_pred_transform(pt_box_list[0])\n pt_box_info[elem] = list(zip(pt_box_list[0], frame_list))\n return pt_box_info\n\n if type(box_info) is list:\n pt_box_info = []\n box_list, frame_list = zip(*box_info)\n center_box_list = get_box_center(box_list)\n pt_box_list = pt_obj.get_pred_transform(center_box_list)\n return list(zip(pt_box_list[0], frame_list))",
"def print_pairing_info(melon_types):\n\n # melon_types is the list of class instances\n # melon is ONE of the class instance \n for melon in melon_types:\n # getting the each instance and it's instance pairing list\n print(f\"{melon.name} pairs with: \\n - {melon.pairings}\")",
"def info(self):\n mallet = c['mallet']\n env = set_env_lang_utf8()\n info_bin = os.path.join(os.path.join(mallet, 'bin'), 'classifier2info')\n info_p = sub.Popen([info_bin, '--classifier', self._model],\n stdout=sub.PIPE, stdin=sub.PIPE, stderr=sub.PIPE, env=env)\n\n cur_class = None\n feats = TwoLevelCountDict()\n\n # Go through and pick out what the features are for\n for line in info_p.stdout:\n content = line.decode(encoding='utf-8')\n\n class_change = re.search('FEATURES FOR CLASS (.*)', content)\n # Set the current class if the section changes\n if class_change:\n cur_class = class_change.group(1).strip()\n continue\n\n # Otherwise, let's catalog the features.\n word, prob = content.split()\n feats.add(cur_class, word, float(prob))\n\n # Now, print some info\n for cur_class in feats.keys():\n print(cur_class, end='\\t')\n print('%s:%.4f' % ('<default>', feats[cur_class]['<default>']), end='\\t')\n top_10 = feats.top_n(cur_class, n=10, key2_re='^nom')\n print('\\t'.join(['%s:%.4f' % (w,p) for w,p in top_10]))",
"def detect_class_onpic(boxes, allowed_classes):\n object_class = \"all\"\n highest_prob = 0\n for box in boxes:\n box_prob = float(box[1].strip('%')) / 100.0\n if box[0] in allowed_classes and box_prob > highest_prob:\n highest_prob = box_prob\n object_class = box[0]\n return object_class, highest_prob",
"def get_class(infobox_page):\n pattern = re.compile('OntologyClass:[-\\w: ]+')\n wiki_class = pattern.findall(infobox_page)\n\n if len(wiki_class) == 0:\n return None\n else:\n return wiki_class[0].replace('OntologyClass:', '')",
"def parse_page_type_get_infobox(\n html: Tag) -> Tuple[PageType, Optional[Dict[str, Tag]]]:\n infoboxes = html.find_all('table', class_='infobox')\n if len(infoboxes) == 1:\n infobox_dict = parse_infobox(infoboxes[0])\n # Check if movie\n image_caption = infobox_dict.get('_image_caption', '')\n if 'theatrical release poster' in image_caption.lower():\n return PageType.MOVIE, infobox_dict\n\n # Check if actor\n if 'Occupation' in infobox_dict:\n occupation = infobox_dict['Occupation']\n if occupation(text=re.compile('(Actor|actor|Actress|actress)')):\n return PageType.ACTOR, infobox_dict\n\n return PageType.OTHER, None",
"def return_boxes_class_as_dict(self) -> Dict[int, Dict]:\n\n boxes_dict = {}\n for index, sg_box in enumerate(self.root.iter('object')):\n boxes_dict[index] = {\"name\": sg_box.find(\"name\").text,\n \"xmin\": int(sg_box.find(\"bndbox\").find(\"xmin\").text),\n \"ymin\": int(sg_box.find(\"bndbox\").find(\"ymin\").text),\n \"xmax\": int(sg_box.find(\"bndbox\").find(\"xmax\").text),\n \"ymax\": int(sg_box.find(\"bndbox\").find(\"ymax\").text)}\n\n return boxes_dict",
"def image_classes():\n\n image_data_path = PROJECT_ROOT + \"/data/CUB_200_2011/\"\n\n # <class_id> <class_name>\n classes = open(image_data_path + \"classes.txt\").readlines()\n classes = [i.strip().split() for i in classes]\n\n # <image_id> <class_id>\n labels = open(image_data_path + \"image_class_labels.txt\").readlines()\n labels = [i.strip().split() for i in labels]\n\n class_ids = {}\n for i in classes:\n class_ids[i[1]] = int(i[0])\n\n label_ids = {}\n for i in labels:\n label_ids[int(i[0])] = int(i[1])\n\n return class_ids, label_ids",
"def _get_arg_vb_class(self, a_toks):\n ret = [0] * 7\n for _, p in a_toks:\n if p in VB_TAG2POS:\n ret[VB_TAG2POS[p]] = 1.\n return ''.join(str(t) for t in ret)",
"def classify(self, item: Union[str, object, Type[Any]]) -> tuple[str, ...]:\n if not isinstance(item, str):\n item = denovo.unit.get_name(item = item)\n kinds = [] \n for kind, classes in self.kinds.items(): \n if item in classes:\n kinds.append(kind)\n return tuple(kinds)",
"def _get_vb_class(self, a_feats, a_toks1, a_toks2):\n # find intersecting verb classes\n vb_classes = Counter()\n vb_cls1 = vb_cls2 = None\n for w1, p1 in a_toks1:\n if w1 not in LCSI or p1 not in VB_TAGS:\n continue\n vb_cls1 = LCSI[w1]\n for w2, p2 in a_toks2:\n if w2 not in LCSI or p2 not in VB_TAGS:\n continue\n vb_cls2 = LCSI[w2]\n vb_classes.update(vb_cls1 & vb_cls2)\n for vbc, cnt in vb_classes.iteritems():\n a_feats[\"LCSI-\" + vbc] = cnt\n # obtain VB tag vectors\n a_feats[\"VBTags1-\" + self._get_arg_vb_class(a_toks1)] = 1.\n a_feats[\"VBTags2-\" + self._get_arg_vb_class(a_toks2)] = 1.",
"def print_pairing_info(melon_types):\n\n # Fill in the rest",
"def getInfo():",
"def print_pairing_info(melon_types):\n# No return statement needed \n# loop through all melon types\n# for each melon type -- print \n# Muskmelon pairs with\n# - mint\n\n # Loop through melon in list melon_types\n for melon in melon_types:\n\n # .name assigns self.name to each melon in loop\n print(f'{melon.name} pairs with:')\n # Attached method to melon from above for loop\n for pairing in melon.pairings:\n print(f'- {pairing}')",
"def get_infobox_info(soup):\n infobox = soup.find('table', {'class': 'infobox'})\n unwanted = ['Episode Transcript', 'BMI Work No.', 'International versions']\n\n boldtags = [t for t in infobox.findAll('b') if t.text not in unwanted]\n\n _str = ''\n for tag in boldtags:\n _str += tag.text + ': ' + scrapekit.fix_camelcase(tag.findNext('td').text, ',')\n\n return _str",
"def class_selection() -> str:\r\n dict_classes = {'1': 'Bounty Hunter', '2': 'Imperial Agent', '3': 'Jedi Consular', '4': 'Jedi Knight',\r\n '5': 'Sith Warrior', '6': 'Sith Inquisitor', '7': 'Smuggler', '8': 'Trooper'}\r\n print(\"(1) Bounty Hunter\\n(2) Imperial Agent\\n(3) Jedi Consular\\n(4) Jedi Knight\\n\"\r\n \"(5) Sith Warrior\\n(6) Sith Inquisitor\\n(7) Smuggler\\n(8) Trooper\\n\")\r\n chosen_class = str(input(\"Choose your class by entering a number from 1-8, ie. for Bounty Hunter type 1\\n\"))\r\n for key, value in dict_classes.items():\r\n if key == chosen_class:\r\n return value",
"def show_classes():\n for obj in Classes.get_all_obj_list():\n print('\\033[33;1m[%s] [%s]校区 [%s]班级 学费[%s]\\033[0m'.center(60, '-') \\\n % (obj.school_nid.get_obj_by_uuid().name, obj.school_nid.get_obj_by_uuid().addr, \\\n obj.name, obj.tuition))",
"def get_classes_with_colors(self):\n i = 0\n out_classes = ()\n classes = super(NamedEntityRecognizerModel, self).get_classes()\n\n for c in classes:\n if len(c) != 3:\n c += (self.default_colors[i],)\n i += 1\n out_classes += (c,)\n\n return (\n (self.outside_class, self.outside_class_display, self.outside_color),\n ) + out_classes",
"def get_labels_and_classes(self):\n query = read_query('structure exploration/labels_and_classes')\n response = self._submit_query(query)\n\n temp = dict()\n for r in response:\n temp[r['l']['value']] = r['type']['value'].split('/')[-1]\n\n return temp",
"def cl(self, clname):\n out = ParameterDictionary()\n for k,p in self.iteritems():\n if \"class\" in p:\n if clname in p['class'].split(\"|\"):\n out[k] = p\n return out",
"def type_classes(self) -> Dict[str, int]:\n return {\n \"bg\": 0,\n \"neutrophil\": 1,\n \"epithelial\": 2,\n \"lymphocyte\": 3,\n \"plasma\": 4,\n \"eosinophil\": 5,\n \"connective\": 6,\n }",
"def print_pairing_info(melon_types):\n\n # Fill in the rest\n\n for melon in melon_types:\n print(\"{} pairs with\".format(melon.name))\n\n for pairing in melon.pairings:\n print(\"- {}\".format(pairing))",
"def vocall_category_info(with_background=True):\n label_map = pascalvoc_label(with_background)\n label_map = sorted(label_map.items(), key=lambda x: x[1])\n cats = [l[0] for l in label_map]\n\n if with_background:\n cats.insert(0, 'background')\n\n clsid2catid = {i: i for i in range(len(cats))}\n catid2name = {i: name for i, name in enumerate(cats)}\n\n return clsid2catid, catid2name",
"def info(self):\n return (self.kind, self.value)",
"def print_pairing_info(melon_types):\n\n # Fill in the rest\n for melon in melon_types:\n print(f\"{melon.name} pairs with\")\n for pairing in melon.pairings:\n print(f\"-{pairing}\")",
"def is_infobox(self, name):\n name = name.strip()\n if name.lower().startswith('infobox'):\n return True\n if name == 'Armors_(NEW)':\n return True\n if name == 'All_inclusive_infobox_2020':\n return True\n if name.lower() == 'item':\n return True\n return False"
] | [
"0.5774047",
"0.5668328",
"0.5668328",
"0.5668328",
"0.5538742",
"0.5415243",
"0.5384948",
"0.5378459",
"0.52978235",
"0.52891785",
"0.5135255",
"0.5103369",
"0.5069673",
"0.50657797",
"0.5051091",
"0.50464505",
"0.5031448",
"0.5025389",
"0.49898514",
"0.4987515",
"0.496683",
"0.49108618",
"0.4903216",
"0.4888069",
"0.4885817",
"0.48642117",
"0.48320788",
"0.48307335",
"0.48034793",
"0.48023674"
] | 0.6894431 | 0 |
distribute targets[lo, hi) into nbucket even partitions; the distribution is used by nbucket processes for parallel computation | def dist(targets, lo, hi, nbucket):
distribution = []
for _ in range(nbucket):
distribution.append([])
for i in range(lo, hi):
if 0 <= i and i < len(targets):
distribution[i % nbucket].append(targets[i])
return distribution | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def buckets(data, n):\n # Shuffle all datasets to get a more consistent workload for all threads.\n random.shuffle(data)\n\n for i in range(0, len(data), n):\n yield data[i:i + n]",
"def distribute_discrete(sizes, groups, pow=1.0):\n chunks = np.array(sizes, dtype=np.int64)\n weights = np.power(chunks.astype(np.float64), pow)\n max_per_proc = float(distribute_partition(weights.astype(np.int64), groups))\n\n target = np.sum(weights) / groups\n\n dist = []\n\n off = 0\n curweight = 0.0\n proc = 0\n for cur in range(0, weights.shape[0]):\n if curweight + weights[cur] > max_per_proc:\n dist.append( (off, cur-off) )\n over = curweight - target\n curweight = weights[cur] + over\n off = cur\n proc += 1\n else:\n curweight += weights[cur]\n\n dist.append( (off, weights.shape[0]-off) )\n\n if len(dist) != groups:\n raise RuntimeError(\"Number of distributed groups different than number requested\")\n\n return dist",
"def generate_cluster_size_distributions(gh_bin, gbk_dir, I, params, exclude_paralogs):\n\n C = params[0]\n S = params[1]\n e_name = \"e1\" if exclude_paralogs else \"e0\"\n\n run_id = \"C{}_S{}_{}_\".format(C, S, e_name)\n gh.run_get_homologs(gh_bin, gbk_dir, I, C, S, 0, exclude_paralogs, run_id, clean_up=True, core=False)\n cluster_sizes(run_id)",
"def clusterDistribute(nb, nj, options):\n\n log = open(options.log,\"a\",0)\n log.write(\"\\n %s: Calculating the optimum way of spreading %i bootstrap \" \\\n \"runs over %i nodes...\" % (timeStr(), nb, nj))\n\n remaining_nb = float(nb)\n remaining_nj = float(nj)\n\n bootstrapout=[]\n total_nb = 0\n while remaining_nb>0:\n\n max_nb_job = math.ceil(remaining_nb/remaining_nj)\n nj_max_nb_job = math.floor(remaining_nb/max_nb_job)\n total_nb = total_nb+(nj_max_nb_job*max_nb_job)\n\n log.write(\"\\n\\t\\tAllocating %.0f runs to %.0f nodes (%.0f runs \" \\\n \"assigned).\" % (max_nb_job, nj_max_nb_job, total_nb))\n\n bootstrapout.append((nj_max_nb_job, max_nb_job))\n remaining_nb = float(remaining_nb-(nj_max_nb_job*max_nb_job))\n remaining_nj = float(remaining_nj-nj_max_nb_job)\n\n log.close()\n\n return(bootstrapout)",
"def split_iters(iter_ranges, n_threads = None):\n\n\n if n_threads is None:\n n_threads = cpu_count()\n \n counts = [safediv(r[1] - r[0], r[2]) for r in iter_ranges]\n # largest_dim = np.max(counts)\n total_count = float(np.sum(counts))\n split_factors = [ (c / total_count) ** 2 for c in counts ]\n if len(counts) > 2:\n # kludgy heuristic\n # if you're reading across multiple dimensions\n # assume there might be reuse of data read in \n # and try to split up work so it fits into cache \n expected_bytes = 8 \n for dim in counts:\n expected_bytes *= dim\n expected_kb = expected_bytes / 1024\n l2_cache_size = 8192\n n_pieces = max(n_threads, expected_kb / l2_cache_size)\n else: \n n_pieces = 2*n_threads \n \n # initialize work_items with an empty single range \n work_items = [[]]\n for (dim_idx,dim_count) in enumerate(counts):\n\n dim_start, _, dim_step = iter_ranges[dim_idx]\n n_dim_pieces = int(math.ceil(split_factors[dim_idx] * n_pieces))\n dim_factor = float(dim_count) / n_dim_pieces\n \n old_work_items = [p for p in work_items]\n work_items = []\n for i in xrange(n_dim_pieces):\n # copy all the var ranges, after which we'll modifying \n # the biggest dimension \n\n start = dim_start + int(math.floor(dim_step * dim_factor * i))\n stop = dim_start + int(math.floor(dim_step * dim_factor * (i+1)))\n \n dim_work_item = (start,stop,dim_step)\n for old_work_item in old_work_items:\n new_work_item = [r for r in old_work_item]\n new_work_item.append(dim_work_item) \n work_items.append(new_work_item)\n\n return work_items",
"def bucket_sort(numbers, num_buckets=10):\n # TODO: Find range of given numbers (minimum and maximum values)\n min_num = min(numbers)\n max_num = max(numbers)\n size = max_num/len(numbers)\n\n # TODO: Create list of buckets to store numbers in subranges of input range\n bucket_list = []\n for i in range(len(numbers)):\n # make an empty index to represent each bucket\n bucket_list.append([])\n\n # TODO: Loop over given numbers and place each item in appropriate bucket\n for i in range(len(numbers)):\n j = int(numbers[i]/size)\n\n #if not last bucket\n if j != len(numbers):\n # append index value of the instance of numbers to the propper bucket\n bucket_list[j].append(numbers[i])\n else:\n # append index value to the last bucket\n bucket_list[len(numbers) - 1].append(numbers[i])\n\n # TODO: Sort each bucket using any sorting algorithm (recursive or another)\n for i in range(len(numbers)):\n # calling insertion sort\n insertion(bucket_list[i])\n\n # TODO: Loop over buckets and append each bucket's numbers into output list\n result = []\n for i in range(len(numbers)):\n # \"append each bucket's numbers into output list\"\n result = result + bucket_list[i]\n\n # print('RESULT: ', result)\n return result\n\n\n # FIXME: Improve this to mutate input instead of creating new output list",
"def estimate_bucket_pipeline(bucket_boundaries, num_samples, safe=True):\n if len(bucket_boundaries) < 2:\n raise ValueError('Bucket boundaries must contain at least 2 values')\n\n batch_step = 8\n\n batch_sizes = []\n for boundary in bucket_boundaries:\n batch_size = num_samples / (boundary - 1)\n batch_size = np.floor(batch_size / batch_step) if safe \\\n else np.round(batch_size / batch_step)\n batch_size *= batch_step\n\n if safe and batch_size < batch_step:\n if len(batch_sizes) < 2:\n raise ValueError('Too few samples per batch')\n\n return bucket_boundaries[:len(batch_sizes) - 1], batch_sizes, bucket_boundaries[len(batch_sizes) - 1]\n\n batch_sizes.append(max(batch_step, batch_size.astype(int)))\n\n return bucket_boundaries[:-1], batch_sizes, bucket_boundaries[-1]",
"def bucket_sort(numbers, num_buckets=10):\n # TODO: Find range of given numbers (minimum and maximum values)\n # TODO: Create list of buckets to store numbers in subranges of input range\n # TODO: Loop over given numbers and place each item in appropriate bucket\n # TODO: Sort each bucket using any sorting algorithm (recursive or another)\n # TODO: Loop over buckets and append each bucket's numbers into output list\n # FIXME: Improve this to mutate input instead of creating new output list",
"def gen_buckets(num_buckets, data, max_val=256):\n\n default_size_of_bucket = int(len(data)/3)\n print(f\"Bucket size: {default_size_of_bucket}\")\n all_buckets = []\n for i in range(num_buckets):\n curr_buck = [0 for _ in range(max_val)]\n np.random.shuffle(data)\n curr_sample = data[0:default_size_of_bucket]\n for i in range(len(curr_sample)):\n curr_buck[curr_sample[i]] += 1\n all_buckets.append(curr_buck)\n return all_buckets",
"def distribute(self, value, indices, containers):\r\n raise NotImplementedError",
"def uniform_split(self, nr_agents):\n indices = np.linspace(start=0, stop=self.samples.shape[0], num=nr_agents + 1, dtype=int).tolist()\n\n self.samples = self.partition(self.samples, indices, nr_agents)\n self.labels = self.partition(self.labels, indices, nr_agents)",
"def htable(nbuckets):",
"def distribute_uniform(totalsize, groups):\n ret = []\n for i in range(groups):\n myn = totalsize // groups\n off = 0\n leftover = totalsize % groups\n if ( i < leftover ):\n myn = myn + 1\n off = i * myn\n else:\n off = ((myn + 1) * leftover) + (myn * (i - leftover))\n ret.append( (off, myn) )\n return ret",
"def bucket_sort(numbers, num_buckets=10):\n ##################################################################\n # Step 1:\n # TODO: Find range of given numbers (minimum and maximum values)\n ##################################################################\n \n minimum = min(numbers)\n maximum = max(numbers)\n numbers_range = maximum - minimum\n # print(numbers_range), print(minimum), print(maximum)\n\n ##################################################################################\n # Step 2:\n # TODO: Create list of buckets to store numbers in subranges of input range\n ##################################################################################\n buckets = []\n for i in range(num_buckets+1):\n buckets.append([])\n\n # TODO: Loop over given numbers and place each item in appropriate bucket\n for num in numbers: \n index = int((int((num - minimum) * 100) / numbers_range) / num_buckets)\n buckets[index].append(num)\n\n # TODO: Sort each bucket using any sorting algorithm (recursive or another)\n \n for bucket in buckets:\n for i in range(1, len(bucket)):\n j = i - 1\n num = bucket[i]\n while j >= 0:\n if bucket[i] < bucket[j]:\n bucket[j+1] = bucket[j]\n bucket[j] = num\n j -= 1\n else:\n break\n # for bucket in buckets:\n # for i in range(1, len(buckets)):\n # # Comparison operator \n # while buckets[i-1] > buckets[i] and i > 0:\n # # Swap items \n # buckets[i], buckets[i-1] = buckets[i-1], buckets[i]\n # # Continue looping over items\n # i-=1\n\n return buckets\n\n # TODO: Loop over buckets and append each bucket's numbers into output list\n output = []\n for bucket in buckets:\n for num in bucket:\n output.append(num)\n\n return output\n # FIXME: Improve this to mutate input instead of creating new output list",
"def _iter_assignments_by_transfer_sizes(self, worker_quotas, input_chunk_metas):\n total_transfers = dict((k, sum(v.chunk_size for v in chunk_to_meta.values()))\n for k, chunk_to_meta in input_chunk_metas.items())\n # operands with largest amount of data will be allocated first\n sorted_chunks = sorted(total_transfers.keys(), reverse=True,\n key=lambda k: total_transfers[k])\n for op_key in sorted_chunks:\n # compute data amounts held in workers\n worker_stores = defaultdict(lambda: 0)\n for meta in input_chunk_metas[op_key].values():\n for w in meta.workers:\n worker_stores[w] += meta.chunk_size\n\n max_size, max_workers = self._get_workers_with_max_size(worker_stores)\n if max_workers and max_size > 0.5 * total_transfers[op_key]:\n max_worker = random.choice(max_workers)\n if worker_quotas.get(max_worker, 0) <= 0:\n continue\n worker_quotas[max_worker] -= 1\n yield op_key, max_worker",
"def load_bucket(n, gbar, delg, Ns, iopt=\"sase\"):\n nmax = 10000\n if n > nmax:\n raise ValueError(\"increase nmax, subr load\")\n\n eta = np.zeros(n)\n thet = np.zeros(n)\n if iopt == \"seeded\":\n M = 128 # number of particles in each beamlet\n nb = int(\n np.round(n / M)\n ) # number of beamlet via Fawley between 64 to 256 (x16=1024 to 4096)\n if M * nb != n:\n raise ValueError(\"n must be a multiple of 4\")\n for i in range(nb):\n etaa = delg * np.random.randn(1) + gbar\n # etaa=delg*(np.random.rand(1)-0.5)+gbar\n for j in range(M):\n eta[i * M + j] = etaa\n thet[i * M + j] = 2 * np.pi * (j + 1) / M\n elif iopt == \"sase\":\n M = 32 # number of particles in each beamlet\n nb = int(\n np.round(n / M)\n ) # number of beamlet via Fawley between 64 to 256 (x16=1024 to 4096)\n if M * nb != n:\n raise ValueError(\"n must be a multiple of 4\")\n effnoise = np.sqrt(3 * M / (Ns / nb)) # Penman algorithm for Ns/nb >> M\n for i in range(nb):\n etaa = delg * np.random.randn(1) + gbar\n # etaa=delg*(np.random.rand(1)-0.5)+gbar\n for j in range(M):\n eta[i * M + j] = etaa\n thet[i * M + j] = (\n 2 * np.pi * (j + 1) / M + 2 * np.random.rand(1) * effnoise\n )\n else:\n raise ValueError(f\"Unknown iopt: {iopt}\")\n\n return thet, eta",
"def bucket_sort(numbers, num_buckets=10):\n if len(numbers) <= 1:\n return numbers\n\n # Find range of given numbers (minimum and maximum values)\n maximum = max(numbers)\n minimum = min(numbers)\n\n # calculate each bucket's size\n bucket_size = (maximum - minimum + 1) / num_buckets\n\n # Create list of buckets to store numbers in subranges of input range\n buckets = [[] for i in range(num_buckets)]\n\n # Loop over given numbers and place each item in appropriate bucket\n for num in numbers:\n bucket_index = 0\n while bucket_index < num_buckets:\n if (num - minimum) >= (bucket_size * bucket_index) and (num - minimum) < (bucket_size * (bucket_index+1)):\n buckets[bucket_index].append(num)\n break\n bucket_index += 1\n\n # Sort each bucket using insertion sort\n for i in range(num_buckets):\n insertion_sort(buckets[i])\n\n # Loop over buckets and append each bucket's numbers into output list\n index = 0\n for i in range(num_buckets):\n for j in range(len(buckets[i])):\n # mutate input instead of creating new output list\n numbers[index] = buckets[i][j]\n index += 1\n\n return numbers",
"def gen_jobs(lower_idx, upper_idx, target=\"llvm\"):\n return [LorienTestWorkload(target, idx).to_job() for idx in range(lower_idx, upper_idx)]",
"def _shuffle_buckets(self, buckets, num_buckets_per_round):\n\n\n current_index = 0\n while current_index < len(buckets):\n cur_buckets = [buckets[current_index + i]\n for i in range(num_buckets_per_round)]\n shuffled_users = reduce(lambda x, y: x+y,\n [bucket.users for bucket in cur_buckets])\n shuffle(shuffled_users)\n user_chunks = split_list(shuffled_users, num_buckets_per_round)\n for bucket, user_chunk in zip(cur_buckets, user_chunks):\n bucket.__init__(user_chunk)\n current_index += num_buckets_per_round",
"def distribute_sampling(numSamples, localDevices=None, numChainsPerDevice=1):\n\n global globNumSamples\n\n # Determine number of samples per process\n samplesPerProcess = numSamples // commSize\n\n if rank < numSamples % commSize:\n samplesPerProcess += 1\n\n if localDevices is None:\n\n globNumSamples = numSamples\n\n return samplesPerProcess\n\n numChainsPerProcess = localDevices * numChainsPerDevice\n\n def spc(spp):\n return (spp + numChainsPerProcess - 1) // numChainsPerProcess\n\n a = numSamples % commSize\n globNumSamples = (a * spc(1 + numSamples // commSize) + (commSize - a) * spc(numSamples // commSize)) * numChainsPerProcess\n\n return spc(samplesPerProcess)",
"def createBucketsWithNetworks(trainer, path):\n # Here a list with the model and the corresponding \"address\" (table/bucket) is indexed.\n trainer.trainable_buckets = {\"model\": [], \"table\": [], \"bucketName\": []}\n\n # this list will hold dictionaries with numbers of trainable buckets and utilized buckets.\n trainer.estimators_num_of_table = []\n\n for num, ith_table in enumerate(trainer.lsh.hash_tables):\n print(\"\\nPost-LSH training for buckets in table no. {} of {}...\".format(num + 1, trainer.lsh.num_hashtables))\n # below a set is used to avoid duplicate counting\n table_estimators = {\"buckets_used\": set(), \"trainable_buckets\": 0}\n for jth_hashcode in ith_table.storage.items():\n graphs_in_bucket = [i[1] for i in jth_hashcode[1]]\n\n if len(graphs_in_bucket) >= trainer.lsh.min_bucket_size: # If it's a trainable bucket.\n table_estimators[\"trainable_buckets\"] += 1\n # Turn the bucket into a Dataset\n x = ListDataset(data_list=graphs_in_bucket)\n\n # Create a model for this bucket.\n bucket_model = SimGNN(trainer.args, trainer.number_of_node_labels, trainer.number_of_edge_labels)\n bucket_optimizer = torch.optim.Adam(bucket_model.parameters(), lr=trainer.args.learning_rate,\n weight_decay=trainer.args.weight_decay)\n checkpoint = torch.load(path)\n bucket_model.load_state_dict(checkpoint['model_state_dict'])\n bucket_optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n\n # Save the model's address.\n trainer.trainable_buckets[\"table\"].append(num)\n trainer.trainable_buckets[\"bucketName\"].append(jth_hashcode[0])\n\n # Train the model and save it to the index.\n trainer.fit(lsh_bucket=x, modelToUse=(bucket_model, bucket_optimizer))\n trainer.trainable_buckets[\"model\"].append(bucket_model)\n\n # Add the estimators' counts to the list before moving to the next table.\n trainer.estimators_num_of_table.append(table_estimators)\n\n pass\n print(\"\\nPost-LSH training completed.\")",
"def distributed_sinkhorn(self, Q: torch.Tensor, nmb_iters: int) ->torch.Tensor:\n with torch.no_grad():\n sum_Q = torch.sum(Q)\n dist.all_reduce(sum_Q)\n Q /= sum_Q\n if self.gpus > 0:\n u = torch.zeros(Q.shape[0])\n r = torch.ones(Q.shape[0]) / Q.shape[0]\n c = torch.ones(Q.shape[1]) / (self.gpus * Q.shape[1])\n else:\n u = torch.zeros(Q.shape[0])\n r = torch.ones(Q.shape[0]) / Q.shape[0]\n c = torch.ones(Q.shape[1]) / (self.gpus * Q.shape[1])\n curr_sum = torch.sum(Q, dim=1)\n dist.all_reduce(curr_sum)\n for _ in range(nmb_iters):\n u = curr_sum\n Q *= (r / u).unsqueeze(1)\n Q *= (c / torch.sum(Q, dim=0)).unsqueeze(0)\n curr_sum = torch.sum(Q, dim=1)\n dist.all_reduce(curr_sum)\n return (Q / torch.sum(Q, dim=0, keepdim=True)).t().float()",
"def slice_sample_bounded_max(N, burn, logdist, xx, widths, step_out, max_attempts, bounds):\n xx = copy.deepcopy(xx)\n D = len(xx)\n samples = []\n if (not isinstance(widths, list)) or len(widths) == 1:\n widths = np.ones(D) * widths\n\n log_Px = logdist(xx)\n\n for ii in range(N + burn):\n log_uprime = np.log(random.random()) + log_Px\n for dd in random.sample(range(D), D):\n x_l = copy.deepcopy(xx)\n x_r = copy.deepcopy(xx)\n xprime = copy.deepcopy(xx)\n\n # Create a horizontal interval (x_l, x_r) enclosing xx\n rr = random.random()\n x_l[dd] = max(xx[dd] - rr*widths[dd], bounds[dd][0])\n x_r[dd] = min(xx[dd] + (1-rr)*widths[dd], bounds[dd][1])\n\n if step_out:\n while logdist(x_l) > log_uprime and x_l[dd] > bounds[dd][0]:\n\n x_l[dd] = max(x_l[dd] - widths[dd], bounds[dd][0])\n while logdist(x_r) > log_uprime and x_r[dd] < bounds[dd][1]:\n x_r[dd] = min(x_r[dd] + widths[dd], bounds[dd][1])\n\n # Propose xprimes and shrink interval until good one found\n zz = 0\n num_attempts = 0\n while True:\n zz += 1\n # print(x_l)\n xprime[dd] = random.random()*(x_r[dd] - x_l[dd]) + x_l[dd]\n \n log_Px = logdist(xx)\n if log_Px > log_uprime:\n xx[dd] = xprime[dd]\n break\n else:\n # Shrink in\n num_attempts += 1\n if num_attempts >= max_attempts:\n # print('Failed to find something')\n break\n elif xprime[dd] > xx[dd]:\n x_r[dd] = xprime[dd]\n elif xprime[dd] < xx[dd]:\n x_l[dd] = xprime[dd]\n else:\n raise Exception('Slice sampling failed to find an acceptable point')\n # Record samples\n if ii >= burn:\n samples.append(copy.deepcopy(xx))\n return samples",
"def bucket_sort_sorted_list(q: int = 1000, n: int = 1000):\n times = []\n for i in range(q):\n sorted_list = sorted([random.randint(-100000, 100000) for iter in range(n)])\n start_time = time.time()\n bucket_sort(sorted_list)\n times.append(time.time() - start_time)\n return times",
"def build_distributions(self):\n res = {}\n n_partitions = self.partition_num\n partition_num = 1\n # each part size\n partition_size = int(math.floor(self.size / n_partitions))\n\n for n in range(int(partition_size), self.size + 1, int(partition_size)):\n if self.learn_start <= n <= self.priority_size:\n distribution = {}\n # P(i) = (rank i) ^ (-alpha) / sum ((rank i) ^ (-alpha))\n pdf = list(\n map(lambda x: math.pow(x, -self.alpha), range(1, n + 1))\n )\n pdf_sum = math.fsum(pdf)\n distribution['pdf'] = list(map(lambda x: x / pdf_sum, pdf))\n # split to k segment, and than uniform sample in each k\n # set k = batch_size, each segment has total probability is 1 / batch_size\n # strata_ends keep each segment start pos and end pos\n cdf = np.cumsum(distribution['pdf'])\n strata_ends = {1: 0, self.batch_size + 1: n}\n step = 1.0 / self.batch_size\n index = 1\n for s in range(2, self.batch_size + 1):\n while cdf[index] < step:\n index += 1\n strata_ends[s] = index\n step += 1.0 / self.batch_size\n\n distribution['strata_ends'] = strata_ends\n\n res[partition_num] = distribution\n\n partition_num += 1\n\n return res",
"def new(num_buckets=256):\n aMap=[]",
"def random_split(self, nr_agents):\n np.random.seed(self.random_seed)\n # Get random indices\n indices = sorted(np.random.randint(0, high=self.samples.shape[0], size=nr_agents - 1).tolist())\n indices = [0] + indices\n indices += [self.samples.shape[0]]\n\n self.samples = self.partition(self.samples, indices, nr_agents)\n self.labels = self.partition(self.labels, indices, nr_agents)",
"def test_chunk_size_priority_over_n_splits(self):\n with self.subTest(input='list', chunk_size=1, n_splits=6):\n self.assertEqual(get_n_chunks(self.test_data, iterable_len=None, chunk_size=1, n_splits=6, n_jobs=None), 13)\n with self.subTest(input='numpy', chunk_size=1, n_splits=6):\n self.assertEqual(get_n_chunks(self.test_data_numpy, iterable_len=None, chunk_size=1, n_splits=6,\n n_jobs=None), 100)\n\n with self.subTest(input='list', chunk_size=3, n_splits=3):\n self.assertEqual(get_n_chunks(self.test_data, iterable_len=None, chunk_size=3, n_splits=3, n_jobs=None), 5)\n with self.subTest(input='numpy', chunk_size=3, n_splits=3):\n self.assertEqual(get_n_chunks(self.test_data_numpy, iterable_len=None, chunk_size=3, n_splits=3,\n n_jobs=None), 34)",
"def worker(nums, out_q):\n outdict = {}\n print(threading.current_thread().name)\n print (\"pid:\", os.getpid())\n print (\"data size:\", nums)\n for n in nums:\n outdict[n] = factorize_naive(n)\n out_q.put(outdict)",
"def make_n_queues(self, n):\n #make the grid queues\n for i in range(n):\n gq = GridQueue.GridQueue(self.next_top, self.next_bottom, self.grid_queue_index)\n gq.max_age = self.MAX_AGE\n gq.sex = i # not used\n gq.PREFERRED_AGE_DIFFERENCE= self.PREFERRED_AGE_DIFFERENCE\n gq.AGE_PROBABILITY_MULTIPLIER = self.AGE_PROBABILITY_MULTIPLIER\n gq.PREFERRED_AGE_DIFFERENCE_GROWTH = self.PREFERRED_AGE_DIFFERENCE_GROWTH\n gq.SB_PROBABILITY_MULTIPLIER = self.SB_PROBABILITY_MULTIPLIER\n \n self.grid_queues[gq.index] = gq\n self.grid_queue_index+=1\n self.spawn_process_for(gq) # start a new process for it\n \n #increment for next grid queue\n self.next_top += self.BIN_SIZE*52\n self.next_bottom += self.BIN_SIZE*52"
] | [
"0.60612345",
"0.5802914",
"0.5703838",
"0.5630621",
"0.56188554",
"0.5605208",
"0.5581452",
"0.5499443",
"0.5498334",
"0.54717845",
"0.5436709",
"0.54261446",
"0.5330848",
"0.53118753",
"0.5299507",
"0.5260812",
"0.5231849",
"0.51845485",
"0.51622194",
"0.5148257",
"0.5145854",
"0.5141757",
"0.5136492",
"0.51346326",
"0.5122737",
"0.51108694",
"0.51078975",
"0.5106014",
"0.51018953",
"0.5081378"
] | 0.7943863 | 0 |
run tweets collection on a list of users using one set of apikey, (apikey, users) as args; the list of users is run sequentially; establish a new database connection for each user, and commit insertions and close connection when done | def runner(args):
apikey, users = args
api = collect.mk_api(apikey)
for user in users:
db_connection = db.mk_connection()
collect.collect_user_tweets(api, user, collect.mk_sql_insert_handler(db_connection))
db.close_connection(db_connection) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def TweetsRealTime(dbname, user, password, table_name, APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET, loop_gathering = False, search_terms = [\"Happy\"]):\n try:\n \"\"\"Be careful with the following global variables. They are necessary to make this script run from the main function\n This is because Twython streamer does not allow other inputs.\n If you run this script stand-alone you can safely remove the globals and it will still work.\"\"\"\n global con \n con = psycopg2.connect(\"dbname = {} user = {} password = {}\".format(dbname,user,password))\n global cur\n cur = con.cursor()\n global tablename\n tablename = table_name\n print \"Connected\"\n except:\n print \"Database connection error\" \n \n try:\n stream = MyStreamer(APP_KEY, APP_SECRET,OAUTH_TOKEN, OAUTH_TOKEN_SECRET)\n print 'Connecting to twitter: will take a minute'\n except ValueError:\n con.close()\n cur.close()\n print 'Something went wrong while making connection with Twitter: '+str(ValueError)\n\n try:\n stream.statuses.filter(track = search_terms) \n except:\n # Shortcut to restarting the script - if the connection cancels then it gracefully terminates the db lock and establishes a new connection\n cur.close\n con.close \n print \"########### Stream terminated ###########\"\n if loop_gathering != False:\n TweetsRealTime(dbname = dbname,\n user = user,\n password = password,\n table_name = table_name,\n search_terms = search_terms,\n APP_KEY = APP_KEY,\n APP_SECRET = APP_SECRET,\n OAUTH_TOKEN = OAUTH_TOKEN,\n OAUTH_TOKEN_SECRET = OAUTH_TOKEN_SECRET,\n loop_gathering = loop_gathering)",
"def get_tweets():\n if not Tweet.objects.all():\n # If the db is empty, don't get max_id.\n tweets = api.search(\n q='#python',\n count=100\n )\n else:\n # If the db is not empty, get max_id.\n subtask(clean_tweetdb)\n max_id = min([tweet.tweet_id for tweet in Tweet.objects.all()])\n tweets = api.search(\n q='#python',\n max_id=max_id,\n count=100\n )\n\n # Store the tweet data in lists.\n tweets_id = [tweet.id for tweet in tweets]\n tweets_date = [tweet.created_at for tweet in tweets]\n tweets_source = [tweet.source for tweet in tweets]\n tweets_favorite_cnt = [tweet.favorite_count for tweet in tweets]\n tweets_retweet_cnt = [tweet.retweet_count for tweet in tweets]\n tweets_text = [tweet.text for tweet in tweets]\n\n # Iterate over these lists and add data to db.\n for i, j, k, l, m, n in zip(\n tweets_id,\n tweets_date,\n tweets_source,\n tweets_favorite_cnt,\n tweets_retweet_cnt,\n tweets_text,\n ):\n try:\n # Check that they are valid.\n Tweet.objects.create(\n tweet_id=i,\n tweet_date=j,\n tweet_source=k,\n tweet_favorite_cnt=l,\n tweet_retweet_cnt=m,\n tweet_text=n,\n )\n except IntegrityError:\n pass",
"def get_tweets():\n clean_tweetdb.delay()\n db_tweets = Tweet.objects.all()\n max_id = min([tweet.tweet_id for tweet in db_tweets])\n tweets = api.search(\n q='#python',\n max_id=max_id,\n count=100\n )\n tweets_id = [tweet.id for tweet in tweets]\n tweets_date = [tweet.created_at for tweet in tweets]\n tweets_source = [tweet.source for tweet in tweets]\n tweets_favorite_cnt = [tweet.favorite_count for tweet in tweets]\n tweets_retweet_cnt = [tweet.retweet_count for tweet in tweets]\n tweets_text = [tweet.text for tweet in tweets]\n\n for i, j, k, l, m, n in zip(\n tweets_id,\n tweets_date,\n tweets_source,\n tweets_favorite_cnt,\n tweets_retweet_cnt,\n tweets_text,\n ):\n try:\n Tweet.objects.create(\n tweet_id=i,\n tweet_date=j,\n tweet_source=k,\n tweet_favorite_cnt=l,\n tweet_retweet_cnt=m,\n tweet_text=n,\n )\n except IntegrityError:\n pass",
"def rest_api(self):\n self.__db_init('rest')\n api = self.__api_init()\n self.c.execute(\"SELECT MAX(id) FROM tweets\")\n db_max_id = self.c.fetchone()[0] \n try: \n most_recent = api.search(q=self.keyword, result_type='recent')[0].id\n except tweepy.TweepError as e:\n print(str(e.message[0]['message']) + \n ' Update api.ini with your proper credentials:')\n print(os.path.abspath(_path_finder('userconfig','api.ini')))\n sys.exit(-1)\n flag = 0\n while ( flag == 0 ):\n try:\n batch = 5000\n flag = batch\n for search_res in tweepy.Cursor(api.search, q=self.keyword,\n count=100, result_type=\"recent\", \n since_id=db_max_id, \n max_id=most_recent).items(batch):\n flag -= 1\n print(search_res.id, search_res.created_at)\n self.c.execute('''INSERT OR IGNORE INTO tweets (id, date) \n VALUES (?, ?)''', \n (search_res.id, search_res.created_at))\n except tweepy.TweepError as e:\n print('I caught an error:', e.message)\n flag = 0\n finally:\n self.c.execute(\"SELECT last_insert_rowid() from tweets\")\n rid = self.c.fetchone()[0]\n if rid:\n self.c.execute('''SELECT id FROM tweets WHERE\n rowid={0}'''.format(rid))\n rid = self.c.fetchone()[0]\n most_recent = rid - 1\n data = api.rate_limit_status()\n print(data['resources']['search'])\n self.conn.commit()\n self.conn.close()\n print('REST database file has been created/updated:') \n print(os.path.abspath(_path_finder(\n 'keydata','{0}_rest.db'.format(self.keyword))))",
"async def run(users):\n async with aiohttp.ClientSession() as session:\n tasks = []\n for user in users:\n tasks.append(\n TornAPI.fetch_torn_user_data(\n session,\n user.params,\n user.id\n )\n )\n\n responses = await asyncio.gather(*tasks, return_exceptions=True)\n return responses",
"def main():\r\n \r\n from TweetProcessor import TweetProcessor\r\n \r\n consumer_key = ''\r\n consumer_secret = ''\r\n tweepy_base_filter = \"Filter:links -Filter:retweets\"\r\n \r\n hashtags = [\r\n \"#covid-19\", \"#covid19\", \"#covid\", \"#coronavirus\", \"#corona\",\r\n \"#covid_19\"\r\n ]\r\n \r\n vt_keys = [\"\"]\r\n batch_size = 5000\r\n \r\n for i in range(len(hashtags)):\r\n \r\n try:\r\n tweepy_filter = hashtags[i] + \" \" + tweepy_base_filter\r\n print(\"starting pull with this filter: \" + str(tweepy_filter))\r\n \r\n tp = TweetProcessor(consumer_key, consumer_secret,\r\n tweepy_filter, vt_keys, batch_size)\r\n \r\n tp.run()\r\n\r\n except Exception as e: \r\n with open(\"tweetProcessorLog.txt\", \"a\") as file:\r\n file.write(\"\\n\" + str(datetime.now()) + \", error: \" + str(e))\r\n \r\n \r\n if e != \"Twitter error response: status code = 429\":\r\n raise e\r\n\r\n \r\n print(\"ERROR OCCURED: waiting for 15 minutes to avoid hitting tweepy request limit\")\r\n print(e)\r\n time.sleep(15 * 60)",
"def get_tweets():\n\n # Read bearer token from secrets file\n with open(\"./secrets.yml\", \"r\") as f:\n bearer_token = yaml.load(f, Loader=yaml.FullLoader)[\"BEARER_TOKEN\"]\n\n # Set start and end times as current time rounded down to nearest minute with supplied offset\n dt_fmt = \"%Y-%m-%dT%H:%M:00Z\"\n dt_now = datetime.datetime.now().replace(second=0, microsecond=0)\n start_time_offset = int(sys.argv[1])\n end_time_offset = int(sys.argv[2])\n dt_end = dt_now - datetime.timedelta(minutes=end_time_offset)\n dt_start = dt_now - datetime.timedelta(minutes=start_time_offset)\n dt_end = dt_end.strftime(dt_fmt)\n dt_start = dt_start.strftime(dt_fmt)\n\n # Make request, checking for mentions in specified time period\n logging.info(\"Getting mentions from Twitter\")\n uri = \"https://api.twitter.com/2/tweets/search/recent\"\n headers = {\"Authorization\": f\"Bearer {bearer_token}\"}\n query = {\"query\": f\"@{ACCOUNT_NAME}\",\n \"expansions\" : \"author_id\",\n \"user.fields\" : \"username\",\n \"start_time\" : dt_start,\n \"end_time\" : dt_end}\n response = requests.get(uri, headers=headers, params=query)\n\n # Make connection to local database\n connection = sqlite3.connect(\"../database/procrystaldb.db\")\n cursor = connection.cursor()\n\n # Get current total number of rows in database\n cursor.execute(\"SELECT COUNT(*) FROM Twitter;\")\n initial_rows = cursor.fetchall()[0][0]\n\n # Get usernames and tweet ids from tweets and save to database\n if response.status_code == 200:\n content = response.json()\n num_results = content[\"meta\"][\"result_count\"]\n if num_results > 0:\n # First get dictionary of usernames\n user_id_to_name = {}\n for user in content[\"includes\"][\"users\"]:\n user_id_to_name[user[\"id\"]] = user[\"username\"]\n # Then get tweet id, username and save to database\n for result in content[\"data\"]:\n # if KEYWORD in result[\"text\"].lower():\n tweet_id = result[\"id\"]\n username = user_id_to_name[result[\"author_id\"]]\n sql_insert = f\"\"\"\n INSERT OR IGNORE INTO Twitter (tweet_id, username, reply_sent)\n VALUES ('{tweet_id}', '{username}', false);\n \"\"\"\n cursor.execute(sql_insert)\n logging.info(f\"Mentions fetched: {num_results}\")\n else:\n logging.error(f\"Get mentions errored with: {response.json()}\")\n\n # Get final total number of rows in database and therefore number of rows added\n cursor.execute(\"SELECT COUNT(*) FROM Twitter;\")\n final_rows = cursor.fetchall()[0][0]\n rows_added = final_rows - initial_rows\n logging.info(f\"New mentions added: {rows_added}\")\n\n # Close database connection\n connection.commit()\n connection.close()\n\n return rows_added",
"def get_users_tweets(users, min_date, max_date, result_limit, key, secret_key):\n \n auth = tweepy.OAuthHandler(key, secret_key)\n max_datetime = datetime.datetime.strptime(max_date, '%Y-%m-%d').date()\n min_datetime = datetime.datetime.strptime(min_date, '%Y-%m-%d').date()\n \n #initialize variables\n max_id = None\n min_id = None\n mydata = []\n\n for user in users:\n my_api = tweepy.API(auth)\n\n statuses = my_api.user_timeline(screen_name=user,\n count=result_limit,\n tweet_mode = 'extended',\n include_retweets=True\n )\n for item in statuses: \n if item.created_at.date() > max_datetime:\n max_id = item.id\n #max_id_date = item.created_at\n elif min_datetime <= item.created_at.date() <= max_datetime:\n mydata.append(get_tweet_info(item))\n if max_id == None:\n max_id = item.id\n else: #less than min_datetime\n min_id = item.id\n #min_id_date = item.created_at\n break\n\n while min_id == None:\n start_id = item.id\n statuses = my_api.user_timeline(screen_name=user,\n count=result_limit,\n max_id=start_id,\n tweet_mode = 'extended',\n include_retweets=True\n )\n for item in statuses: \n if item.created_at.date() > max_datetime:\n max_id = item.id\n #max_id_date = item.created_at\n elif min_datetime <= item.created_at.date() <= max_datetime:\n mydata.append(get_tweet_info(item))\n if max_id == None:\n max_id = item.id\n else: #less than min_datetime\n min_id = item.id\n #min_id_date = item.created_at\n break \n #get another 25 starting with the max... \n # if min_id is None... then call again... using the bottom of mydata as max_id...\n\n df = pd.DataFrame(mydata).loc[:,'tweet_id':'favourite_count']\n return df",
"def get_tweets_upload_to_bq(users, min_date, max_date, result_limit, key, secret_key, project_id, table_id, **context):\n\n if context.get(\"yesterday_ds\"):\n df = get_users_tweets(users, context['yesterday_ds'], context['yesterday_ds'], result_limit, key, secret_key)\n else: \n df = get_users_tweets(users, min_date, max_date, result_limit, key, secret_key)\n upload_df_to_bq(df, project_id, table_id)\n\n return 'scraped tweets and uploaded to bq'",
"def main():\n\tusers = deque([])\n\tthreads = []\n\tprint(\"Starting with: %d \" % SO_FAR)\n\ttry:\n\t\tcursor.execute(\"SET SESSION net_read_timeout = 3600\")\n\t\tcursor.execute(\"SELECT user_id, screen_name FROM `test`.`new_temp` WHERE listed_count > 10 LIMIT %d OFFSET %d\" % (NUM_USERS, SO_FAR))\n\t\tfor row in cursor:\n\t\t\tusers.append((int(row[0]), row[1]))\n\t\tfor t in range(0, NO_THREADS):\n\t\t\tt = Worker(users)\n\t\t\tthreads.append(t)\n\t\t\tt.start()\n\t\tfor t in threads:\n\t\t\tt.join()\n\t\twith open('twitter_get_lists_for_user.txt', 'w') as f:\n\t\t\tf.write(str(count.value))\n\t\tf.close()\n\t\tsys.exit(0)\n\texcept Exception as e:\n\t\tprint e\n\tfinally:\n\t\tcnx.close()",
"def harvest_users_from_tweets( session: sqlalchemy.orm.Session, FLUSH_LIMIT=10, startTweet=None ):\n users = 0\n lastTweetId = None\n\n tweetIter = tweets_with_other_data_generator( session )\n\n try:\n while True:\n tweet = next( tweetIter )\n user = update_or_create_user_from_tweet( tweet, session )\n\n users += 1\n lastTweetId = tweet.tweetID\n\n if users % FLUSH_LIMIT == 0:\n print( 'flushing at %s users' % users )\n session.commit()\n\n except StopIteration:\n print( \"%s users created or updated\" % users )\n session.commit()\n\n finally:\n print(\"Last processed tweet %s\" % lastTweetId)\n # session.commit()\n session.close()",
"def gatherData():\n\n # connect to database, set up the tweepy API object, and find the next date to search\n\n cnx = sqlite3.connect(DB_FILE)\n api = generateAPI(wait_on_rate_limit=True, wait_on_rate_limit_notify=True, **CREDENTIALS)\n\n nextdate = findNextDate(cnx, FIRSTDATE)\n year = nextdate[:4]\n\n # attempt to scrape box office data\n\n bodata = getTopMovies(BO_ENDPOINT, nextdate, CNT_MOVIES)\n\n if not bodata.empty:\n bodata.to_sql('boxoffice', ENGINE, if_exists='append', index=False)\n print(\"Box Office Data for [{0}] Written to Database\".format(nextdate))\n else:\n raise BOError(\"Error Scraping/Writing Box Office Data for [{0}]\".format(nextdate))\n\n # attempt to collect tweet data\n\n for movie in bodata.title:\n try:\n tweets = searchMovie(api, movie, nextdate, MAX_TWEETS)\n if not tweets.empty:\n tweets.to_sql('tweets', ENGINE, if_exists='append', index=False)\n print(\"Tweets for [{0}] Written to Database\".format(movie))\n else:\n raise TweetError(\"Error Fetching/Writing Tweets for [{0}]\".format(movie))\n except tweepy.error.TweepError:\n raise TweetError(\"Error Fetching/Writing Tweets for [{0}]\".format(movie))\n\n # attempt to collect movie metadata\n\n for movie in bodata.title:\n minfo = getMovieInfo(OMDB_ENDPOINT, processTitle(movie), year)\n if minfo:\n insertMovie(cnx, movie, nextdate, minfo)\n else:\n minfo = getMovieInfo(OMDB_ENDPOINT, processTitle(movie), str(int(year)-1))\n if minfo:\n insertMovie(cnx, movie, nextdate, minfo)\n else:\n print(\"Movie: [{0}] Could Not be Found via OMDB\".format(movie))\n\n # commit changes and close DB connection\n\n cnx.commit()\n cnx.close()\n\n print(\"\\nAll Data for {0} Successfully Added to the Database!\\n\".format(nextdate))\n return nextdate",
"def generate():\n db.connection.drop_database(app.config['MONGODB_DB'])\n\n for _ in range(100):\n generate_user()\n\n for _ in range(10):\n generate_api_user()\n\n return json_ok()",
"def insert_tweets(conn: Connection, fetch_data: Iterable[Dict]) -> None:\n\n s = Session(bind=conn)\n meta = MetaData()\n meta.reflect(bind=conn)\n s.add_all([Tweet(**t) for t in fetch_data])\n s.commit()",
"def main():\n parser = argparse.ArgumentParser(\n description=\"\"\"Lookup and Store Tweets utility. Fetches a tweet from\n the Twitter API given its GUID. Stores or updates the author\n Profile and Tweet in the db.\"\"\"\n )\n parser.add_argument(\n \"tweetGUIDs\",\n metavar=\"TWEET_GUID\",\n nargs=\"+\",\n help=\"\"\"List of one or more Tweet GUIDs to lookup, separated by spaces.\n The Tweet 'GUID' in the local db is equivalent to the Tweet 'ID'\n on the Twitter API.\"\"\",\n )\n parser.add_argument(\n \"-u\",\n \"--update-all-fields\",\n action=\"store_true\",\n help=\"\"\"If supplied, update all fields when updating an existing\n local Tweet record. Otherwise, the default behavior is to\n only update the favorite and retweet counts of the record.\"\"\",\n )\n args = parser.parse_args()\n\n APIConn = authentication.getAppOnlyConnection()\n tweets.lookupTweetGuids(\n APIConn, args.tweetGUIDs, onlyUpdateEngagements=not (args.update_all_fields)\n )",
"async def test_fetch_all_w_data(database, valid_data):\n await database.setup_database(reset=True)\n for id,user_id,embeddings,batch_id in valid_data:\n await database.insert_user(user_id=user_id)\n await database.insert(id=id,\n user_id=user_id,\n embeddings=embeddings,\n batch_id=batch_id)\n assert isinstance(await database.fetch_all(user_id=user_id),list)\n await database.close_pool()",
"def populate_table(\n user, created_at, tweet, retweet_count, id_str, my_database=DATABASE):\n\n dbconnect = connect_db(DATABASE)\n\n cursor = dbconnect.cursor()\n cursor.execute(\"USE airflowdb\")\n\n # add content here\n\n try:\n query=\"INSERT INTO tweets (user, created_at, tweet, retweet_count, id_str) VALUES (%s, %s, %s, %s, %s)\"\n \n cursor.execute(query, (user, created_at, tweet, retweet_count, id_str))\n \n dbconnect.commit()\n print(\"commited\")\n\n except mysql.Error as e:\n print(e)\n dbconnect.rollback()\n\n cursor.close()\n dbconnect.close()\n\n return",
"def insert_tweets(post):\n db_file = dbFile\n try:\n conn = sqlite3.connect(db_file)\n except Exception as e:\n print(e)\n for i in range(0,len(post['id_str'])):\n tweet={}\n tweet['user_id']=post['user_id']\n tweet['created_at'] = post['created_at'][i]\n tweet['id_str'] = post['id_str'][i]\n tweet['text'] = post['text'][i]\n tweet['source'] = post['source'][i]\n tweet['truncated'] = post['truncated'][i]\n tweet['in_reply_to_status_id_str'] = post['in_reply_to_status_id_str'][i]\n tweet['in_reply_to_screen_name'] = post['in_reply_to_screen_name'][i]\n tweet['coordinatesNumber'] = post['coordinatesNumber'][i]\n tweet['coordinates'] = post['coordinates'][i]\n tweet['coordinatesType'] = post['coordinatesType'][i]\n tweet['placeCountry'] = post['placeCountry'][i]\n tweet['placeCountryCode'] = post['placeCountryCode'][i]\n tweet['placeFullName'] = post['placeFullName'][i]\n tweet['placeID'] = post['placeID'][i]\n tweet['placeName'] = post['placeName'][i]\n tweet['placeType'] = post['placeType'][i]\n tweet['placeURL'] = post['placeURL'][i]\n tweet['quoted_status_id_str'] = post['quoted_status_id_str'][i]\n tweet['is_quote_status'] = post['is_quote_status'][i]\n tweet['retweeted_status'] = post['retweeted_status'][i]\n tweet['quote_count'] = post['quote_count'][i]\n tweet['reply_count'] = post['reply_count'][i]\n tweet['retweet_count'] = post['retweet_count'][i]\n tweet['favorite_count'] = post['favorite_count'][i]\n tweet['hashtagsNumber'] = post['hashtagsNumber'][i]\n tweet['hashtags'] = post['hashtags'][i]\n tweet['urls'] = post['urls'][i]\n tweet['urlsNumber'] = post['urlsNumber'][i]\n tweet['user_mentionsNumber'] = post['user_mentionsNumber'][i]\n tweet['user_mentions'] = post['user_mentions'][i]\n tweet['mediaNumber'] = post['mediaNumber'][i]\n tweet['mediaURLs'] = post['mediaURLs'][i]\n tweet['mediaType'] = post['mediaType'][i]\n tweet['symbolsNumber'] = post['symbolsNumber'][i]\n tweet['symbols'] = post['symbols'][i]\n tweet['pollsNumber'] = post['pollsNumber'][i]\n tweet['polls'] = post['polls'][i]\n tweet['possibly_sensitive'] = post['possibly_sensitive'][i]\n tweet['filter_level'] = post['filter_level'][i]\n tweet['lang'] = post['lang'][i]\n tweet['matching_rulesNumber'] = post['matching_rulesNumber'][i]\n tweet['matching_rulesTag'] = post['matching_rulesTag'][i]\n tweet['matching_rulesID'] = post['matching_rulesID'][i]\n tweet['collected_at'] = strftime(\"%Y-%m-%d %H:%M:%S\", gmtime())\n sqlite_insert(conn, 'GTapp_tweets', tweet)",
"def processIdiom(i, idiom):\n global db\n cursor = db.cursor()\n \n statuses = searchIdiom(i, idiom)\n #Should have at least 10 statuses to be useful\n if len(statuses) < 10:\n return\n # loop through each of my statuses, and print its content\n for status in statuses:\n #print status[\"text\"]\n try:\n id_str = status[\"id_str\"]\n text = status[\"text\"].encode('ascii','ignore')\n retweet_count = status[\"retweet_count\"]\n user = status[\"user\"]\n created_at = status[\"created_at\"]\n entities = status[\"entities\"]\n entities = json.dumps(entities)\n\n user_id_str = user[\"id_str\"]\n name = user[\"name\"].encode('ascii','ignore')\n screen_name = user[\"screen_name\"]\n description = user[\"description\"].encode('ascii','ignore')\n user_entities = json.dumps(user[\"entities\"])\n followers_count = user[\"followers_count\"]\n listed_count = user[\"listed_count\"]\n profile_image_url = user[\"profile_image_url\"]\n verified = str(user[\"verified\"])\n\n \n cursor.execute('INSERT IGNORE INTO idiomatic_tweets(idiom, id_str, text, retweet_count, user_id_str, created_at, entities, name, profile_image_url, screen_name, verified) \\\n VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s);' \\\n ,(idiom, id_str, text, retweet_count, user_id_str, created_at, entities, name, profile_image_url, screen_name, verified))\n\n cursor.execute('INSERT IGNORE INTO idiomatic_users(id_str, name, screen_name, description, entities, followers_count, listed_count, profile_image_url, verified) \\\n VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s);' \\\n ,(user_id_str, name, screen_name, description, user_entities, followers_count, listed_count, profile_image_url, verified))\n except Exception as e:\n print('Error : ', e)\n print sys.exc_traceback.tb_lineno \n\n if statuses:\n cursor.execute('INSERT IGNORE INTO idiomatic_idioms(idiom) VALUES (%s);', (idiom,))\n else:\n print \"statuses\" , statuses",
"def users_lookup(api, user_ids):\n url = \"https://api.twitter.com/1.1/users/lookup.json\"\n i = 0\n rate_status = check_rate_limit(api, url)\n remaining_requests = rate_status[\"remaining\"]\n if not remaining_requests:\n delay = rate_status['reset'] - time.time()\n if delay > 0:\n print \"Sleeping {0}...\".format(delay)\n time.sleep(delay)\n rate_status = check_rate_limit(api, url)\n remaining_requests = rate_status[\"remaining\"]\n\n users = {}\n for i in range(0, len(user_ids), 100):\n interval = 100\n user_id_param = [long(uid) for uid in user_ids[i:i+interval]]\n params = {\"user_id\": user_id_param, \"include_entities\": True}\n response = api.get(url, params=params)\n if 'errors' in response.json():\n for error in response.json().get('errors', []):\n print 'Error code:', error.get('code', 'NO CODE')\n print 'Error message:', error.get('message', 'NO MESSAGE')\n else:\n for user in response.json():\n id_str = user[\"id_str\"]\n users[id_str] = user\n response.close()\n\n remaining_requests -= 1\n if not remaining_requests:\n delay = rate_status['reset'] - time.time()\n if delay > 0:\n print \"Sleeping {0}...\".format(delay)\n time.sleep(delay)\n rate_status = check_rate_limit(api, url)\n remaining_requests = rate_status[\"remaining\"]\n return users",
"def load_tweets(self, max_items=10000, user=None):\n for name, info in self.users.items():\n try:\n os.mkdir(self.root + info['party'].lower().replace(' ', '_'))\n except FileExistsError:\n pass\n \n filepath = self.root + info['party'].lower().replace(' ', '_')\n filepath = filepath + '/' + name.lower().replace(' ', '')\n try:\n print(f'Reading tweets from {name}')\n user = info['screen_name']\n curs = tweepy.Cursor(self.api.user_timeline,\n screen_name=user,\n count=200,\n tweet_mode=\"extended\"\n ).items(max_items)\n\n with open(filepath + '.jsonl', 'w') as f:\n for status in curs:\n tweet = status._json\n json_dump_line(tweet, f)\n \n except tweepy.TweepError as exc:\n print(exc)\n os.remove(filepath + '.jsonl')",
"def run_full(self):\n # Get a cursor of all the keywords in the databse\n keyword_cursor = self.mongo_controller.get_keyword_batch_cursor()\n\n # Go over each batch\n for batch in keyword_cursor:\n\n # Go over each keyword in the batch\n for keyword_dict in bson.decode_all(batch):\n\n keyword = Keyword.from_dict(keyword_dict) # Cast the keyword to a Keyword object\n twitter_results = self.crawler.search(keyword, limit=self.limit_requests) # Run the search\n self.__save_tweets(twitter_results) # Save all tweets to the DB",
"def add_users(key, users):\n for user in users:\n if not get_user(key, user['username']):\n tenant_name = user['project']\n tenant = get_tenant(key, tenant_name)\n\n password = user['password']\n if 'email' in user:\n email = user['email']\n else:\n email = None\n\n key.users.create(name=user['username'], password=password,\n email=email, tenant_id=tenant.id)\n print(\"Created user '{}'\".format(user['username']))\n return True",
"def getTweets(user,maxTweets=3000,count=0,tweetId=0,cacheKey=False,credentials=False):\n api = ratedTwitter(credentials=credentials)\n limit = api.get_user_timeline_limited()\n if limit:\n print '*** TWITTER RATE-LIMITED: statuses.user_timeline:'+user+':'+str(count)+' ***'\n raise getTweets.retry(countdown = limit)\n else:\n args = {'screen_name':user,'exclude_replies':False,'include_rts':True,'trim_user':False,'count':200}\n if tweetId:\n args['max_id'] = tweetId\n \n okay, result = api.get_user_timeline(**args)\n \n if okay:\n print '*** TWITTER USER_TIMELINE: '+user+':'+str(tweetId)+' ***'\n if result:\n newCount = count + len(result)\n if maxTweets:\n if newCount > maxTweets: # No need for the task to call itself again.\n pushTweets.delay(result,user,cacheKey=cacheKey) # Give pushTweets the cache-key to end the job.\n return\n else:\n pushTweets.delay(result,user)\n\n newTweetId = min([t['id'] for t in result]) - 1 \n # Not done yet, the task calls itself with an updated count and tweetId.\n getTweets.delay(user,maxTweets=maxTweets,count=newCount,tweetId=newTweetId,cacheKey=cacheKey,credentials=credentials)\n else:\n pushTweets.delay([],user,cacheKey=cacheKey) # Nothing more found, so tell pushTweets the job is done.\n else:\n if result == '404':\n setUserDefunct(user)\n cache.set('scrape_tweets','done')\n if result == 'limited':\n raise getTweets.retry(countdown = api.get_user_timeline_limited())",
"def get_data(max_users = 30):\n\n #cache here\n\n\n mongo_db = pymongo.Connection('grande.rutgers.edu', 27017)['citybeat_production']\n tweets_collection = mongo_db['tweets']\n\n\n test_tweets = []\n seed_users = []\n\n\n\n try:\n with open('./cache_tweets.pkl'):\n tweets, test_tweets = pickle.load(open('./cache_tweets.pkl'))\n except:\n print 'in'\n # not here. fetch\n tweets = []\n for n, tweet in enumerate(tweets_collection.find({\"created_time\": {\"$gte\":\"1380643200\", \"$lt\":\"1380902400\"}})):\n tweet['text'] = re.sub(r\"(?:\\@|https?\\://)\\S+\", \"\", tweet['text'])\n tweet['text'] = re.sub(r'^https?:\\/\\/.*[\\r\\n]*', '', tweet['text'], flags=re.MULTILINE)\n tweets.append(tweet)\n print n\n\n #print 'len of tweets ', len(tweets), 'len of test = ', len(test_tweets)\n test_tweets = tweets[-100:-1]\n #pickle.dump((tweets, test_tweets), open('./cache_tweets.pkl','w'))\n\n tweets = [tweet for tweet in tweets if len(tweet['text'].split(' ')) >= 10]\n\n\n\n\n\n\n return tweets, test_tweets",
"def get_tweets():\n\n\tuser ='kaiserkumars'\n\t# api = twitter.Api(consumer_key='iJoZZuV7etVrJfE4K9ir8sIqa',\n\t# consumer_secret='uyJyWoP05z2MUKnggW7vHnIG2sckmM1aHRMgGveZLyrz8401Xs',\n\t# access_token_key='622588040-TYDgG1UlGUvA1hW8PA7mOG5CiMw0WiuPZlkoP8cc',\n\t# access_token_secret='laAmFjeLhWzOK7Y524VevdMdeLeNpnmCUmjee1AQU7osj')\n\tapi = twitter.Api(consumer_key=get_secret('consumer_key'),\n\t consumer_secret=get_secret('consumer_secret'),\n\t access_token_key=get_secret('access_token_key'),\n\t access_token_secret=get_secret('access_token_secret'))\n\n\tstatuses = api.GetUserTimeline(user_id=622588040,count=0)\n\t# print(statuses)\n\t# duplicate='UNIQUE constraint failed: mtwitter_weatherdata.location, core_weatherdata.metric, core_weatherdata.date'\n\tbulk_insert=[]\n\t# print(dir(TwitterData))\n\tfor s in statuses:\n\t\t# print(s)\n\t\tdt = parse(s.created_at)\n\t\t# print(dt)\n\t\tdata = TwitterData(org_name=s.user.name,profile_url=s.user.profile_image_url,tweet_id =s.id,screen_name=s.user.screen_name, tweet = s.text, date= dt, favCount =0)\n\t\tbulk_insert.append(data)\n\ttry:\n\t\tTwitterData.objects.bulk_create(bulk_insert)\n\t\tprint(\"Success.\")\n\texcept Exception as e:\n\t\t# if(str(e)==duplicate):\n\t\t# \tprint('Duplicate Data')\n\t\t# else:\n\t\tprint(str(e))\n\n\treturn statuses",
"def get_all_tweets(user, alltweets):\n\n #TODO check that user is a valid screen name??\n\n #make initial request for most recent tweets (200 is the maximum allowed count)\n new_tweets = api.user_timeline(user, count=200)\n\n #save most recent tweets\n alltweets.extend(new_tweets)\n #print alltweets[0].text\n\n #save the id of the oldest tweet less one\n oldest = alltweets[-1].id - 1\n\n #print \"starting loop\"\n #keep grabbing tweets until there are no tweets left to grab\n while len(new_tweets) > 0:\n\n #all subsiquent requests starting with oldest\n new_tweets = api.user_timeline(user, count=200, max_id=oldest)\n\n #save most recent tweets\n alltweets.extend(new_tweets)\n\n #update the id of the oldest tweet less one\n oldest = alltweets[-1].id - 1",
"def user_scrape(users: List, outfile: str, limit: int, since: str) -> None:\n assert(len(users)>0)\n\n # put params into configuration object\n c = twint.Config()\n c.Hide_output = True\n c.Limit = limit\n c.Language = \"en\"\n c.Output = os.path.join(data_dir, outfile)\n c.Store_csv = True\n c.Since = since\n\n for u in tqdm(users, total=293):\n # and run the search for each username\n sleep(2.5)\n try:\n #print(\"scanning tweets from user {}\".format(u))\n c.Username = u\n twint.run.Search(c)\n except:\n continue",
"def get_tweets(api, listOfTweets, keyword, numOfTweets=20, date_since='2019-1-1', lang=\"en\"):\n spinner = yaspin()\n spinner.start()\n for tweet in tweepy.Cursor(api.search, q=keyword, lang=lang, since=date_since).items(numOfTweets):\n # Add tweets in this format\n dict_ = {'Screen Name': tweet.user.screen_name,\n 'User Name': tweet.user.name,\n 'Tweet Created At': str(tweet.created_at),\n 'Tweet Text': tweet.text,\n 'Cleaned Tweet Text': func.clean_tweets(tweet.text),\n 'User Location': str(tweet.user.location),\n 'Tweet Coordinates': str(tweet.coordinates),\n 'Retweet Count': str(tweet.retweet_count),\n 'Retweeted': str(tweet.retweeted),\n 'Phone Type': str(tweet.source),\n 'Favorite Count': str(tweet.favorite_count),\n 'Favorited': str(tweet.favorited),\n 'Replied': str(tweet.in_reply_to_status_id_str)\n }\n listOfTweets.append(dict_)\n spinner.stop()\n return listOfTweets",
"def pushTwitterUsers(twits):\n rightNow = datetime.now().isoformat()\n for twit in twits:\n twit['last_scraped'] = rightNow\n \n renderedTwits = [ renderTwitterUser(twit) for twit in twits ]\n pushRenderedTwits2Neo.delay(renderedTwits)\n pushRenderedTwits2Cass.delay(renderedTwits)\n #return True"
] | [
"0.62722933",
"0.6248243",
"0.61886966",
"0.6075609",
"0.6019225",
"0.59416574",
"0.5891333",
"0.58175486",
"0.57583445",
"0.57216036",
"0.5684764",
"0.5676086",
"0.56157917",
"0.5606975",
"0.5600138",
"0.55862385",
"0.55794513",
"0.5542012",
"0.55383205",
"0.5537845",
"0.55251604",
"0.5515986",
"0.5500997",
"0.54992473",
"0.548533",
"0.54815876",
"0.5474499",
"0.5463358",
"0.5460292",
"0.5457984"
] | 0.86470985 | 0 |
Returns (indent,rest) depending on line indentation | def separeIndent(self,line):
p=0
while p<len(line) and line[p] in string.whitespace:
p=p+1
rest=line[p:]
return line[:p],rest | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_indent(line):\n if is_blank(line):\n return 0\n\n stripped = line.lstrip(' ')\n if stripped.startswith('- '):\n stripped = stripped[2:].lstrip(' ')\n # This is a list item\n\n return len(line) - len(stripped)",
"def get_function_indent(line: str) -> int:\n first_function_entrance = line.index('def')\n indents = line[:first_function_entrance]\n indents_space_count = len(indents)\n return indents_space_count",
"def test_with_custom_indent(self):\n self.assertEqual(indent('foo', 3), ' foo')",
"def get_indent(op):\n ret = \"\"\n for ii in range(op):\n # Would tab be better?\n ret += \" \"\n return ret",
"def initial_indentation(self):\n if self._indent_first_line[-1] is None:\n return self.indentation\n else:\n return self._indent_first_line[-1]",
"def __indent_text_block(text):\n lines = text.splitlines()\n if len(lines) > 1:\n out = lines[0] + \"\\r\\n\"\n for i in range(1, len(lines)-1):\n out = out + \" \" + lines[i] + \"\\r\\n\"\n out = out + \" \" + lines[-1]\n return out\n return text",
"def section_overindented(): # noqa: D416",
"def dedent(self):\n self._indent_first_line.pop()\n return self._indentation_levels.pop()",
"def GetIndent(self):\r\n\r\n return self._indent",
"def getIndentationLevel(self, code_line):\n print(\"the code line : \", code_line)\n return len(code_line) - len(code_line.lstrip(\" \"))",
"def _indent_spaces(self):\n if prettyprint:\n return self.indentspace * self._indent_level\n else:\n return ''",
"def test_with_multiple_lines(self):\n self.assertEqual(indent('foo\\nbar'),\n ' foo\\n bar')",
"def determine_indentation(self):\n # Ensuring NEWLINE tokens are actually specified as such\n if self.current.tokenum != NEWLINE and self.current.value == \"\\n\":\n self.current.tokenum = NEWLINE\n\n # I want to change dedents into indents, because they seem to screw nesting up\n if self.current.tokenum == DEDENT:\n self.current.tokenum, self.current.value = self.convert_dedent()\n\n if (\n self.after_space\n and not self.is_space\n and (not self.in_container or self.just_started_container)\n ):\n # Record current indentation level\n if not self.indent_amounts or self.current.scol > self.indent_amounts[-1]:\n self.indent_amounts.append(self.current.scol)\n\n # Adjust indent as necessary\n while self.adjust_indent_at:\n self.result[self.adjust_indent_at.pop()] = (\n INDENT,\n self.indent_type * (self.current.scol - self.groups.level),\n )\n\n # Roll back groups as necessary\n if not self.is_space and not self.in_container:\n while not self.groups.root and self.groups.level >= self.current.scol:\n self.finish_hanging()\n self.groups = self.groups.parent\n\n # Reset indentation to deal with nesting\n if self.current.tokenum == INDENT and not self.groups.root:\n self.current.value = self.current.value[self.groups.level :]",
"def test_incorrect_indent(self, x=1, y=2): # noqa: D207, D213, D407",
"def indentation(self) -> str:\n return self._indent",
"def line_indentation(line):\n line = line.replace(\"\\t\", \" \" * 8)\n return len(line) - len(line.lstrip())",
"def get_indent(self, s):\n return len(s) - len(s.lstrip())",
"def _indent(text):\n prefix = ' ' * 4\n\n def prefixed_lines():\n for line in text.splitlines(True):\n yield (prefix + line if line.strip() else line)\n\n return ''.join(prefixed_lines())",
"def printIndent(s,lvl) :\n for line in s.split('\\n') :\n print('%s%s' % (' '*lvl,line))",
"def indent_code(self, code):\n\n if isinstance(code, string_types):\n code_lines = self.indent_code(code.splitlines(True))\n return ''.join(code_lines)\n\n tab = \" \"\n inc_token = ('{', '(', '{\\n', '(\\n')\n dec_token = ('}', ')')\n\n code = [ line.lstrip(' \\t') for line in code ]\n\n increase = [ int(any(map(line.endswith, inc_token))) for line in code ]\n decrease = [ int(any(map(line.startswith, dec_token)))\n for line in code ]\n\n pretty = []\n level = 0\n for n, line in enumerate(code):\n if line == '' or line == '\\n':\n pretty.append(line)\n continue\n level -= decrease[n]\n pretty.append(\"%s%s\" % (tab*level, line))\n level += increase[n]\n return pretty",
"def indentation(self):\n return self.options.indentation_char * sum(self._indentation_levels)",
"def indentation(self, text):\n\n tab = text.rfind(' '*4)\n\n if tab != -1: \n if tab%4 == 0:\n if tab//4 + 1 == self.indent:\n return True\n\n else:\n self.indent = tab//4 + 1\n return False\n \n else:\n return True\n\n else:\n return True",
"def Indent(indents):\n return ' ' * (2 * indents)",
"def test_indent():\n\n multiline_string = \"\"\"test\ntest1\ntest2\ntest3\"\"\"\n\n indented_multiline_string = \"\"\" test\n test1\n test2\n test3\"\"\"\n\n assert indented_multiline_string == _indent(multiline_string, 4)",
"def reindent(text, indent):\n\n lines = textwrap.dedent(text).split('\\n')\n while lines and not lines[0].strip():\n lines.pop(0)\n while lines and not lines[-1].strip():\n lines.pop()\n return indent + ('\\n' + indent).join(lines)",
"def _indent(s, width=4, skip_first_line=False):\n lines = s.splitlines(1)\n indentstr = ' '*width\n if skip_first_line:\n return indentstr.join(lines)\n else:\n return indentstr + indentstr.join(lines)",
"def get_indent(width1: int, width2: int) -> str:\n return ' ' * (width2 - width1)",
"def test_with_default_indent(self):\n self.assertEqual(indent('foo'), ' foo')",
"def is_exactly_indented(line, indent):\n if is_blank(line):\n return False\n return get_indent(line) == indent",
"def block_indent(text, spaces=4):\n return '\\n'.join([(' ' * spaces) + l for l in pprint.pformat(text).splitlines()])"
] | [
"0.6844012",
"0.6561831",
"0.65578353",
"0.64595574",
"0.6439283",
"0.6341889",
"0.6288154",
"0.628526",
"0.6260362",
"0.62497234",
"0.6222218",
"0.62192994",
"0.62021357",
"0.6173746",
"0.6167052",
"0.6164297",
"0.6146771",
"0.6128034",
"0.60974324",
"0.6087453",
"0.60744375",
"0.60639626",
"0.60292524",
"0.602754",
"0.6013943",
"0.59613067",
"0.59443474",
"0.5943707",
"0.5924094",
"0.58903843"
] | 0.714444 | 0 |
Loads the image from disk, returns image with axis in the natural order (deep last). Data format should be numpy.uint8. Time complexity goes square if numpy.uint16, especially for segmentation. | def load(self, path, shape=(1024, 1024, 35), dtype='uint16'):
valid_dtypes = ['uint8', 'uint16']
if dtype not in valid_dtypes:
raise ValueError('dtype should be either one of %s' % ', '.join(valid_dtypes))
im = io.imread(path)
im = numpy.rollaxis(im, 0, 3)
if im.shape != shape and shape is not None:
factors = tuple(map(lambda z: int(z[0] / z[1]), zip(im.shape, shape)))
if any([f > 1 for f in factors]):
# im = resize(im, shape, mode='constant')
im = downscale_local_mean(im, factors=factors).astype(im.dtype)
# if 'conf' in path.lower():
else:
warnings.warn('Target shape is not a multiple below initial shape')
with warnings.catch_warnings():
warnings.simplefilter('ignore')
if dtype == 'uint8' and im.dtype != numpy.uint8:
im = img_as_ubyte(im)
if dtype == 'uint16' and im.dtype != numpy.uint16:
im = img_as_uint(im)
self.image_raw = im
self.name = path | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def loader(path):\n img = np.load(path)\n img = img[1:4]\n if np.random.choice((True, False)):\n img = img[:, :, ::-1]\n img = np.array(img)\n if np.random.choice((True, False)):\n img = img[:, ::-1, :]\n img = np.array(img)\n\n img = img.transpose((1, 2, 0)) # pytorch is going to rotate it back\n return img",
"def load(path, axis=(1, 2, 0), n_jobs=12):\n files = os.listdir(path)\n files.sort()\n\n files = [fn for fn in files if not os.path.basename(fn).endswith('.db')]\n\n # Exclude extra files\n newlist = []\n for file in files:\n if file.endswith('.png') or file.endswith('.bmp') or file.endswith('.tif'):\n try:\n int(file[-7:-4])\n newlist.append(file)\n except ValueError:\n continue\n\n files = newlist[:] # replace list\n\n # Load data and get bounding box\n data = Parallel(n_jobs=n_jobs)(delayed(read_image)(path, file) for file in tqdm(files, 'Loading'))\n if axis != (0, 1, 2):\n return np.transpose(np.array(data), axis)\n\n return np.array(data)",
"def image_load(path) -> numpy.ndarray:\n # file\n na = numpy.array(Image.open(path))\n # fix shape\n na = numpy.moveaxis(na, [2,0,1], [0,1,2])\n # shape is now (3,h,w), add 1\n na = na.reshape(1,3,na.shape[1],na.shape[2])\n # change type\n na = na.astype(\"float32\") / 255.0\n return na",
"def _load_disk(self):\r\n s = self.file_string + ' '\r\n im = Image.open(self.file_string)\r\n\r\n self.ix, self.iy = im.size\r\n s += '(%s)' % im.mode\r\n self.alpha = (im.mode == 'RGBA' or im.mode == 'LA')\r\n\r\n if self.mipmap:\r\n resize_type = Image.BICUBIC\r\n else:\r\n resize_type = Image.NEAREST\r\n\r\n # work out if sizes > MAX_SIZE or coerce to golden values in WIDTHS\r\n if self.iy > self.ix and self.iy > MAX_SIZE: # fairly rare circumstance\r\n im = im.resize((int((MAX_SIZE * self.ix) / self.iy), MAX_SIZE))\r\n self.ix, self.iy = im.size\r\n n = len(WIDTHS)\r\n for i in xrange(n-1, 0, -1):\r\n if self.ix == WIDTHS[i]:\r\n break # no need to resize as already a golden size\r\n if self.ix > WIDTHS[i]:\r\n im = im.resize((WIDTHS[i], int((WIDTHS[i] * self.iy) / self.ix)),\r\n resize_type)\r\n self.ix, self.iy = im.size\r\n break\r\n\r\n if VERBOSE:\r\n print('Loading ...{}'.format(s))\r\n\r\n if self.flip:\r\n im = im.transpose(Image.FLIP_TOP_BOTTOM)\r\n\r\n RGBs = 'RGBA' if self.alpha else 'RGB'\r\n self.image = im.convert(RGBs).tostring('raw', RGBs)\r\n self._tex = ctypes.c_int()\r\n if 'fonts/' in self.file_string:\r\n self.im = im",
"def load(self, file, lazy=True):\n # individual files for each slice\n # we got one file, nice!\n \n if not lazy:\n\n if file in self.imagedict.keys():\n return self.imagedict[file]\n else:\n self.imagedict[file] = self.load(file, True)\n self.imagedict[file] *= 1\n return self.imagedict[file]\n \n else:\n \n ending = splitext(file)[-1].lower()\n if ending in ['.nii', '.hdr', '.nii.gz', '.gz']:\n if self.correct_orientation:\n vol = ni.open_image(file, verbose=False)\n self.affine = vol.get_aligned_transformation(\"RAS\")\n data = vol.aligned_volume\n else:\n f = nib.load(file)\n self.affine = f.affine\n self.pixdim = np.asarray(f.header['pixdim'][1:])\n data = f.get_data()\n return data\n # elif ending in ['.nrrd', '.nhdr']:\n # if self.correct_orientation:\n # vol = nr.open_image(file, verbose=False)\n # self.affine = vol.get_aligned_transformation(\"RAS\")\n # f = vol.aligned_volume\n # else:\n # try:\n # f, h = nrrd.read(file)\n # except:\n # print('could not read file {}'.format(file))\n # logging.getLogger('data').error('could not read file {}'.format(file))\n # raise Exception('could not read file {}'.format(file))\n # self.affine = np.eye(4)\n # return f\n # elif ending in ['.dcm']:\n # f = pydicom.dcmread(file).pixel_array\n # return f\n # elif ending in ['.mha', '.mhd']:\n # f = skio.imread(file, plugin='simpleitk')\n # self.affine = np.eye(4)\n # return f\n elif ending in ['.png', '.pgm', '.pnm']:\n data = imread(file)\n if len(data.shape) > 2:\n return np.transpose(data, [2, 0, 1])\n else:\n return data\n return imread(file)\n else:\n raise Exception('{} not known'.format(ending))",
"def _read_datafile(self,path):\n \tlabels, images = [], []\n \twith gzip.GzipFile(path) as f:\n \t for line in f:\n \t vals = line.strip().split()\n \t labels.append(float(vals[0]))\n \t images.append([float(val) for val in vals[1:]])\n \tlabels = np.array(labels, dtype=np.int32)\n \tlabels[labels == 10] = 0 # fix weird 0 labels\n \timages = np.array(images, dtype=np.float32).reshape(-1, 16, 16, 1)\n \timages = (images + 1) / 2\n \treturn images, labels",
"def load(image_path):\n\tpil_image = Image.open(image_path).convert(\"RGB\")\n\t# convert to BGR format\n\timage = np.array(pil_image)[:, :, [2, 1, 0]]\n\treturn image",
"def load_image(file_name):\n if not osp.exists(file_name):\n print('{} not exist'.format(file_name))\n return\n image = np.asarray(io.imread(file_name))\n if len(image.shape)==3 and image.shape[2]>3:\n image = image[:, :, :3]\n # print(image.shape) #should be (x, x, 3)\n return image",
"def _load(self) -> np.ndarray:\n with self._fs.open(self._filepath, mode=\"r\") as f:\n image = Image.open(f).convert(\"RGBA\")\n return np.asarray(image)",
"def load_nifti(file_path, dtype=np.float32, incl_header=False, z_factor=None, mask=None):\n \n img = nib.load(file_path)\n struct_arr = img.get_data().astype(dtype)\n \n # replace infinite values with 0\n if np.inf in struct_arr:\n struct_arr[struct_arr == np.inf] = 0.\n \n # replace NaN values with 0 \n if np.isnan(struct_arr).any() == True:\n struct_arr[np.isnan(struct_arr)] = 0.\n \n if mask is not None:\n struct_arr *= mask\n \n if z_factor is not None:\n struct_arr = zoom(struct_arr, z_factor)\n \n if incl_header:\n return struct_arr, img\n else:\n return struct_arr",
"def load_images(mraw, h, w, N, bit=16, roll_axis=True):\n\n if int(bit) == 16:\n images = np.memmap(mraw, dtype=np.uint16, mode='r', shape=(N, h, w))\n elif int(bit) == 8:\n images = np.memmap(mraw, dtype=np.uint8, mode='r', shape=(N, h, w))\n elif int(bit) == 12:\n warnings.warn(\"12bit images will be loaded into memory!\")\n #images = _read_uint12_video(mraw, (N, h, w))\n images = _read_uint12_video_prec(mraw, (N, h, w))\n else:\n raise Exception(f\"Unsupported bit depth: {bit}\")\n\n\n #images=np.fromfile(mraw, dtype=np.uint16, count=h * w * N).reshape(N, h, w) # about a 1/3 slower than memmap when loading to RAM. Also memmap doesn't need to read to RAM but can read from disc when needed.\n if roll_axis:\n return np.rollaxis(images, 0, 3)\n else:\n return images",
"def _load_image(self, id_: str) -> Tensor:\n filename = os.path.join(self.root, \"output\", id_ + \".jpg\")\n with Image.open(filename) as img:\n array = np.array(img)\n tensor: Tensor = torch.from_numpy(array) # type: ignore[attr-defined]\n # Convert from HxWxC to CxHxW\n tensor = tensor.permute((2, 0, 1))\n return tensor",
"def read_image(path):\n img = ndimage.imread(path, mode=\"RGB\") \n return img",
"def load_and_process_image(self, im_path):\n image = Image.open(im_path).convert('RGB')\n image = transforms.ToTensor()(image)\n image = 2 * image - 1\n return image",
"def load_data(path,size, scale = True):\n images = os.listdir(path)\n images.sort()\n\n X = []\n for i, img in enumerate(images):\n photo = plt.imread(os.path.join(path,img))\n if size:\n photo = tf.image.resize(photo, (size, size))\n X.append(photo)\n \n X = np.array(X)\n if scale:\n X = X/X.max() \n return X",
"def imread(filename):\n return np.asarray(Image.open(filename), dtype=np.uint8)[..., :3]",
"def load(path):\n print(\"path\", path)\n print(Path(path).is_file())\n if Path(path).is_file():\n img = image.imread(path)\n print(f\"Loading image of dimensions {img.shape[0]} x \"\n f\"{img.shape[1]}\")\n return np.array(img)\n raise FileNotFoundError",
"def load_idx(path: str) -> np.ndarray:\n open_fcn = gzip.open if path.endswith('.gz') else open\n with open_fcn(path, 'rb') as f:\n return _load_uint8(f)",
"def load_volume(name, nx, ny, nz):\n\n # load raw volume into memory\n img = np.fromfile(name, dtype=np.float32)\n img = np.reshape(img, (ny, nx, nz))\n\n return img.transpose(0, 2, 1)",
"def read_image(self, item):\n assert item['image_dtype'] == 'uint16'\n\n filename = os.path.join(self.home(item['basename']))\n s = open(filename, 'rb').read()\n assert hashlib.md5(s).hexdigest() == item['md5']\n img = np.fromstring(s, dtype=item['image_dtype']).byteswap()\n img = img.reshape(item['image_shape'])\n return img",
"def read_image_file(file_name):\n return torch.from_numpy(np.asarray(Image.open(file_name).convert('L')))",
"def read_img(img_path):\n img_list=[]\n print('image loading...')\n for _,_,files in os.walk(img_path):\n for f in files:\n if f.find('.dcm')>=0:\n tmp_img=dicom.dcmread(os.path.join(img_path,f))\n tmp_img=tmp_img.pixel_array#[0::2,0::2]\n img_list.append(tmp_img)\n img_data=np.array(img_list)\n print('done')\n return img_data",
"def read_image(images_root):\n im_array = np.load(images_root)\n return im_array",
"def load_image(filename, color=True):\n img = skimage.img_as_float(skimage.io.imread(filename, as_grey=not color)).astype(np.float32)\n if img.ndim == 2:\n img = img[:, :, np.newaxis]\n if color:\n img = np.tile(img, (1, 1, 3))\n elif img.shape[2] == 4:\n img = img[:, :, :3]\n return img",
"def load_full_im(self, im_name):\n # return np.genfromtxt(im_name, delimiter=self.delim)#[:,1:] # first column gives column number\n try: \n return np.loadtxt(im_name, delimiter=self.delim,\n usecols=range(1,self.pic_width+1))\n except IndexError as e:\n error('Image analysis failed to load image '+im_name+'\\n'+str(e))\n return np.zeros((self.pic_width, self.pic_height))",
"def _load_image(self, filename):\n\n path = filename.split(\"/\")\n image_id = path[len(self.directory.split(\"/\")) - 1]\n\n try:\n img = imread(filename)[:, :, :self.num_channels]\n except IndexError:\n tmp = imread(filename)\n img = np.stack([tmp] * 3).transpose(1, 2, 0)\n orig_shape = img.shape[:2]\n img = self._process(img)\n\n masks = np.zeros(self.imsize)\n\n # Load training labels if we're loading a training dataset\n if self.train:\n masks = self._load_mask(image_id)\n\n return (img, masks, image_id, orig_shape)",
"def load(filename):\n img = image.load_img(filename, target_size=(299, 299))\n np_image = image.img_to_array(img)\n np_image = np.array(np_image).astype('float32')/255\n\n # Make to a rank 4 tensor (1, 299, 299, 3) -> 1 is for the batch size\n np_image = np.expand_dims(np_image, axis=0)\n\n return np_image",
"def imgRead(filename: str, representation: int) -> np.ndarray:\r\n if representation==LOAD_GRAY_SCALE:\r\n img = cv2.imread(filename,0)\r\n else:\r\n img = cv2.imread(filename)\r\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\r\n return img.astype('uint8')",
"def read_img(path):\n img = Image.open(path)\n img_arr = np.array(img, dtype='int32')\n img.close()\n return img_arr",
"def load_data(path, mode='train'):\n\n labels_path = os.path.join(path, f'{mode}-labels-idx1-ubyte.gz')\n images_path = os.path.join(path, f'{mode}-images-idx3-ubyte.gz')\n\n with gzip.open(labels_path, 'rb') as lbpath:\n labels = np.frombuffer(lbpath.read(), dtype=np.uint8, offset=8)\n\n with gzip.open(images_path, 'rb') as imgpath:\n images = np.frombuffer(imgpath.read(), dtype=np.uint8, offset=16).reshape(len(labels), 784)\n\n normalized_images = normalize_data(images)\n one_hot_labels = one_hot_encoding(labels, num_classes=10)\n\n return normalized_images, one_hot_labels"
] | [
"0.6431803",
"0.6287837",
"0.62287796",
"0.619413",
"0.6191195",
"0.61317974",
"0.6061726",
"0.5990384",
"0.59458464",
"0.5924528",
"0.58976215",
"0.5853149",
"0.5837823",
"0.58332664",
"0.58236",
"0.5814487",
"0.581173",
"0.581148",
"0.5806939",
"0.5799971",
"0.5781366",
"0.57706827",
"0.5768538",
"0.5750928",
"0.57467544",
"0.57440454",
"0.57372624",
"0.5722539",
"0.57218444",
"0.5720536"
] | 0.65080833 | 0 |
Filters by first convolving the background with a gaussian filter. Then substract the obtained image to the origin and finally refilter with another Gaussian filter with a variance 10 times smaller. Variance specified in utils module. | def filter(self, op=GaussianFilter):
if self._verbose > 0:
print("Filtering...")
# Import from utils specified params.
params = get_filtering_params()
negative = self.image_raw - op(sigma=params['sigma_bgd']).convolve(self.image_raw)
self.image_filtered = op(sigma=params['sigma_spots']).convolve(negative) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def smooth_gauss(image, variance=2, kernel_size=(9, 9)):\n return cv2.GaussianBlur(image, kernel_size, variance)",
"def differenceOfGausssians(image,sigma0, sigma1,window_size, roi, out = None):\n return (vigra.filters.gaussianSmoothing(image,sigma0,window_size=window_size,roi = roi)-vigra.filters.gaussianSmoothing(image,sigma1,window_size=window_size,roi = roi))",
"def apply_filter(self, image):\n gauss_low = cv2.GaussianBlur(image, ksize=(0,0), sigmaX=self._sigma_low , sigmaY=self._sigma_low)\n gauss_high = cv2.GaussianBlur(image, ksize=(0,0), sigmaX=self._sigma_high, sigmaY=self._sigma_high)\n\n filtered_image = gauss_low - gauss_high\n\n return normalize(filtered_image, nb_bits=8)",
"def __gaussian_blur(self, img, kernel_size=3):\n return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)",
"def gaussianBlur(img,ksize=(5,5),sigma=10):\n #kernel = cv2.getGaussianKernel(ksize,sigma)\n dst = np.zeros_like(img)\n cv2.GaussianBlur(src=img,dst=dst,ksize=ksize,sigmaX=0)\n return dst",
"def gaussian_blur(img, kernel=(3, 3)):\n out = cv2.GaussianBlur(img, kernel, 0)\n return out",
"def run_gaussian_smoothing(image, kernel_size=5):\n return cv2.GaussianBlur(image, (kernel_size, kernel_size), 0)",
"def gaussianBlurring(frame):\n return cv2.GaussianBlur(frame, ksize =(11, 11), sigmaX = 0)",
"def preprocess(image, resize=rsz_default, kernel=kernel_size, sigma=0):\n\timage = to_uint8(image)\n\timage_small = cv2.resize(image, (0,0), fx=resize, fy=resize)\n\timage_gblur = cv2.GaussianBlur(image_small, kernel, sigma)\n\treturn image_gblur",
"def gaussian_blur(self,img):\n return cv2.GaussianBlur(img, (self.kernel_size, self.kernel_size), 0)",
"def gaussian_blurring(self,input_image,kernel_size,sigma):\n #Applying Gaussian Blur filter\n output_image=cv2.GaussianBlur(input_image,kernel_size,sigma)\n return output_image",
"def gs_blur(self,k,img):\n SIG = self.sigma\n sig = [SIG,k*SIG,k*k*SIG,k*k*k*SIG,k*k*k*k*SIG]\n gsArray = [0,1,2,3,4]\n scaleImages = [0,1,2,3,4]\n \n for i in range(5):\n gsArray[i] = scipy.ndimage.filters.gaussian_filter(img,sig[i])\n\n return gsArray",
"def dynamic_masking(image):\n image = img_as_float(image)\n background = gaussian_filter(median_filter(image,3),1)\n image[background > threshold_otsu(background)/5.0] = 0.0\n \n return image",
"def backgroundEstimator(self, image):\n return self.bg_filter.convolve(image)",
"def gaussian_blur(self, img):\n kernel_size = self.gaussian_blur_params[\"kernel_size\"]\n return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)",
"def fake_gaussian(img, vertical_horizontal_sigma, iter=3):\n sigma_vertical, sigma_horizontal = vertical_horizontal_sigma\n h_blured = box_filter1d(img, sigma_horizontal, horizontal=True, iter=iter)\n blured = box_filter1d(h_blured, sigma_vertical, horizontal=False, iter=iter)\n return blured",
"def apply_smoothing(image, kernel_size=3):\n return cv2.GaussianBlur(image, (kernel_size, kernel_size), 0)",
"def blur_ground(X):\n return img_conv(X, kernel_blur)",
"def Gauss_filter(data, sigma=(0,2,2), mode='wrap'): \n import scipy.ndimage.filters as flt\n return flt.gaussian_filter(data, sigma=sigma, mode=mode)",
"def test_gaussian_filter():\n\n def rgb2gray(rgb):\n r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]\n gray = 0.2989 * r + 0.5870 * g + 0.1140 * b\n\n return gray\n\n img = rgb2gray(np.array(Image.open('data/graf.png')))\n gx, x = gauss_module.gauss(4)\n gx = gx.reshape(1, gx.shape[0])\n gy = gx.reshape(gx.shape[1], gx.shape[0])\n smooth_img = conv2(img, gx * np.array(gy))\n\n test_smooth_img = gauss_module.gaussianfilter(img, 4)\n\n assert np.all(smooth_img.round(5) == test_smooth_img.round(5))",
"def gaussian_blur(img, size, iterations=1):\n for x in range(iterations):\n img = cv2.GaussianBlur(img, (size, size), 0)\n return img",
"def gaussian_blur(source : Image, destination : Image = None, sigma_x : float = 0, sigma_y : float = 0, sigma_z : float = 0) -> Image:\n\n\n kernel_size_x = sigma_to_kernel_size(sigma_x)\n kernel_size_y = sigma_to_kernel_size(sigma_y)\n kernel_size_z = sigma_to_kernel_size(sigma_z)\n\n execute_separable_kernel(\n source,\n destination,\n __file__,\n '../clij-opencl-kernels/kernels/gaussian_blur_separable_' + str(len(destination.shape)) + 'd_x.cl',\n 'gaussian_blur_separable_' + str(len(destination.shape)) + 'd',\n kernel_size_x,\n kernel_size_y,\n kernel_size_z,\n sigma_x,\n sigma_y,\n sigma_z,\n len(destination.shape)\n )\n\n return destination",
"def gaussian_blur(img, kernel_size):\n return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)",
"def gaussian_blur(img, kernel_size):\n return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)",
"def gaussian_blur(img, kernel_size):\n return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)",
"def gaussian_blur(img, kernel_size):\n return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)",
"def gaussian_blur(img, kernel_size):\n return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)",
"def gaussian_blur(img, kernel_size):\n return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)",
"def gaussian_blur(img, kernel_size):\n return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)",
"def gaussian_blur(img, kernel_size):\n return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)"
] | [
"0.69585425",
"0.65215564",
"0.64743555",
"0.630799",
"0.62878084",
"0.6255153",
"0.6228166",
"0.6222164",
"0.62092054",
"0.62084013",
"0.62078834",
"0.6186285",
"0.61779565",
"0.61562985",
"0.6127383",
"0.6112547",
"0.6106225",
"0.606717",
"0.6064921",
"0.60606796",
"0.602015",
"0.60129267",
"0.6010744",
"0.6010744",
"0.6010744",
"0.6010744",
"0.6010744",
"0.6010744",
"0.6010744",
"0.6010744"
] | 0.70931846 | 0 |
DEPRECATED, replaced by detect_and_fit for simplicity and speed issues. Detect spots with a specified detector (from the spotdetector.py module) and the detection params from utils module. Spots are identified by their position, i.e. 'x.y.z'. | def _detect_spots(self, detector=LocalMax, **kwargs):
if self._verbose > 0:
print("Detecting...", end="")
spots = detector(**kwargs).locate(self.image_filtered)
# Spots are identified by their position:
self.spots = [Spot(tuple(s)) for s in spots]
if self._verbose > 0:
print('%i spots detected.' % len(self.spots)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def spot_detection(data, roi_size=6, blobs=None, processes=None, **kwargs):\n\n if blobs is None:\n blobs = blob_detection(data, **kwargs)\n\n if processes is not None and processes > 1:\n with Pool(processes) as pool:\n spots = pool.map(functools.partial(__spot_detection, data=data, roi_size=roi_size), blobs)\n spots = [spot for spot in spots if not isinstance(spot, LoggingMessage)]\n else:\n spots = []\n for blob in blobs:\n spot = __spot_detection(blob, data, roi_size)\n if isinstance(spot, LoggingMessage):\n _log.log(spot.level, spot.message)\n else:\n spots.append(spot)\n\n _log.info('{} spot(s) were detected'.format(len(spots)))\n spots = numpy.array(spots)\n return spots",
"def object_detector(detector, img_location: str, num_detection=5 ) -> list:\n img = PIL.Image.open(img_location)\n img = np.array(img)\n img = tf.expand_dims(img, axis=0)\n result = detector(img)\n\n ret = []\n\n for i in range(num_detection):\n detection_class_number = int(result['detection_classes'].numpy()[0][i])\n detection_class_name = CLASSES_90[detection_class_number]\n\n detection_score = result['detection_scores'].numpy()[0][i]\n rounded_detection_score = round(float(detection_score), 2)\n\n # Append as a tuple\n ret.append( (detection_class_name, rounded_detection_score) )\n\n return ret",
"def fit_spots(self, spot_model=Mixture, kind='individual'):\n\n model = spot_model()\n # print(model)\n\n # if model.kind == 'individual':\n #\n # loop = self.spots\n #\n # # to_delete = []\n # if self._verbose > 0:\n # loop = tqdm.tqdm(loop, desc=\"Fitting spot models...\")\n #\n # to_delete = []\n # for k in loop:\n # spot = self.image_filtered[extract_cube(point=k.coordinates, side=get_focus_size())]\n # centers = [get_focus_size() // 2, ] * 3\n # results = model.fit(centers=centers, data=spot)\n #\n # # Filter spots for which a model could not be fit.\n # if results:\n # model.params = list(k.coordinates) + list(model.params)\n # k.model = model\n # else:\n # to_delete.append(k)\n #\n # # Filter spots and store in dict\n # self.spots = [k for k in self.spots if k not in to_delete]\n #\n # self.mixture_model = lambda x, y, z: sum([s.model.function(*s.model.params)(x, y, z) for s in self.spots])\n\n if kind == 'collective':\n mask = numpy.zeros(self.image_filtered.shape)\n for s in self.spots:\n mask[ellipse_in_shape(mask.shape, s.coordinates, (10, 10, 5))] = 1\n mask = mask.astype(bool)\n results = model.fit(centers=[s.coordinates for s in self.spots], data=self.image_filtered, mask=mask)\n\n if results:\n params = model.params.reshape(-1, 4)\n for s, p in zip(self.spots, params):\n s.model = Gaussian()\n s.model.params = p\n print(model.params)\n centers = [s.coordinates for s in self.spots]\n backgrounds = [[0], ] * len(self.spots)\n print(centers)\n print(backgrounds)\n self.mixture_model = model.function\n\n if self._verbose > 0:\n time.sleep(0.1)\n print('%i spots fit.' % len(self.spots))",
"def detect_spots(images, threshold=None, remove_duplicate=True,\n return_threshold=False, voxel_size_z=None, voxel_size_yx=100,\n psf_z=None, psf_yx=200):\n # check parameters\n stack.check_parameter(threshold=(float, int, type(None)),\n remove_duplicate=bool,\n return_threshold=bool,\n voxel_size_z=(int, float, type(None)),\n voxel_size_yx=(int, float),\n psf_z=(int, float, type(None)),\n psf_yx=(int, float))\n\n # if one image is provided we enlist it\n if not isinstance(images, list):\n stack.check_array(images,\n ndim=[2, 3],\n dtype=[np.uint8, np.uint16,\n np.float32, np.float64])\n ndim = images.ndim\n images = [images]\n is_list = False\n else:\n ndim = None\n for i, image in enumerate(images):\n stack.check_array(image,\n ndim=[2, 3],\n dtype=[np.uint8, np.uint16,\n np.float32, np.float64])\n if i == 0:\n ndim = image.ndim\n else:\n if ndim != image.ndim:\n raise ValueError(\"Provided images should have the same \"\n \"number of dimensions.\")\n is_list = True\n\n # check consistency between parameters\n if ndim == 3 and voxel_size_z is None:\n raise ValueError(\"Provided images has {0} dimensions but \"\n \"'voxel_size_z' parameter is missing.\".format(ndim))\n if ndim == 3 and psf_z is None:\n raise ValueError(\"Provided images has {0} dimensions but \"\n \"'psf_z' parameter is missing.\".format(ndim))\n if ndim == 2:\n voxel_size_z = None\n psf_z = None\n\n # detect spots\n if return_threshold:\n spots, threshold = _detect_spots_from_images(\n images,\n threshold=threshold,\n remove_duplicate=remove_duplicate,\n return_threshold=return_threshold,\n voxel_size_z=voxel_size_z,\n voxel_size_yx=voxel_size_yx,\n psf_z=psf_z,\n psf_yx=psf_yx)\n else:\n spots = _detect_spots_from_images(\n images,\n threshold=threshold,\n remove_duplicate=remove_duplicate,\n return_threshold=return_threshold,\n voxel_size_z=voxel_size_z,\n voxel_size_yx=voxel_size_yx,\n psf_z=psf_z,\n psf_yx=psf_yx)\n\n # format results\n if not is_list:\n spots = spots[0]\n\n # return threshold or not\n if return_threshold:\n return spots, threshold\n else:\n return spots",
"def _detect_spots_from_images(images, threshold=None, remove_duplicate=True,\n return_threshold=False, voxel_size_z=None,\n voxel_size_yx=100, psf_z=None, psf_yx=200):\n # initialization\n sigma = stack.get_sigma(voxel_size_z, voxel_size_yx, psf_z, psf_yx)\n n = len(images)\n\n # apply LoG filter and find local maximum\n images_filtered = []\n pixel_values = []\n masks = []\n for image in images:\n # filter image\n image_filtered = stack.log_filter(image, sigma)\n images_filtered.append(image_filtered)\n\n # get pixels value\n pixel_values += list(image_filtered.ravel())\n\n # find local maximum\n mask_local_max = local_maximum_detection(image_filtered, sigma)\n masks.append(mask_local_max)\n\n # get optimal threshold if necessary based on all the images\n if threshold is None:\n\n # get threshold values we want to test\n thresholds = _get_candidate_thresholds(pixel_values)\n\n # get spots count and its logarithm\n all_value_spots = []\n minimum_threshold = float(thresholds[0])\n for i in range(n):\n image_filtered = images_filtered[i]\n mask_local_max = masks[i]\n spots, mask_spots = spots_thresholding(\n image_filtered, mask_local_max,\n threshold=minimum_threshold,\n remove_duplicate=False)\n value_spots = image_filtered[mask_spots]\n all_value_spots.append(value_spots)\n all_value_spots = np.concatenate(all_value_spots)\n thresholds, count_spots = _get_spot_counts(thresholds, all_value_spots)\n\n # select threshold where the kink of the distribution is located\n if count_spots.size > 0:\n threshold, _, _ = _get_breaking_point(thresholds, count_spots)\n\n # detect spots\n all_spots = []\n for i in range(n):\n\n # get images and masks\n image_filtered = images_filtered[i]\n mask_local_max = masks[i]\n\n # detection\n spots, _ = spots_thresholding(image_filtered, mask_local_max,\n threshold, remove_duplicate)\n all_spots.append(spots)\n\n # return threshold or not\n if return_threshold:\n return all_spots, threshold\n else:\n return all_spots",
"def process_detections(tracker, detections, nms_max_overlap, frame):\r\n #initialize color map\r\n cmap = plt.get_cmap('tab20b')\r\n colors = [cmap(i)[:3] for i in np.linspace(0, 1, 20)]\r\n\r\n # run non-maxima supression\r\n boxs = np.array([d.tlwh for d in detections])\r\n scores = np.array([d.confidence for d in detections])\r\n classes = np.array([d.class_name for d in detections])\r\n indices = preprocessing.non_max_suppression(boxs, classes, nms_max_overlap, scores)\r\n detections = [detections[i] for i in indices] \r\n\r\n # Call the tracker\r\n tracker.predict()\r\n tracker.update(detections)\r\n\r\n # update tracks\r\n for track in tracker.tracks:\r\n if not track.is_confirmed() or track.time_since_update > 1:\r\n continue \r\n bbox = track.to_tlbr()\r\n class_name = track.get_class()\r\n \r\n # draw bbox on screen\r\n color = colors[int(track.track_id) % len(colors)]\r\n color = [i * 255 for i in color]\r\n cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), color, 1)\r\n cv2.rectangle(frame, (int(bbox[0]), int(bbox[1]-30)), \r\n (int(bbox[0])+(len(class_name)+len(str(track.track_id)))*17, int(bbox[1])), color, -1)\r\n cv2.putText(frame, class_name + \"-\" + str(track.track_id),(int(bbox[0]), \r\n int(bbox[1]-10)),0, 0.5, (255,255,255), 1)\r\n\r\n # if enable info flag then print details about each track\r\n if FLAGS.info:\r\n print(\"Tracker ID: {}, Class: {}, BBox Coords (xmin, ymin, xmax, ymax): {}\".format(str(track.track_id), \r\n class_name, (int(bbox[0]), int(bbox[1]), int(bbox[2]), int(bbox[3]))))\r\n return frame",
"def detector(videoframe, facedetection, maskdetection):\n (h, w) = videoframe.shape[:2]\n blobimage = cv2.dnn.blobFromImage(videoframe, 1.0, (224, 224), (104.0, 177.0, 123.0))\n\n facedetection.setInput(blobimage)\n ffinding = facedetection.forward()\n\n face_list = []\n locations = []\n predictions = []\n\n for i in range(0, ffinding.shape[2]):\n credence = ffinding[0, 0, i, 2]\n if credence > 0.6:\n case = ffinding[0, 0, i, 3:7] * np.array([w, h, w, h])\n (x_start, y_start, x_end, y_end) = case.astype(\"int\")\n (x_start, y_start) = (max(0, x_start), max(0, y_start))\n (x_end, y_end) = (min(w - 1, x_end), min(h - 1, y_end))\n\n image = videoframe[y_start:y_end, x_start:x_end]\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n image = cv2.resize(image, (224, 224))\n image = img_to_array(image)\n image = preprocess_input(image)\n face_list.append(image)\n locations.append((x_start, y_start, x_end, y_end))\n\n if len(face_list) > 0:\n face_list = np.array(face_list, dtype=\"float32\")\n predictions = maskdetection.predict(face_list, batch_size=32)\n return (locations, predictions)",
"def detectSpots(img, detectSpotsParameter = None, correctIlluminationParameter = None, removeBackgroundParameter = None,\n filterDoGParameter = None, findExtendedMaximaParameter = None, detectCellShapeParameter = None,\n verbose = False, out = sys.stdout, **parameter):\n\n timer = Timer();\n \n # normalize data -> to check\n #img = img.astype('float');\n #dmax = 0.075 * 65535;\n #ids = img > dmax;\n #img[ids] = dmax;\n #img /= dmax; \n #out.write(timer.elapsedTime(head = 'Normalization'));\n #img = dataset[600:1000,1600:1800,800:830];\n #img = dataset[600:1000,:,800:830];\n \n # correct illumination\n correctIlluminationParameter = getParameter(detectSpotsParameter, \"correctIlluminationParameter\", correctIlluminationParameter);\n img1 = img.copy();\n img1 = correctIllumination(img1, correctIlluminationParameter = correctIlluminationParameter, verbose = verbose, out = out, **parameter) \n\n # background subtraction in each slice\n #img2 = img.copy();\n removeBackgroundParameter = getParameter(detectSpotsParameter, \"removeBackgroundParameter\", removeBackgroundParameter);\n img2 = removeBackground(img1, removeBackgroundParameter = removeBackgroundParameter, verbose = verbose, out = out, **parameter) \n \n # mask\n #timer.reset();\n #if mask == None: #explicit mask\n # mask = img > 0.01;\n # mask = binary_opening(mask, self.structureELement('Disk', (3,3,3)));\n #img[img < 0.01] = 0; # masking in place # extended maxima\n #out.write(timer.elapsedTime(head = 'Mask')); \n \n #DoG filter\n filterDoGParameter = getParameter(detectSpotsParameter, \"filterDoGParameter\", filterDoGParameter);\n dogSize = getParameter(filterDoGParameter, \"size\", None);\n #img3 = img2.copy(); \n img3 = filterDoG(img2, filterDoGParameter = filterDoGParameter, verbose = verbose, out = out, **parameter);\n \n # normalize \n # imax = img.max();\n # if imax == 0:\n # imax = 1;\n # img /= imax;\n \n # extended maxima\n findExtendedMaximaParameter = getParameter(detectSpotsParameter, \"findExtendedMaximaParameter\", findExtendedMaximaParameter);\n hMax = getParameter(findExtendedMaximaParameter, \"hMax\", None);\n imgmax = findExtendedMaxima(img3, findExtendedMaximaParameter = findExtendedMaximaParameter, verbose = verbose, out = out, **parameter);\n \n #center of maxima\n if not hMax is None:\n centers = findCenterOfMaxima(img, imgmax, verbose = verbose, out = out, **parameter);\n else:\n centers = findPixelCoordinates(imgmax, verbose = verbose, out = out, **parameter);\n \n #cell size detection\n detectCellShapeParameter = getParameter(detectSpotsParameter, \"detectCellShapeParameter\", detectCellShapeParameter);\n cellShapeThreshold = getParameter(detectCellShapeParameter, \"threshold\", None);\n if not cellShapeThreshold is None:\n \n # cell shape via watershed\n imgshape = detectCellShape(img2, centers, detectCellShapeParameter = detectCellShapeParameter, verbose = verbose, out = out, **parameter);\n \n #size of cells \n csize = findCellSize(imgshape, maxLabel = centers.shape[0], out = out, **parameter);\n \n #intensity of cells\n cintensity = findCellIntensity(img, imgshape, maxLabel = centers.shape[0], verbose = verbose, out = out, **parameter);\n\n #intensity of cells in background image\n cintensity2 = findCellIntensity(img2, imgshape, maxLabel = centers.shape[0], verbose = verbose, out = out, **parameter);\n \n #intensity of cells in dog filtered image\n if dogSize is None:\n cintensity3 = cintensity2;\n else:\n cintensity3 = findCellIntensity(img3, imgshape, maxLabel = centers.shape[0], verbose = verbose, out 
= out, **parameter);\n \n if verbose:\n out.write(timer.elapsedTime(head = 'Spot Detection') + '\\n');\n \n #remove cell;s of size 0\n idz = csize > 0;\n \n return ( centers[idz], numpy.vstack((cintensity[idz], cintensity3[idz], cintensity2[idz], csize[idz])).transpose()); \n \n \n else:\n #intensity of cells\n cintensity = findIntensity(img, centers, verbose = verbose, out = out, **parameter);\n\n #intensity of cells in background image\n cintensity2 = findIntensity(img2, centers, verbose = verbose, out = out, **parameter);\n \n #intensity of cells in dog filtered image\n if dogSize is None:\n cintensity3 = cintensity2;\n else:\n cintensity3 = findIntensity(img3, centers, verbose = verbose, out = out, **parameter);\n\n if verbose:\n out.write(timer.elapsedTime(head = 'Spot Detection') + '\\n');\n \n return ( centers, numpy.vstack((cintensity, cintensity3, cintensity2)).transpose());",
"def vis_detections(frame_path, save_path):\n global detections\n # Find the frame number.\n find_frame_num = re.compile(r'\\d+')\n frame_num = int(find_frame_num.search(f).group(0))\n frame_detections = detections[frame_num]\n\n frame = cv2.imread(frame_path)\n frame_with_detections = _draw_detections(frame, frame_detections)\n cv2.imwrite(save_path, frame_with_detections)",
"def run_detect(**kwargs):\n cmd = 'python yolov3/detect.py'\n pms_list = [\n 'image_folder', 'model_def', \n 'weights_path', 'class_path', \n 'conf_thres', 'nms_thres',\n 'batch_size', 'n_cpu', \n 'img_size', 'checkpoint_model'\n ]\n call_command(pms_list, cmd, kwargs)",
"def parse_spot_request(request):\n warnings = []\n model_domain, time_str, variables, image = split_fields(request, 4)\n spot, location_str = model_domain.split(':', 1)\n assert spot.lower() == 'spot'\n if ':' in location_str:\n model, location_str = location_str.split(':', 1)\n model = model.lower()\n else:\n model = 'gfs'\n location = parse_location(location_str)\n\n hours, time_warnings = parse_times(time_str)\n warnings.extend(time_warnings)\n\n if variables is None:\n variables = []\n else:\n variables = variables.split(',')\n variables, var_warnings = validate_variables(variables)\n warnings.extend(var_warnings)\n\n send_image = image is not None\n\n return {'type': 'spot',\n 'model': model,\n 'location': location,\n 'hours': hours,\n 'vars': variables,\n 'warnings': warnings,\n 'send-image': send_image}",
"def _get_positions(self, image):\n\t\tH, W, _ = image.shape\n\t\tpos_list = self.apply_detection(image)\n\t\tdetections = {}\n\t\thasDetection = False\n\t\tfor i, L in enumerate(pos_list):\n\t\t\ttext, coordinates = L[0], L[1]\n\t\t\tfor x, y, w, h in coordinates:\n\t\t\t\tif x < 0 or y < 0 or x + w > W or \\\n\t\t\t\t y + h > H or w <= 1 or h <= 1:\n\t\t\t\t\tcontinue\n\t\t\t\t# add the detection to the dict for tracking\n\t\t\t\tif text == 'face' or text == 'super woman':\n\t\t\t\t\tself.detection_index[self.num_detect] = (x, y, w, h, self.num_save, text, -1)\n\t\t\t\telse:\n\t\t\t\t\tself.detection_index[self.num_detect] = (x, y, w, h, self.num_save, text, -2)\n\t\t\t\tdetections[self.num_detect] = (x, y, w, h)\n\t\t\t\tself.num_detect += 1\n\t\t\t\thasDetection = True\n\t\tif hasDetection:\n\t\t\tself.detection_frames[self.num_save] = detections\n\t\tself.num_save +=1",
"def visualize_detection(self, image):\n\t\tH, W, _ = image.shape\n\t\tpos_list = self.apply_detection(image)\n\t\tdetections = {}\n\t\thasDetection = False\n\t\tfor i, L in enumerate(pos_list):\n\t\t\ttext, coordinates = L[0], L[1]\n\t\t\tCOLOR = COLORS[text]\n\t\t\tfor x, y, w, h in coordinates:\n\t\t\t\t# prune bad homography points\n\t\t\t\tif x < 0 or y < 0 or x + w > W or \\\n\t\t\t\t y + h > H or w <= 1 or h <= 1:\n\t\t\t\t\tcontinue\n\t\t\t\t# add the detection to the dict for tracking\n\t\t\t\tdetections[self.num_detect] = (x, y, w, h)\n\t\t\t\tself.detection_index[self.num_detect] = (x, y, w, h, self.num_save, text)\n\t\t\t\tself.num_detect += 1\n\t\t\t\thasDetection = True\n\t\t\t\t# if the detection is human\n\t\t\t\tif text == 'face':\n\t\t\t\t\tgender = self.genderDetect.classify(image[y:y+h, x:x+w, :])\n\t\t\t\t\tgender = 'female' if gender[0] < 0.5 else 'male'\n\t\t\t\t\tcv2.putText(image, gender, (x + w // 2 -10, y + h + 15),\n\t\t\t\t\t\tcv2.FONT_HERSHEY_SIMPLEX, 0.6, COLOR, 2, cv2.LINE_AA)\n\n\t\t\t\timage = cv2.rectangle(image, (x, y), (x + w, y + h), COLOR, 2)\n\t\t\t\tcv2.putText(image, text, (x, y - 5),\n\t\t\t\t\tcv2.FONT_HERSHEY_SIMPLEX, 0.6, COLOR, 2, cv2.LINE_AA)\n\t\tif hasDetection:\n\t\t\tself.detection_frames[self.num_save] = detections\n\t\tself.num_save +=1\n\t\treturn image",
"def spots_thresholding(image, mask_local_max, threshold,\n remove_duplicate=True):\n # check parameters\n stack.check_array(image,\n ndim=[2, 3],\n dtype=[np.uint8, np.uint16, np.float32, np.float64])\n stack.check_array(mask_local_max,\n ndim=[2, 3],\n dtype=[bool])\n stack.check_parameter(threshold=(float, int, type(None)),\n remove_duplicate=bool)\n\n if threshold is None:\n mask = np.zeros_like(image, dtype=bool)\n spots = np.array([], dtype=np.int64).reshape((0, image.ndim))\n warnings.warn(\"No spots were detected (threshold is {0}).\"\n .format(threshold),\n UserWarning)\n return spots, mask\n\n # remove peak with a low intensity\n mask = (mask_local_max & (image > threshold))\n if mask.sum() == 0:\n spots = np.array([], dtype=np.int64).reshape((0, image.ndim))\n return spots, mask\n\n # make sure we detect only one coordinate per spot\n if remove_duplicate:\n # when several pixels are assigned to the same spot, keep the centroid\n cc = label(mask)\n local_max_regions = regionprops(cc)\n spots = []\n for local_max_region in local_max_regions:\n spot = np.array(local_max_region.centroid)\n spots.append(spot)\n spots = np.stack(spots).astype(np.int64)\n\n # built mask again\n mask = np.zeros_like(mask)\n mask[spots[:, 0], spots[:, 1]] = True\n\n else:\n # get peak coordinates\n spots = np.nonzero(mask)\n spots = np.column_stack(spots)\n\n # case where no spots were detected\n if spots.size == 0:\n warnings.warn(\"No spots were detected (threshold is {0}).\"\n .format(threshold),\n UserWarning)\n\n return spots, mask",
"def define_spot(self,mpos):\n mpos_coord = ((mpos[0] - 199)/87, (mpos[1] - 116)/87)\n if mpos_coord == (1,2):\n spot = \"1\"\n return spot\n if mpos_coord == (2,2):\n spot = \"2\" \n return spot\n if mpos_coord == (4,0):\n spot = \"3\"\n return spot\n if mpos_coord == (4,1):\n spot = \"4\" \n return spot\n else:\n return False",
"def _raw_face_locations(img, number_of_times_to_upsample=1, model=\"hog\"):\n\tif model == \"cnn\":\n\t\treturn cnn_face_detector(img, number_of_times_to_upsample)\n\telse:\n\t\treturn face_detector(img, number_of_times_to_upsample)",
"def extract_face_detections(self):\n self.detector.setInput(self.image_blob)\n self.detections = self.detector.forward()",
"def get_detections(self):\n frame = self.get_still()\n return detector.process_frame(frame, False)",
"def move_to_point_and_extract(coords_from_to: list,\n gps: adapters.GPSUbloxAdapter,\n vesc_engine: adapters.VescAdapterV4,\n smoothie: adapters.SmoothieAdapter,\n camera: adapters.CameraAdapterIMX219_170,\n periphery_det: detection.YoloOpenCVDetection,\n precise_det: detection.YoloOpenCVDetection,\n logger_full: utility.Logger,\n report_field_names,\n trajectory_saver: utility.TrajectorySaver,\n working_zone_polygon,\n img_output_dir,\n nav: navigation.GPSComputing,\n data_collector: datacollection.DataCollector,\n log_cur_dir,\n image_saver: utility.ImageSaver,\n notification: NotificationClient,\n extraction_manager_v3: ExtractionManagerV3,\n ui_msg_queue: posix_ipc.MessageQueue,\n SI_speed: float,\n wheels_straight: bool,\n navigation_prediction: navigation.NavigationPrediction,\n future_points: list,\n allow_extractions: bool,\n x_scan_poly: list,\n cur_field):\n\n if config.ALLOW_FIELD_LEAVING_PROTECTION and cur_field is not None and len(cur_field) > 2:\n enable_field_leaving_protection = True\n else:\n enable_field_leaving_protection = False\n if config.ALLOW_FIELD_LEAVING_PROTECTION:\n if cur_field is None:\n msg = f\"WARNING: robot field leaving protection WILL NOT WORK as given field is None\"\n print(msg)\n logger_full.write(msg)\n elif len(cur_field) < 3:\n msg = f\"WARNING: robot field leaving protection WILL NOT WORK as given field contains \" \\\n f\"{len(cur_field)} points (required ar least 3 points)\"\n print(msg)\n logger_full.write(msg)\n\n extract = SI_speed > 0 and allow_extractions\n\n vesc_speed = SI_speed * config.MULTIPLIER_SI_SPEED_TO_RPM\n speed_fast = config.SI_SPEED_FAST * config.MULTIPLIER_SI_SPEED_TO_RPM\n vesc_speed_fast = speed_fast if SI_speed >= 0 else -speed_fast\n navigation_prediction.set_SI_speed(SI_speed)\n\n raw_angles_history = []\n detections_period = []\n navigations_period = []\n stop_helping_point = nav.get_coordinate(\n coords_from_to[1], coords_from_to[0], 90, 1000)\n learn_go_straight_index = 0\n learn_go_straight_history = []\n\n last_skipped_point = coords_from_to[0]\n start_Nav_while = True\n last_correct_raw_angle = 0\n point_status = \"origin\"\n last_corridor_side = 0\n current_corridor_side = 1\n almost_start = 0\n\n prev_maneuver_time = time.time()\n working_mode_slow = 1\n working_mode_fast = 2\n working_mode_switching = 3\n current_working_mode = working_mode_slow\n last_working_mode = 0\n # True if robot is close to one of current movement vector points, False otherwise; False if speed limit near points is disabled\n close_to_end = config.USE_SPEED_LIMIT\n bumper_is_pressed = None\n\n # message queue sending temporary performance tracker\n if config.QUEUE_TRACK_PERFORMANCE:\n ui_msg_queue_perf = {\n \"max_time\": 0,\n \"min_time\": float(\"inf\"),\n \"total_time\": 0,\n \"total_sends\": 0,\n \"timeouts_exceeded\": 0\n }\n\n # x movements during periphery scans\n x_scan_cur_idx = 0\n x_scan_idx_increasing = True\n\n # set camera to the Y min\n res = smoothie.custom_separate_xy_move_to(X_F=config.X_F_MAX,\n Y_F=config.Y_F_MAX,\n X=smoothie.smoothie_to_mm(\n (config.X_MAX - config.X_MIN) / 2, \"X\"),\n Y=smoothie.smoothie_to_mm(config.Y_MIN, \"Y\"))\n if res != smoothie.RESPONSE_OK:\n msg = \"INIT: Failed to move camera to Y min, smoothie response:\\n\" + res\n logger_full.write(msg + \"\\n\")\n smoothie.wait_for_all_actions_done()\n\n # TODO: maybe should add sleep time as camera currently has delay\n\n if config.AUDIT_MODE:\n vesc_engine.set_target_rpm(vesc_speed, vesc_engine.PROPULSION_KEY)\n 
vesc_engine.start_moving(vesc_engine.PROPULSION_KEY)\n\n try:\n notificationQueue = posix_ipc.MessageQueue(\n config.QUEUE_NAME_UI_NOTIFICATION)\n except KeyboardInterrupt:\n raise KeyboardInterrupt\n except:\n notificationQueue = None\n\n degraded_navigation_mode = False\n\n number_navigation_cycle_without_gps = 0\n\n point_reading_t = last_send_gps_time = slow_mode_time = time.time()\n\n have_time_for_inference = True\n predictor_next_gps_expected_ts = float(\"inf\")\n\n # main navigation control loop\n while True:\n # gps point reading time predictor\n if have_time_for_inference and config.ALLOW_GPS_TIME_PREDICTIONS_LIMITING_INFERENCE:\n if time.time() + config.INFERENCE_MAX_TICK_TIME > predictor_next_gps_expected_ts:\n have_time_for_inference = False\n\n if have_time_for_inference:\n # EXTRACTION CONTROL\n start_t = time.time()\n frame = camera.get_image()\n frame_t = time.time()\n\n per_det_start_t = time.time()\n if extract:\n plants_boxes = periphery_det.detect(frame)\n else:\n plants_boxes = list()\n per_det_end_t = time.time()\n detections_period.append(per_det_end_t - start_t)\n\n if config.SAVE_DEBUG_IMAGES:\n image_saver.save_image(\n frame,\n img_output_dir,\n label=\"PE_view_M=\" + str(current_working_mode),\n plants_boxes=plants_boxes)\n if config.ALLOW_GATHERING and current_working_mode == working_mode_slow and \\\n image_saver.get_counter(\"gathering\") < config.DATA_GATHERING_MAX_IMAGES:\n image_saver.save_image(frame, config.DATA_GATHERING_DIR,\n plants_boxes=plants_boxes, counter_key=\"gathering\")\n\n if extract:\n msg = \"View frame time: \" + str(frame_t - start_t) + \"\\t\\tPeri. det. time: \" + \\\n str(per_det_end_t - per_det_start_t)\n else:\n msg = \"View frame time: \" + str(frame_t - start_t) + \"\\t\\tPeri. det. (extractions are off) time: \" + \\\n str(per_det_end_t - per_det_start_t)\n logger_full.write(msg + \"\\n\")\n\n # MOVEMENT AND ACTIONS MODES\n if config.AUDIT_MODE:\n dc_start_t = time.time()\n\n # count detected plant boxes for each type\n plants_count = dict()\n for plant_box in plants_boxes:\n plant_box_name = plant_box.get_name()\n if plant_box_name in plants_count:\n plants_count[plant_box_name] += 1\n else:\n plants_count[plant_box_name] = 1\n\n # save info into data collector\n for plant_label in plants_count:\n data_collector.add_detections_data(plant_label,\n math.ceil((plants_count[plant_label]) / config.AUDIT_DIVIDER))\n\n # flush updates into the audit output file and log measured time\n if len(plants_boxes) > 0:\n data_collector.save_all_data(\n log_cur_dir + config.AUDIT_OUTPUT_FILE)\n\n dc_t = time.time() - dc_start_t\n msg = \"Last scan weeds detected: \" + str(len(plants_boxes)) + \\\n \", audit processing tick time: \" + str(dc_t)\n logger_full.write(msg + \"\\n\")\n else:\n # slow mode\n if current_working_mode == working_mode_slow:\n if last_working_mode != current_working_mode:\n last_working_mode = current_working_mode\n msg = \"[Working mode] : slow\"\n if config.LOG_SPEED_MODES:\n logger_full.write(msg + \"\\n\")\n if config.PRINT_SPEED_MODES:\n print(msg)\n\n if ExtractionManagerV3.any_plant_in_zone(\n plants_boxes,\n x_scan_poly[x_scan_cur_idx] if config.ALLOW_X_MOVEMENT_DURING_SCANS else working_zone_polygon):\n vesc_engine.stop_moving(vesc_engine.PROPULSION_KEY)\n if config.VERBOSE_EXTRACT:\n msg = \"[VERBOSE EXTRACT] Stopping the robot because we have detected plant(s).\"\n logger_full.write_and_flush(msg+\"\\n\")\n data_collector.add_vesc_moving_time_data(\n 
vesc_engine.get_last_movement_time(vesc_engine.PROPULSION_KEY))\n # TODO this 0 rpm \"movement\" is to prevent robot movement during extractions, need to add this in future to rest speed modes too\n vesc_engine.set_time_to_move(config.VESC_MOVING_TIME, vesc_engine.PROPULSION_KEY)\n vesc_engine.set_target_rpm(0, vesc_engine.PROPULSION_KEY)\n vesc_engine.set_current_rpm(0, vesc_engine.PROPULSION_KEY)\n vesc_engine.start_moving(vesc_engine.PROPULSION_KEY)\n\n # TODO remove thread init from here!\n voltage_thread = threading.Thread(\n target=send_voltage_thread_tf,\n args=(vesc_engine, ui_msg_queue),\n daemon=True)\n voltage_thread.start()\n\n # single precise center scan before calling for PDZ scanning and extractions\n if config.ALLOW_PRECISE_SINGLE_SCAN_BEFORE_PDZ and not config.ALLOW_X_MOVEMENT_DURING_SCANS:\n time.sleep(config.DELAY_BEFORE_2ND_SCAN)\n frame = camera.get_image()\n plants_boxes = precise_det.detect(frame)\n\n # do PDZ scan and extract all plants if single precise scan got plants in working area\n if ExtractionManagerV3.any_plant_in_zone(plants_boxes, working_zone_polygon):\n if config.EXTRACTION_MODE == 1:\n extraction_manager_v3.extract_all_plants()\n elif config.EXTRACTION_MODE == 2:\n extraction_manager_v3.mill_all_plants()\n slow_mode_time = time.time()\n else:\n if config.EXTRACTION_MODE == 1:\n extraction_manager_v3.extract_all_plants()\n elif config.EXTRACTION_MODE == 2:\n extraction_manager_v3.mill_all_plants()\n slow_mode_time = time.time()\n\n if config.VERBOSE_EXTRACT:\n msg = \"[VERBOSE EXTRACT] Extract cycle are finish.\"\n logger_full.write_and_flush(msg+\"\\n\")\n\n vesc_engine.stop_moving(vesc_engine.PROPULSION_KEY)\n\n msg = \"Applying force step forward after extractions cycle(s)\"\n logger_full.write(msg + \"\\n\")\n if config.VERBOSE:\n print(msg)\n vesc_engine.set_time_to_move(config.STEP_FORWARD_TIME, vesc_engine.PROPULSION_KEY)\n vesc_engine.set_target_rpm(\n config.SI_SPEED_STEP_FORWARD * config.MULTIPLIER_SI_SPEED_TO_RPM,\n vesc_engine.PROPULSION_KEY)\n vesc_engine.start_moving(vesc_engine.PROPULSION_KEY)\n vesc_engine.wait_for_stop(vesc_engine.PROPULSION_KEY)\n\n elif config.SLOW_FAST_MODE and time.time() - slow_mode_time > config.SLOW_MODE_MIN_TIME:\n # move cork to fast mode scan position\n if config.VERBOSE:\n msg = \"SLOW MODE: moving cork to fast mode position\\n\"\n logger_full.write(msg)\n\n res = smoothie.custom_separate_xy_move_to(\n X_F=config.X_F_MAX,\n Y_F=config.Y_F_MAX,\n X=smoothie.smoothie_to_mm(\n (config.X_MAX - config.X_MIN) / 2, \"X\"),\n Y=smoothie.smoothie_to_mm((config.Y_MAX - config.Y_MIN) * config.SLOW_FAST_MODE_HEAD_FACTOR,\n \"Y\"))\n if res != smoothie.RESPONSE_OK:\n msg = \"INIT: Keeping in slow mode as failed to move camera to fast mode scan position, smoothie's response:\\n\" + res\n logger_full.write(msg + \"\\n\")\n else:\n msg = \"Switching from 'slow mode' to 'switching mode'\"\n if config.LOG_SPEED_MODES:\n logger_full.write(msg + \"\\n\")\n if config.PRINT_SPEED_MODES:\n print(msg)\n current_working_mode = working_mode_switching\n\n # TODO a bug: will not start moving if config.SLOW_MODE_MIN_TIME == 0 or too low (switch speed applies right after slow mode weeds extractions)\n if not vesc_engine.is_moving(vesc_engine.PROPULSION_KEY):\n vesc_engine.set_time_to_move(config.VESC_MOVING_TIME, vesc_engine.PROPULSION_KEY)\n vesc_engine.set_target_rpm(vesc_speed, vesc_engine.PROPULSION_KEY)\n vesc_engine.start_moving(vesc_engine.PROPULSION_KEY)\n\n # switching (from slow to fast) mode\n elif current_working_mode == 
working_mode_switching:\n if last_working_mode != current_working_mode:\n last_working_mode = current_working_mode\n msg = \"[Working mode] : switching to fast\"\n if config.LOG_SPEED_MODES:\n logger_full.write(msg + \"\\n\")\n if config.PRINT_SPEED_MODES:\n print(msg)\n\n if ExtractionManagerV3.any_plant_in_zone(\n plants_boxes,\n x_scan_poly[x_scan_cur_idx] if config.ALLOW_X_MOVEMENT_DURING_SCANS else working_zone_polygon):\n vesc_engine.stop_moving(vesc_engine.PROPULSION_KEY)\n data_collector.add_vesc_moving_time_data(\n vesc_engine.get_last_movement_time(vesc_engine.PROPULSION_KEY))\n\n if config.VERBOSE:\n msg = \"Moving cork to slow mode scan position\\n\"\n logger_full.write(msg)\n\n # smoothie.wait_for_all_actions_done()\n res = smoothie.custom_separate_xy_move_to(\n X_F=config.X_F_MAX,\n Y_F=config.Y_F_MAX,\n X=smoothie.smoothie_to_mm(\n (config.X_MAX - config.X_MIN) / 2, \"X\"),\n Y=smoothie.smoothie_to_mm(config.Y_MIN, \"Y\"))\n if res != smoothie.RESPONSE_OK:\n msg = \"INIT: Failed to move camera to Y min, smoothie response:\\n\" + res\n logger_full.write(msg + \"\\n\")\n smoothie.wait_for_all_actions_done()\n\n current_working_mode = working_mode_slow\n slow_mode_time = time.time()\n vesc_engine.set_target_rpm(\n vesc_speed, vesc_engine.PROPULSION_KEY)\n continue\n\n sm_cur_pos = smoothie.get_smoothie_current_coordinates(\n convert_to_mms=False)\n if abs(sm_cur_pos[\"X\"] - (config.X_MAX - config.X_MIN) / 2) < 0.001 and \\\n abs(sm_cur_pos[\"Y\"] - (config.Y_MAX - config.Y_MIN) * config.SLOW_FAST_MODE_HEAD_FACTOR) < 0.001:\n msg = \"Switching from 'switching mode' to 'fast mode'\"\n if config.LOG_SPEED_MODES:\n logger_full.write(msg + \"\\n\")\n if config.PRINT_SPEED_MODES:\n print(msg)\n current_working_mode = working_mode_fast\n\n # fast mode\n elif current_working_mode == working_mode_fast:\n if last_working_mode != current_working_mode:\n last_working_mode = current_working_mode\n msg = \"[Working mode] : fast\"\n if config.LOG_SPEED_MODES:\n logger_full.write_and_flush(msg + \"\\n\")\n if config.PRINT_SPEED_MODES:\n print(msg)\n\n if ExtractionManagerV3.any_plant_in_zone(\n plants_boxes,\n x_scan_poly[x_scan_cur_idx] if config.ALLOW_X_MOVEMENT_DURING_SCANS else working_zone_polygon):\n vesc_engine.stop_moving(vesc_engine.PROPULSION_KEY)\n data_collector.add_vesc_moving_time_data(\n vesc_engine.get_last_movement_time(vesc_engine.PROPULSION_KEY))\n\n if config.VERBOSE:\n msg = \"Moving cork to slow mode scan position\\n\"\n logger_full.write(msg)\n\n # smoothie.wait_for_all_actions_done()\n res = smoothie.custom_separate_xy_move_to(\n X_F=config.X_F_MAX,\n Y_F=config.Y_F_MAX,\n X=smoothie.smoothie_to_mm(\n (config.X_MAX - config.X_MIN) / 2, \"X\"),\n Y=smoothie.smoothie_to_mm(config.Y_MIN, \"Y\"))\n if res != smoothie.RESPONSE_OK:\n msg = \"INIT: Failed to move camera to Y min, smoothie response:\\n\" + res\n logger_full.write(msg + \"\\n\")\n smoothie.wait_for_all_actions_done()\n\n msg = \"Switching from 'fast mode' to 'slow mode'\"\n if config.LOG_SPEED_MODES:\n logger_full.write(msg + \"\\n\")\n if config.PRINT_SPEED_MODES:\n print(msg)\n current_working_mode = working_mode_slow\n slow_mode_time = time.time()\n # TODO dont need anymore? 
as rpm is set at the end of slow mode\n # vesc_engine.set_rpm(vesc_speed, vesc_engine.PROPULSION_KEY)\n continue\n elif close_to_end:\n cur_vesc_rpm = vesc_engine.get_current_rpm(\n vesc_engine.PROPULSION_KEY)\n if cur_vesc_rpm != vesc_speed:\n msg = f\"Applying slow speed {vesc_speed} at 'fast mode' \" \\\n f\"(was {cur_vesc_rpm}) \" \\\n f\"because of close_to_end flag trigger\"\n if config.LOG_SPEED_MODES:\n logger_full.write(msg + \"\\n\")\n if config.PRINT_SPEED_MODES:\n print(msg)\n vesc_engine.set_target_rpm(\n vesc_speed, vesc_engine.PROPULSION_KEY)\n vesc_engine.set_current_rpm(\n vesc_speed, vesc_engine.PROPULSION_KEY)\n else:\n cur_vesc_rpm = vesc_engine.get_current_rpm(\n vesc_engine.PROPULSION_KEY)\n if cur_vesc_rpm != vesc_speed_fast:\n msg = f\"Applying fast speed {vesc_speed_fast} at 'fast mode' (was {cur_vesc_rpm})\"\n if config.LOG_SPEED_MODES:\n logger_full.write(msg + \"\\n\")\n if config.PRINT_SPEED_MODES:\n print(msg)\n vesc_engine.set_target_rpm(\n vesc_speed_fast, vesc_engine.PROPULSION_KEY)\n vesc_engine.set_current_rpm(\n vesc_speed_fast, vesc_engine.PROPULSION_KEY)\n\n # NAVIGATION CONTROL\n cur_pos_obj = gps.get_last_position_v2()\n cur_pos = cur_pos_obj.as_old_list\n\n nav_start_t = time.time()\n\n if start_Nav_while:\n navigation_period = 1\n else:\n navigation_period = nav_start_t - prev_maneuver_time\n\n navigations_period.append(navigation_period)\n # time reference to decide the number of detection before resuming gps.get\n prev_maneuver_time = nav_start_t\n # print(\"tock\")\n\n if start_Nav_while:\n prev_pos_obj = cur_pos_obj\n prev_pos = prev_pos_obj.as_old_list\n start_Nav_while = False\n\n # mu_navigations_period, sigma_navigations_period = utility.mu_sigma(navigations_period)\n\n navigation_prediction.set_current_lat_long(cur_pos)\n\n # skip same points (non-blocking reading returns old point if new point isn't available yet)\n if math.isclose(cur_pos_obj.creation_ts, prev_pos_obj.creation_ts):\n # stop robot if there's no new points for a while\n if time.time() - point_reading_t > config.GPS_POINT_TIME_BEFORE_STOP:\n vesc_engine.stop_moving(vesc_engine.PROPULSION_KEY)\n msg = f\"Stopping the robot due to exceeding time 'GPS_POINT_TIME_BEFORE_STOP=\" \\\n f\"{config.GPS_POINT_TIME_BEFORE_STOP}' limit without new gps points from adapter\"\n logger_full.write_and_flush(msg + \"\\n\")\n data_collector.add_vesc_moving_time_data(\n vesc_engine.get_last_movement_time(vesc_engine.PROPULSION_KEY))\n\n gps_reconnect_ts = time.time()\n\n while True:\n cur_pos_obj = gps.get_last_position_v2()\n cur_pos = cur_pos_obj.as_old_list\n\n if math.isclose(cur_pos_obj.creation_ts, prev_pos_obj.creation_ts):\n # reconnect gps adapter to ublox if there's no gps points for a while\n if time.time() - gps_reconnect_ts > config.GPS_POINT_TIME_BEFORE_RECONNECT:\n gps.reconnect()\n gps_reconnect_ts = time.time()\n msg = \"Called GPS adapter to reconnect to ublox due to waiting too much for a new GPS \" \\\n \"point (new points filter)\"\n if config.VERBOSE:\n print(msg)\n logger_full.write_and_flush(msg + \"\\n\")\n else:\n msg = \"New GPS point received, continuing movement\"\n logger_full.write_and_flush(msg + \"\\n\")\n vesc_engine.start_moving(vesc_engine.PROPULSION_KEY)\n break\n else:\n continue\n\n # gps points reading time predictor\n predictor_next_gps_expected_ts = cur_pos_obj.receiving_ts + config.GPS_POINT_WAIT_TIME_MAX\n have_time_for_inference = True\n\n # points filter by quality flag\n if cur_pos[2] != \"4\" and config.ALLOW_GPS_BAD_QUALITY_NTRIP_RESTART:\n # 
restart ntrip if enough time passed since the last ntrip restart\n navigation.NavigationV3.restart_ntrip_service(logger_full)\n\n # stop robot due to bad point quality if allowed\n if config.ALLOW_GPS_BAD_QUALITY_STOP:\n vesc_engine.stop_moving(vesc_engine.PROPULSION_KEY)\n logger_full.write_and_flush(\n \"Stopping the robot for lack of quality gps 4, waiting for it...\\n\")\n data_collector.add_vesc_moving_time_data(\n vesc_engine.get_last_movement_time(vesc_engine.PROPULSION_KEY))\n\n prev_bad_quality_pos_obj = cur_pos_obj\n gps_reconnect_ts = time.time()\n\n while True:\n cur_pos_obj = gps.get_last_position_v2()\n cur_pos = cur_pos_obj.as_old_list\n\n # check if it's a new point\n if math.isclose(cur_pos_obj.creation_ts, prev_bad_quality_pos_obj.creation_ts):\n # reconnect gps adapter to ublox if there's no gps points for a while\n if time.time() - gps_reconnect_ts > config.GPS_POINT_TIME_BEFORE_RECONNECT:\n gps.reconnect()\n gps_reconnect_ts = time.time()\n msg = \"Called GPS adapter to reconnect to ublox due to waiting too much for a new \" \\\n \"GPS point (quality filter)\"\n if config.VERBOSE:\n print(msg)\n logger_full.write_and_flush(msg + \"\\n\")\n continue\n else:\n prev_bad_quality_pos_obj = cur_pos_obj\n\n # check if it's a good quality point\n if cur_pos[2] != \"4\":\n # restart ntrip if enough time passed since the last ntrip restart\n navigation.NavigationV3.restart_ntrip_service(\n logger_full)\n else:\n msg = \"The gps has regained quality 4, starting movement\"\n if config.VERBOSE:\n print(msg)\n logger_full.write_and_flush(msg + \"\\n\")\n vesc_engine.start_moving(vesc_engine.PROPULSION_KEY)\n break\n\n # points filter by distance\n prev_cur_distance = nav.get_distance(prev_pos, cur_pos)\n if config.ALLOW_GPS_PREV_CUR_DIST_STOP and prev_cur_distance > config.PREV_CUR_POINT_MAX_DIST:\n vesc_engine.stop_moving(vesc_engine.PROPULSION_KEY)\n msg = f\"Stopping the robot due to GPS points filter by distance (assuming current position point \" \\\n f\"{str(cur_pos)} is wrong as distance between current position and prev. 
position {str(prev_pos)}\" \\\n f\" is bigger than config.PREV_CUR_POINT_MAX_DIST={str(config.PREV_CUR_POINT_MAX_DIST)})\"\n logger_full.write_and_flush(msg + \"\\n\")\n data_collector.add_vesc_moving_time_data(\n vesc_engine.get_last_movement_time(vesc_engine.PROPULSION_KEY))\n\n prev_bad_quality_pos_obj = cur_pos_obj\n gps_reconnect_ts = distance_wait_start_ts = time.time()\n\n while True:\n if time.time() - distance_wait_start_ts > config.GPS_DIST_WAIT_TIME_MAX:\n msg = f\"Stopping waiting for good prev-cur distance due to timeout, using current point \" \\\n f\"{cur_pos} and starting moving again\"\n if config.VERBOSE:\n print(msg)\n logger_full.write_and_flush(msg + \"\\n\")\n vesc_engine.start_moving(vesc_engine.PROPULSION_KEY)\n break\n\n cur_pos_obj = gps.get_last_position_v2()\n cur_pos = cur_pos_obj.as_old_list\n\n # check if it's a new point\n if math.isclose(cur_pos_obj.creation_ts, prev_bad_quality_pos_obj.creation_ts):\n # reconnect gps adapter to ublox if there's no gps points for a while\n if time.time() - gps_reconnect_ts > config.GPS_POINT_TIME_BEFORE_RECONNECT:\n gps.reconnect()\n gps_reconnect_ts = time.time()\n msg = \"Called GPS adapter to reconnect to ublox due to waiting too much for a new \" \\\n \"GPS point (distance filter)\"\n if config.VERBOSE:\n print(msg)\n logger_full.write_and_flush(msg + \"\\n\")\n continue\n else:\n prev_bad_quality_pos_obj = cur_pos_obj\n\n # check if it's a good quality point or ignore point quality if bad quality stop is not allowed\n if cur_pos[2] != \"4\" and config.ALLOW_GPS_BAD_QUALITY_NTRIP_RESTART:\n # restart ntrip if enough time passed since the last ntrip restart\n navigation.NavigationV3.restart_ntrip_service(logger_full)\n continue\n\n # check if distance became ok\n prev_cur_distance = nav.get_distance(prev_pos, cur_pos)\n if prev_cur_distance <= config.PREV_CUR_POINT_MAX_DIST:\n msg = f\"Starting moving again after GPS points filter by distance as distance become OK \" \\\n f\"({str(prev_cur_distance)})\"\n logger_full.write_and_flush(msg + \"\\n\")\n vesc_engine.start_moving(vesc_engine.PROPULSION_KEY)\n break\n\n point_reading_t = time.time()\n\n trajectory_saver.save_point(cur_pos)\n if ui_msg_queue is not None and time.time()-last_send_gps_time >= 1:\n try:\n ui_msg_queue_send_ts = time.time()\n ui_msg_queue.send(json.dumps(\n {\"last_gps\": cur_pos}), timeout=config.QUEUE_WAIT_TIME_MAX)\n last_send_gps_time = time.time()\n\n if config.QUEUE_TRACK_PERFORMANCE:\n ui_msg_queue_send_et = last_send_gps_time - ui_msg_queue_send_ts\n if ui_msg_queue_send_et < ui_msg_queue_perf[\"min_time\"]:\n ui_msg_queue_perf[\"min_time\"] = ui_msg_queue_send_et\n if ui_msg_queue_send_et > ui_msg_queue_perf[\"max_time\"]:\n ui_msg_queue_perf[\"max_time\"] = ui_msg_queue_send_et\n ui_msg_queue_perf[\"total_time\"] += ui_msg_queue_send_et\n ui_msg_queue_perf[\"total_sends\"] += 1\n except posix_ipc.BusyError:\n msg = f\"Current position wasn't sent to ui_msg_queue likely due to sending timeout \" \\\n f\"(max wait time: config.QUEUE_WAIT_TIME_MAX={config.QUEUE_WAIT_TIME_MAX}\"\n logger_full.write(msg + \"\\n\")\n\n if config.QUEUE_TRACK_PERFORMANCE:\n ui_msg_queue_perf[\"timeouts_exceeded\"] += 1\n\n if config.CONTINUOUS_INFORMATION_SENDING and not degraded_navigation_mode:\n notification.set_current_coordinate(cur_pos)\n\n distance = nav.get_distance(cur_pos, coords_from_to[1])\n\n last_corridor_side = current_corridor_side\n perpendicular, current_corridor_side = nav.get_deviation(\n coords_from_to[0], coords_from_to[1], cur_pos)\n\n # 
stop the robot if it has left the field\n if enable_field_leaving_protection:\n for pt_idx in range(len(cur_field)):\n last_point = pt_idx + 1 == len(cur_field)\n\n if last_point:\n deviation, side = nav.get_deviation(cur_field[pt_idx], cur_field[0], cur_pos)\n else:\n deviation, side = nav.get_deviation(cur_field[pt_idx], cur_field[pt_idx + 1], cur_pos)\n\n if side == -1 and deviation > config.LEAVING_PROTECTION_DISTANCE_MAX:\n vesc_engine.stop_moving(vesc_engine.PROPULSION_KEY)\n data_collector.add_vesc_moving_time_data(\n vesc_engine.get_last_movement_time(vesc_engine.PROPULSION_KEY))\n msg = f\"Robot is stopped due to leaving the field. Cur pos: '{str(cur_pos)}'; \" \\\n f\"Field comparison vector - P1: '{str(cur_field[pt_idx])}', \" \\\n f\"P2: '{str(cur_field[0] if last_point else cur_field[pt_idx + 1])}'\"\n print(msg)\n logger_full.write_and_flush(msg + \"\\n\")\n notification.set_robot_state(RobotStates.OUT_OF_SERVICE)\n exit()\n\n # check if arrived\n _, side = nav.get_deviation(\n coords_from_to[1], stop_helping_point, cur_pos)\n # if distance <= config.COURSE_DESTINATION_DIFF: # old way\n if side != 1: # TODO: maybe should use both side and distance checking methods at once\n vesc_engine.stop_moving(vesc_engine.PROPULSION_KEY)\n data_collector.add_vesc_moving_time_data(\n vesc_engine.get_last_movement_time(vesc_engine.PROPULSION_KEY))\n # msg = \"Arrived (allowed destination distance difference \" + str(config.COURSE_DESTINATION_DIFF) + \" mm)\"\n # TODO: service will reload script even if it done his work?\n msg = \"Arrived to \" + str(coords_from_to[1])\n # print(msg)\n logger_full.write(msg + \"\\n\")\n\n # put the wheel straight\n if wheels_straight:\n response = smoothie.custom_move_to(A_F=config.A_F_MAX, A=0)\n if response != smoothie.RESPONSE_OK: # TODO: what if response is not ok?\n msg = \"Couldn't turn wheels to center (0), smoothie response:\\n\" + \\\n response\n print(msg)\n logger_full.write(msg + \"\\n\")\n else:\n # save wheels angle\n with open(config.LAST_ANGLE_WHEELS_FILE, \"w+\") as wheels_angle_file:\n wheels_angle_file.write(\n str(smoothie.get_adapter_current_coordinates()[\"A\"]))\n break\n\n # TODO check for bug: arrival check applies single speed for all path (while multiple speeds are applied)\n # check if can arrived\n if vesc_engine.get_current_rpm(vesc_engine.PROPULSION_KEY) / config.MULTIPLIER_SI_SPEED_TO_RPM * \\\n config.MANEUVERS_FREQUENCY > nav.get_distance(cur_pos, coords_from_to[1]):\n vesc_engine.stop_moving(vesc_engine.PROPULSION_KEY)\n data_collector.add_vesc_moving_time_data(\n vesc_engine.get_last_movement_time(vesc_engine.PROPULSION_KEY))\n msg = \"Will have arrived before the next point to \" + \\\n str(coords_from_to[1])\n # print(msg)\n logger_full.write(msg + \"\\n\")\n\n break\n\n # reduce speed if near the target point\n if config.USE_SPEED_LIMIT:\n distance_from_start = nav.get_distance(coords_from_to[0], cur_pos)\n close_to_end = distance < config.DECREASE_SPEED_TRESHOLD or distance_from_start < config.DECREASE_SPEED_TRESHOLD\n\n msg = \"Distance to B: \" + str(distance)\n # print(msg)\n logger_full.write(msg + \"\\n\")\n\n msg = \"Prev: \" + str(prev_pos) + \" Cur: \" + str(cur_pos) + \" A: \" + str(coords_from_to[0]) \\\n + \" B: \" + str(coords_from_to[1])\n # print(msg)\n logger_full.write(msg + \"\\n\")\n\n # pass by cur points which are very close to prev point to prevent angle errors when robot is staying\n # (too close points in the same position can produce false huge angles)\n\n 
navigation_prediction.run_prediction(coords_from_to, cur_pos)\n\n # raw_angle_cruise = nav.get_angle(coords_from_to[0], cur_pos, cur_pos, coords_from_to[1])\n # raw_angle_legacy = nav.get_angle(prev_pos, cur_pos, cur_pos, coords_from_to[1])\n raw_angle_centroid = nav.get_angle(\n prev_pos, cur_pos, coords_from_to[0], coords_from_to[1])\n raw_angle_cruise = - current_corridor_side * math.log(1+perpendicular)\n\n if nav.get_distance(coords_from_to[0], coords_from_to[1]) < config.CORNER_THRESHOLD and nav.get_distance(coords_from_to[1], future_points[0][0]) < config.CORNER_THRESHOLD:\n # if abs(raw_angle_legacy)>config.LOST_THRESHOLD:\n centroid_factor = config.CENTROID_FACTOR_LOST\n cruise_factor = 1/centroid_factor\n else:\n centroid_factor = config.CENTROID_FACTOR_ORIENTED\n cruise_factor = 1\n\n raw_angle = raw_angle_centroid*centroid_factor + raw_angle_cruise*cruise_factor\n\n # raw_angle = butter_lowpass_filter(raw_angle, 0.5, 4, 6)\n\n if config.LEARN_GO_STRAIGHT:\n if config.MIN_PERPENDICULAR_GO_STRAIGHT >= perpendicular:\n learn_go_straight_index += 1\n learn_go_straight_history.append(raw_angle)\n if len(learn_go_straight_history) >= config.VALUES_LEARN_GO_STRAIGHT:\n learn_go_straight = sum(\n learn_go_straight_history)/len(learn_go_straight_history)\n msg = f\"Average angle applied to the wheel for the robot to have found : {learn_go_straight}.\"\n logger_full.write_and_flush(msg + \"\\n\")\n # TODO opening and closing file 4 times per second\n with open(config.LEARN_GO_STRAIGHT_FILE, \"w+\") as learn_go_straight_file:\n learn_go_straight_file.write(str(learn_go_straight))\n else:\n learn_go_straight_index = 0\n\n # NAVIGATION STATE MACHINE\n if prev_cur_distance < config.PREV_CUR_POINT_MIN_DIST:\n raw_angle = last_correct_raw_angle\n # print(\"The distance covered is low\")\n point_status = \"skipped\"\n\n # register the last position where the robot almost stop\n # in order to disable the deviation servo for a config.POURSUIT_LIMIT length and then resume in cruise\n last_skipped_point = cur_pos\n else:\n last_correct_raw_angle = raw_angle\n point_status = \"correct\"\n\n almost_start = nav.get_distance(last_skipped_point, cur_pos)\n\n # sum(e)\n if len(raw_angles_history) >= config.WINDOW:\n raw_angles_history.pop(0)\n raw_angles_history.append(raw_angle)\n # print(\"len(raw_angles_history):\",len(raw_angles_history))\n sum_angles = sum(raw_angles_history)\n if sum_angles > config.SUM_ANGLES_HISTORY_MAX:\n msg = \"Sum angles \" + str(sum_angles) + \" is bigger than max allowed value \" + \\\n str(config.SUM_ANGLES_HISTORY_MAX) + \", setting to \" + \\\n str(config.SUM_ANGLES_HISTORY_MAX)\n # print(msg)\n logger_full.write(msg + \"\\n\")\n # Get Ready to go down as soon as the angle get negatif\n raw_angles_history[len(raw_angles_history) -\n 1] -= sum_angles - config.SUM_ANGLES_HISTORY_MAX\n sum_angles = config.SUM_ANGLES_HISTORY_MAX\n elif sum_angles < -config.SUM_ANGLES_HISTORY_MAX:\n msg = \"Sum angles \" + str(sum_angles) + \" is less than min allowed value \" + \\\n str(-config.SUM_ANGLES_HISTORY_MAX) + \", setting to \" + \\\n str(-config.SUM_ANGLES_HISTORY_MAX)\n # print(msg)\n logger_full.write(msg + \"\\n\")\n # get Ready to go up as soon as the angle get positive:\n raw_angles_history[len(raw_angles_history)-1] += - \\\n sum_angles - config.SUM_ANGLES_HISTORY_MAX\n sum_angles = -config.SUM_ANGLES_HISTORY_MAX\n\n # KP = 0.2*0,55\n # KI = 0.0092*0,91\n\n KP = getSpeedDependentConfigParam(\n config.KP, SI_speed, \"KP\", logger_full)\n KI = getSpeedDependentConfigParam(\n 
config.KI, SI_speed, \"KI\", logger_full)\n\n angle_kp_ki = raw_angle * KP + sum_angles * KI\n\n # smoothie -Value == left, Value == right\n target_angle_sm = angle_kp_ki * -config.A_ONE_DEGREE_IN_SMOOTHIE\n # target_angle_sm = 0 #Debug COVID_PLACE\n ad_wheels_pos = smoothie.get_adapter_current_coordinates()[\"A\"]\n # sm_wheels_pos = smoothie.get_smoothie_current_coordinates()[\"A\"]\n sm_wheels_pos = \"off\"\n\n # compute order angle (smoothie can't turn for huge values immediately also as cancel movement,\n # so we need to do nav. actions in steps)\n order_angle_sm = target_angle_sm - ad_wheels_pos\n\n # check for out of update frequency and smoothie execution speed range (for nav wheels)\n if order_angle_sm > config.MANEUVERS_FREQUENCY * config.A_DEGREES_PER_SECOND * \\\n config.A_ONE_DEGREE_IN_SMOOTHIE:\n msg = \"Order angle changed from \" + str(order_angle_sm) + \" to \" + str(\n config.MANEUVERS_FREQUENCY * config.A_DEGREES_PER_SECOND +\n config.A_ONE_DEGREE_IN_SMOOTHIE) + \" due to exceeding degrees per tick allowed range.\"\n # print(msg)\n logger_full.write(msg + \"\\n\")\n order_angle_sm = config.MANEUVERS_FREQUENCY * config.A_DEGREES_PER_SECOND * \\\n config.A_ONE_DEGREE_IN_SMOOTHIE\n elif order_angle_sm < -(config.MANEUVERS_FREQUENCY * config.A_DEGREES_PER_SECOND *\n config.A_ONE_DEGREE_IN_SMOOTHIE):\n msg = \"Order angle changed from \" + str(order_angle_sm) + \" to \" + str(-(\n config.MANEUVERS_FREQUENCY * config.A_DEGREES_PER_SECOND *\n config.A_ONE_DEGREE_IN_SMOOTHIE)) + \" due to exceeding degrees per tick allowed range.\"\n # print(msg)\n logger_full.write(msg + \"\\n\")\n order_angle_sm = -(config.MANEUVERS_FREQUENCY * config.A_DEGREES_PER_SECOND *\n config.A_ONE_DEGREE_IN_SMOOTHIE)\n\n # convert to global smoothie coordinates\n order_angle_sm += ad_wheels_pos\n\n # checking for out of smoothie supported range\n if order_angle_sm > config.A_MAX:\n msg = \"Global order angle changed from \" + str(order_angle_sm) + \" to config.A_MAX = \" + \\\n str(config.A_MAX) + \\\n \" due to exceeding smoothie allowed values range.\"\n # print(msg)\n logger_full.write(msg + \"\\n\")\n order_angle_sm = config.A_MAX\n elif order_angle_sm < config.A_MIN:\n msg = \"Global order angle changed from \" + str(order_angle_sm) + \" to config.A_MIN = \" + \\\n str(config.A_MIN) + \\\n \" due to exceeding smoothie allowed values range.\"\n # print(msg)\n logger_full.write(msg + \"\\n\")\n order_angle_sm = config.A_MIN\n\n # cork x movement during periphery scans control\n if config.ALLOW_X_MOVEMENT_DURING_SCANS:\n if x_scan_idx_increasing:\n x_scan_cur_idx += 1\n if x_scan_cur_idx >= len(config.X_MOVEMENT_CAMERA_POSITIONS):\n x_scan_idx_increasing = False\n x_scan_cur_idx -= 2\n else:\n x_scan_cur_idx -= 1\n if x_scan_cur_idx < 0:\n x_scan_idx_increasing = True\n x_scan_cur_idx += 2\n # TODO do we check SI_speed earlier and do proper calculations and angle validations if here we'll get here a negative order angle instead of positive?\n response = smoothie.custom_move_to(\n A_F=config.A_F_MAX,\n A=order_angle_sm if SI_speed >= 0 else -order_angle_sm,\n X_F=config.X_MOVEMENT_CAMERA_X_F[x_scan_cur_idx] if config.ALLOW_X_MOVEMENT_DURING_SCANS else None,\n X=config.X_MOVEMENT_CAMERA_POSITIONS[x_scan_cur_idx] if config.ALLOW_X_MOVEMENT_DURING_SCANS else None\n )\n\n if response != smoothie.RESPONSE_OK:\n msg = \"Couldn't turn wheels! 
Smoothie response:\\n\" + response\n print(msg)\n logger_full.write(msg + \"\\n\")\n else:\n # TODO opening and closing file too often (likely 4 times per second)\n # save wheels angle\n with open(config.LAST_ANGLE_WHEELS_FILE, \"w+\") as wheels_angle_file:\n wheels_angle_file.write(\n str(smoothie.get_adapter_current_coordinates()[\"A\"]))\n\n raw_angle = round(raw_angle, 2)\n angle_kp_ki = round(angle_kp_ki, 2)\n order_angle_sm = round(order_angle_sm, 2)\n sum_angles = round(sum_angles, 2)\n distance = round(distance, 2)\n ad_wheels_pos = round(ad_wheels_pos, 2)\n perpendicular = round(perpendicular, 2)\n # sm_wheels_pos = round(sm_wheels_pos, 2)\n gps_quality = cur_pos[2]\n corridor = \"\"\n if current_corridor_side == -1:\n corridor = \"left\"\n elif current_corridor_side == 1:\n corridor = \"right\"\n\n raw_angle_cruise = round(raw_angle_cruise, 2)\n\n msg = str(gps_quality).ljust(5) + \\\n str(raw_angle).ljust(8) + \\\n str(angle_kp_ki).ljust(8) + \\\n str(order_angle_sm).ljust(8) + \\\n str(sum_angles).ljust(8) + \\\n str(distance).ljust(13) + \\\n str(ad_wheels_pos).ljust(8) + \\\n str(sm_wheels_pos).ljust(9) + \\\n point_status.ljust(12) + \\\n str(perpendicular).ljust(10) + \\\n corridor.ljust(9) + \\\n str(centroid_factor).ljust(16) + \\\n str(cruise_factor).ljust(14)\n print(msg)\n logger_full.write(msg + \"\\n\")\n\n # TODO vesc sensors are being asked 4 times per second\n # send voltage and track bumper state\n vesc_data = vesc_engine.get_sensors_data(\n report_field_names, vesc_engine.PROPULSION_KEY)\n if vesc_data is not None and \"input_voltage\" in vesc_data:\n if bumper_is_pressed is None:\n bumper_is_pressed = not vesc_data[\"input_voltage\"] > config.VESC_BUMBER_UNTRIGGER_VOLTAGE\n if bumper_is_pressed:\n msg = f\"Bumper is pressed initially before starting moving to point. \" \\\n f\"({vesc_data['input_voltage']}V)\"\n logger_full.write(msg + \"\\n\")\n elif not bumper_is_pressed and vesc_data[\"input_voltage\"] < config.VESC_BUMBER_TRIGGER_VOLTAGE:\n bumper_is_pressed = True\n msg = f\"Bumper was pressed. ({vesc_data['input_voltage']}V)\"\n logger_full.write(msg + \"\\n\")\n elif bumper_is_pressed and vesc_data[\"input_voltage\"] > config.VESC_BUMBER_UNTRIGGER_VOLTAGE:\n bumper_is_pressed = False\n msg = f\"Bumper was unpressed. ({vesc_data['input_voltage']}V)\"\n logger_full.write(msg + \"\\n\")\n\n if config.CONTINUOUS_INFORMATION_SENDING:\n notification.set_input_voltage(vesc_data[\"input_voltage\"])\n\n prev_pos_obj = cur_pos_obj\n prev_pos = prev_pos_obj.as_old_list\n\n msg = \"Nav calc time: \" + str(time.time() - nav_start_t)\n logger_full.write(msg + \"\\n\\n\")\n\n if config.QUEUE_TRACK_PERFORMANCE:\n ui_msg_queue_perf[\"avg_time\"] = ui_msg_queue_perf[\"total_time\"] / \\\n ui_msg_queue_perf[\"total_sends\"]\n msg = f\"Position sending performance report: {ui_msg_queue_perf}\"\n if config.VERBOSE:\n print(msg)\n logger_full.write(msg + \"\\n\")",
"def detect_with_tracking(self, video_path, k_frames, per_frames = 1, offset = None):\n\n font = cv2.FONT_HERSHEY_SIMPLEX\n \n current_face_id = 0\n current_frame = 0\n \n face_trackers = {}\n confidence = {}\n\n info = []\n \n cap = cv2.VideoCapture(video_path)\n \n if not cap.isOpened():\n raise Exception(\"Video file does not exist or is invalid\")\n\n if offset:\n cap.set(cv2.CAP_PROP_POS_MSEC, offset)\n \n while cap.isOpened() :\n ret, frame = cap.read()\n if ret:\n if cap.get(cv2.CAP_PROP_POS_FRAMES) % per_frames == 0:\n face_ids_to_delete = []\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n for fid in face_trackers.keys():\n tracking_quality = face_trackers[ fid ].update( frame )\n\n if tracking_quality < 7:\n face_ids_to_delete.append( fid )\n\n for fid in face_ids_to_delete:\n face_trackers.pop(fid)\n\n if (cap.get(cv2.CAP_PROP_POS_FRAMES) % k_frames)==0 or cap.get(cv2.CAP_PROP_POS_FRAMES) == 1:\n faces_info = self.detect_faces_from_image(frame,\n desired_width=224, desired_height=224) \n if faces_info:\n for element in faces_info:\n bbox = element[0][0]\n confidence[ current_face_id ] = round(element[5], 3)\n x = bbox.left()\n y = bbox.top()\n width = bbox.width()\n height = bbox.height()\n\n x_center = x + 0.5 * width\n y_center = y + 0.5 * height\n\n matched_fid = None\n\n for fid in face_trackers.keys():\n tracked_position = face_trackers[fid].get_position()\n\n t_x = int(tracked_position.left())\n t_y = int(tracked_position.top())\n t_w = int(tracked_position.width())\n t_h = int(tracked_position.height())\n\n t_x_center = t_x + 0.5 * t_w\n t_y_center = t_y + 0.5 * t_h\n\n if ( ( t_x <= x_center <= (t_x + t_w)) and \n ( t_y <= y_center <= (t_y + t_h)) and \n ( x <= t_x_center <= (x + width)) and \n ( y <= t_y_center <= (y + height))):\n matched_fid = fid\n\n if matched_fid is None:\n\n tracker = dlib.correlation_tracker()\n tracker.start_track(frame,\n dlib.rectangle( x,\n y,\n x+width,\n y+height))\n\n face_trackers[ current_face_id ] = tracker\n current_face_id += 1\n\n for fid in face_trackers.keys():\n t_x, t_y, t_w, t_h, label, decision_value = self._process_tracked_face(fid, face_trackers, frame)\n t_bbox = dlib.rectangle(t_x, t_y, t_x+t_w, t_y+t_h)\n info.append([\n cap.get(cv2.CAP_PROP_POS_FRAMES), fid, t_bbox, (t_w, t_h), label,\n decision_value, confidence[fid]\n ])\n\n\n else: \n break\n cap.release()\n track_res = pd.DataFrame.from_records(info, columns = ['frame', 'faceid', 'bb', 'size','label', 'decision', 'conf'])\n info = _smooth_labels(track_res)\n \n return info",
"def _create_new_detections(self, boxes_contours, used_boxes):\n for bb in boxes_contours[used_boxes == False]:\n d = Detection(bb)\n self.detections.append(d)",
"def _raw_face_locations(img, number_of_times_to_upsample=1, model=\"hog\"):\n if model == \"cnn\":\n cnn_face_detector = dlib.cnn_face_detection_model_v1('mmod_human_face_detector.dat')\n return cnn_face_detector(img, number_of_times_to_upsample)\n else:\n # face_detector = dlib.get_frontal_face_detector()\n return face_detector(img, number_of_times_to_upsample)",
"def make_spots(self, spots):\n dummy_na_parameters = [0,0,1,0]\n if len(spots[0]) == 4:\n for x in spots:\n x.extend(dummy_na_parameters) #if the spots are missing NA information, add it\n # for x in spots:\n # x[3] = I_cal(x[3])\n spots = np.array(spots)\n assert spots.shape[1]==8, \"Spots are 8 elements long - your array must be (n,8)\"\n self.set_uniform(0, np.reshape(spots,spots.shape[0]*spots.shape[1]))\n self.set_uniform(1, spots.shape[0])",
"def _draw_detections(frame, frame_detections):\n boxColor = (0,255,0)\n for box in frame_detections:\n cv2.rectangle(frame,(int(box[0]),int(box[1])),(int(box[2]),int(box[3])),boxColor,7)\n # cv2.rectangle(frame,(int(box[0]),int(box[1])),(int(box[2]),int(box[3])),boxColor,7)\n cv2.putText(frame,str(format(box[4],'.2f')),(int(box[0]),int(box[3]+20)),cv2.FONT_HERSHEY_SIMPLEX,0.6,boxColor,1,cv2.LINE_AA)\n\n return frame",
"def spot_coords(self,spot):\n if spot == '1':\n return (330 - 60 ,335 - 15)\n if spot == '2':\n return (419 - 60, 335 - 15)\n if spot == '3':\n return (591 - 60, 159 - 15)\n if spot == '4':\n return (588 - 60, 248 - 15)",
"def detect(self, features):\n pass # TODO",
"def detect_points(self):\r\n\r\n\t\r\n\r\n\t\tfeature_mask = np.zeros_like(self.gray) ## Create a mask so we only look for template features in the ROI\r\n\t\t\r\n\t\tfeature_mask[max(0,self.bb[1]):min(360,self.bb[1] + self.bb[3]),max(0,self.bb[0]):min(640,self.bb[0] + self.bb[2])] = 255\r\n\r\n\t\t# search for good points\r\n\t\tfeatures = cv2.goodFeaturesToTrack(self.gray, mask = feature_mask, **feature_params)\r\n\t\t# refine the corner locations\r\n\t\tcv2.cornerSubPix(self.gray,features, **subpix_params)\r\n\r\n\t\tself.features = features\r\n\r\n\t\tself.tracks = [[p] for p in features.reshape((-1,2))]\r\n\r\n\t\tself.prev_gray = self.gray",
"def _detect_person(self, frame, threshold=0.6):\n frames = np.expand_dims(frame, axis=0)\n # Actual detection.\n (boxes, scores, classes, num) = self.sess.run(\n [self.detection_boxes, self.detection_scores,\n self.detection_classes, self.num_detections],\n feed_dict={self.image_tensor: frames})\n\n # Find detected boxes coordinates\n return self._boxes_coordinates(\n frame,\n boxes[0],\n classes[0].astype(np.int32),\n scores[0],\n min_score_thresh=threshold\n )",
"def run_detection(params):\n # make a defaultdict out of @parameters so that we could always access its first-level keys\n params_default_first_level = defaultdict(dict)\n params_default_first_level.update(params)\n params = params_default_first_level\n\n verbose = params['GeneralArguments'].get('verbose', False)\n\n out_folder = params['GeneralArguments'].get('output_folder')\n if out_folder is None:\n out_folder = tempfile.mkdtemp(prefix='sp_tool_')\n warnings.warn('No output folder provided, using {}'.format(out_folder))\n if verbose:\n print >> sys.stderr, 'Outputs will be written to folder', out_folder\n\n saccade_detector = SaccadeDetector(**params['SaccadeDetector'])\n blink_detector = BlinkDetector(**params['BlinkDetector'])\n fixation_detector = FixationDetector(**params['FixationDetector'])\n\n recording_processor = RecordingProcessor(saccade_detector=saccade_detector,\n blink_detector=blink_detector,\n fixation_detector=fixation_detector)\n\n sp_detector = SmoothPursuitDetector(**params['SmoothPursuitDetector'])\n\n # The next lines deal with identifying the names of the video clips used for the eye tracking experiment.\n # Can be initialized in various ways, here we just get all video paths be regex and cut off everything that\n # is not needed.\n #\n #\n in_folder = params['GeneralArguments'].get('input_folder')\n if not in_folder:\n raise ValueError('\\'input_folder\\' is a required parameter of the \\'GeneralArguments\\' group in @params!')\n folder_names = sorted(glob.glob('{}/*/'.format(in_folder))) # getting all the folders of the input folder\n # extract names from path\n if not folder_names and verbose:\n print >> sys.stderr, 'No subfolders found under \"{}\"'.format(in_folder)\n folder_names = [os.path.splitext(os.path.basename(folder.rstrip('/')))[0] for folder in folder_names]\n\n movies = params['GeneralArguments'].get('movies')\n if movies: # not empty, restrict to these folders only\n movies = set(movies)\n folder_names = [fn for fn in folder_names if fn in movies]\n\n if verbose:\n print >> sys.stderr, 'Working with movies:', folder_names\n\n # data files extension\n gaze_pattern = params['GeneralArguments'].get('gaze_file_pattern', '*.coord')\n if '*' not in gaze_pattern:\n gaze_pattern = '*' + gaze_pattern\n\n for movie in folder_names:\n full_out_folder = '{}/{}/'.format(out_folder, movie)\n if not os.path.exists(full_out_folder):\n os.makedirs(full_out_folder)\n if verbose:\n print >> sys.stderr, 'Started processing for {},'.format(movie), 'results will appear in', full_out_folder\n\n # The next lines load the data files of the recording with one particular movie.\n # To do this, here we provide a regex that includes all the .{extension} files in the respective folder.\n #\n #\n gaze_data_files = sorted(glob.glob('{}/{}/{}'.format(in_folder, movie, gaze_pattern)))\n if len(gaze_data_files) == 0:\n print >> sys.stderr, 'Found 0 files with this pattern: \"{}\". 
Omitting this directory.'.format(\n '{}/{}/{}'.format(in_folder, movie, gaze_pattern)\n )\n continue\n try:\n # The next line loads the data, labels saccades, blinks and fixations.\n gaze_points_list = recording_processor.load_multiple_recordings(\n gaze_data_files, verbose=verbose, data_format=params['GeneralArguments'].get('input_data_type'))\n # This will label the smooth pursuits\n if verbose:\n print >> sys.stderr, 'Saccades/blinks/fixations are detected, starting SP detection.'\n classified_gaze_points = sp_detector.detect(gaze_points_list)\n\n # Now just dump the resulting structure into .arff files in the respective subdirectory of the @out_folder\n for file_name, arff_data in zip(gaze_data_files, classified_gaze_points):\n output_file_name = os.path.splitext(os.path.basename(file_name))[0]\n ArffHelper.dump(arff_data, open(\n '{}/{}.arff'.format(full_out_folder, output_file_name), 'w')).close()\n except Exception as e:\n print >> sys.stderr, 'Had to skip {} due to an error \"{}\"'.format(movie, e.message)\n return out_folder",
"def match_detections(self, detection_positions, tracker_positions):\n matches = []\n unmatched_detections = []\n unmatched_trackers = []\n\n if(len(tracker_positions)==0):\n unmatched_detections = np.arange(len(detection_positions))\n return matches, unmatched_detections, unmatched_trackers\n\n dist = distance_matrix(tracker_positions, detection_positions)\n trk_rows, det_cols = solve_dense(dist)\n\n #distance threshold\n solved_dist = np.array([dist[trk_rows[i], det_cols[i]] for i in range(len(trk_rows))])\n solved_dist_bool = solved_dist < self.distance_threshold\n trk_rows_sel = trk_rows[solved_dist_bool]\n det_cols_sel = det_cols[solved_dist_bool]\n\n matches = np.concatenate([det_cols_sel.reshape(-1,1), trk_rows_sel.reshape(-1,1)], axis=1)\n unmatched_detections = np.array([i for i in range(len(detection_positions)) if i not in det_cols_sel])\n unmatched_trackers = np.array([i for i in range(len(tracker_positions)) if i not in trk_rows_sel])\n return matches, unmatched_detections, unmatched_trackers"
] | [
"0.6162223",
"0.56548464",
"0.5607804",
"0.5593707",
"0.54353815",
"0.5421498",
"0.52761436",
"0.5211606",
"0.50746685",
"0.50697035",
"0.5038055",
"0.50195175",
"0.5013972",
"0.5009768",
"0.4988007",
"0.4983076",
"0.49769273",
"0.49657902",
"0.49529868",
"0.49395737",
"0.49226576",
"0.49158722",
"0.49154973",
"0.4905897",
"0.49052924",
"0.48598114",
"0.48586333",
"0.4819446",
"0.48174676",
"0.47839192"
] | 0.7540299 | 0 |
DEPRECATED Jump to next paragraph. This method goes through all the detected spots and fits a specified spot_model separately to each of them. TODO DONE If a model cannot be safely fit to a spot, that spot is deprecated and deleted from the spots list. Spot_models are built in the fitters module; extract_cube comes from the utils module. A GMM from sklearn's mixture module is fit to the dataset. To do so (and to avoid too large a dataset) the pixel values | def fit_spots(self, spot_model=Mixture, kind='individual'):
model = spot_model()
# print(model)
# if model.kind == 'individual':
#
# loop = self.spots
#
# # to_delete = []
# if self._verbose > 0:
# loop = tqdm.tqdm(loop, desc="Fitting spot models...")
#
# to_delete = []
# for k in loop:
# spot = self.image_filtered[extract_cube(point=k.coordinates, side=get_focus_size())]
# centers = [get_focus_size() // 2, ] * 3
# results = model.fit(centers=centers, data=spot)
#
# # Filter spots for which a model could not be fit.
# if results:
# model.params = list(k.coordinates) + list(model.params)
# k.model = model
# else:
# to_delete.append(k)
#
# # Filter spots and store in dict
# self.spots = [k for k in self.spots if k not in to_delete]
#
# self.mixture_model = lambda x, y, z: sum([s.model.function(*s.model.params)(x, y, z) for s in self.spots])
if kind == 'collective':
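            # Build a boolean mask keeping only the pixels inside an ellipsoid around each
            # detected spot, so the joint (collective) fit is restricted to data near the spots.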
mask = numpy.zeros(self.image_filtered.shape)
for s in self.spots:
mask[ellipse_in_shape(mask.shape, s.coordinates, (10, 10, 5))] = 1
mask = mask.astype(bool)
results = model.fit(centers=[s.coordinates for s in self.spots], data=self.image_filtered, mask=mask)
if results:
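                # model.params comes back as a flat vector; reshape it to one row of 4
                # parameters per spot and attach an individual Gaussian model to each spot.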
params = model.params.reshape(-1, 4)
for s, p in zip(self.spots, params):
s.model = Gaussian()
s.model.params = p
print(model.params)
centers = [s.coordinates for s in self.spots]
backgrounds = [[0], ] * len(self.spots)
print(centers)
print(backgrounds)
self.mixture_model = model.function
if self._verbose > 0:
time.sleep(0.1)
print('%i spots fit.' % len(self.spots)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _detect_spots(self, detector=LocalMax, **kwargs):\n if self._verbose > 0:\n print(\"Detecting...\", end=\"\")\n\n spots = detector(**kwargs).locate(self.image_filtered)\n\n # Spots are identified by their position:\n self.spots = [Spot(tuple(s)) for s in spots]\n if self._verbose > 0:\n print('%i spots detected.' % len(self.spots))",
"def make_spots(self, spots):\n dummy_na_parameters = [0,0,1,0]\n if len(spots[0]) == 4:\n for x in spots:\n x.extend(dummy_na_parameters) #if the spots are missing NA information, add it\n # for x in spots:\n # x[3] = I_cal(x[3])\n spots = np.array(spots)\n assert spots.shape[1]==8, \"Spots are 8 elements long - your array must be (n,8)\"\n self.set_uniform(0, np.reshape(spots,spots.shape[0]*spots.shape[1]))\n self.set_uniform(1, spots.shape[0])",
"def multifit(self, p0s, verbose=True):\n t1 = time.perf_counter()\n # fit first (hopefully larger) spot\n opts1 = self.singlefit(p0s)\n t2 = time.perf_counter()\n if verbose:\n print('FIRST FIT: {0:.2f} s'.format(t2 - t1))\n opts = np.array([])\n for i in range(1, self.n_spots):\n t2 = time.perf_counter()\n p = []\n for p1 in opts1:\n y_r = self.y\n # let current light curve be the residual from previously fitted spots\n self.y = y_r - self.solve(p1) + 1\n opts2 = self.singlefit(p0s, star_params=p1[:3])\n # retrieve original light curve\n self.y = y_r\n for p2 in opts2:\n p.append(np.append(p1, p2[3:]))\n t3 = time.perf_counter()\n if verbose:\n print('MULTIFIT #{1}: {0:.2f} s'.format(t3 - t2, i))\n # for each new spot, do a simultaneous fit of all parameters so far\n opts, sses = self.llsq(p)\n t4 = time.perf_counter()\n if verbose:\n print('SIMULFIT #{1}: {0:.2f} s'.format(t4 - t3, i))\n # sort fits with respect to chi\n mask = np.isfinite(sses)\n sses = np.asarray(sses)[mask]\n opts = np.asarray(opts)[mask]\n sorted_ids = np.argsort(sses)\n opts = opts[sorted_ids]\n # opts stores all spots fitted so far\n opts1 = opts\n t4 = time.perf_counter()\n if verbose:\n print('TOTAL: {0:.2f} s'.format(t4 - t1))\n return opts",
"def spot_detection(data, roi_size=6, blobs=None, processes=None, **kwargs):\n\n if blobs is None:\n blobs = blob_detection(data, **kwargs)\n\n if processes is not None and processes > 1:\n with Pool(processes) as pool:\n spots = pool.map(functools.partial(__spot_detection, data=data, roi_size=roi_size), blobs)\n spots = [spot for spot in spots if not isinstance(spot, LoggingMessage)]\n else:\n spots = []\n for blob in blobs:\n spot = __spot_detection(blob, data, roi_size)\n if isinstance(spot, LoggingMessage):\n _log.log(spot.level, spot.message)\n else:\n spots.append(spot)\n\n _log.info('{} spot(s) were detected'.format(len(spots)))\n spots = numpy.array(spots)\n return spots",
"def detectSpots(img, detectSpotsParameter = None, correctIlluminationParameter = None, removeBackgroundParameter = None,\n filterDoGParameter = None, findExtendedMaximaParameter = None, detectCellShapeParameter = None,\n verbose = False, out = sys.stdout, **parameter):\n\n timer = Timer();\n \n # normalize data -> to check\n #img = img.astype('float');\n #dmax = 0.075 * 65535;\n #ids = img > dmax;\n #img[ids] = dmax;\n #img /= dmax; \n #out.write(timer.elapsedTime(head = 'Normalization'));\n #img = dataset[600:1000,1600:1800,800:830];\n #img = dataset[600:1000,:,800:830];\n \n # correct illumination\n correctIlluminationParameter = getParameter(detectSpotsParameter, \"correctIlluminationParameter\", correctIlluminationParameter);\n img1 = img.copy();\n img1 = correctIllumination(img1, correctIlluminationParameter = correctIlluminationParameter, verbose = verbose, out = out, **parameter) \n\n # background subtraction in each slice\n #img2 = img.copy();\n removeBackgroundParameter = getParameter(detectSpotsParameter, \"removeBackgroundParameter\", removeBackgroundParameter);\n img2 = removeBackground(img1, removeBackgroundParameter = removeBackgroundParameter, verbose = verbose, out = out, **parameter) \n \n # mask\n #timer.reset();\n #if mask == None: #explicit mask\n # mask = img > 0.01;\n # mask = binary_opening(mask, self.structureELement('Disk', (3,3,3)));\n #img[img < 0.01] = 0; # masking in place # extended maxima\n #out.write(timer.elapsedTime(head = 'Mask')); \n \n #DoG filter\n filterDoGParameter = getParameter(detectSpotsParameter, \"filterDoGParameter\", filterDoGParameter);\n dogSize = getParameter(filterDoGParameter, \"size\", None);\n #img3 = img2.copy(); \n img3 = filterDoG(img2, filterDoGParameter = filterDoGParameter, verbose = verbose, out = out, **parameter);\n \n # normalize \n # imax = img.max();\n # if imax == 0:\n # imax = 1;\n # img /= imax;\n \n # extended maxima\n findExtendedMaximaParameter = getParameter(detectSpotsParameter, \"findExtendedMaximaParameter\", findExtendedMaximaParameter);\n hMax = getParameter(findExtendedMaximaParameter, \"hMax\", None);\n imgmax = findExtendedMaxima(img3, findExtendedMaximaParameter = findExtendedMaximaParameter, verbose = verbose, out = out, **parameter);\n \n #center of maxima\n if not hMax is None:\n centers = findCenterOfMaxima(img, imgmax, verbose = verbose, out = out, **parameter);\n else:\n centers = findPixelCoordinates(imgmax, verbose = verbose, out = out, **parameter);\n \n #cell size detection\n detectCellShapeParameter = getParameter(detectSpotsParameter, \"detectCellShapeParameter\", detectCellShapeParameter);\n cellShapeThreshold = getParameter(detectCellShapeParameter, \"threshold\", None);\n if not cellShapeThreshold is None:\n \n # cell shape via watershed\n imgshape = detectCellShape(img2, centers, detectCellShapeParameter = detectCellShapeParameter, verbose = verbose, out = out, **parameter);\n \n #size of cells \n csize = findCellSize(imgshape, maxLabel = centers.shape[0], out = out, **parameter);\n \n #intensity of cells\n cintensity = findCellIntensity(img, imgshape, maxLabel = centers.shape[0], verbose = verbose, out = out, **parameter);\n\n #intensity of cells in background image\n cintensity2 = findCellIntensity(img2, imgshape, maxLabel = centers.shape[0], verbose = verbose, out = out, **parameter);\n \n #intensity of cells in dog filtered image\n if dogSize is None:\n cintensity3 = cintensity2;\n else:\n cintensity3 = findCellIntensity(img3, imgshape, maxLabel = centers.shape[0], verbose = verbose, out 
= out, **parameter);\n \n if verbose:\n out.write(timer.elapsedTime(head = 'Spot Detection') + '\\n');\n \n #remove cell;s of size 0\n idz = csize > 0;\n \n return ( centers[idz], numpy.vstack((cintensity[idz], cintensity3[idz], cintensity2[idz], csize[idz])).transpose()); \n \n \n else:\n #intensity of cells\n cintensity = findIntensity(img, centers, verbose = verbose, out = out, **parameter);\n\n #intensity of cells in background image\n cintensity2 = findIntensity(img2, centers, verbose = verbose, out = out, **parameter);\n \n #intensity of cells in dog filtered image\n if dogSize is None:\n cintensity3 = cintensity2;\n else:\n cintensity3 = findIntensity(img3, centers, verbose = verbose, out = out, **parameter);\n\n if verbose:\n out.write(timer.elapsedTime(head = 'Spot Detection') + '\\n');\n \n return ( centers, numpy.vstack((cintensity, cintensity3, cintensity2)).transpose());",
"def train(self, x_data, y_data):\n for model in self.list_of_models:\n model.fit(x_data, y_data)\n self.trained_models.append(model)",
"def extract_detections(self):\n self.rescue_model.setInput(self.human_blob)\n self.predictions = self.rescue_model.forward()",
"def fit_spots(data, ivar, profile, area=1):\n # Calculate the matrix elements for the linear problem\n # [ M11 M12 ] [ f ] = [ A1 ]\n # [ M12 M22 ] [ b ] [ A2 ]\n M11 = np.sum(ivar * profile ** 2, axis=(-2, -1))\n M12 = np.sum(ivar * area * profile, axis=(-2, -1))\n M22 = np.sum(ivar * area ** 2, axis=(-2, -1))\n A1 = np.sum(ivar * data * profile, axis=(-2, -1))\n A2 = np.sum(ivar * data * area, axis=(-2, -1))\n # Solve the linear problem.\n det = M11 * M22 - M12 ** 2\n M11 /= det\n M12 /= det\n M22 /= det\n f = (M22 * A1 - M12 * A2)\n b = (M11 * A2 - M12 * A1)\n # Calculate the covariance of (f, b).\n cov = np.stack((np.stack((M22, -M12), axis=-1), np.stack((-M12, M11), axis=-1)), axis=-1)\n return f, b, cov",
"def extract_face_detections(self):\n self.detector.setInput(self.image_blob)\n self.detections = self.detector.forward()",
"def _raw_face_locations(img, number_of_times_to_upsample=1, model=\"hog\"):\n\tif model == \"cnn\":\n\t\treturn cnn_face_detector(img, number_of_times_to_upsample)\n\telse:\n\t\treturn face_detector(img, number_of_times_to_upsample)",
"def _process_datasets_all_frames(self):\n datasets = os.listdir(self.separated_root)\n for dataset in datasets:\n dataset_path = join(self.separated_root, dataset)\n\n for model in self.models:\n\n attacks_list = os.listdir(dataset_path)\n\n for attack in attacks_list:\n attack_path = join(dataset_path, attack)\n\n for prop in self.properties:\n property_alias = prop.get_property_alias()\n\n if os.path.exists(\n join(self.output_features, dataset, attack, property_alias, model.alias)):\n print('%s already extracted features' % dataset)\n continue\n\n path_train = join(attack_path, self.train_alias)\n path_test = join(attack_path, self.test_alias)\n\n X_train, y_train, indexes_train, samples_train = self._get_dataset_contents(path_train,\n property_alias)\n X_test, y_test, indexes_test, samples_test = self._get_dataset_contents(path_test,\n property_alias)\n\n output_features = join(self.output_features, dataset, attack, property_alias, model.alias)\n\n features_train = self._fetch_features(X_train, model, output_features, self.train_alias)\n features_test = self._fetch_features(X_test, model, output_features, self.test_alias)\n\n # saving features\n np.save(join(output_features, (NAME_FEATURES % self.train_alias)), features_train)\n np.save(join(output_features, (NAME_FEATURES % self.test_alias)), features_test)\n\n # saving targets\n np.save(join(output_features, (NAME_TARGETS % self.train_alias)), y_train)\n np.save(join(output_features, (NAME_TARGETS % self.test_alias)), y_test)\n np.save(join(output_features, (NAME_TARGETS % self.test_alias)), y_test)\n\n # saving samples names\n self.__save_txt(join(output_features, (NAME_SAMPLES % self.train_alias)), samples_train)\n self.__save_txt(join(output_features, (NAME_SAMPLES % self.test_alias)), samples_test)",
"def image_to_spots(self, data_image: Union[np.ndarray, xr.DataArray]) -> SpotAttributes:\n raise NotImplementedError()",
"def _detect_spots_from_images(images, threshold=None, remove_duplicate=True,\n return_threshold=False, voxel_size_z=None,\n voxel_size_yx=100, psf_z=None, psf_yx=200):\n # initialization\n sigma = stack.get_sigma(voxel_size_z, voxel_size_yx, psf_z, psf_yx)\n n = len(images)\n\n # apply LoG filter and find local maximum\n images_filtered = []\n pixel_values = []\n masks = []\n for image in images:\n # filter image\n image_filtered = stack.log_filter(image, sigma)\n images_filtered.append(image_filtered)\n\n # get pixels value\n pixel_values += list(image_filtered.ravel())\n\n # find local maximum\n mask_local_max = local_maximum_detection(image_filtered, sigma)\n masks.append(mask_local_max)\n\n # get optimal threshold if necessary based on all the images\n if threshold is None:\n\n # get threshold values we want to test\n thresholds = _get_candidate_thresholds(pixel_values)\n\n # get spots count and its logarithm\n all_value_spots = []\n minimum_threshold = float(thresholds[0])\n for i in range(n):\n image_filtered = images_filtered[i]\n mask_local_max = masks[i]\n spots, mask_spots = spots_thresholding(\n image_filtered, mask_local_max,\n threshold=minimum_threshold,\n remove_duplicate=False)\n value_spots = image_filtered[mask_spots]\n all_value_spots.append(value_spots)\n all_value_spots = np.concatenate(all_value_spots)\n thresholds, count_spots = _get_spot_counts(thresholds, all_value_spots)\n\n # select threshold where the kink of the distribution is located\n if count_spots.size > 0:\n threshold, _, _ = _get_breaking_point(thresholds, count_spots)\n\n # detect spots\n all_spots = []\n for i in range(n):\n\n # get images and masks\n image_filtered = images_filtered[i]\n mask_local_max = masks[i]\n\n # detection\n spots, _ = spots_thresholding(image_filtered, mask_local_max,\n threshold, remove_duplicate)\n all_spots.append(spots)\n\n # return threshold or not\n if return_threshold:\n return all_spots, threshold\n else:\n return all_spots",
"def parse_spot_request(request):\n warnings = []\n model_domain, time_str, variables, image = split_fields(request, 4)\n spot, location_str = model_domain.split(':', 1)\n assert spot.lower() == 'spot'\n if ':' in location_str:\n model, location_str = location_str.split(':', 1)\n model = model.lower()\n else:\n model = 'gfs'\n location = parse_location(location_str)\n\n hours, time_warnings = parse_times(time_str)\n warnings.extend(time_warnings)\n\n if variables is None:\n variables = []\n else:\n variables = variables.split(',')\n variables, var_warnings = validate_variables(variables)\n warnings.extend(var_warnings)\n\n send_image = image is not None\n\n return {'type': 'spot',\n 'model': model,\n 'location': location,\n 'hours': hours,\n 'vars': variables,\n 'warnings': warnings,\n 'send-image': send_image}",
"def train_models(self, clf, silent, feature_names=None, target_names=None, live=False):\n X_train, X_test, y_train, y_test = self.X_train, self.X_test, self.y_train, self.y_test\n t0 = time()\n clf.fit(X_train, y_train)\n train_time = time() - t0\n pred = clf.predict(X_test)\n test_time = time() - t0\n accuracy = metrics.accuracy_score(y_test, pred)\n fbeta = metrics.fbeta_score(y_test, pred,1,labels=self.dataset['label'].unique(),average='weighted')\n name = clf.name[0]\n if False:\n score_stats = f'Model : {name} | Score : {accuracy} | F-beta : {fbeta}'\n print(score_stats)\n\n if self.best_score_ledger[name][0] < accuracy:\n last = self.best_score_ledger[name][0]\n print(name)\n self.best_score_ledger[name] = [accuracy,fbeta]\n score_stats = f'Model : {name} | Score : {accuracy} | F-beta : {fbeta}'\n print(self.stemmer, ' ', self.transform)\n print(score_stats)\n\n if accuracy > self.best_models[name] and last != 0.0 and self.tuning_depth in ['normal','maximal']:\n new_model,score = self.hyperparameter_tuning(name,clf)\n if score > accuracy:\n self.best_score_ledger[name][0] = score\n clf = new_model\n dump(clf, os.path.join(os.getcwd(), self.file_term, 'models', f'{\"_\".join([self.uid_base, name])}'))\n\n\n\n if not silent:\n if hasattr(clf, 'coef_'):\n print(\"dimensionality: %d\" % clf.coef_.shape[1])\n print(\"density: %f\" % density(clf.coef_))\n\n if True and feature_names is not None:\n print(\"top 10 keywords per class:\")\n for i, label in enumerate(target_names):\n top10 = np.argsort(clf.coef_[i])[-10:]\n print(trim(\"%s: %s\" % (label, \" \".join(feature_names[top10]))))\n print()\n\n if True:\n print(\"classification report:\")\n print(metrics.classification_report(y_test, pred,\n target_names=target_names))\n\n if True:\n print(\"confusion matrix:\")\n print(metrics.confusion_matrix(y_test, pred))\n # if no model exists for the current settings, create one by default. Prevents issues if models are deleted.\n elif not os.path.exists(\n os.path.join(os.getcwd(), self.file_term, 'models', f'{\"_\".join([self.uid_base, name])}')):\n dump(clf, os.path.join(os.getcwd(), self.file_term, 'models', f'{\"_\".join([self.uid_base, name])}'))\n clf_descr = str(clf).split('(')[0]\n return clf_descr, accuracy, train_time, test_time",
"def forwardModelJointFit(files, out, wavelength, gain=3.1, size=10, burn=500, run=800,\n spotx=2888, spoty=3514, simulated=False, truths=None):\n print '\\n\\n\\n'\n print '_'*120\n\n images = len(files)\n orig = []\n image = []\n noise = []\n rns = []\n peakvalues = []\n xestimate = []\n yestimate = []\n for file in files:\n print file\n #get data and convert to electrons\n o = pf.getdata(file)*gain\n\n if simulated:\n data = o\n else:\n #roughly the correct location - to avoid identifying e.g. cosmic rays\n data = o[spoty-(size*3):spoty+(size*3)+1, spotx-(size*3):spotx+(size*3)+1].copy()\n\n #maximum position within the cutout\n y, x = m.maximum_position(data)\n\n #spot and the peak pixel within the spot, this is also the CCD kernel position\n spot = data[y-size:y+size+1, x-size:x+size+1].copy()\n orig.append(spot.copy())\n\n #bias estimate\n if simulated:\n bias = 9000.\n rn = 4.5\n else:\n bias = np.median(o[spoty-size: spoty+size, spotx-220:spotx-20])\n rn = np.std(o[spoty-size: spoty+size, spotx-220:spotx-20])\n\n print 'Readnoise (e):', rn\n if rn < 2. or rn > 6.:\n print 'NOTE: suspicious readout noise estimate...'\n print 'ADC offset (e):', bias\n\n #remove bias\n spot -= bias\n\n #set highly negative values to zero\n spot[spot + rn**2 < 0.] = 0.\n\n print 'Least Squares Fitting...'\n gaus = models.Gaussian2D(spot.max(), size, size, x_stddev=0.5, y_stddev=0.5)\n gaus.theta.fixed = True #fix angle\n p_init = gaus\n fit_p = fitting.LevMarLSQFitter()\n stopy, stopx = spot.shape\n X, Y = np.meshgrid(np.arange(0, stopx, 1), np.arange(0, stopy, 1))\n p = fit_p(p_init, X, Y, spot)\n print p\n\n max = np.max(spot)\n s = spot.sum()\n print 'Maximum Value:', max\n print 'Sum:', s\n print ''\n\n peakvalues.append(max)\n\n #noise model\n variance = spot.copy() + rn**2\n\n #save to a list\n image.append(spot)\n noise.append(variance)\n xestimate.append(p.x_mean.value)\n yestimate.append(p.y_mean.value)\n rns.append(rn**2)\n\n #sensibility test, try to check if all the files in the fit are of the same dataset\n if np.std(peakvalues) > 5*np.sqrt(np.median(peakvalues)):\n #check for more than 5sigma outliers, however, this is very sensitive to the centroiding of the spot...\n print '\\n\\n\\nPOTENTIAL OUTLIER, please check the input files...'\n print np.std(peakvalues), 5*np.sqrt(np.median(peakvalues))\n\n peakvalues = np.asarray(peakvalues)\n peak = np.median(peakvalues)\n peakrange = (0.95*np.min(peakvalues), 1.7*np.max(peakvalues))\n\n print '\\nPeak Estimate:', peak\n print 'Peak Range:', peakrange\n\n #MCMC based fitting\n ndim = 2*images + 5 #xpos, ypos for each image and single amplitude, radius, focus, and sigmaX and sigmaY\n nwalkers = 1000\n print '\\n\\nBayesian Fitting, model has %i dimensions' % ndim\n\n # Choose an initial set of positions for the walkers using the Gaussian fit\n tmp = _expectedValues()['l' + wavelength.replace('nm', '')]\n print 'Using initial guess [radius, focus, width_x, width_y]:', tmp\n p0 = np.zeros((nwalkers, ndim))\n for x in xrange(images):\n p0[:, 2*x] = np.random.normal(xestimate[x], 0.1, size=nwalkers) # x\n p0[:, 2*x+1] = np.random.normal(yestimate[x], 0.1, size=nwalkers) # y\n p0[:, -5] = np.random.normal(peak, peak/100., size=nwalkers) # amplitude\n p0[:, -4] = np.random.normal(tmp[0], 0.01, size=nwalkers) # radius\n p0[:, -3] = np.random.normal(tmp[1], 0.01, size=nwalkers) # focus\n p0[:, -2] = np.random.normal(tmp[2], 0.01, size=nwalkers) # width_x\n p0[:, -1] = np.random.normal(tmp[3], 0.01, size=nwalkers) # width_y\n\n # Initialize the sampler 
with the chosen specs.\n #Create the coordinates x and y\n x = np.arange(0, spot.shape[1])\n y = np.arange(0, spot.shape[0])\n #Put the coordinates in a mesh\n xx, yy = np.meshgrid(x, y)\n\n #Flatten the arrays\n xx = xx.flatten()\n yy = yy.flatten()\n\n #initiate sampler\n pool = Pool(cores) #A hack Dan gave me to not have ghost processes running as with threads keyword\n sampler = emcee.EnsembleSampler(nwalkers, ndim, log_posteriorJoint,\n args=[xx, yy, image, rns, peakrange, spot.shape], pool=pool)\n # args=[xx, yy, image, noise, peakrange, spot.shape], pool=pool)\n\n # Run a burn-in and set new starting position\n print \"Burning-in...\"\n pos, prob, state = sampler.run_mcmc(p0, burn)\n best_pos = sampler.flatchain[sampler.flatlnprobability.argmax()]\n print best_pos\n print \"Mean acceptance fraction:\", np.mean(sampler.acceptance_fraction)\n pos = emcee.utils.sample_ball(best_pos, best_pos/100., size=nwalkers)\n # Reset the chain to remove the burn-in samples.\n sampler.reset()\n\n #run another burn-in\n print \"Running an improved estimate...\"\n pos, prob, state = sampler.run_mcmc(pos, burn)\n print \"Mean acceptance fraction:\", np.mean(sampler.acceptance_fraction)\n sampler.reset()\n\n # Starting from the final position in the improved chain\n print \"Running final MCMC...\"\n pos, prob, state = sampler.run_mcmc(pos, run, rstate0=state)\n print \"Mean acceptance fraction:\", np.mean(sampler.acceptance_fraction)\n\n #Get the index with the highest probability\n maxprob_index = np.argmax(prob)\n\n #Get the best parameters and their respective errors and print best fits\n params_fit = pos[maxprob_index]\n errors_fit = [sampler.flatchain[:,i].std() for i in xrange(ndim)]\n print params_fit\n\n #unpack the fixed parameters\n peak, radius, focus, width_x, width_y = params_fit[-5:]\n peakE, radiusE, focusE, width_xE, width_yE = errors_fit[-5:]\n\n #print results\n _printFWHM(width_x, width_y, width_xE, width_yE)\n\n #save the best models per file\n size = size*2 + 1\n gofs = []\n mdiff = []\n for index, file in enumerate(files):\n #path, file = os.path.split(file)\n id = 'results/' + out + str(index)\n #X and Y are always in pairs\n center_x = params_fit[2*index]\n center_y = params_fit[2*index+1]\n\n #1)Generate a model Airy disc\n amplitude = _amplitudeFromPeak(peak, center_x, center_y, radius,\n x_0=int(size/2.-0.5), y_0=int(size/2.-0.5))\n airy = models.AiryDisk2D(amplitude, center_x, center_y, radius)\n adata = airy.eval(xx, yy, amplitude, center_x, center_y, radius).reshape((size, size))\n\n #2)Apply Focus\n f = models.Gaussian2D(1., center_x, center_y, focus, focus, 0.)\n focusdata = f.eval(xx, yy, 1., center_x, center_y, focus, focus, 0.).reshape((size, size))\n model = signal.convolve2d(adata, focusdata, mode='same')\n\n #3)Apply CCD diffusion, approximated with a Gaussian\n CCD = models.Gaussian2D(1., size/2.-0.5, size/2.-0.5, width_x, width_y, 0.)\n CCDdata = CCD.eval(xx, yy, 1., size/2.-0.5, size/2.-0.5, width_x, width_y, 0.).reshape((size, size))\n model = signal.convolve2d(model, CCDdata, mode='same')\n\n #save the data, model and residuals\n fileIO.writeFITS(orig[index], id+'data.fits', int=False)\n fileIO.writeFITS(image[index], id+'datafit.fits', int=False)\n fileIO.writeFITS(model, id+'model.fits', int=False)\n fileIO.writeFITS(model - image[index], id+'residual.fits', int=False)\n fileIO.writeFITS(((model - image[index])**2 / noise[index]), id+'residualSQ.fits', int=False)\n\n #a simple goodness of fit\n gof = (1./(np.size(image[index])*images - ndim)) * np.sum((model 
- image[index])**2 / noise[index])\n maxdiff = np.max(np.abs(model - image[index]))\n print 'GoF:', gof, ' Max difference', maxdiff\n gofs.append(gof)\n mdiff.append(maxdiff)\n print 'Amplitude Estimate:', amplitude\n\n if np.asarray(mdiff).max() > 3e3 or np.asarray(gofs).max() > 4.:\n print '\\nFIT UNLIKELY TO BE GOOD...\\n'\n\n #save results\n res = dict(wx=width_x, wy=width_y, wxerr=width_xE, wyerr=width_yE, files=files, out=out,\n wavelength=wavelength, peakvalues=np.asarray(peakvalues), CCDmodel=CCD, CCDmodeldata=CCDdata,\n GoFs=gofs, fit=params_fit, maxdiff=mdiff)\n fileIO.cPickleDumpDictionary(res, 'results/' + out + '.pkl')\n\n #plot\n samples = sampler.chain.reshape((-1, ndim))\n #extents = None\n #if simulated:\n # extents = [(0.9*truth, 1.1*truth) for truth in truths]\n # print extents\n if simulated:\n tr = truths[:-5]\n peaks = []\n for x in xrange(images):\n xcen = tr[2*x]\n ycen = tr[2*x+1]\n theta = [truths[-5], xcen, ycen, truths[-4], truths[-3], truths[-2], truths[-1]]\n peaks.append(_peakFromTruth(theta))\n print peaks\n truths[-5] = np.median(np.asarray(peaks))\n fig = triangle.corner(samples, labels=['x', 'y']*images + ['peak', 'radius', 'focus', 'width_x', 'width_y'],\n truths=truths)#, extents=extents)\n fig.savefig('results/' + out + 'Triangle.png')\n plt.close()\n pool.close()",
"def process_detections(tracker, detections, nms_max_overlap, frame):\r\n #initialize color map\r\n cmap = plt.get_cmap('tab20b')\r\n colors = [cmap(i)[:3] for i in np.linspace(0, 1, 20)]\r\n\r\n # run non-maxima supression\r\n boxs = np.array([d.tlwh for d in detections])\r\n scores = np.array([d.confidence for d in detections])\r\n classes = np.array([d.class_name for d in detections])\r\n indices = preprocessing.non_max_suppression(boxs, classes, nms_max_overlap, scores)\r\n detections = [detections[i] for i in indices] \r\n\r\n # Call the tracker\r\n tracker.predict()\r\n tracker.update(detections)\r\n\r\n # update tracks\r\n for track in tracker.tracks:\r\n if not track.is_confirmed() or track.time_since_update > 1:\r\n continue \r\n bbox = track.to_tlbr()\r\n class_name = track.get_class()\r\n \r\n # draw bbox on screen\r\n color = colors[int(track.track_id) % len(colors)]\r\n color = [i * 255 for i in color]\r\n cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), color, 1)\r\n cv2.rectangle(frame, (int(bbox[0]), int(bbox[1]-30)), \r\n (int(bbox[0])+(len(class_name)+len(str(track.track_id)))*17, int(bbox[1])), color, -1)\r\n cv2.putText(frame, class_name + \"-\" + str(track.track_id),(int(bbox[0]), \r\n int(bbox[1]-10)),0, 0.5, (255,255,255), 1)\r\n\r\n # if enable info flag then print details about each track\r\n if FLAGS.info:\r\n print(\"Tracker ID: {}, Class: {}, BBox Coords (xmin, ymin, xmax, ymax): {}\".format(str(track.track_id), \r\n class_name, (int(bbox[0]), int(bbox[1]), int(bbox[2]), int(bbox[3]))))\r\n return frame",
"def fit(self, train_features, train_actuals):\n for name in self.models.keys():\n print('-'*shutil.get_terminal_size().columns)\n print(\"evaluating {}\".format(name).center(columns))\n print('-'*shutil.get_terminal_size().columns)\n estimator = self.models[name]\n est_params = self.params[name]\n gscv = GridSearchCV(estimator, est_params, cv=5, scoring=self.scoring_metric)\n gscv.fit(train_features, train_actuals)\n print(\"best parameters are: {}\".format(gscv.best_estimator_))\n self.single_classifier_best[name] = gscv",
"def fit(self, boards, winners, *args, **kwargs):\n self.model.fit(boards, winners, *args, **kwargs)",
"def fit(self) -> None:\n start_time = time.time()\n # ---- START -----\n train_df = self.processed_train_df[self.processed_train_df[self.filter_col_name]].dropna()\n train_features = train_df[self.feature_list]\n for label, model in zip(self.label_list, self.models):\n model.fit(train_features, train_df[label])\n # ---- END -----\n end_time = time.time()\n print(\"Finished fitting : elasped time : \" + str(end_time - start_time))",
"def visualize_detection_examples(model: tf.keras.Model, dataset: tf.data.Dataset,\n index_to_category: tf.lookup.StaticHashTable,\n examples: int = 5) -> None:\n # Getting anchor shapes for latter use\n anchor_shapes = tf.convert_to_tensor(utils.ANCHORS_SHAPE)\n # Colormap for bounding boxes\n cmap = cm.get_cmap('hsv', 80)\n for image, path, output in dataset.take(examples):\n # Creates figure/axes\n fig, axes = plt.subplots(1, 2)\n fig.set_tight_layout(tight=0.1)\n fig.suptitle(path.numpy().decode('utf-8'))\n # Parses image dimensions\n image_height, image_width, image_depth = image.shape\n # Parses info from sparse outputs\n steps = range(0, output.values.shape[0], 6)\n bboxes = [\n denormalize_bbox_to_image_size(\n yolo.decode_from_yolo_format(\n output.values[i + 1: i + 5],\n output.indices[i][:2]\n ).numpy(),\n image_width,\n image_height\n ) for i in steps\n ]\n labels = [(tf.cast(output.indices[i + 5][2], dtype=tf.int32) - tf.cast(5 * tf.shape(anchor_shapes)[0],\n dtype=tf.int32)).numpy() for i in steps]\n objectnesses = [output.values[i].numpy() for i in steps]\n objects = [Object(*entry) for entry in zip(bboxes, labels, objectnesses)]\n # Plots all objects\n axes[0].imshow(image.numpy())\n for obj in objects:\n add_object_to_axes(axes[0], obj, index_to_category, cmap)\n # Plots detection results\n axes[1].imshow(image.numpy())\n # Gets all valid bboxes (one per cell)\n predicted = tf.squeeze(model(tf.expand_dims(yolo.preprocess_image(image), axis=0)))\n indices = tf.range(5 * tf.shape(anchor_shapes)[0], tf.shape(predicted)[2])\n probability = tf.gather(predicted, indices=indices, axis=-1)\n category = tf.cast(tf.argmax(probability, axis=-1), dtype=tf.int32)\n indices = tf.range(0, tf.shape(anchor_shapes)[0]) * 5\n objectness = tf.gather(predicted, indices=indices, axis=-1)\n anchors = tf.argmax(objectness, axis=-1)\n objects = [\n Object(\n bbox=denormalize_bbox_to_image_size(\n yolo.clip_bbox_to_image(yolo.decode_from_yolo_format(\n predicted[i, j, anchors[i, j] * 5 + 1: anchors[i, j] * 5 + 1 + 4],\n tf.convert_to_tensor([i, j])\n )).numpy(),\n image_width,\n image_height\n ),\n index=category[i, j],\n objectness=objectness[i, j, anchors[i, j]] * probability[i, j, category[i, j]]\n ) for i in range(7) for j in range(7)\n ]\n # Only objects with high certainty are considered\n detections = filter(lambda entry: entry.objectness > OBJECTNESS_THRESHOLD, objects)\n # Performs non-max suppression\n sorted_detections = sorted(detections, key=lambda entry: entry.objectness, reverse=True)\n included_detections = []\n excluded_detections = []\n while len(sorted_detections) > 0:\n # Top element is always a detection since is the highest confidence object\n root = sorted_detections[0]\n included_detections.append(root)\n # Filter out all elements from the same class having a high IoU with the top element\n suppression = [non_max_supression(root, entry) for entry in sorted_detections[1:]]\n excluded_detections.extend([entry for entry, suppressed in zip(sorted_detections[1:], suppression) if suppressed])\n sorted_detections = [entry for entry, suppressed in zip(sorted_detections[1:], suppression) if not suppressed]\n # Plots included detections\n for obj in included_detections:\n add_object_to_axes(axes[1], obj, index_to_category, cmap)\n # Plots excluded detections\n for obj in excluded_detections:\n add_deleted_object_to_axes(axes[1], obj)\n # Let the magic show!\n axes[0].axis('off')\n axes[1].axis('off')\n axes[1].set_xlim(axes[0].get_xlim())\n axes[1].set_ylim(axes[0].get_ylim())\n 
plt.show()",
"def run_fit(self, optimize_opts=None):\n fit_range = self.config[\"fit\"].get(\"fit_range\")\n model = self.config[\"fit\"][\"model\"]\n\n for obs in self.extraction.spectrum_observations:\n if fit_range is not None:\n obs.mask_fit = obs.counts.energy_mask(fit_range[0], fit_range[1])\n obs.model = model\n\n self.fit = Fit(self.extraction.spectrum_observations)\n self.fit_result = self.fit.run(optimize_opts=optimize_opts)\n\n model = self.config[\"fit\"][\"model\"]\n modelname = model.__class__.__name__\n\n model.parameters.covariance = self.fit_result.parameters.covariance\n\n filename = make_path(self.config[\"outdir\"]) / \"fit_result_{}.yaml\".format(\n modelname\n )\n\n self.write(filename=filename)\n\n obs_stacker = SpectrumDatasetOnOffStacker(self.extraction.spectrum_observations)\n obs_stacker.run()\n\n datasets_fp = obs_stacker.stacked_obs\n datasets_fp.model = model\n self.flux_point_estimator = FluxPointsEstimator(\n e_edges=self.config[\"fp_binning\"], datasets=datasets_fp\n )\n fp = self.flux_point_estimator.run()\n fp.table[\"is_ul\"] = fp.table[\"ts\"] < 4\n self.flux_points = fp",
"def dataModel():\n srcmap001 = fits.open('dataFiles/6gev_srcmap_001.fits')\n srcmap03 = fits.open('dataFiles/6gev_srcmap_03.fits')\n\n image_data = fits.getdata('6gev_image.fits')\n filename = get_pkg_data_filename('6gev_image.fits')\n hdu = fits.open(filename)[0]\n wcs = WCS(hdu.header)\n\n #Given the results of the fit, calculate the model\n modelData001 = np.zeros(srcmap001[0].shape)\n modelData03 = np.zeros(srcmap03[0].shape)\n\n file = open('plotsData/fitResults001.pk1','rb')\n fit001 = pickle.load(file)\n file.close()\n\n file = open('plotsData/fitResults03.pk1','rb')\n fit03 = pickle.load(file)\n file.close()\n\n\n for source in fit001:\n the_index = srcmap001.index_of(source)\n\n modelData001 += fit001[source][:, None, None]*srcmap001[the_index].data[:-1, :, :]/np.sum(np.sum(srcmap001[the_index].data, axis=2), axis=1)[:-1, None, None]\n for source in fit03:\n the_index = srcmap03.index_of(source)\n modelData03 += fit03[source][:, None, None]*srcmap03[the_index].data[:-1, :, :]/np.sum(np.sum(srcmap03[the_index].data, axis=2), axis=1)[:-1, None, None]\n\n fig = plt.figure(figsize=[12, 4.5])\n\n vmin = 0\n vmax = 70.0\n cbStep = 10.0\n ax = fig.add_subplot(121, projection=wcs)\n ax=plt.gca()\n ax.tick_params(direction='in')\n c = Wedge((gc_l, gc_b), 1.0, theta1=0.0, theta2=360.0, width=14.0, edgecolor='black', facecolor='#474747', transform=ax.get_transform('galactic'))\n ax.add_patch(c)\n mappable=plt.imshow((image_data),cmap='inferno',origin='lower',norm=colors.PowerNorm(gamma=0.6),vmin=vmin, vmax=vmax, interpolation='gaussian')#\n plt.xlabel('Galactic Longitude')\n plt.ylabel('Galactic Latitude')\n plt.title('Data ($>6$ GeV)')\n cb = plt.colorbar(mappable, label='Counts per pixel', pad=0.01,ticks=np.arange(vmin, vmax+cbStep, cbStep))\n cb.ax.tick_params(width=0)\n\n\n ax2=fig.add_subplot(122, projection=wcs)\n ax2 = plt.gca()\n\n sources = []\n sources.append({\n 'Name':'3FGL J1745.3-2903c',\n 'RA':266.3434922,\n 'DEC':-29.06274323,\n 'color':'xkcd:bright light blue'})\n\n sources.append({\n 'Name':'1FIG J1748.2-2816',\n 'RA':267.1000722,\n 'DEC':-28.27707114,\n 'color':'xkcd:fire engine red'\n })\n\n sources.append({\n 'Name':'1FIG J1746.4-2843',\n 'RA':266.5942898,\n 'DEC':-28.86244442,\n 'color':'xkcd:fluorescent green'\n })\n\n sources.append({\n 'Name':'Galactic Center',\n 'RA':266.417,\n 'DEC':-29.0079,\n 'color':'black'\n })\n\n #Add source names:\n for source in sources:\n l, b = ra_dec_to_l_b(source['RA'], source['DEC'])\n ax2.scatter(l, b, color=source['color'],marker='x',s=45.0, transform=ax2.get_transform('galactic'), label=source['Name'])\n\n c2 = Wedge((gc_l, gc_b), 1.0, theta1=0.0, theta2=360.0, width=14.0, edgecolor='black', facecolor='#474747', transform=ax2.get_transform('galactic'))\n ax2.add_patch(c2)\n mappable2 = plt.imshow((np.sum(modelData03,axis=0)), cmap='inferno',norm=colors.PowerNorm(gamma=0.6),origin='lower',vmin=vmin, vmax=vmax, interpolation='gaussian')\n plt.xlabel('Galactic Longitude')\n plt.ylabel('Galactic Latitude')\n plt.title('Model ($>6$ GeV)')\n cb2 = plt.colorbar(mappable2, label='Counts per pixel', pad=0.01, ticks=np.arange(vmin, vmax+cbStep, cbStep))\n cb2.ax.tick_params(width=0)\n leg = plt.legend(loc=1,frameon=True)\n leg.get_frame().set_alpha(0.5)\n leg.get_frame().set_edgecolor('white')\n text1 = leg.get_texts()\n for text in text1:\n text.set_color('black')\n\n fig.tight_layout()\n plt.subplots_adjust(wspace = 0.13, left=0.04, bottom=0.13, top=0.92)\n plt.show()\n #plt.savefig('plots/dataModelComparison.pdf',bbox_inches='tight')",
"def updateAnnotations(self):\n self.backupDatafiles()\n print(\"Updating annotation files \", self.field(\"trainDir\"))\n listOfDataFiles = QDir(self.field(\"trainDir\")).entryList(['*.data'])\n for file in listOfDataFiles:\n # Read the annotation\n segments = Segment.SegmentList()\n newsegments = Segment.SegmentList()\n segments.parseJSON(os.path.join(self.field(\"trainDir\"), file))\n allSpSegs = np.arange(len(segments)).tolist()\n newsegments.metadata = segments.metadata\n for segix in allSpSegs:\n seg = segments[segix]\n if self.field(\"species\") not in [fil[\"species\"] for fil in seg[4]]:\n newsegments.addSegment(seg) # leave non-target segments unchanged\n else:\n for seg2 in self.segments:\n if seg2[1] == seg:\n # find the index of target sp and update call type\n seg[4][[fil[\"species\"] for fil in seg[4]].index(self.field(\"species\"))][\"calltype\"] = self.clusters[seg2[-1]]\n newsegments.addSegment(seg)\n newsegments.saveJSON(os.path.join(self.field(\"trainDir\"), file))",
"def scrapeSurfSpots(spots : pd.DataFrame):\n # Create empty DataFrame\n df = pd.DataFrame([])\n # Retrieve data for each surf spot from msw api\n for idx, row in spots.iterrows():\n print('\\nGetting forecast info for', row['spot'])\n # Access MSW API\n df = pd.concat([df, processJson(target_url=row.target,\n spot_name=row.spot,\n longitude=row.longitude,\n latitude=row.latitude)])\n # Reset Index\n df = df.reset_index(drop=True)\n return df",
"def refl_analysis(self,dials_model):\n Z = self.refl_table\n indices = Z['miller_index']\n expts = ExperimentListFactory.from_json_file(dials_model,\n check_format=False)\n self.dials_model=expts[0]\n CRYS = self.dials_model.crystal\n UC = CRYS.get_unit_cell()\n strong_resolutions = UC.d(indices)\n order = flex.sort_permutation(strong_resolutions, reverse=True)\n Z[\"spots_order\"] = order\n self.spots_pixels = flex.size_t()\n spots_offset = flex.int(len(order),-1)\n spots_size = flex.int(len(order),-1)\n\n P = panels = Z['panel']\n S = shoeboxes = Z['shoebox']\n N_visited = 0; N_bad = 0\n for oidx in range(len(order)): #loop through the shoeboxes in correct order\n sidx = order[oidx] # index into the Miller indices\n ipanel = P[sidx]\n slow_size = 254\n fast_size = 254\n panel_size=slow_size*fast_size\n bbox = S[sidx].bbox\n first_position = spots_offset[sidx] = self.spots_pixels.size()\n for islow in range(max(0,bbox[2]-3), min(slow_size,bbox[3]+3)):\n for ifast in range(max(0,bbox[0]-3), min(fast_size,bbox[1]+3)):\n value = self.trusted_mask[ipanel][islow*slow_size + ifast]\n N_visited += 1\n if value: self.spots_pixels.append(ipanel*panel_size+islow*slow_size+ifast)\n else: N_bad+=1\n spot_size = spots_size[sidx] = self.spots_pixels.size() - first_position\n Z[\"spots_offset\"] = spots_offset\n Z[\"spots_size\"] = spots_size\n print (N_visited,\"pixels were visited in the %d shoeboxes (with borders)\"%len(order))\n print (N_bad,\"of these were bad pixels, leaving %d in target\"%(len(self.spots_pixels)))",
"def detect_spots(images, threshold=None, remove_duplicate=True,\n return_threshold=False, voxel_size_z=None, voxel_size_yx=100,\n psf_z=None, psf_yx=200):\n # check parameters\n stack.check_parameter(threshold=(float, int, type(None)),\n remove_duplicate=bool,\n return_threshold=bool,\n voxel_size_z=(int, float, type(None)),\n voxel_size_yx=(int, float),\n psf_z=(int, float, type(None)),\n psf_yx=(int, float))\n\n # if one image is provided we enlist it\n if not isinstance(images, list):\n stack.check_array(images,\n ndim=[2, 3],\n dtype=[np.uint8, np.uint16,\n np.float32, np.float64])\n ndim = images.ndim\n images = [images]\n is_list = False\n else:\n ndim = None\n for i, image in enumerate(images):\n stack.check_array(image,\n ndim=[2, 3],\n dtype=[np.uint8, np.uint16,\n np.float32, np.float64])\n if i == 0:\n ndim = image.ndim\n else:\n if ndim != image.ndim:\n raise ValueError(\"Provided images should have the same \"\n \"number of dimensions.\")\n is_list = True\n\n # check consistency between parameters\n if ndim == 3 and voxel_size_z is None:\n raise ValueError(\"Provided images has {0} dimensions but \"\n \"'voxel_size_z' parameter is missing.\".format(ndim))\n if ndim == 3 and psf_z is None:\n raise ValueError(\"Provided images has {0} dimensions but \"\n \"'psf_z' parameter is missing.\".format(ndim))\n if ndim == 2:\n voxel_size_z = None\n psf_z = None\n\n # detect spots\n if return_threshold:\n spots, threshold = _detect_spots_from_images(\n images,\n threshold=threshold,\n remove_duplicate=remove_duplicate,\n return_threshold=return_threshold,\n voxel_size_z=voxel_size_z,\n voxel_size_yx=voxel_size_yx,\n psf_z=psf_z,\n psf_yx=psf_yx)\n else:\n spots = _detect_spots_from_images(\n images,\n threshold=threshold,\n remove_duplicate=remove_duplicate,\n return_threshold=return_threshold,\n voxel_size_z=voxel_size_z,\n voxel_size_yx=voxel_size_yx,\n psf_z=psf_z,\n psf_yx=psf_yx)\n\n # format results\n if not is_list:\n spots = spots[0]\n\n # return threshold or not\n if return_threshold:\n return spots, threshold\n else:\n return spots",
"def find_fit(self) -> None:\n self.triple_points = self.get_triple_point_distances()\n for read_meth in self.readout_methods:\n self._features[read_meth] = {}\n tps = self.triple_points[read_meth]\n self._features[read_meth][\"triple_points\"] = list(tps)",
"def loading_scatter(self, x, y, label_key, filename=None, spot_cols=None, spots=True, label=False, alpha=0.8, \n topbots=False,\n spot_size=40, label_font_size=7, cut=None, squish_scales=False, **kargs):\n assert filename, \"loading_scatter: Must provide a filename\"\n assert label_key, \"loading_scatter: Must provide a label_key for the label names\"\n assert label_key in self.parent, \"loading_scatter(): I can't find '%s' label_key in the original genelist\" % label_key\n\n ret_data = None\n xdata = self.__u[:,x-1]\n ydata = self.__u[:,y-1]\n perc_weights = self.get_loading_percents(exclude_first_pc=True)\n\n labs = self.parent[label_key]\n if topbots:\n # Get the top and bot from the X and Y sorted PCs:\n sortable_data = list(zip(xdata, ydata, self.parent[label_key]))\n sorted_by_x = sorted(sortable_data, key=lambda sortable_data: sortable_data[0])\n x_tbs = list(sorted_by_x[0:topbots]) + list(sorted_by_x[-topbots:])\n sorted_by_y = sorted(sortable_data, key=lambda sortable_data: sortable_data[1])\n y_tbs = list(sorted_by_y[0:topbots]) + list(sorted_by_y[-topbots:])\n\n # Merge duplicates:\n all_items = list(set(x_tbs + y_tbs))\n\n xdata = [i[0] for i in all_items]\n ydata = [i[1] for i in all_items]\n labs = [i[2] for i in all_items]\n\n #print xdata, ydata\n\n if \"aspect\" not in kargs:\n kargs[\"aspect\"] = \"square\"\n\n fig = self.__draw.getfigure(**kargs)\n ax = fig.add_subplot(111)\n\n cols = self.cols\n if spot_cols:\n cols = spot_cols \n\n if spots:\n ax.scatter(xdata, ydata, s=spot_size, alpha=alpha, edgecolors=\"none\", c=cols)\n else:\n # if spots is false then the axis limits are set to 0..1. I will have to send my\n # own semi-sensible limits:\n ax.set_xlim([min(xdata), max(xdata)])\n ax.set_ylim([min(ydata), max(ydata)])\n\n if label:\n for i, lab in enumerate(labs):\n if not spots and isinstance(spot_cols, list):\n ax.text(xdata[i], ydata[i], lab, size=label_font_size, ha=\"center\", va=\"top\", color=spot_cols[i])\n else:\n ax.text(xdata[i], ydata[i], lab, size=label_font_size, ha=\"center\", va=\"top\", color=\"black\")\n\n # Tighten the axis\n if squish_scales:\n if \"xlims\" not in kargs:\n ax.set_xlim([min(xdata), max(xdata)])\n\n if \"ylims\" not in kargs:\n ax.set_ylim([min(ydata), max(ydata)])\n\n ax.set_xlabel(\"PC%s (%.1f%%)\" % (x, perc_weights[x])) # can be overridden via do_common_args()\n ax.set_ylabel(\"PC%s (%.1f%%)\" % (y, perc_weights[y]))\n\n if cut:\n rect = matplotlib.patches.Rectangle(cut[0:2], cut[2]-cut[0], cut[3]-cut[1], ec=\"none\", alpha=0.2, fc=\"orange\")\n ax.add_patch(rect)\n\n labels = self.parent[label_key] # Just get once or big hit!\n tdata = [\n {\"name\": labels[i], \"pcx\": xdata[i], \"pcy\": ydata[i]}\n for i in range(len(xdata))\n if xdata[i] > cut[0]\n and xdata[i] < cut[2]\n and ydata[i] < cut[1]\n and ydata[i] > cut[3]\n ]\n\n if tdata:\n ret_data = genelist()\n ret_data.load_list(tdata)\n\n self.__draw.do_common_args(ax, **kargs)\n\n real_filename = self.__draw.savefigure(fig, filename)\n config.log.info(\"loading_scatter: Saved 'PC%s' vs 'PC%s' scatter to '%s'\" % (x, y, real_filename))\n return(ret_data)",
"def _batch_inference(self, batched_inputs, detected_instances=None):\n if detected_instances is None:\n detected_instances = [None] * len(batched_inputs)\n\n outputs = []\n all_scores = []\n all_boxes = []\n inputs, instances = [], []\n for idx, input, instance in zip(count(), batched_inputs, detected_instances):\n inputs.append(input)\n instances.append(instance)\n if len(inputs) == self.batch_size or idx == len(batched_inputs) - 1:\n output, all_score, all_box = self.model.inference(\n inputs, instances if instances[0] is not None else None, do_postprocess=False\n )\n outputs.extend(output)\n all_scores.extend(all_score)\n all_boxes.extend(all_box)\n inputs, instances = [], []\n return outputs, all_scores, all_boxes"
] | [
"0.63233507",
"0.6011749",
"0.5599055",
"0.54401183",
"0.5310215",
"0.5199169",
"0.50872684",
"0.50812757",
"0.50468516",
"0.50031716",
"0.49997303",
"0.49940717",
"0.49932045",
"0.49635676",
"0.49512917",
"0.49268216",
"0.48976818",
"0.48886847",
"0.4885543",
"0.48546764",
"0.48544618",
"0.48524198",
"0.4849832",
"0.48470715",
"0.48443118",
"0.48320395",
"0.4827746",
"0.4805525",
"0.47991824",
"0.47919184"
] | 0.7950176 | 0 |
This method is intended to segment the nuclei in the DAPI image on Mask images (not FISH). However basic, it seems to give a rather good approximation. The workflow is MIP > local gradient > Otsu thresholding > connected-components labelling > filtering components based on their size (using either a hand-set threshold or KMeans to distinguish actual cells from noise components). | def segment(self, sg=NucleiSegmenter()):
# mask_path = self.name.replace('w1', 'w3').replace('561', '405')
# cell_mask = io.imread(mask_path)
# self.mask = numpy.swapaxes(cell_mask, 0, 2)
with warnings.catch_warnings():
warnings.simplefilter('ignore')
if self._verbose > 0:
print('Segmenting...', end='', flush=True)
self.nucleis = sg.method(self.image_raw)
if self._verbose > 0:
print('%i nucleis found.' % (numpy.unique(self.nucleis).shape[0] - 1)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def segment_nuclei3D_5(instack, sigma1=3, sigma_dog_small=5, sigma_dog_big=40, seed_window=(70,100,100),\n erosion_length=5, dilation_length=10, sensitivity=0.5, size_min=1e4, \n size_max=5e5, circularity_min=0.5, display=False):\n\n\n def smart_dilate(stack, labelmask, sensitivity, dilation_length):\n \"\"\"\n Dilate nuclei, then apply a threshold to the newly-added pixels and\n only retains pixels that cross it. Change mask in place.\n \"\"\"\n # Get mean pixel values of foreground and background and define threshold.\n bg_mean = np.mean(stack[labelmask == 0])\n fg_mean = np.mean(stack[labelmask > 0])\n t = bg_mean + ((fg_mean - bg_mean) * sensitivity)\n # Dilate labelmask, return as new mask.\n labelmask_dilated = labelmask_apply_morphology(labelmask, \n mfunc=ndi.morphology.binary_dilation, \n struct=np.ones((1, dilation_length, dilation_length)), \n expand_size=(1, dilation_length + 1, dilation_length + 1))\n # Remove any pixels from dilated mask that are below threshhold.\n labelmask_dilated[stack < t] = 0\n # Add pixels matching nuc in dilated mask to old mask, pixels in old mask that are n\n # and 0 in dilated mask are kept at n. So dilation doesn't remove any nuclear pixels.\n for n in np.unique(labelmask)[1:]:\n if (n != 0):\n labelmask[labelmask_dilated == n] = n\n\n # Normalize each Z-slice to mean intensity to account for uneven illumination.\n stack = zstack_normalize_mean(instack)\n # Apply gaussian filter.\n stack_smooth = ndi.filters.gaussian_filter(stack, sigma=sigma1)\n # Threshold, make binary mask, fill.\n t = threshold_otsu(stack_smooth)\n mask = np.where(stack_smooth >= t, 1, 0)\n mask = imfill(mask, find_background_point(mask))\n # Use morphological erosion to remove spurious connections between objects.\n mask = ndi.morphology.binary_erosion(mask, structure=np.ones((1, erosion_length, erosion_length)))\n # Perform distance transform of mask.\n dist = ndi.distance_transform_edt(mask)\n # Find local maxima for watershed seeds.\n seeds, _ = peak_local_max_nD(dist, size=seed_window)\n # Add a background seed.\n seeds[find_background_point(mask)] = seeds.max() + 1\n # Re-smooth, do gradient transform to get substrate for watershedding.\n dog = dog_filter(stack, sigma_dog_small, sigma_dog_big)\n grad = gradient_nD(dog)\n # Remove nan from grad, replace with non-nan max values.\n grad[np.isnan(grad)] = grad[~np.isnan(grad)].max()\n # Segment by watershed algorithm.\n ws = watershed(grad, seeds.astype(int))\n # Filter nuclei for size and circularity.\n labelmask = labelmask_filter_objsize(ws, size_min, size_max)\n labelmask = filter_labelmask(labelmask, object_circularity, circularity_min, 1000)\n # Dilate labeled structures.\n smart_dilate(stack_smooth, labelmask, sensitivity, dilation_length)\n\n if (display):\n middle_slice = int(stack.shape[0] / 2)\n fig, ax = plt.subplots(3,2, figsize=(10,10))\n # Display mask.\n ax[0][0].imshow(mask.max(axis=0))\n ax[0][0].set_title('Initial Mask')\n # Display watershed seeds.\n seeds_vis = ndi.morphology.binary_dilation(seeds, structure=np.ones((1,8,8)))\n ax[0][1].imshow(stack_smooth.max(axis=0), alpha=0.5)\n ax[0][1].imshow(seeds_vis.max(axis=0), alpha=0.5)\n ax[0][1].set_title('Watershed seeds')\n # Display gradient.\n ax[1][0].imshow(grad[middle_slice])\n ax[1][0].set_title('Gradient')\n # Display watershed output.\n ax[1][1].imshow(ws.max(axis=0))\n ax[1][1].set_title('Watershed')\n # Display final mask.\n ax[2][0].imshow(labelmask.max(axis=0))\n ax[2][0].set_title('Final Segmentation')\n \n return labelmask",
"def segment(self, sg=CytoSegmenter()):\n # mask_path = self.name.replace('w1', 'w3').replace('561', '405')\n # cell_mask = io.imread(mask_path)\n # self.mask = numpy.swapaxes(cell_mask, 0, 2)\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n if self._verbose > 0:\n print('Segmenting...', end='', flush=True)\n self.cells = sg.method(self.image_raw, self.nuclei_image)\n if self._verbose > 0:\n print('%i cells found.' % (numpy.unique(self.cells).shape[0] - 1))",
"def segment_nuclei3D_monolayer(stack, sigma1=3, sigma_dog_big=15, \n sigma_dog_small=5, seed_window=(30,30), min_seed_dist=25, \n dilation_length=5, size_min=0, size_max=np.inf, display=False):\n # Make max projection on Z.\n maxp = stack.max(axis=0)\n # Filter with DoG to make nuclei into blobs.\n dog = dog_filter(maxp, sigma_dog_small, sigma_dog_big)\n # Get threshold, use thresh to make initial mask and fill holes.\n t = threshold_otsu(dog)\n mask = np.where(dog > t, 1, 0)\n mask = imfill(mask)\n # Perform distance transform, find local maxima for watershed seeds.\n dist = ndi.distance_transform_edt(mask)\n seeds, _ = peak_local_max_nD(dist, size=seed_window, min_dist=min_seed_dist)\n # Smooth image and take gradient, use as input for watershed.\n im_smooth = ndi.filters.gaussian_filter(maxp, sigma=sigma1)\n grad = gradient_nD(im_smooth)\n ws = watershed(grad, seeds.astype(int))\n # Filter object size, relabel to set background to 0.\n labelmask = labelmask_filter_objsize(ws, size_min, size_max)\n labelmask = relabel_labelmask(labelmask)\n # Dilate segmented nuclei.\n labelmask = labelmask_apply_morphology(labelmask, \n mfunc=ndi.morphology.binary_dilation, \n struct=np.ones((dilation_length, dilation_length)), \n expand_size=(dilation_length + 1, dilation_length + 1))\n\n if (display):\n fig, ax = plt.subplots(3,2, figsize=(10,10))\n # Display mask.\n ax[0][0].imshow(mask)\n ax[0][0].set_title('Initial Mask')\n # Display watershed seeds.\n seeds_vis = ndi.morphology.binary_dilation(seeds, structure=np.ones((8,8)))\n ax[0][1].imshow(im_smooth, alpha=0.5)\n ax[0][1].imshow(seeds_vis, alpha=0.5)\n ax[0][1].set_title('Watershed seeds')\n # Display gradient.\n ax[1][0].imshow(grad)\n ax[1][0].set_title('Gradient')\n # Display watershed output.\n ws = relabel_labelmask(ws)\n ax[1][1].imshow(ws.astype('bool'))\n ax[1][1].set_title('Watershed')\n # Display final mask.\n ax[2][0].imshow(labelmask.astype('bool'))\n ax[2][0].set_title('Final Segmentation')\n \n # Make 2D labelmask into 3D mask by repeating.\n labelmask = np.repeat([labelmask], stack.shape[0], axis=0)\n return labelmask",
"def segment_and_find_positions(self):\n initial_image = self.data\n xdim = self.data.shape[0]\n\n ydim = self.data.shape[1]\n downsized_image = transform.resize(\n initial_image,\n (xdim / DOWNSCALING_FACTOR, ydim / DOWNSCALING_FACTOR),\n mode=\"constant\",\n )\n rescaled_image = exposure.rescale_intensity(downsized_image)\n print(\"Starting Canny filtering\")\n g_edges = skimage.feature.canny(\n rescaled_image,\n sigma=self.canny_sigma,\n low_threshold=self.canny_low_threshold,\n )\n print(\"Starting dilation\")\n dilation = morphology.dilation(g_edges, morphology.disk(3))\n print(\"Starting erosion\")\n eroded = morphology.erosion(dilation, morphology.disk(4))\n dilation = morphology.dilation(\n eroded, morphology.diamond(4)\n ) # Dont change to disk\n print(\"Starting to remove small holes\")\n filled = morphology.remove_small_holes(\n dilation, area_threshold=self.remove_small_holes_area_threshold\n )\n print(\"Starting erosion\")\n eroded = morphology.erosion(filled, morphology.diamond(3))\n print(\"Applying filters\")\n filtered_image = eroded\n if self.colony_filters_dict is not None:\n for filter_name in self.colony_filters_dict.keys():\n filtered_image = segmentation_filters.apply_filter(\n filter_name, filtered_image, self.colony_filters_dict[filter_name]\n )\n\n colony_edges = morphology.dilation(feature.canny(filtered_image, 0.01))\n print(\"Starting outlining\")\n outline = downsized_image.copy()\n outline[colony_edges] = 65535\n distance = ndimage.distance_transform_edt(filtered_image)\n smoothed_well = ndimage.gaussian_filter(downsized_image, 0.35)\n outline.copy()\n objs, num_objs = ndimage.label(filtered_image)\n print(\"Applying filters for points\")\n if self.mode == \"A\":\n # point selection: Smoothest point in the center region\n for obj in range(1, num_objs + 1):\n print(\"On object {} of {}\".format(obj, num_objs))\n mask = objs == obj\n dist_mask = distance * mask\n # for each colony,\n # find the maximum distance from the two fold distance map.\n # The edge is at 0% and the center of the colony is at 100%\n d_max = dist_mask.max()\n # Getting the points which is at least 40% away from the edge\n top_percent = dist_mask > (d_max * 0.40)\n colony_mask = smoothed_well * top_percent\n colony_edges = feature.canny(colony_mask, 0.1)\n # applying the second distance transform\n # to find the smoothest point in the correct region\n inner_edges = ndimage.distance_transform_edt(\n ~colony_edges * top_percent\n )\n smooth_point = numpy.where(inner_edges == inner_edges.max())\n smooth_point = (smooth_point[0][0], smooth_point[1][0])\n smooth_point_corrected = (\n smooth_point[0] * DOWNSCALING_FACTOR,\n smooth_point[1] * DOWNSCALING_FACTOR,\n )\n self._point_locations.append(smooth_point_corrected)\n elif self.mode == \"C\":\n for obj in range(1, num_objs + 1):\n print(\"On object {} of {}\".format(obj, num_objs))\n mask = objs == obj\n dist_mask = distance * mask\n # point selection: edge, ridge & center respectively\n self.get_mode_c_points(dist_mask, 0, 0.03)\n self.get_mode_c_points(dist_mask, 0.15, 0.20)\n self.get_mode_c_points(dist_mask, 0.90, 0.99)",
"def data_assemble(self, x,y, r_cut, add_mask=5, pick_choice=False):\n #segmentation components\n obj_masks,center_mask_info, segments_deblend_list = self._seg_image(x, y, r_cut=r_cut)\n data_masks_center, _, xcenter, ycenter, c_index = center_mask_info\n image = self.cut_image(x,y,r_cut)\n self.raw_image = image\n src_mask = np.zeros_like(image)\n lens_mask = np.zeros_like(image)\n plu_mask = np.zeros_like(image)\n lenslight_mask_index = []\n if self.segmap is not None and self.interaction:\n segmap=self.segmap[0].data\n segdata = segmap[x - r_cut:x + r_cut + 1, y - r_cut:y + r_cut + 1]\n plt.imshow(segdata, origin='lower')\n nlabel = np.unique(segdata)\n for i in range(nlabel.shape[0] - 1):\n ax = (int((np.where(segdata == nlabel[i + 1])[0].max() - np.where(segdata == nlabel[i + 1])[0].min()) / 2 +\n np.where(segdata == nlabel[i + 1])[0].min()))\n ay = (int((np.where(segdata == nlabel[i + 1])[1].max() - np.where(segdata == nlabel[i + 1])[1].min()) / 3 +\n np.where(segdata == nlabel[i + 1])[1].min()))\n plt.text(ay, ax, repr(nlabel[i + 1]), color='r', fontsize=15)\n plt.title('Input segmentation map')\n plt.show()\n source_mask_index = [int(sidex) for sidex in input('Selection of data via (inputed) segmentation index separated by space, e.g., 0 1 :').split()]\n for i in source_mask_index:\n src_mask = src_mask + segdata*(segdata==i*1)\n # lens light\n lenslightyn = input('Hint: is there lens light? (y/n): ')\n if lenslightyn == 'y':\n lenslight_mask_index = [int(lidex) for lidex in input('Selection of lens-plane light via (inputed) segmentation index separated by space, e.g., 0 1 :').split()]\n for i in lenslight_mask_index:\n lens_mask = (lens_mask + segdata*(segdata==i*1))\n elif lenslightyn == 'n':\n lenslight_mask_index = []\n else:\n raise ValueError(\"Please input 'y' or 'n' !\")\n # contamination\n pluyn = input('Hint: is there contamination? (y/n): ')\n if pluyn == 'y':\n plution_mask_index = [int(pidex) for pidex in input('Selection of contamination via (inputed) segmentation index separated by space, e.g., 0 1 :').split()]\n for i in plution_mask_index:\n plu_mask = (plu_mask + segdata*(segdata==i*1))\n elif pluyn == 'n':\n plu_mask = np.zeros_like(image)\n else:\n raise ValueError(\"Please input 'y' or 'n' !\")\n\n\n\n if self.segmap is None and self.interaction:\n self.plot_segmentation(image, segments_deblend_list, xcenter, ycenter, c_index)\n #source light\n if pick_choice:\n source_mask_index = [int(sidex) for sidex in input('Selection of data via segmentation index separated by space, e.g., 0 1 :').split()]\n for i in source_mask_index:\n src_mask = src_mask + obj_masks[i]\n #lens light\n lenslightyn = input('Hint: is there lens light? (y/n): ')\n if lenslightyn == 'y':\n lenslight_mask_index = [int(lidex) for lidex in input('Selection of lens-plane light via segmentation index separated by space, e.g., 0 1 :').split()]\n for i in lenslight_mask_index:\n lens_mask = (lens_mask + obj_masks[i])\n elif lenslightyn == 'n':\n lenslight_mask_index = []\n else:\n raise ValueError(\"Please input 'y' or 'n' !\")\n # contamination\n pluyn = input('Hint: is there contamination? 
(y/n): ')\n if pluyn == 'y':\n plution_mask_index = [int(pidex) for pidex in input('Selection of contamination via segmentation index separated by space, e.g., 0 1 :').split()]\n for i in plution_mask_index:\n plu_mask = (plu_mask + obj_masks[i])\n elif pluyn == 'n':\n plu_mask = np.zeros_like(image)\n else:\n raise ValueError(\"Please input 'y' or 'n' !\")\n else:\n src_mask = data_masks_center\n\n\n #adding pixels around the selected masks\n selem = np.ones((add_mask, add_mask))\n src_mask = ndimage.binary_dilation(src_mask.astype(np.bool), selem)\n plu_mask_out = ndimage.binary_dilation(plu_mask.astype(np.bool), selem)\n plu_mask_out = (plu_mask_out - 1)*-1\n\n #select source region to fit, or to use whole observation to fit\n ##1.select source region to fit\n snr = self.snr\n source_mask = image * src_mask\n #create background image for picked\n if self.background_rms is None:\n _, _, std = sigma_clipped_stats(image, sigma=snr, mask=source_mask)\n tshape = image.shape\n img_bkg = make_noise_image(tshape, distribution='gaussian', mean=0., stddev=std, seed=12)\n else:\n tshape = image.shape\n std=np.mean(self.background_rms)\n img_bkg = make_noise_image(tshape, distribution='gaussian', mean=0., stddev=std, seed=12)\n\n no_source_mask = (src_mask * -1 + 1) * img_bkg\n picked_data = source_mask + no_source_mask\n\n ##2.use whole observation to fit while mask out the contamination\n maskedimg = image * plu_mask_out\n\n ##orginize the output 'kwargs_data'\n kwargs_data = {}\n if pick_choice:\n kwargs_data['image_data'] = picked_data#select source region to fit\n else:\n kwargs_data['image_data'] = maskedimg#use whole observation to fit while mask out the contamination\n\n if self.background_rms is None:\n kwargs_data['background_rms'] = std\n self.background_rms = std\n else:\n kwargs_data['background_rms'] = np.mean(self.background_rms)\n kwargs_data['exposure_time'] = self.exp_time\n kwargs_data['transform_pix2angle'] = np.array([[1, 0], [0, 1]]) * self.deltaPix\n ra_at_xy_0 = (y - r_cut) * self.deltaPix # (ra,dec) is (y_img,x_img)\n dec_at_xy_0 = (x - r_cut) * self.deltaPix\n kwargs_data['ra_at_xy_0'] = ra_at_xy_0\n kwargs_data['dec_at_xy_0'] = dec_at_xy_0\n\n #coordinate of the lens light\n xlenlight, ylenlight = [], []\n if lenslight_mask_index !=[]:\n for i in lenslight_mask_index:\n xlenlight.append(ra_at_xy_0 + int(xcenter[i]) * self.deltaPix )\n ylenlight.append(dec_at_xy_0 + int(ycenter[i])* self.deltaPix )\n\n #for output\n self.data = kwargs_data['image_data']\n self.kwargs_data = kwargs_data\n self.data_mask = src_mask\n self.lens_mask = lens_mask\n self.plu_mask = plu_mask_out\n self.obj_masks = obj_masks\n imageData = ImageData(**kwargs_data)\n self.imageData = imageData\n kwargs_seg = [segments_deblend_list, xcenter, ycenter, c_index]\n\n return kwargs_data, kwargs_seg, [xlenlight, ylenlight]",
"def segmentation_rgb(self, image, k=2):\n\n \n iterations = 5\n \n print(image.shape)\n imageW = image.shape[0]\n imageH = image.shape[1]\n\n\n dataVector = np.ndarray(shape=(imageW * imageH, 5), dtype=float)\n \n pixelClusterAppartenance = np.ndarray(shape=(imageW * imageH), dtype=int)\n\n \n for y in range(0, imageH):\n for x in range(0, imageW):\n xy = (x, y)\n \n rgb=image[x,y]\n print(rgb)\n #rgb = image.getpixel(xy)\n\n dataVector[x + y * imageW, 0] = rgb[0]\n dataVector[x + y * imageW, 1] = rgb[1]\n dataVector[x + y * imageW, 2] = rgb[2]\n dataVector[x + y * imageW, 3] = x\n dataVector[x + y * imageW, 4] = y\n print(\"data vector\")\n print(dataVector)\n \n dataVector_scaled = preprocessing.normalize(dataVector)\n minValue = np.amin(dataVector_scaled)\n maxValue = np.amax(dataVector_scaled)\n\n centers = np.ndarray(shape=(k,5))\n for index, center in enumerate(centers):\n centers[index] = np.random.uniform(minValue, maxValue, 5)\n print(\"center\")\n print(centers[index])\n\n for iteration in range(iterations):\n \n for idx, data in enumerate(dataVector_scaled):\n distanceToCenters = np.ndarray(shape=(k))\n for index, center in enumerate(centers):\n distanceToCenters[index] = euclidean_distances(data.reshape(1, -1), center.reshape(1, -1))\n pixelClusterAppartenance[idx] = np.argmin(distanceToCenters)\n\n \n clusterToCheck = np.arange(k) \n \n clustersEmpty = np.in1d(clusterToCheck, pixelClusterAppartenance)\n \n for index, item in enumerate(clustersEmpty):\n if item == False:\n pixelClusterAppartenance[np.random.randint(len(pixelClusterAppartenance))] = index\n \n\n for i in range(k):\n dataInCenter = []\n\n for index, item in enumerate(pixelClusterAppartenance):\n if item == i:\n dataInCenter.append(dataVector_scaled[index])\n dataInCenter = np.array(dataInCenter)\n centers[i] = np.mean(dataInCenter, axis=0)\n\n \n print(\"Centers Iteration num\", iteration, \": \\n\", centers)\n\n \n for index, item in enumerate(pixelClusterAppartenance):\n dataVector[index][0] = int(round(centers[item][0] * 255))\n dataVector[index][1] = int(round(centers[item][1] * 255))\n dataVector[index][2] = int(round(centers[item][2] * 255))\n\n \n image = Image.new(\"RGB\", (imageW, imageH))\n\n for y in range(imageH):\n for x in range(imageW):\n image.putpixel((x, y), (int(dataVector[y * imageW + x][0]),\n int(dataVector[y * imageW + x][1]),\n int(dataVector[y * imageW + x][2])))\n\n print(type(image))\n image = cv2.cvtColor(np.asarray(image), cv2.COLOR_BGR2GRAY)\n print(type(image))\n \n return image",
"def detectSpots(img, detectSpotsParameter = None, correctIlluminationParameter = None, removeBackgroundParameter = None,\n filterDoGParameter = None, findExtendedMaximaParameter = None, detectCellShapeParameter = None,\n verbose = False, out = sys.stdout, **parameter):\n\n timer = Timer();\n \n # normalize data -> to check\n #img = img.astype('float');\n #dmax = 0.075 * 65535;\n #ids = img > dmax;\n #img[ids] = dmax;\n #img /= dmax; \n #out.write(timer.elapsedTime(head = 'Normalization'));\n #img = dataset[600:1000,1600:1800,800:830];\n #img = dataset[600:1000,:,800:830];\n \n # correct illumination\n correctIlluminationParameter = getParameter(detectSpotsParameter, \"correctIlluminationParameter\", correctIlluminationParameter);\n img1 = img.copy();\n img1 = correctIllumination(img1, correctIlluminationParameter = correctIlluminationParameter, verbose = verbose, out = out, **parameter) \n\n # background subtraction in each slice\n #img2 = img.copy();\n removeBackgroundParameter = getParameter(detectSpotsParameter, \"removeBackgroundParameter\", removeBackgroundParameter);\n img2 = removeBackground(img1, removeBackgroundParameter = removeBackgroundParameter, verbose = verbose, out = out, **parameter) \n \n # mask\n #timer.reset();\n #if mask == None: #explicit mask\n # mask = img > 0.01;\n # mask = binary_opening(mask, self.structureELement('Disk', (3,3,3)));\n #img[img < 0.01] = 0; # masking in place # extended maxima\n #out.write(timer.elapsedTime(head = 'Mask')); \n \n #DoG filter\n filterDoGParameter = getParameter(detectSpotsParameter, \"filterDoGParameter\", filterDoGParameter);\n dogSize = getParameter(filterDoGParameter, \"size\", None);\n #img3 = img2.copy(); \n img3 = filterDoG(img2, filterDoGParameter = filterDoGParameter, verbose = verbose, out = out, **parameter);\n \n # normalize \n # imax = img.max();\n # if imax == 0:\n # imax = 1;\n # img /= imax;\n \n # extended maxima\n findExtendedMaximaParameter = getParameter(detectSpotsParameter, \"findExtendedMaximaParameter\", findExtendedMaximaParameter);\n hMax = getParameter(findExtendedMaximaParameter, \"hMax\", None);\n imgmax = findExtendedMaxima(img3, findExtendedMaximaParameter = findExtendedMaximaParameter, verbose = verbose, out = out, **parameter);\n \n #center of maxima\n if not hMax is None:\n centers = findCenterOfMaxima(img, imgmax, verbose = verbose, out = out, **parameter);\n else:\n centers = findPixelCoordinates(imgmax, verbose = verbose, out = out, **parameter);\n \n #cell size detection\n detectCellShapeParameter = getParameter(detectSpotsParameter, \"detectCellShapeParameter\", detectCellShapeParameter);\n cellShapeThreshold = getParameter(detectCellShapeParameter, \"threshold\", None);\n if not cellShapeThreshold is None:\n \n # cell shape via watershed\n imgshape = detectCellShape(img2, centers, detectCellShapeParameter = detectCellShapeParameter, verbose = verbose, out = out, **parameter);\n \n #size of cells \n csize = findCellSize(imgshape, maxLabel = centers.shape[0], out = out, **parameter);\n \n #intensity of cells\n cintensity = findCellIntensity(img, imgshape, maxLabel = centers.shape[0], verbose = verbose, out = out, **parameter);\n\n #intensity of cells in background image\n cintensity2 = findCellIntensity(img2, imgshape, maxLabel = centers.shape[0], verbose = verbose, out = out, **parameter);\n \n #intensity of cells in dog filtered image\n if dogSize is None:\n cintensity3 = cintensity2;\n else:\n cintensity3 = findCellIntensity(img3, imgshape, maxLabel = centers.shape[0], verbose = verbose, out 
= out, **parameter);\n \n if verbose:\n out.write(timer.elapsedTime(head = 'Spot Detection') + '\\n');\n \n #remove cell;s of size 0\n idz = csize > 0;\n \n return ( centers[idz], numpy.vstack((cintensity[idz], cintensity3[idz], cintensity2[idz], csize[idz])).transpose()); \n \n \n else:\n #intensity of cells\n cintensity = findIntensity(img, centers, verbose = verbose, out = out, **parameter);\n\n #intensity of cells in background image\n cintensity2 = findIntensity(img2, centers, verbose = verbose, out = out, **parameter);\n \n #intensity of cells in dog filtered image\n if dogSize is None:\n cintensity3 = cintensity2;\n else:\n cintensity3 = findIntensity(img3, centers, verbose = verbose, out = out, **parameter);\n\n if verbose:\n out.write(timer.elapsedTime(head = 'Spot Detection') + '\\n');\n \n return ( centers, numpy.vstack((cintensity, cintensity3, cintensity2)).transpose());",
"def opencv_watershed(masked, mask) -> JSON_TYPE:\n # For code and detailed explanation see:\n # http://datahacker.rs/007-opencv-projects-image-segmentation-with-watershed-algorithm/\n threshold: int = 30\n gray = cv2.cvtColor(masked, cv2.COLOR_RGB2GRAY)\n ret, thresh_img = cv2.threshold(gray, threshold, 255, cv2.THRESH_BINARY)\n # Noise removal\n kernel = np.ones((3), np.uint8)\n opening_img = cv2.morphologyEx(thresh_img, cv2.MORPH_OPEN, kernel, iterations=9)\n # Noise removal\n closing_img = cv2.morphologyEx(thresh_img, cv2.MORPH_CLOSE, kernel, iterations=4)\n dist_transform = cv2.distanceTransform(255 - closing_img, cv2.DIST_L2, 3)\n local_max_location = peak_local_max(dist_transform, min_distance=1, indices=True)\n\n n_increases: int = 0\n while local_max_location.shape[0] < 30 and n_increases < 15:\n threshold += 20\n ret, thresh_img = cv2.threshold(gray, threshold, 255, cv2.THRESH_BINARY)\n # Noise removal\n kernel = np.ones((3), np.uint8)\n opening_img = cv2.morphologyEx(thresh_img, cv2.MORPH_OPEN, kernel, iterations=9)\n # Noise removal\n closing_img = cv2.morphologyEx(thresh_img, cv2.MORPH_CLOSE, kernel, iterations=4)\n dist_transform = cv2.distanceTransform(255 - closing_img, cv2.DIST_L2, 3)\n local_max_location = peak_local_max(dist_transform, min_distance=1, indices=True)\n n_increases += 1\n # Reset threshold\n threshold = 30\n\n num_clusters: int = 30\n if n_increases >= 15:\n num_clusters = local_max_location.shape[0]\n kmeans = KMeans(n_clusters=num_clusters)\n # If local_max_location size is 0, return 0 predictions\n if not local_max_location.size:\n return {\n \"count\": 0\n }\n kmeans.fit(local_max_location)\n local_max_location = kmeans.cluster_centers_.copy()\n # Kmeans is returning a float data type so we need to convert it to an int. \n local_max_location = local_max_location.astype(int)\n dist_transform_copy = dist_transform.copy()\n for i in range(local_max_location.shape[0]):\n cv2.circle(dist_transform_copy, (local_max_location[i][1], local_max_location[i][0]), 5, 255)\n # markers = np.zeros_like(dist_transform)\n ret, sure = cv2.threshold(dist_transform, 0.01*dist_transform.max(), 255, 0)\n sure = np.uint8(sure)\n ret, markers = cv2.connectedComponents(sure)\n labels = np.arange(kmeans.n_clusters)\n markers[local_max_location[:,0], local_max_location[:,1]] = labels + 1\n # Convert all local markers to an integer. This because cluster centers will be float numbers. \n markers = markers.astype(int)\n markers_copy = markers.copy()\n index_non_zero_markers = np.argwhere(markers != 0)\n markers_copy = markers_copy.astype(np.uint8)\n font = cv2.FONT_HERSHEY_SIMPLEX\n for i in range(index_non_zero_markers.shape[0]):\n string_text = str(markers[index_non_zero_markers[i][0], index_non_zero_markers[i][1]])\n cv2.putText(markers_copy, string_text, (index_non_zero_markers[i][1], index_non_zero_markers[i][0]), font, 1, 255)\n markers = markers.astype(np.int32)\n segmented = cv2.watershed(masked, markers)\n count_segments(markers)\n #return {\n # \"count\": local_max_location.shape[0]\n #}\n return {\n \"count\": count_segments(markers),\n }",
"def _seg_image(self, x, y, r_cut=100):\n snr=self.snr\n npixels=self.npixels\n bakground = self.bakground\n error= self.bkg_rms(x,y,r_cut)\n kernel = self.kernel\n image_cutted = self.cut_image(x,y,r_cut)\n image_data = image_cutted\n threshold_detect_objs=detect_threshold(data=image_data, nsigma=snr,error=error)\n segments=detect_sources(image_data, threshold_detect_objs, npixels=npixels, filter_kernel=kernel)\n segments_deblend = deblend_sources(image_data, segments, npixels=npixels,nlevels=10)\n segments_deblend_info = source_properties(image_data, segments_deblend)\n nobjs = segments_deblend_info.to_table(columns=['id'])['id'].max()\n xcenter = segments_deblend_info.to_table(columns=['xcentroid'])['xcentroid'].value\n ycenter = segments_deblend_info.to_table(columns=['ycentroid'])['ycentroid'].value\n image_data_size = np.int((image_data.shape[0] + 1) / 2.)\n dist = ((xcenter - image_data_size) ** 2 + (ycenter - image_data_size) ** 2) ** 0.5\n c_index = np.where(dist == dist.min())[0][0]\n center_mask=(segments_deblend.data==c_index+1)*1 #supposed to be the data mask\n obj_masks = []\n for i in range(nobjs):\n mask = ((segments_deblend.data==i+1)*1)\n obj_masks.append(mask)\n xmin = segments_deblend_info.to_table(columns=['bbox_xmin'])['bbox_xmin'].value\n xmax = segments_deblend_info.to_table(columns=['bbox_xmax'])['bbox_xmax'].value\n ymin = segments_deblend_info.to_table(columns=['bbox_ymin'])['bbox_ymin'].value\n ymax = segments_deblend_info.to_table(columns=['bbox_ymax'])['bbox_ymax'].value\n xmin_c, xmax_c = xmin[c_index], xmax[c_index]\n ymin_c, ymax_c = ymin[c_index], ymax[c_index]\n xsize_c = xmax_c - xmin_c\n ysize_c = ymax_c - ymin_c\n if xsize_c > ysize_c:\n r_center = np.int(xsize_c)\n else:\n r_center = np.int(ysize_c)\n center_mask_info= [center_mask, r_center, xcenter, ycenter, c_index]\n return obj_masks, center_mask_info, segments_deblend",
"def segment_nuclei3D_monolayer_rpb1(stack, sigma1=3, sigma_dog_big=15, \n sigma_dog_small=5, seed_window=(30,30), min_seed_dist=25, \n dilation_length=5, dilation_length_foci=10, size_min=0, \n circularity_min=0, size_max=np.inf, display=False):\n # Make max projection on Z.\n maxp = stack.max(axis=0)\n # Filter with DoG to make nuclei into blobs.\n dog = dog_filter(maxp, sigma_dog_small, sigma_dog_big)\n # Get threshold, use thresh to make initial mask and fill holes.\n t = threshold_otsu(dog)\n mask = np.where(dog > t, 1, 0)\n mask = imfill(mask)\n # Perform distance transform, find local maxima for watershed seeds.\n dist = ndi.distance_transform_edt(mask)\n seeds, _ = peak_local_max_nD(dist, size=seed_window, min_dist=min_seed_dist)\n # Smooth image and take gradient, use as input for watershed.\n im_smooth = ndi.filters.gaussian_filter(maxp, sigma=sigma1)\n grad = gradient_nD(im_smooth)\n # Make second mask of pol2 foci (presumed HLBs) by re-thresholding within nuclei.\n t_foci = threshold_otsu(im_smooth[mask.astype('bool')])\n mask_foci = np.where(im_smooth > t_foci, True, False)\n mask_foci = ndi.morphology.binary_dilation(mask_foci, structure=np.ones((dilation_length_foci, dilation_length_foci)))\n # Mask out pol2 foci in gradient.\n grad = np.where(mask_foci, 0, grad)\n # Perform watershed segmentation.\n ws = watershed(grad, seeds.astype(int))\n # Filter object size and circularity, relabel to set background to 0.\n labelmask = labelmask_filter_objsize(ws, size_min, size_max)\n # Note: object_circularity works on 3D labelmasks, requiring adding (expand_dims) and removing (squeeze) a dimension.\n labelmask = np.squeeze(filter_labelmask(np.expand_dims(labelmask, axis=0), object_circularity, circularity_min, 1000))\n labelmask = relabel_labelmask(labelmask)\n # Dilate segmented nuclei.\n labelmask = labelmask_apply_morphology(labelmask, \n mfunc=ndi.morphology.binary_dilation, \n struct=np.ones((dilation_length, dilation_length)), \n expand_size=(dilation_length + 1, dilation_length + 1))\n\n if (display):\n fig, ax = plt.subplots(3,2, figsize=(10,10))\n # Display mask.\n ax[0][0].imshow(mask)\n ax[0][0].set_title('Initial Mask')\n # Display watershed seeds.\n seeds_vis = ndi.morphology.binary_dilation(seeds, structure=np.ones((8,8)))\n ax[0][1].imshow(im_smooth, alpha=0.5)\n ax[0][1].imshow(seeds_vis, alpha=0.5)\n ax[0][1].set_title('Watershed seeds')\n # Display gradient.\n ax[1][0].imshow(grad)\n ax[1][0].set_title('Gradient')\n # Display watershed output.\n ws = relabel_labelmask(ws)\n ax[1][1].imshow(ws.astype('bool'))\n ax[1][1].set_title('Watershed')\n # Display final mask.\n ax[2][0].imshow(labelmask.astype('bool'))\n ax[2][0].set_title('Final Segmentation')\n \n # Make 2D labelmask into 3D mask by repeating.\n labelmask = np.repeat([labelmask], stack.shape[0], axis=0)\n return labelmask",
"def vis_mechanically_coupled_regions(img_dir,output_dir,data,dbscn_length,dbscn_min_size,display_not_save=False):\n #Read in the image that is segmented/labelled for nuclei\n img=imread(img_dir)\n\n #save plots to show clusters\n fig = plt.figure(figsize=(6, 2))\n ax0 = fig.add_subplot(131)\n ax1 = fig.add_subplot(132)\n ax3 = fig.add_subplot(133)\n #show segmented image labels\n ax0.imshow(img,aspect='auto') \n ax0.axis('off')\n #nuclear centroid color-coded by their orientation\n img1=ax1.scatter(data[\"Y\"], data[\"X\"], c=data[\"angles\"],s=1)\n ax1.set_xlim(0,img.shape[0])\n ax1.set_ylim(img.shape[1],0)\n plt.colorbar(img1)\n ax1.axis('off')\n\n # plot the cluster assignments\n img3=ax3.scatter(data[data[\"clusters\"]> -1][\"Y\"], data[data[\"clusters\"]> -1][\"X\"], \n c=data[data[\"clusters\"]> -1][\"clusters\"],cmap=\"plasma\",s=1)\n ax3.set_xlim(0,img.shape[0])\n ax3.set_ylim(img.shape[1],0)\n ax3.axis('off')\n\n #add titles\n ax0.title.set_text('Segmented Image')\n ax1.title.set_text('Filtered Orientation')\n ax3.title.set_text('Clusters')\n\n if display_not_save:\n plt.show()\n else: \n plt.savefig((output_dir+\"/\"+img_dir.rsplit('/', 1)[-1][:-4]+\"_\"+str(dbscn_length)+\"_\"+ str(dbscn_min_size)+\".png\"),dpi=600, bbox_inches = 'tight',pad_inches = 0)\n fig.clf()\n plt.close(fig)\n plt.close('all')\n \n \n del fig,ax0,ax1,ax3,img1,img3",
"def watershed_segment(M,xM=None,yM=None):\n\n if xM != None and yM != None:\n sel = np.ones((int(ceil(23.9*xM)),int(ceil(23.9*yM)))) # for opening\n sel2 = np.ones((int(ceil(127.2*xM)),int(ceil(127.2*yM)))) # for local thresholding\n sel3 = np.ones((int(ceil(11.9*xM)),int(ceil(11.9*yM)))) # for erosion\n ma,mi =(44245.21*xM*yM),(316.037*xM*yM) \n else:\n selD = np.array([int(M.shape[0]*.012),int(M.shape[1]*.012)])\n selD = np.where(selD!=0,selD,1)\n \n sel2D = np.array([int(M.shape[0]*.12),int(M.shape[1]*.12)])\n sel2D = np.where(sel2D!=0,sel2D,1)\n\n sel3D = np.array([int(M.shape[0]*.01),int(M.shape[1]*.01)])\n sel3D = np.where(sel3D!=0,sel3D,1)\n\n\n sel = np.ones(selD) # for opening\n sel2 = np.ones(sel2D) # for local thresholding\n sel3 = np.ones(sel3D) # for erosion\n ma,mi = (M.shape[0]*M.shape[1]*.0075),(M.shape[0]*M.shape[1]*.0003)\n\n # get a few points in the center of each blob\n \n # threshold\n bw = ((M>=ndi.percentile_filter(M,80,footprint=sel2)))\n #& (M>=stats.scoreatpercentile(M.flatten(),80)))\n\n # open and erode\n blobs = snm.binary_opening(bw,structure=sel)\n blobs = snm.binary_erosion(blobs,structure=sel3,iterations=2)\n \n # label\n labels,_ = ndi.label(blobs)\n labels[labels > 0] += 1\n labels[0,0] = 1\n\n # rescale and cast to int16, then use watershed\n #M2 = rescaled(M,0,65000).astype(np.uint16)\n #newlabels = ndi.watershed_ift(M2,labels)\n newlabels = labels\n \n # get rid of groups unless they have the right number of pixels\n\n counts = np.bincount(newlabels.flatten())\n old2new = np.arange(len(counts)) \n old2new[(counts < int(mi)) | (counts > int(ma))] = 0\n newlabels = old2new[newlabels]\n\n return newlabels",
"def preprocessing(image, smooth_size, folder):\n from skimage.restoration import denoise_tv_chambolle\n \n dim = int(image.shape[0] / 50.)\n smoothed = rank.median(image, disk(smooth_size))\n #smoothed = denoise_tv_chambolle(image, weight=0.002)\n smoothed = rank.enhance_contrast(smoothed, disk(smooth_size))\n \n pl.subplot(2, 3, 1)\n pl.title(\"after median\")\n pl.imshow(smoothed)\n pl.gray()\n # If after smoothing the \"dot\" disappears\n # use the image value\n \n # TODO: wat do with thresh?\n try:\n im_max = smoothed.max()\n thresh = threshold_otsu(image)\n except:\n im_max = image.max()\n thresh = threshold_otsu(image)\n\n \n if im_max < thresh:\n labeled = np.zeros(smoothed.shape, dtype=np.int32)\n \n else:\n binary = smoothed > thresh\n \n # TODO: this array size is the fault of errors\n bin_open = binary_opening(binary, np.ones((dim, dim)), iterations=5)\n bin_close = binary_closing(bin_open, np.ones((5,5)), iterations=5)\n \n pl.subplot(2, 3, 2)\n pl.title(\"threshold\")\n pl.imshow(binary, interpolation='nearest')\n pl.subplot(2, 3, 3)\n pl.title(\"opening\")\n pl.imshow(bin_open, interpolation='nearest')\n pl.subplot(2, 3, 4)\n pl.title(\"closing\")\n pl.imshow(bin_close, interpolation='nearest')\n \n distance = ndimage.distance_transform_edt(bin_open)\n local_maxi = peak_local_max(distance,\n indices=False, labels=bin_open)\n \n markers = ndimage.label(local_maxi)[0]\n \n labeled = watershed(-distance, markers, mask=bin_open)\n pl.subplot(2, 3, 5)\n pl.title(\"label\")\n pl.imshow(labeled)\n #pl.show()\n pl.savefig(folder)\n pl.close('all')\n\n #misc.imsave(folder, labeled)\n# labels_rw = random_walker(bin_close, markers, mode='cg_mg')\n# \n# pl.imshow(labels_rw, interpolation='nearest')\n# pl.show()\n\n return labeled",
"def segment_func2(self):\n # computing neighboors graph\n A = self.boundaryprob_graph()\n\n # SpectralClustering segmentation\n sc = SpectralClustering(3, affinity='precomputed', n_init=10, assign_labels='discretize')\n labels = sc.fit_predict(A)\n\n return labels",
"def ColorSegmentation(image, kernel_sigma, color_seg, sim_threshold): \n \n color_seg = np.array(color_seg) / 255;\n \n if kernel_sigma >= 1:\n for cha_no in range(image.shape[2]):\n image[:, :, cha_no] = Denoising(image[:, :, cha_no], kernel_sigma);\n \n image = image / 255;\n mask = np.zeros((image.shape[0], image.shape[1]), dtype = bool);\n \n similarity = np.exp(-np.sum((image - color_seg) ** 2, axis = 2));\n mask[similarity > sim_threshold] = 1;\n\n return mask;",
"def segment_func1(self):\n # computing neighboors graph\n A = self.normal_graph()\n\n # SpectralClustering segmentation\n sc = SpectralClustering(3, affinity='precomputed', n_init=10, assign_labels='discretize')\n labels = sc.fit_predict(A)\n\n return labels",
"def processImage(im, options):\n\n#########################################################\n## YOU MUST ADAPT THE CODE IN THIS FUNCTIONS TO:\n## 1- CHANGE THE IMAGE TO THE CORRESPONDING COLOR SPACE FOR KMEANS\n## 2- APPLY KMEANS ACCORDING TO 'OPTIONS' PARAMETER\n## 3- GET THE NAME LABELS DETECTED ON THE 11 DIMENSIONAL SPACE\n#########################################################\n\n## 1- CHANGE THE IMAGE TO THE CORRESPONDING COLOR SPACE FOR KMEANS\n if options['colorspace'].lower() == 'ColorNaming'.lower():\n im = cn.ImColorNamingTSELabDescriptor(im)\n elif options['colorspace'].lower() == 'RGB'.lower():\n pass\n elif options['colorspace'].lower() == 'Lab'.lower():\n im = color.rgb2lab(im)\n elif options['colorspace'].lower() == 'HED'.lower():\n im = color.rgb2hed(im)\n elif options['colorspace'].lower() == 'HSV'.lower():\n im = color.rgb2hsv(im)\n '''\n elif options['colorspace'].lower() == 'opponent'.lower():\n im = color.rgb2lab(im)\n elif options['colorspace'].lower() == 'HSL'.lower():\n im = color.rgb2(im)\n elif options['colorspace'].lower() == 'Lab'.lower():\n im = color.rgb2lab(im)\n '''\n\n\n## 2- APPLY KMEANS ACCORDING TO 'OPTIONS' PARAMETER\n if options['K']<2: # find the bes K\n kmeans = km.KMeans(im, 0, options)\n kmeans.bestK()\n else:\n kmeans = km.KMeans(im, options['K'], options)\n kmeans.run()\n\n## 3- GET THE NAME LABELS DETECTED ON THE 11 DIMENSIONAL SPACE\n if options['colorspace'].lower() == 'Lab'.lower():\n kmeans.centroids = cn.ImColorNamingTSELabDescriptor((color.lab2rgb(kmeans.centroids.reshape(1,len(kmeans.centroids),3))*255).reshape(len(kmeans.centroids),3))\n elif options['colorspace'].lower() == 'HED'.lower():\n kmeans.centroids = cn.ImColorNamingTSELabDescriptor(color.hed2rgb(kmeans.centroids.reshape(1,len(kmeans.centroids),3)).reshape(len(kmeans.centroids),3))\n elif options['colorspace'].lower() == 'HSV'.lower():\n kmeans.centroids = cn.ImColorNamingTSELabDescriptor((color.hsv2rgb(kmeans.centroids.reshape(1,len(kmeans.centroids),3))*255).reshape(len(kmeans.centroids),3))\n elif options['colorspace'].lower() == 'RGB'.lower():\n kmeans.centroids = cn.ImColorNamingTSELabDescriptor(kmeans.centroids)\n\n#########################################################\n## THE FOLLOWING 2 END LINES SHOULD BE KEPT UNMODIFIED\n#########################################################\n colors, which = getLabels(kmeans, options)\n return colors, which, kmeans",
"def get_segmented_image(image_path):\n\n # Setup Caffe Segnet\n sys.path.append('/usr/local/lib/python2.7/site-packages')\n caffe_root = '/opt/caffe-segnet/'\n sys.path.insert(0, caffe_root + 'python')\n import caffe\n\n model = 'static/nn_files/segnet_model_driving_webdemo.prototxt'\n weights = 'static/nn_files/segnet_weights_driving_webdemo.caffemodel'\n colours = 'static/nn_files/camvid12.png'\n\n net = caffe.Net(model,weights, caffe.TEST)\n caffe.set_mode_cpu()\n\n input_shape = net.blobs['data'].data.shape\n output_shape = net.blobs['argmax'].data.shape\n label_colours = cv2.imread(colours).astype(np.uint8)\n\n resized_images = slice_and_resize(image_path)\n\n images = [ cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR) for img in resized_images ]\n\n\n\n def segment_image(image):\n input_image = image.transpose((2,0,1))\n input_image = image.transpose((2,0,1))\n input_image = np.asarray([input_image])\n\n out = net.forward_all(data=input_image)\n\n segmentation_ind = np.squeeze(net.blobs['argmax'].data)\n segmentation_ind_3ch = np.resize(segmentation_ind, (3, input_shape[2], input_shape[3]))\n segmentation_ind_3ch = segmentation_ind_3ch.transpose(1,2,0).astype(np.uint8)\n segmentation_rgb = np.zeros(segmentation_ind_3ch.shape, dtype=np.uint8)\n\n cv2.LUT(segmentation_ind_3ch, label_colours, segmentation_rgb)\n\n return segmentation_rgb\n\n segmented_images = map(segment_image, images)\n\n # 5. Create a single full image from the segmented parts\n segmented_full_image = join_images_horizontally(segmented_images)\n\n folder = \"static/images/segmented\"\n os.system(\"rm %s/*.png\" % (folder))\n\n name = next(tempfile._get_candidate_names())\n segment_path = \"%s/%s_resized.png\" % (folder, name)\n segmented_full_image.save(segment_path)\n return segment_path",
"def __init__(self, device=\"cuda:0\", *args, **kwargs):\n # source_file_wtsd = \"/g/kreshuk/data/leptin/sourabh_data_v1/Segmentation_results_fused_tp_1_ch_0_Masked_WatershedBoundariesMergeTreeFilter_Out1.tif\"\n source_file_wtsd = \"/g/kreshuk/hilt/projects/data/leptin_fused_tp1_ch_0/Masked_WatershedBoundariesMergeTreeFilter_Out1.h5\"\n # wtsd = torch.from_numpy(np.array(imread(source_file_wtsd).astype(np.long))).to(device)\n wtsd = torch.from_numpy(h5py.File(source_file_wtsd, \"r\")[\"data\"][:].astype(np.long)).to(device)\n slices = [0, 157, 316]\n slices_labels = [[1359, 1172, 364, 145, 282, 1172, 1359, 189, 809, 737],\n [886, 748, 1148, 1422, 696, 684, 817, 854, 158, 774],\n [1240, 807, 1447, 69, 1358, 1240, 129, 252, 62, 807]]\n m1, m2 = [], []\n # widths, heights = [], []\n self.outer_cntr_ds, self.inner_cntr_ds = [], []\n for slc, labels in zip(slices, slices_labels):\n bg = wtsd[:, slc, :] == 1\n bg_cnt = find_contours(bg.cpu().numpy(), level=0)\n cnt1 = bg_cnt[0] if bg_cnt[0].shape[0] > bg_cnt[1].shape[0] else bg_cnt[1]\n cnt2 = bg_cnt[1] if bg_cnt[0].shape[0] > bg_cnt[1].shape[0] else bg_cnt[0]\n for m, cnt in zip([m1, m2], [cnt1, cnt2]):\n mask = torch.zeros_like(wtsd[:, slc, :]).cpu()\n mask[np.round(cnt[:, 0]), np.round(cnt[:, 1])] = 1\n m.append(torch.from_numpy(binary_fill_holes(mask.long().cpu().numpy())).to(device).sum().item())\n self.outer_cntr_ds.append(Polygon2d(torch.from_numpy(approximate_polygon(cnt1, tolerance=1.2)).to(device)))\n self.inner_cntr_ds.append(Polygon2d(torch.from_numpy(approximate_polygon(cnt2, tolerance=1.2)).to(device)))\n #\n # for l in labels:\n # mask = wtsd[:, slc, :] == l\n # cnt = find_contours(mask.cpu().numpy(), level=0)[0]\n #\n # # img = torch.zeros_like(wtsd[:, slc, :]).cpu()\n # # img[cnt[:, 0], cnt[:, 1]] = 1\n # # plt.imshow(img);plt.show()\n #\n # ellipseT = fitEllipse(cnt.astype(np.int))\n # widths.append(ellipseT[1][1])\n # heights.append(ellipseT[1][0])\n #\n #\n #\n # self.masses = [np.array(m1).mean(), np.array(m2).mean()]\n # self.expected_ratio = np.array(widths).mean() / np.array(heights).mean()\n self.expected_ratio = 5.573091\n self.masses = [290229.3, 97252.3]",
"def NMS(dets, threshold):\n assert dets.dim() == 2 and dets.size(1) == 5, \"input error of dets\"\n\n x1 = dets[:,0]\n y1 = dets[:,1]\n x2 = dets[:,2]\n y2 = dets[:,3]\n score = dets[:,4]\n\n # 1 compute areas\n areas = (x2-x1+1) * (y2-y1+1)\n\n # 2 sort score \n order = score.sort(dim=0,descending=True)[1]\n\n # 3 del bbox of those IoU greater than threshold\n # import ipdb; ipdb.set_trace()\n mask = torch.zeros_like(order, dtype=torch.uint8).cuda()\n while order.numel() > 0:\n i = order[0]\n mask[i] = 1\n # compute IoU\n xx1 = torch.max(x1[i], x1[order[1:]])\n yy1 = torch.max(y1[i], y1[order[1:]])\n xx2 = torch.min(x2[i], x2[order[1:]])\n yy2 = torch.min(y2[i], y2[order[1:]])\n\n w = xx2 - xx1 + 1\n h = yy2 - yy1 +1\n w[w<0] = 0\n h[h<0] = 0\n inter_area = w*h\n IoU = inter_area/(areas[i]+areas[order[1:]]-inter_area)\n\n order = order[1:][IoU<=threshold]\n\n return mask",
"def __init__(self, device=\"cuda:0\", *args, **kwargs):\n source_file_wtsd = \"/g/kreshuk/data/leptin/sourabh_data_v1/Segmentation_results_fused_tp_1_ch_0_Masked_WatershedBoundariesMergeTreeFilter_Out1.tif\"\n source_file_wtsd = \"/g/kreshuk/hilt/projects/data/leptin_fused_tp1_ch_0/Masked_WatershedBoundariesMergeTreeFilter_Out1.h5\"\n # wtsd = torch.from_numpy(np.array(imread(source_file_wtsd).astype(np.long))).to(device)\n wtsd = torch.from_numpy(h5py.File(source_file_wtsd, \"r\")[\"data\"][:].astype(np.long)).to(device)\n slices = [0, 157, 316]\n label_1 = [1359, 886, 1240]\n label_2 = [1172, 748, 807]\n label_3 = [364, 1148, 1447]\n m1, m2, m3, m4, m5 = [], [], [], [], []\n self.outer_cntr_ds, self.inner_cntr_ds, self.celltype_1_ds, self.celltype_2_ds, self.celltype_3_ds = [], [], [], [], []\n for slc, l1, l2, l3 in zip(slices, label_1, label_2, label_3):\n bg = wtsd[:, slc, :] == 1\n bg_cnt = find_contours(bg.cpu().numpy(), level=0)\n cnt1 = bg_cnt[0] if bg_cnt[0].shape[0] > bg_cnt[1].shape[0] else bg_cnt[1]\n cnt2 = bg_cnt[1] if bg_cnt[0].shape[0] > bg_cnt[1].shape[0] else bg_cnt[0]\n for m, cnt in zip([m1, m2], [cnt1, cnt2]):\n mask = torch.zeros_like(wtsd[:, slc, :]).cpu()\n mask[np.round(cnt[:, 0]), np.round(cnt[:, 1])] = 1\n m.append(torch.from_numpy(binary_fill_holes(mask.long().cpu().numpy())).to(device).sum().item())\n\n mask = wtsd[:, slc, :] == l1\n m3.append(mask.long().sum().item())\n cnt3 = find_contours(mask.cpu().numpy(), level=0)[0]\n mask = wtsd[:, slc, :] == l2\n m4.append(mask.long().sum().item())\n cnt4 = find_contours(mask.cpu().numpy(), level=0)[0]\n mask = wtsd[:, slc, :] == l3\n m5.append(mask.long().sum().item())\n cnt5 = find_contours(mask.cpu().numpy(), level=0)[0]\n\n # img = torch.zeros_like(wtsd[:, slc, :]).cpu()\n # img[cnt1[:, 0], cnt1[:, 1]] = 1\n # plt.imshow(img);plt.show()\n # img = torch.zeros_like(wtsd[:, slc, :]).cpu()\n # img[cnt2[:, 0], cnt2[:, 1]] = 1\n # plt.imshow(img);plt.show()\n # img = torch.zeros_like(wtsd[:, slc, :]).cpu()\n # img[cnt3[:, 0], cnt3[:, 1]] = 1\n # plt.imshow(img);plt.show()\n # img = torch.zeros_like(wtsd[:, slc, :]).cpu()\n # img[cnt4[:, 0], cnt4[:, 1]] = 1\n # plt.imshow(img);plt.show()\n # img = torch.zeros_like(wtsd[:, slc, :]).cpu()\n # img[cnt5[:, 0], cnt5[:, 1]] = 1\n # plt.imshow(img);plt.show()\n\n self.outer_cntr_ds.append(Polygon2d(torch.from_numpy(approximate_polygon(cnt1, tolerance=1.2)).to(device)))\n self.inner_cntr_ds.append(Polygon2d(torch.from_numpy(approximate_polygon(cnt2, tolerance=1.2)).to(device)))\n self.celltype_1_ds.append(Polygon2d(torch.from_numpy(approximate_polygon(cnt3, tolerance=1.2)).to(device)))\n self.celltype_2_ds.append(Polygon2d(torch.from_numpy(approximate_polygon(cnt4, tolerance=1.2)).to(device)))\n self.celltype_3_ds.append(Polygon2d(torch.from_numpy(approximate_polygon(cnt5, tolerance=1.2)).to(device)))\n\n self.masses = [np.array(m1).mean(), np.array(m2).mean(), np.array(m3 + m4 + m5).mean()]\n self.fg_shape_descriptors = self.celltype_1_ds + self.celltype_2_ds + self.celltype_3_ds",
"def watershed(mask, img, plotImage = False, kernelSize = None):\n imgCopy = img.copy()\n maskCopy = np.array(mask.copy(), dtype=np.uint8)\n \n if kernelSize is None:\n kernelSize = 2\n\n # Finding sure foreground area\n #dist_transform = cv2.distanceTransform(mask, cv2.DIST_L2, 5)\n #ret, sure_fg = cv2.threshold(dist_transform,0.3*dist_transform.max(),255,0) #change the second argument to change the sensitivity \n maskClosed = skimage.morphology.closing(np.array(maskCopy, dtype=np.uint8))\n maskClosed = skimage.morphology.closing(np.array(maskClosed, dtype=np.uint8))\n kernel = np.ones((kernelSize,kernelSize), np.uint8)\n # maskCopy = img_as_bool(maskCopy)\n sure_fg = cv2.erode(maskClosed, kernel, iterations = 2) ###\n sure_fg = skimage.morphology.closing(np.array(sure_fg, dtype=np.uint8))\n # kernel = np.ones((2,2), np.uint8)\n # sure_fg = binary_closing(sure_fg, kernel)\n \n # sure background area\n #kernel = np.ones((5, 5), np.uint8)\n #sure_bg = cv2.dilate(mask, kernel, iterations = 1)\n sure_fg_bool = 1 - img_as_bool(sure_fg)\n # sure_bg = np.uint8(1 - morphology.medial_axis(sure_fg_bool)) ### \n sure_bg = np.uint8(1 - morphology.skeletonize(sure_fg_bool))\n sure_bg[0, :] = 1\n sure_bg[-1, :] = 1\n sure_bg[:, 0] = 1\n sure_bg[:, -1] = 1\n \n # Finding unknown region\n sure_fg = np.uint8(sure_fg)\n unknown = cv2.subtract(sure_bg, sure_fg)\n \n if plotImage:\n plt.figure()\n plt.imshow(sure_fg)\n plt.title(\"Inner Marker\")\n plt.figure()\n plt.imshow(sure_bg)\n plt.title(\"Outer Marker\")\n plt.figure()\n plt.imshow(unknown)\n plt.title(\"Unknown\")\n \n # Marker labelling\n ret, markers = cv2.connectedComponents(sure_fg)\n\n # Add one to all labels so that sure background is not 0, but 1\n markers = markers+1\n\n # Now, mark the region of unknown with zero\n markers[unknown==1] = 0\n \n if plotImage:\n plt.figure()\n plt.imshow(markers, cmap='jet')\n plt.title(\"Markers\")\n \n # Do watershed\n markers = cv2.watershed(imgCopy, markers)\n \n imgCopy[markers == -1] = [0, 255 ,0]\n\n if plotImage:\n plt.figure()\n plt.imshow(markers,cmap='jet')\n plt.title(\"Mask\")\n plt.figure()\n plt.imshow(img)\n plt.title(\"Original Image\")\n plt.figure()\n plt.imshow(imgCopy)\n plt.title(\"Marked Image\")\n plt.show()\n\n return markers",
"def KmeansSegmentation(image, kernel_sigma, N_classes, N_iter = 1, tol = 10e-6): \n\n if kernel_sigma >= 1:\n image = Denoising(image, kernel_sigma);\n \n nr, nc = image.shape;\n image_vec = image.reshape(nr * nc, 1);\n mask_pos = image_vec > 0;\n X = image_vec[mask_pos].reshape(mask_pos.sum(), 1);\n kmeans = KMeans(n_clusters = N_classes, random_state=0, max_iter = N_iter, tol = tol).fit(X);\n labels = kmeans.labels_; \n \n mask = np.zeros((nr * nc, 1)); \n mask[mask_pos] = labels;\n mask = mask.reshape(nr, nc);\n \n return mask;",
"def skeletonize(data,subscriber = 0):\n nx,ny=data.shape\n #zero padding\n image = zeros((nx+2,ny+2),'int16')\n image[:,:] = IP.BACKGROUND_COLOR\n image[1:-1,1:-1]=data\n\n erosionComplete = False\n runs = 0\n erosionComplete = False\n runs = 0\n isCorner = zeros((nx+2,ny+2),'bool')\n while not erosionComplete:\n ruleI = (image == IP.FEATURE_COLOR)\n XFeat, YFeat = ruleI.nonzero()\n numberFeatures = len(XFeat)\n erosedPixels = 0\n if runs == 0:\n progressbar = progress(numberFeatures)\n neighbourhood = zeros((nx+2,ny+2,3),'int16')\n for x,y in zip(XFeat.tolist(),YFeat.tolist()):\n fingerprint = checkNeighbours(image[x-1:x+2,y-1:y+2])\n neighbourhood[x,y,:]=numpy.array(fingerprint)\n\n ruleII = neighbourhood[:,:,1]>=1\n ruleIII = neighbourhood[:,:,0]> 1\n border = (ruleI & ruleII & ruleIII)\n #ruleIV and ruleV\n XBord, YBord = border.nonzero()\n XBord2 = []\n YBord2 = []\n for x,y in zip(XBord.tolist(),YBord.tolist()):\n if checkTransitions(image[x-1:x+2,y-1:y+2]) <= 1 and not isCorner[x,y]:\n image[x,y] = IP.BACKGROUND_COLOR\n erosedPixels += 1\n subscriber %= progressbar.step()\n else:\n XBord2.append(x)\n YBord2.append(y)\n for x,y in zip(XBord2,YBord2):\n if checkTransitions(image[x-1:x+2,y-1:y+2]) <= 1 and not isCorner[x,y]:\n image[x,y] = IP.BACKGROUND_COLOR\n erosedPixels += 1\n subscriber %= progressbar.step()\n if erosedPixels == 0:\n erosionComplete = True\n subscriber %= 100.\n else:\n xCorn, yCorn = (neighbourhood[:,:,2] > 0 ).nonzero()\n for x,y in zip(xCorn.tolist(),yCorn.tolist()):\n if neighbourhood[x,y,2] == 1:\n isCorner[x+1,y-1] = True\n elif neighbourhood[x,y,2] == 2:\n isCorner[x+1,y+1] = True\n elif neighbourhood[x,y,2] == 3:\n isCorner[x-1,y+1] = True\n elif neighbourhood[x,y,2] == 4:\n isCorner[x-1,y-1] = True\n runs += 1\n return image[1:-1,1:-1].copy()",
"def color_segmentation(self):\n cv.namedWindow(\"Segmentation parameters\")\n self.create_trackbar(\"h-u\", \"Segmentation parameters\")\n self.create_trackbar(\"h-l\",\"Segmentation parameters\")\n self.create_trackbar(\"s-u\",\"Segmentation parameters\")\n self.create_trackbar(\"s-l\",\"Segmentation parameters\")\n self.create_trackbar(\"v-u\",\"Segmentation parameters\")\n self.create_trackbar(\"v-l\",\"Segmentation parameters\")\n\n image = self.__image.copy()\n\n while True:\n var_h_upper = cv.getTrackbarPos(\"h-u\", \"Segmentation parameters\")\n var_h_lower = cv.getTrackbarPos(\"h-l\", \"Segmentation parameters\")\n var_s_upper = cv.getTrackbarPos(\"s-u\", \"Segmentation parameters\")\n var_s_lower = cv.getTrackbarPos(\"s-l\", \"Segmentation parameters\")\n var_v_upper = cv.getTrackbarPos(\"v-u\", \"Segmentation parameters\")\n var_v_lower = cv.getTrackbarPos(\"v-l\", \"Segmentation parameters\")\n\n lower = np.array([var_h_lower,var_s_lower,var_v_lower])\n upper = np.array([var_h_upper,var_s_upper,var_v_upper])\n\n bin_image = cv.inRange(self.hsv_image, lower, upper)\n cv.imshow(\"Segmentated image\", bin_image)\n\n if (cv.waitKey(1) & 0xFF == ord('q')):\n break\n cv.destroyAllWindows()",
"def Segmentation(WorkingDirectory, ListTrainingDataFile, ListImageName, modelname, noiseReduction, numberOfClasses, classesNamesList, ROI, ListAreaNames, fusionClassesY_N, maskY_N, imageY_N, InfoY_N, NFMaskY_N, BiggestBlobY_N, chosenArea, ReferencePicture):\n ### Create the folder where the output will be saved \n if maskY_N=='Y':\n if not os.path.exists(WorkingDirectory+'/Masks'): \n os.mkdir(WorkingDirectory+'/Masks')\n if imageY_N=='Y':\n if not os.path.exists(WorkingDirectory+'/MaskedImages'): \n os.mkdir(WorkingDirectory+'/MaskedImages')\n if NFMaskY_N=='Y':\n if not os.path.exists(WorkingDirectory+'/NonFilteredMasks'): \n os.mkdir(WorkingDirectory+'/NonFilteredMasks')\n\n \n ### Import and format the training data from the training data files.\n trainDataTab=np.array([[0,0,0,0,0,0,0,0,0,0,0,0,0]])\n for file in ListTrainingDataFile: \n f=open(file,\"r\",newline='') \n TrainData = list(csv.reader(f))\n f.close()\n TrainData.remove(['Class', 'Image', 'x','y','B','G','R','H','S','V','L','a','b'])\n TrainData=np.asarray(TrainData) \n trainDataTab=np.concatenate((trainDataTab, TrainData), axis=0)\n trainDataTab=np.delete(trainDataTab, (0), axis=0)\n if len(ListTrainingDataFile)>1: # if the user choose more than one file, a new file is saved combining all the selected files.\n np.savetxt(WorkingDirectory+'/trainData_'+str(numberOfClasses)+'classes.csv', trainDataTab, delimiter=\",\",header='Class,Image,x,y,B,G,R,H,S,V,L,a,b', comments='',fmt='%s')\n trainDataTab=np.delete(trainDataTab,1, 1)\n trainDataTab=np.delete(trainDataTab,1, 1)\n trainDataTab=np.delete(trainDataTab,1, 1)\n\n ### Format the list of ROI \n if ROI!='Whole pictures':\n ROI=ast.literal_eval(ROI)\n\n \n ### Train the model \n model=TrainModel(trainDataTab, modelname,classesNamesList) \n\n \n \n ### Get the size of the reference picture with a 1 pixel difference to avoid any resizing issue\n FirstImage=cv2.imread(ReferencePicture)\n ShapeFirstImage=np.shape(FirstImage)\n a=ShapeFirstImage[0]\n b=ShapeFirstImage[1]\n c=ShapeFirstImage[2]\n ShapeFirstImage2=(a+1,b,c)\n ShapeFirstImage3=(a+1,b+1,c)\n ShapeFirstImage4=(a+1,b-1,c)\n ShapeFirstImage5=(a,b,c)\n ShapeFirstImage6=(a,b+1,c)\n ShapeFirstImage7=(a,b-1,c) \n ShapeFirstImage8=(a-1,b,c)\n ShapeFirstImage9=(a-1,b+1,c)\n ShapeFirstImage10=(a-1,b-1,c) \n\n ### List initialization \n ListImageWrongSize=[]\n ListRunningTimes=[]\n ListTestDataTimes=[]\n ListApplyModelTimes=[]\n ListSaveOutputTimes=[]\n \n if BiggestBlobY_N=='Y':\n ListAirs=np.array([['Area/Plant','Image Name','Surface','Coverage', 'Aspect Ratio','Extent','Solidity', 'Equivalent Diameter', 'Main axe', 'Secondary axe']]) \n else:\n ListAirs=np.array([['Area/Plant','Image Name','Surface','Coverage']]) \n \n ### Main loop on the image list.\n for i in ListImageName:\n start_time = time.monotonic() \n TestImageBGR=cv2.imread(i) \n ImageName=i.split('/')\n ImageName=ImageName[-1] \n ImageName=ImageName.split('.')\n ImageName=ImageName[0] \n ######################################THESE THREE LINES CAN BE USED TO ADD a TIME FILTER ( only keep the pictures between certain hours)\n# hour=float(ImageName[8:10]) #get the time the picture was taken from the name of the file\n hour=float(10)\n if 8<hour<16: # apply a time condition \n ######################################\n if ROI!='Whole pictures':\n if np.shape(TestImageBGR)==ShapeFirstImage or np.shape(TestImageBGR)==ShapeFirstImage2 or np.shape(TestImageBGR)==ShapeFirstImage3 or np.shape(TestImageBGR)==ShapeFirstImage4 or np.shape(TestImageBGR)==ShapeFirstImage5 or 
np.shape(TestImageBGR)==ShapeFirstImage6 or np.shape(TestImageBGR)==ShapeFirstImage7 or np.shape(TestImageBGR)==ShapeFirstImage8 or np.shape(TestImageBGR)==ShapeFirstImage9 or np.shape(TestImageBGR)==ShapeFirstImage10 : # Test the size of the picture\n for j in range(len(ROI)): \n #Crop the picture for each ROI\n x1,y1,x2,y2=ROI[j]\n if x1>x2:\n a=x1\n x1=x2\n x2=a\n if y1>y2:\n a=y1\n y1=y2\n y2=a \n croppedImagej=TestImageBGR[y1:y2,x1:x2] \n \n NameArea=ListAreaNames[j] \n #Initialize the output names\n OutputMaskName=''\n OutputimageName=''\n OutputNFMaskName=''\n \n #Create the output names and folders\n if maskY_N=='Y': \n croppedMaskDirectoryArea=WorkingDirectory+'/Masks/'+NameArea \n if not os.path.exists(croppedMaskDirectoryArea): \n os.mkdir(croppedMaskDirectoryArea)\n OutputMaskName=croppedMaskDirectoryArea+'/'+ImageName+'_crop_'+NameArea+'_mask.png'\n \n if imageY_N=='Y': \n croppedMaskedImagesDirectoryArea=WorkingDirectory+'/MaskedImages/'+NameArea \n if not os.path.exists(croppedMaskedImagesDirectoryArea): \n os.mkdir(croppedMaskedImagesDirectoryArea) \n OutputimageName=croppedMaskedImagesDirectoryArea+'/'+ImageName+'_crop_'+NameArea+'_maskedImage.png'\n \n if NFMaskY_N=='Y':\n croppedNonFilteredMaskDirectoryArea=WorkingDirectory+'/NonFilteredMasks/'+NameArea \n if not os.path.exists(croppedNonFilteredMaskDirectoryArea): \n os.mkdir(croppedNonFilteredMaskDirectoryArea) \n OutputNFMaskName=croppedNonFilteredMaskDirectoryArea+'/'+ImageName+'_crop_'+NameArea+'_NFMask.png'\n \n # Segment the image with the function ApplyModelAndSaveOutput\n ListAirs, ListTestDataTimes,ListApplyModelTimes,ListSaveOutputTimes=ApplyModelAndSaveOutput(model, modelname, croppedImagej, ImageName, NameArea, noiseReduction, numberOfClasses, classesNamesList, fusionClassesY_N, maskY_N, InfoY_N, imageY_N, NFMaskY_N, BiggestBlobY_N, chosenArea, OutputMaskName, OutputimageName, OutputNFMaskName, ListAirs, ListTestDataTimes,ListApplyModelTimes,ListSaveOutputTimes)\n \n \n print(str(ImageName)+' '+str(NameArea)+' Done!') \n else: #if the picture is not the right size \n ListImageWrongSize.append(i) \n print(str(ImageName)+' Wrong size')\n \n else: #if the user wants to use the whole pictures\n #Create the output names\n OutputMaskName=WorkingDirectory+'/Masks/'+ImageName+'_mask.png'\n OutputimageName=WorkingDirectory+'/MaskedImages/'+ImageName+'_maskedImage.png'\n OutputNFMaskName=WorkingDirectory+'/NonFilteredMasks/'+ImageName+'_NFMask.png'\n \n # Segment the image with the function ApplyModelAndSaveOutput\n ListAirs, ListTestDataTimes,ListApplyModelTimes,ListSaveOutputTimes=ApplyModelAndSaveOutput(model, modelname, TestImageBGR, ImageName, '', noiseReduction, numberOfClasses, classesNamesList, fusionClassesY_N, maskY_N, InfoY_N, imageY_N, NFMaskY_N, BiggestBlobY_N, chosenArea, OutputMaskName, OutputimageName, OutputNFMaskName, ListAirs, ListTestDataTimes,ListApplyModelTimes,ListSaveOutputTimes)\n \n \n print(str(ImageName)+' Done!')\n \n end_time = time.monotonic()\n RunningTime=timedelta(seconds=end_time - start_time)\n sec=float(RunningTime.days*86400+RunningTime.seconds+RunningTime.microseconds/1000000)\n \n if i==ListImageName[0]: # get an estimation of the running time after the first picture is done\n print('Running time for 1 image =', RunningTime)\n print('Total running time estimation =', RunningTime*len(ListImageName))\n ListRunningTimes.append(sec) \n \n \n else: # usefull only if you apply a time filter \n ListImageWrongSize.append(i) \n print(str(ImageName)+' Wrong time')\n \n # Save the info 
file \n if len(ListAirs)>1:\n np.savetxt(WorkingDirectory+'/'+'InformationFile.csv', ListAirs, delimiter=\",\", comments='', fmt='%s') \n \n return ListImageWrongSize,ListRunningTimes, ListTestDataTimes,ListApplyModelTimes,ListSaveOutputTimes",
"def filter_by_size(img_segm):\n \n numbers = np.zeros(np.max(img_segm-1))\n for i in range(1,np.max(img_segm)):\n numbers[i-1] = np.sum(img_segm==i)\n \n indexes = np.arange(1,np.max(img_segm))\n #indexes = indexes[numbers>np.mean(numbers)] #Deletes the 1-pixel elements\n indexes = indexes[numbers>500] #Deletes the 1-pixel elements\n \n segm_filtered = np.zeros(img_segm.shape)\n j=1\n for i in (indexes):\n segm_filtered[img_segm==i] = j\n j+=1\n return segm_filtered",
"def watershed_segment_2(M,click_coords):\n \n # todo: choose these structures based on aspect ratio of M and input parameters\n sel = np.ones((4,10)) # for opening\n sel2 = np.ones((15,75)) # for local thresholding\n sel3 = np.ones((2,5)) # for erosion\n # get a few points in the center of each blob\n \n # threshold\n #bw = ((M>=ndi.percentile_filter(M,80,footprint=sel2)) & (M>=scoreatpercentile(M.flatten(),60)))\n \n score = stats.percentileofscore(M.flatten(),M[int(click_coords[0][1]),int(click_coords[0][0])])\n bw = (M>=stats.scoreatpercentile(M.flatten(),score))\n\n # open and erode\n #bools = sp.zeros((M.shape[0],M.shape[1]),int)\n #bools[int(click_coords[0]),int(click_coords[1])] = 1\n #blobs = sp.where(bools == 1,True,False)\n blobs = snm.binary_opening(bw,structure=sel)\n blobs = snm.binary_dilation(blobs,iterations=3)\n blobs = snm.binary_erosion(blobs,structure=sel3)\n \n \n # label\n labels,_ = ndi.label(blobs)\n labels[labels > 0] += 1\n #labels[0,0] = 1\n\n # rescale and cast to int16, then use watershed\n M2 = rescaled(M,0,65000).astype(np.uint16)\n newlabels = ndi.watershed_ift(M2,labels)\n \n # get rid of groups unless they have the right number of pixels\n counts = np.bincount(newlabels.flatten())\n old2new = np.arange(len(counts))\n old2new[(counts < 100) | (counts > 600)] = 0\n newlabels = old2new[newlabels]\n \n return newlabels",
"def basicProcessing(volume, sigma, order, output, mode, truncate):\n\n\n #### Filters ###\n\n result = gaussian_filter(input=volume, sigma=sigma, order=order, output=output, mode=mode, truncate=truncate)\n\n val = threshold_otsu(result)\n print(\"val : {}\".format(val))\n\n mask = np.zeros(volume.shape, dtype=np.int8)\n mask[volume > val] = 1\n #mask = mask.astype(int)\n\n print(\"mask shape: {}\".format(mask.shape))\n print(mask)\n\n\n #### Morphological Operation ###\n\n # Opening removes small objects\n r1 = binary_opening(mask, structure=np.ones((3, 3, 3))).astype(np.int8)\n\n # Closing removes small holes\n r2 = binary_closing(r1, structure=np.ones((3, 3, 3))).astype(np.int8)\n\n\n # 3x3x3 structuring element with connectivity 4 or 8\n struct1 = generate_binary_structure(3, 1) # no diagonal elements\n #struct1 = generate_binary_structure(3, 2) # with diagonal elements\n ############struct1 = struct1.astype(int)\n print (struct1)\n\n\n #r3 = binary_dilation(r2).astype(int)\n r3 = binary_dilation(r2, structure=struct1).astype(int) # using a structure element\n\n # Erosion removes objects smaller than the structure\n r4 = binary_erosion(r3, structure=np.ones((3, 3, 3))).astype(np.int8)\n\n\n #### Measurements ###\n\n struct2 = np.ones((3, 3, 3), dtype=np.int8)\n labeled_array, num_features = label(r4, structure=struct2)\n\n #print(labeled_array)\n print(num_features)\n\n return labeled_array, num_features",
"def showComponents(self, mask):\n\n from skimage import measure\n\n thresh = cv2.threshold(mask, 0, 255, cv2.THRESH_BINARY)[1]\n labels = measure.label(thresh, neighbors=8, background=0)\n for label in range(0,len(labels)):\n img = np.zeros(mask.shape)\n # if this is the background label, ignore it\n if label == 0:\n continue\n img[labels==label]=255\n numPixels = cv2.countNonZero(img)\n\n \t# if the number of pixels in the component is sufficiently\n \t# large, then add it to our mask of \"large blobs\"\n if numPixels > 500:\n showme(img, 'Contour '+str(label))"
] | [
"0.73299015",
"0.6700284",
"0.6588071",
"0.6206197",
"0.61541283",
"0.60890365",
"0.6068497",
"0.6058113",
"0.60470617",
"0.603688",
"0.6011224",
"0.6002645",
"0.59923077",
"0.5974445",
"0.59183335",
"0.58174556",
"0.5809067",
"0.578748",
"0.57815266",
"0.5739608",
"0.5718258",
"0.56802434",
"0.56680644",
"0.559872",
"0.55845606",
"0.55842906",
"0.55701894",
"0.5565093",
"0.55648494",
"0.55638134"
] | 0.7139125 | 1 |
Perform sensitivity analysis (via backpropagation; Simonyan et al. 2014) to determine the relevance of each image pixel for the classification decision. Return a relevance heatmap over the input image. | def sensitivity_analysis(model, image_tensor, device, postprocess='abs'):
if postprocess not in [None, 'abs', 'square']:
raise ValueError("postprocess must be None, 'abs' or 'square'")
# Forward pass.
    X = torch.from_numpy(image_tensor)  # convert the input numpy array to a tensor (torch.from_numpy does not accept plain lists)
X.unsqueeze_(0) # add channel of 1
X.unsqueeze_(0) # mimic batch of 1
X = X.to(device)
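    # Enable gradient tracking on the input so the backward pass yields d(output)/d(pixel).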
X.requires_grad_()
output = model(X)
# Backward pass.
model.zero_grad()
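    # backward() with no argument requires a scalar output, i.e. a model that returns a single value per image.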
output.backward()
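    # X.grad has X's shape (batch and channel dims prepended by the unsqueeze calls); [0, 0] drops them to recover the image-shaped map.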
relevance_map = X.grad.data[0,0].cpu().numpy()
# Postprocess the relevance map.
if postprocess == 'abs': # as in Simonyan et al. (2014)
return np.abs(relevance_map)
elif postprocess == 'square': # as in Montavon et al. (2018)
return relevance_map**2
elif postprocess is None:
return relevance_map | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def sensitivity(y_test, y_pred):\n\tmatrix = confusion_matrix(y_test, y_pred)\n\treturn matrix[0][0] / (matrix[0][0] + matrix[0][1])",
"def main():\n # initialize the class labels and set the seed of the pseudorandom\n # number generator so we can reproduce our results\n labels = [\"dog\", \"cat\", \"panda\"]\n np.random.seed(1)\n\n # be * learned * by our model, but for the sake of this example, let's use random values\n W = np.random.randn(3, 3072)\n b = np.random.randn(3)\n\n # load our example image, resize it, and then flatten it into our\n # \"feature vector\" representation\n orig = cv2.imread(\"beagle.png\")\n image = cv2.resize(orig, (32, 32)).flatten()\n\n # compute the output scores by taking the dot product between the\n # weight matrix and image pixels, followed by adding in the b\n scores = W.dot(image) + b\n\n # loop over the scores + labels and display them\n for (label, score) in zip(labels, scores):\n print(\"[INFO] {}: {:.2f}\".format(label, score))\n\n # draw the label with the highest score on the image as our prediction\n cv2.putText(\n orig, \"Label: {}\".format(labels[np.argmax(scores)]), (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 0), 2\n )\n\n # display our input image\n cv2.imshow(\"Image\", orig)\n cv2.waitKey(0)",
"def calculate_sensitivity(self, x_train, y_train):\n model_f_activations = self.model_f.predict(x_train)\n reshaped_labels = np.array(y_train).reshape((x_train.shape[0], 1))\n tf_y_labels = tf.convert_to_tensor(reshaped_labels, dtype=np.float32)\n loss = k.binary_crossentropy(tf_y_labels, self.model_h.output)\n grad = k.gradients(loss, self.model_h.input)\n gradient_func = k.function([self.model_h.input], grad)\n calc_grad = gradient_func([model_f_activations])[0]\n sensitivity = np.dot(calc_grad, self.cav)\n self.sensitivity = sensitivity\n self.y_labels = y_train",
"def predict(image_path, wrapper):\n \"\"\"\n #Don't forget to store your prediction into ImgPred\n img_prediction = ImgPred(...)\n \"\"\"\n\n #This is where all of our code will probably go. Here are the steps to success\n\n \n #Step One: Make a list which will contain the locations of every character in our source Image.\n SymPredList = []\n\n #Step Two: Go down that list we just made and use the code from PA4 in conjunction with our new Model to analyze each character. George made this part.\n #This is the find a character part of the code. Max and George worked it out.\n im = cv2.imread(image_path,0)\n (thresh, imbw) = cv2.threshold(im,20,255,cv2.THRESH_BINARY | cv2.THRESH_OTSU)\n #cv2.imwrite('clapfuck.jpg', imbw)\n im3,contours,hierarchy = cv2.findContours(imbw,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)\n idx = 0\n for cnt in contours:\n idx += 1\n x1,y1,w,h = cv2.boundingRect(cnt)\n roi=imbw[y1:y1+h,x1:x1+w]\n\n #Step Two.1: Make a Numpy Array of all the pixels starting from the top left corner of an identified character to the bottom right corner of the identified character.\n height, width = roi.shape\n if height >= width:\n padded = cv2.copyMakeBorder(roi,0,0,(height-width)//2,(height-width)//2,cv2.BORDER_CONSTANT,value=[0,0,0])\n else:\n padded = cv2.copyMakeBorder(roi,(width-height)//2,(width-height)//2,0,0,cv2.BORDER_CONSTANT,value=[0,0,0])\n Smol = cv2.resize(padded, (28, 28))\n (thresh, evaluateMe) = cv2.threshold(Smol, 20, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)\n #scipy.misc.imsave(os.path.basename(file), ree)\n #Step Two.2: Feed that numpy into our PA4 image analyzer converter thing but using our new trained model\n evaluateMeMe = numpy.reshape(evaluateMe, (1, 28, 28, 1))\n prediction = tf.argmax(y_conv,1)\n final_number = prediction.eval(feed_dict={x:evaluateMeMe,y_:numpy.zeros((1,40)), keep_prob:1.0})\n #Step Two.3: Record what we think it is as the prediction field of the SymPred we are currently on\n final_guess = wrapper.label_types[int(final_number)]\n DisSymPred = SymPred(final_guess,x1,y1,x1+w,y1-h)\n SymPredList.append(DisSymPred)\n\n #Step Three: Wrap that now complete SymPred list, in an ImgPred, fill out all the fields of that ImgPred, and then return that shit.\n img_prediction = ImgPred(os.path.basename(image_path), SymPredList)\n\n #Step Four: Were Donezo\n return img_prediction",
"def prediction(score, test_y, lossname):\n if lossname == 'contrastive_loss':\n prediction = sigmoid(euclidean_distance(score))\n\n threshold = 0.5\n correction = 0\n error = 0\n\n for i in range(len(prediction)):\n if prediction[i] >= threshold:\n pred = 1\n else:\n pred = 0\n \n if pred == test_y[i]:\n correction += 1\n else:\n error += 1\n \n accuracy = 1.0 * correction / (correction + error)\n \n return correction, error, accuracy",
"def score_pixels(self, img) -> np.ndarray:\n # Settings to run thresholding operations on\n settings = [{'name': 'lab_b', 'cspace': 'LAB', 'channel': 2, 'clipLimit': 2.0, 'threshold': 150},\n {'name': 'value', 'cspace': 'HSV', 'channel': 2, 'clipLimit': 6.0, 'threshold': 220},\n {'name': 'lightness', 'cspace': 'HLS', 'channel': 1, 'clipLimit': 2.0, 'threshold': 210}]\n\n # Perform binary thresholding according to each setting and combine them into one image.\n scores = np.zeros(img.shape[0:2]).astype('uint8')\n for params in settings:\n # Change color space\n color_t = getattr(cv2, 'COLOR_RGB2{}'.format(params['cspace']))\n gray = cv2.cvtColor(img, color_t)[:, :, params['channel']]\n\n # Normalize regions of the image using CLAHE\n clahe = cv2.createCLAHE(params['clipLimit'], tileGridSize=(8, 8))\n norm_img = clahe.apply(gray)\n\n # Threshold to binary\n ret, binary = cv2.threshold(norm_img, params['threshold'], 1, cv2.THRESH_BINARY)\n\n scores += binary\n\n # Save images\n self.viz_save(params['name'], gray)\n self.viz_save(params['name'] + '_binary', binary)\n\n return cv2.normalize(scores, None, 0, 255, cv2.NORM_MINMAX)",
"def classify(self, predict_wx, threshold):\n # predict_wx = self.compute_wx(data_instances, self.model_weights.w_, self.model_weights.intercept_)\n\n def predict(x):\n prob = activation.sigmoid(x)\n pred_label = 1 if prob > threshold else 0\n return prob, pred_label\n\n predict_table = predict_wx.mapValues(predict)\n return predict_table",
"def vis_detections(im, class_name, dets, thresh=0.8):\n\n dict = {'HolderA': 'Holder', 'WheelA': 'WheelA', 'WheelB': 'WheelB', 'BrakeA': 'Brake', 'SpringA': 'Spring',\n 'BuckleA': 'BuckleA', 'BuckleB': 'BuckleB', 'TubeA': 'Tube', 'NutA': 'NutA', 'ScrewA': 'ScrewA',\n 'NutB': 'NutB', 'ScrewB': 'ScrewB',\n 'WireA': 'Wire', 'PlateA': 'PlateA', 'PlateB': 'PlateB', 'PlateD': 'PlateC', 'PlateE': 'PlateD',\n 'BoltA': 'Bolt', 'LoopB': 'Loop', 'JointA': 'JointA', 'JointB': 'JointB', 'FixatorA': 'Fixator',\n 'BearingA': 'Bearing', 'PlugA': 'Plug'}\n\n for i in range(np.minimum(10, dets.shape[0])):\n bbox = tuple(int(np.round(x)) for x in dets[i, :4])\n score = dets[i, -1]\n if score > thresh:\n # Color site: http://www.wahart.com.hk/rgb.htm\n if class_name == 'HolderA':\n color = (255, 255, 0) # Cyan\n elif class_name == 'WheelA':\n color = (212, 255, 127) # Aquamarina\n elif class_name == 'WheelB':\n color = (99, 99, 238) # IndianRed2\n elif class_name == 'BrakeA':\n color = (99, 99, 238) # IndianRed2\n elif class_name == 'SpringA':\n color = (180, 130, 70) # SteelBlue\n elif class_name == 'BuckleA':\n color = (205, 0, 0) # MediumBlue\n elif class_name == 'BuckleB':\n color = (170, 205, 102) # MediumAquamarine\n elif class_name == 'BuckleC':\n color = (0, 252, 124) # LawnGreen\n elif class_name == 'BuckleD':\n color = (50, 205, 50) # LimeGreen\n elif class_name == 'TubeA':\n color = (147, 112, 219) # PaleVioletRed\n elif class_name == 'ScrewA':\n color = (240, 32, 160) # Purple\n elif class_name == 'ScrewB':\n color = (0, 165, 255) # Orange1\n elif class_name == 'ScrewC':\n color = (48, 48, 255) # Firebrick1\n elif class_name == 'NutA':\n color = (0, 255, 255) # Yellow\n elif class_name == 'NutB':\n color = (255, 144, 30) # DodgerBlue\n elif class_name == 'NutC':\n color = (180, 238, 180) # DarkSeaGreen2\n elif class_name == 'WireA':\n color = (255, 255, 255) # White\n elif class_name == 'PlateA':\n color = (0, 69, 255) # OrangeRed\n elif class_name == 'PlateB':\n color = (102, 205, 0) # SpringGreen3\n elif class_name == 'PlateD':\n color = (0, 255, 0) # Green\n elif class_name == 'PlateE':\n color = (0, 140, 250) # DarkOrange\n elif class_name == 'BoltA':\n color = (255, 255, 0) # Cyan\n elif class_name == 'LoopB':\n color = (180, 105, 255) # HotPink\n elif class_name == 'JointA':\n color = (105, 140, 255) # Salmon1\n elif class_name == 'JointB':\n color = (255, 0, 255) # Magenta3\n elif class_name == 'FixatorA':\n color = (0, 205, 102) # Chartreuse3\n elif class_name == 'BearingA':\n color = (185, 218, 255) # PeachPuff\n elif class_name == 'PlugA':\n color = (193, 193, 255) # RosyBrown1\n else:\n color = (139, 0, 139) # DarkMagenta\n cv2.rectangle(im, bbox[0:2], bbox[2:4], color, 2)\n # cv2.putText(im, '%s: %.3f' % (class_name, score), (bbox[0], bbox[1] + 15), cv2.FONT_HERSHEY_COMPLEX,\n # 0.5, color, thickness=1)\n cv2.putText(im, '%s: %.3f' % (dict[class_name], score), (bbox[0], bbox[1] + 15), cv2.FONT_HERSHEY_COMPLEX,\n 0.5, color, thickness=1)\n return im",
"def get_classification_simulator(self, image):\n\n r_channel = image[:,:,2]\n g_channel = image[:,:,1]\n\n\n\n # Threshold color channel\n s_rgy_min = 50\n s_thresh_min = 245\n s_thresh_max = 255\n \n #s_binary = np.zeros_like(r_channel)\n r_binary = np.zeros_like(r_channel)\n g_binary = np.zeros_like(r_channel)\n y_binary = np.zeros_like(r_channel)\n \n #s_binary[((r_channel >= s_thresh_min) & (r_channel <= s_thresh_max)) | ((g_channel >= s_thresh_min) & (g_channel <= s_thresh_max))] = 1\n \n \n r_binary[((r_channel >= s_thresh_min) & (r_channel <= s_thresh_max)) & (g_channel <= s_rgy_min)] = 1\n g_binary[((g_channel >= s_thresh_min) & (g_channel <= s_thresh_max)) & (r_channel <= s_rgy_min)] = 1\n y_binary[((r_channel >= s_thresh_min) & (r_channel <= s_thresh_max)) & ((g_channel >= s_thresh_min) & (g_channel <= s_thresh_max))] = 1\n \n\n #res = cv2.bitwise_and(img,img,mask = s_binary)\n \n #maxx=image.shape[1]\n maxy=image.shape[0]\n \n y_top=0\n window_size_y=50\n y_bottom=y_top+window_size_y\n \n max_color=0\n tf_color=TrafficLight.UNKNOWN\n \n while (y_bottom< maxy):\n #print(img[y_top:y_bottom,:,:])\n rs= r_binary[y_top:y_bottom,:].sum()\n gs= g_binary[y_top:y_bottom,:].sum()\n ys= y_binary[y_top:y_bottom,:].sum()\n if (rs>max_color):\n max_color=rs\n tf_color=TrafficLight.RED\n if (gs>max_color):\n max_color=gs\n tf_color=TrafficLight.GREEN\n if (ys>max_color):\n max_color=ys\n tf_color=TrafficLight.YELLOW\n y_top+=window_size_y\n y_bottom+=window_size_y\n \n if (max_color<100):\n tf_color=TrafficLight.UNKNOWN\n \n\n\n return tf_color",
"def predict_gesture_and_visualise_result(raw_image):\n key = cv2.waitKey(5) & 0xFF\n\n if key == ord('c'):\n setup_hsv_boundaries()\n if key == ord('q'):\n exit()\n\n img_to_predict, img_conversions = convert_img_for_prediction(raw_image, l_hsv_thresh, u_hsv_thresh,\n image_processing_kind, image_size)\n\n # If the model is trained with shapes (1,50,50), uncomment this line.\n # img_to_predict = np.moveaxis(img_to_predict, -1, 0)\n\n class_num, normalized_vals, class_name = predict(img_to_predict)\n\n texts = [\n '~~~~ PREDICTION MODE ~~~~',\n '',\n 'model directory: ' + str(CONFIG['predictor_model_dir']),\n 'predicted label: ' + class_name,\n '',\n 'Controls:',\n '- Press \"c\" to Calibrate',\n '- Press \"q\" to Quit:'\n ]\n\n coy = img_conversions['center_offset_y']\n cox = img_conversions['center_offset_x']\n # This number provides an offset on each side, that should account for bounding box being of some size.\n visualise_prediction_result(normalized_vals, CONFIG['classes'], cox, coy, CONFIG['size'] - 100)\n visualise(img_conversions, texts)\n\n simulator.perform_action(class_name, cox, coy)",
"def vis_detections(im, class_name, dets, image_name, thresh=0.5):\n inds = np.where(dets[:, -1] >= thresh)[0]\n max_inds = 0\n max_score = 0.0\n if len(inds) == 0:\n # print('Warning: no target detected!')\n return\n elif len(inds) > 1:\n # print('Warning: ' + str(len(inds)) + ' targets detected! Choose the highest one')\n for i in inds:\n if(dets[i, -1] > max_score):\n max_inds = i\n max_score = dets[i, -1]\n\n# im = im[:, :, (2, 1, 0)]\n# fig, ax = plt.subplots(figsize=(12, 12))\n# ax.imshow(im, aspect='equal')\n # for i in inds:\n # bbox = dets[i, :4]\n # score = dets[i, -1]\n #print max_inds\n bbox = dets[max_inds, :4]\n score = dets[max_inds, -1]\n\n# ax.add_patch(\n# plt.Rectangle((bbox[0], bbox[1]),\n# bbox[2] - bbox[0],\n# bbox[3] - bbox[1], fill=False,\n# edgecolor='red', linewidth=3.5)\n# )\n# ax.text(bbox[0], bbox[1] - 2,\n# '{:s} {:.3f}'.format(class_name, score),\n# bbox=dict(facecolor='blue', alpha=0.5),\n# fontsize=14, color='white')\n\n # end for\n #print image_name, class_name\n #print score\n # file.writelines([image_name,'\\t',class_name,'\\t',str(score),'\\n'])\n # ax.set_title(('{} detections with '\n # 'p({} | box) >= {:.1f}').format(class_name, class_name,\n # thresh),fontsize=14)\n # plt.axis('off')\n # plt.tight_layout()\n # plt.draw()\n\t### SAVE IMAGES ? ###\n save_img_dir = os.path.join(cfg.ROOT_DIR, 'result', 'test_img')\n # if not os.path.exists(save_img_dir):\n # os.makedirs(save_img_dir)\n # plt.savefig(os.path.join(save_img_dir, image_name + '_' + class_name))\n\n boxes = {'boxes': ((bbox[0], bbox[1]), bbox[2] - bbox[0], bbox[3] - bbox[1])}\n \n save_mat_dir = os.path.join(cfg.ROOT_DIR, 'result', 'test_box')",
"def evaluate(labels, predictions):\n positive_count = 0\n positive = 0\n negative_count = 0\n negative = 0\n for i in range(len(labels)):\n if labels[i] == 1:\n positive_count+=1\n if predictions[i] == 1:\n positive +=1\n else:\n negative_count+=1\n if predictions[i] == 0:\n negative +=1\n\n sensitivity = positive / positive_count\n specificity = negative / negative_count\n\n return (sensitivity, specificity)",
"def compute_saliency(model, guided_model, img_path, layer_name=conv_name, cls=-1, visualize=False, save=True):\n #--------- slide image get --------------\n ori_img = Image.open(img_path)\n\n #define slide range\n slide_xl = 0\n slide_xr = 100\n slide_yu = 0\n slide_yd = 100\n name_cnt_int = 1\n\n for m in range(9):\n for i in range(9):\n slide_img = ori_img.crop((slide_xl,slide_yu,slide_xr,slide_yd))\n name_cnt_str = str(name_cnt_int)\n roop_str = str(m)\n slide_name = './slide_img/slide_img_' + roop_str + '_' + name_cnt_str + '.jpg'\n slide_img.save(slide_name)\n preprocessed_input = load_image(slide_name)\n\n pred = model.predict(preprocessed_input)[0]\n #print(pred)\n top_n = 3\n top_indices = pred.argsort()[-top_n:][::-1]\n result = [(classes[i], pred[i]) for i in top_indices]\n #print(\"number: \",name_cnt_str)\n print(\"number:\",roop_str,name_cnt_str)\n print(\"xrange: \",slide_xl,slide_xr)\n print(\"yrange: \",slide_yu,slide_yd)\n for x in result:\n print(x)\n\n if cls == -1:\n cls = np.argmax(pred)\n \n print(\"argmax:\",cls)\n if cls == 1:\n print(\"\\n\")\n print(\"-----Careful-----\")\n print(\"-----Doubt spotted-----\")\n print(\"\\n\")\n\n if cls == 2:\n print(\"\\n\")\n print(\"-----Warning!!!-----\")\n print(\"-----Bad spotted!!!!!-----\")\n print(\"\\n\")\n\n gradcam = grad_cam(model, preprocessed_input, cls, layer_name)\n gb = guided_backprop(guided_model, preprocessed_input, layer_name)\n guided_gradcam = gb * gradcam[..., np.newaxis]\n cls = -1\n\n if save:\n cam_name = './cam_image/' + roop_str + '_' + name_cnt_str + '.jpg'\n jetcam = cv2.applyColorMap(np.uint8(255 * gradcam), cv2.COLORMAP_JET)\n jetcam = (np.float32(jetcam) + load_image(slide_name, preprocess=False)) / 2\n cv2.imwrite(cam_name, np.uint8(jetcam))\n #cv2.imwrite('guided_backprop.jpg', deprocess_image(gb[0]))\n #cv2.imwrite('guided_gradcam.jpg', deprocess_image(guided_gradcam[0]))\n \n name_cnt_int = int(name_cnt_str)\n name_cnt_int += 1\n #x軸スライド幅\n slide_xl += 50\n slide_xr += 50\n \n \n if visualize:\n \n plt.figure(figsize=(15, 10))\n plt.subplot(131)\n plt.title('GradCAM')\n plt.axis('off')\n plt.imshow(load_image(img_path, preprocess=False))\n plt.imshow(gradcam, cmap='jet', alpha=0.5)\n\n plt.subplot(132)\n plt.title('Guided Backprop')\n plt.axis('off')\n plt.imshow(np.flip(deprocess_image(gb[0]), -1))\n \n plt.subplot(133)\n plt.title('Guided GradCAM')\n plt.axis('off')\n plt.imshow(np.flip(deprocess_image(guided_gradcam[0]), -1))\n plt.show()\n\n #右端までスライド完了、y軸方向へスライド\n name_cnt_int = 0\n slide_xl = 0\n slide_xr = 100\n slide_yu = slide_yu + 50\n slide_yd = slide_yd + 50\n \n \n\n return gradcam, gb, guided_gradcam",
"def evaluate(labels, predictions):\n #labels and predictions\n truePos = 0\n trueNeg = 0\n for data in range(len(labels)):\n if((predictions[data] == 1) and (predictions[data] == labels[data])):\n truePos+=1\n elif((predictions[data] == 0) and (predictions[data] == labels[data])):\n trueNeg+=1\n sensitivity = truePos/(len(labels) + 1)\n specificity = trueNeg/(len(labels) + 1)\n return (sensitivity, specificity)\n \n\n #raise NotImplementedError",
"def predict(self, xFeat):\n yHat = [] # variable to store the estimated class label\n # TODO\n matrix = xFeat.to_numpy()\n for row in matrix:\n node = self.root\n while node.split != None:\n feat, val = node.split\n if row[feat] >= val: \n node = node.left\n else:\n node = node.right\n #row = np.delete(row, feat, 0)\n yHat.append(majority(node.array))\n return yHat",
"def sensitivity(\n targets: List[int], preds: List[float], threshold: float = 0.5\n) -> float:\n return recall(targets, preds, threshold)",
"def process(self):\n self.output_image = cv.adaptiveThreshold(\n self.input_image,\n # self.MIN_THRESHOLD,\n self.MAX_PIXEL_VALUE,\n cv.ADAPTIVE_THRESH_GAUSSIAN_C,\n cv.THRESH_BINARY_INV,\n self.BLOCK_SIZE,\n self.CONSTANT,\n )\n return self.output_image",
"def vis_detections(lossfunc, image_name, class_name, dets, thresh=0.5):\n if lossfunc == 'vanilla':\n label = 'Smooth L1 Loss'\n if lossfunc == 'robust':\n label = 'Robust L1 Loss (10%)'\n\n inds = np.where(dets[:, -1] >= thresh)[0]\n if len(inds) == 0:\n return\n\n # Load the demo image\n im_file = os.path.join(cfg.DATA_DIR, 'demo', image_name)\n im = cv2.imread(im_file)\n\n im = im[:, :, (2, 1, 0)]\n fig, ax = plt.subplots()\n ax.imshow(im, aspect='equal')\n for i in inds:\n bbox = dets[i, :4]\n score = dets[i, -1]\n\n ax.add_patch(\n plt.Rectangle((bbox[0], bbox[1]),\n bbox[2] - bbox[0],\n bbox[3] - bbox[1], fill=False,\n edgecolor='red', linewidth=3.5)\n )\n ax.text(bbox[0], bbox[1] - 2,\n '{:s} {:.3f}'.format(class_name, score),\n bbox=dict(facecolor='blue', alpha=0.5),\n color='white')\n\n ax.set_title('{}, {} Class'.format(label, class_name.capitalize()))\n plt.axis('off')\n plt.tight_layout()\n fig.savefig(os.path.join('cs231n', 'viz', lossfunc, '{}_{}.png'.format(\n image_name.split('.')[0], class_name)))",
"def display_yolo(img, out, threshold):\n import numpy as np\n numClasses = 20\n anchors = [1.08, 1.19, 3.42, 4.41, 6.63, 11.38, 9.42, 5.11, 16.62, 10.52]\n\n def sigmoid(x, derivative=False):\n return x * (1 - x) if derivative else 1 / (1 + np.exp(-x))\n\n def softmax(x):\n scoreMatExp = np.exp(np.asarray(x))\n return scoreMatExp / scoreMatExp.sum(0)\n\n clut = [(0, 0, 0), (255, 0, 0), (255, 0, 255), (0, 0, 255), (0, 255, 0),\n (0, 255, 128), (128, 255, 0), (128, 128, 0), (0, 128, 255),\n (128, 0, 128), (255, 0, 128), (128, 0, 255), (255, 128, 128),\n (128, 255, 128), (255, 255, 0),\n (255, 128, 128), (128, 128, 255), (255, 128, 128),\n (128, 255, 128), (128, 255, 128)]\n\n draw = ImageDraw.Draw(img)\n for cy in range(0, 13):\n for cx in range(0, 13):\n for b in range(0, 5):\n channel = b * (numClasses + 5)\n tx = out[channel][cy][cx]\n ty = out[channel + 1][cy][cx]\n tw = out[channel + 2][cy][cx]\n th = out[channel + 3][cy][cx]\n tc = out[channel + 4][cy][cx]\n\n x = (float(cx) + sigmoid(tx)) * 32\n y = (float(cy) + sigmoid(ty)) * 32\n\n w = np.exp(tw) * 32 * anchors[2 * b]\n h = np.exp(th) * 32 * anchors[2 * b + 1]\n\n confidence = sigmoid(tc)\n\n classes = np.zeros(numClasses)\n for c in range(0, numClasses):\n classes[c] = out[channel + 5 + c][cy][cx]\n classes = softmax(classes)\n detectedClass = classes.argmax()\n\n if threshold < classes[detectedClass] * confidence:\n color = clut[detectedClass]\n x = x - w / 2\n y = y - h / 2\n draw.line((x, y, x + w, y), fill=color, width=3)\n draw.line((x, y, x, y + h), fill=color, width=3)\n draw.line((x + w, y, x + w, y + h), fill=color, width=3)\n draw.line((x, y + h, x + w, y + h), fill=color, width=3)\n\n return img",
"def get_scores(self):\n hist = self.confusion_matrix\n # hist = [TN,FP;FN,TP]\n acc = np.diag(hist).sum() / hist.sum()\n acc_cls = np.diag(hist) / hist.sum(axis=1)\n acc_cls = np.nanmean(acc_cls)\n iu = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist))\n # iou = iu.sum() / self.n_classes\n mean_iou = np.nanmean(iu) # if classes = 2: iou = miou\n freq = hist.sum(axis=1) / hist.sum()\n fwavacc = (freq[freq > 0] * iu[freq > 0]).sum()\n cls_iou = dict(zip(range(self.n_classes), iu))\n\n ##############################################\n tn = hist[0, 0]\n tp = np.diag(hist).sum() - tn\n fp = np.triu(hist, 1).sum()\n fn = np.tril(hist, -1).sum()\n precision = tp / (tp + fp)\n recall = tp / (tp + fn)\n f1 = 2 * precision * recall / (precision + recall)\n\n # for medical img, img_seg \\in [0,1]\n dice = 2 * tp / (tp + tp + fn + fp)\n # dice = f1-score\n dsc = 2 * tp / (tp + fn + fp)\n # dsc = jaccard\n # voe = 2 * abs(fp + fn) / (tp + tp + fn + fp)\n # voe = 1 - dsc\n\n k2 = {\n # \"Overall Acc: \\t\": acc,\n 'Mean Acc': float(judge_nan(acc_cls)),\n # \"FreqW Acc : \\t\": fwavacc,\n 'Mean IoU': float(judge_nan(mean_iou)),\n 'F1-score': float(judge_nan(f1)),\n 'DSC': float(judge_nan(dsc)),\n 'Precision': float(judge_nan(precision)),\n 'Recall': float(judge_nan(recall)),\n }\n\n return k2",
"def evaluate(labels, predictions):\n correct_positive = 0\n correct_negative = 0\n total_positive = 0\n total_negative = 0\n\n for i in range(len(labels)):\n if labels[i] == 1:\n total_positive += 1\n if predictions[i] == 1:\n correct_positive += 1\n else:\n total_negative += 1\n if predictions[i] == 0:\n correct_negative += 1\n\n sensitivity = correct_positive / total_positive\n specificity = correct_negative / total_negative\n\n return sensitivity, specificity",
"def predict(self, img):\n visual_diff = {}\n\n # TODO: get keypoints and descriptors from input image using SIFT\n # store keypoints in variable kp and descriptors in des\n\n kp = self.sift.detect(img)\n _,des = self.sift.compute(img,kp)\n\n for k in self.signs.keys():\n #cycle trough templage images (k) and get the image differences\n visual_diff[k] = self._compute_prediction(k, img, kp, des)\n\n if visual_diff:\n # print visual_diff\n # TODO: convert difference between images (from visual_diff)\n # to confidence values (stored in template_confidence)\n diff = 0.0\n for k in self.signs.keys():\n visual_diff[k] = 1/visual_diff[k]\n diff += visual_diff[k]\n for k in self.signs.keys():\n self.score[k] = visual_diff[k]/diff\n template_confidence = {k: self.score[k] for k in self.signs.keys()}\n\n else: # if visual diff was not computed (bad crop, homography could not be computed)\n # set 0 confidence for all signs\n template_confidence = {k: 0 for k in self.signs.keys()}\n\n return template_confidence",
"def decision_heatmaps(obs):\n global model\n assert obs > 4 and obs < 1788, \"Chosen observation is outside the range of the demonstration episode.\"\n saved_observations = np.load('saved_observations.npy')\n state = saved_observations[obs-3:obs+1]\n state_batch = np.expand_dims(state, axis=0)\n q_vals = model.compute_q_values(state)\n print(q_vals)\n decision = np.argmax(q_vals)\n print(decision)\n decision_encodings = ['None','Up','Right','Left','Down','Right-Up','Left-Up','Right-Down','Left-Down']\n decision_node = model.model.output[:, decision]\n last_conv_layer = model.model.layers[3]\n grads = K.gradients(decision_node, last_conv_layer.output)[0]\n pooled_grads = K.mean(grads, axis=(0,2,3))\n frame = Image.fromarray(state[-1])\n iterate = K.function([model.model.input],[pooled_grads, last_conv_layer.output[0]])\n pooled_grads_value, conv_layer_output_value = iterate([state_batch])\n for i in range(64):\n conv_layer_output_value[i, :, :] *= pooled_grads_value[i]\n\n heatmap = np.mean(conv_layer_output_value, axis=0)\n heatmap = np.maximum(heatmap, 0)\n heatmap /= np.max(heatmap)\n heatmap = cv2.resize(heatmap, (84, 84))\n heatmap = np.uint8(255 * heatmap)\n superimposed_img = heatmap * .3 + np.array(frame) * .35\n\n plt.xlabel(decision_encodings[decision])\n plt.imshow(superimposed_img, cmap='gray')\n plt.show()",
"def image_quality(img):\n # convert bgr image to gray -> float32\n score = 0.0\n if img is None:\n return score\n\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n x = gray.astype(np.float32)\n h, w = x.shape[0], x.shape[1]\n\n # horizontal\n d_h = x[:,1:] - x[:,:-1]\n w_bound = int(8*(np.floor(w/8.0)-1)) + 1\n B_h = np.mean(np.abs(d_h[:,7:w_bound:8]))\n A_h = (8.0 * np.mean(np.abs(d_h)) - B_h) / 7.0\n sig_h = np.sign(d_h)\n left_sig, right_sig = sig_h[:,:-2], sig_h[:,1:-1]\n Z_h = np.mean((left_sig * right_sig)<0)\n\n # vertical\n d_v = x[1:, :] - x[:-1, :]\n h_bound = int(8*(np.floor(h/8.0)-1)) + 1\n B_v = np.mean(np.abs(d_v[7:h_bound:8, :]))\n A_v = (8.0 * np.mean(np.abs(d_v)) - B_v) / 7.0\n sig_v = np.sign(d_v)\n up_sig, down_sig = sig_v[:-2, :], sig_v[1:-1, :]\n Z_v = np.mean((up_sig * down_sig)<0)\n\n # combine the weights\n B = (B_h + B_v)/2.0\n A = (A_h + A_v)/2.0\n Z = (Z_h + Z_v)/2.0\n\n # quality prediction\n alpha = -245.8909\n beta = 261.9373\n gamma1 = -239.8886 / 10000.0 \n gamma2 = 160.1664 / 10000.0 \n gamma3 = 64.2859 / 10000.0 \n\n # corner case of a black / white frame\n if np.abs(A) < 1e-3 or np.abs(B) < 1e-3 or np.abs(Z) < 1e-3:\n score = 0.0\n else:\n score = alpha + beta*(B**gamma1)*(A**gamma2)*(Z**gamma3)\n\n return score",
"def explain(self):\n # build the 2 versions of the model\n model = self.build_model()\n last_conv_model = self.build_cut_model()\n\n for i, label_name in enumerate(self.label_names):\n # This is the algorithm for the last convolution layer's tensor image\n # Get the index of the image that was classified correctly with the most confidence for the class\n predicted_col_proba = np.array(self.predicted_labels)[0][:, i]\n predicted_col_argsort = predicted_col_proba.argsort()[::-1]\n predicted_col = (predicted_col_proba > 0.2).astype(int)\n true_col = self.true_labels[:, 0]\n\n representative_image_index = None\n for most_probable_arg_index in predicted_col_argsort:\n if predicted_col[most_probable_arg_index] == true_col[most_probable_arg_index]:\n representative_image_index = most_probable_arg_index\n break\n\n # Resize the image to fit the neural network and keep the original resized image\n original_img = io.imread('{}/{}/{}'.format(path_to_img_directory, self.ex_format, np.array(self.image_names)[representative_image_index]))\n original_img = cv2.normalize(original_img, None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)\n original_img = cv2.resize(original_img, dsize=(self.ex_input_size, self.ex_input_size), interpolation=cv2.INTER_CUBIC)\n img = np.expand_dims(original_img, axis=0)\n original_img = original_img[:, :, :3]\n\n # Get the output of the neural network for this image as a tensor\n model.predict(np.array(img))\n class_output = model.output[:, i]\n last_conv_layer = model.get_layer(self.ex_last_conv_layer_name1).output\n # if self.model_name == 'vit':\n # last_conv_layer = tf.nn.relu(tf.reshape(last_conv_layer[:, :256, :], (-1, 16, 16, 1024)))\n\n # Get the output for the cut model\n cut_img = last_conv_model.predict(np.array(img))[0]\n if self.model_name == 'vit':\n cut_img = np.reshape(cut_img[:256, :], (16, 16, 1024))\n cut_img = np.mean(cut_img, axis=-1)\n cut_img = cv2.normalize(cut_img, None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)\n if self.model_name == 'vit':\n cut_img[0, 0] = np.mean(cut_img)\n cut_img = cv2.normalize(cut_img, None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)\n cut_img = cv2.resize(cut_img, (self.ex_input_size, self.ex_input_size))\n\n # This is the algorithm of the Grad-CAM model\n # Refine the output of the last convolutional layer according to the class output\n grads = K.gradients(class_output, last_conv_layer)[0]\n if self.model_name == 'vit':\n last_conv_layer = tf.reshape(last_conv_layer[:, :256, :], (-1, 16, 16, 1024))\n last_conv_layer = last_conv_layer / tf.norm(last_conv_layer)\n\n grads = tf.reshape(grads[:, :256, :], (-1, 16, 16, 1024))\n grads = grads / tf.norm(grads)\n\n pooled_grads = K.mean(grads, axis=(0, 1, 2))\n iterate = K.function([model.input], [pooled_grads, last_conv_layer[0]])\n pooled_grads_value, conv_layer_output_value = iterate([img])\n for j in range(self.ex_last_conv_layer_filter_number):\n conv_layer_output_value[:, :, j] *= pooled_grads_value[j]\n\n # Create a 16x16 heatmap and scale it to the same size as the original image\n heatmap = np.mean(conv_layer_output_value, axis=-1)\n heatmap = np.maximum(heatmap, 0)\n heatmap /= np.max(heatmap)\n heatmap = cv2.resize(heatmap, (self.ex_input_size, self.ex_input_size))\n heatmap = np.uint8(255 * heatmap)\n heatmap = cv2.applyColorMap(heatmap, cv2.COLORMAP_JET)\n heatmap = cv2.normalize(heatmap, None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)\n superimposed_img = cv2.addWeighted(original_img, 0.7, 
heatmap, 0.4, 0)\n\n # save the original image\n plt.matshow(original_img)\n plt.axis('off')\n plt.title(label_name, fontdict={'fontsize': 18})\n plt.savefig('{}/{}/{}_{}.png'.format(path_to_explainable, 'original', self.model_name, label_name), bbox_inches='tight', pad_inches=0.1)\n\n # save the cut image\n plt.matshow(cut_img, cmap=plt.get_cmap('Spectral'))\n plt.colorbar(shrink=0.75, ticks=np.linspace(0, 1, 11).tolist())\n plt.axis('off')\n plt.title(label_name, fontdict={'fontsize': 18})\n plt.savefig('{}/{}/{}_{}.png'.format(path_to_explainable, 'cut', self.model_name, label_name), bbox_inches='tight', pad_inches=0.1)\n\n # save the superimposed gradcam image\n plt.matshow(superimposed_img, cmap=plt.get_cmap('Spectral'))\n plt.colorbar(shrink=0.75, ticks=np.linspace(0, 1, 11).tolist())\n plt.axis('off')\n plt.title(label_name, fontdict={'fontsize': 18})\n plt.savefig('{}/{}/{}_{}.png'.format(path_to_explainable, 'gradcam', self.model_name, label_name), bbox_inches='tight', pad_inches=0.1)",
"def analyze_confusion_matrix(confusion_matrix):\n \n n_classes = len(confusion_matrix)\n \n # True positive : correct prediction, ie the diagonal of the confusion matrix\n TP = confusion_matrix.diag()\n for c in range(n_classes):\n idx = torch.ones(n_classes).byte()\n idx[c] = 0\n TN = confusion_matrix[idx.nonzero()[:, None], idx.nonzero()].sum()\n FP = confusion_matrix[c, idx].sum()\n FN = confusion_matrix[idx, c].sum()\n\n sensitivity = (TP[c] / (TP[c] + FN))\n specificity = (TN / (TN + FP))\n \n # Display the analysis in the console\n print('Class {}\\nTP {}, TN {}, FP {}, FN {}'.format(c, TP[c], TN, FP, FN))\n print(\"Sensitivity :\", sensitivity)\n print(\"Specificity : {0}\\n------\".format(specificity))",
"def map_objects_classifier_evaluation(self):\n df = self.results[(self.results['iou'] > 0.7)]\n y_true = df['true_class']\n y_pred = df['pred_class']\n print(classification_report(y_true, y_pred))\n matrix = confusion_matrix(y_true, y_pred)\n matrix = matrix.astype('float') / matrix.sum(axis=1)[:, np.newaxis]\n import seaborn as sns\n\n plt.figure(figsize=(10, 7))\n sns.set(font_scale=2.4)\n sns.heatmap(matrix, annot=True, annot_kws={'size': 25},\n cmap=plt.cm.Reds)\n # Add labels to the plot\n class_names = ['background', 'building', 'water']\n tick_marks = np.arange(len(class_names))\n tick_marks2 = tick_marks + 0.28\n tick_marks2[0] = tick_marks2[0] - 0.2\n tick_marks = tick_marks + 0.5\n plt.xticks(tick_marks, class_names, rotation=0)\n plt.yticks(tick_marks2, class_names, rotation=90)\n plt.xlabel('Predicted label', labelpad=13)\n plt.ylabel('True label', labelpad=13)\n plt.show()",
"def evaluate(self, heatmap, img): # noqa\n # compress heatmap to 2D if needed\n if heatmap.ndim == 3:\n heatmap = heatmap.mean(0)\n heatmap = heatmap.mean(0)\n\n # sort pixel in attribution\n num_pixels = torch.numel(heatmap)\n _, indices = torch.topk(heatmap.flatten(), num_pixels)\n indices = np.unravel_index(indices.cpu().numpy(), heatmap.size())\n\n # apply deletion game\n deletion_perturber = PixelPerturber(img, torch.zeros_like(img))\n deletion_scores = self._procedure_perturb(deletion_perturber,\n num_pixels, indices)\n\n # apply insertion game\n blurred_img = self.gaussian_blurr(img)\n insertion_perturber = PixelPerturber(blurred_img, img)\n insertion_scores = self._procedure_perturb(insertion_perturber,\n num_pixels, indices)\n\n # calculate AUC\n insertion_auc = trapezoid(insertion_scores,\n dx=1. / len(insertion_scores))\n deletion_auc = trapezoid(deletion_scores, dx=1. / len(deletion_scores))\n\n # deletion_img and insertion_img are final results, they are only used for debug purpose\n # TODO check if it is necessary to convert the Tensors to np.ndarray\n return {\n \"del_scores\": deletion_scores,\n \"ins_scores\": insertion_scores,\n \"del_img\": deletion_perturber.get_current(),\n \"ins_img\": insertion_perturber.get_current(),\n \"ins_auc\": insertion_auc,\n \"del_auc\": deletion_auc\n }",
"def evaluate_SURF(x,y,NN,feature,inst,data,multiclass_map,maxInst):\r\n diff = 0\r\n if not data.discretePhenotype: #if continuous phenotype\r\n same_class_bound=data.phenSD #boundary to determine similarity between classes for continuous attributes\r\n \r\n if data.attributeInfo[feature][0]: #Continuous Attribute\r\n #determining boundaries for continuous attributes\r\n min_bound=data.attributeInfo[feature][1][0]\r\n max_bound=data.attributeInfo[feature][1][1]\r\n \r\n diff_hit=0 #initializing the score to 0\r\n diff_miss=0\r\n \r\n count_hit=0\r\n count_miss=0\r\n \r\n if data.discretePhenotype:\r\n if len(data.phenotypeList) > 2: #multiclass endpoint\r\n class_Store = {}\r\n missClassPSum = 0\r\n for each in multiclass_map:\r\n if each != y[inst]: #Store all miss classes\r\n class_Store[each] = [0,0] #stores cout_miss and diff_miss\r\n missClassPSum += multiclass_map[each]\r\n \r\n for i in range(len(NN)): #for all nearest neighbors\r\n if x[inst][feature]!=data.labelMissingData and x[NN[i]][feature]!=data.labelMissingData: # add appropriate normalization.\r\n if y[inst]==y[NN[i]]: #HIT\r\n count_hit+=1\r\n if x[inst][feature]!=x[NN[i]][feature]:\r\n if data.attributeInfo[feature][0]: #Continuous Attribute\r\n diff_hit-=abs(x[inst][feature]-x[NN[i]][feature])/(max_bound-min_bound)\r\n else:#Discrete\r\n diff_hit-=1\r\n else: #MISS\r\n for missClass in class_Store:\r\n if y[NN[i]] == missClass:\r\n class_Store[missClass][0] += 1\r\n if x[inst][feature]!=x[NN[i]][feature]:\r\n if data.attributeInfo[feature][0]: #Continuous Attribute\r\n class_Store[missClass][1]+=abs(x[inst][feature]-x[NN[i]][feature])/(max_bound-min_bound)\r\n else:#Discrete\r\n class_Store[missClass][1]+=1\r\n \r\n #Corrects for both multiple classes, as well as missing data.\r\n missSum = 0 \r\n for each in class_Store:\r\n missSum += class_Store[each][0]\r\n missAverage = missSum/float(len(class_Store))\r\n \r\n hit_proportion=count_hit/float(len(NN)) #Correcting for Missing Data.\r\n for each in class_Store:\r\n diff_miss += (multiclass_map[each]/float(missClassPSum))*class_Store[each][1]\r\n \r\n diff = diff_miss*hit_proportion\r\n miss_proportion=missAverage/float(len(NN))\r\n diff += diff_hit*miss_proportion\r\n \r\n else: #Binary Class Problem\r\n for i in range(len(NN)): #for all nearest neighbors\r\n if x[inst][feature]!=data.labelMissingData and x[NN[i]][feature]!=data.labelMissingData: # add appropriate normalization.\r\n \r\n if y[inst]==y[NN[i]]: #HIT\r\n count_hit+=1\r\n if x[inst][feature]!=x[NN[i]][feature]:\r\n if data.attributeInfo[feature][0]: #Continuous Attribute\r\n diff_hit-=abs(x[inst][feature]-x[NN[i]][feature])/(max_bound-min_bound)\r\n else:#Discrete\r\n diff_hit-=1\r\n else: #MISS\r\n count_miss+=1\r\n if x[inst][feature]!=x[NN[i]][feature]:\r\n if data.attributeInfo[feature][0]: #Continuous Attribute\r\n diff_miss+=abs(x[inst][feature]-x[NN[i]][feature])/(max_bound-min_bound)\r\n else:#Discrete\r\n diff_miss+=1 \r\n\r\n #Take hit/miss inbalance into account (coming from missing data)\r\n hit_proportion=count_hit/float(len(NN))\r\n miss_proportion=count_miss/float(len(NN))\r\n \r\n diff=diff_hit*miss_proportion + diff_miss*hit_proportion #applying weighting scheme to balance the scores \r\n \r\n else: #continuous endpoint\r\n for i in range(len(NN)): #for all nearest neighbors\r\n if x[inst][feature]!=data.labelMissingData and x[NN[i]][feature]!=data.labelMissingData: # add appropriate normalization.\r\n \r\n if abs(y[inst]-y[NN[i]])<same_class_bound: #HIT\r\n count_hit+=1 \r\n if 
x[inst][feature]!=x[NN[i]][feature]:\r\n if data.attributeInfo[feature][0]: #Continuous Attribute\r\n diff_hit-=abs(x[inst][feature]-x[NN[i]][feature])/(max_bound-min_bound)\r\n else:#Discrete\r\n diff_hit-=1\r\n else: #MISS\r\n count_miss+=1\r\n if x[inst][feature]!=x[NN[i]][feature]:\r\n if data.attributeInfo[feature][0]: #Continuous Attribute\r\n diff_miss+=abs(x[inst][feature]-x[NN[i]][feature])/(max_bound-min_bound)\r\n else:#Discrete\r\n diff_miss+=1\r\n\r\n #Take hit/miss inbalance into account (coming from missing data, or inability to find enough continuous neighbors)\r\n hit_proportion=count_hit/float(len(NN))\r\n miss_proportion=count_miss/float(len(NN))\r\n \r\n diff=diff_hit*miss_proportion + diff_miss*hit_proportion #applying weighting scheme to balance the scores \r\n \r\n return diff",
"def scoring_function(self, model, y_true, y_predicted_probability):"
] | [
"0.62368286",
"0.60717547",
"0.5930693",
"0.58902764",
"0.58234394",
"0.5802739",
"0.5779326",
"0.5775463",
"0.57620656",
"0.5677118",
"0.5666969",
"0.56534714",
"0.56438595",
"0.56366694",
"0.56218135",
"0.56197983",
"0.5616983",
"0.56108636",
"0.5610154",
"0.5609649",
"0.560248",
"0.5588804",
"0.5587006",
"0.5579388",
"0.5578639",
"0.557833",
"0.55664295",
"0.5566285",
"0.5565144",
"0.55475044"
] | 0.66770697 | 0 |
In this function, the server receives data from the front end, calls the preprocessing program and, after constructing the model, returns the plan generated by our model back to the front end. | def receiveData():
preference = request.get_json()
program = preference.pop('program')
enroll_yr = preference.pop('enroll_yr')
enroll_sem = preference.pop('enroll_sem')
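    # spec defaults to 0 when the front end supplies no specialisation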
spec = 0
if 'spec' in preference:
spec = int(preference['spec'])
preference.pop('spec')
program_link = 'https://programsandcourses.anu.edu.au/2019/program/'
program_link = str(program_link) + str(program)
    # determine which type of semester the enrolled semester falls in:
    # S1 in an odd year, S2 in an odd year, S1 in an even year or S2 in an even year
if int(enroll_yr)%2 == 1:
if int(enroll_sem)%2 == 1:
sem = 1
else:
sem = 2
else:
if int(enroll_sem)%2 == 1:
sem = 3
else:
sem = 4
    # call the pre-processing program, which puts the model in the files test1.mzn & test1.dzn
scraper = dp.DegreeRuleScraper(str(program_link))
orders = scraper.build_program_order_struct()
orders.buildAModel(preference, sem, spec)
    # call MiniZinc to solve the model
cmd = 'minizinc --solver OSICBC test1.mzn test1.dzn > plan.txt'
os.system(cmd)
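    # the solver output redirected to plan.txt is read back and converted to JSON for the front end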
jsondata = readmyJson('plan')
return jsonify(jsondata) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_process(\n self,\n model,\n client_plans,\n client_config,\n server_config,\n server_averaging_plan,\n client_protocols=None,\n ):\n process = FLProcess(\n model=model,\n client_plans=client_plans,\n client_config=client_config,\n server_config=server_config,\n client_protocols=client_protocols,\n server_averaging_plan=server_averaging_plan,\n )\n\n self.processes[process.id] = process\n return self.processes[process.id]",
"def handle(self, data, context):\n \n model_input = self.preprocess(data)\n model_out = self.inference(model_input)\n return self.postprocess(model_out)",
"def pollination(self):\n self.validate_req(ignore_failure=False)\n runner_fn = self.model_runner.execute_model\n return self.do_handle_request(runner_fn)",
"def main_modeling_pipeline():\n\n\n data_df = pd.read_csv('gs://aiplatformfilipegracio2020/head_train_data.csv')\n data_df = data_df[[LABEL, 'price', 'days_on_site']]\n\n class_weights = calculate_class_weights(data_df[LABEL])\n print('class weights', class_weights)\n logging.info('Data loaded and processed')\n train_ds, val_ds, test_ds = make_tf_datasets(data_df, LABEL)\n logging.info('Tensorflow datasets created')\n\n with strategy.scope():\n logging.info('Inside strategy')\n simple_feature_layer = make_simple_feature_layer(data_df)\n logging.info('Going to make model')\n simple_model = make_simple_model(simple_feature_layer)\n\n logging.info('Going fit model')\n simple_model_results, simple_model = model_fit_and_evaluate(model=simple_model,\n train_ds=train_ds,\n val_ds=val_ds,\n test_ds=test_ds,\n class_weights=class_weights,\n epochs=TRAINING_EPOCHS,\n job_name='simple_model')\n\n simple_model.save('gs://aiplatformfilipegracio2020/')",
"def _execute_after_reading(self):\n # Auxiliary parameters object for the CheckAndPepareModelProcess\n params = KratosMultiphysics.Parameters(\"{}\")\n params.AddValue(\"computing_model_part_name\",self.settings[\"computing_model_part_name\"])\n params.AddValue(\"problem_domain_sub_model_part_list\",self.settings[\"problem_domain_sub_model_part_list\"])\n params.AddValue(\"processes_sub_model_part_list\",self.settings[\"processes_sub_model_part_list\"])\n # Assign mesh entities from domain and process sub model parts to the computing model part.\n import check_and_prepare_model_process_structural\n check_and_prepare_model_process_structural.CheckAndPrepareModelProcess(self.main_model_part, params).Execute()\n\n # Import constitutive laws.\n materials_imported = self.import_constitutive_laws()\n if materials_imported:\n self.print_on_rank_zero(\"::[MechanicalSolver]:: \", \"Constitutive law was successfully imported.\")\n else:\n self.print_on_rank_zero(\"::[MechanicalSolver]:: \", \"Constitutive law was not imported.\")",
"def plan(self):\n\n self.logger.info(\"*** start planning......\")\n\n request_list = self.dbh.get_requests()\n\n if len(request_list) > 0:\n if not self._handle_requests(request_list):\n self.logger.error(\"while planning\")\n return False\n else:\n self.logger.error(\"while reading plan\")\n return False\n\n return True",
"def main():\n df = prepro_last()\n X, y = train_build(df)\n fit_store(X, y)",
"def returnTheTable():\r\n preference = request.get_json()\r\n # Obtain the list containing replaced courses and the to-be-updated plan\r\n replaced = preference.pop('replaced')\r\n oldPlan = dict()\r\n readPlan = open('plan.txt')\r\n try:\r\n content = str(readPlan.read())\r\n courses = content.split(' ')[:-1]\r\n current = 0\r\n for i in courses:\r\n if re.match(r'^[0-9]', str(i)):\r\n current = int(i)\r\n else:\r\n oldPlan[i] = current\r\n finally:\r\n readPlan.close()\r\n \r\n # collect user information\r\n program = preference.pop('program')\r\n enroll_yr = preference.pop('enroll_yr')\r\n enroll_sem = preference.pop('enroll_sem')\r\n # user's specialisation \r\n spec = 0\r\n if 'spec' in preference:\r\n spec = int(preference['spec'])\r\n preference.pop('spec')\r\n \r\n program_link = 'https://programsandcourses.anu.edu.au/2019/program/'\r\n\r\n program_link = str(program_link) + str(program)\r\n if int(enroll_yr)%2 == 1:\r\n if int(enroll_sem)%2 == 1:\r\n sem = 1\r\n else:\r\n sem = 2\r\n else:\r\n if int(enroll_sem)%2 == 1:\r\n sem = 3\r\n else:\r\n sem = 4\r\n # call the pre-processing program which put the model in file test1.mzn & test1.dzn\r\n scraper = dp.DegreeRuleScraper(str(program_link))\r\n orders = scraper.build_program_order_struct()\r\n orders.buildAModel(preference, sem, spec, oldPlan, replaced)\r\n \r\n # call MiniZinc to solve for the model\r\n cmd = 'minizinc --solver OSICBC test1.mzn test1.dzn > re-plan.txt'\r\n os.system(cmd)\r\n # read the new plan from the file containing it.\r\n jsondata = readmyJson('re-plan')\r\n return jsonify(jsondata)",
"def main_training():\n if request.method == 'GET':\n print(\"Working directory: \", path_creator())\n train_knn_model_params=[config_gettype('train_knn_model','FRS.ini',param) for param in inspect.getfullargspec(train_knn_model)[0]]\n train_knn_model(*train_knn_model_params)\n return_text=\"FRS_training_model.py completed\"\n return jsonify(return_text)\n else:\n return_text1 = \"Опа\"\n return jsonify(return_text1)",
"def download_and_preprocess(self):\n print('Preparing steering angle database.')\n print('Downloading...')\n self.download()\n print('Preprocessing...')\n self.preprocess()",
"def main():\n if len(sys.argv) == 2 and sys.argv[1] == 'train':\n trainer = FlightModelTrainer()\n trainer.train()\n return 0\n\n if len(sys.argv) == 2 and sys.argv[1] == 'graphics':\n trainer = FlightModelTrainer()\n trainer.visualize()\n return 0\n\n predictor = FlightPredictor(path_to_weather=WEATHER_TRAIN_DATA_PATH)\n result = predictor.predict(pd.read_csv(FLIGHTS_TEST_DATA_PATH))\n print('result')\n print(result)\n # result.to_csv(\"out.csv\")\n return 0",
"def run(self) -> None:\n self.model = self.trainer.train_model(self.model, self.data)",
"def post(self):\n text = None\n\n # 1. Get the text for processing\n if 'text' in request.form:\n text = request.form['text']\n else:\n return ({\n 'teprolin-conf': self._teprolin.getConfiguration(),\n 'teprolin-result': 'No text=\"...\" field has been supplied!'},\n int(HTTPStatus.BAD_REQUEST))\n\n # 2. Do the requested configurations,\n # if such pairs exist\n noConfigRequested = True\n\n for op in TeproAlgo.getAvailableOperations():\n if op in request.form:\n algo = request.form[op]\n\n try:\n self._teprolin.configure(op, algo)\n noConfigRequested = False\n except RuntimeError as err:\n return ({\n 'teprolin-conf': self._teprolin.getConfiguration(),\n 'teprolin-result': str(err)},\n int(HTTPStatus.BAD_REQUEST))\n\n # 2.1 If no configuration was requested,\n # go to the default configuration for the object.\n # Clear previous configuration requests.\n if noConfigRequested:\n self._teprolin.defaultConfiguration()\n\n # 3. Extract the requested text processing\n # operations. If none is specified, do the full\n # processing chain.\n requestedOps = []\n\n if 'exec' in request.form:\n exop = request.form['exec'].split(\",\")\n\n for op in exop:\n if op in TeproAlgo.getAvailableOperations():\n requestedOps.append(op)\n else:\n return ({\n 'teprolin-conf': self._teprolin.getConfiguration(),\n 'teprolin-result': \"Operation '\" + op + \"' is not recognized. See class TeproAlgo.\"},\n int(HTTPStatus.BAD_REQUEST))\n\n # 4. Do the actual work and return the JSON object.\n if requestedOps:\n dto = self._teprolin.pcExec(text, requestedOps)\n else:\n dto = self._teprolin.pcFull(text)\n\n return ({\n 'teprolin-conf': self._teprolin.getConfiguration(),\n 'teprolin-result': dto.jsonDict()},\n int(HTTPStatus.OK))",
"def main(self):\r\n\r\n #Train the GEN and DISC\r\n self.modelTrain.main()\r\n self.disp.show()",
"def plan(self):\n raise NotImplementedError('You must implement the plan() method '\n 'yourself!')",
"def main(domain):\n\n filepath_train1 = '../../Non_covid_data_15oct/train_data_batch1_disregard_removed.pkl'\n filepath_test1 = '../../Non_covid_data_15oct/test_data_batch1_disregard_removed.pkl'\n filepath_train2 = '../../Covid_data_11nov/traindata_covidbatch.pkl'\n filepath_test2 = '../../Covid_data_11nov/testdata_covidbatch.pkl'\n\n df_train_nc, df_test_nc = createDataframe(filepath_train1, filepath_test1, domain, 'noncovid')\n df_train_c, df_test_c = createDataframe(filepath_train2, filepath_test2, domain, 'covid')\n #print(df_train)\n sen_reps_tr_nc, labels_tr_nc, sen_reps_te_nc, labels_te_nc = prepro(df_train_nc, df_test_nc)\n sen_reps_tr_c, labels_tr_c, sen_reps_te_c, labels_te_c = prepro(df_train_c, df_test_c)\n #print(labels_te)\n\n #Uncomment to combine training datasets \n #sen_reps_tr_c += sen_reps_tr_nc\n #labels_tr_c += labels_tr_nc\n\n #Uncomment to combine test datasets and test labels if necessary (if you do so, also combine test df's)\n #sen_reps_te_c += sen_reps_te_nc\n #labels_te_c += labels_te_nc\n #df_test = pd.concat([df_test_c, df_test_nc])\n\n #Feed selected train and test data to regression model\n predictions = get_predictions(sen_reps_tr_c, labels_tr_c, sen_reps_te_c)\n\n #Make dataframes of note id's and labels\n df_ann = make_note_df(df_test_c, labels_te_c)\n df_pred = make_note_df(df_test_c, predictions)\n\n #Evaluate on sentence level\n MSE, MAE, RMSE = evaluation(labels_te_c, predictions)\n\n print(\"MSE \"+domain, MSE)\n print(\"MAE \"+domain, MAE)\n print(\"RMSE \"+domain, RMSE)\n\n #Aggregate per note\n means_ann = means(df_ann)\n means_pred = means(df_pred)\n\n #Evaluate on note level\n MSE, MAE, RMSE = evaluation(means_ann, means_pred)\n\n print(\"MSE agg\"+domain, MSE)\n print(\"MAE agg\"+domain, MAE)\n print(\"RMSE agg\"+domain, RMSE)",
"def main(model,pmap):\n\n addPppParams(model)\n\n# addTransportParams(model,pmap)\n\n #translationSources(model)\n\n #addLipidMetabs(model)\n\n return",
"def __init__(self):\r\n\r\n #480p 2.39:1 720x302\r\n #2048x2048 is more than 7.3GB of vRAM for the Master DISC model\r\n\r\n #Loading the preprocessed data\r\n preprocessVars = Preprocess()\r\n\r\n #The training and display of the trained models\r\n self.modelTrain = train.Train(preprocessVars)\r\n self.disp = display.Display(preprocessVars)",
"def apply(self):\n\n sc = SparkContext(appName=\"Model Applier\")\n sqlContext = SQLContext(sc)\n\n # Add model and supporting files to SparkContext\n for item in self.model_location_dict.items():\n ModelApplier.add_files_to_context(item[1], sc)\n\n partition_processor = self.get_partition_processor()\n infile = sc.textFile(self.input_location)\n header_line = infile.first()\n infile = infile.filter(lambda x: x != header_line)\n\n result = infile.mapPartitions(partition_processor).flatMap(lambda x: x)\n print('result.class', result.__class__)\n\n result = result.map(lambda (x, a, y, segment, model_version):\n (int(x), float(a), float(y), segment, model_version))\n sqlContext.createDataFrame(result).saveAsParquetFile(self.output_location)",
"def main():\n # Instantiate the data problem.\n data = create_data_model()\n\n # NEW SPOT TO MAKE distance_matrix\n distance_matrix = compute_euclidean_distance_matrix(destinations_1)\n manager = pywrapcp.RoutingIndexManager(\n len(destinations_1), data['num_vehicles'], data['depot'])\n\n# # Create the routing index manager.\n# manager = pywrapcp.RoutingIndexManager(\n# len(data['locations']), data['num_vehicles'], data['depot'])\n\n # Create Routing Model.\n routing = pywrapcp.RoutingModel(manager)",
"def main():\n\n # Start and import data\n print('>>>>> START')\n dates, prices, price_diff = get_data('data.csv')\n indices = [i for i in range(0, len(prices))]\n \n # Quick filtering to enable good visualization\n # This section can be commented if wished\n delete_outlayers(dates, price_diff)\n delete_outlayers(prices, price_diff)\n delete_outlayers(indices, price_diff)\n price_diff.remove(max(price_diff))\n price_diff.remove(min(price_diff))\n print('>>>>> STATUS: DATA FORMATTING DONE')\n\n # Model and prediction\n predicted_price = predict_price(indices, dates, price_diff, 1829)\n print('RESULTING PREDICTION = ', (predicted_price * -1) + prices[0])\n print('>>>>> DONE')",
"def yield_prediction():\n ## input code here\n if request.method == \"POST\":\n re = request.get_json()\n city = re[\"city\"]\n state = re[\"state\"]\n ## convert into lower case\n state = state.lower()\n city = city.lower()\n model_crop = re[\"crop\"]\n model_crop = model_crop.lower()\n model_season = re[\"season\"]\n model_season = model_season.lower()\n model_area = re[\"area\"]\n model_area = int(model_area)\n\n ## store name of crop for the graph\n crop = model_crop\n ## preprocesss the code\n\n try:\n state_le = load(\"static/labelencoder/state_le.joblib\")\n district_le = load(\"static/labelencoder/district_le.joblib\")\n season_le = load(\"static/labelencoder/season_le.joblib\")\n crop_le = load(\"static/labelencoder/crop_le.joblib\")\n model_crop = crop_le.transform([model_crop])[0]\n model_season = season_le.transform([model_season])[0]\n model_state = state_le.transform([state])[0]\n model_city = district_le.transform([city])[0]\n except:\n response_dict = {\n \"status\": False,\n \"message\": \"Enter Valid Data\"\n }\n return jsonify(response_dict)\n\n model_city = int(model_city)\n model_state = int(model_state)\n model_crop = int(model_crop)\n model_season = int(model_season)\n model_para = [model_state, model_city, model_season, model_crop, model_area]\n\n ## prediction code here\n\n import requests\n # NOTE: you must manually set API_KEY below using information retrieved from your IBM Cloud account.\n API_KEY = \"S30qFHkYTHMDO81ijSRiGSiE1jOfnlt01Vtn9UBU2KqL\"\n token_response = requests.post('https://iam.cloud.ibm.com/identity/token',\n data={\"apikey\": API_KEY, \"grant_type\": 'urn:ibm:params:oauth:grant-type:apikey'})\n mltoken = token_response.json()[\"access_token\"]\n\n header = {'Content-Type': 'application/json', 'Authorization': 'Bearer ' + mltoken}\n\n # NOTE: manually define and pass the array(s) of values to be scored in the next line\n payload_scoring = {\"input_data\": [\n {\"fields\": [\"State_Name\", \"District_Name\", \"Season\", \"Crop\", \"Area\"], \"values\": [model_para]}]}\n\n response_scoring = requests.post(\n 'https://us-south.ml.cloud.ibm.com/ml/v4/deployments/180fe5c1-a652-4e59-8b33-781326790706/predictions?version=2021-07-16',\n json=payload_scoring, headers={'Authorization': 'Bearer ' + mltoken})\n\n output = response_scoring.json()\n\n ## retrive the output\n\n pred_yield = output[\"predictions\"][0]['values'][0][0]\n pred_production = pred_yield * model_area\n\n ## PIE CHART\n try:\n kharif_value = kharif_yield.query.filter_by(crop_name=crop).first()\n kharif_values = kharif_value.yield_value\n except:\n kharif_values = 0\n try:\n rabi_value = rabi_yield.query.filter_by(crop_name=crop).first()\n rabi_values = rabi_value.yield_value\n except:\n rabi_values = 0\n\n try:\n summer_value = summer_yield.query.filter_by(crop_name=crop).first()\n summer_values = summer_value.yield_value\n except:\n summer_values = 0\n\n try:\n winter_value = winter_yield.query.filter_by(crop_name=crop).first()\n winter_values = winter_value.yield_value\n except:\n winter_values = 0\n\n try:\n autumn_value = autumn_yield.query.filter_by(crop_name=crop).first()\n autumn_values = autumn_value.yield_value\n except:\n autumn_values = 0\n\n try:\n whole_year_value = whole_year_yield.query.filter_by(crop_name=crop).first()\n whole_year_values = whole_year_value.yield_value\n except:\n whole_year_values = 0\n\n season_name = ['kharif', 'rabi', 'summer', 'winter', 'autumn', 'whole year']\n yield_list = [kharif_values, rabi_values, summer_values, winter_values, autumn_values, 
whole_year_values]\n\n season_yield_dict = dict()\n pie_list = list()\n for season, value in zip(season_name, yield_list):\n if value == 0:\n pass\n else:\n season_yield_dict[season] = round(value, 2)\n pie_list.append(round(value, 2))\n bar_graph_label = list(season_yield_dict.keys())\n pie_final_list = list()\n sum_list = sum(pie_list)\n for val in pie_list:\n suceess = val / sum_list\n suceess = round(suceess, 2)\n pie_final_list.append(suceess * 100)\n\n ## reponse dict here\n response_dict = {\n \"predYield\": pred_yield,\n \"predProduction\": pred_production,\n \"barGraphLabel\": bar_graph_label,\n \"barGraphvalue\": yield_list,\n \"pieChartLabel\": bar_graph_label,\n \"pieChartValue\": pie_final_list\n }\n return jsonify(response_dict)",
"def main():\n data = load_data()\n analyze_features(data['full_features'])\n model = train(data)\n\n with open('model.pickle', 'wb') as f:\n pickle.dump(model, f)\n evaluate(model, data)",
"def generate():\n global transformer_top\n assert transformer_top is not None\n global transformer_bottom\n assert transformer_bottom is not None\n global label_encoders_per_modality\n assert label_encoders_per_modality is not None\n global DEVICE\n assert DEVICE is not None\n global partial_sample_model\n assert partial_sample_model is not None\n\n temperature = float(request.args.get('temperature'))\n pitch = int(request.args.get('pitch'))\n instrument_family_str = str(request.args.get('instrument_family_str'))\n\n class_conditioning_top = class_conditioning_bottom = {\n 'pitch': pitch,\n 'instrument_family_str': instrument_family_str\n }\n class_conditioning_tensors_top = make_conditioning_tensors(\n class_conditioning_top,\n label_encoders_per_modality)\n class_conditioning_tensors_bottom = make_conditioning_tensors(\n class_conditioning_bottom,\n label_encoders_per_modality)\n\n batch_size = 1\n top_code = partial_sample_model(\n model=transformer_top,\n batch_size=batch_size,\n codemap_size=transformer_top.shape,\n temperature=temperature,\n class_conditioning=class_conditioning_tensors_top\n )\n bottom_code = partial_sample_model(\n model=transformer_bottom,\n condition=top_code,\n batch_size=batch_size,\n codemap_size=transformer_bottom.shape,\n temperature=temperature,\n class_conditioning=class_conditioning_tensors_bottom,\n )\n\n class_conditioning_top_map = {\n modality: make_matrix(transformer_top.shape,\n value)\n for modality, value in class_conditioning_top.items()\n }\n class_conditioning_bottom_map = {\n modality: make_matrix(transformer_bottom.shape,\n value)\n for modality, value in class_conditioning_bottom.items()\n }\n\n response = make_response(top_code, bottom_code,\n class_conditioning_top_map,\n class_conditioning_bottom_map)\n return response",
"def main(self):\n if self.mode==0: #drawing\n self.draw()\n self.graph_drawing=self.cleanGraph(self.graph_drawing)\n #if len(self.graph_drawing)>1:\n # self.function_interpolation=self.polynomialInterpolation2D(self.graph_drawing,1)\n # self.graph_interpolation=self.sample(self.function_interpolation,len(self.graph_drawing))\n elif self.mode==1: #construction\n self.step+=1\n self.time=self.step/self.max_step\n if self.step>self.max_step:\n self.mode=2\n #self.graph_construction=self.discreteComplexComposeGraph(self.coefficients,self.time) #complex now\n self.graph_construction=self.numpyComposeConstructionGraph(self.coefficients,t=self.time)\n self.vectors=self.getVectors([(0,0)]+self.graph_construction)\n self.graph_display.append(self.graph_construction[-1])\n\n elif self.mode==2:\n self.draw()",
"def main(database_filepath,model_filepath):\n X_train, X_test, y_train, y_test = load_data(database_filepath)\n \n print(X_train.shape,y_train.shape)\n \n print('Building model...')\n model = build_pipeline()\n \n print('Training model...')\n model.fit(X_train, y_train)\n \n print('Evaluating model...')\n evaluate_model(model, X_test, y_test)\n\n print('Saving model...')\n save_model(model, model_filepath)\n\n print('Trained model saved!')",
"def __init__(self,\n model_cfg: dict,\n trainer_cfg: dict,\n plan: dict,\n **kwargs\n ):\n super().__init__()\n self.model_cfg = model_cfg\n self.trainer_cfg = trainer_cfg\n self.plan = plan\n\n self.model = self.from_config_plan(\n model_cfg=self.model_cfg,\n plan_arch=self.plan[\"architecture\"],\n plan_anchors=self.plan[\"anchors\"],\n )\n\n self.example_input_array_shape = (\n 1, plan[\"architecture\"][\"in_channels\"], *plan[\"patch_size\"],\n )\n\n self.epoch_start_tic = 0\n self.epoch_end_toc = 0",
"def post(self):\n return CreateSavingPlan(request, current_user.id)",
"def run(self, data, training=False):\n # Set mode\n if training:\n self._model.train()\n else:\n self._model.eval()\n # Compute\n return self._model(data)",
"def run(self):\r\n # Close any open models\r\n self.cleanUp()\r\n # Dynamically select file to preview\r\n self.selectTrial() \r\n # Add adjusted COM (RRA/CMC) model\r\n self.loadAdjustedModel()\r\n # Hide the markers from view\r\n self.hideModelMarkers()\r\n # Load CMC motion to model\r\n self.loadCMCMotion()"
] | [
"0.5819126",
"0.5687763",
"0.56812865",
"0.5644644",
"0.56177247",
"0.56124663",
"0.5591375",
"0.55907804",
"0.5575372",
"0.55574733",
"0.55520725",
"0.55112386",
"0.5506368",
"0.5506181",
"0.5499399",
"0.54953074",
"0.54870474",
"0.5463988",
"0.5453932",
"0.544917",
"0.5431036",
"0.54236186",
"0.5415465",
"0.5411643",
"0.5410282",
"0.5395476",
"0.5370502",
"0.5365928",
"0.53531337",
"0.5340732"
] | 0.6357549 | 0 |
This function obtains data from the table in our GUI when updating it. After receiving the table, the MiniZinc model is called to replan the courses. | def returnTheTable():
preference = request.get_json()
# Obtain the list containing replaced courses and the to-be-updated plan
replaced = preference.pop('replaced')
oldPlan = dict()
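    # plan.txt alternates a semester number with the course codes placed in that semester;
    # rebuild the previous plan as a course-code -> semester mapping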
readPlan = open('plan.txt')
try:
content = str(readPlan.read())
courses = content.split(' ')[:-1]
current = 0
for i in courses:
if re.match(r'^[0-9]', str(i)):
current = int(i)
else:
oldPlan[i] = current
finally:
readPlan.close()
# collect user information
program = preference.pop('program')
enroll_yr = preference.pop('enroll_yr')
enroll_sem = preference.pop('enroll_sem')
# user's specialisation
spec = 0
if 'spec' in preference:
spec = int(preference['spec'])
preference.pop('spec')
program_link = 'https://programsandcourses.anu.edu.au/2019/program/'
program_link = str(program_link) + str(program)
if int(enroll_yr)%2 == 1:
if int(enroll_sem)%2 == 1:
sem = 1
else:
sem = 2
else:
if int(enroll_sem)%2 == 1:
sem = 3
else:
sem = 4
# call the pre-processing program which put the model in file test1.mzn & test1.dzn
scraper = dp.DegreeRuleScraper(str(program_link))
orders = scraper.build_program_order_struct()
orders.buildAModel(preference, sem, spec, oldPlan, replaced)
# call MiniZinc to solve for the model
cmd = 'minizinc --solver OSICBC test1.mzn test1.dzn > re-plan.txt'
os.system(cmd)
# read the new plan from the file containing it.
jsondata = readmyJson('re-plan')
return jsonify(jsondata) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def refresh_course(self):\r\n self.course = modulestore().get_course(self.course.id)",
"def refresh_table(self):\n selection_index = self._lb_tables.GetSelection()\n if selection_index != -1:\n table_id = self._tables[selection_index][0]\n \n #remake table ui so that new columns can be added\n self._recreate_dvl_data()\n\n #create datatable object\n datatable = sciplot.datatable.Datatable(self._datafile)\n\n #set variable ids for columns\n variable_ids = []\n variable_symbols = []\n format_strings = []\n for variable_symbol, variable_id, format_string in self._datafile.query(sciplot.database.Query(\"SELECT Variable.Symbol, Variable.VariableID, TableColumn.FormatPattern FROM Variable INNER JOIN TableColumn ON TableColumn.VariableID = Variable.VariableID WHERE TableColumn.TableID = (?);\", [table_id], 1))[0]:\n self._dvl_columns.append(self._dvl_data.AppendTextColumn(variable_symbol)) #create column header\n variable_symbols.append(variable_symbol)\n variable_ids.append(variable_id)\n format_strings.append(format_string)\n \n datatable.set_variables(variable_ids)\n\n #load constants for the datatable\n constants_table = {}\n for composite_unit_id, constant_symbol, constant_value in self._datafile.query(sciplot.database.Query(\"SELECT UnitCompositeID, Symbol, Value FROM Constant;\", [], 1))[0]:\n value = sciplot.functions.Value(constant_value) #make a value object so that the data can be formatted with the format strings\n if composite_unit_id != None:\n value.units = self._datafile.get_unit_by_id(composite_unit_id)[1]\n constants_table[constant_symbol] = constant_value\n \n #load all data from the datafile into memory\n no_exception = True\n try:\n datatable.load(constants_table)\n \n except Exception as e:\n wx.MessageBox('Couldn\\'t generate table\\n{}'.format(str(e)), type(e).__name__, wx.ICON_ERROR | wx.OK) #display error message for the user\n no_exception = False\n\n if no_exception:\n #load transposed data\n data_as_rows = datatable.as_rows()\n \n #put data into table\n for row in data_as_rows:\n formatted_row = []\n for i in range(len(row)):\n value, exponent = row[i].format(format_strings[i])\n \n if exponent is None: #not in exponential form, just display the value\n formatted_row.append(value)\n else: #exponential form, display correctly\n if int(exponent) < 0:\n sign = ''\n else:\n sign = '+'\n\n formatted_row.append('{}E{}{}'.format(value, sign, exponent))\n\n self._dvl_data.AppendItem(formatted_row) #add row to table\n \n #set column titles\n if len(data_as_rows) > 0:\n for index in range(len(data_as_rows[0])):\n column_obj = self._dvl_columns[index]\n new_col_string = variable_symbols[index]\n value_obj = data_as_rows[0][index]\n\n unit_string = self._datafile.get_unit_string(value_obj.units)\n \n if unit_string != '': #add si units to title, if there are any\n new_col_string += ': ' + unit_string\n column_obj.SetTitle(new_col_string)\n \n #set column widths\n if len(self._dvl_columns) > 0:\n col_width = (self._dvl_data.GetSize()[0] - 30) / len(self._dvl_columns)\n for col in self._dvl_columns:\n col.SetWidth(col_width)",
"def refresh_data(self):\r\n self.tableWidget.setRowCount(globals.no_sections)\r\n self.le.setText(str(globals.no_sections))\r\n self.tableWidget.fill_table(globals.sections)",
"def reload_course(self):\r\n self.course = self.store.get_course(self.course.id)",
"def callUpdateTable(self):\r\n self.updateTable()",
"def refreshCredit(self):\n self.mainmenu.updateCR()",
"def _refresh_table(self):\n self._column_selected()\n self._table_selected()\n self._column_selection_change()\n self.refresh_column_list()\n self.refresh_table_list()\n self.refresh_table()",
"def update_course(self):\n # ensure that updating course is exists\n if self.is_course_exists():\n db = Course._file.read_db()\n for crs_i in range(len(db[\"courses\"])):\n if db[\"courses\"][crs_i][\"course_name\"] == self._course_name:\n\n # ensuring that user does not provided less number of limited places\n if db[\"courses\"][crs_i][\"total_place\"] > self._total_place:\n print(\"{} course's limited places number must be more than {}\".format(\n self._course_name,\n db[\"courses\"][crs_i][\"total_place\"]\n ))\n return\n\n db[\"courses\"][crs_i][\"teacher\"] = self._teacher\n db[\"courses\"][crs_i][\"total_place\"] = self._total_place\n break\n self._file.write_db(db)\n print(\"The course - {} is updated\".format(self._course_name))\n return self.get_course().course_info()",
"def edit_course(self, course):\n EDIT_COURSE = \"\"\"UPDATE Course SET subject_code = %s, credit_hours = %s, description = %s WHERE name = %s\"\"\"\n\n self.db_cursor.execute(EDIT_COURSE, (\n course.subject_code, course.credit_hours, course.description, course.name))\n self.db_connection.commit()\n\n DELETE_COURSE_TOPICS = \"\"\"DELETE FROM CourseTopics WHERE course_name = %s\"\"\"\n self.db_cursor.execute(DELETE_COURSE_TOPICS, (course.name,))\n self.db_connection.commit()\n INSERT_COURSE_TOPICS = \"\"\"INSERT INTO CourseTopics (course_name, topic_id) VALUES (%s, %s)\"\"\"\n for ct in course.topics:\n self.db_cursor.execute(INSERT_COURSE_TOPICS, (course.name,ct))\n self.db_connection.commit()\n\n DELETE_COURSE_GOALS = \"\"\"DELETE FROM CourseGoals WHERE course_name = %s\"\"\"\n self.db_cursor.execute(DELETE_COURSE_GOALS, (course.name,))\n self.db_connection.commit()\n INSERT_COURSE_GOALS = \"\"\"INSERT INTO CourseGoals (course_name, goal_id) VALUES (%s, %s)\"\"\"\n for cg in course.goals:\n self.db_cursor.execute(INSERT_COURSE_GOALS, (course.name, cg))\n self.db_connection.commit()",
"def save_course(self):\n\t\tprint(\"Course sauvegardee\")\n\t\tprint(self.Course)\n\n\t\tprint(\"self.var_nom : \"+self.var_nom.get())\n\t\tself.Course.name=self.var_nom.get()\n\t\tprint(\"self.vqr_ete : \"+str(self.var_ete.get()))\n\t\tif(self.var_ete.get()==1):\n\t\t\tself.Course.season = \"Seulement ete\"\n\t\telif(self.var_hiver.get()==1):\n\t\t\tself.Course.season = \"Seulement hiver\"\n\t\telse:\n\t\t\tself.Course.season = \"Toutes\"\n\n\n\t\tif self.var_OK_invites.get() == 1:\n\t\t\tself.Course.OK_for_invitee = True\n\n\t\tif self.var_preparer_la_veille.get() == 1:\n\t\t\tself.Course.prepare_day1 = True\n\n\t\tif self.var_legume.get() == 1:\n\t\t\tself.Course.type_course = \"Legume\"\n\t\telif self.var_viande.get() == 1:\n\t\t\tself.Course.type_course = \"Viande\"\n\t\telif self.var_poisson.get() == 1:\n\t\t\tself.Course.type_course = \"Poisson\"\n\t\telif self.var_puree.get() == 1:\n\t\t\tself.Course.type_course = \"Puree\"\n\t\telif self.var_soupe.get() == 1:\n\t\t\tself.Course.type_course = \"Soupe\"\n\t\telif self.var_salade.get() == 1:\n\t\t\tself.Course.type_course = \"Salade\"\n\t\telif self.var_autre .get() == 1:\n\t\t\tself.Course.type_course = \"Autres\"\n\t\telse:\t\n\t\t\tself.Course.type_course = \"Autres\"\n\t\t\n\n\t\tself.Course.recipe = self.text_recipe.get(\"1.0\",END)\n\t\tself.Course.link = self.text_link.get(\"1.0\",END)\n\t\tprint(self.Course)\n\t\t\n\t\tself.getListOfRecette()\n\t\tself.list_course.append(self.Course)\n\t\tself.saveListOfRecette()\n\t\t#on quitte la fenetreTopLevel\t\n\t\tself.parentFrame.destroy()",
"def confirm_table(self):\n self.words_frame.grid(column=0, row=2, padx=20, pady=20, ipadx=66)\n self.words_frame.grid_propagate(0)\n self.words_frame.grid_columnconfigure(0, weight=0)\n self.load_words()\n self.table_db = self.table.get()\n self.db = self.database.get()",
"def ret(self):\n self.model = QtSql.QSqlQueryModel(self)\n sql = \"select s.Name,f.Q1,f.Q2,f.Q3,f.Q4 from feedback f, student s where f.ID = s.ID;\"\n self.model.setQuery(sql)\n self.ui.tableView.setModel(self.model)",
"def _get_courses(self) -> None:\n\n courses_content: NavigableString = self.soup.find(\"div\", \n {\"class\": \"coursesContent\"})\n course_items: ResultSet = courses_content.find_all(\"div\", \n {\"class\": \"courseItem\"})\n\n for item in course_items:\n course_name: str = item.a[\"href\"].split(\"/\")[-2].lower()\n course_data: ParseType = self._parse(item)\n self._update(course_name, course_data)",
"def fetch_data():\n data.fetch_data()\n data.start_updating()",
"def open_table(self, table_name, sort_index):\n self.db = self.__connect_db('local') # Connect to database\n if self.db.open():\n self.model = self.__create_model(self.db, table_name, sort_index) # Create database model\n self.field_index = self.__get_fields(self.model) # Retrieve the fields dictionary\n\n # Set up table header data\n self.model.setHeaderData(self.field_index['NO'], Qt.Horizontal, 'ID Number')\n self.model.setHeaderData(self.field_index['NAME'], Qt.Horizontal, 'Name')\n self.model.setHeaderData(self.field_index['GENDER'], Qt.Horizontal, 'Gender')\n self.model.setHeaderData(self.field_index['BIRTH'], Qt.Horizontal, 'Birth')\n self.model.setHeaderData(self.field_index['PROVINCE'], Qt.Horizontal, 'Province')\n self.model.setHeaderData(self.field_index['DEPT'], Qt.Horizontal, 'Department')\n self.model.setHeaderData(self.field_index['SALARY'], Qt.Horizontal, 'Salary')\n self.model.setHeaderData(self.field_index['PHOTO'], Qt.Horizontal, 'Photo')\n self.model.setHeaderData(self.field_index['MEMO'], Qt.Horizontal, 'Memo')\n\n # Set up mapping between table data and display widgets\n self.mapper = QDataWidgetMapper()\n self.mapper.setModel(self.model)\n self.mapper.setSubmitPolicy(QDataWidgetMapper.AutoSubmit)\n self.mapper.addMapping(self.ui.spin_info_id, self.field_index['NO'])\n self.mapper.addMapping(self.ui.lineedit_name, self.field_index['NAME'])\n self.mapper.addMapping(self.ui.combo_info_sex, self.field_index['GENDER'])\n self.mapper.addMapping(self.ui.dateedit_brith_year, self.field_index['BIRTH'])\n self.mapper.addMapping(self.ui.combo_info_birth_addr, self.field_index['PROVINCE'])\n self.mapper.addMapping(self.ui.combo_info_dept, self.field_index['DEPT'])\n self.mapper.addMapping(self.ui.combo_info_salary, self.field_index['SALARY'])\n self.mapper.addMapping(self.ui.textedit_memo, self.field_index['MEMO'])\n self.mapper.toFirst()\n\n # Set up Selection model for each row of table\n self.sel_model = QItemSelectionModel(self.model)\n self.sel_model.currentChanged.connect(self.act_cur_changed)\n self.sel_model.currentRowChanged.connect(self.act_cur_row_changed)\n\n # Connect table view and table model\n self.ui.tbl_view_show_data.setModel(self.model)\n self.ui.tbl_view_show_data.setSelectionModel(self.sel_model)\n self.ui.tbl_view_show_data.setColumnHidden(self.field_index['PHOTO'], True)\n self.ui.tbl_view_show_data.setColumnHidden(self.field_index['MEMO'], True)\n\n # Customized delegates for table data\n sex_list = ['Male', 'Female']\n self.__delegate_sex = QmyComboBoxDelegate()\n self.__delegate_sex.setItems(sex_list, False) # Link sex list and delegate, not editable\n self.ui.tbl_view_show_data.setItemDelegateForColumn(self.field_index['GENDER'], self.__delegate_sex)\n dept_list = ['CS', 'AI', 'Network', 'Unix', 'Business']\n self.__delegate_dept = QmyComboBoxDelegate()\n self.__delegate_dept.setItems(dept_list, True) # Link dept list and delegate, editable\n self.ui.tbl_view_show_data.setItemDelegateForColumn(self.field_index['DEPT'], self.__delegate_dept)\n\n # Enable and Disable actions\n self.ui.act_add.setEnabled(True)\n self.ui.act_insert.setEnabled(True)\n self.ui.act_del.setEnabled(True)\n self.ui.group_sort.setEnabled(True)\n else:\n QMessageBox.warning(self, 'Error', 'Open database failed')",
"def test_update_entry_courses(self):\r\n # This method utilises the PUT request method and will make changes to the Canvas instance. This needs consideration.\r\n pass",
"def editTable(self):\r\n\r\n from .TableDialog import TableDialog\r\n\r\n variables = self.getSelectedVariables()\r\n\r\n if len(variables) == 1:\r\n start_values = self.startValues.copy()\r\n dialog = TableDialog(modelVariables=self.modelDescription.modelVariables,\r\n variable=variables[0],\r\n startValues=start_values)\r\n\r\n if dialog.exec_() == QDialog.Accepted:\r\n self.startValues.clear()\r\n self.startValues.update(start_values)",
"def on_update(self, game):\n if self.strategy:\n self.set_course(self.strategy.choose_course(self, game))",
"def get_course_table(self, table):\n json_result = {}\n row_list = table.xpath('.//table[@id = \"s_course\"]/tr[position() > 1]')\n for row in row_list:\n session = row.xpath('./td[1]/text()')\n course_full_code_list = row.xpath('.//a[starts-with(@href, \"javascript:course_popup\")]/text()')\n course_name_list = row.xpath('.//font[@style = \"font-size:7pt;\"]/text()')\n course_list = []\n if len(course_full_code_list) != len(course_name_list):\n # year course design project would be count twice\n if (\"Design Project\" == course_name_list[0]) & \\\n (len(course_full_code_list) + 1 == len(course_name_list)):\n course_name_list = course_name_list[1:]\n else:\n raise ProfileException(\n \"Error: unmatched lists. course code list:\",\n course_full_code_list, \"\\n course name list:\", course_name_list)\n for i, full_code in enumerate(course_full_code_list):\n if re.match(re.compile('\\w{3}\\d{3}[YH]1\\s+[SFY]'), full_code) is None:\n raise ProfileException(\"Illegal course code!:\" + full_code)\n course_list.append({\n \"courseName\": course_name_list[i],\n \"courseCode\": full_code[0:6],\n \"courseTime\": full_code[-1],\n \"courseLength\": full_code[6:8]\n })\n # there is a empty session\n if session:\n json_result.update({session[0]: course_list})\n if json_result:\n return json_result\n else:\n raise ProfileException(\"Failed to get course_table table(row list is empty)\")",
"def update_plan_choisen():\n # SOLO USO PARA AMBIENTE EN DESARROLLO\n for client in Client.objects.all():\n try:\n plan_chosen = get_query_set_plan()\n plan_active = plan_chosen.filter(queryplansclient__client=client.id, is_active=True,\n queryplansclient__is_chosen=True)\n if plan_active:\n plan = QueryPlansAcquiredSerializer(plan_active[0])\n chosen_plan(client.id, plan.data)\n print(\"success\")\n print(\"empty\")\n except Exception as e:\n print(\"error\"+str(e))",
"def fetchDataToForm(self, row, column, fields=\"Recent\"):\n columns = self.piggybank.columnCount()\n\n for columnCounter in range(columns):\n\n self.columnHeaderLabel = str(self.piggybank.horizontalHeaderItem(columnCounter).text())\n self.cellValue = str(self.piggybank.item(row, columnCounter).text())\n\n if self.columnHeaderLabel == \"Description Type\":\n self.typeIndex = self.comboBoxType.findText(self.cellValue)\n self.comboBoxType.setCurrentIndex(self.typeIndex)\n\n elif self.columnHeaderLabel == \"Priority\":\n self.priorityIndex = self.comboBoxPriority.findText(self.cellValue)\n self.comboBoxPriority.setCurrentIndex(self.priorityIndex)\n\n elif self.columnHeaderLabel == \"Source\":\n self.sourceIndex = self.comboBoxSource.findText(self.cellValue)\n self.comboBoxSource.setCurrentIndex(self.sourceIndex)\n\n elif self.columnHeaderLabel == \"BU\":\n self.BUIndex = self.comboBoxBU.findText(self.cellValue)\n self.comboBoxBU.setCurrentIndex(self.BUIndex)\n self.populateSuperCategory()\n\n elif self.columnHeaderLabel == \"Super-Category\":\n self.superIndex = self.comboBoxSuperCategory.findText(self.cellValue)\n self.comboBoxSuperCategory.setCurrentIndex(self.superIndex)\n self.populateCategory()\n\n elif self.columnHeaderLabel == \"Category\":\n self.categoryIndex = self.comboBoxCategory.findText(self.cellValue)\n self.comboBoxCategory.setCurrentIndex(self.categoryIndex)\n self.populateSubCategory()\n\n elif self.columnHeaderLabel == \"Sub-Category\":\n self.subCatIndex = self.comboBoxSubCategory.findText(self.cellValue)\n self.comboBoxSubCategory.setCurrentIndex(self.subCatIndex)\n self.populateBrandVertical()\n\n elif self.columnHeaderLabel == \"Vertical\":\n self.verticalIndex = self.comboBoxVertical.findText(self.cellValue)\n self.comboBoxVertical.setCurrentIndex(self.verticalIndex)\n\n elif self.columnHeaderLabel == \"Brand\":\n self.lineEditBrand.setText(self.cellValue)\n\n elif fields == \"All\":\n if self.columnHeaderLabel == \"FSN\":\n self.lineEditFSN.setText(self.cellValue)\n elif self.columnHeaderLabel == \"Upload Link\":\n self.lineEditUploadLink.setText(self.cellValue)\n elif self.columnHeaderLabel == \"Reference Link\":\n self.lineEditRefLink.setText(self.cellValue)\n elif self.columnHeaderLabel == \"Word Count\":\n self.spinBoxWordCount.setValue(int(self.cellValue))",
"def update(table, id_):\n\n new_data = ui.get_inputs(\n [\"TITLE\", \"PRICE\", \"MONTH\", \"DAY\", \"YEAR\"],\n \"Please enter the new data to update\"\n )\n\n if common.confirm_option():\n\n ID = 0\n\n for game in table:\n if game[ID] == id_:\n for game_data_index in range(len(new_data)):\n game[game_data_index + 1] = new_data[game_data_index]\n\n return table",
"def refillCodeTable(self):\n\n tableSQL = \"\"\n sortDirection = self.pushButton_sortDirection.text()\n sortField = self.pushButton_sort.text()\n if sortDirection == \"Ascending\":\n if sortField == \"Sorted by code\":\n tableSQL = self.orderByCodeAscendSQL\n else:\n tableSQL = self.orderByCategoryAscendSQL\n else: # sort descending\n if sortField == \"Sorted by code\":\n tableSQL = self.orderByCodeDescendSQL\n else:\n tableSQL = self.orderByCategoryDescendSQL\n\n for row in self.codes:\n self.tableWidget.removeRow(0)\n self.codes = []\n cur = self.settings['conn'].cursor()\n cur.execute(tableSQL)\n result = cur.fetchall()\n for row in result:\n self.codes.append({'name':row[0], 'category':row[1], 'id':row[2], 'color':row[3]})\n self.fillTableWidget()",
"def _update(self, course_name: str, newdata: ParseType) -> None:\n\n self.courses[course_name] = newdata",
"def refresh_table(self):\n self._table['bounty_column'] = Driver.instance.find_elements(*self._selectors['bounty_column'])\n self._table['first_name_column'] = Driver.instance.find_elements(*self._selectors['first_name_column'])\n self._table['last_name_column'] = Driver.instance.find_elements(*self._selectors['last_name_column'])\n self._table['edit_column'] = Driver.instance.find_elements(*self._selectors['edit_column'])\n self._table['details_column'] = Driver.instance.find_elements(*self._selectors['details_column'])\n self._table['delete_column'] = Driver.instance.find_elements(*self._selectors['delete_column'])",
"def main():\n\n #Courses\n years = [2016, 2017, 2018, 2019, 2020]\n with sqlite3.connect(\"determined.db\") as conn:\n c = conn.cursor()\n c.execute(\n \"\"\"\n CREATE TABLE \"course\" (\n \"course_number\"\tint NOT NULL,\n \"dept_id\"\tvarchar(4) NOT NULL,\n \"title\"\tvarchar(100) NOT NULL,\n \"instructor_fname\"\tvarchar(35) DEFAULT NULL,\n \"instructor_lname\"\tvarchar(35) DEFAULT NULL,\n \"student_work_products\"\tjson DEFAULT NULL,\n `term` varchar(7) NOT NULL,\n `year` int NOT NULL,\n PRIMARY KEY(\"course_number\", \"term\", \"year\")) \n \"\"\"\n )\n conn.commit()\n courses = [\n (1370, \"CPSC\", \"Computer Literacy\", names.get_first_name(), names.get_last_name(), json.dumps({\"swp\": [\"Midterm\", \"Final Exam\", \"Project 1\"]}), \"Fall\"),\n (1375, \"CPSC\", \"Programming I\", names.get_first_name(), names.get_last_name(), json.dumps({\"swp\": [\"Midterm\", \"Final Exam\", \"Project 1\"]}), \"Fall\"),\n (2376, \"CPSC\", \"Intro to Game Programming\", names.get_first_name(), names.get_last_name(), json.dumps({\"swp\": [\"Midterm\", \"Final Exam\", \"Project 1\"]}), \"Fall\"),\n (2380, \"CPSC\", \"Algorithms\", names.get_first_name(), names.get_last_name(), json.dumps({\"swp\": [\"Midterm\", \"Final Exam\", \"Project 1\"]}), \"Fall\"),\n (2482, \"CPSC\", \"Computer Organization\", names.get_first_name(), names.get_last_name(), json.dumps({\"swp\": [\"Midterm\", \"Final Exam\", \"Project 1\"]}), \"Spring\"),\n (3377, \"CPSC\", \"Advanced Game Programming\", names.get_first_name(), names.get_last_name(), json.dumps({\"swp\": [\"Midterm\", \"Final Exam\", \"Project 1\"]}), \"Spring\"),\n (3380, \"CPSC\", \"Operating Systems\", names.get_first_name(), names.get_last_name(), json.dumps({\"swp\": [\"Midterm\", \"Final Exam\", \"Project 1\"]}), \"Spring\"),\n (3383, \"CPSC\", \"Programming Languages\", names.get_first_name(), names.get_last_name(), json.dumps({\"swp\": [\"Midterm\", \"Final Exam\", \"Project 1\"]}), \"Spring\"),\n (3384, \"CPSC\", \"Computer Networks\", names.get_first_name(), names.get_last_name(), json.dumps({\"swp\": [\"Midterm\", \"Final Exam\", \"Project 1\"]}), \"Summer\"),\n (4360, \"CPSC\", \"Computer Security\", names.get_first_name(), names.get_last_name(), json.dumps({\"swp\": [\"Midterm\", \"Final Exam\", \"Project 1\"]}), \"Summer\")\n ]\n #Adding years\n upload_courses = []\n for year in years:\n upload_courses += [x + (year,) for x in courses]\n #Making a few instructors teach multiple course\n new_courses = [\n (4557, \"CPSC\", \"Natural Language Processing\", ),\n (2375, \"CPSC\", \"Programming II\",),\n (2776, \"CPSC\", \"Data Structures and Algorithms\",),\n (4862, \"CPSC\", \"Image Recognition\", ),\n ]\n for i in range(0,len(new_courses)):\n year = choice(years)\n for y in range(0,2): #Number of times new course is taught\n c = upload_courses[i]\n new_data = (c[3], c[4], c[5], choice([\"Fall\", \"Spring\", \"Summer\"]), year+y)\n data = new_courses[i] + new_data\n upload_courses.append(data)\n #Adding solo instructors and solo courses\n upload_courses += [\n (4672, \"CPSC\", \"Programming Memes\", names.get_first_name(), names.get_last_name(), json.dumps({\"swp\": [\"Midterm\", \"Final Exam\", \"Project 1\"]}), \"Spring\", choice(years)),\n (1872, \"CPSC\", \"Information Systems\", names.get_first_name(), names.get_last_name(), json.dumps({\"swp\": [\"Midterm\", \"Final Exam\", \"Project 1\"]}), \"Summer\", choice(years)),\n (1123, \"CPSC\", \"Microsoft Office\", names.get_first_name(), names.get_last_name(), json.dumps({\"swp\": [\"Midterm\", 
\"Final Exam\", \"Project 1\"]}), \"Fall\", choice(years))\n ]\n\n with sqlite3.connect(\"determined.db\") as conn:\n c = conn.cursor()\n c.executemany('''INSERT INTO course (course_number, dept_id, title, instructor_fname, instructor_lname, student_work_products, term, year)\n VALUES ( ?, ?, ?, ?, ?, ?, ?, ?)''', upload_courses)\n conn.commit()\n\n #SWP\n with sqlite3.connect(\"determined.db\") as conn:\n c = conn.cursor()\n c.execute(\n \"\"\"\n CREATE TABLE `student_work_product` (\n `id` INTEGER PRIMARY KEY,\n `product` varchar(250) NOT NULL,\n `course_id` int NOT NULL,\n `dept_id` int NOT NULL,\n `student_fname` varchar(35) NOT NULL,\n `student_lname` varchar(35) NOT NULL,\n `student_outcome` int DEFAULT NULL,\n `score` int DEFAULT NULL,\n `term` varchar(7) NOT NULL,\n `year` int NOT NULL,\n CONSTRAINT `course` FOREIGN KEY (`course_id`) REFERENCES `course` (`course_number`)\n CONSTRAINT `course` FOREIGN KEY (`dept_id`) REFERENCES `course` (`dept_id`)\n )\n \"\"\"\n )\n conn.commit()\n \n swps = []\n with sqlite3.connect(\"determined.db\") as conn:\n conn.row_factory = sqlite3.Row\n c = conn.cursor()\n c.execute (\"Select * from course\")\n records = [dict(x) for x in c.fetchall()]\n #Generating 20 student records for each swp in each course\n for i, course in enumerate(records):\n student_names = []\n for _ in range(20):\n student_names.append({'fname': names.get_first_name(),\n 'lname': names.get_last_name()})\n for product in json.loads(course['student_work_products'])['swp']:\n for student in student_names:\n if i%7 == 0:\n score = int(triangular(50, 85))\n else:\n score = int(triangular(50, 100))\n if score >= 90: outcome = 4\n elif score >= 80: outcome = 3\n elif score >= 70: outcome = 2\n elif score >= 60: outcome = 1\n else: outcome = 0 \n swps.append((\n product,\n course['course_number'],\n \"CPSC\",\n student['fname'],\n student['lname'],\n outcome,\n score, \n course['term'], \n course['year']\n ))\n \n c.executemany('''INSERT INTO student_work_product (product, course_id, dept_id, student_fname, student_lname, student_outcome, score, term, year)\n VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ?)''', swps)\n conn.commit()",
"def make_changes(self):\n dd = mg.DATADETS_OBJ\n if not self.read_only:\n ## NB must run Validate on the panel because the objects are\n ## contained by that and not the dialog itself.\n ## http://www.nabble.com/validator-not-in-a-dialog-td23112169.html\n if not self.panel.Validate(): ## runs validators on all assoc ctrls\n raise Exception(_('Invalid table design.'))\n gui_tblname = self.txt_tblname.GetValue()\n if self.new:\n try:\n del self.tblname_lst[0] ## empty ready to repopulate\n except Exception:\n pass ## OK to fail to delete item in list if already empty\n self.tblname_lst.append(gui_tblname)\n self.make_new_tbl()\n dd.set_tbl(tbl=gui_tblname)\n else:\n if not self.read_only:\n orig_tblname = self.tblname_lst[0]\n del self.tblname_lst[0] ## empty ready to repopulate\n self.tblname_lst.append(gui_tblname)\n dd.set_tbl(tbl=orig_tblname) ## The new one hasn't hit the database yet\n self.modify_tbl()\n self.changes_made = True",
"def updateJobsTable(self):\n self.checkJobsDict()\n jobdict = self.DB.meta.peatsa_jobs \n M = TableModel()\n #open job log from file\n f=open('jobstates.log','r')\n jl = pickle.load(f) \n for j in jobdict: \n jobid = jobdict[j] \n try:\n M.addRecord(j,state=jl[jobid]['State'],date=jl[jobid]['Date'])\n except:\n M.addRecord(j,state='Not in DB')\n self.jobstable = TableCanvas(self.tf, model=M, height=100, editable=False)\n self.jobstable.createTableFrame() \n self.log.yview('moveto', 1)\n f.close()\n return",
"def on_chosen_currency(self):\n main_currency_title = self.choose_currency.currentText()\n # the string needs to be modified to be compatible with the database values\n main_currency = main_currency_title.replace(\" \", \"_\").lower()\n relation_currency = self.choose_relation_currency.currentText().replace(\" \", \"_\").lower()\n # graph\n if len(load_all(main_currency)) < 2:\n gui_warnings.on_loading_values()\n else:\n try:\n canvas = Canvas(relation_currency, self)\n canvas.plot(main_currency)\n except ValueError:\n pass # plots empty graph if main_currency = relation_currency\n self.clear_graph_layout(self.graph_layout)\n self.graph_layout.addWidget(canvas)\n # title\n self.gui_title.setText(main_currency_title)\n # table\n self.currency_table.setRowCount(0)\n currency_list = [\n \"Brazilian Real\",\n \"American Dollar\",\n \"European Euro\",\n \"British Pound\",\n \"Japanese Yen\",\n \"Swiss Frank\",\n \"Canadian Dollar\",\n \"Australian Dollar\"\n ]\n for currency in currency_list:\n temp = currency_list[currency_list.index(currency)]\n currency_list[currency_list.index(currency)] = currency_list[0]\n currency_list[0] = temp\n if main_currency_title == currency:\n self.currency_table.setHorizontalHeaderLabels((*currency_list[1:], \"Date\"))\n # from https://www.youtube.com/watch?v=l2OoXj1Z2hM&t=411s\n records = enumerate(load_all(main_currency))\n for row_num, row_data in records:\n self.currency_table.insertRow(row_num)\n for column_num, data in enumerate(row_data):\n self.currency_table.setItem(\n row_num, column_num, QTableWidgetItem(str(data))\n )",
"def update_ui(self):\n if self.results:\n self.table.setRowCount(len(self.results))\n for idx, item in enumerate(self.results):\n char, latex, description, user_description = item\n char_item = QTableWidgetItem(char)\n char_item.setFlags(char_item.flags() & ~Qt.ItemIsEditable)\n latex_item = QTableWidgetItem(latex)\n latex_item.setFlags(latex_item.flags() & ~Qt.ItemIsEditable)\n user_desc = \" [{}]\".format(user_description) if user_description else \"\"\n description_item = QTableWidgetItem(\"{}{}\".format(description, user_desc))\n description_item.setFlags(description_item.flags() & ~Qt.ItemIsEditable)\n self.table.setItem(idx, 0, char_item)\n self.table.setItem(idx, 1, latex_item)\n self.table.setItem(idx, 2, description_item)\n self.table.setCurrentCell(0, 0)\n else:\n self.table.setRowCount(0)"
] | [
"0.61419713",
"0.6002335",
"0.5974986",
"0.5882579",
"0.58677083",
"0.5769657",
"0.5767029",
"0.5742593",
"0.5692034",
"0.5499051",
"0.54903704",
"0.5464179",
"0.5461997",
"0.54431784",
"0.54431045",
"0.5406274",
"0.53638023",
"0.53550744",
"0.5337333",
"0.53190416",
"0.5300453",
"0.52891356",
"0.5255966",
"0.52545667",
"0.52500993",
"0.52397037",
"0.5225906",
"0.5216967",
"0.52132267",
"0.52114415"
] | 0.64010185 | 0 |
Runs all the tests in the experiment with the given file and number of samples | def run_tests(file, samples):
# Get the script dir, name and check if the file given exists
test_dir = os.path.dirname(os.path.realpath(__file__))
script_name = os.path.basename(__file__)
if not os.path.isfile(os.path.join(test_dir, file)):
sys.stderr.write('{0}: file \'{1}\' not found\n'.format(script_name, file))
sys.exit(0)
result_dir = os.path.join(test_dir, 'results')
if not os.path.exists(result_dir):
os.mkdir(result_dir)
# Get a path to the build dir to run iengine and cd into it
filepath = os.path.join(test_dir, file)
exe_path = os.path.join(os.path.join(test_dir, '..'), 'cmake-build-debug')
os.chdir(exe_path)
# Open csv files for writing to
time_dist = open(os.path.join(result_dir, 'time.csv'), 'a')
inference_dist = open(os.path.join(result_dir, 'inference.csv'), 'a')
time_writer = csv.DictWriter(time_dist, delimiter=',', fieldnames=['method',
'file',
'sample',
'time'])
inference_writer = csv.DictWriter(inference_dist, delimiter=',',
fieldnames=['method', 'file',
'sample', 'inference_length'])
time_writer.writeheader()
inference_writer.writeheader()
# Run through tests for all inference methods
for method in ['FC', 'BC', 'TT']:
timer = timeit.Timer(functools.partial(execute, filepath, method))
avg = 0
avg_path = 0
# Run through all samples for the current inference method getting the execution
# time and the number of inferences/models considered in the process
for i in range(0, samples):
print(timer.timeit(1))
current, (result, err) = timer.timeit(1)
avg += current * 1000
result = result.decode('ascii').replace(',', '').replace(':', '')
result_list = str.split(result)[1:]
length = len(result_list)
if method == 'TT':
length = int(result_list[0])
avg_path += length
time_writer.writerow({'method': method, 'file': file, 'sample': i,
'time': current})
inference_writer.writerow({'method': method, 'file': file, 'sample': i,
'inference_length': length})
terminology = 'inferences'
if method == 'TT':
terminology = 'models'
print('Method: {0}, Average time: {1:.3f}ms, Average {2}: {3}'.format(method,
avg / samples, terminology, avg_path / samples))
time_dist.close()
inference_dist.close() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_run(self):\n files = [\n (\"AS1-1.phy_r8s.txt\", \"AS1-1.phy_r8s.txt_2.5.txt\"),\n (\"AS1-3.phy_r8s.txt\", \"AS1-3.phy_r8s.txt_2.5.txt\"),\n (\"AS1-4.phy_r8s.txt\", \"AS1-4.phy_r8s.txt_2.5.txt\"),\n ]\n for file_pair in files:\n input_file = file_pair[0]\n expected_file = file_pair[1]\n infile = self.test_data_path + input_file\n outfile = self.test_data_path + expected_file\n divnum = 2.5\n result = run(infile, divnum)\n\n with open(outfile) as handle:\n expected_result = handle.read()\n self.assertEqual(expected_result, result)",
"def main():\n for filename in sys.argv[1:]:\n test(filename)",
"def run_tests(sample_dir):\n print(\"Preparing tests\")\n sample_files = file.find_all_files(sample_dir)\n total_files = len(sample_files)\n progress = 0\n results = [] # [[success, correct, filename, time, confidence, found_id],...]\n for sample_file in sample_files:\n progress += 1\n print(\"\\rRunning tests (%d/%d)...\" % (progress, total_files), end=\"\")\n sample_file_path = os.path.join(sample_dir, sample_file)\n is_valid, song_id = file.get_song_id_from_filename(sample_file)\n if not is_valid:\n print(\"File %s doesn't have a valid filename, skipping\" % sample_file)\n continue\n if song_id == 0:\n continue\n success, confidences, found_song_id, result, time, title, artist = match.match_file(sample_file_path)\n correct = song_id == found_song_id or (not success and song_id == 0)\n results.append([success, correct, sample_file, time, confidences[0][1], found_song_id])\n total_results = 0\n total_successful_results = 0\n total_correct_results = 0\n total_correct_successful_results = 0\n total_time = 0\n times = []\n for result in results:\n total_results += 1\n total_time += result[3]\n times.append(result[3])\n if not result[0]:\n if result[1]:\n print(\"Could not find %s in the database in %fs, as expected\" % (result[2], result[3]))\n total_correct_results += 1\n else:\n print(\"File %s could not be matched in %fs\" % (result[2], result[3]))\n else:\n total_successful_results += 1\n if result[1]:\n print(\"File %s was correctly matched with a confidence of %f in %fs\" % (result[2], result[4], result[3]))\n total_correct_results += 1\n total_correct_successful_results += 1\n else:\n print(\"File %s was incorrectly matched with a confidence of %f in %fs. (found %d)\" % (result[2], result[4], result[3], result[5]))\n print(\"========================================================================\")\n print(\" Tests run: %d\" % total_results)\n print(\" Tests successful: %d\" % total_successful_results)\n print(\" Tests correct: %d\" % total_correct_results)\n print(\"------------------------------------------------------------------------\")\n print(\" Average time: %fs\" % (total_time/total_results))\n print(\" 90-percentile: %fs\" % get_90_percentile(times))\n print(\" Percentage correct of successful: %f\" % ((float(total_correct_successful_results)/total_successful_results)*100))\n print(\" Percentage correct of total: %f\" % ((float(total_correct_results)/total_results)*100))\n print(\"========================================================================\")",
"def run_simulations(self,i_iteration,n_samples=None,filename=None):\n\n assert type(i_iteration) is int\n assert type(n_samples) in [type(None),int]\n assert type(filename) in [type(None),str]\n\n\n # define some convenience local variables for readability\n i = i_iteration\n if n_samples is not None:\n _n_samples = self.configuration.sampling_type[i]['n_samples']\n else:\n _n_samples = n_samples\n\n _sampling_type = self.configuration.sampling_type[i]['type']\n if filename is not None:\n _filename = self.configuration.sampling_type[i][n_samples]\n else:\n pass",
"def main():\n dims = params['dims']\n\n for d in dims:\n print('**** Running test for d={0:d} ****'.format(d))\n run_test(d)",
"def RunSuite(config, files, extra_flags, errors):\n global ERRORS, CONCURRENCY\n Banner('running %d tests' % (len(files)))\n pool = multiprocessing.Pool(processes=CONCURRENCY)\n # create a list of run arguments to map over\n argslist = [(num, len(files), config, test, extra_flags)\n for num, test in enumerate(files)]\n # let the process pool handle the test assignments, order doesn't matter\n pool.map(RunTest, argslist)\n while not ERRORS.empty():\n phase, test = ERRORS.get()\n errors[phase].append(test)",
"def RunData(files, wavelength=None, out='testdata'):\n for i, file in enumerate(files):\n forwardModel(file=file, out='results/%s%i' % (out, i), wavelength=wavelength)",
"def repeat_tagger_tests(fname, number_of_tests=2, **kwargs):\n for n in range(number_of_tests):\n t = TaggerTester(file_name=fname, **kwargs)\n t.split_groups()\n t.estimate_tagger_accuracy()\n t.print_results()\n print \"\\n\"",
"def run_sample(smp: sample.Sample,\n run_dir: Text,\n summary_file: Optional[Text] = None,\n generate_sample_ns: Optional[int] = None):\n start = time.time()\n # Create a script named 'run.sh' for rerunning the sample.\n args = [\n SAMPLE_RUNNER_MAIN_PATH,\n '--logtostderr',\n '--input_file=sample.x',\n '--options_file=options.pbtxt',\n ]\n\n _write_to_file(run_dir, 'sample.x', smp.input_text)\n _write_to_file(run_dir, 'options.pbtxt', smp.options.to_pbtxt())\n args_filename = 'args.txt'\n _write_to_file(\n run_dir, args_filename, sample.args_batch_to_text(smp.args_batch)\n )\n args.append('--args_file=args.txt')\n ir_channel_names_filename = None\n if smp.ir_channel_names is not None:\n ir_channel_names_filename = 'ir_channel_names.txt'\n _write_to_file(run_dir, ir_channel_names_filename,\n sample.ir_channel_names_to_text(smp.ir_channel_names))\n args.append('--ir_channel_names_file=ir_channel_names.txt')\n args.append(run_dir)\n _write_to_file(\n run_dir,\n 'run.sh',\n f'#!/bin/sh\\n\\n{subprocess.list2cmdline(args)}\\n',\n executable=True)\n logging.vlog(1, 'Starting to run sample')\n logging.vlog(2, smp.input_text)\n runner = sample_runner.SampleRunner(run_dir)\n runner.run_from_files(\n 'sample.x', 'options.pbtxt', args_filename, ir_channel_names_filename\n )\n timing = runner.timing\n\n timing.total_ns = int((time.time() - start) * 1e9)\n if generate_sample_ns:\n # The sample generation time, if given, is not part of the measured total\n # time, so add it in.\n timing.total_ns += generate_sample_ns\n timing.generate_sample_ns = generate_sample_ns\n\n logging.vlog(1, 'Completed running sample, elapsed: %0.2fs',\n time.time() - start)\n\n if summary_file:\n _write_ir_summaries(run_dir, timing, summary_file)",
"def main():\n run_test_summary1a()\n run_test_summary1c()\n run_test_summary1c()",
"def run(config, workdir, files, output=None, n=1):\n file = None\n for i in range(1, n+1):\n Logger.info(f'Starting run {i}/{n}')\n\n if output:\n file = f'{output}/run_{i}.dat'\n\n # Make sure these directories are clean\n for file in glob(f'{workdir}/lut_h2o/*'):\n os.remove(file)\n for file in glob(f'{workdir}/output/*'):\n os.remove(file)\n\n profile(\n args = SimpleNamespace(\n input_radiance = files[0],\n input_loc = files[1],\n input_obs = files[2],\n working_directory = workdir,\n config_file = config,\n wavelength_path = None,\n log_file = None,\n logging_level = 'DEBUG',\n pressure_elevation = None\n ),\n output = file\n )",
"def run_tests():\n db.connect()\n db.create_tables([Result])\n\n config = configparser.ConfigParser()\n config.read(\"config.ini\")\n number_of_generations = int(config[\"genetics\"][\"number_of_generations\"])\n\n test_file_paths = []\n\n for file in os.listdir(\"resources/tests/umpalettierung\"):\n if file.endswith(\".csv\"):\n test_file_paths.append(os.path.join(\"resources/tests/umpalettierung\", file))\n\n for path in test_file_paths:\n start = timeit.default_timer()\n boxes_to_pack, box_types = read_input(path)\n size_of_population = int(config[\"genetics\"][\"population_multiplier\"]) * len(boxes_to_pack)\n test_name_list = []\n for box_type in box_types:\n test_name_list.append(f\"{box_type.identifier[:5]}_{box_type.quantity()}\")\n test_name_list.sort()\n test_name = '.'.join(test_name_list)\n print(\n f\"Running {test_name} with {number_of_generations} generations with a population size of {size_of_population}\")\n print(box_types)\n pop, stats, hof = run_genetics(boxes_to_pack, box_types, number_of_generations, size_of_population)\n get_packing_order(hof[0], boxes_to_pack, box_types, test_name=test_name)\n stop = timeit.default_timer()\n save_results(test_name, start, stop, len(pop), number_of_generations, pop, stats)",
"def runner_scenario_x_times(repetitions, scenario_names, feature_files, out):\n if scenario_names is not None:\n to_test = scenario_names\n elif feature_files is not None:\n to_test = feature_files\n else:\n to_test = \"testsuite\"\n msg = (\"\\nRunning \" + str(repetitions) + \" times test(s):\\n \" \n + str(to_test) + \"\\n\")\n print(msg)\n if out:\n out_name = os.path.splitext(out)[0]\n ext = os.path.splitext(out)[1]\n for i in range(repetitions):\n print(\"Iteration number: \" + str(i+1))\n if out:\n out = out_name + \"-\" + str(i) + ext\n p = Process(target=worker_scenario, \n args=(scenario_names, feature_files, out))\n p.start()\n p.join()",
"def perform_filecheck():\n\n\t# Open files\n\ttrain = open('train_aae_final', 'r')\n\ttest = open('test_aae_final', 'r')\n\n\n\t# Check number of training and testing samples\n\tprint (\"\")\n\tprint (\"Number of training samples =\", len(train.readlines()))\n\tprint (\"Number of testing samples =\", len(test.readlines()))\n\tprint (\"\")\n\n\ttrain.close()\n\ttest.close()",
"def run_tests():\r\n source1 = TextModel('50 Shades of Gray')\r\n source1.add_file('50.txt')\r\n \r\n print()\r\n \r\n source2 = TextModel('King James Version of the Bible')\r\n source2.add_file('kjv.txt')\r\n\r\n print()\r\n\r\n new1 = TextModel('Shakespeare')\r\n new1.add_file('shake.txt')\r\n new1.classify(source1, source2)\r\n \r\n print()\r\n \r\n new2 = TextModel('JK Rowling')\r\n new2.add_file('hp.txt')\r\n new2.classify(source1, source2)\r\n \r\n print()\r\n \r\n new3 = TextModel('Breitbart News Network')\r\n new3.add_file('bnn.txt')\r\n new3.classify(source1, source2)\r\n \r\n print()\r\n \r\n new4 = TextModel('Chaucer')\r\n new4.add_file('tct.txt')\r\n new4.classify(source1, source2)",
"def TestSample(self, index=None, params=None):\t\t\n\t\tif index == None:\n\t\t\tindex = random.randint(1,self.nTest)\n\t\ts = ReadAIFF(self.dataDir+'test'+('%i'%index)+'.aiff')\n\t\tP, freqs, bins = mlab.specgram(s, **params)\n\t\treturn P, freqs, bins",
"def run_tests():\n source1 = TextModel('hilary_speaches')\n source1.add_file('hilary_source_text.txt')\n\n source2 = TextModel('bernie_speaches')\n source2.add_file('bernie_source_text.txt')\n\n new1 = TextModel('trump_speach')\n new1.add_file('trump_text.txt')\n new1.classify(source1, source2)\n\n new2 = TextModel('hilary_test')\n new2.add_file('hilary_test.txt')\n new2.classify(source1, source2)\n\n new3 = TextModel('bernie_test')\n new3.add_file('bernie_test.txt')\n new3.classify(source1, source2)\n\n new4 = TextModel('bill_clinton_test')\n new4.add_file('bill_clinton_source.txt')\n new4.classify(source1, source2)",
"def run_all_tests(self):\n for index in range(len(self.__test_set_list)):\n self.run_test(index)",
"def test_run(self):\n sut = ExperimentEmail()\n train = os.path.join(os.path.dirname(__file__), \"data\", \"sample.csv\")\n val = os.path.join(os.path.dirname(__file__), \"data\", \"sample.csv\")\n outdir = tempfile.mkdtemp()\n\n # Act\n sut.run(train, val, outdir, batch_size=32, epochs=2)",
"def runAllTests(path, runAll=False, skipEka1Crash=False):\n# if not runAll:\n# try:\n# passed = tools.dataFromFile('passed_tests')\n# except IOError:\n# passed = {}\n# print len(passed), 'tests passed'\n passed = {}\n skipped = {}\n failed = {}\n output_file.write( \"Running tests in \" + path + \"\\n\" )\n for f in os.listdir(path):\n if f[-4:] == 'phpt':\n if runAll or str(f) not in passed:\n try:\n output_file.write( f + \": \" )\n runTest( os.path.join( path, f ),\n skipEka1Crash=skipEka1Crash )\n except DiffError, e:\n output_file.write( \"** FAIL **\\n\" )\n output_file.write( e.diff() + \"\\n\" )\n failed[str(f)] = 1\n except SkipError, e:\n output_file.write( \"** SKIP ** (%s)\\n\" % str(e) )\n skipped[str(f)] = 1\n except Exception, e:\n output_file.write( \"Unknown exception (%s) from runTest\\n\" % str(e) )\n output_file.flush()\n else:\n output_file.write( \"* OK *\\n\" )\n passed[str(f)] = 1\n output_file.flush()\n output_file.write( \"==================================================\\n\" )\n output_file.write( \"Summary for tests in \" + path + \"\\n\" )\n output_file.write( \"Passed (\" + str(len(passed)) + \"):\\n\" )\n for filename in passed.keys():\n output_file.write( filename + \"\\n\" )\n output_file.write( \"--------------------------------------------------\\n\" )\n output_file.write( \"Failed (\" + str(len(failed)) + \"):\\n\" )\n for filename in failed.keys():\n output_file.write( filename + \"\\n\" )\n output_file.write( \"--------------------------------------------------\\n\" )\n output_file.write( \"Skipped (\" + str(len(skipped)) + \"):\\n\" )\n for filename in skipped.keys():\n output_file.write( filename + \"\\n\" )\n output_file.write( \"==================================================\\n\" )\n output_file.flush()",
"def run_analysis(wf):\n if wf.analysis[\"type\"] == \"one_sample_tests\":\n start_one_sample_tests(wf)\n\n elif wf.analysis[\"type\"] == \"two_sample_tests\":\n start_two_sample_tests(wf)\n\n elif wf.analysis[\"type\"] == \"factorial_tests\":\n start_factorial_tests(wf)\n\n elif wf.analysis[\"type\"] == \"n_sample_tests\":\n start_n_sample_tests(wf)\n\n info(\"> Finished analysis\")",
"def run_sample(smp: sample.Sample,\n run_dir: Text,\n summary_file: Optional[Text] = None,\n generate_sample_ns: Optional[int] = None):\n start = time.time()\n\n _write_to_file(run_dir, 'sample.x', smp.input_text)\n _write_to_file(run_dir, 'options.json', smp.options.to_json())\n if smp.args_batch:\n _write_to_file(run_dir, 'args.txt',\n sample.args_batch_to_text(smp.args_batch))\n\n # Create a script named 'run.sh' for rerunning the sample.\n args = [\n SAMPLE_RUNNER_MAIN_PATH, '--logtostderr', '--input_file=sample.x',\n '--options_file=options.json'\n ]\n if smp.args_batch:\n args.append('--args_file=args.txt')\n args.append(run_dir)\n _write_to_file(\n run_dir,\n 'run.sh',\n f'#!/bin/sh\\n\\n{subprocess.list2cmdline(args)}\\n',\n executable=True)\n logging.vlog(1, 'Starting to run sample')\n logging.vlog(2, smp.input_text)\n runner = sample_runner.SampleRunner(run_dir)\n runner.run_from_files('sample.x', 'options.json', 'args.txt')\n timing = runner.timing\n\n timing.total_ns = int((time.time() - start) * 1e9)\n if generate_sample_ns:\n # The sample generation time, if given, is not part of the measured total\n # time, so add it in.\n timing.total_ns += generate_sample_ns\n timing.generate_sample_ns = generate_sample_ns\n\n logging.vlog(1, 'Completed running sample, elapsed: %0.2fs',\n time.time() - start)\n\n if summary_file:\n _write_ir_summaries(run_dir, timing, summary_file)",
"def ConstrTest():\n with open(path.join(MAIN_PATH, TEST)) as f:\n for line in f:\n line = line.strip().split(\"\\t\")\n src, dest = line[1:]\n features = Features(src, dest)\n test_instances.append(features)",
"def run_tests():\n def print_result(result, correct):\n if result == correct:\n print(\" OK!\")\n else:\n print(f\" Failed ({result} != {correct})!\")\n for n, test in enumerate(_tests, start=1):\n print(f\"Running test {n}...\")\n nums = line2ints(test[\"in\"])\n try:\n correct = test[\"part1\"]\n except KeyError:\n pass\n else:\n print(\" Testing part 1...\", end=\"\")\n result = part1(nums, steps=test.get(\"phases1\", 100))\n print_result(result, correct)\n try:\n correct = test[\"part2\"]\n except KeyError:\n pass\n else:\n print(\" Testing part 2...\", end=\"\")\n result = part2(nums, steps=test.get(\"phases2\", 100))\n print_result(result, correct)",
"def train(self, n, filename):\n self.n = n\n for line in open(filename):\n samp = line.rstrip('\\n')\n# samp = '~' + samp + '~'\n for i in range(len(samp) - n):\n w = samp[i:i + n]\n self.counts[w] += 1\n self.total_count += 1",
"def test_example_runs(self):\n run_example(\n verbose=False,\n testapp=self.testapp,\n )",
"def automatic_checking(files):\n for i in range(10):\n fft_checking(files[i])",
"def run_tests_from_file(base_url, test_file_path, format):\n \n\n with open(test_file_path) as data_file:\n \n tests_json = json.load(data_file)\n tests = []\n\n for t in tests_json:\n\n if \"payload\" in t:\n new_test = Test(t[\"name\"], t[\"endpoint\"], t[\"method\"], t[\"payload\"])\n else:\n new_test = Test(t[\"name\"],t[\"endpoint\"],t[\"method\"])\n\n if \"expected_response_values\" in t:\n\n exp_vals = t[\"expected_response_values\"]\n for key in exp_vals:\n\n new_test.add_expected_value(key, exp_vals[key])\n\n if \"expected_response_types\" in t:\n\n exp_types = t[\"expected_response_types\"]\n for key in exp_types:\n\n new_test.add_expected_type(key, exp_types[key])\n\n tests.append(new_test)\n\n run_tests(base_url, tests, format)",
"def runTests(self):\n \n pass",
"def run_test(files, full=False, threshold=2):\n columns = ['detector', 'image', 'common', 'repeat']\n data = []\n\n if full:\n det_s = {**DetectorDescriptor.detectors, **DetectorDescriptor.xdetectors}.keys()\n else:\n det_s = DetectorDescriptor.detectors.keys()\n\n for detector in det_s:\n algo = DetectorDescriptor(detector)\n print(\"Running test {}\".format(detector))\n\n for f in files:\n filename = os.path.basename(f).split('.')[0]\n image = cv2.imread(f, 0)\n kps = algo.detect(image)\n\n h = h_for_file(f)\n if h is None: # This will be the case for the base image (img1)\n baseimg = image\n basepts = kps\n\n data.append([detector, filename, len(basepts), len(basepts)])\n continue\n\n hi = linalg.inv(h)\n mask = create_mask(baseimg.shape, hi)\n\n # Only those that are common\n bpts = []\n for pt in basepts:\n if point_in_mask(pt.pt, mask):\n bpts.append(pt)\n bptst = np.vstack([pt.pt for pt in bpts])\n\n rep = 0\n for point in kps:\n tp = transform_point(point.pt, hi)\n if point_in_mask(tp, mask):\n dists = distance.cdist([tp], bptst)\n if np.min(dists) < threshold:\n rep += 1\n\n data.append([detector, filename, len(bpts), rep])\n\n df = pd.DataFrame(data, columns=columns)\n df['repeatability'] = df['repeat'] / df['common']\n return df"
] | [
"0.7284664",
"0.7053512",
"0.67617613",
"0.6700593",
"0.66708344",
"0.6572991",
"0.6553279",
"0.6417811",
"0.6270118",
"0.62472415",
"0.6210755",
"0.6205478",
"0.61505103",
"0.6144112",
"0.6141312",
"0.61252826",
"0.6122838",
"0.60793597",
"0.60695887",
"0.6057799",
"0.6051734",
"0.6050729",
"0.6029565",
"0.6014296",
"0.60083103",
"0.59951127",
"0.59943646",
"0.59845656",
"0.5982719",
"0.5974663"
] | 0.7327586 | 0 |
R""" equality comparison between this and another Classifier, simply checks if A B == 0 | def __eq__(self,other):
return (self - other == 0.) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_equal(self, a, b):\n return a.X[0] == b.X[0]",
"def __eq__(self, other):\n if isinstance(other, DenseUnit):\n return (Counter(self.dimension) == Counter(other.dimension) and Counter(self.points) == Counter(\n other.points))\n return False",
"def __eq__(self, other):\n return True if self._compare(other) == 0 else False",
"def __eq__(self, other):\r\n return self.label == other.label and self.positive_state == other.positive_state",
"def __eq__(self, other):\n\n if isinstance(other, (int, type(Zero()))):\n if other == 0:\n if self.args == []:\n return True\n else:\n return False\n\n frame = self.args[0][1]\n for v in frame:\n if expand((self - other) & v) != 0:\n return False\n return True",
"def __eq__(self, other: object) -> bool:\n\n if self.__sub__(other) == 0:\n return True\n return False",
"def __eq__(self, other):\n return np.allclose(self.P, other.P)",
"def __eq__(self, other):\n eq = True\n for attr in ['geocode',\n 'geocodeDict',\n 'geolevel',\n 'parentGeocode',\n 'raw',\n 'raw_housing',\n 'dp',\n 'syn',\n 'syn_unrounded',\n # 'cons',\n # 'invar',\n # 'dp_queries',\n # 'congDistGeocode',\n # 'sldlGeocode',\n # 'slduGeocode',\n ]:\n\n eq = eq and self.__getattribute__(attr) == other.__getattribute__(attr)\n\n #eq = eq and (np.array_equal(self.raw.toDense(), other.raw.toDense()))\n return eq",
"def __eq__(self, other):\r\n if isinstance(other, vec4):\r\n return self.x==other.x and self.y==other.y and self.z==other.z\r\n else:\r\n return 0",
"def __eq__(self, other):\n return abs(self - other) < 10e-10",
"def __eq__(self, other):\n\n return self._comparator.compare_measurements(self, other) == 0",
"def __eq__(self, other):\n return self.num == other.num",
"def __eq__(self, other):\n return np.array_equal(\n self.np_floats(),\n other.np_floats()) and np.array_equal(\n self.np_ints(),\n other.np_ints()) and np.array_equal(\n self.freqs,\n other.freqs)",
"def __eq__(self,v2):\n\t\treturn -1e-13<(self-v2).norm()<1e-13",
"def equals(self, other):\n return (self.same_labels_as(other) and np.allclose(self, other))",
"def __eq__(self, other):\n firstnum = self.num*other.den\n secondnum = self.den*other.num\n\n return firstnum == secondnum",
"def __eq__(self, other):\n return (self.vertices == other.vertices and self.weight == other.weight)",
"def __eq__(self, other):\n # Note that tf doesn't override \"==\" and \"!=\", unlike numpy.\n return tf.math.equal(self._ordinals, other.ordinal())",
"def __eq__(self, other):\n return (isinstance(other, KGCorrelation) and\n self.nbins == other.nbins and\n self.bin_size == other.bin_size and\n self.min_sep == other.min_sep and\n self.max_sep == other.max_sep and\n self.sep_units == other.sep_units and\n self.coords == other.coords and\n self.bin_type == other.bin_type and\n self.bin_slop == other.bin_slop and\n self.min_rpar == other.min_rpar and\n self.max_rpar == other.max_rpar and\n self.xperiod == other.xperiod and\n self.yperiod == other.yperiod and\n self.zperiod == other.zperiod and\n np.array_equal(self.meanr, other.meanr) and\n np.array_equal(self.meanlogr, other.meanlogr) and\n np.array_equal(self.xi, other.xi) and\n np.array_equal(self.xi_im, other.xi_im) and\n np.array_equal(self.varxi, other.varxi) and\n np.array_equal(self.weight, other.weight) and\n np.array_equal(self.npairs, other.npairs))",
"def __eq__(self, other):\n return self._reNum == other._reNum and self._imNum == other._imNum",
"def __eq__(self, other):\n if not isinstance(other, PantsMappingClass):\n # print(\"A\")\n return False\n # if other._pants_decomposition != self._pants_decomposition:\n # print(\"B\")\n # return False\n # print(\"C\")\n return (self * other.inverse()).is_identity()",
"def __eq__(self, other):\n return bool(_make._alpha_equal(self, other))",
"def __eq__(self, other):\n \n if not tools.data_are_equal(self.attrs, other.attrs):\n print('here')\n return False\n \n return tools.data_are_equal(self.components, other.components)",
"def __eq__(self, other):\n return (isinstance(other, type(self)) and (self.get_all_features() == other.get_all_features()))",
"def __eq__(self, other):\n if isinstance(other, self.__class__):\n return self.index == other.index and \\\n self.label == other.label and \\\n self.labels == other.labels and \\\n np.all(self.vector == other.vector)\n return NotImplemented",
"def __eq__(self, other):\n # check equality of names and attributes as well as that of the incident Node objects\n return \\\n self.weight == other.get_weight() and \\\n self.attributes.__eq__(other.get_attributes()) and \\\n self.get_incident_nodes().__eq__(other.get_incident_nodes())",
"def __eq__(self, oth):\n return int(self) != oth",
"def __eq__(self, other):\n return ZeroaryOperator.__eq__(self, other) and \\\n self.relation_key == other.relation_key",
"def is_converged(self,a,b):\n return np.array_equal(a,b)",
"def alpha_equivalent(self, other) -> bool:\n raise NotImplementedError()"
] | [
"0.7100766",
"0.6762525",
"0.67273325",
"0.6711093",
"0.66767555",
"0.663195",
"0.6626179",
"0.6616397",
"0.6603185",
"0.65802336",
"0.6551513",
"0.6542839",
"0.6533325",
"0.64744145",
"0.6461463",
"0.64576995",
"0.63981915",
"0.6391639",
"0.6390027",
"0.63806355",
"0.63771987",
"0.63741046",
"0.63382494",
"0.6333512",
"0.6331757",
"0.631757",
"0.63137716",
"0.63027585",
"0.6295428",
"0.62917006"
] | 0.7466905 | 0 |
R""" difference between this and another Graph, just the norm between graphwide Graphlet Degree Vectors | def __sub__(self,other):
return np.linalg.norm(self.ngdv-other.ngdv) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def norm(self):",
"def gradient_other(self):\n # This is just the difference in the feature values\n return self.fvs",
"def __abs__(self):\n return Vector.createFromPoint(self).norm",
"def fangle_degr(self):\r\n\r\n return self._versor_1.angle_degr(self._versor_2)",
"def sym_difference(self, other):\n return self._geomgen(capi.geom_sym_diff, other)",
"def dist(first, other):\n if isinstance(first,FreeCAD.Vector) and isinstance(other,FreeCAD.Vector):\n return length(sub(first,other))",
"def norm(self):\n # TODO: implement\n return",
"def symmetric_difference(self, other): # -> BaseGeometry:\n ...",
"def difference(self, other): # -> BaseGeometry:\n ...",
"def norm(self) -> \"Vector\":\n self.values = tuple(self/self.mag())\n return self",
"def __rtruediv__(self, other):\n value = -1 / (self.val * self.val)\n total = {self.var: other * value}\n return AutoDiffReverse(other / self.val, None, total)",
"def norm_distance(self):\n graph_size = self.N + self.M\n return self.distance() / (1. * graph_size)",
"def reflect(self, other):\n x1, y1 = self\n x2, y2 = other\n L = (x2 * x2 + y2 * y2)\n if L > pygonal.EPSILON2:\n temp = 2 * (x1 * x2 + y1 * y2) / L\n return tuple.__new__(Vec2, (x2 * temp - x1, y2 * temp - y1))\n else:\n return null",
"def attraction(self, other: Body) -> Vector:\n dist = self.position - other.position\n dist_modsq = dist.lensq\n dist_unit = dist / math.sqrt(dist_modsq) # Unit vector\n G = 6.674384e-11\n force_mod = G * self.mass * other.mass / dist_modsq\n return dist_unit * force_mod",
"def get_difference(self, other, x, y, norm='L2'):\n norms = {'L2': None, 'Linf': numpy.inf}\n field = self.restrict(x, y)\n other = other.restrict(x, y)\n subtracted = field.subtract(other)\n return numpy.linalg.norm(subtracted.values, ord=norms[norm])",
"def dist(self, other: \"Vector\", sqr=False) -> float: #distance between 2 vectors\n if sqr:\n return (self-other).sqr_mag()\n return (self-other).mag()",
"def normal(self) -> Vector:\n return normalize(cross(self.d1, self.d2))",
"def difference(self, other):\n return self._geomgen(capi.geom_diff, other)",
"def norm2(self):\n\t\treturn self.x ** 2 + self.y ** 2 + self.z ** 2",
"def rel_diff (x, y, ord=2):\n return np.linalg.norm (x - y, ord=ord) / np.linalg.norm (y, ord=ord)",
"def norm(self):\n raise NotImplementedError",
"def assign_lengths(G):\r\n for u, v, d in G.edges(data=True):\r\n posA = nx.get_node_attributes(G, 'pos')[u]\r\n posB = nx.get_node_attributes(G, 'pos')[v]\r\n\r\n dist = np.linalg.norm(np.subtract(posA, posB))\r\n d['distance'] = dist\r\n return G",
"def difference(G, H):\n\n if G.order() != H.order():\n msg = \"Node sets of the two directed graphs are not equal!\"\n raise StaticGraphNotEqNodesException(msg)\n \n n_nodes = G.order()\n edges = ((u, v) for u in G.nodes()\n for v in set(G.successors(u)) - set(H.successors(u)))\n deg = make_deg(n_nodes, edges)\n edges = ((u, v) for u in G.nodes()\n for v in set(G.successors(u)) - set(H.successors(u)))\n D = make(n_nodes, G.size(), edges, deg)\n return D",
"def dirVector(self,p1,p2):\n v=p2-p1\n l=v.Length\n return self.toMatrix(v)/l",
"def diff(self, other):\n return mldivide(self, other)",
"def diff(self, other):\n return mldivide(self, other)",
"def __rtruediv__(self, other):\r\n return other * self.reciprocal()",
"def set_boundary_degrees_old(g, sg):\n boundary_degree = {}\n\n for u in sg.nodes():\n boundary_degree[u] = 0\n for v in g.neighbors(u):\n if not sg.has_node(v):\n boundary_degree[u] += g.number_of_edges(u, v) # for a multi-graph\n\n nx.set_node_attributes(sg, values=boundary_degree, name='b_deg')",
"def norm(self):\n return math.sqrt(self.dotProduct(self))",
"def norm(self):\n\n return self.abs()"
] | [
"0.61547583",
"0.6145987",
"0.592434",
"0.5875257",
"0.5720956",
"0.56777096",
"0.5639697",
"0.56370777",
"0.56287086",
"0.55806684",
"0.55758834",
"0.556339",
"0.55611145",
"0.5552496",
"0.555196",
"0.5551724",
"0.5530757",
"0.55174506",
"0.5510223",
"0.55084956",
"0.54966426",
"0.5492809",
"0.5491346",
"0.54820466",
"0.5477536",
"0.5477536",
"0.54633737",
"0.54455423",
"0.54259133",
"0.540688"
] | 0.6748099 | 0 |
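Read as a standalone function, the __sub__ in this record defines graph distance as the Euclidean norm of the difference between the two graphs' graph-wide Graphlet Degree Vectors (ngdv). A small sketch of that computation with plain arrays (the vectors here are made up for illustration):

```python
import numpy as np

def gdv_distance(ngdv_a, ngdv_b):
    """Distance between two graphs as the norm of their ngdv difference."""
    ngdv_a = np.asarray(ngdv_a, dtype=float)
    ngdv_b = np.asarray(ngdv_b, dtype=float)
    return np.linalg.norm(ngdv_a - ngdv_b)

# Identical graphlet-degree vectors give distance zero; differing ones do not.
assert gdv_distance([0.2, 0.3, 0.5], [0.2, 0.3, 0.5]) == 0.0
assert gdv_distance([0.2, 0.3, 0.5], [0.5, 0.3, 0.2]) > 0.0
```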
R""" builds the GraphLibrary from neighborhoods | def build(self,neighborhoods,k=5):
g_idx = np.zeros(len(neighborhoods),dtype=np.int)
for i, nn in enumerate(neighborhoods):
G = Graph(nn,k)
g_idx[i] = self.encounter(G)
for i, sig in enumerate(self.sigs):
if sig not in self.lookup:
self.lookup[sig] = np.array([],dtype=np.int)
self.lookup[sig] = np.hstack((self.lookup[sig],np.argwhere(g_idx==self.index[sig]).flatten())) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _build_graph(self):\n pass",
"def build_graph(self):\n pass",
"def build_graph(self):\n\t\tself._create_placeholders()\n\t\tself._create_embedding()\n\t\tself._create_recurrent_layers()\n\t\tself._create_de_embedding()\n\t\tself._create_loss()\n\t\tself._create_optimizer()\n\t\tself._create_summaries()",
"def _construct_graph(self):\n raise NotImplementedError",
"def build_graph(self):\n raise NotImplementedError",
"def makeGraph(self):\n self.floorGraph = graph.Graph()\n file = open(\"edges.csv\")\n edges = file.readlines()\n for edge in edges:\n params = edge.split(\",\")\n self.floorGraph.addEdge(params[0],params[1],float(params[2]))\n self.floorGraph.addEdge(params[1],params[0],float(params[2]))",
"def populate_graph(self):",
"def build_graph(self):\n self.__create_placeholders()\n self.__create_encoder()\n self.__create_latent()\n self.__create_decoder()\n self.__create_loss()\n self.__create_generate()\n self.__create_reconstruct()\n self.__create_optimizer()\n self.__create_summary()",
"def build(self,A,k=5):\n # instantiate a Crayon::Graph object\n self.cpp = _crayon.neighborhood(A,k)\n # retrieve adjacency matrix\n self.adj = self.cpp.adj()\n # compute its Graphlet Degree Vector\n self.gdv = self.cpp.gdv()\n # convert node-wise to graph-wise graphlet frequencies\n self.sgdv = np.sum(self.gdv,axis=0)\n # weight GDV according to dependencies between orbits\n o = np.array([1, 2, 2, 2, 3, 4, 3, 3, 4, 3,\n 4, 4, 4, 4, 3, 4, 6, 5, 4, 5,\n 6, 6, 4, 4, 4, 5, 7, 4, 6, 6,\n 7, 4, 6, 6, 6, 5, 6, 7, 7, 5,\n 7, 6, 7, 6, 5, 5, 6, 8, 7, 6,\n 6, 8, 6, 9, 5, 6, 4, 6, 6, 7,\n 8, 6, 6, 8, 7, 6, 7, 7, 8, 5,\n 6, 6, 4],dtype=np.float)\n w = 1. - o / 73.\n self.ngdv = self.sgdv * w[:self.sgdv.shape[0]]\n self.ngdv = self.ngdv / max(float(np.sum(self.ngdv)),1.)",
"def build_graph(self, graph, inst_name, port_nets):\n return",
"def build_graph(self):\n self.import_tree(ZOO_PATH, self.import_zoo, self.verify_zoos)\n self.import_tree(WILD_PATH, self.import_wild, self.verify_wilds)\n self.import_tree(PANDA_PATH, self.import_redpanda, self.verify_pandas)\n self.import_tree(MEDIA_PATH, self.import_media, self.verify_media)",
"def build_graph(self):\n for each_list in self.lab.look():\n vertice = self._add_vertice(each_list)\n if vertice:\n self.unvisited.add(vertice)\n self.graph.addEdge((self.current, vertice))\n \n self.unvisited -= self.visited\n self._connect_neighbours()",
"def _CreateGraph(self):\n self.nodes = []\n self.edges = []\n for i, r in self.airports.set_index('airport_id').iterrows():\n self.nodes.append((i,r.to_dict()))\n for i, r in self.routes.set_index(['src_id','dst_id']).iterrows():\n self.edges.append((i[0],i[1],r.to_dict()))\n # print('node ex: {}'.format(self.nodes[0]))\n # print('edge ex: {}'.format(self.edges[0]))\n\n self.graph = self._CreateAdjacencyListGraph()",
"def gen_graph(self):",
"def construct_graph(social_edges, spatial_edges, output_path=None):\n G = nx.DiGraph()\n with open(social_edges, 'r') as f:\n for l in f.read().splitlines():\n edge = l.split(\"\\t\")\n G.add_edge(USER_NODE_PREFIX + edge[0], USER_NODE_PREFIX + edge[-2], weight=float(edge[-1]))\n\n business_nodes = set([])\n with open(spatial_edges, 'r') as f:\n for l in f.read().splitlines():\n edge = l.split(\"\\t\")\n lat = float(edge[2])\n lng = float(edge[3])\n if edge[-2] not in business_nodes:\n G.add_node(BUSINESS_NODE_PREFIX + edge[-2], spatial={'lat': lat, 'lng': lng})\n business_nodes.add(edge[-2])\n\n with open(spatial_edges, 'r') as f:\n for l in f.read().splitlines():\n edge = l.split(\"\\t\")\n G.add_edge(USER_NODE_PREFIX + edge[0], BUSINESS_NODE_PREFIX + edge[-2], weight=float(edge[-1]))\n\n if output_path:\n pickle.dump(G, open(output_path, 'w'))\n return G",
"def build_graph(self, graph, inst_name, port_nets):\n self.add_graph_edges(graph, port_nets)",
"def build_graph(self):\n edge_data_by_type, all_edges, all_nodes = self.load_training_data(\n self.train_edges_file,\n slf_loop=self.config['slf_loop'],\n symmetry_edge=self.config['symmetry_edge'])\n\n num_nodes = len(all_nodes)\n node_features = {\n 'index': np.array(\n [i for i in range(num_nodes)], dtype=np.int64).reshape(-1, 1)\n }\n\n self.graph = heter_graph.HeterGraph(\n num_nodes=num_nodes,\n edges=edge_data_by_type,\n node_types=None,\n node_feat=node_features)\n\n self.edge_types = sorted(self.graph.edge_types_info())\n logging.info('total %d nodes are loaded' % (self.graph.num_nodes))",
"def _build_graph_general(self): \n\n #Find a canonical coloring scheme\n #Each node has a color that is determined by the non-mapped aspects\n nodecolors=set()\n for nl in self.net.iter_node_layers():\n nodecolors.add(self._slice_node_layer_not_allowed(nl))\n nodecolors_sorted=sorted(list(nodecolors))\n del nodecolors\n self._assert_full_order(nodecolors_sorted)\n self.colormap=dict( ((color,colorid) for colorid,color in enumerate(nodecolors_sorted) ))\n\n #each aux node has a color that is determined by the aspect\n self.auxcolormap=dict( ((auxcolor, auxcolorid+len(self.colormap)) for auxcolorid,auxcolor in enumerate(sorted(self.asp)) ) )\n\n\n #Add the underlying network\n #node-layers:\n for nl in self.net.iter_node_layers():\n nlid=self._get_node_id(nl)\n color=self._slice_node_layer_not_allowed(nl)\n colorid=self.colormap[color]\n self.add_node(nlid,colorid)\n\n #edges between node-layers:\n for nl1 in self.net.iter_node_layers():\n for nl2 in self.net[nl1]:\n nl1id=self._get_node_id(nl1)\n nl2id=self._get_node_id(nl2)\n self.add_link(nl1id,nl2id)\n\n\n #Add the auxiliary nodes and edges\n #add the aux nodes\n for a in self.asp:\n for elayer in self.net.slices[a]:\n auxid=self._get_auxnode_id( (a,elayer) )\n auxcolorid=self.auxcolormap[a]\n self.add_node(auxid,auxcolorid)\n \n #add the aux edges\n for nl in self.net.iter_node_layers():\n for a in self.asp:\n nlid=self._get_node_id(nl)\n auxid=self._get_auxnode_id( (a,nl[a]) )\n self.add_link(nlid,auxid)",
"def _build_graphs(self):\n g1 = self._build_graph1()\n g2 = self._build_graph2(g1)\n return g1, g2",
"def build_graph(edges):\n \n G = nx.MultiGraph()\n G.add_edges_from(edges)\n return G",
"def build_graph(self):\n for node in self.graph.nodes():\n self.c2py[node] = PyNode(node)\n for _input in node.inputs():\n if _input not in self.c2py:\n self.c2py[_input] = PyNode(_input, True)\n if _input in self.forward_edge:\n self.forward_edge[_input].append(node)\n else:\n self.forward_edge[_input] = [node]\n for output in node.outputs():\n if output not in self.c2py:\n self.c2py[output] = PyNode(output, True)\n if node in self.forward_edge:\n self.forward_edge[node].append(output)\n else:\n self.forward_edge[node] = [output]",
"def build_geom_neighbor_graph(geoms, n_neighbors):\n n_pts = geoms.shape[0]\n pyflann.set_distance_type('euclidean') # squared euclidean actually\n fli = pyflann.FLANN()\n build_params = dict(algorithm='kdtree', num_neighbors=n_neighbors)\n gneighbs, _ = fli.nn(geoms, geoms, **build_params)\n data = np.ones((n_pts, n_neighbors), dtype='u1')\n indptr = np.arange(0, n_pts * n_neighbors + 1, n_neighbors, dtype=int)\n gadj = sparse.csr_matrix(\n (data.ravel(), gneighbs.ravel(), indptr), shape=(n_pts, n_pts))\n return gadj",
"def make_n_glycan_neighborhoods():\n neighborhoods = NeighborhoodCollection()\n\n _neuraminic = \"(%s)\" % ' + '.join(map(str, (\n FrozenMonosaccharideResidue.from_iupac_lite(\"NeuAc\"),\n FrozenMonosaccharideResidue.from_iupac_lite(\"NeuGc\")\n )))\n _hexose = \"(%s)\" % ' + '.join(\n map(str, map(FrozenMonosaccharideResidue.from_iupac_lite, ['Hex', ])))\n _hexnac = \"(%s)\" % ' + '.join(\n map(str, map(FrozenMonosaccharideResidue.from_iupac_lite, ['HexNAc', ])))\n\n high_mannose = CompositionRangeRule(\n _hexose, 3, 12) & CompositionRangeRule(\n _hexnac, 2, 2) & CompositionRangeRule(\n _neuraminic, 0, 0)\n high_mannose.name = \"high-mannose\"\n neighborhoods.add(high_mannose)\n\n base_hexnac = 3\n base_neuac = 2\n for i, spec in enumerate(['hybrid', 'bi', 'tri', 'tetra', 'penta', \"hexa\", \"hepta\"]):\n if i == 0:\n rule = CompositionRangeRule(\n _hexnac, base_hexnac - 1, base_hexnac + 1\n ) & CompositionRangeRule(\n _neuraminic, 0, base_neuac) & CompositionRangeRule(\n _hexose, base_hexnac + i - 1,\n base_hexnac + i + 3)\n rule.name = spec\n neighborhoods.add(rule)\n else:\n sialo = CompositionRangeRule(\n _hexnac, base_hexnac + i - 1, base_hexnac + i + 1\n ) & CompositionRangeRule(\n _neuraminic, 1, base_neuac + i\n ) & CompositionRangeRule(\n _hexose, base_hexnac + i - 1,\n base_hexnac + i + 2)\n\n sialo.name = \"%s-antennary\" % spec\n asialo = CompositionRangeRule(\n _hexnac, base_hexnac + i - 1, base_hexnac + i + 1\n ) & CompositionRangeRule(\n _neuraminic, 0, 1 if i < 2 else 0\n ) & CompositionRangeRule(\n _hexose, base_hexnac + i - 1,\n base_hexnac + i + 2)\n\n asialo.name = \"asialo-%s-antennary\" % spec\n neighborhoods.add(sialo)\n neighborhoods.add(asialo)\n return neighborhoods",
"def main(config):\n data_path = config['paths']['data']\n\n # data\n output_file = os.path.join(config['paths']['figures'], 'network-bridge-map.png')\n road_edge_file = os.path.join(data_path, 'network', 'road_edges.shp')\n bridge_file = os.path.join(data_path, 'network', 'bridges.shp')\n\n # basemap\n proj_lat_lon = ccrs.PlateCarree()\n ax = get_axes()\n plot_basemap(ax, data_path)\n scale_bar(ax, location=(0.8, 0.05))\n plot_basemap_labels(ax, data_path, include_regions=False)\n\n styles = OrderedDict([\n ('national', Style(color='#ba0f03', zindex=5, label='National roads')),\n ('MAYOR SOBRE AGUA Y RUTA', Style(color='#9467bd', zindex=7, label='AGUA Y RUTA')),\n ('MAYOR SOBRE FERROCARRIL', Style(color='#2ca02c', zindex=8, label='FERROCARRIL')),\n ('MAYOR SOBRE RUTA', Style(color= '#ff7f0e', zindex=9, label='RUTA')),\n ('MAYOR SOBRE RUTA Y FERROCARRIL', Style(color='#e377c2', zindex=10, label='RUTA Y FERROCARRIL')),\n ('MAYOR SOBRE RUTA, AGUA Y FERROCARRIL', Style(color='#8c564b', zindex=11, label='RUTA, AGUA Y FERROCARRIL')),\n ('MAYOR SOBRE VIA DE AGUA', Style(color='#1f77b4', zindex=6, label='VIA DE AGUA')),\n ])\n\n # edges\n geoms_by_category = {\n 'national': [],\n 'MAYOR SOBRE AGUA Y RUTA': [],\n 'MAYOR SOBRE FERROCARRIL': [],\n 'MAYOR SOBRE RUTA':[],\n 'MAYOR SOBRE RUTA Y FERROCARRIL':[],\n 'MAYOR SOBRE RUTA, AGUA Y FERROCARRIL':[],\n 'MAYOR SOBRE VIA DE AGUA':[]\n }\n\n edges_national = geopandas.read_file(road_edge_file)\n edges_national = edges_national[edges_national['road_type'] == 'national']\n geoms_by_category['national'] = list(edges_national.geometry)\n\n\n bridges = geopandas.read_file(bridge_file,encoding='utf-8')\n for iter_, val in bridges.iterrows():\n cat = val['structur_1'].strip()\n geoms_by_category[cat].append(val.geometry)\n\n\n for cat, geoms in geoms_by_category.items():\n cat_style = styles[cat]\n if cat == 'national':\n ax.add_geometries(\n geoms,\n crs=proj_lat_lon,\n linewidth=1.25,\n facecolor='none',\n edgecolor=cat_style.color,\n zorder=cat_style.zindex\n )\n else:\n ax.scatter(\n [g.x for g in geoms],\n [g.y for g in geoms],\n transform=proj_lat_lon,\n facecolor=cat_style.color,\n s=6,\n zorder=cat_style.zindex\n )\n\n # legend\n legend_from_style_spec(ax, styles, loc=(0.48,0.2))\n\n # save\n save_fig(output_file)",
"def generate_model(self):\n rootpath = 'c:\\\\Users\\\\Gamelab\\\\Desktop\\\\RT\\\\Others\\\\Thesis\\\\Thesis_coding\\\\ABM\\\\' \n \n df = pd.read_csv(rootpath+'data\\\\subset_initialized_latlonvalues.csv')\n df = df.drop(columns='Unnamed: 0')\n households_in_block = {}\n household_ids_in_block = {}\n # holds all the graphs indexed by blockid [geoid]\n \n def add_and_remove_edges(G, p_new_connection, p_remove_connection): \n\n new_edges = [] \n rem_edges = [] \n for node in G.nodes(): \n # find the other nodes this one is connected to \n connected = [to for (fr, to) in G.edges(node)] \n # and find the remainder of nodes, which are candidates for new edges \n unconnected = [n for n in G.nodes() if not n in connected] \n\n # probabilistically add a random edge \n if len(unconnected): # only try if new edge is possible \n if random.random() < p_new_connection: \n new = random.choice(unconnected) \n G.add_edge(node, new) \n #print(\"\\tnew edge:\\t {} -- {}\".format(node, new) \n new_edges.append( (node, new) ) \n # book-keeping, in case both add and remove done in same cycle \n unconnected.remove(new) \n connected.append(new) \n\n # probabilistically remove a random edge \n if len(connected): # only try if an edge exists to remove \n if random.random() < p_remove_connection: \n remove = random.choice(connected) \n G.remove_edge(node, remove) \n #print \"\\tedge removed:\\t {} -- {}\".format(node, remove) \n rem_edges.append( (node, remove) ) \n # book-keeping, in case lists are important later? \n connected.remove(remove) \n unconnected.append(remove) \n return rem_edges, new_edges\n\n\n\n\n #now i need to get number of geoids unique \n for block in df['geoid'].unique(): \n G_temp=nx.Graph()\n households_in_block[block] = df[df['geoid']==block] # contains all the information about the households \n household_ids_in_block[block] = df[df['geoid']==block]['CASE_ID'].values \n # contains only their ID\n # you only need id to initialize a node\n tempdf = households_in_block[block]\n for household in household_ids_in_block[block]:\n lon = tempdf.loc[tempdf['CASE_ID']==household,'lon'].values[0]\n lat = tempdf.loc[tempdf['CASE_ID']==household,'lat'].values[0] \n \n G_temp.add_node(str(household), pos=(lon,lat))\n self.G.add_node(str(household), pos=(lon,lat))\n \n ## add G to the dictionary\n self.graph_dict[block] = G_temp\n \n \n rem_edges, new_edges = add_and_remove_edges(self.G, 0.5, 0.5)\n self.G.remove_edges_from(rem_edges)\n self.G.add_edges_from(new_edges)\n\n \n\n self.grid= NetworkGrid(self.G)\n \n for _, row in df.iterrows(): # index, row in ...\n \n agent = Household(unique_id = str(row['CASE_ID']),\n model = self, \n income = row['income'],\n age= row['age'],\n size= row['household_'],\n ami_category = row['ami_categ'],\n elec_consumption= row['elec_consumption'],\n attitude = row['attitude'],\n pbc = row['pbc'],\n subnorms = row['subnorms'],\n geoid = row['geoid'],\n tract = row['tract'],\n bgid = row['bgid'],\n adoption_status = 0)\n \n \n\n if agent:\n self.schedule.add(agent)\n y = row['lat']\n x = row['lon']\n self.grid.place_agent(agent, node_id=agent.unique_id)\n #self.space.place_agent(agent, (x, y))\n #agent.pos = (x, y)",
"def create_graph(data, drone_altitude, safety_distance):\n # Find grid and offsets.\n grid, north_offset, east_offset = create_grid(data, drone_altitude, safety_distance)\n\n # Find object centers.\n centers = get_object_centers(data, north_offset, east_offset, drone_altitude, safety_distance)\n\n # Create Voronoid from centers\n voronoi = Voronoi(centers)\n\n # Find open edges\n edges = find_open_edges_voronoi(voronoi, grid)\n\n # Create graph.\n return (create_graph_from_edges(edges), north_offset, east_offset)",
"def __init__(self, graph: ghidra.graph.GImplicitDirectedGraph):\n ...",
"def build_graph(self):\n for node in self.nodes:\n self.graph.add_node(node.id, node_obj=node)\n edges = []\n for i in range(0, len(self.nodes)):\n for j in range(i+1, len(self.nodes)):\n if (self.nodes[i].distance(self.nodes[j]) < self.radio_range):\n edges.append((self.nodes[i].id, self.nodes[j].id,1))\n self.graph.add_weighted_edges_from(edges)",
"def __create_graph(self):\n # create the nodes\n for h in range(self.height):\n row: List[JuncNode] = list()\n for w in range(self.width):\n jnodes: List[Node] = [self.add_node() for _ in range(4)]\n jn = JuncNode(jnodes, (h, w))\n row.append(jn)\n self.__juncs.append(row)\n # create all connections\n self.__create_connections()",
"def build_edges(self):\n print(\"Constructing Edges.\")\n # -----------------------------------------\n # TODO: You should write this method!\n\n # Note: this method may take some time to run - it is likely to be O(N^2), and some lists have N = 10,000 words or more.\n # (I've had students decide that their program was \"broken\" and quit it before this process finished... every time,\n # not realizing that the program was working hard behind the scenes.)\n # I recommend that you keep track of the number of edges you have added, and if it is a multiple of 1000, print\n # something so that you know your program is making progress.\n n = len(self.vertices)\n\n\n\n \n # -----------------------------------------\n print(\"Done Constructing Edges.\\n------------------------------------\")"
] | [
"0.64713144",
"0.6390506",
"0.6251868",
"0.6238143",
"0.62126094",
"0.6179312",
"0.6168436",
"0.61585855",
"0.61355734",
"0.6126878",
"0.6122993",
"0.60826135",
"0.60792905",
"0.5959465",
"0.5944761",
"0.5919414",
"0.5903653",
"0.58958435",
"0.58561826",
"0.58410925",
"0.5815487",
"0.5809945",
"0.57381135",
"0.5723922",
"0.5693853",
"0.5693433",
"0.5655722",
"0.5650643",
"0.5635204",
"0.56217754"
] | 0.6859379 | 0 |
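The build method in this record does two things: it assigns each neighborhood a graph index via encounter(), then, for every signature, records which neighborhood indices map to it using np.argwhere. A stripped-down sketch of that second, bookkeeping step, with the Graph/encounter machinery replaced by an ordinary signature function (so the names here are illustrative, not the library's):

```python
import numpy as np

def build_lookup(items, signature):
    """Group item indices by signature, mirroring the lookup dict above."""
    sigs = np.array([signature(x) for x in items])
    lookup = {}
    for sig in np.unique(sigs):
        lookup[sig] = np.argwhere(sigs == sig).flatten()
    return lookup

lookup = build_lookup([3, 1, 4, 1, 5, 9, 2], signature=lambda x: x % 2)
assert list(lookup[0]) == [2, 6]            # even values sit at indices 2 and 6
assert list(lookup[1]) == [0, 1, 3, 4, 5]   # odd values at the remaining indices
```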
extended_euclidean_algorithm(a, b) The result is the greatest common divisor of a and b, together with Bézout coefficients x and y such that a*x + b*y = gcd(a, b). | def extended_euclidean_algorithm(a, b):
if a == 0: return b, 0, 1
else:
g, y, x = extended_euclidean_algorithm(b % a, a)
return g, x - (b // a) * y, y | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def euclidean_algorithm(a, b):\n if a == 0: return b\n if b == 0: return a\n r = a % b\n return euclidean_algorithm(b, r)",
"def extended_euclidean_algorithm(a, b):\n s, old_s = 0, 1\n t, old_t = 1, 0\n r, old_r = b, a\n\n while r != 0:\n quotient = old_r // r\n old_r, r = r, old_r - quotient * r\n old_s, s = s, old_s - quotient * s\n old_t, t = t, old_t - quotient * t\n\n return old_r, old_s, old_t",
"def extended_euclidean(a, b):\n vprint(\"a: {}, b: {}\".format(a, b))\n assert(a >= b)\n if a == b:\n x = 1\n y = 0\n d = a\n vprint(\"Base case: a == b\")\n vprint(\"x: {}, y: {}, d: {}\".format(x, y, d))\n return x, y, d\n\n q, r = division_algo(a, b)\n vprint(\" a = q * b + r\")\n vprint(\"{} = {} * {} + {}\".format(a, q, b, r))\n\n if r == 0:\n x = 0\n y = 1\n d = b\n vprint(\"Base case r == 0\")\n vprint(\"x: {}, y: {}, d: {}\".format(x, y, d))\n return x, y, d\n\n vprint(\"Entering recursive call\")\n vprint(\"extended_euclidean({}, {})\".format(b, r))\n x1, y1, d1 = extended_euclidean(b, r)\n vprint(\"Returning from recursive call\")\n vprint(\"x1: {}, y1: {}, d1: {}\".format(x1, y1, d1))\n x = y1\n y = x1 - q*y1\n d = d1\n vprint(\"x: {}, y: {}, d: {}\".format(x, y, d))\n return x, y, d",
"def extended_euclid(a, b):\n v0 = (1, 0)\n v1 = (0, 1)\n\n while b != 0:\n q = a // b\n a, b = b, a % b\n v0, v1 = v1, (v0[0] - q * v1[0], v0[1] - q * v1[1])\n\n gcd = abs(a)\n alpha, beta = v0\n return gcd, alpha, beta",
"def euclidean_gcd(a: int, b: int) -> int:\n\n if a == 0 or b == 0:\n return a + b\n if a == b:\n return a\n if a < b:\n a, b = b, a\n mod = a % b\n if mod == 0:\n return b\n return euclidean_gcd(b, mod)",
"def extended_euclid(a: int, b: int) -> (int, int, int):\r\n x, y, u, v = 0, 1, 1, 0\r\n while a != 0:\r\n q, r = b // a, b % a\r\n m, n = x - u * q, y - v * q\r\n b, a, x, y, u, v = a, r, u, v, m, n\r\n gcd = b\r\n return gcd, x, y",
"def extended_euclid(a, b):\n A, B = a, b\n sa, sb = (1 if a >= 0 else -1), (1 if b >= 0 else -1)\n xp, yp = 1, 0\n x, y = 0, 1\n while b:\n assert A * xp + B * yp == a\n assert A * x + B * y == b\n r = a // b\n a, b = b, a % b\n x, xp = xp - r * x, x\n y, yp = yp - r * y, y\n return sa * xp, sb * yp",
"def extended_euclidean(self):\n self.a = gmpy2.invert(self.e1, self.e2)\n self.b = (float(self.gcd(self.e1, self.e2)-(self.a*self.e1)))/float(self.e2)",
"def extended_gcd(a, b):\n x, lastx, y, lasty = 0, 1, 1, 0\n while b != 0:\n q, r = divmod(a, b)\n a, b = b, r\n x, lastx = lastx - q * x, x\n y, lasty = lasty - q * y, y\n return lastx, lasty",
"def extEuclid(a, b):\n x = 0\n lastx = 1\n y = 1\n lasty = 0\n while b != 0:\n quotient = a // b\n a, b = b, a % b\n x, lastx = lastx - quotient * x, x\n y, lasty = lasty - quotient * y, y\n return (lastx, lasty, a)",
"def extended_gcd(a, b):\r\n x_prev, x = 0, 1\r\n y_prev, y = 1, 0\r\n\r\n while a:\r\n q = b // a\r\n x, x_prev = x_prev - q * x, x\r\n y, y_prev = y_prev - q * y, y\r\n a, b = b % a, a\r\n\r\n return b, x_prev, y_prev",
"def extendedGcd(a, b):\n x0 = 1\n x1 = 0\n y0 = 0\n y1 = 1\n\n while b != 0:\n p = a // b\n z = a % b\n a = b\n b = z\n\n w = x1\n x1 = x0 - p * x1\n x0 = w\n \n v = y1\n y1 = y0 - p * y1\n y0 = v\n print(\"returns: gcd, si, ti\")\n return (gcd(a, b), x0, y0)",
"def extended_gcd(_a, _b):\n previous_remainder, remainder = _a, _b\n current_x, previous_x, current_y, previous_y = 0, 1, 1, 0\n while remainder > 0:\n previous_remainder, (quotient, remainder) = remainder, divmod(\n previous_remainder, remainder)\n current_x, previous_x = previous_x - quotient * current_x, current_x\n current_y, previous_y = previous_y - quotient * current_y, current_y\n # The loop terminates with remainder == 0, x == b and y == -a. This is not what we want, and is because we have\n # walked it through one time \"too many\". Therefore, return the values\n # of the previous round:\n return previous_remainder, previous_x, previous_y",
"def gcd_algo(a,b):\n i = max(a,b)\n j = min(a,b)\n\n if j == 0:\n return i\n else:\n reminder = i%j\n return gcd_algo(j, reminder)",
"def edist(a, b):\n return euclidean(np.array(a), np.array(b))",
"def extEuclidR(a, b):\n if b == 0:\n return (1, 0, a)\n else:\n (x, y, gcd) = extEuclidR(b, a % b)\n x, y = y, x - (y * (a / b))\n return (x, y, gcd)",
"def euclidean_gcd_recursive(a: int, b: int) -> int:\n return a if b == 0 else euclidean_gcd_recursive(b, a % b)",
"def gcd(self, a, b):\n raise NotImplementedError",
"def gcd(a, b):\n __check_args(a, b)\n\n if b > a:\n return __calc_gcd(b, a)\n else:\n return __calc_gcd(a, b)",
"def extended_greatest_common_denominator(a, b):\r\n if a == 0:\r\n return (b, 0, 1)\r\n else:\r\n g, y, x = extended_greatest_common_denominator(b % a, a)\r\n return (g, x - (b // a) * y, y)",
"def gcd(a, b):\n if not a:\n return b\n else:\n a = abs(a)\n b = abs(b)\n return gcd(b%a, a)",
"def gcd(a,b):\r\n\tif a == 0:\r\n\t\treturn abs(b)\r\n\treturn abs(gcd(b % a, a))",
"def eucl_alg(a, b):\n if a == 0:\n return b, 0, 1\n else:\n g, x, y = eucl_alg(b % a, a)\n return g, y - (b // a) * x, x",
"def greatest_common_divisor(a: int, b: int) -> int:\n#[SOLUTION]\n while b:\n a, b = b, a % b\n return a",
"def find_gcd(a, b):\n\n gcd = min(a, b)\n\n # Keep looping until gcd divides both a & b evenly\n while a % gcd != 0 or b % gcd != 0:\n gcd -= 1\n\n return gcd",
"def gcd(a, b):\n\tif a == 0:\n\t\treturn b\n\n\treturn gcd(b%a, a)",
"def lcm(a: int, b: int):\n return (a * b) // euclid(a, b)",
"def gcd(a, b):\n r0, r1 = abs(a), abs(b)\n while r1 > 0:\n r0, r1 = r1, r0 % r1\n return r0",
"def euclidean_distance(a, b):\n return sqrt((a[0] - b[0])**2 + (a[1] - b[1])**2)",
"def gcd(a, b):\n a = abs(a)\n b = abs(b)\n if a == b:\n return a\n if b > a:\n a, b = b, a\n q = a // b\n r = a - b * q\n while r != 0:\n a = b\n b = r\n q = a // b\n r = a - b * q\n return b"
] | [
"0.83563834",
"0.8242443",
"0.7883765",
"0.7874354",
"0.78629833",
"0.77744085",
"0.77626127",
"0.77376217",
"0.7703958",
"0.7693146",
"0.76605105",
"0.7453624",
"0.7439825",
"0.7404717",
"0.7386229",
"0.7382866",
"0.73674756",
"0.73155385",
"0.73059714",
"0.7270737",
"0.7218881",
"0.7212154",
"0.720394",
"0.71806747",
"0.71763104",
"0.7163301",
"0.7161169",
"0.7159898",
"0.71573406",
"0.71523714"
] | 0.8602561 | 0 |
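Beyond the gcd, the recursion in this record also returns Bézout coefficients, i.e. integers x and y with a*x + b*y = gcd(a, b); that identity is what the modular-inverse record below relies on. A quick self-check of the invariant, restating the same recursion so the snippet runs on its own:

```python
def extended_euclidean_algorithm(a, b):
    # Base case: gcd(0, b) = b = a*0 + b*1.
    if a == 0:
        return b, 0, 1
    # Recurse on (b mod a, a) and rewrite the coefficients for (a, b).
    g, y, x = extended_euclidean_algorithm(b % a, a)
    return g, x - (b // a) * y, y

g, x, y = extended_euclidean_algorithm(240, 46)
assert (g, x, y) == (2, -9, 47)      # gcd(240, 46) = 2 = 240*(-9) + 46*47
assert 240 * x + 46 * y == g
```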
modular_inverse(e, z) Calculates the modular multiplicative inverse of e modulo z. | def modular_inverse(e, z):
g, x, y = extended_euclidean_algorithm(e, z)
if g != 1: raise Exception('Modular inverse does not exist')
else: return x % z | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def modular_inverse(self):\n i = gmpy2.invert(self.c2, self.n)\n mx = pow(self.c1, self.a, self.n)\n my = pow(i, int(-self.b), self.n)\n self.m= mx * my % self.n",
"def modular_inverse(a, mod):\n r_prev, u_prev, v_prev, r, u, v = a, 1, 0, mod, 0, 1\n while r != 0:\n q = r_prev // r\n r_prev, u_prev, v_prev, r, u, v = (\n r,\n u,\n v,\n r_prev - q * r,\n u_prev - q * u,\n v_prev - q * v,\n )\n return u_prev",
"def inv_efunc(z):\n return 1. / sqrt(omega_m * (1. + z)**3 + omega_lam)",
"def modinv(a, m):\n g, x, y = egcd(a, m)\n if g != 1:\n raise Exception('modular inverse does not exist')\n else:\n return x % m",
"def _mult_inverse(self, a, m):\n g, x, y = self._egcd(a, m)\n if g != 1:\n raise Exception('modular inverse does not exist')\n else:\n return x % m",
"def mod_inverse(x, m):\n inv, _ = extended_gcd(x, m)\n return inv",
"def mod_inverse(base, m):\n g, x, y = mod_inverse_iterative(base, m)\n if g != 1:\n return None\n else:\n return x % m",
"def multiplicative_inverse(e, phi):\n\t\n\td, x1, x2, y1 = 0, 0, 1, 1\n\toriginal_phi = phi\n\t\n\twhile e > 0:\n\t\ttemp1 = phi // e\n\t\tphi, e = e, phi % e\n\t\tx2, x1 = x1, (x2 - temp1 * x1)\n\t\td, y1 = y1, (d - temp1 * y1)\n \n\treturn d % original_phi",
"def mod_inverse(base, m):\n\n g, x, y = mod_inverse_iterative(base, m)\n if (g != 1):\n return None\n else:\n return (x % m)",
"def mod_inverse(num: int, modulus: int) -> int:\n if gcd(num, modulus) != 1:\n raise ModularInverseError('gcd is equals to 1')\n u_1, u_2, u_3 = 1, 0, num\n v_1, v_2, v_3 = 0, 1, modulus\n\n while v_3 != 0:\n quot = u_3 // v_3\n v_1, v_2, v_3, u_1, u_2, u_3 = (\n u_1 - quot * v_1), (u_2 - quot * v_2), (u_3 - quot * v_3), v_1, v_2, v_3\n return u_1 % modulus",
"def inverse_mod(a, m):\r\n g, x, y = extended_greatest_common_denominator(a, m)\r\n if g != 1:\r\n raise Exception('modular inverse does not exist')\r\n else:\r\n return x % m",
"def invmod(b,n):\r\n\treturn inverse_mod(b,n)",
"def invmod(b,n):\n\treturn inverse_mod(b,n)",
"def multiplicative_inverse(e, n):\n x, y = extended_gcd(e, n)\n if x < 0:\n return n + x\n return x",
"def inverse_fisher_z_transform(z):\r\n return ((e ** (2 * z)) - 1.) / ((e ** (2 * z)) + 1.)",
"def mod_inverse(a, n):\n b = n\n if abs(b) == 0:\n return (1, 0, a)\n\n x1, x2, y1, y2 = 0, 1, 1, 0\n while abs(b) > 0:\n q, r = divmod(a, b)\n x = x2 - q * x1\n y = y2 - q * y1\n a, b, x2, x1, y2, y1 = b, r, x1, x, y1, y\n\n return x2 % n",
"def mod_inverse(a, n):\n \n b = n\n if abs(b) == 0:\n return (1, 0, a)\n\n x1, x2, y1, y2 = 0, 1, 1, 0\n while abs(b) > 0:\n q, r = divmod(a, b)\n x = x2 - q * x1\n y = y2 - q * y1\n a, b, x2, x1, y2, y1 = b, r, x1, x, y1, y\n\n return x2 % n",
"def modular_inverse(a, m):\n\n def extended_gcd(_a, _b):\n \"\"\" Use the Extended Euclidean algorithm to calculate the \"extended greatest common divisor\".\n It takes as input two positive integers a and b, then calculates the following:\n 1. The greatest common divisor (gcd) between a and b -- that is, the integer number g which is the largest\n integer for which a/g and b/g both are integers (This can also be obtained using math.gcd)\n 2. The integer x and y so that a*x + b*y = gcd(x, y)\n :param _a: Positive integer\n :param _b: Positive integer\n :return: Tuple (gcd, x, y)\n \"\"\"\n previous_remainder, remainder = _a, _b\n current_x, previous_x, current_y, previous_y = 0, 1, 1, 0\n while remainder > 0:\n previous_remainder, (quotient, remainder) = remainder, divmod(\n previous_remainder, remainder)\n current_x, previous_x = previous_x - quotient * current_x, current_x\n current_y, previous_y = previous_y - quotient * current_y, current_y\n # The loop terminates with remainder == 0, x == b and y == -a. This is not what we want, and is because we have\n # walked it through one time \"too many\". Therefore, return the values\n # of the previous round:\n return previous_remainder, previous_x, previous_y\n\n gcd_value, x, y = extended_gcd(a, m)\n if gcd_value != 1:\n return False\n # print('No inverse. gcd (%d, %d) is %d. Decoding is not unique. Choose another key than %d'\n # % (a, m, math.gcd(a, m), a))\n return x % m",
"def inverse_mod( a, m ):\r\n\r\n if a < 0 or m <= a: a = a % m\r\n\r\n # From Ferguson and Schneier, roughly:\r\n\r\n c, d = a, m\r\n uc, vc, ud, vd = 1, 0, 0, 1\r\n while c != 0:\r\n q, c, d = divmod( d, c ) + ( c, )\r\n uc, vc, ud, vd = ud - q*uc, vd - q*vc, uc, vc\r\n\r\n # At this point, d is the GCD, and ud*a+vd*m = d.\r\n # If d == 1, this means that ud is a inverse.\r\n\r\n assert d == 1\r\n if ud > 0: return ud\r\n else: return ud + m",
"def mod_inv(val, modulus):\n return mod_exp(val, modulus - 2, modulus)",
"def complex_inverse(c1,cr):",
"def modInverse(cls, a, m):\n a = a % m\n for x in range(1, m):\n if ((a * x) % m == 1):\n return x\n return 1",
"def modinv(a, m):\n b = 1\n while not (a * b) % m == 1:\n b += 1\n return b",
"def mod_inverse_iterative(a, b):\n x, y, u, v = 0, 1, 1, 0\n while a != 0:\n q = int(b / a)\n r = b % a\n m = x - u * q\n n = y - v * q\n b, a, x, y, u, v = a, r, u, v, m, n\n return b, x, y",
"def inverseMod(a,b):\n if GMPY:\n return int(gmpy2.invert(a,b))\n else:\n gcd, x, y = computeGCD(a, m)\n if gcd != 1:\n None # there is no inverse of a mod b\n else:\n return x % m",
"def inv(z: int) -> int:\n # Adapted from curve25519_athlon.c in djb's Curve25519.\n z2 = z * z % q # 2\n z9 = pow2(z2, 2) * z % q # 9\n z11 = z9 * z2 % q # 11\n z2_5_0 = (z11 * z11) % q * z9 % q # 31 == 2^5 - 2^0\n z2_10_0 = pow2(z2_5_0, 5) * z2_5_0 % q # 2^10 - 2^0\n z2_20_0 = pow2(z2_10_0, 10) * z2_10_0 % q # ...\n z2_40_0 = pow2(z2_20_0, 20) * z2_20_0 % q\n z2_50_0 = pow2(z2_40_0, 10) * z2_10_0 % q\n z2_100_0 = pow2(z2_50_0, 50) * z2_50_0 % q\n z2_200_0 = pow2(z2_100_0, 100) * z2_100_0 % q\n z2_250_0 = pow2(z2_200_0, 50) * z2_50_0 % q # 2^250 - 2^0\n return pow2(z2_250_0, 5) * z11 % q # 2^255 - 2^5 + 11 = q - 2",
"def inverse_mod(a, m):\n if a < 0 or m <= a:\n a = a % m\n # From Ferguson and Schneier, roughly:\n c, d = a, m\n uc, vc, ud, vd = 1, 0, 0, 1\n while c != 0:\n q, c, d = divmod(d, c) + (c,)\n uc, vc, ud, vd = ud - q * uc, vd - q * vc, uc, vc\n # At this point, d is the GCD, and ud*a+vd*m = d.\n # If d == 1, this means that ud is a inverse.\n assert d == 1\n if ud > 0:\n return ud\n else:\n return ud + m",
"def multiple_inverse(p_final, n):\r\n return one_minus_exp(log_one_minus(p_final) / n)",
"def test_multiple_inverse(self):\r\n # NOTE: multiple_inverse not very accurate close to 1\r\n self.assertFloatEqual(multiple_inverse(1 - 0.9990005, 10000), 1e-7)\r\n self.assertFloatEqual(multiple_inverse(0.4012631, 10), 0.05)\r\n self.assertFloatEqual(multiple_inverse(1e-20, 1), 1e-20)\r\n self.assertFloatEqual(multiple_inverse(1e-300, 1), 1e-300)\r\n self.assertFloatEqual(multiple_inverse(0.96875, 5), 0.5)\r\n self.assertFloatEqual(multiple_inverse(1e-19, 10), 1e-20)",
"def inverse_basis(T, dimensions, t):\n B = basis(T, dimensions, t)\n return inv(B.T.dot(B)).dot(B.T)"
] | [
"0.73496014",
"0.6930735",
"0.6873958",
"0.6728242",
"0.66591424",
"0.6651403",
"0.66340345",
"0.6575672",
"0.6552733",
"0.654498",
"0.64918435",
"0.6366117",
"0.6332576",
"0.6314671",
"0.6263555",
"0.6190621",
"0.61679864",
"0.61214244",
"0.609041",
"0.6076969",
"0.6060829",
"0.60521954",
"0.60224926",
"0.59743583",
"0.5972267",
"0.59687436",
"0.59130085",
"0.59099966",
"0.5888194",
"0.58825517"
] | 0.84206843 | 0 |
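A usage sketch of modular_inverse from this record, with extended_euclidean_algorithm copied in from the previous record so the example is self-contained. The numbers are the textbook RSA toy values (e = 17, z = 3120), chosen here only for illustration:

```python
def extended_euclidean_algorithm(a, b):
    if a == 0:
        return b, 0, 1
    g, y, x = extended_euclidean_algorithm(b % a, a)
    return g, x - (b // a) * y, y

def modular_inverse(e, z):
    g, x, y = extended_euclidean_algorithm(e, z)
    if g != 1:
        raise Exception('Modular inverse does not exist')
    return x % z

d = modular_inverse(17, 3120)
assert d == 2753 and (17 * d) % 3120 == 1   # 17 * 2753 = 46801 = 15*3120 + 1
```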
An account alias associated with a customer's account. | def account_alias(self) -> pulumi.Input[str]:
return pulumi.get(self, "account_alias") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def account_alias(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"account_alias\")",
"def get_account_alias(self):\r\n return self.get_response('ListAccountAliases', {},\r\n list_marker='AccountAliases')",
"def account_alias_resource_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"account_alias_resource_id\")",
"def create_account_alias(self, alias):\r\n params = {'AccountAlias': alias}\r\n return self.get_response('CreateAccountAlias', params)",
"def __init__(__self__, *,\n account_alias: pulumi.Input[str]):\n pulumi.set(__self__, \"account_alias\", account_alias)",
"def account(self) -> str:\n return self._account",
"def account(self) -> str:\n return self._account",
"def account(self, acct):\n aMgr = self.acctManager\n if len(aMgr.accounts) <= acct:\n raise Exception(\"requested unknown account number %i\" % acct)\n return aMgr.account(acct)",
"def getaccountaddress(self, account):\n return self.proxy.getaccountaddress(account)",
"def get_accountname_for_active_connection(self):\n aliases = self.get_account_aliases()\n if aliases:\n return aliases[0]\n return None",
"def account_name(self) -> str:\n return pulumi.get(self, \"account_name\")",
"def account_name(self) -> str:\n return pulumi.get(self, \"account_name\")",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'AccountAlias':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = AccountAliasArgs.__new__(AccountAliasArgs)\n\n __props__.__dict__[\"account_alias\"] = None\n __props__.__dict__[\"account_alias_resource_id\"] = None\n return AccountAlias(resource_name, opts=opts, __props__=__props__)",
"def getCustomerAccount(self):\n return self._CustomerAccount",
"def getCustomerAccount(self):\n return self._CustomerAccount",
"def get_account(self, account):\n \n pass",
"def account(self, account_code):\r\n return acc.Account(self, account_code)",
"def get_connections_accountname(self):\n account_info = self.get_account()\n return getattr(account_info, 'account_name', None)",
"def display_account(account):\n if 'accountName' not in account and 'emailAddress' not in account:\n account_template = '{accountId}'\n elif 'emailAddress' not in account:\n account_template = '{accountName} ({accountId})'\n elif 'accountName' not in account:\n account_template = '{emailAddress} ({accountId})'\n else:\n account_template = '{accountName}, {emailAddress} ({accountId})'\n return account_template.format(**account)",
"def alias(self):\n return self._alias",
"def alias(self):\n return self._alias",
"def account_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"account_name\")",
"def account_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"account_name\")",
"def account_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"account_name\")",
"def get_account_for_tenant(test_auth, tenant_id):\n return '%s%s' % (test_auth.reseller_prefixes[0], tenant_id)",
"def delete_account_alias(self, alias):\r\n params = {'AccountAlias': alias}\r\n return self.get_response('DeleteAccountAlias', params)",
"def alias(self):\n\n return self._alias",
"def account_id(self) -> str:\n return self._account_id",
"def get_account(self):\n return self._account",
"def get_account(self):\n return self._account"
] | [
"0.7883774",
"0.75032926",
"0.70096886",
"0.6901152",
"0.663217",
"0.6561262",
"0.6561262",
"0.6388388",
"0.6385573",
"0.6365307",
"0.6309277",
"0.6309277",
"0.62986344",
"0.6257074",
"0.6257074",
"0.62428296",
"0.6241097",
"0.620877",
"0.60629225",
"0.6019607",
"0.6019607",
"0.59752417",
"0.59752417",
"0.59752417",
"0.5973346",
"0.5942614",
"0.59423244",
"0.59374624",
"0.5909374",
"0.5909374"
] | 0.78406376 | 1 |
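The getter in this record is one half of the usual Pulumi args-class pattern; the constructor seen in the negatives (pulumi.set(__self__, "account_alias", account_alias)) is the other half. A sketch of how the two typically sit together in a generated AccountAliasArgs input type (requires the pulumi package; the decorator names follow the standard Pulumi Python SDK, not this dataset):

```python
import pulumi

@pulumi.input_type
class AccountAliasArgs:
    def __init__(__self__, *, account_alias: pulumi.Input[str]):
        # Store the alias under its snake_case key, as in the record above.
        pulumi.set(__self__, "account_alias", account_alias)

    @property
    @pulumi.getter(name="accountAlias")
    def account_alias(self) -> pulumi.Input[str]:
        return pulumi.get(self, "account_alias")

    @account_alias.setter
    def account_alias(self, value: pulumi.Input[str]):
        pulumi.set(self, "account_alias", value)
```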
An AWS Support App resource that creates, updates, reads, and deletes a customer's account alias. | def __init__(__self__,
resource_name: str,
args: AccountAliasArgs,
opts: Optional[pulumi.ResourceOptions] = None):
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def account_alias_resource_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"account_alias_resource_id\")",
"def create_account_alias(self, alias):\r\n params = {'AccountAlias': alias}\r\n return self.get_response('CreateAccountAlias', params)",
"def account_alias(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"account_alias\")",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'AccountAlias':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = AccountAliasArgs.__new__(AccountAliasArgs)\n\n __props__.__dict__[\"account_alias\"] = None\n __props__.__dict__[\"account_alias_resource_id\"] = None\n return AccountAlias(resource_name, opts=opts, __props__=__props__)",
"def account_alias(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"account_alias\")",
"def example_alias2():\n\n # Define app_id and secret\n my_app_id = 'my_app_id'\n my_secret = 'my_secret'\n # Create a Pushbots instance\n pushbots = Pushbots(app_id=my_app_id, secret=my_secret)\n # Define data\n data = {'platform': Pushbots.PLATFORM_ANDROID, 'alias': 'my_new_alias',\n 'current_alias': 'my_current_alias'}\n code, message = pushbots.alias(data=data)\n print('Returned code: {0}'.format(code))\n print('Returned message: {0}'.format(message))",
"def account(self, account_id):\r\n return resources.Account(self, account_id)",
"def example_alias1():\n\n # Define app_id and secret\n my_app_id = 'my_app_id'\n my_secret = 'my_secret'\n # Create a Pushbots instance\n pushbots = Pushbots(app_id=my_app_id, secret=my_secret)\n # Define new and current alias and platform\n new_alias = 'my_new_alias'\n old_alias = 'my_current_alias'\n platform = Pushbots.PLATFORM_ANDROID\n code, message = pushbots.alias(platform=platform, alias=new_alias,\n current_alias=old_alias)\n print('Returned code: {0}'.format(code))\n print('Returned message: {0}'.format(message))",
"def get_account_alias(self):\r\n return self.get_response('ListAccountAliases', {},\r\n list_marker='AccountAliases')",
"def catalog_alias_create(self, args):\n try:\n if args.id:\n alias = self.server.connect_ermrest_alias(args.id)\n try:\n if alias.retrieve():\n print(\"Catalog alias already exists\")\n return\n except requests.HTTPError as e:\n if e.response.status_code == 404:\n pass\n else:\n raise\n owner = args.owner if args.owner else None\n alias = self.server.create_ermrest_alias(args.id, owner, args.alias_target)\n if not args.quiet:\n print(\"Created new catalog alias %s with the following configuration:\\n\" % alias.alias_id)\n pp(alias.retrieve())\n except HTTPError as e:\n if e.response.status_code == requests.codes.not_found:\n raise ResourceException('Catalog alias not found', e)\n elif e.response.status_code == requests.codes.conflict:\n raise ResourceException(\"Catalog alias already exists\", e)\n else:\n raise",
"def create(self, **kwargs):\n resource = self.resource.create(kwargs)\n if 'admin_token' in kwargs:\n resource.context.authorize('Gem-Application',\n api_token=resource.api_token,\n admin_token=kwargs['admin_token'])\n app = self.wrap(resource)\n return self.add(app)",
"def __init__(__self__, *,\n account_alias: pulumi.Input[str]):\n pulumi.set(__self__, \"account_alias\", account_alias)",
"def alias(ctx, search, backend):\n projects = ctx.obj['projects_db'].search(search, active_only=True)\n projects = sorted(projects, key=lambda project: project.name)\n\n if len(projects) == 0:\n ctx.obj['view'].msg(\n \"No active project matches your search string '%s'.\" %\n ''.join(search)\n )\n return\n\n ctx.obj['view'].projects_list(projects, True)\n\n try:\n number = ctx.obj['view'].select_project(projects)\n except CancelException:\n return\n\n project = projects[number]\n ctx.obj['view'].project_with_activities(project, numbered_activities=True)\n\n try:\n number = ctx.obj['view'].select_activity(project.activities)\n except CancelException:\n return\n\n retry = True\n while retry:\n try:\n alias = ctx.obj['view'].select_alias()\n except CancelException:\n return\n\n if alias in aliases_database:\n mapping = aliases_database[alias]\n overwrite = ctx.obj['view'].overwrite_alias(alias, mapping)\n\n if not overwrite:\n return\n elif overwrite:\n retry = False\n # User chose \"retry\"\n else:\n retry = True\n else:\n retry = False\n\n activity = project.activities[number]\n mapping = Mapping(mapping=(project.id, activity.id),\n backend=project.backend)\n ctx.obj['settings'].add_alias(alias, mapping)\n ctx.obj['settings'].write_config()\n\n ctx.obj['view'].alias_added(alias, (project.id, activity.id))",
"def alias_catalog(self, *args, **kwargs):\n # Implemented from kitosid template for -\n # osid.resource.BinAdminSession.alias_bin\n self._get_provider_session('catalog_admin_session').alias_catalog(*args, **kwargs)",
"def amazon_accounts():\n import json\n from security_monkey.datastore import Account, AccountType\n from os.path import dirname, join\n\n data_file = join(dirname(dirname(__file__)), \"data\", \"aws_accounts.json\")\n data = json.load(open(data_file, 'r'))\n\n app.logger.info('Adding / updating Amazon owned accounts')\n try:\n account_type_result = AccountType.query.filter(AccountType.name == 'AWS').first()\n if not account_type_result:\n account_type_result = AccountType(name='AWS')\n db.session.add(account_type_result)\n db.session.commit()\n db.session.refresh(account_type_result)\n\n for group, info in data.items():\n for aws_account in info['accounts']:\n acct_name = \"{group} ({region})\".format(group=group, region=aws_account['region'])\n account = Account.query.filter(Account.identifier == aws_account['account_id']).first()\n if not account:\n app.logger.debug(' Adding account {0}'.format(acct_name))\n account = Account()\n else:\n app.logger.debug(' Updating account {0}'.format(acct_name))\n\n account.identifier = aws_account['account_id']\n account.account_type_id = account_type_result.id\n account.active = False\n account.third_party = True\n account.name = acct_name\n account.notes = info['url']\n\n db.session.add(account)\n\n db.session.commit()\n app.logger.info('Finished adding Amazon owned accounts')\n except Exception as e:\n app.logger.exception(\"An error occured while adding accounts\")\n store_exception(\"manager-amazon-accounts\", None, e)",
"def run_app(\n region_name, # region to deploy the app into\n app_name, # identifier for the app\n image_name, # AMI to start the app from\n # App container settings\n container_location=None, # Docker repository:tag to find the image\n env=None, # Runtime environment variables for the app\n container_access_id=None, # credentials for private repository\n container_access_key=None,# credentials for private repository\n # DjaoApp gate settings\n djaoapp_version=None, # version of the djaoapp gate\n settings_location=None, # where to find Runtime djaoapp settings\n settings_crypt_key=None, # key to decrypt djaoapp settings\n s3_uploads_bucket=None, # where uploaded media are stored\n # connection and monitoring settings.\n identities_url=None, # files to copy on the image\n s3_logs_bucket=None, # where to upload log files\n ssh_key_name=None, # AWS SSH key to connect\n queue_url=None, # To send remote commands to the instance\n tls_priv_key=None, # install TLS cert\n tls_fullchain_cert=None, # install TLS cert\n # Cloud infrastructure settings\n instance_type=None, # EC2 instance the app runs on\n storage_enckey=None, # Key to encrypt the EBS volume\n app_subnet_id=None, # Subnet the app runs in\n vpc_id=None, # VPC the app runs in\n vpc_cidr=None, # VPC the app runs in (as IP range)\n hosted_zone_id=None, # To set DNS\n app_prefix=None, # account_id for billing purposes\n tag_prefix=None,\n dry_run=False):\n if not app_prefix:\n app_prefix = app_name\n\n ecr_access_role_arn = None\n if container_location and is_aws_ecr(container_location):\n ecr_access_role_arn = container_access_id\n\n create_app_resources(\n region_name, app_name, image_name,\n instance_type=instance_type,\n storage_enckey=storage_enckey,\n s3_logs_bucket=s3_logs_bucket,\n identities_url=identities_url,\n ssh_key_name=ssh_key_name,\n ecr_access_role_arn=ecr_access_role_arn,\n settings_location=settings_location,\n settings_crypt_key=settings_crypt_key,\n s3_uploads_bucket=s3_uploads_bucket,\n queue_url=queue_url,\n app_subnet_id=app_subnet_id,\n vpc_id=vpc_id,\n vpc_cidr=vpc_cidr,\n hosted_zone_id=hosted_zone_id,\n app_prefix=app_prefix,\n tag_prefix=tag_prefix,\n dry_run=dry_run)\n if tls_fullchain_cert and tls_priv_key:\n create_domain_forward(region_name, djaoapp_version,\n tls_priv_key=tls_priv_key,\n tls_fullchain_cert=tls_fullchain_cert,\n tag_prefix=tag_prefix,\n dry_run=dry_run)\n\n # Environment variables is an array of name/value.\n if container_location:\n deploy_app_container(app_name, container_location,\n env=env,\n container_access_id=container_access_id,\n container_access_key=container_access_key,\n queue_url=queue_url,\n region_name=region_name,\n dry_run=dry_run)",
"def create_app_resources(region_name, app_name, image_name,\n storage_enckey=None,\n s3_logs_bucket=None,\n identities_url=None,\n ssh_key_name=None,\n company_domain=None,\n ldap_host=None,\n ecr_access_role_arn=None,\n settings_location=None,\n settings_crypt_key=None,\n s3_uploads_bucket=None,\n instance_type=None,\n queue_url=None,\n app_subnet_id=None,\n vpc_id=None,\n vpc_cidr=None,\n hosted_zone_id=None,\n app_prefix=None,\n tag_prefix=None,\n dry_run=False):\n if not instance_type:\n instance_type = 't3a.small'\n if not app_prefix:\n app_prefix = app_name\n subnet_id = app_subnet_id\n ec2_client = boto3.client('ec2', region_name=region_name)\n #pylint:disable=unbalanced-tuple-unpacking\n gate_name, kitchen_door_name = _get_security_group_names([\n 'castle-gate', 'kitchen-door'], tag_prefix=tag_prefix)\n app_sg_name = _get_security_group_names([\n 'courtyard'], tag_prefix=app_prefix)[0]\n\n # Create a Queue to communicate with the agent on the EC2 instance.\n queue_name = app_name\n if queue_url:\n queue_name = queue_url.split('/')[-1]\n # Implementation Note:\n # strange but no exception thrown when queue already exists.\n sqs_client = boto3.client('sqs', region_name=region_name)\n if not dry_run:\n resp = sqs_client.create_queue(QueueName=queue_name)\n if queue_url and queue_url != resp.get(\"QueueUrl\"):\n LOGGER.warning(\n \"Expected queue_url %s but found or created queue %s\",\n queue_url, resp.get(\"QueueUrl\"))\n queue_url = resp.get(\"QueueUrl\")\n LOGGER.info(\"found or created queue. queue_url set to %s\", queue_url)\n else:\n if not queue_url:\n queue_url = \\\n 'https://dry-run-sqs.%(region_name)s.amazonaws.com/%(app_name)s' % {\n 'region_name': region_name,\n 'app_name': app_name\n }\n LOGGER.warning(\n \"(dryrun) queue not created. 
queue_url set to %s\", queue_url)\n\n if not vpc_id:\n vpc_id, _ = _get_vpc_id(tag_prefix, ec2_client=ec2_client,\n region_name=region_name)\n if not subnet_id:\n #pylint:disable=unused-variable\n _, _, app_subnet_cidrs = _split_cidrs(\n vpc_cidr, ec2_client=ec2_client, region_name=region_name)\n app_subnet_by_cidrs = _get_subnet_by_cidrs(\n app_subnet_cidrs, tag_prefix,\n vpc_id=vpc_id, ec2_client=ec2_client)\n # Use first valid subnet that does not require a public IP.\n subnet_id = next(iter(app_subnet_by_cidrs.values()))['SubnetId']\n\n group_ids = _get_security_group_ids(\n [app_sg_name], app_prefix,\n vpc_id=vpc_id, ec2_client=ec2_client)\n app_sg_id = group_ids[0]\n group_ids = _get_security_group_ids(\n [gate_name, kitchen_door_name], tag_prefix,\n vpc_id=vpc_id, ec2_client=ec2_client)\n gate_sg_id = group_ids[0]\n kitchen_door_sg_id = group_ids[1]\n if not app_sg_id:\n if app_prefix and app_prefix.endswith('-'):\n descr = '%s %s' % (app_prefix[:-1], app_name)\n elif app_prefix:\n descr = ('%s %s' % (app_prefix, app_name)).strip()\n else:\n descr = app_name\n resp = ec2_client.create_security_group(\n Description=descr,\n GroupName=app_sg_name,\n VpcId=vpc_id,\n DryRun=dry_run)\n app_sg_id = resp['GroupId']\n LOGGER.info(\"%s created %s security group %s\",\n tag_prefix, app_sg_name, app_sg_id)\n # app_sg_id allow rules\n try:\n resp = ec2_client.authorize_security_group_ingress(\n DryRun=dry_run,\n GroupId=app_sg_id,\n IpPermissions=[{\n 'IpProtocol': 'tcp',\n 'FromPort': 80,\n 'ToPort': 80,\n 'UserIdGroupPairs': [{'GroupId': gate_sg_id}]\n }])\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'InvalidPermission.Duplicate':\n raise\n try:\n resp = ec2_client.authorize_security_group_ingress(\n DryRun=dry_run,\n GroupId=app_sg_id,\n IpPermissions=[{\n 'IpProtocol': 'tcp',\n 'FromPort': 443,\n 'ToPort': 443,\n 'UserIdGroupPairs': [{'GroupId': gate_sg_id}]\n }])\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'InvalidPermission.Duplicate':\n raise\n if ssh_key_name:\n try:\n resp = ec2_client.authorize_security_group_ingress(\n DryRun=dry_run,\n GroupId=app_sg_id,\n IpPermissions=[{\n 'IpProtocol': 'tcp',\n 'FromPort': 22,\n 'ToPort': 22,\n 'UserIdGroupPairs': [{'GroupId': kitchen_door_sg_id}]\n }])\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'InvalidPermission.Duplicate':\n raise\n\n app_role = app_name\n iam_client = boto3.client('iam')\n try:\n if not dry_run:\n resp = iam_client.create_role(\n RoleName=app_role,\n AssumeRolePolicyDocument=json.dumps({\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Sid\": \"\",\n \"Effect\": \"Allow\",\n \"Principal\": {\n \"Service\": \"ec2.amazonaws.com\"\n },\n \"Action\": \"sts:AssumeRole\"\n }\n ]\n }))\n iam_client.put_role_policy(\n RoleName=app_role,\n PolicyName='AgentCtrlMessages',\n PolicyDocument=json.dumps({\n \"Version\": \"2012-10-17\",\n \"Statement\": [{\n \"Action\": [\n \"sqs:ReceiveMessage\",\n \"sqs:DeleteMessage\"\n ],\n \"Effect\": \"Allow\",\n \"Resource\": \"*\"\n }]}))\n if ecr_access_role_arn:\n iam_client.put_role_policy(\n RoleName=app_role,\n PolicyName='DeployContainer',\n PolicyDocument=json.dumps({\n \"Version\": \"2012-10-17\",\n \"Statement\": [{\n \"Effect\": \"Allow\",\n \"Action\": [\n \"sts:AssumeRole\"\n ],\n \"Resource\": [\n ecr_access_role_arn\n ]\n }, {\n \"Effect\": \"Allow\",\n 
\"Action\": [\n \"ecr:GetAuthorizationToken\",\n \"ecr:BatchCheckLayerAvailability\",\n \"ecr:GetDownloadUrlForLayer\",\n \"ecr:BatchGetImage\"\n ],\n \"Resource\": \"*\"\n }]}))\n if s3_logs_bucket:\n iam_client.put_role_policy(\n RoleName=app_role,\n PolicyName='WriteslogsToStorage',\n PolicyDocument=json.dumps({\n \"Version\": \"2012-10-17\",\n \"Statement\": [{\n \"Action\": [\n \"s3:PutObject\"\n ],\n \"Effect\": \"Allow\",\n \"Resource\": [\n \"arn:aws:s3:::%s/%s/var/log/*\" % (\n s3_logs_bucket, app_name)\n ]\n }]}))\n if s3_uploads_bucket:\n iam_client.put_role_policy(\n RoleName=app_role,\n PolicyName='AccessesUploadedDocuments',\n PolicyDocument=json.dumps({\n \"Version\": \"2012-10-17\",\n \"Statement\": [{\n \"Action\": [\n \"s3:GetObject\",\n \"s3:PutObject\",\n # XXX Without `s3:GetObjectAcl` and\n # `s3:ListBucket`, cloud-init cannot run\n # a recursive copy\n # (i.e. `aws s3 cp s3://... / --recursive`)\n \"s3:GetObjectAcl\",\n \"s3:ListBucket\"\n ],\n \"Effect\": \"Allow\",\n \"Resource\": [\n \"arn:aws:s3:::%s\" % s3_uploads_bucket,\n \"arn:aws:s3:::%s/*\" % s3_uploads_bucket\n ]\n }, {\n \"Action\": [\n \"s3:PutObject\"\n ],\n \"Effect\": \"Disallow\",\n \"Resource\": [\n \"arn:aws:s3:::%s/identities/\" % s3_uploads_bucket\n ]\n }]}))\n LOGGER.info(\"%s%s created IAM role %s\",\n \"(dryrun) \" if dry_run else \"\", tag_prefix, app_role)\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'EntityAlreadyExists':\n raise\n LOGGER.info(\"%s found IAM role %s\", tag_prefix, app_role)\n\n instance_profile_arn = create_instance_profile(\n app_role, iam_client=iam_client, region_name=region_name,\n tag_prefix=tag_prefix, dry_run=dry_run)\n\n instances = create_instances(region_name, app_name, image_name,\n storage_enckey=storage_enckey,\n s3_logs_bucket=s3_logs_bucket,\n identities_url=identities_url,\n ssh_key_name=ssh_key_name,\n company_domain=company_domain,\n ldap_host=ldap_host,\n instance_type=instance_type,\n instance_profile_arn=instance_profile_arn,\n security_group_ids=[app_sg_id],\n subnet_id=subnet_id,\n tag_prefix=tag_prefix,\n dry_run=dry_run,\n template_name=\"app-cloud-init-script.j2\",\n ec2_client=ec2_client,\n settings_location=settings_location if settings_location else \"\",\n settings_crypt_key=settings_crypt_key if settings_crypt_key else \"\",\n queue_url=queue_url)\n\n # Associates an internal domain name to the instance\n update_dns_record = True\n if update_dns_record:\n hosted_zone = None\n default_hosted_zone = None\n hosted_zone_name = '%s.internal.' 
% region_name\n route53 = boto3.client('route53')\n if hosted_zone_id:\n hosted_zone = route53.get_hosted_zone(\n Id=hosted_zone_id)['HostedZone']\n else:\n hosted_zones_resp = route53.list_hosted_zones()\n hosted_zones = hosted_zones_resp.get('HostedZones')\n for hzone in hosted_zones:\n if hzone.get('Name').startswith(region_name):\n hosted_zone = hzone\n hosted_zone_id = hzone.get('Id')\n break\n if hzone.get('Name') == hosted_zone_name:\n default_hosted_zone = hzone\n if hosted_zone:\n hosted_zone_name = hosted_zone['Name']\n LOGGER.info(\"found hosted zone %s\", hosted_zone_name)\n else:\n hosted_zone_id = default_hosted_zone.get('Id')\n LOGGER.info(\n \"cannot find hosted zone for region %s, defaults to %s\",\n region_name, hosted_zone_name)\n\n host_name = \"%(app_name)s.%(hosted_zone_name)s\" % {\n 'app_name': app_name, 'hosted_zone_name': hosted_zone_name}\n private_ip_addrs = [{'Value': instance['PrivateIpAddress']}\n for instance in instances]\n LOGGER.info(\"%supdate DNS record for %s to %s ...\",\n \"(dry_run) \" if dry_run else \"\",\n host_name, [ip_addr['Value'] for ip_addr in private_ip_addrs])\n LOGGER.debug(\"route53.change_resource_record_sets(\"\\\n \"HostedZoneId=%(hosted_zone_id)s, ChangeBatch={'Changes':\"\\\n \" [{'Action': 'UPSERT', 'ResourceRecordSet': {\"\\\n \"'Name': %(host_name)s, 'Type': 'A', 'TTL': 60,\"\\\n \" 'ResourceRecords': %(private_ip_addrs)s}}]})\",\n hosted_zone_id=hosted_zone_id, host_name=host_name,\n private_ip_addrs=private_ip_addrs)\n if not dry_run:\n route53.change_resource_record_sets(\n HostedZoneId=hosted_zone_id,\n ChangeBatch={'Changes': [{\n 'Action': 'UPSERT',\n 'ResourceRecordSet': {\n 'Name': host_name,\n 'Type': 'A',\n # 'Region': DEFAULT_REGION\n 'TTL': 60,\n 'ResourceRecords': private_ip_addrs\n }}]})\n\n return [instance['InstanceId'] for instance in instances]",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'Application':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = ApplicationArgs.__new__(ApplicationArgs)\n\n __props__.__dict__[\"description\"] = None\n __props__.__dict__[\"display_name\"] = None\n __props__.__dict__[\"name\"] = None\n __props__.__dict__[\"source_resource_type\"] = None\n __props__.__dict__[\"type\"] = None\n return Application(resource_name, opts=opts, __props__=__props__)",
"def delete_app(short_name):\r\n delete_memoized(get_app, short_name)",
"def update_alias(self, semantic_version, target_alias):\n alias_name = self.semantic_version_to_alias_name(semantic_version)\n target_version = self._get_version_of_alias(self.semantic_version_to_alias_name(target_alias))\n cprint(f'Updating {self.name}:{alias_name} to {target_version}', colour=Fore.CYAN)\n try:\n self._lambda_client.update_alias(\n FunctionName=self._name,\n Name=alias_name,\n FunctionVersion=target_version\n )\n except ClientError as e:\n if 'ResourceNotFound' in str(e):\n cprint(f'Lambda alias {self.name}:{alias_name} does not exist', colour=Fore.YELLOW)\n self._lambda_client.create_alias(\n FunctionName=self._name,\n Name=alias_name,\n FunctionVersion=target_version\n )\n else:\n raise",
"def create_app(StackId=None, Shortname=None, Name=None, Description=None, DataSources=None, Type=None, AppSource=None, Domains=None, EnableSsl=None, SslConfiguration=None, Attributes=None, Environment=None):\n pass",
"def account_id():\n return client.get_caller_identity()['Account']",
"def __init__(__self__, resource_name, opts=None, cloudwatch_logging_options=None, code=None, description=None, inputs=None, name=None, outputs=None, reference_data_sources=None, tags=None, __props__=None, __name__=None, __opts__=None):\n if __name__ is not None:\n warnings.warn(\"explicit use of __name__ is deprecated\", DeprecationWarning)\n resource_name = __name__\n if __opts__ is not None:\n warnings.warn(\"explicit use of __opts__ is deprecated, use 'opts' instead\", DeprecationWarning)\n opts = __opts__\n if opts is None:\n opts = pulumi.ResourceOptions()\n if not isinstance(opts, pulumi.ResourceOptions):\n raise TypeError('Expected resource options to be a ResourceOptions instance')\n if opts.version is None:\n opts.version = utilities.get_version()\n if opts.id is None:\n if __props__ is not None:\n raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')\n __props__ = dict()\n\n __props__['cloudwatch_logging_options'] = cloudwatch_logging_options\n __props__['code'] = code\n __props__['description'] = description\n __props__['inputs'] = inputs\n __props__['name'] = name\n __props__['outputs'] = outputs\n __props__['reference_data_sources'] = reference_data_sources\n __props__['tags'] = tags\n __props__['arn'] = None\n __props__['create_timestamp'] = None\n __props__['last_update_timestamp'] = None\n __props__['status'] = None\n __props__['version'] = None\n super(AnalyticsApplication, __self__).__init__(\n 'aws:kinesis/analyticsApplication:AnalyticsApplication',\n resource_name,\n __props__,\n opts)",
"def validate_account_alias(iam_client, account_alias):\n # Super overkill here using pagination when an account can only\n # have a single alias, but at least this implementation should be\n # future-proof\n current_account_aliases = []\n paginator = iam_client.get_paginator('list_account_aliases')\n response_iterator = paginator.paginate()\n for page in response_iterator:\n current_account_aliases.extend(page.get('AccountAliases', []))\n if account_alias in current_account_aliases:\n LOGGER.info('Verified current AWS account alias matches required '\n 'alias %s.',\n account_alias)\n else:\n LOGGER.error('Current AWS account aliases \"%s\" do not match '\n 'required account alias %s in Runway config.',\n ','.join(current_account_aliases),\n account_alias)\n sys.exit(1)",
"def get_name(app):\n from uuid import uuid4 as uuid\n return (f'accelpy_{app[\"application\"][\"product_id\"]}'\n f'_{str(uuid()).replace(\"-\", \"\")[:8]}')",
"def delete_account_alias(self, alias):\r\n params = {'AccountAlias': alias}\r\n return self.get_response('DeleteAccountAlias', params)",
"def update_app(AppId=None, Name=None, Description=None, DataSources=None, Type=None, AppSource=None, Domains=None, EnableSsl=None, SslConfiguration=None, Attributes=None, Environment=None):\n pass",
"def create_application(name=None, description=None):\n pass",
"def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n api: Optional[pulumi.Input[pulumi.InputType['ApplicationApiArgs']]] = None,\n app_roles: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ApplicationAppRoleArgs']]]]] = None,\n description: Optional[pulumi.Input[str]] = None,\n device_only_auth_enabled: Optional[pulumi.Input[bool]] = None,\n display_name: Optional[pulumi.Input[str]] = None,\n fallback_public_client_enabled: Optional[pulumi.Input[bool]] = None,\n feature_tags: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ApplicationFeatureTagArgs']]]]] = None,\n group_membership_claims: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n identifier_uris: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n logo_image: Optional[pulumi.Input[str]] = None,\n marketing_url: Optional[pulumi.Input[str]] = None,\n notes: Optional[pulumi.Input[str]] = None,\n oauth2_post_response_required: Optional[pulumi.Input[bool]] = None,\n optional_claims: Optional[pulumi.Input[pulumi.InputType['ApplicationOptionalClaimsArgs']]] = None,\n owners: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n prevent_duplicate_names: Optional[pulumi.Input[bool]] = None,\n privacy_statement_url: Optional[pulumi.Input[str]] = None,\n public_client: Optional[pulumi.Input[pulumi.InputType['ApplicationPublicClientArgs']]] = None,\n required_resource_accesses: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ApplicationRequiredResourceAccessArgs']]]]] = None,\n service_management_reference: Optional[pulumi.Input[str]] = None,\n sign_in_audience: Optional[pulumi.Input[str]] = None,\n single_page_application: Optional[pulumi.Input[pulumi.InputType['ApplicationSinglePageApplicationArgs']]] = None,\n support_url: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n template_id: Optional[pulumi.Input[str]] = None,\n terms_of_service_url: Optional[pulumi.Input[str]] = None,\n web: Optional[pulumi.Input[pulumi.InputType['ApplicationWebArgs']]] = None,\n __props__=None):\n ...",
"def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n account_alias: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ..."
] | [
"0.5721863",
"0.539991",
"0.52493685",
"0.5185072",
"0.50867456",
"0.50587976",
"0.50040096",
"0.49955353",
"0.49649015",
"0.4962699",
"0.4920891",
"0.49197406",
"0.48996067",
"0.48474464",
"0.48399547",
"0.48251143",
"0.4793121",
"0.4738738",
"0.4676235",
"0.4674404",
"0.46740916",
"0.46709988",
"0.46539143",
"0.46457246",
"0.46251756",
"0.461774",
"0.4599477",
"0.45885172",
"0.45817024",
"0.45544958"
] | 0.5441419 | 1 |
Get an existing AccountAlias resource's state with the given name, id, and optional extra properties used to qualify the lookup. | def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'AccountAlias':
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = AccountAliasArgs.__new__(AccountAliasArgs)
__props__.__dict__["account_alias"] = None
__props__.__dict__["account_alias_resource_id"] = None
return AccountAlias(resource_name, opts=opts, __props__=__props__) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_account(self, account_id, **kwargs):\r\n\r\n if 'mask' not in kwargs:\r\n kwargs['mask'] = 'status'\r\n\r\n return self.account.getObject(id=account_id, **kwargs)",
"def get_account_alias(self):\r\n return self.get_response('ListAccountAliases', {},\r\n list_marker='AccountAliases')",
"def get_alias(function_name: Optional[str] = None,\n name: Optional[str] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetAliasResult:\n __args__ = dict()\n __args__['functionName'] = function_name\n __args__['name'] = name\n opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)\n __ret__ = pulumi.runtime.invoke('aws:lambda/getAlias:getAlias', __args__, opts=opts, typ=GetAliasResult).value\n\n return AwaitableGetAliasResult(\n arn=pulumi.get(__ret__, 'arn'),\n description=pulumi.get(__ret__, 'description'),\n function_name=pulumi.get(__ret__, 'function_name'),\n function_version=pulumi.get(__ret__, 'function_version'),\n id=pulumi.get(__ret__, 'id'),\n invoke_arn=pulumi.get(__ret__, 'invoke_arn'),\n name=pulumi.get(__ret__, 'name'))",
"def catalog_alias_get(self, args):\n try:\n alias = self.server.connect_ermrest_alias(args.id)\n response = alias.retrieve()\n if not args.quiet:\n pp(response)\n except HTTPError as e:\n if e.response.status_code == requests.codes.not_found:\n raise ResourceException('Catalog alias not found', e)\n else:\n raise e",
"def get_account(self, account_id=None, account_name=None, search=False):\n if not (account_id or account_name):\n aliases = self.get_account_aliases()\n if aliases:\n account_name = aliases[0]\n else:\n raise ValueError('get_account(). Account id, name, or alias not found')\n accounts = self.get_all_accounts(account_id=account_id, account_name=account_name,\n search=search)\n if accounts:\n if len(accounts) > 1:\n raise ValueError('get_account matched more than a single account with the '\n 'provided criteria: account_id=\"{0}\", account_name=\"{1}\". '\n 'Matched:{2}'\n .format(account_id, account_name,\n \", \".join(str(x) for x in accounts)))\n else:\n return accounts[0]\n return None",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n arn: Optional[pulumi.Input[str]] = None,\n minimum_engine_version: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n name_prefix: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n user_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None) -> 'Acl':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _AclState.__new__(_AclState)\n\n __props__.__dict__[\"arn\"] = arn\n __props__.__dict__[\"minimum_engine_version\"] = minimum_engine_version\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"name_prefix\"] = name_prefix\n __props__.__dict__[\"tags\"] = tags\n __props__.__dict__[\"tags_all\"] = tags_all\n __props__.__dict__[\"user_names\"] = user_names\n return Acl(resource_name, opts=opts, __props__=__props__)",
"def get_account(self, name):\n return self._accounts[name]",
"def account_alias_resource_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"account_alias_resource_id\")",
"def alias_lookup(alias):\n try:\n s = (session.query(Series)\n .filter_by(alias=alias, following=True)\n .one())\n except NoResultFound:\n output.error('Could not find alias \"{}\"'.format(alias))\n exit(1)\n else:\n return s",
"def __call__(self, alias):\n return self.get_by_alias(alias)",
"def get_adaccount(self, account_id, fields=None, batch=False):\n path = 'act_%s' % account_id\n args = {'fields': fields} if fields else {}\n return self.make_request(path, 'GET', args, batch=batch)",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n asset_statuses: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ZoneAssetStatusArgs']]]]] = None,\n create_time: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n discovery_spec: Optional[pulumi.Input[pulumi.InputType['ZoneDiscoverySpecArgs']]] = None,\n display_name: Optional[pulumi.Input[str]] = None,\n labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n lake: Optional[pulumi.Input[str]] = None,\n location: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n project: Optional[pulumi.Input[str]] = None,\n resource_spec: Optional[pulumi.Input[pulumi.InputType['ZoneResourceSpecArgs']]] = None,\n state: Optional[pulumi.Input[str]] = None,\n type: Optional[pulumi.Input[str]] = None,\n uid: Optional[pulumi.Input[str]] = None,\n update_time: Optional[pulumi.Input[str]] = None) -> 'Zone':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _ZoneState.__new__(_ZoneState)\n\n __props__.__dict__[\"asset_statuses\"] = asset_statuses\n __props__.__dict__[\"create_time\"] = create_time\n __props__.__dict__[\"description\"] = description\n __props__.__dict__[\"discovery_spec\"] = discovery_spec\n __props__.__dict__[\"display_name\"] = display_name\n __props__.__dict__[\"labels\"] = labels\n __props__.__dict__[\"lake\"] = lake\n __props__.__dict__[\"location\"] = location\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"project\"] = project\n __props__.__dict__[\"resource_spec\"] = resource_spec\n __props__.__dict__[\"state\"] = state\n __props__.__dict__[\"type\"] = type\n __props__.__dict__[\"uid\"] = uid\n __props__.__dict__[\"update_time\"] = update_time\n return Zone(resource_name, opts=opts, __props__=__props__)",
"def account(self, account_id: str):\n return get_from_list(self.accounts, \"id\", account_id)",
"def _find_account(account_id: str) -> AdAccount:\n try:\n return AdAccount(f\"act_{account_id}\").api_get()\n except FacebookRequestError as exc:\n message = (\n f\"Error: {exc.api_error_code()}, {exc.api_error_message()}. \"\n f\"Please also verify your Account ID: \"\n f\"See the https://www.facebook.com/business/help/1492627900875762 for more information.\"\n )\n raise AirbyteTracedException(\n message=message,\n failure_type=FailureType.config_error,\n ) from exc",
"def account_alias(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"account_alias\")",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n login: Optional[pulumi.Input[str]] = None,\n object_id: Optional[pulumi.Input[str]] = None,\n synapse_workspace_id: Optional[pulumi.Input[str]] = None,\n tenant_id: Optional[pulumi.Input[str]] = None) -> 'WorkspaceAadAdmin':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _WorkspaceAadAdminState.__new__(_WorkspaceAadAdminState)\n\n __props__.__dict__[\"login\"] = login\n __props__.__dict__[\"object_id\"] = object_id\n __props__.__dict__[\"synapse_workspace_id\"] = synapse_workspace_id\n __props__.__dict__[\"tenant_id\"] = tenant_id\n return WorkspaceAadAdmin(resource_name, opts=opts, __props__=__props__)",
"def __init__(__self__,\n resource_name: str,\n args: AccountAliasArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...",
"def _get_alias(cfg, name):\n aliases = cfg.get('aliases', {})\n if name in aliases:\n return aliases[name]\n if cfg['group_attribute_as_default_alias']:\n return name.split(SEP)[-1]\n return name",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n activation_key: Optional[pulumi.Input[str]] = None,\n arn: Optional[pulumi.Input[str]] = None,\n ip_address: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n private_link_endpoint: Optional[pulumi.Input[str]] = None,\n security_group_arns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n subnet_arns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n vpc_endpoint_id: Optional[pulumi.Input[str]] = None) -> 'Agent':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _AgentState.__new__(_AgentState)\n\n __props__.__dict__[\"activation_key\"] = activation_key\n __props__.__dict__[\"arn\"] = arn\n __props__.__dict__[\"ip_address\"] = ip_address\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"private_link_endpoint\"] = private_link_endpoint\n __props__.__dict__[\"security_group_arns\"] = security_group_arns\n __props__.__dict__[\"subnet_arns\"] = subnet_arns\n __props__.__dict__[\"tags\"] = tags\n __props__.__dict__[\"tags_all\"] = tags_all\n __props__.__dict__[\"vpc_endpoint_id\"] = vpc_endpoint_id\n return Agent(resource_name, opts=opts, __props__=__props__)",
"def a_state(id):\n state = storage.get(State, id)\n if state is not None:\n return jsonify(state.to_dict())\n abort(404)",
"def get_account_by_name(self, account_name):\n accounts = self.service_old.management().accounts().list().execute()\n\n account = None\n if accounts.get('items'):\n account = next(acnt for acnt in accounts.get('items') if acnt[\"name\"] == account_name)\n\n if account is None:\n log_msg = \"The account named \" + account_name + \" does not exist!\"\n print(log_msg)\n\n return account",
"def account(self, account_id):\r\n return resources.Account(self, account_id)",
"def get_integrations_speech_lex_bot_alias(self, alias_id, **kwargs):\n\n all_params = ['alias_id']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_integrations_speech_lex_bot_alias\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'alias_id' is set\n if ('alias_id' not in params) or (params['alias_id'] is None):\n raise ValueError(\"Missing the required parameter `alias_id` when calling `get_integrations_speech_lex_bot_alias`\")\n\n\n resource_path = '/api/v2/integrations/speech/lex/bot/alias/{aliasId}'.replace('{format}', 'json')\n path_params = {}\n if 'alias_id' in params:\n path_params['aliasId'] = params['alias_id']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = ['PureCloud OAuth']\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='LexBotAlias',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def create_account_alias(self, alias):\r\n params = {'AccountAlias': alias}\r\n return self.get_response('CreateAccountAlias', params)",
"def get_by_alias(self, alias):\n if alias not in self._aliases:\n raise DataInvalidAlias('A dataset with alias {} does not exist'.format(alias))\n\n return self.get_by_index(self._aliases[alias])",
"def get_alias_output(function_name: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetAliasResult]:\n ...",
"def alias(ctx, search, backend):\n projects = ctx.obj['projects_db'].search(search, active_only=True)\n projects = sorted(projects, key=lambda project: project.name)\n\n if len(projects) == 0:\n ctx.obj['view'].msg(\n \"No active project matches your search string '%s'.\" %\n ''.join(search)\n )\n return\n\n ctx.obj['view'].projects_list(projects, True)\n\n try:\n number = ctx.obj['view'].select_project(projects)\n except CancelException:\n return\n\n project = projects[number]\n ctx.obj['view'].project_with_activities(project, numbered_activities=True)\n\n try:\n number = ctx.obj['view'].select_activity(project.activities)\n except CancelException:\n return\n\n retry = True\n while retry:\n try:\n alias = ctx.obj['view'].select_alias()\n except CancelException:\n return\n\n if alias in aliases_database:\n mapping = aliases_database[alias]\n overwrite = ctx.obj['view'].overwrite_alias(alias, mapping)\n\n if not overwrite:\n return\n elif overwrite:\n retry = False\n # User chose \"retry\"\n else:\n retry = True\n else:\n retry = False\n\n activity = project.activities[number]\n mapping = Mapping(mapping=(project.id, activity.id),\n backend=project.backend)\n ctx.obj['settings'].add_alias(alias, mapping)\n ctx.obj['settings'].write_config()\n\n ctx.obj['view'].alias_added(alias, (project.id, activity.id))",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n access_configuration_id: Optional[pulumi.Input[str]] = None,\n access_configuration_name: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n directory_id: Optional[pulumi.Input[str]] = None,\n force_remove_permission_policies: Optional[pulumi.Input[bool]] = None,\n permission_policies: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['AccessConfigurationPermissionPolicyArgs']]]]] = None,\n relay_state: Optional[pulumi.Input[str]] = None,\n session_duration: Optional[pulumi.Input[int]] = None) -> 'AccessConfiguration':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _AccessConfigurationState.__new__(_AccessConfigurationState)\n\n __props__.__dict__[\"access_configuration_id\"] = access_configuration_id\n __props__.__dict__[\"access_configuration_name\"] = access_configuration_name\n __props__.__dict__[\"description\"] = description\n __props__.__dict__[\"directory_id\"] = directory_id\n __props__.__dict__[\"force_remove_permission_policies\"] = force_remove_permission_policies\n __props__.__dict__[\"permission_policies\"] = permission_policies\n __props__.__dict__[\"relay_state\"] = relay_state\n __props__.__dict__[\"session_duration\"] = session_duration\n return AccessConfiguration(resource_name, opts=opts, __props__=__props__)",
"def get_account(self, accountid):\n payload = {'appkey': self._lr_object._get_api_key(), 'appsecret': self._lr_object._get_api_secret(),\n 'accountid': accountid}\n url = SECURE_API_URL + \"raas/v1/account\"\n return self._lr_object._get_json(url, payload)",
"def account_alias(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"account_alias\")"
] | [
"0.57623905",
"0.5468993",
"0.5423683",
"0.5413363",
"0.5407908",
"0.5336177",
"0.52379334",
"0.5185594",
"0.51565856",
"0.51535463",
"0.5095249",
"0.50847876",
"0.50281215",
"0.49911034",
"0.49316874",
"0.49263456",
"0.49024594",
"0.49007356",
"0.48846614",
"0.48834765",
"0.48685518",
"0.48618463",
"0.48577717",
"0.4816911",
"0.48117885",
"0.48037234",
"0.48026788",
"0.4778228",
"0.4768821",
"0.47604132"
] | 0.7493493 | 0 |
An account alias associated with a customer's account. | def account_alias(self) -> pulumi.Output[str]:
return pulumi.get(self, "account_alias") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def account_alias(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"account_alias\")",
"def get_account_alias(self):\r\n return self.get_response('ListAccountAliases', {},\r\n list_marker='AccountAliases')",
"def account_alias_resource_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"account_alias_resource_id\")",
"def create_account_alias(self, alias):\r\n params = {'AccountAlias': alias}\r\n return self.get_response('CreateAccountAlias', params)",
"def __init__(__self__, *,\n account_alias: pulumi.Input[str]):\n pulumi.set(__self__, \"account_alias\", account_alias)",
"def account(self) -> str:\n return self._account",
"def account(self) -> str:\n return self._account",
"def account(self, acct):\n aMgr = self.acctManager\n if len(aMgr.accounts) <= acct:\n raise Exception(\"requested unknown account number %i\" % acct)\n return aMgr.account(acct)",
"def getaccountaddress(self, account):\n return self.proxy.getaccountaddress(account)",
"def get_accountname_for_active_connection(self):\n aliases = self.get_account_aliases()\n if aliases:\n return aliases[0]\n return None",
"def account_name(self) -> str:\n return pulumi.get(self, \"account_name\")",
"def account_name(self) -> str:\n return pulumi.get(self, \"account_name\")",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'AccountAlias':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = AccountAliasArgs.__new__(AccountAliasArgs)\n\n __props__.__dict__[\"account_alias\"] = None\n __props__.__dict__[\"account_alias_resource_id\"] = None\n return AccountAlias(resource_name, opts=opts, __props__=__props__)",
"def getCustomerAccount(self):\n return self._CustomerAccount",
"def getCustomerAccount(self):\n return self._CustomerAccount",
"def get_account(self, account):\n \n pass",
"def account(self, account_code):\r\n return acc.Account(self, account_code)",
"def get_connections_accountname(self):\n account_info = self.get_account()\n return getattr(account_info, 'account_name', None)",
"def display_account(account):\n if 'accountName' not in account and 'emailAddress' not in account:\n account_template = '{accountId}'\n elif 'emailAddress' not in account:\n account_template = '{accountName} ({accountId})'\n elif 'accountName' not in account:\n account_template = '{emailAddress} ({accountId})'\n else:\n account_template = '{accountName}, {emailAddress} ({accountId})'\n return account_template.format(**account)",
"def alias(self):\n return self._alias",
"def alias(self):\n return self._alias",
"def account_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"account_name\")",
"def account_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"account_name\")",
"def account_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"account_name\")",
"def get_account_for_tenant(test_auth, tenant_id):\n return '%s%s' % (test_auth.reseller_prefixes[0], tenant_id)",
"def delete_account_alias(self, alias):\r\n params = {'AccountAlias': alias}\r\n return self.get_response('DeleteAccountAlias', params)",
"def alias(self):\n\n return self._alias",
"def account_id(self) -> str:\n return self._account_id",
"def get_account(self):\n return self._account",
"def get_account(self):\n return self._account"
] | [
"0.78406376",
"0.75032926",
"0.70096886",
"0.6901152",
"0.663217",
"0.6561262",
"0.6561262",
"0.6388388",
"0.6385573",
"0.6365307",
"0.6309277",
"0.6309277",
"0.62986344",
"0.6257074",
"0.6257074",
"0.62428296",
"0.6241097",
"0.620877",
"0.60629225",
"0.6019607",
"0.6019607",
"0.59752417",
"0.59752417",
"0.59752417",
"0.5973346",
"0.5942614",
"0.59423244",
"0.59374624",
"0.5909374",
"0.5909374"
] | 0.7883774 | 0 |
Unique identifier representing an alias tied to an account | def account_alias_resource_id(self) -> pulumi.Output[str]:
return pulumi.get(self, "account_alias_resource_id") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def account_alias(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"account_alias\")",
"def account_alias(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"account_alias\")",
"def account_id(self) -> str:\n return self._account_id",
"def account_id(self) -> str:\n return pulumi.get(self, \"account_id\")",
"def account_id(self) -> str:\n return pulumi.get(self, \"account_id\")",
"def account_id(self) -> str:\n return pulumi.get(self, \"account_id\")",
"def account_id(self) -> str:\n return pulumi.get(self, \"account_id\")",
"def account_id(self) -> str:\n return pulumi.get(self, \"account_id\")",
"def account_id(self) -> str:\n return pulumi.get(self, \"account_id\")",
"def account_id(self) -> str:\n return pulumi.get(self, \"account_id\")",
"def account_id(self) -> str:\n return pulumi.get(self, \"account_id\")",
"def get_alias(self):",
"def create_account_alias(self, alias):\r\n params = {'AccountAlias': alias}\r\n return self.get_response('CreateAccountAlias', params)",
"def get_account_alias(self):\r\n return self.get_response('ListAccountAliases', {},\r\n list_marker='AccountAliases')",
"def unique_id(self):\n return self.config_entry.entry_id + \"lsa\"",
"async def _get_account_id(db, name):\n assert name, 'no account name specified'\n _id = await db.query_one(\"SELECT id FROM hive_accounts WHERE name = :n\", n=name)\n assert _id, \"account not found: `%s`\" % name\n return _id",
"def __init__(__self__, *,\n account_alias: pulumi.Input[str]):\n pulumi.set(__self__, \"account_alias\", account_alias)",
"def account_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"account_id\")",
"def account_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"account_id\")",
"def account_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"account_id\")",
"def get_account_id():\n STORED_ID[\"account_id\"] = CONFIG_DATA['account_id']\n STORED_ID[\"member_id\"] = CONFIG_DATA['member_id']",
"def unique_id(self):\n id = \"{}{}{}\".format(\n DOMAIN, self._account, self.sensorName.lower().replace(\" \", \"\")\n )\n return id",
"def account_id(self):\n return self._account_id",
"def alias(self):\n return self._alias",
"def alias(self):\n return self._alias",
"def name(self):\n return self._alias",
"def _get_alias(full_or_partial_id):\n # Note that this works for identifiers of all types currently described in the spec, i.e.:\n # 1. did:factom:f0e4c2f76c58916ec258f246851bea091d14d4247a2fc3e18694461b1816e13b#management-2\n # 2. did:factom:mainnet:f0e4c2f76c58916ec258f246851bea091d14d4247a2fc3e18694461b1816e13b#management-2\n # 2. #inbox\n # 3. management-1\n # The function will return management-2, inbox and management-1, respectively\n return full_or_partial_id.split(\"#\")[-1]",
"def unique_id(self):\n return '{}-{}-{}'.format(self.airly.latitude, self.airly.longitude,\n self.type)",
"def unique_id() -> str:",
"def account_id(self):\n return self.config.account_id"
] | [
"0.7159028",
"0.7086017",
"0.66211665",
"0.6494042",
"0.6494042",
"0.6494042",
"0.6494042",
"0.6494042",
"0.6494042",
"0.6494042",
"0.6494042",
"0.63838947",
"0.63640165",
"0.63252497",
"0.63003075",
"0.62900585",
"0.62604374",
"0.6224986",
"0.6224986",
"0.6224986",
"0.6215175",
"0.620134",
"0.61976767",
"0.61927986",
"0.61927986",
"0.6191151",
"0.6155124",
"0.6154621",
"0.6152426",
"0.6146685"
] | 0.7182103 | 0 |
Empty entry point to the Lambda function invoked from the edge. | def lambda_handler(event, context):
return | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def lambda_handler(event, context):\n return dispatch(event)",
"def test_lambda_support_no_parameters_no_body(self):\n self.assert_contains_lambda_expression_in_m(\n parse.parse(setup_java_class(\"() -> {};\")))",
"def default_event_handler(event):\n pass",
"def lambda_handler(event, context):\n\n logger.debug('event.bot.name={}'.format(event['bot']['name']))\n\n return dispatch(event)",
"def dummy_fn(self):\n\t\tpass",
"def dummy_fn(self, *args, **kwargs):",
"def visit_Lambda(self, node: ast.Lambda) -> None:\n self._check_useless_lambda(node)\n self._check_implicit_primitive(node)\n self.generic_visit(node)",
"def lambda_handler(event: Any, context: Any) -> Any:\n \n operation = event['op']\n order_id = int(event['order_id'])\n\n action_class = getattr(sys.modules[__name__], 'Action')\n action_instance = action_class(order_id)\n action_instance_method = getattr(action_instance, \"handle_\" + operation)\n return action_instance_method(event)",
"def visit_Lambda(self, node: ast.Lambda) -> None:\n self._counter.check_arguments_count(node)\n self.generic_visit(node)",
"def dummy_callback_handler(self, ret):\n pass",
"def on_invoke(self, ins, const, obj, args):\n pass",
"def bind(self, _target: aws_cdk.aws_lambda.IFunction) -> None:\n ...",
"def invoke(self, event_args, *args, **kwargs):\n pass # pragma: no cover",
"def dummy_callback(obj):\n pass",
"def register_apply_edge_func(self, func, block_id=...): # -> None:\n ...",
"def callback(self, fun: Callable[[], None] | None) -> None:",
"def lambda_handler(event, context):\n try:\n aq = Aquifer()\n aq.run()\n\n return \"Completed\"\n\n except (Exception, KeyboardInterrupt) as e:\n return \"Error occurred\"",
"def lambda_handler(event):\r\n return 'Hello ' + event['queryParams']['name']",
"def n_lambda(self):\n return self.b()",
"def test_lambda_support_no_parameters_expression_body(self):\n test_classes = [\n setup_java_class(\"() -> 3;\"),\n setup_java_class(\"() -> null;\"),\n setup_java_class(\"() -> { return 21; };\"),\n setup_java_class(\"() -> { System.exit(1); };\"),\n ]\n for test_class in test_classes:\n clazz = parse.parse(test_class)\n self.assert_contains_lambda_expression_in_m(clazz)",
"def lambda_method(self,t): \n return 5*math.sin(2*math.pi*1*t) # I don't see the value of 1 here but this is how lamda is defined in the exercise.",
"def one():\n return lambda f: lambda x: f(x)",
"def testGetLambda(self):\n self.ports.get_lambda(file_name = 'get_lambda.xml', port_ids = portsDict['port_ids'], lambdas = portsDict['lambda'])",
"def _funcOrLambda(self, node, gen, ndecorators):\n gen.Start()\n gen.FindLocals()\n gen.Dispatch(node.code)\n gen.Finish()\n\n self.set_lineno(node)\n for default in node.defaults:\n self.visit(default)\n self._makeClosure(gen, len(node.defaults))\n for i in xrange(ndecorators):\n self.emit('CALL_FUNCTION', 1)",
"def __call__(fun_name):",
"def lambda_handler(event, context):\n name: str = event['name']\n return f'Hi {name}!'",
"def lambdafan(func):\n if 'AWS_LAMBDA_FUNCTION_NAME' not in os.environ:\n return func\n\n @functools.wraps(func)\n def scaleout(*args, **kw):\n client = boto3.client('lambda')\n client.invoke(\n FunctionName=os.environ['AWS_LAMBDA_FUNCTION_NAME'],\n InvocationType='Event',\n Payload=dumps({\n 'event': 'fanout',\n 'function': func.__name__,\n 'args': args,\n 'kwargs': kw}),\n Qualifier=os.environ['AWS_LAMBDA_FUNCTION_VERSION'])\n return scaleout",
"def test_lambda_wrapper_basic_events(reporter_mock, context):\n\n @lumigo_tracer(token=\"123\")\n def lambda_test_function(event, context):\n pass\n\n lambda_test_function({}, context)\n function_span = SpansContainer.get_span().function_span\n assert not SpansContainer.get_span().spans\n assert \"started\" in function_span\n assert \"ended\" in function_span\n assert reporter_mock.call_count == 2\n first_send = reporter_mock.call_args_list[0][1][\"msgs\"]\n assert len(first_send) == 1\n assert first_send[0][\"id\"].endswith(\"_started\")\n assert first_send[0][\"maxFinishTime\"]",
"def callback(self, function: Optional[Callable[[int], None]]) -> None:",
"def lambda_function(f):\n @functools.wraps(f)\n def wrapper(event, context):\n global _CURRENT_LAMBDA_CONTEXT\n _CURRENT_LAMBDA_CONTEXT = context\n try:\n result = f(event, context)\n return wait(lambda: result)\n except:\n cls, exc, trace = sys.exc_info()\n report_exc_info((cls, exc, trace.tb_next))\n wait()\n raise\n return wrapper"
] | [
"0.6477498",
"0.59714395",
"0.5928787",
"0.5917701",
"0.5867142",
"0.5849005",
"0.5807291",
"0.56738883",
"0.5651294",
"0.56336075",
"0.5581271",
"0.5554514",
"0.555258",
"0.55266166",
"0.55265856",
"0.5456136",
"0.54127485",
"0.54027086",
"0.53797555",
"0.5375195",
"0.53732866",
"0.53415954",
"0.5322909",
"0.53228927",
"0.5314293",
"0.53093976",
"0.5297773",
"0.5292454",
"0.52866215",
"0.5284046"
] | 0.65870404 | 1 |
Method updates the image data. This currently encodes the numpy array to jpg but can be modified to support other encodings. frame Numpy array containing the image data of the next frame in the project stream. | def set_frame_data(self, frame):
ret, jpeg = cv2.imencode('.jpg', cv2.resize(frame, self.resolution))
if not ret:
raise Exception('Failed to set frame data')
self.frame = jpeg | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_frame_data(self, frame):\n ret, jpeg = cv2.imencode('.jpg', cv2.resize(frame, self.resolution))\n \n if not ret:\n raise Exception('Failed to set frame data')\n self.frame = jpeg",
"def _write_frame(self : \"animation\",\n frame : \"np.ndarray\"\n ):\n self._writer.append_data(frame)\n self._frame_number += 1\n self._prevFrame = frame",
"def update_frame(self, frame):\n\n t = datetime.now()\n delta_t = t - self.dpar.frame_timestamp[0]\n fps = self.dpar.update_fps(1./delta_t.total_seconds())\n\n self.dpar.frame_timestamp[0] = t\n\n if self.config.black_correct:\n cframe = self.ffc.black_correct(frame)\n else:\n cframe = frame\n\n self.dpar.latest_frame = np.copy(cframe)\n \n if self.dpar.cap_live_swap:\n pix, gray = self._get_pixmap(cframe[::4,::4], self.dpar.iwindow[0])\n self.cap_screen.cap_title = self._live_title(fps)\n self.cap_screen.setPixmap(pix)\n else: \n pix, gray = self._get_pixmap(cframe, self.dpar.iwindow[0])\n self.live_screen.live_title = self._live_title(fps)\n self.live_screen.setPixmap(pix)\n\n self.draw_histogram()\n\n\n if self.recording_sequence:\n\n # MRP ToDo update these tags properly.\n et = np.int(np.round(self.camera.actual_exposure_time_ms))\n ifi_ms = 1000. / self.camera.actual_frame_rate\n ts_ms = np.int(np.round(ifi_ms * self.seq_frame_num))\n\n self.ifd.update_tags((self.seq_frame_num, 0), et, 0, ts_ms, 99)\n\n cap_image = np.copy(self.dpar.latest_frame).astype(np.uint16)\n #cv2.imwrite(cfn, (cap_image << (16 - self.camera.pixel_bits)).astype(np.uint16))\n\n \"\"\"\n Perform the TIFF windowing and then rebinning (compress) according to config file options\n \"\"\"\n x0 = max(0, (cap_image.shape[1] - config.tiff_seq_x_window) // 2)\n x1 = cap_image.shape[1] - x0\n y0 = max(0, (cap_image.shape[0] - config.tiff_seq_y_window) // 2)\n y1 = cap_image.shape[0] - y0\n cap_image = cap_image[y0:y1, x0:x1]\n\n shift_bits = 16 - self.camera.pixel_bits\n if config.tiff_seq_rebin > 1: # not tested for r ne 2\n r = config.tiff_seq_rebin\n cap_image = cap_image.reshape((cap_image.shape[0] // r, r, cap_image.shape[1] // r, -1)).sum(axis=3).sum(axis=1)\n extra_bits = 2 * (r.bit_length() -1)\n shift_bits = max(0, shift_bits - extra_bits)\n\n\n #im = PIL.Image.fromarray(gray)\n im = PIL.Image.fromarray((cap_image << shift_bits).astype(np.uint16))\n\n im.save(self.tiff_out, tiffinfo=self.ifd, compression=TIFF_COMPRESSION)\n self.tiff_out.newFrame()\n self.seq_frame_num += 1\n self.seq_frame_label.setText(str(self.seq_frame_num))\n\n if self.recording_video:\n # cframe is int16\n #f8 = ((cframe >> (self.camera.pixel_bits - 8)) & 0xff).astype(np.uint8)\n #Style 1:\n #fc = np.stack((f8, f8, f8), axis=-1)\n #self.rv_vout.write(fc)\n #Style 2&3:\n self.rv_vout.write(gray)\n self.recorded_video_frame_number += 1\n #Style 4: (16-bit)\n #self.rv_vout.write(cframe)\n\n #if self.recorded_video_frame_number == 20:\n # self.record_video() # turn off",
"def gen():\n global dataFrame\n while True:\n frame = vs.read()\n # frame = imutils.resize(frame, width=400)\n \n (flag, encodedImage) = cv2.imencode(\".jpg\", frame.copy())\n if not flag: continue\n # print (encodedImage)\n dataFrame = yield (b'--frame\\r\\n'\n b'Content-Type: image/jpeg\\r\\n\\r\\n' + bytearray(encodedImage) + b'\\r\\n')",
"def send_frame(self):\n frame = self.frame_buffer.get()\n result, jpeg = cv2.imencode(\".jpg\", frame.nparray)#, self.encode_param)\n data = numpy.array(jpeg)\n string_data = data.tostring()\n self.sock.send(str(len(string_data)).ljust(16))\n self.sock.send(string_data)",
"def update_anim(frame, self):\n self.step()\n self.im.set_data(self.array)\n self.im2.set_data(self.array2)",
"def adjust_image_data(self):\r\n\r\n print('Adjusting image data: ')\r\n\r\n if self.removeFirstSequence: # used to remove the first trial from the sequence\r\n\r\n frames_per_rep = self.nFrames/self.nrepetitions\r\n\r\n self.imageData = self.imageData[frames_per_rep:, :, :]\r\n\r\n self.nFrames = self.imageData.shape[0]\r\n\r\n self.nrepetitions = int(self.nFrames/(self.period * self.framerate))\r\n\r\n self.times = np.arange(0, self.nFrames/self.framerate, 1.0/self.framerate)\r\n\r\n \r\n\r\n # first squeeze the image to 3d if it is 4d\r\n\r\n maxt = np.max(self.times) # find last image time\r\n\r\n sh = self.imageData.shape\r\n\r\n if len(sh) == 4:\r\n\r\n self.imageData = self.imageData.squeeze()\r\n\r\n sh = self.imageData.shape\r\n\r\n dt = np.mean(np.diff(self.times)) # get the mean dt\r\n\r\n n_Periods = int((maxt+dt)/self.period) # how many full periods in the image set - include the first?\r\n\r\n if self.nrepetitions > 0 and self.nrepetitions < n_Periods:\r\n\r\n n_Periods = self.nrepetitions\r\n\r\n n_PtsPerCycle = int(np.floor(self.period/dt)); # estimate image points in a stimulus cycle\r\n\r\n ndt = self.period/n_PtsPerCycle\r\n\r\n self.imageData = self.imageData[range(0, n_Periods*n_PtsPerCycle),:,:] # reduce to only what we need\r\n\r\n print (' Adjusted image info')\r\n\r\n print (\" # Periods: %d Pts/cycle: %d Cycle dt %8.4fs (%8.3fHz) Cycle: %7.4fs\" %(n_Periods, n_PtsPerCycle, ndt, 1.0/ndt, self.period))\r\n\r\n self.print_image_info()",
"def update_frame(self, frame):\n self.set_bank(frame)\n offset = 0\n for chunk in self._chunk(self._buf[frame], 32):\n self.i2c.write_i2c_block_data(self.address, _COLOR_OFFSET + offset, chunk)\n offset += 32",
"def _save_frame_as_png(\n self : \"animation\",\n frame : \"np.ndarray\",\n filename : \"str\"\n ):\n im = Image.fromarray(frame)\n im.save(filename)",
"def draw(self, frame):\n frame[OFS:OFS+self.image.shape[0], OFS:OFS+self.image.shape[1]] = self.image",
"def write_frame(self, img):\n if img.shape[0] % 2 != 0:\n print(\"Warning: height is not divisible by 2! Dropping last row\")\n img = img[:-1]\n if img.shape[1] % 2 != 0:\n print(\"Warning: width is not divisible by 2! Dropping last column\")\n img = img[:, :-1]\n if self.post_processor:\n img = self.post_processor.process(img)\n if self.width is None:\n self.width = img.shape[0]\n self.height = img.shape[1]\n assert os.path.exists(self.directory)\n fn = FRAME_FN_TEMPLATE % self.frame_counter\n self.frame_fns.append(fn)\n imwrite(img, os.path.join(self.frame_directory, fn))\n self.frame_counter += 1\n if self.frame_counter % self.next_video_checkpoint == 0:\n if self.automatic_build:\n self.make_video()\n self.next_video_checkpoint *= 2",
"def NextFrame(self, event):\n buffer = self.GetDataBuffer()\n if buffer is not None:\n # Update bitmap widget with new image frame:\n self.bitmap.CopyFromBuffer(buffer)\n # Refresh panel to draw image into bitmap:\n self.Refresh()\n pass",
"async def put(self, frame: RawArray):\r\n if self.full():\r\n raise IndexError(\"not enough internal buffer\")\r\n self.frames[self._write_index][:] = frame\r\n\r\n self._write_index = (self._write_index + 1) % self.capacity()\r\n self._is_full = self._read_index == self._write_index",
"def update(self, frame = None):\n if type(frame) == type(None):\n frame = self.video.get_frame()\n height, width, channel = frame.shape\n bytesPerLine = 3 * width\n image = QImage(frame.data, width, height, bytesPerLine, QImage.Format_RGB888)\n self.pixmap = QtGui.QPixmap(image)\n size = self.size()\n scaledPix = self.pixmap.scaled(size, Qt.KeepAspectRatio, transformMode = Qt.FastTransformation)\n self.setPixmap(scaledPix)\n\n QtCore.QCoreApplication.processEvents()",
"def write(self, Width, Height, ImageData, Speed):\n # write_begin = datetime.datetime.now()\n\n self.Data.Game.Speed = Speed\n\n # TODO Not sure if needed\n AspectRatio = Width / Height\n TargetWidth = int(self._TargetResolution[1] * AspectRatio)\n\n if TargetWidth >= self._TargetResolution[0]:\n if Width != TargetWidth or Height != self._TargetResolution[1]:\n ImageData = cv2.resize(ImageData, (TargetWidth, self._TargetResolution[1]))\n\n if TargetWidth != self._TargetResolution[0]:\n XStart = int(TargetWidth / 2 - self._TargetResolution[0] / 2)\n XStop = int(TargetWidth / 2 + self._TargetResolution[0] / 2)\n ImageData = ImageData[:, XStart:XStop]\n\n else:\n TargetHeight = int(self._TargetResolution[0] / AspectRatio)\n\n if Width != self._TargetResolution[0] or Height != TargetHeight:\n ImageData = cv2.resize(ImageData, (self._TargetResolution[1], TargetHeight))\n\n if TargetHeight != self._TargetResolution[1]:\n YStart = int(TargetHeight / 2 - self._TargetResolution[1] / 2)\n YStop = int(TargetHeight / 2 + self._TargetResolution[1] / 2)\n ImageData = ImageData[YStart:YStop, :]\n ImageData = cv2.flip(ImageData, 0)\n # Update Parameters\n\n Height, Width = ImageData.shape[:2]\n # print(\"Type is \", np.array(ImageData).dtype)\n\n # Set the SHM\n self.Data.Image.ImageWidth = Width\n self.Data.Image.ImageHeight = Height\n\n # Reshape ImageData to 1 D array\n ImageData = ImageData.flatten()\n\n\n # print(\"Target Image data\", Width, Height)\n\n start_time = datetime.datetime.now()\n self.Data.Image.Data = (ctypes.c_uint8 * (RECORD_MAX_IMAGE_HEIGHT * RECORD_MAX_IMAGE_WIDTH * RECORD_IMAGE_CHANNELS))(*np.array(ImageData))\n\n # elapsed = datetime.datetime.now() - start_time\n # print(\"Setting Image data \", int(elapsed.total_seconds() * 1000) )\n #\n # Notify we wrote a new data - Maybe we can also share the frame number\n #self.Data.Sync.IsWritten = 1\n # elapsed = datetime.datetime.now() - write_begin\n # print(\"Write to memory took \", int(elapsed.total_seconds() * 1000))\n\n if self._IsPauseOn:\n self.Data.Sync.IsPauseOn = 1\n else:\n self.Data.Sync.IsPauseOn = 0",
"def update_image(self):\n self.image = Image.fromarray(self.img)",
"def convert_to_image(self, frame, base64_encode=False):\n #NOTE: tuple (85010, 1) ndarray --> data reduction\n img_buf_arr = cv2.imencode(\".jpeg\", frame)[1]\n if base64_encode:\n img_buf_arr = b\"data:image/jpeg;base64,\" + base64.b64encode(img_buf_arr)\n return img_buf_arr\n return bytes(img_buf_arr)",
"def update_img(self):\n self.img = np.array(self.image)",
"def encoder(cls, frames) -> bytearray:\n\t\tframe_it = iter(frames)\n\t\tprev = next(frame_it).copy()\n\t\tall_events = get_events_by_position(frames)\n\n\t\t# Encode resolution and number of frames\n\t\tyield struct.pack('>3I', prev.shape[0], prev.shape[1], len(frames))\n\n\t\t# Encode first frame\n\t\tyield prev.tobytes()\n\n\t\t# Yield events for each pixel in turn\n\t\tyield from cls._events_to_bytes(all_events)",
"def send_frame(self, frame: np.ndarray) -> None:\n self.sink.putFrame(frame)",
"def get_data(self):\n global CAM\n while CAM.isOpened():\n _, frame = CAM.read()\n _, jpeg = cv2.imencode('.jpg', frame)\n encoded_img = \"data:image/jpg;base64,\" + str(base64.b64encode(jpeg.tobytes()).decode())\n SIO.emit('video_frame',\n {'frame': encoded_img},\n namespace='/live-stream')\n sleep(self.delay)",
"def animate_with_numpy_frame_sequence(self, numpy_frame_sequence, frames_per_second=15):\n\n sleep_time = 1/frames_per_second\n for animation_frame in numpy_frame_sequence:\n tic = time.time()\n self.set_image_from_numpy_array(animation_frame)\n self.update()\n toc = time.time()\n frame_generation_time = toc-tic\n if frame_generation_time < sleep_time:\n new_sleep_time = sleep_time - frame_generation_time\n time.sleep(new_sleep_time)\n else:\n pass",
"def draw(self, frame, offset=OFS):\n frame[\n OFS : OFS + self.image.shape[0], OFS : OFS + self.image.shape[1]\n ] = self.image",
"def send_jpg(frame_jpg, frame_count):\n try:\n\n img_bytes = frame_jpg\n ticks = time.time()\n\n frame_package = {\n 'CaptureTime': ticks,\n 'FrameCount': frame_count,\n 'ImageBytes': img_bytes\n }\n\n # Put encoded image in kinesis stream\n print(\"Sending image to Kinesis...\")\n response = kinesis_client.put_record(\n StreamName=KINESIS_STREAM_NAME,\n Data=pickle.dumps(frame_package),\n PartitionKey=str(uuid.uuid4())\n )\n print(response)\n except Exception as ex:\n print(ex)",
"def save_frame(frame):\n try:\n img = Image.fromarray(frame.array, 'RGB')\n out_path = settings['app']['web_path']\n if not os.path.isabs(out_path):\n out_path = os.path.join(basepath, out_path)\n filename = os.path.join(out_path, 'static', 'latest.jpg')\n tmp_filename = '{}.part'.format(filename)\n img.save(tmp_filename, 'jpeg')\n os.rename(tmp_filename, filename)\n except Exception, error:\n print('Error saving frame: {}'.format(error))",
"def update_frame(self):\n if not self.image_queue: return\n image = self.image_queue.pop()\n self.image_queue.rotate(-1)\n self.original_image = image\n self.altered_image = image.copy()\n\n if self.tracking:\n self.update_frame_tracking()\n self.display_image(True)\n elif self.calibrating:\n self.update_frame_calibrating()\n self.display_image(True)\n else:\n image = cv2.flip(self.altered_image, 1)\n self.display_image(True)",
"def get_frame(self):\n self._serial_port.close()\n self._serial_port.open()\n\n self._request_frame()\n\n serial_data = self._serial_port.readall()\n\n frame_start_idx = serial_data.find(BEGIN_FRAME) + len(BEGIN_FRAME)\n frame_end_idx = serial_data.find(END_FRAME)\n\n print serial_data[0:frame_start_idx]\n print serial_data[frame_end_idx:]\n\n raw_frame = serial_data[frame_start_idx:frame_end_idx]\n\n np_frame = np.fromstring(raw_frame, dtype=np.uint8)\n # np_frame = np_frame.reshape((30, 30))\n\n # image = cv2.fromarray(np_frame)\n\n # return image\n return np_frame",
"def store_frame(self, frame):\n if self.obs is None:\n self.obs = np.empty([self.size] + list(frame.shape), dtype=np.uint8)\n self.action = np.empty([self.size], dtype=np.int32)\n self.reward = np.empty([self.size], dtype=np.float32)\n self.done = np.empty([self.size], dtype=np.bool)\n self.obs[self.next_idx] = frame\n\n ret = self.next_idx\n self.next_idx = (self.next_idx + 1) % self.size\n self.num_in_buffer = min(self.size, self.num_in_buffer + 1)\n\n return ret",
"def read(self):\n try:\n if self.Data.Sync.IsWritten == 1:\n\n if self._IsPauseOn:\n self.Data.Sync.IsPauseOn = 1\n else:\n self.Data.Sync.IsPauseOn = 0\n\n Width = self.Data.Image.ImageWidth\n Height = self.Data.Image.ImageHeight\n\n # Image = np.fromstring(self.Data.Image.Data, np.uint8, Width * Height * self.TARGET_IMAGE_CHANNELS)\n Image = np.frombuffer(self.Data.Image.Data, np.uint8, Width * Height * self.TARGET_IMAGE_CHANNELS)\n Image = Image.reshape(Height, Width, self.TARGET_IMAGE_CHANNELS)\n\n AspectRatio = Width / Height\n TargetWidth = int(self._TargetResolution[1] * AspectRatio)\n\n if TargetWidth >= self._TargetResolution[0]:\n if Width != TargetWidth or Height != self._TargetResolution[1]:\n Image = cv2.resize(Image, (TargetWidth, self._TargetResolution[1]))\n\n if TargetWidth != self._TargetResolution[0]:\n XStart = int(TargetWidth/2 - self._TargetResolution[0]/2)\n XStop = int(TargetWidth/2 + self._TargetResolution[0]/2)\n Image = Image[:, XStart:XStop]\n\n else:\n TargetHeight = int(self._TargetResolution[0]/AspectRatio)\n\n if Width != self._TargetResolution[0] or Height != TargetHeight:\n Image = cv2.resize(Image, (self._TargetResolution[1], TargetHeight))\n\n if TargetHeight != self._TargetResolution[1]:\n YStart = int(TargetHeight/2 - self._TargetResolution[1]/2)\n YStop = int(TargetHeight/2 + self._TargetResolution[1]/2)\n Image = Image[YStart:YStop, :]\n\n # Shall we convert this to 0 - 1 ?\n self._RawImage = Image\n self._Image = cv2.flip(Image, 0)\n\n # This one does not flip the image, but it rotate and crop !!\n # self._Image = np.array(cv2.flip(Image, 0)/255, dtype=np.float32)\n # self._Image = cv2.flip(Image, 0)\n\n\n # This one is flipped upside/down\n # print(\"Image from memory reshaped as WxH with Mean\", Width, Height, np.mean((self._Image), axis=(0, 1)))\n # self.store_to_file(self._Image)\n\n return True\n except:\n print(\"Unexpected error in Shared Memory Read\", sys.exc_info()[0])\n\n return False",
"def calculate_frame(self):\n frame = self.stream.read()\n self.keypoints, self.image = self.openpose.forward(frame, True)"
] | [
"0.7208118",
"0.66432834",
"0.6403263",
"0.6372892",
"0.63293654",
"0.631112",
"0.63068485",
"0.62924564",
"0.61214113",
"0.6091055",
"0.607973",
"0.60394657",
"0.5955175",
"0.5935915",
"0.58978486",
"0.58809006",
"0.58774006",
"0.5766277",
"0.56740296",
"0.566615",
"0.56462663",
"0.56438863",
"0.55876",
"0.5572704",
"0.55626225",
"0.5543542",
"0.5538186",
"0.5536543",
"0.55057",
"0.54538727"
] | 0.7251213 | 1 |
Run the DeepLens inference loop frame by frame | def infinite_infer_run():
try:
# This cat-dog model is implemented as binary classifier, since the number
# of labels is small, create a dictionary that converts the machine
# labels to human readable labels.
model_type = 'classification'
output_map = {0: 'dog', 1: 'cat'}
# Create an IoT client for sending to messages to the cloud.
client = greengrasssdk.client('iot-data')
iot_topic = '$aws/things/{}/infer'.format(os.environ['AWS_IOT_THING_NAME'])
# Create a local display instance that will dump the image bytes to a FIFO
# file that the image can be rendered locally.
local_display = LocalDisplay('480p')
local_display.start()
# The sample projects come with optimized artifacts, hence only the artifact
# path is required.
model_path = '/opt/awscam/artifacts/mxnet_resnet18-catsvsdogs_FP32_FUSED.xml'
# Load the model onto the GPU.
client.publish(topic=iot_topic, payload='Loading action cat-dog model')
model = awscam.Model(model_path, {'GPU': 1})
client.publish(topic=iot_topic, payload='Cat-Dog model loaded')
# Since this is a binary classifier only retrieve 2 classes.
num_top_k = 2
# The height and width of the training set images
input_height = 224
input_width = 224
# Do inference until the lambda is killed.
while True:
# inference loop to add. See the next step
...
except Exception as ex:
client.publish(topic=iot_topic, payload='Error in cat-dog lambda: {}'.format(ex))
# snippet-end:[deeplens.python.deeplens_inference_lambda.inference_loop]
# snippet-start:[deeplens.python.deeplens_inference_lambda.inference_step]
# Get a frame from the video stream
ret, frame = awscam.getLastFrame()
if not ret:
raise Exception('Failed to get frame from the stream')
# Resize frame to the same size as the training set.
frame_resize = cv2.resize(frame, (input_height, input_width))
# Run the images through the inference engine and parse the results using
# the parser API, note it is possible to get the output of doInference
# and do the parsing manually, but since it is a classification model,
# a simple API is provided.
parsed_inference_results = model.parseResult(model_type,
model.doInference(frame_resize))
# Get top k results with highest probabilities
top_k = parsed_inference_results[model_type][0:num_top_k]
# Add the label of the top result to the frame used by local display.
# See https://docs.opencv.org/3.4.1/d6/d6e/group__imgproc__draw.html
# for more information about the cv2.putText method.
# Method signature: image, text, origin, font face, font scale, color, and thickness
cv2.putText(frame, output_map[top_k[0]['label']], (10, 70),
cv2.FONT_HERSHEY_SIMPLEX, 3, (255, 165, 20), 8)
# Set the next frame in the local display stream.
local_display.set_frame_data(frame)
# Send the top k results to the IoT console via MQTT
cloud_output = {}
for obj in top_k:
cloud_output[output_map[obj['label']]] = obj['prob']
client.publish(topic=iot_topic, payload=json.dumps(cloud_output)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def loop_over_frames(self):\n while Rescue_PI.run_program:\n self.grab_next_frame()\n self.set_dimensions_for_frame()\n self.create_frame_blob()\n self.extract_face_detections()\n for i in range(0, self.detections.shape[2]):\n self.extract_confidence_from_human_detections(i)\n if self.confidence > MIN_CONFIDENCE:\n self.get_class_label(i)\n if self.label == 15:\n self.create_human_box(i)\n self.extract_human_roi()\n if self.f_w < 20 or self.f_h < 20:\n continue\n if self.is_blur(self.human_blob, self.threshold):\n continue\n else:\n self.super_res(self.human_blob)\n self.create_predictions_blob()\n self.extract_detections()\n self.perform_classification()\n if self.name == \"Fighting\":\n print(\"[Prediction] Fighting is occurring\")\n self.play_audio()\n if self.name == \"Crying\":\n print(\"[Prediction] Crying is occurring\")\n self.play_audio()\n if self.name == \"Normal\":\n print(\"[Prediction] Normal\")\n if self.use_graphics:\n self.create_frame_icons()\n cv2.putText(self.orig_frame, self.text, (15, 15), cv2.FONT_HERSHEY_SIMPLEX,\n 0.45, COLORS[self.colorIndex], 2)\n else:\n pass\n if OPEN_DISPLAY:\n cv2.imshow(\"Frame\", self.orig_frame)\n key = cv2.waitKey(1) & 0xFF\n\n if key == ord('q'):\n break",
"def infinite_infer_run():\n try:\n # This cat-dog model is implemented as binary classifier, since the number\n # of labels is small, create a dictionary that converts the machine\n # labels to human readable labels.\n model_type = 'classification'\n output_map = {0: 'dog', 1: 'cat'}\n # Create an IoT client for sending to messages to the cloud.\n client = greengrasssdk.client('iot-data')\n iot_topic = '$aws/things/{}/infer'.format(os.environ['AWS_IOT_THING_NAME'])\n # Create a local display instance that will dump the image bytes to a FIFO\n # file that the image can be rendered locally.\n local_display = LocalDisplay('480p')\n local_display.start()\n # The sample projects come with optimized artifacts, hence only the artifact\n # path is required.\n model_path = '/opt/awscam/artifacts/mxnet_resnet18-catsvsdogs_FP32_FUSED.xml'\n # Load the model onto the GPU.\n client.publish(topic=iot_topic, payload='Loading action cat-dog model')\n model = awscam.Model(model_path, {'GPU': 1})\n client.publish(topic=iot_topic, payload='Cat-Dog model loaded')\n # Since this is a binary classifier only retrieve 2 classes.\n num_top_k = 2\n # The height and width of the training set images\n input_height = 224\n input_width = 224\n # Do inference until the lambda is killed.\n while True:\n # Get a frame from the video stream\n ret, frame = awscam.getLastFrame()\n if not ret:\n raise Exception('Failed to get frame from the stream')\n # Resize frame to the same size as the training set.\n frame_resize = cv2.resize(frame, (input_height, input_width))\n # Run the images through the inference engine and parse the results using\n # the parser API, note it is possible to get the output of doInference\n # and do the parsing manually, but since it is a classification model,\n # a simple API is provided.\n parsed_inference_results = model.parseResult(model_type,\n model.doInference(frame_resize))\n # Get top k results with highest probabilities\n top_k = parsed_inference_results[model_type][0:num_top_k]\n # Add the label of the top result to the frame used by local display.\n # See https://docs.opencv.org/3.4.1/d6/d6e/group__imgproc__draw.html\n # for more information about the cv2.putText method.\n # Method signature: image, text, origin, font face, font scale, color, and thickness\n cv2.putText(frame, output_map[top_k[0]['label']], (10, 70),\n cv2.FONT_HERSHEY_SIMPLEX, 3, (255, 165, 20), 8)\n # Set the next frame in the local display stream.\n local_display.set_frame_data(frame)\n # Send the top k results to the IoT console via MQTT\n cloud_output = {}\n for obj in top_k:\n cloud_output[output_map[obj['label']]] = obj['prob']\n client.publish(topic=iot_topic, payload=json.dumps(cloud_output))\n except Exception as ex:\n client.publish(topic=iot_topic, payload='Error in cat-dog lambda: {}'.format(ex))",
"def run(self):\n count = self.neuron_count\n for i in range(0, count):\n self.run(i)",
"def inference():\n inf_dataset = dataset\n net.eval()\n frames_gen, frame_cnt, rel_props, prop_ticks, prop_scaling = inf_dataset[index]\n \n num_crop = args.test_crops\n length = 3\n if args.modality == 'Flow':\n length = 10\n elif args.modality == 'RGBDiff':\n length = 18\n \n # First get the base_out outputs\n base_output = torch.autograd.Variable(torch.zeros((num_crop, frame_cnt, base_out_dim)).cuda(),\n volatile=True)\n cnt = 0\n for frames in frames_gen:\n # frames.shape == [frame_batch_size * num_crops * 3, 224, 224]\n # frame_batch_size is 4 by default\n input_var = torch.autograd.Variable(frames.view(-1, length, frames.size(-2), frames.size(-1)).cuda(),\n volatile=True)\n base_out = net(input_var, None, None, None, None)\n bsc = base_out.view(num_crop, -1, base_out_dim)\n base_output[:, cnt:cnt+bsc.size(1), :] = bsc\n cnt += bsc.size(1)\n\n n_frames = base_output.size(1)\n assert frame_cnt == n_frames\n # GLCU\n step_features = base_output.mean(dim=0).mean(dim=0).unsqueeze(0)\n gate, glcu_task_pred = net.glcu(step_features)\n glcu_task_pred = F.softmax(glcu_task_pred.squeeze(), dim=0).data.cpu().numpy()\n gate = gate.repeat(1, num_crop * n_frames).view(num_crop, n_frames, base_out_dim)\n if net.additive_glcu:\n base_output = base_output + gate\n else:\n base_output = base_output * gate\n\n # output.shape == [num_frames, 7791]\n output = torch.zeros((frame_cnt, output_dim)).cuda()\n cnt = 0\n for i in range(0, frame_cnt, 4):\n base_out = base_output[:, i:i+4, :].contiguous().view(-1, base_out_dim)\n rst = net.test_fc(base_out)\n sc = rst.data.view(num_crop, -1, output_dim).mean(dim=0)\n output[cnt: cnt + sc.size(0), :] = sc\n cnt += sc.size(0)\n base_output = base_output.mean(dim=0).data\n\n # act_scores.shape == [num_proposals, K+1]\n # comp_scores.shape == [num_proposals, K]\n act_scores, comp_scores, reg_scores = reorg_stpp.forward(output, prop_ticks, prop_scaling)\n act_scores = torch.autograd.Variable(act_scores, volatile=True)\n comp_scores = torch.autograd.Variable(comp_scores, volatile=True)\n\n # Task Head\n combined_scores = F.softmax(act_scores[:, 1:], dim=1) * torch.exp(comp_scores)\n combined_scores = combined_scores.mean(dim=0).unsqueeze(0)\n task_pred = F.softmax(net.task_head(combined_scores).squeeze(), dim=0).data.cpu().numpy()\n\n act_scores = act_scores.data\n comp_scores = comp_scores.data\n\n if reg_scores is not None:\n reg_scores = reg_scores.view(-1, num_class, 2)\n reg_scores[:, :, 0] = reg_scores[:, :, 0] * stats[1, 0] + stats[0, 0]\n reg_scores[:, :, 1] = reg_scores[:, :, 1] * stats[1, 1] + stats[0, 1]\n\n torch.cuda.empty_cache() # To empty the cache from previous iterations\n\n # perform stpp on scores\n return ((inf_dataset.video_list[index].id,\n (rel_props.numpy(), act_scores.cpu().numpy(), comp_scores.cpu().numpy(), reg_scores.cpu().numpy(), \n glcu_task_pred, task_pred),\n output.cpu().numpy(),\n base_output.cpu().numpy()))",
"def run(self):\r\n self.create_output_dirs()\r\n data = self.read_input()\r\n while (data):\r\n # Initiate ORB detector\r\n orb = cv2.ORB_create()\r\n\r\n if (self.continu):\r\n current_frame_nr = data[\"frameNr\"]\r\n if(current_frame_nr > 1):\r\n self.determine_flow(orb, current_frame_nr)\r\n else:\r\n #Read first image\r\n self.previous_frame_path = os.path.join(self.frames_dir, '%05d.png' % 1)\r\n self.previous_frame = cv2.imread(self.previous_frame_path, 0) # queryImage\r\n # Find the keypoints and descriptors with ORB\r\n self.kp_previous_frame, self.des_previous_frame = orb.detectAndCompute(self.previous_frame, None)\r\n elif(self.stitch_completed):\r\n self.write_stitched_image()\r\n self.continu = False\r\n self.stitch_completed = True\r\n return\r\n data = self.read_input()",
"def _process(self):\n while True:\n with Timer() as data_timer:\n frame = self._frames_q.get()\n\n with Timer() as agent_timer:\n s, frame_metadata = self._unwrap_frame(frame)\n s = np.expand_dims(s, 0) # batch\n act = self.pred(s)[0][0].argmax()\n put_overwrite(self._actions_q, self._wrap_action(act, frame_metadata))\n\n print('.', end='', flush=True)\n if self.verbose:\n print('Avg data wait time: %.3f' % data_timer.time())\n print('Avg agent neural net eval time: %.3f' % agent_timer.time())",
"def inference(self):\n for i in range(len(self.nodes)):\n for j in range(len(self.nodes[i])):\n self.pipes[i][j].send(\"inference\")\n \n ## wait for the finalization to be completed\n for i in range(len(self.nodes)):\n for j in range(len(self.nodes[i])):\n self.pipes[i][j].recv()",
"def step(self):\n for layer in self.layers:\n layer.step()",
"def run(layers):",
"def run_inference(loop_op: tf.Operation, infeed_queue_initializer: tf.Operation, outfeed_op: tf.Operation,\n batch_size: int, batches_per_step: int, network_name: str,\n decode_predictions: Callable, ground_truth: Tuple[str], num_iterations: Optional[int] = 500,\n num_ipus: Optional[int] = 1, mode: Optional[str] = \"single_ipu\",\n data: Optional[str] = \"real\", available_memory_proportion: Optional[float] = 0.6) -> None:\n # Set compile and device options\n opts = IPUConfig()\n opts.matmuls.poplar_options = {'availableMemoryProportion': str(\n available_memory_proportion)}\n opts.convolutions.poplar_options = {'availableMemoryProportion': str(\n available_memory_proportion)}\n\n if mode == 'replicated':\n num_replicas = num_ipus\n os.environ[\"TF_POPLAR_FLAGS\"] += \" --force_replicated_mode\"\n else:\n num_replicas = 1\n opts.auto_select_ipus = num_ipus\n opts.configure_ipu_system()\n with tf.Session() as session:\n session.run(infeed_queue_initializer)\n fps = []\n for iter_count in range(num_iterations):\n start = time.time()\n session.run(loop_op)\n predictions = session.run(outfeed_op)\n stop = time.time()\n fps.append(batch_size * batches_per_step * num_replicas / (stop - start))\n logging.info(\n \"Iter {4}: {0} Throughput using {1} data = {2:.1f} imgs/sec at batch size = {3}\".format(network_name,\n data,\n fps[-1],\n batch_size,\n iter_count))\n duration = stop - start\n report_string = \"{:<7.3} sec/itr.\".format(duration)\n report_string += \" {:5f} images/sec.\".format(fps[-1])\n print(report_string)\n print(\"Total time: {}\".format(duration))\n\n # Decode a random prediction per step to check functional correctness.\n if data == 'real':\n predictions = np.reshape(predictions, (-1, predictions.shape[-1]))\n index = np.random.randint(0, len(predictions))\n if network_name in (\"inceptionv1\", \"efficientnet-s\", \"efficientnet-m\", \"efficientnet-l\"):\n # These models encode background in 0th index.\n decoded_predictions = decode_predictions(predictions[index: index + 1, 1:], top=3)\n else:\n decoded_predictions = decode_predictions(predictions[index: index + 1, :], top=3)\n labels_and_probs = [(label, prob) for _, label, prob in decoded_predictions[0]]\n print('Actual: ',\n ground_truth[\n (index + num_replicas * iter_count * batches_per_step * batch_size) % len(ground_truth)])\n print('Predicted: ', labels_and_probs)\n\n print(\"Average statistics excluding the 1st 20 iterations.\")\n print(\"-------------------------------------------------------------------------------------------\")\n fps = fps[20:]\n print(\"Throughput at bs={}, data_mode={}, data_type={}, mode={},\"\n \" num_ipus={}, of {}: min={}, max={}, mean={}, std={}.\".format(batch_size,\n data,\n predictions.dtype,\n mode,\n num_ipus,\n network_name,\n min(fps),\n max(fps),\n np.mean(fps),\n np.std(fps)))",
"def infer(self, n_iter=150):\n if self.ppm:\n print(\"Running infer is forbidden for principled predictive model.\")\n return\n if DEBUG:\n # fix some variables to their true values\n self._fix_post_assigns(self.ground_truth['true_omega'], self.ground_truth['true_beta'])\n\n with self.sess.as_default():\n for i in range(n_iter):\n\n # users\n start_time = time.time()\n self.sess.run(self.u_update_one, feed_dict={self.edge_idx: self.edge_idx_d})\n self.sess.run(self.u_update_two, feed_dict={self.edge_idx: self.edge_idx_d})\n\n # items\n if not(self.fix_item_params):\n start_time = time.time()\n self.sess.run(self.i_update_one, feed_dict={self.edge_idx: self.edge_idx_d})\n self.sess.run(self.i_update_two, feed_dict={self.edge_idx: self.edge_idx_d})\n\n # edges\n start_time = time.time()\n if self.simple_graph:\n for sg_edge_param_update in self.sg_edge_param_update:\n self.sess.run(sg_edge_param_update, feed_dict={self.edge_idx: self.edge_idx_d})\n else:\n for lphi_update in self.lphi_update:\n self.sess.run(lphi_update, feed_dict={self.edge_idx: self.edge_idx_d})\n\n # mean degree (caching)\n start_time = time.time()\n self.sess.run(self.deg_update, feed_dict={self.edge_vals: self.edge_vals_d, self.edge_idx: self.edge_idx_d})\n\n ### Print the total item and user mass ###\n if np.mod(i, 30) == 0:\n self._logging(i)\n print(\"appx_elbo: {}\".format(self.sess.run(self.appx_elbo,\n feed_dict={self.edge_idx: self.edge_idx_d})))\n\n ## DONE TRAINING\n self.user_affil_est = to_prob(self.theta_shp / self.theta_rte).eval()\n self.item_affil_est = to_prob(self.beta_shp / self.beta_rte).eval()\n if DEBUG: \n self.true_user_affil = to_prob(self.ground_truth['true_theta']).eval()\n self.true_item_affil = to_prob(self.ground_truth['true_beta']).eval()\n\n # User params\n gam_shp, gam_rte, theta_shp, theta_rte, g = self.sess.run([self.gam_shp, self.gam_rte, self.theta_shp, self.theta_rte, self.g])\n\n # Item params\n omega_shp, omega_rte, beta_shp, beta_rte, w = self.sess.run([self.omega_shp, self.omega_rte, self.beta_shp, self.beta_rte, self.w])\n\n return gam_shp, gam_rte, theta_shp, theta_rte, g, omega_shp, omega_rte, beta_shp, beta_rte, w",
"def inference(self, inputs):\n # test_2\n memory = self.get_go_frame(inputs)\n memory = self._update_memory(memory)\n\n self._init_states(inputs, mask=None)\n self.attention.init_states(inputs)\n\n outputs, stop_tokens, alignments, t = [], [], [], 0\n while True:\n memory = self.prenet(memory)\n decoder_output, alignment, stop_token = self.decode(memory)\n stop_token = torch.sigmoid(stop_token.data)\n outputs += [decoder_output.squeeze(1)]\n stop_tokens += [stop_token]\n alignments += [alignment]\n\n if stop_token > self.stop_threshold and t > inputs.shape[0] // 2:\n break\n if len(outputs) == self.max_decoder_steps:\n print(\" | > Decoder stopped with 'max_decoder_steps\")\n break\n\n memory = self._update_memory(decoder_output)\n t += 1\n\n outputs, stop_tokens, alignments = self._parse_outputs(\n outputs, stop_tokens, alignments)\n\n return outputs, alignments, stop_tokens",
"def _inference_step(self, state):\n\n decoder_inputs = state[\"inputs\"]\n encoder_outputs = state[\"encoder_outputs\"]\n attention_bias = state[\"encoder_decoder_attention_bias\"]\n alignment_positions = state[\"alignment_positions\"]\n\n outputs = self._decode_pass(\n decoder_inputs=decoder_inputs,\n encoder_outputs=encoder_outputs,\n enc_dec_attention_bias=attention_bias,\n alignment_positions=alignment_positions\n )\n\n with tf.variable_scope(\"inference_step\"):\n next_inputs_mel = outputs[\"post_net_spec\"][:, -1:, :]\n next_inputs_mel = self._expand(next_inputs_mel, self.reduction_factor)\n next_inputs_mag = outputs[\"mag_spec\"][:, -1:, :]\n next_inputs_mag = self._expand(next_inputs_mag, self.reduction_factor)\n next_inputs = tf.concat([next_inputs_mel, next_inputs_mag], axis=-1)\n\n n_features = self.num_mels + self.num_freq\n next_inputs = self._shrink(next_inputs, n_features, self.reduction_factor)\n\n # Set zero if sequence is finished\n next_inputs = tf.where(\n state[\"finished\"],\n tf.zeros_like(next_inputs),\n next_inputs\n )\n next_inputs = tf.concat([decoder_inputs, next_inputs], 1)\n\n # Update lengths\n lengths = state[\"outputs\"][\"lengths\"]\n lengths = tf.where(\n state[\"finished\"],\n lengths,\n lengths + 1 * self.reduction_factor\n )\n outputs[\"lengths\"] = lengths\n\n # Update spec, post_net_spec and mag_spec\n for key in [\"spec\", \"post_net_spec\", \"mag_spec\"]:\n output = outputs[key][:, -1:, :]\n output = tf.where(state[\"finished\"], tf.zeros_like(output), output)\n outputs[key] = tf.concat([state[\"outputs\"][key], output], 1)\n\n # Update stop token logits\n stop_token_logits = outputs[\"stop_token_logits\"][:, -1:, :]\n stop_token_logits = tf.where(\n state[\"finished\"],\n tf.zeros_like(stop_token_logits) + 1e9,\n stop_token_logits\n )\n stop_prediction = tf.sigmoid(stop_token_logits)\n stop_prediction = tf.reduce_max(stop_prediction, axis=-1)\n\n # Uncomment next line if you want to use stop token predictions\n finished = tf.reshape(tf.cast(tf.round(stop_prediction), tf.bool), [-1])\n finished = tf.reshape(finished, [-1])\n\n stop_token_logits = tf.concat(\n [state[\"outputs\"][\"stop_token_logits\"], stop_token_logits],\n axis=1\n )\n outputs[\"stop_token_logits\"] = stop_token_logits\n\n with tf.variable_scope(\"alignments\"):\n weights = []\n for index, attention in enumerate(self.attentions):\n if isinstance(attention, AttentionBlock):\n weights.append(attention.multiheaded_attention.attention_weights)\n\n weights = tf.stack(weights)\n outputs[\"alignments\"] = [weights]\n\n alignment_positions = tf.argmax(\n weights,\n axis=-1,\n output_type=tf.int32\n )[:, :, :, -1:]\n state[\"alignment_positions\"] = tf.concat(\n [state[\"alignment_positions\"], alignment_positions],\n axis=-1\n )\n\n state[\"iteration\"] = state[\"iteration\"] + 1\n state[\"inputs\"] = next_inputs\n state[\"finished\"] = finished\n state[\"outputs\"] = outputs\n\n return state",
"def _next_test(self):\n idx = self.it\n self.it = (self.it + 1) % self.n_examples\n\n if self.render_path:\n target_view = data_types.Views(\n rays=jax.tree_map(lambda r: r[idx], self.render_rays),)\n else:\n target_view = data_types.Views(\n rays=jax.tree_map(lambda r: r[idx], self.rays), rgb=self.images[idx])\n\n #--------------------------------------------------------------------------------------\n # Get the reference data\n batch_near_cam_idx = self.sorted_near_cam[idx]\n ref_images = self.train_images[batch_near_cam_idx]\n ref_images = ref_images.reshape(ref_images.shape[0], self.h, self.w, 3)\n\n ref_cameratoworld = self.train_camtoworlds[batch_near_cam_idx]\n ref_worldtocamera = self.train_worldtocamera[batch_near_cam_idx]\n\n #--------------------------------------------------------------------------------------\n # Replicate these so that they may be distributed onto several devices for\n # parallel computaion.\n l_devices = jax.local_device_count()\n reference_views = data_types.ReferenceViews(\n rgb=np.tile(ref_images, (l_devices, 1, 1, 1)),\n ref_worldtocamera=np.tile(ref_worldtocamera, (l_devices, 1, 1)),\n ref_cameratoworld=np.tile(ref_cameratoworld, (l_devices, 1, 1)),\n intrinsic_matrix=np.tile(self.intrinsic_matrix[None, :],\n (l_devices, 1, 1)),\n idx=np.tile(batch_near_cam_idx[None, :], (jax.local_device_count(), 1)),\n )\n\n return_batch = data_types.Batch(\n target_view=target_view, reference_views=reference_views)\n\n return return_batch",
"def Advance():\n warp.step()",
"def main():\n\n # Experiment Start\n start_time = datetime.now()\n logger.info(\n '################ Bergson Team Experiment Start #################')\n logger.info(\n f'Starting Bergson Astro Pi team experiment at {start_time.strftime(\"%Y-%m-%d %H:%M:%S\")}')\n\n '''\n # Load simple Conv2D AI Model\n logger.info(\"Loading AI Convolutional Model\")\n conv2D_model = load_model(\"Conv2D_TF114\")\n '''\n\n # Load TFLite Model\n logger.info(\"Loading TFLite Mobilenetv2 Model\")\n mobilenetv2_interpreter = load_tflite_model(\"./Mobilenetv2_TF114.tflite\")\n\n # Create Log File\n logger.info(f'Creating Log file at {str(data_file)}')\n with open(data_file, 'w') as f:\n writer = csv.writer(f)\n header = (\"Date/time\", \"Location\", \"Picture Name\", \"Predicted NO2\")\n writer.writerow(header)\n\n # Start Loop over 3 hours\n\n now_time = datetime.now()\n i = 0\n # run a loop for 2 minutes\n while (now_time < start_time + timedelta(minutes=175)):\n\n # Take Earth Picture\n timestamp = datetime.now().strftime(\"%Y-%m-%d_%H:%M:%S\")\n pic_name = f'bergson_img_{timestamp}.jpg'\n capture(rpi_cam, str(dir_path/pic_name))\n logger.info(f'Experiment Pipeline {i} on picture {pic_name}')\n\n # NDVI Preprocessing\n ndvi_image = get_ndvi(str(dir_path/pic_name))\n ndvi_image = np.expand_dims(ndvi_image, axis=2)\n\n # RGB Prepprocessing for expected shape by Mobilenetv2 - comment below line when using simple Conv2D model\n ndvi_rgb_image = get_ndvi_rgb(ndvi_image)\n\n '''\n # Do Inference with simple Conv2D AI Model\n prediction = make_inference(ndvi_image,conv2D_model)\n '''\n \n # Do Inference with TFLite Model\n ndvi_rgb_image = ndvi_rgb_image.astype('float32')\n prediction = make_tflite_inference(\n ndvi_rgb_image, mobilenetv2_interpreter)\n\n # Get Decoded Inference results\n decoded_prediction = decode_prediction(prediction)\n\n # Write Prediction as CSV to disk\n logger.info(\n f'Logging NO2 prediction \\\"{decoded_prediction}\\\" for {pic_name}')\n exif_data = get_img_exif(pic_name, iss, decoded_prediction)\n row = (exif_data['Date/Time'], exif_data['Location'],\n pic_name, exif_data['NO2'])\n with open(data_file, mode='a') as f:\n writer = csv.writer(f)\n writer.writerow(row)\n\n # update the current time\n now_time = datetime.now()\n i = i+1\n\n # End Loop over 3 hours\n\n # Experiment End\n end_time = datetime.now()\n logger.info(\n f'Finishing Bergson Astro Pi team experiment at {end_time.strftime(\"%Y-%m-%d %H:%M:%S\")}')\n experiment_time = end_time - start_time\n logger.info(f'Bergson Astro Pi team experiment run time {experiment_time}')\n logger.info('################ Bergson Team Experiment End #################')",
"def infer_on_stream(args, client):\n count_current = 0\n count_last = 0\n count_last_last = 0\n total_count = 0\n duration = 0\n avg_duration = 0\n total_duration = 0\n start_time = 0\n active_person = 0\n net_input_shape = []\n frame_count = 0\n\n # Initialise the class\n infer_network = Network()\n # Set Probability threshold for detections\n prob_threshold = args.prob_threshold\n\n ### TODO: Load the model through `infer_network` ###\n infer_network.load_model(model=args.model, device=args.device, cpu_extension=args.cpu_extension)\n\n ### TODO: Handle the input stream ###\n cap = cv2.VideoCapture(args.input)\n cap.open(args.input)\n\n # get the required shape for the network\n net_input_shape = infer_network.get_input_shape()\n\n # get the shape of the input image\n width = int(cap.get(3))\n height = int(cap.get(4))\n\n if net_input_shape != [1, 3, 600, 600]:\n #net_input_shape = [1, 3, 600, 600]\n #sometimes gives [1,3] and causes an error, so hard coded shape to match model\n sys.exit(\"Input shape error, forced exit. Please run again until this error does not appear.\")\n\n ### TODO: Loop until stream is over ###\n while cap.isOpened():\n\n ### TODO: Read from the video capture ###\n flag, frame = cap.read()\n frame_count += 1\n\n if not flag:\n #video stream ended, go to end and close out\n break\n\n ### TODO: Start asynchronous inference for specified request ###\n if frame_count%2 == 0: #check every other frame\n ### TODO: Pre-process the image as needed ###\n vid_frame = cv2.resize(frame, (net_input_shape[3], net_input_shape[2]))\n #save a copy of the input frame to use on output\n vid_frame_copy = vid_frame\n vid_frame = vid_frame.transpose((2, 0, 1))\n vid_frame = vid_frame.reshape(1, *vid_frame.shape)\n\n infer_network.exec_net(vid_frame)\n\n ### TODO: Wait for the result ###\n if infer_network.wait() == 0:\n\n ### TODO: Get the results of the inference request ###\n results = infer_network.get_output()\n\n # for this model, results should be shape [1, 1, N, 7]\n # N is number of hits, last is a 7 item list [image_id, label, conf, x_min,\n # y_min, x_max, y_max] where label is the predicted class\n\n ### TODO: Extract any desired stats from the results ###\n out_frame, count_current, box = draw_boxes(vid_frame_copy, results, args, net_input_shape[3], net_input_shape[2])\n #out_frame = cv2.putText(out_frame, \"Last Frame Analyzed = \"+str(frame_count), (10, 420), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (255, 0, 0), 1, cv2.LINE_AA)\n\n ### TODO: Calculate and send relevant information on ###\n ### count_current, total_count and duration to the MQTT server ###\n ### Topic \"person\": keys of \"count\" and \"total\" ###\n ### Topic \"person/duration\": key of \"duration\" ###\n\n # This block of code from Mentor Help question 129845, some modifications by me\n # If both last and last_last are equal, positive ID for two frames.\n if count_current > count_last and count_last_last == count_last:\n start_time = time.time()\n total_count = total_count + count_current - count_last\n\n #client.publish(\"person\", json.dumps({\"total\": total_count}))\n client.publish(\"person\", json.dumps({\"count\": count_current}))\n\n #out_frame = cv2.putText(out_frame, \"Current Time = \"+str('% 6.2f' % time.time()), (10, 450), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (255, 0, 0), 1, cv2.LINE_AA)\n out_frame = cv2.putText(out_frame, \"Person Entered Frame = \"+str(count_current), (10, 510), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 0), 1, cv2.LINE_AA)\n out_frame = cv2.putText(out_frame, \"Total Counted = 
\"+str(total_count), (10, 540), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 0), 1, cv2.LINE_AA)\n\n # Person duration in the video is calculated if two frames of no detect to account for skipped frame\n if count_current < count_last_last and count_last < count_last_last:\n duration = int(time.time() - start_time)\n total_duration += duration / 11 #frames per second and evaluating only every other frame\n avg_duration = int(total_duration / total_count)\n client.publish(\"person/duration\", json.dumps({\"duration\": avg_duration}))\n\n #out_frame = cv2.putText(out_frame, \"Duration = \"+str('% 6.2f' % duration), (10, 540), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (255, 0, 0), 1, cv2.LINE_AA)\n out_frame = cv2.putText(out_frame, \"Average Duration = \" + str('% 4.2f' % avg_duration) + \" seconds.\", (10, 570), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 0), 1, cv2.LINE_AA)\n\n # Set a double counter to review two frames at a time\n count_last_last = count_last\n count_last = count_current\n #End block of code from Mentor Help question 129845\n\n\n ### TODO: Send the frame to the FFMPEG server ###\n out_frame = out_frame.copy(order='C')\n out_frame = cv2.resize(out_frame, (width, height))\n np.ascontiguousarray(out_frame, dtype=np.float32)\n sys.stdout.buffer.write(out_frame)\n sys.stdout.flush()\n\n ### TODO: Write an output image if `single_image_mode` ###\n\n #Release the capture and destroy any OpenCV windows\n cap.release()\n cv2.destroyAllWindows()\n\n #Disconnect from MQTT\n client.disconnect()\n\n #Print final numbers for reference\n print(\"Video stream ended.\")\n print(\"Final count was \" + str(total_count))\n print(\"Average Duration was \" + str(avg_duration) + \" seconds.\")",
"def greengrass_infinite_infer_run():\n try:\n model_type, output_map, client, iot_topic, local_display, model_path, model, detection_threshold, input_height, input_width = init_greengrass()\n # Do inference until the lambda is killed.\n \n while True:\n t2 = Thread(target = upload_image)\n t1 = Thread(target = capture_img, args=[model_type, output_map, client, iot_topic, local_display, model_path, model, detection_threshold, input_height, input_width])\n t1.start()\n t2.start()\n t1.join()\n t2.join()\n # capture_img(model_type, output_map, client, iot_topic, local_display, model_path, model, detection_threshold, input_height, input_width)\n except Exception as ex:\n client.publish(topic=iot_topic, payload='Error in face detection lambda: {}'.format(ex))",
"def infer():\n\n # Create StreamManagerApi object\n stream_manager_api = StreamManagerApi()\n # Use InitManager method init StreamManagerApi\n ret = stream_manager_api.InitManager()\n if ret != 0:\n print(\"Failed to init Stream manager, ret=%s\" % str(ret))\n exit()\n\n # create streams by pipeline config file\n with open(args.pipeline_path, \"rb\") as f:\n pipeline_str = f.read()\n\n # Configuring a stream\n ret = stream_manager_api.CreateMultipleStreams(pipeline_str)\n if ret != 0:\n print(\"Failed to create Stream, ret=%s\" % str(ret))\n exit()\n\n # Construct the input of the stream\n data_input = MxDataInput()\n # Stream_name encoded in UTF-8\n stream_name = args.stream_name.encode()\n print(stream_name)\n predictions = []\n with open(args.label_path, 'rt') as f:\n val_cls = f.read().rstrip(\"\\n\").split(\"\\n\")\n val_cls_dict = {}\n for i, cls in enumerate(val_cls):\n val_cls_dict[i] = cls\n coco_gt = COCO(args.instances_path)\n classs_dict = {}\n cat_ids = coco_gt.loadCats(coco_gt.getCatIds())\n for cat in cat_ids:\n classs_dict[cat[\"name\"]] = cat[\"id\"]\n\n for file_name in os.listdir(args.img_path):\n pred_data = []\n # Gets the Address of each image\n img_id = int(file_name.split('.')[0])\n file_path = args.img_path + file_name\n size = (cv2.imread(file_path)).shape\n\n # Read each photo in turn\n with open(file_path, \"rb\") as f:\n img_data = f.read()\n if not img_data:\n print(f\"read empty data from img:{file_name}\")\n continue\n # The element value img_data\n data_input.data = img_data\n boxes_output, scores_output = send_data_get_output(stream_name, data_input, stream_manager_api)\n pred_data.append({\"boxes\": boxes_output,\n \"box_scores\": scores_output,\n \"img_id\": img_id,\n \"image_shape\": size})\n\n parse_img_infer_result(pred_data[0], predictions, val_cls_dict, classs_dict)\n print(f\"Inferred image:{file_name} success!\")\n\n # Save the result in JSON format\n if not os.path.exists(args.res_path):\n os.makedirs(args.res_path)\n with open(args.res_path + 'predictions_test.json', 'w') as f:\n json.dump(predictions, f)\n stream_manager_api.DestroyAllStreams()",
"def run_tracker(p):\n # load model\n net = torch.load(os.path.join(p.net_base_path, p.net))\n net = net.to(device)\n\n # evaluation mode\n net.eval()\n\n # load sequence\n img_list, target_position, target_size = load_sequence(p.seq_base_path, p.video)\n\n # first frame\n img_uint8 = cv2.imread(img_list[0])\n img_uint8 = cv2.cvtColor(img_uint8, cv2.COLOR_BGR2RGB)\n img_double = np.double(img_uint8) # uint8 to float\n\n # compute avg for padding\n avg_chans = np.mean(img_double, axis=(0, 1))\n\n wc_z = target_size[1] + p.context_amount * sum(target_size)\n hc_z = target_size[0] + p.context_amount * sum(target_size)\n s_z = np.sqrt(wc_z * hc_z)\n scale_z = p.examplar_size / s_z\n\n # crop examplar z in the first frame\n z_crop = get_subwindow_tracking(img_double, target_position, p.examplar_size, round(s_z), avg_chans)\n\n z_crop = np.uint8(z_crop) # you need to convert it to uint8\n # convert image to tensor\n z_crop_tensor = 255.0 * F.to_tensor(z_crop).unsqueeze(0)\n\n d_search = (p.instance_size - p.examplar_size) / 2\n pad = d_search / scale_z\n s_x = s_z + 2 * pad\n # arbitrary scale saturation\n min_s_x = p.scale_min * s_x\n max_s_x = p.scale_max * s_x\n\n # generate cosine window\n if p.windowing == 'cosine':\n window = np.outer(np.hanning(p.score_size * p.response_UP), np.hanning(p.score_size * p.response_UP))\n elif p.windowing == 'uniform':\n window = np.ones((p.score_size * p.response_UP, p.score_size * p.response_UP))\n window = window / sum(sum(window))\n\n # pyramid scale search\n scales = p.scale_step**np.linspace(-np.ceil(p.num_scale/2), np.ceil(p.num_scale/2), p.num_scale)\n\n # extract feature for examplar z\n z_features = net.feat_extraction(Variable(z_crop_tensor).to(device))\n z_features = z_features.repeat(p.num_scale, 1, 1, 1)\n\n # do tracking\n bboxes = np.zeros((len(img_list), 4), dtype=np.double) # save tracking result\n start_time = datetime.datetime.now()\n for i in range(0, len(img_list)):\n if i > 0:\n # do detection\n # currently, we only consider RGB images for tracking\n img_uint8 = cv2.imread(img_list[i])\n img_uint8 = cv2.cvtColor(img_uint8, cv2.COLOR_BGR2RGB)\n img_double = np.double(img_uint8) # uint8 to float\n\n scaled_instance = s_x * scales\n scaled_target = np.zeros((2, scales.size), dtype = np.double)\n scaled_target[0, :] = target_size[0] * scales\n scaled_target[1, :] = target_size[1] * scales\n\n # extract scaled crops for search region x at previous target position\n x_crops = make_scale_pyramid(img_double, target_position, scaled_instance, p.instance_size, avg_chans, p)\n\n # get features of search regions\n x_crops_tensor = torch.FloatTensor(x_crops.shape[3], x_crops.shape[2], x_crops.shape[1], x_crops.shape[0])\n # response_map = SiameseNet.get_response_map(z_features, x_crops)\n for k in range(x_crops.shape[3]):\n tmp_x_crop = x_crops[:, :, :, k]\n tmp_x_crop = np.uint8(tmp_x_crop)\n # numpy array to tensor\n x_crops_tensor[k, :, :, :] = 255.0 * F.to_tensor(tmp_x_crop).unsqueeze(0)\n\n # get features of search regions\n x_features = net.feat_extraction(Variable(x_crops_tensor).to(device))\n\n # evaluate the offline-trained network for exemplar x features\n target_position, new_scale = tracker_eval(net, round(s_x), z_features, x_features, target_position, window, p)\n\n # scale damping and saturation\n s_x = max(min_s_x, min(max_s_x, (1 - p.scale_LR) * s_x + p.scale_LR * scaled_instance[int(new_scale)]))\n target_size = (1 - p.scale_LR) * target_size + p.scale_LR * np.array([scaled_target[0, int(new_scale)], scaled_target[1, 
int(new_scale)]])\n\n rect_position = np.array([target_position[1]-target_size[1]/2, target_position[0]-target_size[0]/2, target_size[1], target_size[0]])\n\n if p.visualization:\n visualize_tracking_result(img_uint8, rect_position, 1)\n\n # output bbox in the original frame coordinates\n o_target_position = target_position\n o_target_size = target_size\n bboxes[i,:] = np.array([o_target_position[1]-o_target_size[1]/2, o_target_position[0]-o_target_size[0]/2, o_target_size[1], o_target_size[0]])\n\n end_time = datetime.datetime.now()\n fps = len(img_list)/max(1.0, (end_time-start_time).seconds)\n\n return bboxes, fps",
"def _run(self):\n if not self.is_train:\n return self.test() \n\n logger.debug(\"Actor {} resuming at Step {}, {}\".format(self.actor_id, \n self.global_step.value(), time.ctime()))\n\n s = self.emulator.get_initial_state()\n \n s_batch = []\n a_batch = []\n y_batch = []\n bonuses = deque(maxlen=100)\n\n exec_update_target = False\n total_episode_reward = 0\n episode_ave_max_q = 0\n episode_over = False\n qmax_down = 0\n qmax_up = 0\n prev_qmax = -10*6\n low_qmax = 0\n ep_t = 0\n \n while (self.global_step.value() < self.max_global_steps):\n # Sync local learning net with shared mem\n self.sync_net_with_shared_memory(self.local_network, self.learning_vars)\n self.save_vars()\n\n rewards = []\n states = []\n actions = []\n local_step_start = self.local_step\n \n while not episode_over:\n logger.debug('steps: {} / {}'.format(self.global_step.value(), self.max_global_steps))\n # Choose next action and execute it\n a, readout_t = self.choose_next_action(s)\n\n new_s, reward, episode_over = self.emulator.next(a)\n total_episode_reward += reward\n\n current_frame = new_s[...,-1]\n bonus = self.density_model.update(current_frame)\n bonuses.append(bonus)\n\n if (self.actor_id == 0) and (self.local_step % 200 == 0):\n bonus_array = np.array(bonuses)\n logger.debug('Mean Bonus={:.4f} / Max Bonus={:.4f}'.format(\n bonus_array.mean(), bonus_array.max()))\n\n # Rescale or clip immediate reward\n # reward = self.rescale_reward(reward + bonus)\n reward = self.rescale_reward(reward)\n ep_t += 1\n \n rewards.append(reward)\n states.append(s)\n actions.append(a)\n \n s = new_s\n self.local_step += 1\n episode_ave_max_q += np.max(readout_t)\n \n global_step, update_target = self.global_step.increment(\n self.q_target_update_steps)\n\n if update_target:\n update_target = False\n exec_update_target = True\n\n if self.local_step % 4 == 0:\n self.batch_update()\n \n self.local_network.global_step = global_step\n\n else:\n mc_returns = list()\n running_total = 0.0\n for r in reversed(rewards):\n running_total = r + self.gamma*running_total\n mc_returns.insert(0, running_total)\n\n mixed_returns = self.cts_eta*np.array(rewards) + (1-self.cts_eta)*np.array(mc_returns)\n\n states.append(new_s)\n episode_length = len(rewards)\n for i in range(episode_length):\n self.replay_memory.append((\n states[i],\n actions[i],\n mixed_returns[i],\n states[i+1],\n i+1 == episode_length))\n\n \n if exec_update_target:\n self.update_target()\n exec_update_target = False\n # Sync local tensorflow target network params with shared target network params\n if self.target_update_flags.updated[self.actor_id] == 1:\n self.sync_net_with_shared_memory(self.target_network, self.target_vars)\n self.target_update_flags.updated[self.actor_id] = 0\n\n s, total_episode_reward, _, ep_t, episode_ave_max_q, episode_over = \\\n self.prepare_state(s, total_episode_reward, self.local_step, ep_t, episode_ave_max_q, episode_over)",
"def _run_one_iteration(self, iteration):\n statistics = iteration_statistics.IterationStatistics()\n logging.info('Starting iteration %d', iteration)\n num_episodes_train, average_reward_train, average_steps_per_second = (\n self._run_train_phase(statistics))\n active_num_episodes_eval, active_average_reward_eval = self._run_eval_phase(\n statistics, 'active')\n passive_num_episodes_eval, passive_average_reward_eval = (\n self._run_eval_phase(statistics, 'passive'))\n\n self._save_tensorboard_summaries(iteration, num_episodes_train,\n average_reward_train,\n active_num_episodes_eval,\n active_average_reward_eval,\n passive_num_episodes_eval,\n passive_average_reward_eval,\n average_steps_per_second)\n return statistics.data_lists",
"def loop_and_detect(cam, runtime, trt_yolov3, conf_th, vis):\n\n while True:\n if cv2.getWindowProperty(WINDOW_NAME, 0) < 0:\n break\n timer = cv2.getTickCount()\n img = cam.read().copy()\n if img is not None:\n if runtime:\n boxes, confs, label, _preprocess_time, _postprocess_time,_network_time = trt_yolov3.detect(img, conf_th)\n img, _visualize_time = vis.draw_bboxes(img, boxes, confs, label)\n time_stamp = record_time(_preprocess_time, _postprocess_time, _network_time, _visualize_time)\n show_runtime(time_stamp)\n else:\n boxes, confs, label, _, _, _ = trt_yolov3.detect(img, conf_th)\n img, _ = vis.draw_bboxes(img, boxes, confs, label)\n \n fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)\n img = show_fps(img, fps)\n cv2.imshow(WINDOW_NAME, img)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break",
"def detect(self):\n # process the input video and get the attributes:\n self.process_video()\n\n # build a rcnn/ yolov5 predictor:\n self.build_predictor()\n\n \n # assert not os.path.isfile(args.output_file), \"File with the name %s already exists\"%args.output_file\n # build the writer with same attributes:\n self.vid_writer = cv2.VideoWriter(self.output, self.fourcc, self.fps, (self.w, self.h))\n\n # inference time:\n start = time.time()\n print(\"Started inference\\n\")\n \n # progress bar using tqdm:\n pbar = tqdm(total=self.nframes)\n\n while(self.cap.isOpened()):\n ret, frame = self.cap.read()\n if ret == False:\n break # when the last frame is read \n\n # different formats of results:\n if self.library == \"yolov5\":\n # predict and bring the outputs to cpu:\n results = self.predictor(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)) # convert to RGB\n predictions = results.xyxy[0].cpu()\n # find the instance indices with person:\n person_idx = predictions[:,5] == self.label_dict[\"person\"]\n # extract the corresponding boxes and scores:\n boxes = predictions[person_idx,:4].numpy()\n probs = predictions[person_idx,4].numpy()\n\n if self.library == \"detectron2\":\n # predict and bring the outputs to cpu:\n results = self.predictor(frame) # RGB conversion done automatically in detectron\n predictions = results[\"instances\"].to(\"cpu\")\n # find the instance indices with person:\n person_idx = [predictions.pred_classes == self.label_dict[\"person\"]]\n # extract the corresponding boxes and scores:\n boxes = predictions.pred_boxes[person_idx].tensor.numpy()\n probs = predictions.scores[person_idx].numpy()\n\n # draw boxes and write the frame to the video:\n if len(boxes): # check whether there are predictions\n box_frame = self.draw_person_boxes(frame, boxes, probs)\n else:\n box_frame = frame\n self.vid_writer.write(box_frame)\n\n pbar.update(1)\n pbar.close()\n\n # release the video capture object and write object:\n self.cap.release()\n self.vid_writer.release()\n\n print(\"Inferene on the video file took %0.3f seconds\"%(time.time()-start))",
"def run(self) -> None:\n for episode in range(1, self.episodes + 1):\n print('Episode:', episode)\n steps, state_action_history = self.run_one_episode()\n self.steps_per_episode.append(steps)\n if episode % parameters.CACHING_INTERVAL == 0 or steps < 1000:\n visualize.animate_track(state_action_history, f'agent-{episode}')\n\n print('Training completed.')\n visualize.plot_steps_per_episode(self.steps_per_episode)\n visualize.plot_epsilon(self.agent.epsilon_history)\n\n if parameters.VISUALIZE_FINAL_GAME:\n print('Showing one episode with the greedy strategy.')\n self.agent.epsilon = 0\n steps, state_action_history = self.run_one_episode()\n print(f'Episode completed in {steps} steps.')\n visualize.animate_track(state_action_history)",
"def trainAgent(self):\r\n\t\tfor episode in range(self.TOT_EPISODES):\r\n\t\t\t#reset environment, stacked frames every episode.\r\n\t\t\tstate = self.env.reset()\r\n\t\t\trewards = 0\r\n\t\t\t#preprocess and stack the frame/state.\r\n\t\t\tstate, self.stacked_frames = stack_frames(self.stack_size,\r\n\t\t\t\t\t\t\t\t\tself.stacked_frames, state, True)\r\n\t\t\t\r\n\t\t\tfor step in range(self.MAX_STEPS):\r\n\t\t\t#for every step in episode:\r\n\t\t\t\r\n\t\t\t\tif (step%100==0):\r\n\t\t\t\t\tprint(\"Episode No.: \", episode, \"Step No.: \", step)\r\n\t\t\t\t\r\n\t\t\t\t#agent acts - explores or exploitation of the model\r\n\t\t\t\taction = self.dqn.predictAction(state)\r\n\t\t\t\t#reduce epsilon for more exploitation later.\r\n\t\t\t\tself.dqn.decayEpsilon()\r\n\t\t\t\t#Perform the action and get the next_state, reward, and done vals.\r\n\t\t\t\tnext_state, reward, done, _ = self.env.step(action)\r\n\t\t\t\t#append this state to the frame. Pass the previous stacked frame.\r\n\t\t\t\tnext_state, self.stacked_frames = stack_frames(self.stack_size,\r\n\t\t\t\t\t\t\t\t\t\tself.stacked_frames, next_state, False)\r\n\t\t\t\trewards+=reward\r\n\t\t\t\t\r\n\t\t\t\t#add this experience into memory (experience buffer)\r\n\t\t\t\tself.dqn.remember(state, action, reward, next_state, done)\r\n\t\t\t\t\r\n\t\t\t\tstate = next_state\r\n\t\t\t\t\r\n\t\t\t\tif done:\r\n\t\t\t\t\tprint(\"took %d steps\" %step)\r\n\t\t\t\t\tprint(\"Earned a total of reward equal to \", rewards)\r\n\t\t\t\t\tbreak\r\n\t\t\t\r\n\t\t\t\t# TRAIN\r\n\t\t\t\tself.dqn.replay()\r\n\t\t\t\t#sync target_model and model weights every 10k steps.\r\n\t\t\t\tif step % 10000 == 9999:\r\n\t\t\t\t\tself.dqn.target_train()\r\n\t\t\t\r\n\t\t\t# Save the network every 1000 iterations\r\n\t\t\tif episode % 5 == 4:\r\n\t\t\t\tprint(\"Saving Network\")\r\n\t\t\t\tself.dqn.save_network(self.path)",
"def step(self):\n # Fast learning\n task_embedding = self._ilp.infer_task()\n\n # Posterior update\n #self._skip_flag = self._is_graph_same(task_embedding, self._prev_task_embedding)\n self._skip_flag = False # XXX do not skip test\n if not self._skip_flag:\n self._grprop.observe_task(task_embedding)\n self._prev_task_embedding = task_embedding\n else:\n print(\"skipping!\")",
"def run():\n\tglobal kinect \n\tkinect.depth_frame_ready += DEPTH\n\tkinect.depth_stream.open( nui.ImageStreamType.Depth, 2,\n\t\t\t\t\t\t\t nui.ImageResolution.Resolution320x240,\n\t\t\t\t\t\t\t nui.ImageType.Depth )\n\tcv2.namedWindow( 'VideoDEPTH', cv2.WINDOW_AUTOSIZE )",
"def run(self):\n while True:\n self.kinect.captureVideoFrame()\n self.kinect.captureDepthFrame()\n self.kinect.ColorizeDepthFrame()\n self.kinect.blockDetector()\n rgb_frame = self.kinect.convertQtVideoFrame()\n depth_frame = self.kinect.convertQtDepthFrame()\n depth_filtered_frame = self.kinect.convertQtFilteredFrame()\n # Emit the new frames to be handled by Gui.setImage function\n self.updateFrame.emit(rgb_frame, depth_frame, depth_filtered_frame)\n time.sleep(.03)",
"def run_no_learn(self):\n\n for agent in self.match_controller.agents:\n assert agent.get_agent_type() == Constants.AGENT_TYPE.AGENT, \"Both agents must be in inference mode\"\n\n self.current_step = 0\n self.last_observation_object = None\n\n # Reset game + map\n self.match_controller.reset(randomize_team_order=False)\n # Running\n self.match_generator = self.match_controller.run_to_next_observation()\n try:\n next(self.match_generator)\n except StopIteration:\n # The game episode is done.\n is_game_error = False\n print('Episode run finished successfully!')\n except GameStepFailedException:\n # Game step failed.\n is_game_error = True\n\n return is_game_error"
] | [
"0.6336105",
"0.63183016",
"0.62164974",
"0.6207791",
"0.61121464",
"0.6077845",
"0.60773385",
"0.60439396",
"0.60370135",
"0.5874364",
"0.5873513",
"0.5855923",
"0.5849908",
"0.5848853",
"0.5842768",
"0.58283395",
"0.57712567",
"0.57294315",
"0.5708104",
"0.56961703",
"0.56749105",
"0.5647066",
"0.5644706",
"0.5617529",
"0.55744076",
"0.5551185",
"0.55475247",
"0.55405957",
"0.5508324",
"0.5507635"
] | 0.7098092 | 0 |
Empty entry point to the Lambda function invoked from the edge. | def lambda_handler(event, context):
return | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def lambda_handler(event, context):\n return dispatch(event)",
"def test_lambda_support_no_parameters_no_body(self):\n self.assert_contains_lambda_expression_in_m(\n parse.parse(setup_java_class(\"() -> {};\")))",
"def default_event_handler(event):\n pass",
"def lambda_handler(event, context):\n\n logger.debug('event.bot.name={}'.format(event['bot']['name']))\n\n return dispatch(event)",
"def dummy_fn(self):\n\t\tpass",
"def dummy_fn(self, *args, **kwargs):",
"def visit_Lambda(self, node: ast.Lambda) -> None:\n self._check_useless_lambda(node)\n self._check_implicit_primitive(node)\n self.generic_visit(node)",
"def lambda_handler(event: Any, context: Any) -> Any:\n \n operation = event['op']\n order_id = int(event['order_id'])\n\n action_class = getattr(sys.modules[__name__], 'Action')\n action_instance = action_class(order_id)\n action_instance_method = getattr(action_instance, \"handle_\" + operation)\n return action_instance_method(event)",
"def visit_Lambda(self, node: ast.Lambda) -> None:\n self._counter.check_arguments_count(node)\n self.generic_visit(node)",
"def dummy_callback_handler(self, ret):\n pass",
"def on_invoke(self, ins, const, obj, args):\n pass",
"def bind(self, _target: aws_cdk.aws_lambda.IFunction) -> None:\n ...",
"def invoke(self, event_args, *args, **kwargs):\n pass # pragma: no cover",
"def register_apply_edge_func(self, func, block_id=...): # -> None:\n ...",
"def dummy_callback(obj):\n pass",
"def callback(self, fun: Callable[[], None] | None) -> None:",
"def lambda_handler(event, context):\n try:\n aq = Aquifer()\n aq.run()\n\n return \"Completed\"\n\n except (Exception, KeyboardInterrupt) as e:\n return \"Error occurred\"",
"def lambda_handler(event):\r\n return 'Hello ' + event['queryParams']['name']",
"def n_lambda(self):\n return self.b()",
"def test_lambda_support_no_parameters_expression_body(self):\n test_classes = [\n setup_java_class(\"() -> 3;\"),\n setup_java_class(\"() -> null;\"),\n setup_java_class(\"() -> { return 21; };\"),\n setup_java_class(\"() -> { System.exit(1); };\"),\n ]\n for test_class in test_classes:\n clazz = parse.parse(test_class)\n self.assert_contains_lambda_expression_in_m(clazz)",
"def lambda_method(self,t): \n return 5*math.sin(2*math.pi*1*t) # I don't see the value of 1 here but this is how lamda is defined in the exercise.",
"def one():\n return lambda f: lambda x: f(x)",
"def _funcOrLambda(self, node, gen, ndecorators):\n gen.Start()\n gen.FindLocals()\n gen.Dispatch(node.code)\n gen.Finish()\n\n self.set_lineno(node)\n for default in node.defaults:\n self.visit(default)\n self._makeClosure(gen, len(node.defaults))\n for i in xrange(ndecorators):\n self.emit('CALL_FUNCTION', 1)",
"def testGetLambda(self):\n self.ports.get_lambda(file_name = 'get_lambda.xml', port_ids = portsDict['port_ids'], lambdas = portsDict['lambda'])",
"def __call__(fun_name):",
"def lambda_handler(event, context):\n name: str = event['name']\n return f'Hi {name}!'",
"def lambdafan(func):\n if 'AWS_LAMBDA_FUNCTION_NAME' not in os.environ:\n return func\n\n @functools.wraps(func)\n def scaleout(*args, **kw):\n client = boto3.client('lambda')\n client.invoke(\n FunctionName=os.environ['AWS_LAMBDA_FUNCTION_NAME'],\n InvocationType='Event',\n Payload=dumps({\n 'event': 'fanout',\n 'function': func.__name__,\n 'args': args,\n 'kwargs': kw}),\n Qualifier=os.environ['AWS_LAMBDA_FUNCTION_VERSION'])\n return scaleout",
"def test_lambda_wrapper_basic_events(reporter_mock, context):\n\n @lumigo_tracer(token=\"123\")\n def lambda_test_function(event, context):\n pass\n\n lambda_test_function({}, context)\n function_span = SpansContainer.get_span().function_span\n assert not SpansContainer.get_span().spans\n assert \"started\" in function_span\n assert \"ended\" in function_span\n assert reporter_mock.call_count == 2\n first_send = reporter_mock.call_args_list[0][1][\"msgs\"]\n assert len(first_send) == 1\n assert first_send[0][\"id\"].endswith(\"_started\")\n assert first_send[0][\"maxFinishTime\"]",
"def callback(self, function: Optional[Callable[[int], None]]) -> None:",
"def lambda_function(f):\n @functools.wraps(f)\n def wrapper(event, context):\n global _CURRENT_LAMBDA_CONTEXT\n _CURRENT_LAMBDA_CONTEXT = context\n try:\n result = f(event, context)\n return wait(lambda: result)\n except:\n cls, exc, trace = sys.exc_info()\n report_exc_info((cls, exc, trace.tb_next))\n wait()\n raise\n return wrapper"
] | [
"0.6476532",
"0.597203",
"0.59268284",
"0.5917011",
"0.5866253",
"0.58485323",
"0.5809084",
"0.5671117",
"0.565327",
"0.5632782",
"0.5579186",
"0.555291",
"0.55512804",
"0.55272305",
"0.55262905",
"0.5456578",
"0.5411757",
"0.54025686",
"0.5380574",
"0.5376376",
"0.5373923",
"0.5342568",
"0.5324346",
"0.53224224",
"0.53128815",
"0.53093725",
"0.5297057",
"0.52915233",
"0.52863246",
"0.5284363"
] | 0.6586704 | 0 |
Method updates the image data. This currently encodes the numpy array to jpg but can be modified to support other encodings. frame: Numpy array containing the image data of the next frame in the project stream. | def set_frame_data(self, frame):
ret, jpeg = cv2.imencode('.jpg', cv2.resize(frame, self.resolution))
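        # cv2.imencode returns a success flag and a 1-D uint8 buffer holding the JPEG bytes.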
if not ret:
raise Exception('Failed to set frame data')
self.frame = jpeg | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_frame_data(self, frame):\n ret, jpeg = cv2.imencode('.jpg', cv2.resize(frame, self.resolution))\n \n if not ret:\n raise Exception('Failed to set frame data')\n self.frame = jpeg",
"def _write_frame(self : \"animation\",\n frame : \"np.ndarray\"\n ):\n self._writer.append_data(frame)\n self._frame_number += 1\n self._prevFrame = frame",
"def update_frame(self, frame):\n\n t = datetime.now()\n delta_t = t - self.dpar.frame_timestamp[0]\n fps = self.dpar.update_fps(1./delta_t.total_seconds())\n\n self.dpar.frame_timestamp[0] = t\n\n if self.config.black_correct:\n cframe = self.ffc.black_correct(frame)\n else:\n cframe = frame\n\n self.dpar.latest_frame = np.copy(cframe)\n \n if self.dpar.cap_live_swap:\n pix, gray = self._get_pixmap(cframe[::4,::4], self.dpar.iwindow[0])\n self.cap_screen.cap_title = self._live_title(fps)\n self.cap_screen.setPixmap(pix)\n else: \n pix, gray = self._get_pixmap(cframe, self.dpar.iwindow[0])\n self.live_screen.live_title = self._live_title(fps)\n self.live_screen.setPixmap(pix)\n\n self.draw_histogram()\n\n\n if self.recording_sequence:\n\n # MRP ToDo update these tags properly.\n et = np.int(np.round(self.camera.actual_exposure_time_ms))\n ifi_ms = 1000. / self.camera.actual_frame_rate\n ts_ms = np.int(np.round(ifi_ms * self.seq_frame_num))\n\n self.ifd.update_tags((self.seq_frame_num, 0), et, 0, ts_ms, 99)\n\n cap_image = np.copy(self.dpar.latest_frame).astype(np.uint16)\n #cv2.imwrite(cfn, (cap_image << (16 - self.camera.pixel_bits)).astype(np.uint16))\n\n \"\"\"\n Perform the TIFF windowing and then rebinning (compress) according to config file options\n \"\"\"\n x0 = max(0, (cap_image.shape[1] - config.tiff_seq_x_window) // 2)\n x1 = cap_image.shape[1] - x0\n y0 = max(0, (cap_image.shape[0] - config.tiff_seq_y_window) // 2)\n y1 = cap_image.shape[0] - y0\n cap_image = cap_image[y0:y1, x0:x1]\n\n shift_bits = 16 - self.camera.pixel_bits\n if config.tiff_seq_rebin > 1: # not tested for r ne 2\n r = config.tiff_seq_rebin\n cap_image = cap_image.reshape((cap_image.shape[0] // r, r, cap_image.shape[1] // r, -1)).sum(axis=3).sum(axis=1)\n extra_bits = 2 * (r.bit_length() -1)\n shift_bits = max(0, shift_bits - extra_bits)\n\n\n #im = PIL.Image.fromarray(gray)\n im = PIL.Image.fromarray((cap_image << shift_bits).astype(np.uint16))\n\n im.save(self.tiff_out, tiffinfo=self.ifd, compression=TIFF_COMPRESSION)\n self.tiff_out.newFrame()\n self.seq_frame_num += 1\n self.seq_frame_label.setText(str(self.seq_frame_num))\n\n if self.recording_video:\n # cframe is int16\n #f8 = ((cframe >> (self.camera.pixel_bits - 8)) & 0xff).astype(np.uint8)\n #Style 1:\n #fc = np.stack((f8, f8, f8), axis=-1)\n #self.rv_vout.write(fc)\n #Style 2&3:\n self.rv_vout.write(gray)\n self.recorded_video_frame_number += 1\n #Style 4: (16-bit)\n #self.rv_vout.write(cframe)\n\n #if self.recorded_video_frame_number == 20:\n # self.record_video() # turn off",
"def gen():\n global dataFrame\n while True:\n frame = vs.read()\n # frame = imutils.resize(frame, width=400)\n \n (flag, encodedImage) = cv2.imencode(\".jpg\", frame.copy())\n if not flag: continue\n # print (encodedImage)\n dataFrame = yield (b'--frame\\r\\n'\n b'Content-Type: image/jpeg\\r\\n\\r\\n' + bytearray(encodedImage) + b'\\r\\n')",
"def send_frame(self):\n frame = self.frame_buffer.get()\n result, jpeg = cv2.imencode(\".jpg\", frame.nparray)#, self.encode_param)\n data = numpy.array(jpeg)\n string_data = data.tostring()\n self.sock.send(str(len(string_data)).ljust(16))\n self.sock.send(string_data)",
"def update_anim(frame, self):\n self.step()\n self.im.set_data(self.array)\n self.im2.set_data(self.array2)",
"def adjust_image_data(self):\r\n\r\n print('Adjusting image data: ')\r\n\r\n if self.removeFirstSequence: # used to remove the first trial from the sequence\r\n\r\n frames_per_rep = self.nFrames/self.nrepetitions\r\n\r\n self.imageData = self.imageData[frames_per_rep:, :, :]\r\n\r\n self.nFrames = self.imageData.shape[0]\r\n\r\n self.nrepetitions = int(self.nFrames/(self.period * self.framerate))\r\n\r\n self.times = np.arange(0, self.nFrames/self.framerate, 1.0/self.framerate)\r\n\r\n \r\n\r\n # first squeeze the image to 3d if it is 4d\r\n\r\n maxt = np.max(self.times) # find last image time\r\n\r\n sh = self.imageData.shape\r\n\r\n if len(sh) == 4:\r\n\r\n self.imageData = self.imageData.squeeze()\r\n\r\n sh = self.imageData.shape\r\n\r\n dt = np.mean(np.diff(self.times)) # get the mean dt\r\n\r\n n_Periods = int((maxt+dt)/self.period) # how many full periods in the image set - include the first?\r\n\r\n if self.nrepetitions > 0 and self.nrepetitions < n_Periods:\r\n\r\n n_Periods = self.nrepetitions\r\n\r\n n_PtsPerCycle = int(np.floor(self.period/dt)); # estimate image points in a stimulus cycle\r\n\r\n ndt = self.period/n_PtsPerCycle\r\n\r\n self.imageData = self.imageData[range(0, n_Periods*n_PtsPerCycle),:,:] # reduce to only what we need\r\n\r\n print (' Adjusted image info')\r\n\r\n print (\" # Periods: %d Pts/cycle: %d Cycle dt %8.4fs (%8.3fHz) Cycle: %7.4fs\" %(n_Periods, n_PtsPerCycle, ndt, 1.0/ndt, self.period))\r\n\r\n self.print_image_info()",
"def update_frame(self, frame):\n self.set_bank(frame)\n offset = 0\n for chunk in self._chunk(self._buf[frame], 32):\n self.i2c.write_i2c_block_data(self.address, _COLOR_OFFSET + offset, chunk)\n offset += 32",
"def _save_frame_as_png(\n self : \"animation\",\n frame : \"np.ndarray\",\n filename : \"str\"\n ):\n im = Image.fromarray(frame)\n im.save(filename)",
"def draw(self, frame):\n frame[OFS:OFS+self.image.shape[0], OFS:OFS+self.image.shape[1]] = self.image",
"def write_frame(self, img):\n if img.shape[0] % 2 != 0:\n print(\"Warning: height is not divisible by 2! Dropping last row\")\n img = img[:-1]\n if img.shape[1] % 2 != 0:\n print(\"Warning: width is not divisible by 2! Dropping last column\")\n img = img[:, :-1]\n if self.post_processor:\n img = self.post_processor.process(img)\n if self.width is None:\n self.width = img.shape[0]\n self.height = img.shape[1]\n assert os.path.exists(self.directory)\n fn = FRAME_FN_TEMPLATE % self.frame_counter\n self.frame_fns.append(fn)\n imwrite(img, os.path.join(self.frame_directory, fn))\n self.frame_counter += 1\n if self.frame_counter % self.next_video_checkpoint == 0:\n if self.automatic_build:\n self.make_video()\n self.next_video_checkpoint *= 2",
"def NextFrame(self, event):\n buffer = self.GetDataBuffer()\n if buffer is not None:\n # Update bitmap widget with new image frame:\n self.bitmap.CopyFromBuffer(buffer)\n # Refresh panel to draw image into bitmap:\n self.Refresh()\n pass",
"async def put(self, frame: RawArray):\r\n if self.full():\r\n raise IndexError(\"not enough internal buffer\")\r\n self.frames[self._write_index][:] = frame\r\n\r\n self._write_index = (self._write_index + 1) % self.capacity()\r\n self._is_full = self._read_index == self._write_index",
"def update(self, frame = None):\n if type(frame) == type(None):\n frame = self.video.get_frame()\n height, width, channel = frame.shape\n bytesPerLine = 3 * width\n image = QImage(frame.data, width, height, bytesPerLine, QImage.Format_RGB888)\n self.pixmap = QtGui.QPixmap(image)\n size = self.size()\n scaledPix = self.pixmap.scaled(size, Qt.KeepAspectRatio, transformMode = Qt.FastTransformation)\n self.setPixmap(scaledPix)\n\n QtCore.QCoreApplication.processEvents()",
"def write(self, Width, Height, ImageData, Speed):\n # write_begin = datetime.datetime.now()\n\n self.Data.Game.Speed = Speed\n\n # TODO Not sure if needed\n AspectRatio = Width / Height\n TargetWidth = int(self._TargetResolution[1] * AspectRatio)\n\n if TargetWidth >= self._TargetResolution[0]:\n if Width != TargetWidth or Height != self._TargetResolution[1]:\n ImageData = cv2.resize(ImageData, (TargetWidth, self._TargetResolution[1]))\n\n if TargetWidth != self._TargetResolution[0]:\n XStart = int(TargetWidth / 2 - self._TargetResolution[0] / 2)\n XStop = int(TargetWidth / 2 + self._TargetResolution[0] / 2)\n ImageData = ImageData[:, XStart:XStop]\n\n else:\n TargetHeight = int(self._TargetResolution[0] / AspectRatio)\n\n if Width != self._TargetResolution[0] or Height != TargetHeight:\n ImageData = cv2.resize(ImageData, (self._TargetResolution[1], TargetHeight))\n\n if TargetHeight != self._TargetResolution[1]:\n YStart = int(TargetHeight / 2 - self._TargetResolution[1] / 2)\n YStop = int(TargetHeight / 2 + self._TargetResolution[1] / 2)\n ImageData = ImageData[YStart:YStop, :]\n ImageData = cv2.flip(ImageData, 0)\n # Update Parameters\n\n Height, Width = ImageData.shape[:2]\n # print(\"Type is \", np.array(ImageData).dtype)\n\n # Set the SHM\n self.Data.Image.ImageWidth = Width\n self.Data.Image.ImageHeight = Height\n\n # Reshape ImageData to 1 D array\n ImageData = ImageData.flatten()\n\n\n # print(\"Target Image data\", Width, Height)\n\n start_time = datetime.datetime.now()\n self.Data.Image.Data = (ctypes.c_uint8 * (RECORD_MAX_IMAGE_HEIGHT * RECORD_MAX_IMAGE_WIDTH * RECORD_IMAGE_CHANNELS))(*np.array(ImageData))\n\n # elapsed = datetime.datetime.now() - start_time\n # print(\"Setting Image data \", int(elapsed.total_seconds() * 1000) )\n #\n # Notify we wrote a new data - Maybe we can also share the frame number\n #self.Data.Sync.IsWritten = 1\n # elapsed = datetime.datetime.now() - write_begin\n # print(\"Write to memory took \", int(elapsed.total_seconds() * 1000))\n\n if self._IsPauseOn:\n self.Data.Sync.IsPauseOn = 1\n else:\n self.Data.Sync.IsPauseOn = 0",
"def update_image(self):\n self.image = Image.fromarray(self.img)",
"def convert_to_image(self, frame, base64_encode=False):\n #NOTE: tuple (85010, 1) ndarray --> data reduction\n img_buf_arr = cv2.imencode(\".jpeg\", frame)[1]\n if base64_encode:\n img_buf_arr = b\"data:image/jpeg;base64,\" + base64.b64encode(img_buf_arr)\n return img_buf_arr\n return bytes(img_buf_arr)",
"def update_img(self):\n self.img = np.array(self.image)",
"def encoder(cls, frames) -> bytearray:\n\t\tframe_it = iter(frames)\n\t\tprev = next(frame_it).copy()\n\t\tall_events = get_events_by_position(frames)\n\n\t\t# Encode resolution and number of frames\n\t\tyield struct.pack('>3I', prev.shape[0], prev.shape[1], len(frames))\n\n\t\t# Encode first frame\n\t\tyield prev.tobytes()\n\n\t\t# Yield events for each pixel in turn\n\t\tyield from cls._events_to_bytes(all_events)",
"def send_frame(self, frame: np.ndarray) -> None:\n self.sink.putFrame(frame)",
"def get_data(self):\n global CAM\n while CAM.isOpened():\n _, frame = CAM.read()\n _, jpeg = cv2.imencode('.jpg', frame)\n encoded_img = \"data:image/jpg;base64,\" + str(base64.b64encode(jpeg.tobytes()).decode())\n SIO.emit('video_frame',\n {'frame': encoded_img},\n namespace='/live-stream')\n sleep(self.delay)",
"def animate_with_numpy_frame_sequence(self, numpy_frame_sequence, frames_per_second=15):\n\n sleep_time = 1/frames_per_second\n for animation_frame in numpy_frame_sequence:\n tic = time.time()\n self.set_image_from_numpy_array(animation_frame)\n self.update()\n toc = time.time()\n frame_generation_time = toc-tic\n if frame_generation_time < sleep_time:\n new_sleep_time = sleep_time - frame_generation_time\n time.sleep(new_sleep_time)\n else:\n pass",
"def draw(self, frame, offset=OFS):\n frame[\n OFS : OFS + self.image.shape[0], OFS : OFS + self.image.shape[1]\n ] = self.image",
"def send_jpg(frame_jpg, frame_count):\n try:\n\n img_bytes = frame_jpg\n ticks = time.time()\n\n frame_package = {\n 'CaptureTime': ticks,\n 'FrameCount': frame_count,\n 'ImageBytes': img_bytes\n }\n\n # Put encoded image in kinesis stream\n print(\"Sending image to Kinesis...\")\n response = kinesis_client.put_record(\n StreamName=KINESIS_STREAM_NAME,\n Data=pickle.dumps(frame_package),\n PartitionKey=str(uuid.uuid4())\n )\n print(response)\n except Exception as ex:\n print(ex)",
"def save_frame(frame):\n try:\n img = Image.fromarray(frame.array, 'RGB')\n out_path = settings['app']['web_path']\n if not os.path.isabs(out_path):\n out_path = os.path.join(basepath, out_path)\n filename = os.path.join(out_path, 'static', 'latest.jpg')\n tmp_filename = '{}.part'.format(filename)\n img.save(tmp_filename, 'jpeg')\n os.rename(tmp_filename, filename)\n except Exception, error:\n print('Error saving frame: {}'.format(error))",
"def update_frame(self):\n if not self.image_queue: return\n image = self.image_queue.pop()\n self.image_queue.rotate(-1)\n self.original_image = image\n self.altered_image = image.copy()\n\n if self.tracking:\n self.update_frame_tracking()\n self.display_image(True)\n elif self.calibrating:\n self.update_frame_calibrating()\n self.display_image(True)\n else:\n image = cv2.flip(self.altered_image, 1)\n self.display_image(True)",
"def get_frame(self):\n self._serial_port.close()\n self._serial_port.open()\n\n self._request_frame()\n\n serial_data = self._serial_port.readall()\n\n frame_start_idx = serial_data.find(BEGIN_FRAME) + len(BEGIN_FRAME)\n frame_end_idx = serial_data.find(END_FRAME)\n\n print serial_data[0:frame_start_idx]\n print serial_data[frame_end_idx:]\n\n raw_frame = serial_data[frame_start_idx:frame_end_idx]\n\n np_frame = np.fromstring(raw_frame, dtype=np.uint8)\n # np_frame = np_frame.reshape((30, 30))\n\n # image = cv2.fromarray(np_frame)\n\n # return image\n return np_frame",
"def store_frame(self, frame):\n if self.obs is None:\n self.obs = np.empty([self.size] + list(frame.shape), dtype=np.uint8)\n self.action = np.empty([self.size], dtype=np.int32)\n self.reward = np.empty([self.size], dtype=np.float32)\n self.done = np.empty([self.size], dtype=np.bool)\n self.obs[self.next_idx] = frame\n\n ret = self.next_idx\n self.next_idx = (self.next_idx + 1) % self.size\n self.num_in_buffer = min(self.size, self.num_in_buffer + 1)\n\n return ret",
"def read(self):\n try:\n if self.Data.Sync.IsWritten == 1:\n\n if self._IsPauseOn:\n self.Data.Sync.IsPauseOn = 1\n else:\n self.Data.Sync.IsPauseOn = 0\n\n Width = self.Data.Image.ImageWidth\n Height = self.Data.Image.ImageHeight\n\n # Image = np.fromstring(self.Data.Image.Data, np.uint8, Width * Height * self.TARGET_IMAGE_CHANNELS)\n Image = np.frombuffer(self.Data.Image.Data, np.uint8, Width * Height * self.TARGET_IMAGE_CHANNELS)\n Image = Image.reshape(Height, Width, self.TARGET_IMAGE_CHANNELS)\n\n AspectRatio = Width / Height\n TargetWidth = int(self._TargetResolution[1] * AspectRatio)\n\n if TargetWidth >= self._TargetResolution[0]:\n if Width != TargetWidth or Height != self._TargetResolution[1]:\n Image = cv2.resize(Image, (TargetWidth, self._TargetResolution[1]))\n\n if TargetWidth != self._TargetResolution[0]:\n XStart = int(TargetWidth/2 - self._TargetResolution[0]/2)\n XStop = int(TargetWidth/2 + self._TargetResolution[0]/2)\n Image = Image[:, XStart:XStop]\n\n else:\n TargetHeight = int(self._TargetResolution[0]/AspectRatio)\n\n if Width != self._TargetResolution[0] or Height != TargetHeight:\n Image = cv2.resize(Image, (self._TargetResolution[1], TargetHeight))\n\n if TargetHeight != self._TargetResolution[1]:\n YStart = int(TargetHeight/2 - self._TargetResolution[1]/2)\n YStop = int(TargetHeight/2 + self._TargetResolution[1]/2)\n Image = Image[YStart:YStop, :]\n\n # Shall we convert this to 0 - 1 ?\n self._RawImage = Image\n self._Image = cv2.flip(Image, 0)\n\n # This one does not flip the image, but it rotate and crop !!\n # self._Image = np.array(cv2.flip(Image, 0)/255, dtype=np.float32)\n # self._Image = cv2.flip(Image, 0)\n\n\n # This one is flipped upside/down\n # print(\"Image from memory reshaped as WxH with Mean\", Width, Height, np.mean((self._Image), axis=(0, 1)))\n # self.store_to_file(self._Image)\n\n return True\n except:\n print(\"Unexpected error in Shared Memory Read\", sys.exc_info()[0])\n\n return False",
"def calculate_frame(self):\n frame = self.stream.read()\n self.keypoints, self.image = self.openpose.forward(frame, True)"
] | [
"0.72074133",
"0.66426784",
"0.6403696",
"0.6373772",
"0.6328509",
"0.6311963",
"0.63089526",
"0.62927353",
"0.6120985",
"0.60910094",
"0.60802954",
"0.6040295",
"0.59539485",
"0.5935913",
"0.5899413",
"0.5882713",
"0.58772403",
"0.5768093",
"0.5674097",
"0.5664655",
"0.56475234",
"0.5642928",
"0.5587867",
"0.5571092",
"0.55618393",
"0.55439425",
"0.55392617",
"0.55368423",
"0.55085135",
"0.54553413"
] | 0.72505695 | 0 |
Gets the fav drinks for a given user id. | def get_fav_drinks(self, user_id):
assert type(user_id) == str
return next((fd.get('drink_id') for fd in self.favorite_drinks if fd.get('user_id')==user_id), None) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_favorites(self, user_id=None):\n if not user_id:\n user_id = self.user_id\n\n favorite_decks = self.data_source.get_favorites(user_id)\n\n return favorite_decks",
"def add_fav_drinks(self, user_id, drinks): \n assert type(user_id) == str\n assert type(drinks) == list\n\n fav_drinks = self.get_fav_drinks(user_id)\n user_check = self.users.get_user_name(user_id)\n drinks_check = [self.drinks.get_drinks_by_flavor_and_type(d.get('flavor'), d.get('type')) for d in drinks]\n\n # make sure that at least one drink exists in the list\n if all(x is None for x in drinks_check):\n print(\"All drinks provided do not exist. We will not add favorite drinks since one of the drinks must already exist.\")\n \n # user does not exist\n elif user_check is None: \n print(\"User Id {} does not exist.\".format(user_id))\n \n # add fav drinks\n else : \n # user has existing fav drinks\n if fav_drinks is not None:\n for d in drinks:\n # add the drink if it does not exist \n drink_id = self.drinks.add_drink(d.get('type'), d.get('flavor'))\n fav_drinks.append(drink_id)\n # user has no existing fav drinks\n else :\n ids = []\n for d in drinks:\n # add the drink if it does not exist \n ids.append(self.drinks.add_drink(d.get('type'), d.get('flavor')))\n\n fd_id = self.__generate_id()\n self.favorite_drinks.append({\"id\": fd_id, \"user_id\": user_id, \"drink_id\": ids})",
"def add_fav_drink(self, user_id, drink_id):\n assert type(user_id) == str\n assert type(drink_id) == str \n\n existing_drink = False if self.drinks.get_drink_by_id(drink_id) is None else True\n existing_user = False if self.users.get_user_name(user_id) is None else True\n if not existing_drink:\n print(\"Drink does not exist.\")\n elif not existing_user:\n print(\"User does not exist.\")\n else :\n fav_drinks = self.get_fav_drinks(user_id)\n if fav_drinks is not None:\n if drink_id not in fav_drinks:\n fav_drinks.append(drink_id)\n else : # user exists but has no fav drinks\n fd_id = self.__generate_id()\n self.favorite_drinks.append({\"id\": fd_id, \"user_id\": user_id, \"drink_id\": [drink_id]})",
"def delete_fav_drink(self, user_id, drink_id):\n assert type(user_id) == str\n assert type(drink_id) == str\n drinks = self.get_fav_drinks(user_id)\n user_check = self.users.get_user_name(user_id)\n if drinks is not None and drink_id in drinks:\n drinks.remove(drink_id)\n elif user_check is None:\n print(\"User Id {} does not exist.\".format(user_id))\n else :\n print(\"User Id {} does not have a favorite drink id {}.\".format(user_id, drink_id))",
"def get_favourites(self, username):\n self.cur.execute(\"SELECT video_ID FROM favourites WHERE username = \\\"{}\\\"\".format(username))\n favourites = []\n for ID in self.cur.fetchall():\n favourites.append(ID[0])\n return favourites",
"def get_user_ratings(self, user_id):\r\n return self.df_app_data.loc[(self.df_app_data[\"user_id\"] == int(user_id))]",
"def _get_user_ratings(self, user_id):\n return self.ratings[self.ratings['user_id'] == user_id]",
"def get_favorites(self):\n url = \"https://api.imgur.com/3/account/{0}/favorites\".format(self.name)\n resp = self._imgur._send_request(url, needs_auth=True)\n return [_get_album_or_image(thing, self) for thing in resp]",
"def favourites_read(self, data, sesh):\n\n\t\t# Fetch the favourites for the thrower\n\t\tlFavourites = Favourites.get(sesh['thrower']['_id'], raw=['ids'])\n\n\t\t# If there's none\n\t\tif not lFavourites:\n\t\t\treturn Services.Effect([])\n\n\t\t# Look up all the throwers using the IDs\n\t\tlThrowers = Thrower.get(lFavourites['ids'], raw=['_id', 'alias'])\n\n\t\t# Return what's found\n\t\treturn Services.Effect(lThrowers)",
"def getUserFavorites(request, userid):\n try:\n User.objects.get(id=userid)\n favList = list(Favorite.objects.filter(user=userid).values())\n favInfoDict = {}\n num = 0\n\n for fav in favList:\n try:\n favItem = Item.objects.get(id=fav.get(\"item_id\")) \n favInfoDict[num] = model_to_dict(favItem)\n num = num + 1\n \n except Item.DoesNotExist:\n favInfoDict[\"favorite\"] = \"doesnotexist\"\n\n return JsonResponse(favInfoDict)\n\n except User.DoesNotExist:\n fail = {\n \"user\":\"doesnotexist\"\n }\n return JsonResponse(fail)",
"def user_ratings(user_id):\n return _fetch_records(f\"SELECT item_id, rating_type FROM ratings WHERE user_id = {user_id}\")",
"def favorites(self):\n path = self._get_path('favorites')\n \n response = self._GET(path)\n self._set_attrs_to_values(response)\n return self._clean_return(response)",
"def find_favorite_recipes_for_user(self, user_doc, count):\n try:\n self.client.connect()\n db = self.client[self.db_name]\n latest_user_doc = db[user_doc['_id']]\n if 'recipes' in latest_user_doc.keys():\n user_recipes = latest_user_doc['recipes']\n user_recipes.sort(key=lambda x: x['count'], reverse=True)\n recipes = []\n for i, recipe in enumerate(user_recipes):\n if i >= count:\n break\n recipes.append(recipe)\n return recipes\n else:\n return []\n finally:\n self.client.disconnect()",
"def favorite(self):\n url = \"https://api.imgur.com/3/album/{0}/favorite\".format(self.id)\n return self._imgur._send_request(url, needs_auth=True, method=\"POST\")",
"def show_fav_recipes():\n if not g.user:\n flash(\"Please login to view.\",\"warning\")\n return redirect('/login')\n \n data = search_recipes(request) \n favorite_list = [l.id for l in g.user.recipes]\n favorites = [f['id'] for f in data['results'] if f['id'] in favorite_list]\n \n\n return render_template(\"favs/show.html\", favorites=favorites)",
"def add_favorite(self, deck_id):\n added_deck = self.data_source.add_favorite(self.user_id, deck_id)\n\n return added_deck",
"def get_recommendations_for_user(self, user_id):\r\n\r\n sql_command = \"\"\"\r\n SELECT event_id, score\r\n FROM UserRecommendations\r\n WHERE user_id = '{0}'\r\n ORDER BY score\r\n \"\"\".format(user_id)\r\n self.controller.execute(sql_command)\r\n\r\n return self.controller.fetchall()",
"def _getFavorites(self):\n url = self._genFavoritesUrlByUser(self._username)\n doc = html.document_fromstring(requests.get(url).text)\n out = dict()\n pages = get_pages(doc)\n favs = doc.xpath(\"//div[@class='user_favorites']//a[@class='post_title']\")\n for f in favs:\n # out[f.text] = str(f.attrib['href']).split('/')[-2]\n # topic_id =\n out[f.text] = str(f.attrib['href']).split('/')[-2]\n for p in range(2, pages):\n url = 'http://habrahabr.ru/users/{0}/favorites/page{1}/'.format(self._username, p)\n # if show_progress:\n # print('parsing page{0}... url={1}'.format(p, url))\n doc = html.document_fromstring(requests.get(url).text)\n favs = doc.xpath(\"//div[@class='user_favorites']//a[@class='post_title']\")\n for f in favs:\n # out[f.text] = f.attrib['href'][-7:-1]\n out[f.text] = str(f.attrib['href']).split('/')[-2]\n return out",
"def get_bookmarked_items(user_id):\n return list(Bookmark.objects.filter(user=user_id).values_list(\n 'item_id', flat=True))",
"def get_favorites_questions(user_id, api_site_parameter, page = 1, body = False, comments = False, pagesize = 100, sort = 'added'):\n path = \"users/%d/favorites\" % user_id\n \n query_filter = ')(Ybxw_gbz'\n \n if body:\n query_filter = '9F)u(CSWCtKt'\n if comments:\n query_filter = ')(YbxuzQQ.'\n if body and comments:\n query_filter = ')(YbxuzQTp'\n \n results = __fetch_results(path, api_site_parameter, page = page, filter = query_filter, pagesize = pagesize, sort = sort)\n return results",
"def get_favorite(self, obj):\n article_fav_users = obj.favorite.all()\n return self.fetch_usernames(article_fav_users)",
"def favorite(self):\n url = \"https://api.imgur.com/3/image/{0}/favorite\".format(self.id)\n return self._imgur._send_request(url, needs_auth=True, method='POST')",
"def read_songs_by_user_id(user_id):\n logging.debug('{CRUD_operations} BEGIN function read_songs_by_user_id()')\n logging.debug('{CRUD_operations} Data received: user_id: %s', user_id)\n songs = Song.query.filter_by(user_id=user_id).filter_by(is_deleted=False)\n logging.debug('{CRUD_operations} END function read_songs_by_user_id()')\n return songs",
"def get_friends(self, user_id):\n # if user_id is alias, replace it with id\n if not self._is_positive_number(user_id):\n user_id = get_names_of_users(set([user_id]))[0].id\n api = pyvkontakte.VkontakteApi()\n return set(api.call('friends.get', user_id=user_id, v='5.8')['items'])",
"def get_my_fav():\n\n shows_to_session()\n\n try:\n shows = make_multi_requests(session['show_ids'])\n # We handle exceptions when the API is not working as we expect\n except APIError as error:\n print(error)\n return redirect(url_for('error'))\n except KeyError as error:\n print('ERROR The following field must have been removed from the API : ' + str(error))\n return redirect(url_for('error'))\n except TypeError as error:\n print('ERROR The following field must have been modified in the API : ' + str(error))\n return redirect(url_for('error'))\n\n return render_template('myfav/myfav.html', shows=shows)",
"def get_hotdesk(self, account_id, user_id, filters=None):\n return self.rest_request.get('accounts/' + str(account_id) +\n '/users/' + str(user_id) + '/hotdesks',\n filters)",
"def get_restaurants(user_id, **kwargs):\n search_criteria_values = get_search_criteria_values(**kwargs)\n if search_criteria_values:\n rest_ids = get_rest_ids_by_search_criteria(**search_criteria_values)\n if rest_ids:\n return get_rest_info_by_rest_id(rest_ids)\n\n print 'There was nothing found'\n return []",
"def favorites(self):\n if not self._user_favorites_loaded:\n self._user_favorites = self._getFavorites()\n self._user_favorites_loaded = True\n return deepcopy(self._user_favorites)",
"def get_users():\n table_response = USER_FAVORITES_TABLE.scan()\n return table_response['Items']",
"def get_friends(self, user_id=None, fields='sex,bdate'):\n if user_id is None:\n friends_info = self.vk.friends.get(fields=fields)\n else:\n friends_info = self.vk.friends.get(fields=fields, user_id=user_id)\n return friends_info['items']"
] | [
"0.7683676",
"0.7625044",
"0.7077731",
"0.6953718",
"0.6094342",
"0.59908515",
"0.59745145",
"0.59438324",
"0.5888048",
"0.5862097",
"0.57868314",
"0.5751655",
"0.56791073",
"0.56489706",
"0.56454253",
"0.56270546",
"0.561332",
"0.5574944",
"0.5572457",
"0.5544575",
"0.5500325",
"0.5486297",
"0.5474039",
"0.54723",
"0.54363006",
"0.54046434",
"0.5355697",
"0.5339248",
"0.53233707",
"0.5321137"
] | 0.88145494 | 0 |
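The get_fav_drinks record above resolves a user's favourites with a single next() over a generator expression, falling back to None when no record matches. A minimal stand-alone sketch of that lookup idiom, assuming the same {id, user_id, drink_id} record shape; the plain list here stands in for the owning object's favorite_drinks attribute, which the record itself does not show:

favorite_drinks = [
    {"id": "1", "user_id": "u1", "drink_id": ["d1", "d2"]},
    {"id": "2", "user_id": "u2", "drink_id": ["d3"]},
]

def get_fav_drinks(user_id):
    # first matching record's drink_id list, or None when the user has no favourites
    return next((fd.get("drink_id") for fd in favorite_drinks
                 if fd.get("user_id") == user_id), None)

assert get_fav_drinks("u1") == ["d1", "d2"]
assert get_fav_drinks("unknown") is None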
Incrementally generates fav drink ids. | def __generate_id(self):
ids = [int(fd.get('id')) for fd in self.favorite_drinks]
# default to "1" when no favorites exist yet; max() would raise on an empty list
return str(max(ids)+1) if ids else "1" | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_next_id(cls):\n cls.next_id += 1",
"def incr_circuit_fav_count(self, circuit_id):\n key = ':'.join(\n [CIRCUIT_NMBR_FAVS_1, \n str(circuit_id), \n CIRCUIT_NMBR_FAVS_2]\n ) \n self.RS.incr(key)",
"def new_id(self):\n self.next += 1\n return self.next",
"def id_generator():\n\t\tcount = 0\n\t\twhile True:\n\t\t\tyield count\n\t\t\tcount += 1",
"def _current_item_id_gen(self):\n id = 0\n while True:\n id += 1\n yield id",
"def __generateUserIDs(self,_count):\n return map(lambda x:self.__getNewUserID(),range(_count))",
"def generate_tie_fighters(self):\n while self.number_of_tie_fighters < self.number_limit_of_tie_fighters:\n self.tie_fighters.append(TieFighter(self.tie_fighter_speed_x, self.tie_fighter_speed_y))\n self.number_of_tie_fighters += 1",
"def __dice_generator(self):\n self.current_dice = np.random.randint(1, 6 + 1)",
"def add_fav_drink(self, user_id, drink_id):\n assert type(user_id) == str\n assert type(drink_id) == str \n\n existing_drink = False if self.drinks.get_drink_by_id(drink_id) is None else True\n existing_user = False if self.users.get_user_name(user_id) is None else True\n if not existing_drink:\n print(\"Drink does not exist.\")\n elif not existing_user:\n print(\"User does not exist.\")\n else :\n fav_drinks = self.get_fav_drinks(user_id)\n if fav_drinks is not None:\n if drink_id not in fav_drinks:\n fav_drinks.append(drink_id)\n else : # user exists but has no fav drinks\n fd_id = self.__generate_id()\n self.favorite_drinks.append({\"id\": fd_id, \"user_id\": user_id, \"drink_id\": [drink_id]})",
"def incr_id(id, n):\n return id[:-1] + (id[-1] + n,)",
"def id_generator():\n start_value = 0\n while True:\n yield start_value\n start_value += 1",
"def add_fav_drinks(self, user_id, drinks): \n assert type(user_id) == str\n assert type(drinks) == list\n\n fav_drinks = self.get_fav_drinks(user_id)\n user_check = self.users.get_user_name(user_id)\n drinks_check = [self.drinks.get_drinks_by_flavor_and_type(d.get('flavor'), d.get('type')) for d in drinks]\n\n # make sure that at least one drink exists in the list\n if all(x is None for x in drinks_check):\n print(\"All drinks provided do not exist. We will not add favorite drinks since one of the drinks must already exist.\")\n \n # user does not exist\n elif user_check is None: \n print(\"User Id {} does not exist.\".format(user_id))\n \n # add fav drinks\n else : \n # user has existing fav drinks\n if fav_drinks is not None:\n for d in drinks:\n # add the drink if it does not exist \n drink_id = self.drinks.add_drink(d.get('type'), d.get('flavor'))\n fav_drinks.append(drink_id)\n # user has no existing fav drinks\n else :\n ids = []\n for d in drinks:\n # add the drink if it does not exist \n ids.append(self.drinks.add_drink(d.get('type'), d.get('flavor')))\n\n fd_id = self.__generate_id()\n self.favorite_drinks.append({\"id\": fd_id, \"user_id\": user_id, \"drink_id\": ids})",
"def getRandomID(self, ids):\r\n \r\n while True:\r\n num = randint(1, 1000)\r\n if num not in ids:\r\n ids.append(num)\r\n return num",
"def inc( self ):\n self.count += 1",
"def generate_id(cls):\n cls._index += 1\n return 'fp_%s' % cls._index",
"def inc(self):\n \n self.count += 1",
"def increment(cls):\n index = random.randint(0, SimpleCounterShard.NUM_SHARDS - 1)\n shard_name = 'shard' + str(index)\n counter = SimpleCounterShard.objects.get_or_create(pk=shard_name)[0]\n counter.count += 1\n counter.save()",
"def create_list(self):\n for _ in range(self.count):\n id_ = random.randint(10000, 99999)\n self.ids.append(id_)\n self.img_paths.append(f\"{self.save_path}{self.name}/images/{id_}.png\")\n if hasattr(self, \"masks\"):\n self.masks.append(f\"{self.save_path}{self.name}/masks/{id_}.png\")",
"def _get_next_venue_id():\n VenueCrawler._venue_id += 1\n return VenueCrawler._venue_id",
"def next_id(self):\n self.id_counter += 1\n return self.id_counter - 1",
"def increment_counter(self) -> None:",
"def monkey_count(n):\n return [i for i in range(1, n + 1)]",
"def _next_id(self, prefix):\n return f\"{prefix}_{next(self._ids)}\"",
"def onDrinkCreated(self, event):",
"def make_drink ():\n \n customer_pref = customer_order.drink_order()\n drink = []\n \n for pref in customer_pref:\n if customer_pref[pref] == True:\n drink.append(random.choice(ingredients[pref]))\n \n return drink",
"def genNumID(size):\n\tid = \"\"\n\tfor i in range(size):\n\t\tid = id + selectRandomFromList(numTokens)\n\treturn id",
"def inc_dec_fav_count(clubname, amt):\n clubs = read_json()\n\n for i, club in enumerate(clubs):\n if club[\"name\"] == clubname:\n print(clubs[i])\n clubs[i][\"favourites\"] += amt\n break # Stop loop when the club is found\n write_json(clubs)",
"def auto_increment(table):\n print(table)\n i = 1\n for row in table:\n row.id = i\n i = i + 1",
"def fancy_id_generator(mapper, connection, target):\n push_id = PushID()\n target.id = push_id.next_id()",
"def new_id(users):\n\n #nonlocal index\n if len(users) > 1:\n new_index = new_player_id.index\n new_player_id.index += 1\n else:\n new_index = users[0]\n\n return new_index"
] | [
"0.6040626",
"0.5970284",
"0.5897605",
"0.5723138",
"0.5417529",
"0.5353754",
"0.5269767",
"0.5263355",
"0.52627593",
"0.5222546",
"0.52203166",
"0.5219099",
"0.51929504",
"0.5167325",
"0.51630044",
"0.5160587",
"0.51526725",
"0.515147",
"0.51489604",
"0.514435",
"0.5135074",
"0.51067466",
"0.50967395",
"0.509659",
"0.5095801",
"0.5078492",
"0.5054097",
"0.50505406",
"0.504384",
"0.50434864"
] | 0.6941434 | 0 |
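The __generate_id record stores ids as strings but derives the next one numerically (max existing id + 1). A small sketch of the same scheme with an explicit empty-list guard — the guard is an addition of mine, since max() on an empty sequence raises ValueError:

def generate_id(records):
    # ids are kept as strings in the records, so convert before taking the max
    ids = [int(r["id"]) for r in records]
    return str(max(ids) + 1) if ids else "1"

assert generate_id([{"id": "1"}, {"id": "7"}]) == "8"
assert generate_id([]) == "1"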
Adds a list of drinks to the user's favorite_drinks. At least one of the drinks must already exist in the drinks object. | def add_fav_drinks(self, user_id, drinks): 
assert type(user_id) == str
assert type(drinks) == list
fav_drinks = self.get_fav_drinks(user_id)
user_check = self.users.get_user_name(user_id)
drinks_check = [self.drinks.get_drinks_by_flavor_and_type(d.get('flavor'), d.get('type')) for d in drinks]
# make sure that at least one drink exists in the list
if all(x is None for x in drinks_check):
print("All drinks provided do not exist. We will not add favorite drinks since one of the drinks must already exist.")
# user does not exist
elif user_check is None:
print("User Id {} does not exist.".format(user_id))
# add fav drinks
else :
# user has existing fav drinks
if fav_drinks is not None:
for d in drinks:
# add the drink if it does not exist
drink_id = self.drinks.add_drink(d.get('type'), d.get('flavor'))
fav_drinks.append(drink_id)
# user has no existing fav drinks
else :
ids = []
for d in drinks:
# add the drink if it does not exist
ids.append(self.drinks.add_drink(d.get('type'), d.get('flavor')))
fd_id = self.__generate_id()
self.favorite_drinks.append({"id": fd_id, "user_id": user_id, "drink_id": ids}) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_fav_drink(self, user_id, drink_id):\n assert type(user_id) == str\n assert type(drink_id) == str \n\n existing_drink = False if self.drinks.get_drink_by_id(drink_id) is None else True\n existing_user = False if self.users.get_user_name(user_id) is None else True\n if not existing_drink:\n print(\"Drink does not exist.\")\n elif not existing_user:\n print(\"User does not exist.\")\n else :\n fav_drinks = self.get_fav_drinks(user_id)\n if fav_drinks is not None:\n if drink_id not in fav_drinks:\n fav_drinks.append(drink_id)\n else : # user exists but has no fav drinks\n fd_id = self.__generate_id()\n self.favorite_drinks.append({\"id\": fd_id, \"user_id\": user_id, \"drink_id\": [drink_id]})",
"def add_drink(self, _drink):\n self.drinks.append(_drink)",
"def reload(self, favorite_drinks):\n self.favorite_drinks = favorite_drinks",
"def delete_fav_drink(self, user_id, drink_id):\n assert type(user_id) == str\n assert type(drink_id) == str\n drinks = self.get_fav_drinks(user_id)\n user_check = self.users.get_user_name(user_id)\n if drinks is not None and drink_id in drinks:\n drinks.remove(drink_id)\n elif user_check is None:\n print(\"User Id {} does not exist.\".format(user_id))\n else :\n print(\"User Id {} does not have a favorite drink id {}.\".format(user_id, drink_id))",
"def add_favourite(recipe_id):\r\n if \"user\" in session:\r\n user = coll_users.find_one(\r\n {\"username_lower\": session[\"user\"]})[\"_id\"]\r\n coll_users.update_one(\r\n {\"_id\": ObjectId(user)},\r\n {\"$push\": {\"user_favs\": ObjectId(recipe_id)}})\r\n coll_recipes.update(\r\n {\"_id\": ObjectId(recipe_id)}, {\"$inc\": {\"favourites\": 1}})\r\n return redirect(url_for(\r\n \"recipes.recipe_detail\",\r\n recipe_id=recipe_id))\r\n else:\r\n flash(\"You must be logged in to perform that action!\")\r\n return redirect(url_for(\"users.login\"))",
"def add_drink_order(self, chair_num, _drink):\n self.customers[chair_num].add_drink(_drink)",
"def add_to_wishlist(self, beer_id: str) -> Dict:\n method = \"user/wishlist/add\"\n auth = self._get_access_token()\n params = {\"bid\": beer_id}\n return self._do_get(method, auth, params)",
"async def addfavseries(self, ctx, series_id=None):\n if not series_id:\n await ctx.send('You must pass at least one series ID with this command. '\n 'Use `!help addfavseries` for more info.')\n\n await self.setfavseries(ctx, ids=str(series_id))",
"def get_fav_drinks(self, user_id):\n assert type(user_id) == str\n return next((fd.get('drink_id') for fd in self.favorite_drinks if fd.get('user_id')==user_id), None)",
"def add_to_fav(show_id, name):\n db = get_db()\n db.execute(\n 'INSERT INTO shows_users (show_id, user_id)'\n ' VALUES (?, ?)',\n (show_id, session['user_id'])\n )\n\n flash('\\\"%s\\\" has been successfully added to your favourite TV Shows!' % name)\n db.commit()\n return redirect(request.referrer)",
"def add_to_fav(request, q_id):\n if request.method == 'POST':\n Quotes.objects.add_to_user_fav(request.session['id'], q_id)\n return redirect('/quotes')",
"def add_to_drinks_list(chat_id, drink):\n if chat_id in drinksDict:\n temp_list = drinksDict[chat_id]\n temp_list.append(drink.replace(\"_\", \" | \"))\n drinksDict[chat_id] = temp_list\n else:\n drinksDict[chat_id] = [drink.replace(\"_\", \" | \")]",
"def favourite_create(self, data, sesh):\n\n\t\t# Verify fields\n\t\ttry: DictHelper.eval(data, ['id'])\n\t\texcept ValueError as e: return Services.Effect(error=(1001, [(f, \"missing\") for f in e.args]))\n\n\t\t# If someone tries to add themselves\n\t\tif data['id'] == sesh['thrower']['_id']:\n\t\t\treturn Services.Effect(False);\n\n\t\t# Make sure the thrower exists\n\t\tif not Thrower.exists(data['id']):\n\t\t\treturn Services.Effect(error=(1104, data['id']))\n\n\t\t# Add the thrower to the logged in thrower's favourites and return the\n\t\t#\tresult\n\t\treturn Services.Effect(\n\t\t\tFavourites.add(sesh['thrower']['_id'], data['id'])\n\t\t)",
"def test_if_user_can_add_and_retrieve_data(self):\n # take the first three drinks\n drinks = self.test_data[\"drinks\"][:3]\n # create drink objects from the json data\n drinks = [Drink(**i) for i in drinks]\n Drink.objects.bulk_create(drinks)\n\n data = self.test_data[\"data\"][0]\n # use drink ids added to the db for this particular\n # test\n data[\"drink_id\"] = drinks[random.randint(0, 2)]._id\n\n response = self.client.post(\"/data/data_collected/\",\n data, format='json')\n\n # assert it data was added correctly\n self.assertEqual(response.status_code,\n status.HTTP_201_CREATED)\n\n # retrieve the data added\n response = self.client.get(\"/data/data_collected/\")\n\n # assert if the response is 200\n self.assertEqual(response.status_code, 200)\n\n # get the number of added data records\n data_added_count = len(response.json())\n\n # assert if the data added is one\n self.assertEqual(data_added_count, 1)",
"def update_favorites():\n\n check_favorite = Favorite.query.filter(Favorite.favorited_item==session[\"athlete_id\"]).first()\n route = f'/athletes/{session[\"athlete_id\"]}'\n\n if check_favorite is None:\n new_update = Favorite(id=current_user.id, favorited_item=session[\"athlete_id\"])\n db.session.add(new_update) \n \n else:\n db.session.delete(check_favorite)\n \n db.session.commit()\n \n return redirect(route)",
"def add_favorite(self, deck_id):\n added_deck = self.data_source.add_favorite(self.user_id, deck_id)\n\n return added_deck",
"def save_favorited_trail(hike_id, user_id):\n\n trail = Trail(hike_id = hike_id, user_id = user_id)\n\n db.session.add(trail)\n db.session.commit()\n\n return (trail)",
"async def create(self, favorite: Favorite) -> Favorite:",
"def favorite(user, wine):\n\n favorite = Favorite(user=user, wine=wine)\n\n db.session.add(favorite)\n db.session.commit()\n\n # return favorite",
"def add_to_wishlist(request, product_id):\n redirect_url = request.POST.get('redirect_url')\n\n user = get_object_or_404(UserProfile, user=request.user)\n wishlist = Wishlist.objects.get_or_create(user=user)\n wishlist_user = wishlist[0]\n\n product = Product.objects.get(pk=product_id)\n if request.POST:\n existingWishlistItem = WishlistItem.objects.filter(\n wishlist=wishlist_user, product=product).exists()\n if existingWishlistItem:\n messages.error(request, \"Item already in your wishlist\")\n return redirect(redirect_url)\n\n else:\n added_item = WishlistItem(\n wishlist=wishlist_user, product=product, date_added=timezone.now())\n added_item.save()\n messages.success(request, \"Product added to your wishlist\")\n return redirect(redirect_url)\n else:\n messages.error(request, \"Click 'Add to wishlist' to add a item \")\n return render(request, 'home/index.html')",
"def add_to_wishlist(request, product_id):\n product = get_object_or_404(Product, pk=product_id)\n wishlist = get_object_or_404(Wishlist, user=request.user)\n\n if product not in wishlist.products.all():\n wishlist.products.add(product)\n messages.info(request,\n f\"{product.name} has been added to your wishlist.\")\n else:\n messages.error(request,\n \"Error, you already have this item in your wishlist!\")\n return redirect(reverse(\"product_detail\", args=[product_id]))",
"def create_new_drink(user_data):\n drink_res = requests.post(url = \"http://127.0.0.1:5000/add_drink\", json=user_data)\n return drink_res.text",
"def save_to_favorites_list():\n\n #get show id from the event handler/post request\n show_id = str(request.form.get(\"id\"))\n #get button content from the event handler/post request\n button_content = request.form.get(\"button_content\")\n\n button_content_encoded = button_content.encode('utf-8')\n\n #save utf-8 encoded checkmark as a string variable\n check_mark = \"\\xe2\\x9c\\x93\"\n\n #find the current logged in user\n email = session.get(\"current_user\")\n\n if email:\n\n #use email to find the user_id\n user_id = User.find_user_id_with_email(email)\n\n #if the show has not been favorited yet\n if check_mark not in button_content_encoded:\n #add row in favorites table\n favorite = Favorite.add_to_favorites(show_id, user_id)\n\n #pass back the show_id and that the show has been favorited\n payload = {\"show_id\":show_id,\"favorite\":\"True\"}\n return jsonify(payload)\n else:\n #delete row in favorites table\n Favorite.delete_favorite(show_id)\n\n #pass back the show_id and that the show has been unfavorited\n payload = {\"show_id\":show_id,\"favorite\":\"False\"}\n return jsonify(payload)\n else:\n flash(\"You need to be logged in to see that page.\")\n return redirect(\"/login\")",
"def test_if_user_can_retrive_drinks(self):\n # take data from the ones extracted from the json file \n drinks = self.test_data[\"drinks\"]\n save_drinks = []\n for drink in drinks:\n drink = Drink(**drink)\n save_drinks.append(drink)\n Drink.objects.bulk_create(save_drinks)\n\n drink_count = Drink.objects.count()\n\n # assert the saving of the drinks was successful\n self.assertEqual(drink_count, 10)\n\n # retrieve the data via a request\n response = self.client.get(\"/data/drinks/\")\n\n # assert the request was successful\n self.assertEqual(response.status_code,\n status.HTTP_200_OK)\n\n recieved_data_count = len(response.json())\n\n # assert the number of drinks recieved is correct\n self.assertEqual(recieved_data_count, 10)",
"def insert_favorite_food(self, user_answer_id_food, name_substitute):\n self.cursor = self.data_base.cursor(MySQLCursorPrepared)\n save_favorite_food = \"\"\"INSERT INTO Favorite\n (id_food, id_substitute_chooses)\n VALUES({0}, \n (SELECT id FROM Food WHERE name_food = {1}))\"\"\" \\\n .format(int(user_answer_id_food),\n \"\\'\" + name_substitute + \"\\'\")\n self.cursor.execute(save_favorite_food)\n self.data_base.commit()",
"def add_session_to_wishlist(self, websafe_session_key, user):\n wl_key = self.get_wishlist_key(user)\n\n wishlist = wl_key.get()\n\n if websafe_session_key in wishlist.sessionKeys:\n raise ConflictException(\n \"You already have this session in your wishlist.\")\n\n wishlist.sessionKeys.append(websafe_session_key)\n wishlist.put()\n\n return self.to_message(wishlist)",
"def add_favorite(self, id):\n path = self._get_path('alter_favorite').format(id=id)\n \n return self._clean_return(self._PUT(path))",
"def addToFavorites(self, shortName, absPath):\n logger.debug(\"Func: addToFavorites\")\n\n # old Name userFavoritesAdd\n bookmarksData = self.loadFavorites()\n bookmarksData.append([shortName, absPath])\n self._dumpJson(bookmarksData, self._pathsDict[\"bookmarksFile\"])\n return bookmarksData",
"def wishlist(request):\n items = []\n user = get_object_or_404(UserProfile, user=request.user)\n wishlist = Wishlist.objects.get_or_create(user=user)\n wishlist_user = wishlist[0]\n existingWishlist = WishlistItem.objects.filter(\n wishlist=wishlist_user).exists()\n\n if existingWishlist:\n user_wishlist = get_list_or_404(WishlistItem, wishlist=wishlist_user)\n for obj in user_wishlist:\n product = get_object_or_404(Product, name=obj)\n items.append(product)\n context = {\n 'wishlist': True,\n 'products': items\n }\n return render(request, 'wishlist/wishlist.html', context)\n\n else:\n context = {\n 'wishlist': False,\n }\n return render(request, 'wishlist/wishlist.html', context)",
"def favourite(self, favourite):\n\n self._favourite = favourite"
] | [
"0.7802421",
"0.6467667",
"0.6065164",
"0.5909702",
"0.58994406",
"0.5785789",
"0.57702625",
"0.56755793",
"0.56145716",
"0.5599154",
"0.55618584",
"0.55594814",
"0.5455796",
"0.5450204",
"0.5390807",
"0.5324672",
"0.53246087",
"0.5250622",
"0.5193845",
"0.5191468",
"0.5179421",
"0.5167231",
"0.515271",
"0.5149318",
"0.51389456",
"0.5128458",
"0.50701195",
"0.50472444",
"0.5010452",
"0.50079066"
] | 0.80658937 | 0 |
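The add_fav_drinks record rejects a batch only when none of the requested drinks can be resolved, using all(x is None ...) over the per-drink lookups. A reduced sketch of that guard with stand-in lookup results in place of the self.drinks collaborator, which is not part of the record:

def reject_batch(drinks_check):
    # reject only if every lookup failed; a single known drink is enough to proceed
    return all(x is None for x in drinks_check)

assert reject_batch([None, None]) is True            # nothing resolved -> reject
assert reject_batch([None, {"id": "d1"}]) is False   # one known drink -> proceed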
Adds a single existing drink id to a user's fav_drinks. | def add_fav_drink(self, user_id, drink_id):
assert type(user_id) == str
assert type(drink_id) == str
existing_drink = False if self.drinks.get_drink_by_id(drink_id) is None else True
existing_user = False if self.users.get_user_name(user_id) is None else True
if not existing_drink:
print("Drink does not exist.")
elif not existing_user:
print("User does not exist.")
else :
fav_drinks = self.get_fav_drinks(user_id)
if fav_drinks is not None:
if drink_id not in fav_drinks:
fav_drinks.append(drink_id)
else : # user exists but has no fav drinks
fd_id = self.__generate_id()
self.favorite_drinks.append({"id": fd_id, "user_id": user_id, "drink_id": [drink_id]}) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_fav_drinks(self, user_id, drinks): \n assert type(user_id) == str\n assert type(drinks) == list\n\n fav_drinks = self.get_fav_drinks(user_id)\n user_check = self.users.get_user_name(user_id)\n drinks_check = [self.drinks.get_drinks_by_flavor_and_type(d.get('flavor'), d.get('type')) for d in drinks]\n\n # make sure that at least one drink exists in the list\n if all(x is None for x in drinks_check):\n print(\"All drinks provided do not exist. We will not add favorite drinks since one of the drinks must already exist.\")\n \n # user does not exist\n elif user_check is None: \n print(\"User Id {} does not exist.\".format(user_id))\n \n # add fav drinks\n else : \n # user has existing fav drinks\n if fav_drinks is not None:\n for d in drinks:\n # add the drink if it does not exist \n drink_id = self.drinks.add_drink(d.get('type'), d.get('flavor'))\n fav_drinks.append(drink_id)\n # user has no existing fav drinks\n else :\n ids = []\n for d in drinks:\n # add the drink if it does not exist \n ids.append(self.drinks.add_drink(d.get('type'), d.get('flavor')))\n\n fd_id = self.__generate_id()\n self.favorite_drinks.append({\"id\": fd_id, \"user_id\": user_id, \"drink_id\": ids})",
"def delete_fav_drink(self, user_id, drink_id):\n assert type(user_id) == str\n assert type(drink_id) == str\n drinks = self.get_fav_drinks(user_id)\n user_check = self.users.get_user_name(user_id)\n if drinks is not None and drink_id in drinks:\n drinks.remove(drink_id)\n elif user_check is None:\n print(\"User Id {} does not exist.\".format(user_id))\n else :\n print(\"User Id {} does not have a favorite drink id {}.\".format(user_id, drink_id))",
"def add_to_fav(show_id, name):\n db = get_db()\n db.execute(\n 'INSERT INTO shows_users (show_id, user_id)'\n ' VALUES (?, ?)',\n (show_id, session['user_id'])\n )\n\n flash('\\\"%s\\\" has been successfully added to your favourite TV Shows!' % name)\n db.commit()\n return redirect(request.referrer)",
"def add_favourite(recipe_id):\r\n if \"user\" in session:\r\n user = coll_users.find_one(\r\n {\"username_lower\": session[\"user\"]})[\"_id\"]\r\n coll_users.update_one(\r\n {\"_id\": ObjectId(user)},\r\n {\"$push\": {\"user_favs\": ObjectId(recipe_id)}})\r\n coll_recipes.update(\r\n {\"_id\": ObjectId(recipe_id)}, {\"$inc\": {\"favourites\": 1}})\r\n return redirect(url_for(\r\n \"recipes.recipe_detail\",\r\n recipe_id=recipe_id))\r\n else:\r\n flash(\"You must be logged in to perform that action!\")\r\n return redirect(url_for(\"users.login\"))",
"def add_drink(self, _drink):\n self.drinks.append(_drink)",
"def get_fav_drinks(self, user_id):\n assert type(user_id) == str\n return next((fd.get('drink_id') for fd in self.favorite_drinks if fd.get('user_id')==user_id), None)",
"async def addfavseries(self, ctx, series_id=None):\n if not series_id:\n await ctx.send('You must pass at least one series ID with this command. '\n 'Use `!help addfavseries` for more info.')\n\n await self.setfavseries(ctx, ids=str(series_id))",
"def add_favorite(self, deck_id):\n added_deck = self.data_source.add_favorite(self.user_id, deck_id)\n\n return added_deck",
"def add_favorite(self, id):\n path = self._get_path('alter_favorite').format(id=id)\n \n return self._clean_return(self._PUT(path))",
"def favourite_create(self, data, sesh):\n\n\t\t# Verify fields\n\t\ttry: DictHelper.eval(data, ['id'])\n\t\texcept ValueError as e: return Services.Effect(error=(1001, [(f, \"missing\") for f in e.args]))\n\n\t\t# If someone tries to add themselves\n\t\tif data['id'] == sesh['thrower']['_id']:\n\t\t\treturn Services.Effect(False);\n\n\t\t# Make sure the thrower exists\n\t\tif not Thrower.exists(data['id']):\n\t\t\treturn Services.Effect(error=(1104, data['id']))\n\n\t\t# Add the thrower to the logged in thrower's favourites and return the\n\t\t#\tresult\n\t\treturn Services.Effect(\n\t\t\tFavourites.add(sesh['thrower']['_id'], data['id'])\n\t\t)",
"def add_to_fav(request, q_id):\n if request.method == 'POST':\n Quotes.objects.add_to_user_fav(request.session['id'], q_id)\n return redirect('/quotes')",
"def add_favoriting_user_id(self, circuit_id, user_id):\n key = ':'.join(\n [CIRCUIT_FAV_USRS_1, \n str(circuit_id), \n CIRCUIT_FAV_USRS_2]\n )\n self.RS.sadd(key, user_id)",
"def add_to_blacklist(self, user_id, blacklist_user_id):\n try:\n self.table.insert().values( user_id=user_id,\n blacklisted_id=blacklist_user_id).execute()\n except sqlalchemy.exc.IntegrityError as e:\n if e.orig.args[0] == 1062 :\n # duplicate entry, don't care !\n pass\n elif e.orig.args[0] == 1452 :\n self.log(e, self.identifier)\n raise egg_errors.UnknownUserOrBadgeIDException\n else:\n self.log(e, self.identifier)\n raise egg_errors.QueryNotPossible\n except Exception as e:\n self.log(e, self.identifier)\n raise egg_errors.QueryNotPossible",
"def add_to_wishlist(self, beer_id: str) -> Dict:\n method = \"user/wishlist/add\"\n auth = self._get_access_token()\n params = {\"bid\": beer_id}\n return self._do_get(method, auth, params)",
"def mark_favorite(request, object_id):\n feed_item = get_object_or_404(FeedItem, id=object_id)\n fav_item, is_new = FavoriteItem.objects.get_or_create(feed_item=feed_item)\n if request.is_ajax():\n return JSONResponse({'status': 'ok', 'text': 'Marked as favorite'}, False)\n return redirect(request.META.get('HTTP_REFERER', 'feed_item_list'))",
"def add_follow(follow_id):\n followed_user = User.query.get_or_404(follow_id)\n if not g.user or g.user.id == follow_id or followed_user.is_blocking(g.user):\n flash(\"Access unauthorized.\", \"danger\")\n return redirect(\"/\")\n\n g.user.following.append(followed_user)\n db.session.commit()\n\n return redirect(f\"/users/{g.user.id}/following\")",
"def insert_favorite_food(self, user_answer_id_food, name_substitute):\n self.cursor = self.data_base.cursor(MySQLCursorPrepared)\n save_favorite_food = \"\"\"INSERT INTO Favorite\n (id_food, id_substitute_chooses)\n VALUES({0}, \n (SELECT id FROM Food WHERE name_food = {1}))\"\"\" \\\n .format(int(user_answer_id_food),\n \"\\'\" + name_substitute + \"\\'\")\n self.cursor.execute(save_favorite_food)\n self.data_base.commit()",
"def add_favorite(self, pk: int) -> Response:\n try:\n TagDAO.favorite_tag_by_id_for_current_user(pk)\n return self.response(200, result=\"OK\")\n except TagNotFoundError:\n return self.response_404()\n except MissingUserContextException as ex:\n return self.response_422(message=str(ex))",
"def favourite(self, favourite):\n\n self._favourite = favourite",
"def reload(self, favorite_drinks):\n self.favorite_drinks = favorite_drinks",
"def create_new_drink(user_data):\n drink_res = requests.post(url = \"http://127.0.0.1:5000/add_drink\", json=user_data)\n return drink_res.text",
"def SetNewFav(self, newFav):\n self.favouriteFood = newFav",
"def update_favorites():\n\n check_favorite = Favorite.query.filter(Favorite.favorited_item==session[\"athlete_id\"]).first()\n route = f'/athletes/{session[\"athlete_id\"]}'\n\n if check_favorite is None:\n new_update = Favorite(id=current_user.id, favorited_item=session[\"athlete_id\"])\n db.session.add(new_update) \n \n else:\n db.session.delete(check_favorite)\n \n db.session.commit()\n \n return redirect(route)",
"def add_to_drinks_list(chat_id, drink):\n if chat_id in drinksDict:\n temp_list = drinksDict[chat_id]\n temp_list.append(drink.replace(\"_\", \" | \"))\n drinksDict[chat_id] = temp_list\n else:\n drinksDict[chat_id] = [drink.replace(\"_\", \" | \")]",
"def add_following(self, user_id):\n sleep(360) # too much follows => function ban\n self.following.append(user_id)\n return perform_with_ran_delay(self.instagram.follow, user_id)",
"def favorite(user, wine):\n\n favorite = Favorite(user=user, wine=wine)\n\n db.session.add(favorite)\n db.session.commit()\n\n # return favorite",
"def favorite_chart(chart_id):\n chart = Chart.query.get(chart_id)\n if chart is None:\n flash(\n \"No chart with that chart_id found!\",\n \"alert-warning\",\n )\n else:\n current_user.favorite_chart(chart)\n db.session.commit()\n flash(\n \"Added Chart: {name} to favorites list\".format(name=chart.name),\n \"alert-success\",\n )\n return redirect(request.args.get('next') or url_for('reports.my_charts'))",
"def add_follow(follow_id):\n\n want_to_follow_user = User.query.get_or_404(follow_id)\n if want_to_follow_user.private:\n # =========== NEED TO IMPLEMENT ====================\n # send them a request to follow\n want_to_follow_user.from_users.append(g.user) \n db.session.commit()\n flash(\"Your request has been sent\", \"success\")\n return redirect(f\"/users/{g.user.id}/following\")\n\n g.user.following.append(want_to_follow_user)\n db.session.commit()\n\n return redirect(f\"/users/{g.user.id}/following\")",
"def post_favorite(request, pk=None):\n post = Post.objects.get(pk=pk).original_or_self()\n if post.favorites.filter(pk=request.user.pk).exists():\n post.favorites.remove(request.user)\n else:\n post.favorites.add(request.user)\n post.save()\n\n referer = request.META['HTTP_REFERER']\n if referer:\n return redirect(referer)\n else:\n return redirect('posts:post', pk=post.pk)",
"def add_drink_order(self, chair_num, _drink):\n self.customers[chair_num].add_drink(_drink)"
] | [
"0.8466836",
"0.7160987",
"0.69984883",
"0.6947171",
"0.6840639",
"0.6773383",
"0.66296184",
"0.65369165",
"0.64328474",
"0.6358613",
"0.614724",
"0.6010638",
"0.59958446",
"0.5963398",
"0.59074044",
"0.58854777",
"0.5838287",
"0.58275396",
"0.580626",
"0.5790011",
"0.57858276",
"0.57656395",
"0.5748952",
"0.5736234",
"0.57008195",
"0.5654239",
"0.5648887",
"0.56150085",
"0.55966455",
"0.55754066"
] | 0.90073866 | 0 |
Removes a single drink id from a given user's favorite_drinks. | def delete_fav_drink(self, user_id, drink_id):
assert type(user_id) == str
assert type(drink_id) == str
drinks = self.get_fav_drinks(user_id)
user_check = self.users.get_user_name(user_id)
if drinks is not None and drink_id in drinks:
drinks.remove(drink_id)
elif user_check is None:
print("User Id {} does not exist.".format(user_id))
else :
print("User Id {} does not have a favorite drink id {}.".format(user_id, drink_id)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def remove_from_fav(request, favorite_id):\n # Gets a favorite designated by favorite_id or returns 404\n favorite = get_object_or_404(Favorite, pk=favorite_id)\n favorite.delete()\n\n print(\"{}, {} a été supprimé des favoris\".format(\n favorite.products.name, favorite.products.brand))\n\n return redirect(request.META['HTTP_REFERER'])",
"def favourite_delete(self, data, sesh):\n\n\t\t# Verify fields\n\t\ttry: DictHelper.eval(data, ['id'])\n\t\texcept ValueError as e: return Services.Effect(error=(1001, [(f, \"missing\") for f in e.args]))\n\n\t\t# Remove the thrower from the logged in thrower's favourites and return\n\t\t#\tthe result\n\t\treturn Services.Effect(\n\t\t\tFavourites.remove(sesh['thrower']['_id'], data['id'])\n\t\t)",
"def delete_drink(user_data):\n delete_res = requests.post(url = \"http://127.0.0.1:5000/remove_drink\", json=user_data)\n return delete_res.text",
"def remove_favourite(recipe_id):\r\n if \"user\" in session:\r\n user = coll_users.find_one({\"username_lower\": session[\"user\"]})[\"_id\"]\r\n coll_users.update_one(\r\n {\"_id\": ObjectId(user)},\r\n {\"$pull\": {\"user_favs\": ObjectId(recipe_id)}})\r\n coll_recipes.update(\r\n {\"_id\": ObjectId(recipe_id)}, {\"$inc\": {\"favourites\": -1}})\r\n return redirect(url_for(\r\n \"recipes.recipe_detail\",\r\n recipe_id=recipe_id))\r\n else:\r\n flash(\"You must be logged in to perform that action!\")\r\n return redirect(url_for(\"users.login\"))",
"def unfavorite(user_id, wine_id):\n\n # favorite = Favorite.query.get(user_id, wine_id)\n favorite = Favorite.query.filter(Favorite.user_id==user_id, Favorite.wine_id==wine_id).delete() \n db.session.commit()",
"def remove_favorite(self, favorite_id):\n removed_favorite_id = self.data_source.remove_favorite(self.user_id,\n favorite_id)\n\n return removed_favorite_id",
"def remove_drink(self, _drink):\n try:\n self.drinks.remove(_drink)\n except ValueError:\n print(\"This order doesn't have that drink.\")",
"def rm_from_fav(show_id, name):\n\n db = get_db()\n db.execute(\n 'DELETE FROM shows_users WHERE show_id = ? and user_id = ?',\n (show_id, session['user_id'])\n )\n\n flash('\\\"%s\\\" has been successfully removed from your favourite TV Shows!' % name)\n db.commit()\n return redirect(request.referrer)",
"def remove_from_wishlist(self, beer_id: str):\n method = \"user/wishlist/delete\"\n auth = self._get_access_token()\n params = {\"bid\": beer_id}\n return self._do_get(method, auth, params)",
"def add_fav_drink(self, user_id, drink_id):\n assert type(user_id) == str\n assert type(drink_id) == str \n\n existing_drink = False if self.drinks.get_drink_by_id(drink_id) is None else True\n existing_user = False if self.users.get_user_name(user_id) is None else True\n if not existing_drink:\n print(\"Drink does not exist.\")\n elif not existing_user:\n print(\"User does not exist.\")\n else :\n fav_drinks = self.get_fav_drinks(user_id)\n if fav_drinks is not None:\n if drink_id not in fav_drinks:\n fav_drinks.append(drink_id)\n else : # user exists but has no fav drinks\n fd_id = self.__generate_id()\n self.favorite_drinks.append({\"id\": fd_id, \"user_id\": user_id, \"drink_id\": [drink_id]})",
"def delete_favorite_food(self, user_answer_choice_id_substitute):\n self.cursor = self.data_base.cursor(MySQLCursorPrepared)\n self.cursor.execute(\"\"\"DELETE FROM Favorite where id = {}\"\"\"\n .format(int(user_answer_choice_id_substitute)))\n self.data_base.commit()",
"def delete_favorite(self, id):\n path = self._get_path('alter_favorite').format(id=id)\n \n return self._clean_return(self._DELETE(path))",
"def remove_from_fav(request, q_id):\n if request.method == 'POST':\n Quotes.objects.remove_from_user_fav(request.session['id'], q_id)\n return redirect('/quotes')",
"def get_fav_drinks(self, user_id):\n assert type(user_id) == str\n return next((fd.get('drink_id') for fd in self.favorite_drinks if fd.get('user_id')==user_id), None)",
"def rm_favoriting_user_id(self, circuit_id, user_id):\n key = ':'.join(\n [CIRCUIT_FAV_USRS_1, \n str(circuit_id), \n CIRCUIT_FAV_USRS_2]\n )\n self.RS.srem(key, user_id)",
"def remove_favorits(request):\n product = request.GET.get(\"delete_prod\",\"\")\n print(product)\n user_name = request.user\n print(user_name)\n if product is not None:\n del_prod = UserFavorite.objects.filter(user_name=user_name,product=product)\n \n # Category.objects.filter().delete(del_prod)\n print(del_prod.id)\n context = {\n 'product' : product\n }\n return render(request,\"favorits.html\",context)",
"def delete_from_blacklist(self, user_id, blacklist_user_id):\n try:\n self.table.delete().where(and_(\n self.table.c.user_id == user_id,\n self.table.c.blacklisted_id == blacklist_user_id )).execute() \n except Exception as e:\n self.log(e, self.identifier)\n raise egg_errors.QueryNotPossible",
"def add_fav_drinks(self, user_id, drinks): \n assert type(user_id) == str\n assert type(drinks) == list\n\n fav_drinks = self.get_fav_drinks(user_id)\n user_check = self.users.get_user_name(user_id)\n drinks_check = [self.drinks.get_drinks_by_flavor_and_type(d.get('flavor'), d.get('type')) for d in drinks]\n\n # make sure that at least one drink exists in the list\n if all(x is None for x in drinks_check):\n print(\"All drinks provided do not exist. We will not add favorite drinks since one of the drinks must already exist.\")\n \n # user does not exist\n elif user_check is None: \n print(\"User Id {} does not exist.\".format(user_id))\n \n # add fav drinks\n else : \n # user has existing fav drinks\n if fav_drinks is not None:\n for d in drinks:\n # add the drink if it does not exist \n drink_id = self.drinks.add_drink(d.get('type'), d.get('flavor'))\n fav_drinks.append(drink_id)\n # user has no existing fav drinks\n else :\n ids = []\n for d in drinks:\n # add the drink if it does not exist \n ids.append(self.drinks.add_drink(d.get('type'), d.get('flavor')))\n\n fd_id = self.__generate_id()\n self.favorite_drinks.append({\"id\": fd_id, \"user_id\": user_id, \"drink_id\": ids})",
"def delete_recipe(recipe_id):\r\n if \"user\" in session:\r\n selected_recipe = coll_recipes.find_one({\"_id\": ObjectId(recipe_id)})\r\n user = coll_users.find_one({\"username_lower\": session[\"user\"]})[\"_id\"]\r\n if user == selected_recipe.get(\"author\"):\r\n author = coll_recipes.find_one({\"_id\": ObjectId(recipe_id)})[\"author\"]\r\n coll_recipes.remove({\"_id\": ObjectId(recipe_id)})\r\n coll_users.update_one(\r\n {\"_id\": ObjectId(author)},\r\n {\"$pull\": {\"user_recipes\": ObjectId(recipe_id)}})\r\n coll_users.update_many({}, {\"$pull\": {\"user_favs\": ObjectId(recipe_id)}})\r\n return redirect(url_for(\"recipes.show_recipes\"))\r\n else:\r\n flash(\"You are not authorised to perform that action!\")\r\n return redirect(url_for(\"recipes.recipe_detail\", recipe_id=recipe_id))\r\n else:\r\n flash(\"You must be logged in to perform that action!\")\r\n return redirect(url_for(\"users.login\"))",
"def remove_from_wishlist(request, product_id):\n\n redirect_url = request.POST.get('redirect_url')\n\n user = get_object_or_404(UserProfile, user=request.user)\n wishlist = Wishlist.objects.get_or_create(user=user)\n wishlist_user = wishlist[0]\n if request.POST:\n product = Product.objects.get(pk=product_id)\n\n # look for item in the user's wishlistItem - returns true if it exists\n existingWishlistItem = WishlistItem.objects.filter(\n product=product).exists()\n\n if existingWishlistItem:\n product = WishlistItem.objects.get(product=product)\n product.delete()\n messages.success(request, \"Item removed from wishlist\")\n return redirect(redirect_url)\n\n if existingWishlistItem is None:\n messages.error(\n request, \"You can not delete a item thats not in the wishlist\")\n return redirect(redirect_url)\n else:\n messages.error(request, 'Item can not be deleted from your wishlist')\n return render(request, 'home/index.html')",
"def remove(self, user_id):\n pass",
"def removeFollower(self,id):\n # DELETE /followers/$id\n pass",
"def unmark_favorite(request, object_id):\n fav_item = get_object_or_404(FavoriteItem, feed_item__id=object_id)\n fav_item.delete()\n if request.is_ajax():\n return JSONResponse({'status': 'ok', 'text': 'Unmarked favorite'}, False)\n return redirect(request.META.get('HTTP_REFERER', 'feed_item_list'))",
"def delete_favorite(request):\n company_id = request.data.get('id')\n company = Company.objects.get(id=company_id)\n\n request.user.profile.companies.remove(company)\n return Response({'favorite': False})",
"def unfollow(self, user):\n f = self.followed.filter_by(followed_id=user.id).first()\n if f:\n db.session.delete(f)",
"def remove_restaurant(restaurant_id):\n user_id = login_session['user_id']\n r = read_restaurants(restaurant_id, user_id)\n if r[1] is True: # Means if user is owner\n if request.method == 'POST':\n # Next we do the db delete\n delete_restaurant(restaurant_id)\n # Finally we return the success html\n flash(\"Deleted your restaurant\")\n return render_template(\"submitted.html\")\n else:\n return render_template('restaurants/deleterestaurant.html',\n restaurant=r[0][0])\n else:\n flash(\"You need to be the owner of the restaurant to delete\")\n return redirect(url_for('site.show_restaurants',\n restaurant_id=restaurant_id))",
"def remove(self, product):\n product_id = str(product.id)\n if product_id in self.wishlist:\n del self.wishlist[product_id]\n self.save()",
"def unfavorite_chart(chart_id):\n chart = Chart.query.get(chart_id)\n if chart is None:\n flash(\n \"No chart with that chart_id found!\",\n \"alert-warning\",\n )\n else:\n current_user.unfavorite_chart(chart)\n db.session.commit()\n flash(\n \"Removed Chart: {name} from favorites list\".format(name=chart.name),\n \"alert-success\",\n )\n return redirect(request.args.get('next') or url_for('reports.my_charts'))",
"def delete_wishlist(cust_id, wishlist_id):\n # \"\"\" Delete the wishlist with the provided id\"\"\"\n success = Customer.delete_by_id(cust_id, wishlist_id)\n return make_response('', status.HTTP_204_NO_CONTENT)",
"def remove_favorite(self, pk: int) -> Response:\n try:\n TagDAO.remove_user_favorite_tag(pk)\n return self.response(200, result=\"OK\")\n except TagNotFoundError:\n return self.response_404()\n except MissingUserContextException as ex:\n return self.response_422(message=str(ex))"
] | [
"0.7396384",
"0.7170701",
"0.71676636",
"0.7118096",
"0.69414896",
"0.69243777",
"0.68612635",
"0.68539405",
"0.68146366",
"0.6617351",
"0.6581387",
"0.64681506",
"0.6427771",
"0.6424399",
"0.64004254",
"0.63968754",
"0.63752973",
"0.6372094",
"0.63381314",
"0.6281305",
"0.62023365",
"0.6172984",
"0.61524916",
"0.60918355",
"0.60898364",
"0.6039735",
"0.6013743",
"0.5965992",
"0.596557",
"0.596101"
] | 0.85434836 | 0 |
Creates and displays a simple frame containing the RichTextPanel. | def showEditorWindow(parent, title, allowEditting = True):
frame = wx.Frame(parent, -1, title, size=(630, 320), style = wx.DEFAULT_FRAME_STYLE)
panel = RichTextPanel(allowEditting, frame, -1)
#frame.Fit()
#frame.SetMinSize(frame.GetSize())
frame.Show()
return panel | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def MakeFrame(self, name, parent=None, pos=None, size=(900,700), style=wx.DEFAULT_FRAME_STYLE, visible = True):\n if pos is None:\n pos = self._CheckBoundaries(size)\n\n\n frame = self.mf.MainFrame(self, name, parent, pos, size, style, self.text_editor)\n self.frame_position = (pos[0] + 50,pos[1] + 50)\n self.AddFrameObj(name, frame)\n \n if os.name is not \"posix\":\n frame.Iconize()\n \n frame.Show(visible)\n return frame",
"def _framed(widget):\n frame = Gtk.Frame()\n frame.add(widget)\n return frame",
"def create(self, parent):\n self.widget = QFrame(parent)",
"def __init__(self):\n super().__init__()\n self.geometry('{}x{}'.format(425, 185))\n self.title('PAD Tracker')\n self.frame = Frame(self)\n self.populateFields()\n self.frame.pack()",
"def show(self):\r\n\t\tself.frame.Show(True)",
"def NewDiv(width=default_width, height=default_height):\n\n global wid\n wid = uuid.uuid4().hex\n print('Display id = {}JS9'.format(wid))\n fmt = dict(url=default_root, port0=default_port_html, wid=wid, width=width, height=height)\n html_command = \"\"\"\n <iframe src='{url}:{port0}/{wid}' width='{width}' height='{height}'>\n </iframe>\n \"\"\".format(**fmt)\n get_ipython().run_cell_magic('html', '', html_command)",
"def createFrame (self,message):\n \n f = self.frame\n \n lab = Tk.Label(f,text=message)\n lab.pack(pady=10,side=\"left\")\n \n self.number_entry = t = Tk.Entry(f,width=20)\n t.pack(side=\"left\")",
"def show_editor(self):\r\n self.frame.Show()\r\n self.frame.Raise()",
"def createFrame(self, module, name):\n if name not in self.data.frames:\n display = module(self.container, self)\n display.grid(row=0, column=0, sticky=\"nsew\")\n self.data.frames[name] = display",
"def receiveFrame(self):\n\t\treceiveFrame = Frame(self)\n\t\treceiveFrame.grid(column=2, columnspan=2, row=0, rowspan=6)\n\t\treceiveFrame.config(bg = \"white\")\n\n\t\treceiveLabel = Label(receiveFrame, text=\"Receive\", font=(\"Sans Serif\", 20, \"bold\"), fg=\"blue\", bg = \"white\")\n\t\tself.receiveText = Text(receiveFrame, width=67, height = 10, fg = \"blue\", highlightthickness = 2, highlightcolor = \"blue\", highlightbackground = \"light slate gray\")\n\n\t\treceiveLabel.pack(pady=\"10 0\")\n\t\tself.receiveText.pack(padx = 10, pady = 10)",
"def body(self, frame):\n frame.rowconfigure(0, weight=0, pad=5)\n frame.rowconfigure(1, weight=0)\n frame.columnconfigure(0, weight=0)\n frame.columnconfigure(1, weight=0)\n\n self.name_label = tk.Label(frame, width=6, text=\"Name: \")\n self.name_label.grid(column=0, row=0)\n\n self.name_box = tk.Entry(frame, width=30)\n if self.name != \"\":\n self.name_box.insert(0, self.name)\n self.name_box.grid(column=1, row=0)\n\n self.url_label = tk.Label(frame, width=6, text=\"URL: \")\n self.url_label.grid(column=0, row=1)\n self.url_box = tk.Entry(frame, width=30)\n if self.url != \"\":\n self.url_box.insert(0, self.url)\n self.url_box.grid(column=1, row=1)\n return frame",
"def _add_frame(self):\n w = QtWidgets.QWidget(self)\n self.layout().addWidget(w)\n w.setSizePolicy(Policy.Expanding, Policy.Maximum)\n w.setLayout(QtWidgets.QHBoxLayout())\n w.layout().setContentsMargins(0, 0, 0, 0)\n w.layout().setSpacing(0)\n return w",
"def showUI(cls):\r\n win = cls()\r\n win.create()\r\n return win",
"def show(self):\n self.frame.grid()\n self.visible = True",
"def createMessageFrame (self,message):\n \n label = Tk.Label(self.frame,text=message)\n label.pack(pady=10)",
"def createWindow(self):\r\n\t\t# give the window a title\r\n\t\tself.parent.title( 'Acrobat Data Acquisition')\r\n\t\t# set the style\r\n\t\tself.style = ttk.Style()\r\n\t\tself.style.theme_use('default')\r\n\t\tself.pack(fill= tk.BOTH, expand=1)",
"def widget(self) -> tk.Frame:\r\n return self.main_frame",
"def inicialUI(self):\r\n\r\n self.setGeometry(500, 500, 500, 500)\r\n self.setWindownTitle(\"Pesquisa\")\r\n self.displayWidgets()\r\n\r\n self.show()",
"def init_control_panel(self):\n # initialize panel as QFrame\n panel = QtGui.QFrame(self)\n panel.setFrameStyle(QtGui.QFrame.StyledPanel)\n\n # set components\n vbox = QtGui.QVBoxLayout(panel)\n vbox.setSpacing(15)\n vbox.addWidget(self.init_summary_panel())\n vbox.addWidget(self.init_edit_panel())\n\n return panel",
"def createFrame(\n self,\n parent: Widget,\n name: str,\n hPolicy: Policy=None,\n vPolicy: Policy=None,\n lineWidth: int=1,\n shadow: Shadow=None,\n shape: Shape=None,\n ) -> Widget:\n if shadow is None:\n shadow = Shadow.Plain\n if shape is None:\n shape = Shape.NoFrame\n #\n w = QtWidgets.QFrame(parent)\n self.setSizePolicy(w, kind1=hPolicy, kind2=vPolicy)\n w.setFrameShape(shape)\n w.setFrameShadow(shadow)\n w.setLineWidth(lineWidth)\n w.setObjectName(name)\n return w",
"def showBasic(self):\n self.setWindowIcon(QIcon(self.icon))\n self.setWindowTitle(self.title)\n self.setGeometry(*self.posXY, *self.windowSize)\n self.show()",
"def createFrame(self,message):\n \n f = self.frame\n \n label = Tk.Label(f,text=message)\n label.pack(pady=10)\n \n self.id_entry = text = Tk.Entry(f,width=20)\n text.pack()",
"def create_panel(self):\n # Main Frame creation\n frame1 = Frame(self.window)\n frame1.pack(fill=\"both\")\n tablayout = Notebook(frame1)\n \n ##### TRACKER #####\n tab = Frame(tablayout) # creating 1st nested frame\n tab.pack(fill=\"both\")\n table = Frame(tab)\n table.pack(fill=\"both\")\n self.show_table(self.t.timeline[\"week\" + str(self.week)], table) # Grids the week with data\n self.add_buttons(tab, table)\n tablayout.add(tab, text=\"Current Week\") \n \n \n ##### STATS #####\n tab = Frame(tablayout) # creating 2nd nested frame\n tab.pack(fill=\"both\")\n self.stats.create_canvas(tab)\n\n\n # once its packed you can add it to the window object under a title\n tablayout.add(tab, text=\"Statistics\") \n tablayout.pack(fill=\"both\") # once everything is done now you pack the tablayout",
"def initialize(self):\r\n\r\n\t\t#wx.STAY_ON_TOP \r\n\t\tstyle = ( wx.NO_BORDER | wx.STAY_ON_TOP)\r\n\t\tcodeStyle = (wx.TE_MULTILINE | wx.TE_READONLY )\r\n\t\tself.frame = CodeFrame(None, -1, '',self.height,self.width,style,codeStyle)\r\n\t\tself.frame.SetPosition(self.pos)\r\n\t\tself.show()\r\n\t\r\n\t\tself.showOnlySelection = True\r\n\t\treturn True",
"def render_preview(text, parser, component='post'):\n tree = parse(text, parser, '%s-preview' % component)\n intro, body = split_intro(tree)\n if intro:\n return u'<div class=\"intro\">%s</div>%s' % (intro.to_html(),\n body.to_html())\n return body.to_html()",
"def body(self, frame):\n frame.rowconfigure(0, weight=0, pad=10)\n frame.rowconfigure(1, weight=0)\n\n popup_text = \"Your item '\" + self.name + \"' is back in stock!\"\n self.text = tk.Label(frame, text=popup_text, wraplength=300, justify=tk.LEFT)\n self.text.grid(row=0)\n\n self.link = tk.Label(\n frame,\n text=self.url,\n fg=\"blue\",\n cursor=\"hand2\",\n wraplength=300,\n justify=tk.LEFT,\n )\n self.link.grid(row=1)\n self.link.bind(\"<Button-1>\", self.followlink)\n\n return frame",
"def SetContent(self, window):\n window.SetName(\"content\")\n window.SetBackgroundColour(wx.GetApp().settings.bg_color)\n window.SetForegroundColour(wx.GetApp().settings.fg_color)\n window.SetFont(wx.GetApp().settings.text_font)",
"def CreateConsole(self):\n lc = launcher.TextFrame('title')\n return lc",
"def _setup_ui(self):\n\n self.window = ui.Widget()\n self.window.dimensions = ui.normalize_dimension((\n 0, 0,\n self.normalized_screen_resolution[0],\n self.normalized_screen_resolution[1]\n ))\n self.window.background_color = ImageColor.getcolor('#000000', 'RGB')\n\n interface_frame = ui.Widget(parent=self.window)\n interface_frame.dimensions = ui.normalize_dimension((\n self.preview_renderer.window[2],\n 0,\n self.normalized_screen_resolution[0] - self.preview_renderer.window[2],\n self.normalized_screen_resolution[1]\n ))\n interface_frame.background_color = ImageColor.getcolor('#ffffff', 'RGB')\n\n number = ui.LabelWidget(\"\",\n name=NAME_GET_STARTED,\n parent=interface_frame,\n align=\"center\",\n font_color=(0, 0, 0, 255))\n number.dimensions = (\n 5, 5,\n interface_frame.width - 10,\n interface_frame.height - 10\n )",
"def __init__(self, master, _type=REGULAR, **kw):\r\n Frame.__init__(self, master, **kw)\r\n self.main_frame = Frame(self, bd=1)\r\n self.main_frame.pack()"
] | [
"0.60286325",
"0.5938992",
"0.59213364",
"0.58199555",
"0.5770706",
"0.5710505",
"0.5705657",
"0.57053775",
"0.5688911",
"0.567008",
"0.563585",
"0.5622285",
"0.5615228",
"0.56087345",
"0.5592374",
"0.55808675",
"0.55758315",
"0.5544468",
"0.55370015",
"0.5525293",
"0.5516264",
"0.55083644",
"0.5492347",
"0.5481177",
"0.54727346",
"0.54703194",
"0.5459997",
"0.5446191",
"0.54302675",
"0.5429258"
] | 0.7123116 | 0 |
Indicates that we are beginning a new frame for the GIF. A new Figure object is created, using specifications provided to the Gif's constructor. Note that you are constrained to make one frame at a time: for every start_frame, there must be an end_frame without another start_frame in between. | def start_frame(self):
# Check whether we're supposed to make a frame on this iteration:
if self.frame_count % self.stride != 0:
return
# Check whether we're already making a frame.
if self.in_scope:
print("The Gif object for {} has encountered 'start_frame' twice\
without an intervening 'end_frame'".format(self.filename))
raise SyntaxError
# Construct a new figure
fig = plt.figure(figsize=(self.width,self.height), **(self.kwargs))
self.current_frame = fig
# Set the "in_scope" member True
self.in_scope = True
return self.current_frame | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_frame(\n self : \"animation\",\n frame : \"matplotlib.figure.Figure\",\n facecolor : \"str\" = 'white'\n ):\n self._make_animation_from_raw_list([frame], facecolor=facecolor)",
"def __init__(self, gif_fps=None, color_depth=None, gif_loop=None, height=None, start=None, duration=None,\n suffix=None, overlay=None, overlay_alignment=None, overlay_scale='fit', label=None):\n\n self._gif_fps = None\n self._color_depth = None\n self._gif_loop = None\n self._height = None\n self._start = None\n self._duration = None\n self._suffix = None\n self._overlay = None\n self._overlay_alignment = None\n self._overlay_scale = None\n self._label = None\n self.discriminator = None\n\n if gif_fps is not None:\n self.gif_fps = gif_fps\n if color_depth is not None:\n self.color_depth = color_depth\n if gif_loop is not None:\n self.gif_loop = gif_loop\n self.height = height\n self.start = start\n if duration is not None:\n self.duration = duration\n if suffix is not None:\n self.suffix = suffix\n if overlay is not None:\n self.overlay = overlay\n if overlay_alignment is not None:\n self.overlay_alignment = overlay_alignment\n if overlay_scale is not None:\n self.overlay_scale = overlay_scale\n if label is not None:\n self.label = label",
"def end_frame(self, **kwargs):\n \n # Check whether we're supposed to make a frame on this iteration:\n if self.frame_count % self.stride != 0:\n self.frame_count += 1\n return\n\n # Check whether we're still making another frame\n if not self.in_scope: \n print(\"The Gif object for {} has encountered 'end_frame' twice\\\n without an intervening 'start_frame'\".format(self.filename))\n raise SyntaxError\n\n # Save the frame to the temporary directory\n count_width = str(int(math.log10(self.max_frames) + 1))\n label = \"{:0>\"+count_width+\"d}\"\n label = label.format(self.frame_count)\n file_path = \"{}_{}{}\".format(self.tmp_prefix, label, self.tmp_suffix)\n self.current_frame.savefig(file_path,**kwargs)\n\n # Close the figure\n plt.close(self.current_frame)\n \n # Update some relevant attributes\n self.current_frame = None\n self.in_scope = False\n self.frame_count += 1\n\n return",
"def FrameStart(builder):\n return Start(builder)",
"def _init_frame(self : \"animation\",\n init_frame : \"matplotlib.figure.Figure\",\n init_ax : \"matplotlib.axes._subplots.AxesSubplot\"\n ):\n self._cframe = init_frame.canvas.copy_from_bbox(init_ax.bbox)",
"def new_frame(self):\n\n new_frame = False\n\n if self._frame_cursor < 10:\n new_frame = Frame(self)\n elif self._frame_cursor == 10:\n new_frame = TenthFrame(self)\n\n if new_frame != False:\n self._frames.append(new_frame)",
"def __init__(\n self : \"animation\",\n filename : \"str\",\n size : \"Tuple[int,int]\" = None,\n pbar : \"bool\" = False,\n mbs : \"int\" = 16,\n dpi : \"int\" = 150,\n init_frame : \"matplotlib.figure.Figure\" = None,\n init_ax : \"matplotlib.axes._subplots.AxesSubplot\" = None,\n fps : \"int\" = 5,\n interactive : \"bool\" = False,\n autoSmooth : \"bool\" = False,\n smoothingFrames : \"int\" = 5,\n saveFinalFrame : \"int\" = False,\n smoothingTime : float = None,\n smoothingFunction : \"Callable\" = None\n ):\n self.filename = filename\n self.size = size\n self._mbs = mbs\n self._writer = imageio.get_writer(\n self.filename,\n mode='I',\n macro_block_size=self._mbs,\n fps=fps\n )\n self.fps = fps\n self.pbar = pbar\n self._frame_number = 0\n self._closed = False\n self.dpi = dpi\n self._cframe = None\n if init_frame and init_ax:\n self._init_frame(init_frame, init_ax)\n\n self._init_interactive = matplotlib.is_interactive()\n if self._init_interactive and not interactive:\n matplotlib.interactive(False)\n else:\n matplotlib.interactive(interactive)\n if autoSmooth:\n assert smoothingFrames > 0\n\n self._autosmooth = autoSmooth\n self._prevFrame = None\n\n\n # Set up smoothing\n if smoothingTime is None:\n self._smoothingFrames = smoothingFrames\n else:\n self._smoothingFrames = int(smoothingTime*fps)\n\n if smoothingFunction is None:\n self._smoothingFunction = self._linear_interpolation\n else:\n self._smoothingFunction = smoothingFunction\n\n self._saveFinalFrame = saveFinalFrame",
"def __init__(self, frames=[], loop = 0):\n\t\t\n\t\tif isinstance(frames, (list, tuple)):\n\t\t\tself.frames = frames\n\t\telse:\n\t\t\traise TypeError\n\t\t\t\n\t\tif not loop:\n\t\t\tself.loop = 0\n\t\telse:\n\t\t\tself.loop = 1\n\t\t\t\n\t\tself.present_frame = None",
"def start(self):\n\t\tif self._start is not None:\n\t\t\traise RuntimeError('Animations can only be run once')\n\t\tself._start = 1\t\n\t\t# start time\n\t\tstartticks = self._startticks if self.startticks else _pg.time.get_ticks()\n\t\tfor anim in self.animations:\n\t\t\tanim._startticks = startticks\n\t\t\tanim.start()\n\t\t\tstartticks += anim.duration\n\t\t# get updated\n\t\t_running.append(self)\n\t\t_anim_started(self)",
"def start(self):\n\t\tif self._start is not None:\n\t\t\traise RuntimeError('Animations can only be run once')\n\t\tself._start = 1\t\n\t\t# start time\n\t\tif not self._startticks:\n\t\t\tself._startticks = _pg.time.get_ticks()\n\t\tfor anim in self.animations:\n\t\t\tanim._startticks = self._startticks\n\t\t\tanim.start()\n\t\t# get updated\n\t\t_running.append(self)\n\t\t_anim_started(self)",
"def Start(self): # this is used to start the object\n ani = anim.FuncAnimation(self.f, self.animate, interval=1000)\n # animating object wth 1 sec gap\n self.plt_0.tight_layout()\n self.plt_0.show()\n # showing the plot",
"def __init__(self, frames):\n self._frames = frames",
"def __init__(self, frames):\n self._frames = frames",
"def __init__(self, frames):\n self._frames = frames",
"def __init__(self, frames):\n self._frames = frames",
"def anim_produce_frame(up_to_line, *fargs):\n #unpack *fargs\n axes,running_reward_exists,running_loss_exists,actions_exists,\\\n running_reward_file,running_loss_file,actions_file,actions_to_plot, \\\n actions_per_log,is_tri,actions_ylim = fargs\n #produce the plots for the current frame\n axis_ind = 0\n if running_reward_exists:\n axes[axis_ind].clear()\n plot_running_reward_on_axis(running_reward_file, axes[axis_ind], up_to_line)\n axis_ind += 1\n if running_loss_exists:\n axes[axis_ind].clear()\n axes[axis_ind+1].clear()\n plot_running_loss_on_axis(running_loss_file, axes[axis_ind],axes[axis_ind+1], up_to_line)\n axis_ind += 2\n if actions_exists:\n axes[axis_ind].clear()\n plot_actions_on_axis(actions_file,axes[axis_ind],is_tri,actions_to_plot=actions_to_plot,\n plot_to_file_line=int(up_to_line*actions_per_log),\n actions_ylim=actions_ylim)",
"def display_frames_as_gif(frames):\n fig=e.cube.show_layout(frames[0]) \n print(\"Drawn\")\n def animate(i):\n return e.cube.update_plot(frames[i])\n anim = animation.FuncAnimation(fig, animate, frames = len(frames), interval=50,blit=True)",
"def enter(self):\n\n self.im = Image.open(self.filename)\n\n self.frame = 0\n self.timedelta = TimeDelta().reset()\n\n self._load_frame(self.frame)\n self.dur = self._get_duration()\n\n self.is_playing = True",
"def markFrame(self, event):\n color = Style.frameMarkerColor.gdkString\n\n if self.absoluteFrame is None:\n # Initialize the system. This is now absolute frame zero,\n # and we're declaring it to be the first marked frame.\n self.absoluteFrame = 0\n self.nextMarkedFrame = [0, 0]\n else:\n # Use the delta in real frame numbers (accounting for rollover)\n # to update our 'absolute' frame number, which never rolls over.\n d = event.frame - self.lastFrame\n\n if d < 0:\n d += 1024\n elif d == 0:\n # Duplicate frame, mark it in a different color\n color = Style.duplicateFrameColor.gdkString\n\n self.absoluteFrame += d\n self.lastFrame = event.frame\n\n # Small marks- default\n h = 3\n w = 0.4\n\n # Bigger marks every 100 frames\n if self.absoluteFrame > self.nextMarkedFrame[1]:\n self.nextMarkedFrame[1] += 100\n h = 10\n w = 0.5\n\n # Huge marks every 1000 frames\n if self.absoluteFrame > self.nextMarkedFrame[0]:\n self.nextMarkedFrame[0] += 1000\n h = 25\n w = 0.75\n\n self.resizer.track(self.frameGroup.add(gnomecanvas.CanvasRect,\n y1 = self.height - h,\n y2 = self.height,\n fill_color = color),\n x1=(event.timestamp, -w),\n x2=(event.timestamp, w))",
"def __init__(self, frame):\n self.frame = frame",
"def _nextAnimFrame(step=0):\n lfp_frame.set_data(timestamps[step:step+frame_size], lfp[step:step+frame_size])\n r_raw_frame.set_data(timestamps[step:step+frame_size], raw_ripple[step:step+frame_size])\n r_pow_frame.set_data(timestamps[step:step+frame_size], ripple_power[step:step+frame_size])\n lfp_measure.set_text(txt_template % timestamps[step])\n # Updating the limits is needed still so that the correct range of data\n # is displayed! It doesn't update the axis labels though - That's a\n # different ballgame!\n plot_axes.set_xlim(timestamps[step], timestamps[step+frame_size])\n return lfp_frame, r_raw_frame, r_pow_frame, lfp_measure",
"def add_frames(\n self : \"animation\",\n frameList : \"list[matplotlib.figure.Figure]\",\n facecolor : \"str\" = 'white'\n ):\n self._make_animation_from_raw_list(frameList, facecolor=facecolor)",
"def __init__(self, frame):\n super().__init__(frame)\n self.frames = None\n self.delay = None",
"def start(self):\n\t\tif self._start is not None:\n\t\t\traise RuntimeError('Animations can only be run once')\n\t\t# initial state of all attributes\n\t\tself._start = dict()\t\n\t\tfor attr in self._end:\n\t\t\tsep = attr.split('__')\n\t\t\tsubtarget, subattr = eval('.'.join(['self.target']+sep[:-1])), sep[-1]\n\t\t\tself._start[attr] = getattr(subtarget, subattr)\n\t\t# start time\n\t\tif not self._startticks:\n\t\t\tself._startticks = _pg.time.get_ticks()\n\t\t# get updated\n\t\t_running.append(self)\n\t\t_anim_started(self)",
"def setupFrame(self, frame_width, frame_height):\n x, y = 0.0, 0.4\n self.x0 = int(frame_width*x)\n self.y0 = int(frame_height*y)\n self.width = 260\n self.height = 260",
"def animate(self,frame,im = None):\n # With matplotlib, it's much, much faster to just update the properties\n # of a display object than it is to create a new one, so we'll just update\n # the data and position of the same objects throughout this animation...\n\n # Since we're making an animation with matplotlib, we need \n # ion() instead of show()...\n fig = plt.gcf()\n ax = plt.axes([.25, .55, .6, .4], facecolor='y')\n plt.axis('off')\n\n # Make an image based on the first frame that we'll update later\n # (The first frame is never actually displayed)\n if im is None:\n plt.imshow(frame,cmap='brg')\n else:\n plt.imshow(im)\n plt.title('Image Space')\n\n # Make 4 rectangles that we can later move to the position of each paw\n rects = [Rectangle((0,0), 1,1, fc='none', ec='red') for i in range(4)]\n [ax.add_patch(rect) for rect in rects]\n\n\n # Process and display each frame\n\n paw_slices = self.find_paws(frame)\n\n # Hide any rectangles that might be visible\n [rect.set_visible(False) for rect in rects]\n\n # Set the position and size of a rectangle for each paw and display it\n for slice, rect in zip(paw_slices, rects):\n dy, dx = slice\n rect.set_xy((dx.start, dy.start))\n rect.set_width(dx.stop - dx.start + 1)\n rect.set_height(dy.stop - dy.start + 1)\n rect.set_visible(True)",
"def start_animation(self) -> None:\n increment_values = {0: 1, self.original_height: -1}\n self.increment = increment_values.get(self.current_height, 0) # Compressed if",
"def start_sim(self):\n self.anim = animation.FuncAnimation(self.fig, self.anim_func, frames = self.timesteps, interval = 1, blit=True)\n plt.show()",
"def __init__(self, frame=1):\n self._frame = frame\n self._ticks = []",
"def start(self):\n for i in xrange(self.num_pulses):\n self.fillColor = \"white\"\n for j in xrange(self.num_frames_on):\n self.draw()\n self.win.flip()\n if j == 0:\n # Only store the time of the first occuring on frame.\n self.utc_timestamps.append(datetime.strftime(datetime.utcnow(), '%Y-%m-%dT%H:%M:%S.%fZ'))\n self.fillColor = \"black\"\n for j in xrange(self.num_frames_off):\n self.draw()\n self.win.flip()"
] | [
"0.6030946",
"0.5884757",
"0.58019",
"0.57871383",
"0.5710823",
"0.56462824",
"0.5613234",
"0.5611469",
"0.5561925",
"0.5536683",
"0.54781246",
"0.54582393",
"0.54582393",
"0.54582393",
"0.54582393",
"0.5429345",
"0.54275835",
"0.5406712",
"0.5402544",
"0.5368462",
"0.53589135",
"0.5338133",
"0.5324804",
"0.53096867",
"0.52740145",
"0.5259881",
"0.5253948",
"0.5251676",
"0.5249897",
"0.5239855"
] | 0.806977 | 0 |
Pushes game state onto history. | def _push_history(self):
self._history.append(self._state) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def storeState(self):\n\n self.action_history[self.trial] = self.action\n self.ball_history[self.trial] = self.ballcolor",
"def update_history(self, move):\r\n player_number = self.player_numbers[self.current_player]\r\n heaps = tuple(self.heaps)\r\n self.history.append([player_number, heaps, move])",
"def push(self, action):\n\n self.history = self.history[0:self.position]\n self.history.append(action)",
"def push(self, value):\n self.history.append(value)",
"def add_to_game_history(self, tile, flag=False):\n move = {\n 'tile': tile,\n 'flag':flag,\n 'coordinate': self.stack[tile]['coordinate'],\n 'value': self.stack[tile]['value']}\n self.history.append(move)",
"def save_state(self):\n # add (turn number, active player, player 1, player 2) to game history\n # player 1 and player 2 contain data about active mods\n turn_number = self.turn_number\n player_1 = Class.copy_monster(self.player1)\n player_2 = Class.copy_monster(self.player2)\n # save which player's turn it is\n if self.current_player == self.player1:\n active_player = 'player 1'\n else:\n active_player = 'player 2'\n\n # add this information to history list\n self.history.append((turn_number, active_player, player_1, player_2))",
"def set_state(self, state):\n self.history = state",
"def update_history(self, play, coplay):\r\n self.history.append(play, coplay)\r\n self.base.history.append(play,coplay)\r\n self.trust.history.append(play,coplay)\r\n self.conviction.history.append(play,coplay)",
"def __add_current_fen_to_history(self):\n self.history = np.hstack((self.history, self.fen()))",
"def append(self, screen):\n if len(self.state) == 0:\n # Initial insertion\n # No need to handle terminal cases as we don't restart from a game over, we just start a whole new game\n self.state = deque([process_screen(screen)] * 4, maxlen=config.HISTORY_LENGTH)\n\n else:\n self.state.append(process_screen(screen))",
"def new_game(self):\n old_state = self.rstate\n del old_state\n self.rstate = self.rsimulator.new_game()",
"def _push_move(self, notation: str):\n # copy the state of the board\n self.positions_copy = self.copy_board()\n # push it to the end of the move history\n self.move_history.append((notation, self.positions_copy))",
"def reset_state_history(self):\n self.state_history = []",
"def push_history(self, submission, action, program_compiled):\n\n history_data = {\n 'user': self.user.get().name,\n 'score': self.score,\n 'action': action,\n 'submission': submission,\n 'program_compiled': program_compiled,\n 'level': self.current_level\n }\n taskqueue.add(url='/tasks/push_game_history', params=history_data)",
"def save_state(self):\n\t\tself._history['time'].append(self.t)\n\t\tstate = np.array(self.x[np.newaxis,:,:])\n\t\tself._history['state'] = np.vstack([self._history['state'],state])",
"def update_to_state(self, game_state):\n pass",
"def push(self):\n self.stack.append(self.save())",
"def history_go(self, relative):\n self.thistab.history_go(int(relative))",
"def store(self, state, action, reward, next_state, done):\n self.replay_memory.append((state, action, reward, next_state, done))",
"def save_history_to_session_history(request: Request):\n session_history = request.session.get('session_history')\n if session_history is not None:\n session_history.append_action(request)\n request.session.update({'session_history': session_history})",
"def history(self, history):\n self._history = history",
"def push(scene):\n if _stack:\n spyral.event.handle('director.scene.exit', _scene = _stack[-1])\n old = _stack[-1]\n spyral.sprite._switch_scene()\n _stack.append(scene)\n spyral.event.handle('director.scene.enter', _scene = scene)\n pygame.event.get()",
"def update_history(self, guess='', result=''):\n item = json.dumps({'guess': guess, 'result': result})\n self.history.append(item)",
"def push_state(self, **lamp_parameters):\n # Also note that we do not filter out identical states from being pushed.\n # Since the enabled state can be fiddled with IOTool, there is good reason\n # for pushing an enabled state identical to the current one, so that it\n # will be restored after any such fiddling.\n old_state = {}\n for prop, value in lamp_parameters.items():\n getter, setter = self._get_getter_setter(prop)\n old_state[prop] = getter()\n setter(value)\n self._state_stack.append(old_state)",
"def addHistory(self):\r\n \r\n data = self.get()\r\n \r\n if data == '':\r\n return\r\n elif len(self.history) != 0 and self.history[0] == data:\r\n return\r\n \r\n if len(self.history) == self.historySize:\r\n self.history.pop()\r\n \r\n self.history.insert(0, data)",
"def __add_current_state_to_state_dict(self):\n board_fen = self.board_fen()\n if board_fen not in self.states:\n self.states[self.board_fen()] = GameState(self.board_array())",
"def RecordHistory( self ):\n if not self.restoringHistory:\n record = self.activated_node\n if self.historyIndex < -1:\n try:\n del self.history[self.historyIndex+1:]\n except AttributeError, err:\n pass\n if (not self.history) or record != self.history[-1]:\n self.history.append( record )\n del self.history[:-200]\n self.historyIndex = -1",
"def push(self, transition_tuple):\n if len(self.replay_memory) < self.state.replay_size:\n self.replay_memory.append(None)\n self.replay_memory[self.state.position] = transition_tuple\n self.state.position = (self.state.position + 1) % self.state.replay_size",
"def remember(self, *args):\n state, action, reward, next_state, done = args\n self.memory.append((state, action, reward, next_state, done))",
"def get_new_gamestate(self):"
] | [
"0.6968883",
"0.6701469",
"0.6685477",
"0.66748035",
"0.6566776",
"0.6524166",
"0.65231085",
"0.6444915",
"0.63551337",
"0.6299285",
"0.6295774",
"0.62556654",
"0.6236586",
"0.62092465",
"0.6204507",
"0.61990005",
"0.6162195",
"0.6140269",
"0.6104237",
"0.60969216",
"0.60812014",
"0.6076682",
"0.60713524",
"0.6067069",
"0.6064638",
"0.60585713",
"0.6036097",
"0.60228276",
"0.60065454",
"0.59940076"
] | 0.831839 | 0 |
Pops and loads game state from history. | def _pop_history(self):
current_state = self._state
try:
self._load_state(self._history.pop())
return current_state
except IndexError:
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_new_gamestate(self):",
"def new_game(self):\n old_state = self.rstate\n del old_state\n self.rstate = self.rsimulator.new_game()",
"def reset_state_history(self):\n self.state_history = []",
"def undo(self):\n if (0 == len(self._undoStack)):\n raise ValueError(\"Nothing to undo\")\n else:\n self._redoStack.append(self.gameState())\n\n lastGameState = self._undoStack.pop()\n self.counter = lastGameState[\"counter\"]\n self.wonRounds = lastGameState[\"wonRounds\"]\n self.wonGames = lastGameState[\"wonGames\"]\n self.currentMaxPoints = lastGameState[\"currentMaxPoints\"]\n self.sidesChanged = lastGameState[\"sidesChanged\"]\n self.playerPositions = lastGameState[\"playerPositions\"]\n self.servePosition = lastGameState[\"servePosition\"]",
"def _push_history(self):\n self._history.append(self._state)",
"def load_game(self, path):\n temp_stack = self.state_stack\n try:\n file = open(path, 'rb')\n self.state_stack = pic.load(file)\n for i in self.state_stack.states:\n i.on_load()\n del temp_stack\n except IOError or pic.UnpicklingError as e:\n print(\"Game load error: {}\".format(e))\n self.state_stack = temp_stack",
"def back(self,**kwargs):\n self.mm.loadPreviousMenu()",
"def restore(self, key, history):\n self.goal, used = key\n self._used = []\n for row in used:\n self._used.append(list(row))\n self.history = list(history)",
"def __reset(self):\n self.game_moves_history = []\n self.player_id = None",
"def step_back(self):\n if len(self.history) > 0:\n (\n self.round,\n r_raised,\n self.game_pointer,\n self.round_counter,\n d_deck,\n self.public_card,\n self.players,\n ps_hand,\n ) = self.history.pop()\n self.round.raised = r_raised\n self.dealer.deck = d_deck\n for i, hand in enumerate(ps_hand):\n self.players[i].hand = hand\n return True\n return False",
"def storeState(self):\n\n self.action_history[self.trial] = self.action\n self.ball_history[self.trial] = self.ballcolor",
"def save_state(self):\n # add (turn number, active player, player 1, player 2) to game history\n # player 1 and player 2 contain data about active mods\n turn_number = self.turn_number\n player_1 = Class.copy_monster(self.player1)\n player_2 = Class.copy_monster(self.player2)\n # save which player's turn it is\n if self.current_player == self.player1:\n active_player = 'player 1'\n else:\n active_player = 'player 2'\n\n # add this information to history list\n self.history.append((turn_number, active_player, player_1, player_2))",
"def get_state(self):\n return self.history",
"def redo(self):\n if (0 == len(self._redoStack)):\n raise ValueError(\"Nothing to redo\")\n else:\n self._undoStack.append(self.gameState())\n\n nextGameState = self._redoStack.pop()\n self.counter = nextGameState[\"counter\"]\n self.wonRounds = nextGameState[\"wonRounds\"]\n self.wonGames = nextGameState[\"wonGames\"]\n self.currentMaxPoints = nextGameState[\"currentMaxPoints\"]\n self.sidesChanged = nextGameState[\"sidesChanged\"]\n self.playerPositions = nextGameState[\"playerPositions\"]\n self.servePosition = nextGameState[\"servePosition\"]",
"def loadState(self):\n\t\tif not path.exists(STATEFILE):\n\t\t\tprint \"No previous statefile, assuming first run\"\n\t\t\tself.state['lastrun'] = datetime.datetime.now()-datetime.timedelta(days=365)\n\t\telse:\n\t\t\tsfile = open(STATEFILE,'r')\n\t\t\tself.state = cPickle.load(sfile)\n\t\tself.lastrun = self.state['lastrun']",
"def get_game_history(self, request):\n return games_ctrl.get_game_history(request.urlsafe_game_key)",
"def GetHistory(index=0):\n if index == \"clear\":\n state_mgr.entire_history = []\n else:\n print state_mgr.entire_history[int(index):]",
"def get_game_history(self, req):\n return models.BattleShip.getByUrlKey(req.url_key).getHistory()",
"def load_from_snapshot(self, when):\n self.state_manager_.restore(when)",
"def get_game_history(self, request):\n game = get_by_urlsafe(request.urlsafe_game_key, Game)\n if game:\n return HistoryForm(items=game.history)\n else:\n raise endpoints.NotFoundException('Game not found!')",
"def load_(self):\n path = os.path.join(os.path.dirname(self.arch_handler.dicomdir_path), self.SAVE_NAME)\n if not os.path.isfile(path):\n print(\"No history to load\")\n return\n with open(path, \"r\") as infile:\n data = json.load(infile)\n self.load(data['history'])\n self._edited = False",
"def reload(self):\n self.restore()",
"def load_state(self):\n return self.state.read()",
"def get_history(self):\n\t\t#state = (np.array(self._history['state'])).rehsape(\n\t\tself._history['state'] = (np.squeeze(self._history['state']))\n\t\treturn self._history",
"def get_game_history(self, request):\n game = get_by_urlsafe(request.urlsafe_key, Game)\n\n if not game:\n raise endpoints.NotFoundException('Game Not Found!')\n else:\n return StringMessage(message=\"Moves Made: %s\" % game.history)",
"def back(self):\n self.clearScreen()\n from screen1 import Screen1\n Screen1(self.parent, self.store)",
"def load_history(self, *args):\n slname = 'hpssic.' + self.name + '_sublib'\n if slname in sys.modules:\n S = sys.modules[slname]\n else:\n S = __import__(slname, fromlist=[slname])\n S.load_history(*args)",
"def get_game_history(self, request):\n game = get_by_urlsafe(request.urlsafe_game_key, Game)\n if not game:\n raise endpoints.NotFoundException('Game not found')\n return StringMessage(message=str(game.history))",
"def pop(self):\n return self.history.pop()",
"def history():"
] | [
"0.6477058",
"0.63318163",
"0.62972474",
"0.62654704",
"0.6068052",
"0.60675156",
"0.60423017",
"0.5992579",
"0.5983971",
"0.5978606",
"0.59785825",
"0.59690744",
"0.5935602",
"0.5877124",
"0.5856809",
"0.5821736",
"0.5818696",
"0.58089757",
"0.5758095",
"0.57310545",
"0.57208294",
"0.5706508",
"0.5704825",
"0.5698815",
"0.5673888",
"0.56640124",
"0.5641748",
"0.56411004",
"0.564062",
"0.56367296"
] | 0.69553375 | 0 |
Same thing as Array.__getitem__, but returns None if coordinates are not within array dimensions. | def _get_none(self, x, y):
try:
return self[x, y]
except ArrayError:
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __getitem__(self, index):\n x, y = index\n if 0 <= x < self.width and 0 <= y < self.height:\n return self.cells[x + y * self.width]\n else:\n return None",
"def __getitem__(self, pos):\n if (self.master.__class__.__name__ == 'OneDimGrid') or (issubclass(self.master.__class__, OneDimGrid)):\n return self._getitem_1d(pos)\n if (self.master.__class__.__name__ == 'Grid') or (issubclass(self.master.__class__, Grid)):\n return self._getitem_2d(pos)\n else:\n return None",
"def __getitem__(self, pos):\n if (self.master.__class__.__name__ == 'OneDimGrid') or (issubclass(self.master.__class__, OneDimGrid)):\n return self._getitem_1d(pos)\n if (self.master.__class__.__name__ == 'Grid') or (issubclass(self.master.__class__, Grid)):\n return self._getitem_2d(pos)\n else:\n return None",
"def __getitem__(self, index):\n if index == Ellipsis:\n index = tuple(self.dim*[slice(None)])\n\n if len(index) < self.dim:\n # --- Add extra dims to index if needed\n index = list(index)\n for i in range(len(index), self.dim):\n index.append(slice(None))\n index = tuple(index)\n\n if self.dim == 2:\n return self._getitem2d(index)\n elif self.dim == 3:\n return self._getitem3d(index)",
"def __getitem__(self, index):\n #Check to see whether or not the index is within the array's element range.\n if index >= 0 and index < len(self):\n return self._items[index]\n\n return None",
"def __getitem__(self, index_tuple):\n assert len(index_tuple) == 2, \"Invalid number of array subscripts.\"\n row, col = index_tuple\n assert 0 <= row < self.num_rows() and 0 <= col < self.num_cols(), \\\n \"Array subscript out of range.\"\n array_1d = self.rows[row]\n return array_1d[col]",
"def __getitem__(self, pos):\n if isinstance(pos, (int, np.integer)):\n return self.elements[pos % self.shape[0]]\n elif isinstance(pos, slice):\n return self.__getitem__(pos=_conv_slice_to_list(slice_obj=pos, stop_def=self.shape[0]))\n elif isinstance(pos, np.ndarray):\n return np.array([self.__getitem__(pos=p) for p in pos])\n elif isinstance(pos, list):\n return [self.__getitem__(pos=p) for p in pos]\n elif pos is None:\n return None\n else:\n raise TypeError(\"CircularMapping received an invalid index:%s\" % str(pos))",
"def __getitem__(self, idx):\n row, col = idx\n\n if row < 0 or row >= self.num_rows:\n raise IndexError(\"Row out of bounds\")\n\n if col < 0 or col >= self.num_cols:\n raise IndexError(\"Col out of bounds\")\n\n array_row = self._find_row_before(row)\n array_row = array_row.next_row\n if array_row == None:\n return self.default\n if array_row.row_number > row:\n return self.default\n\n array_entry = self._find_column_before(array_row, col)\n array_entry = array_entry.next_entry\n if array_entry == None:\n return self.default\n if array_entry.column_number > col:\n return self.default\n return array_entry.value",
"def _getitem_1d(self, pos):\n # Check if pos has multiple elements.\n if isinstance(pos, OneDimGrid):\n return self._getitem_1d(pos=pos.elements)\n elif isinstance(pos, slice):\n return self._getitem_1d(_conv_slice_to_list(slice_obj=pos, stop_def=self.master.shape[0]))\n elif isinstance(pos, np.ndarray):\n return self._getitem_1d(pos.tolist())\n elif isinstance(pos, list):\n return np.array([self._getitem_1d(p) for p in pos])\n elif pos is None:\n raise TypeError(\"_AbsToPhyConverter._getitem_1d does not accept None as its input.\")\n else:\n # pos is a single element.\n for i, e in np.ndenumerate(self.master.elements):\n if (pos - e) % self.master.width == 0:\n return int(round((pos - e) / self.master.width)) * self.master.elements.shape[0] + i[0]\n return None # no matched coordinate",
"def __getitem__(self, key):\n if key in ('x','y','z'):\n return self.asDict()[key]\n else:\n return self.coords.__getitem__(key)",
"def __getitem__(self, idx):\n return None",
"def __getitem__(self, index):\n if index == 0:\n return self.x\n elif index == 1:\n return self.y\n raise IndexError",
"def __getitem__(self, k):\n return self._coords[k]",
"def __getitem__(self, idx):\n return self.GetArray(idx)",
"def __getitem__(self, idx):\n return self.GetArray(idx)",
"def get(self, pos_x: int, pos_y: int):\n\n\t\treturn self.shape[pos_y][pos_x]",
"def __getitem__(self, inds):\n i, j = inds\n return self.array[i][j]",
"def get_cell(self, x, y):\n if y < 0 or y >= len(self.g): return None\n if x < 0 or x >= len(self.g[y]): return None\n return self.g[y][x]",
"def __getitem__(self, j):\n\t\treturn self._coords[j]",
"def __getitem__(self, n):\n return self._array[n]",
"def __getitem__(self, key):\n try:\n return self._get_slice(self.data_array, key)\n except KeyError:\n return self.read(bls=key)[0][key]",
"def __getitem__(self, item):\n if isinstance(item, slice):\n start = item.start or 0\n stop = item.stop if item.stop is not None else len(self.data)\n stop = min(stop, len(self.data))\n if stop - start == 0:\n return type(self)(xnd.xnd([], type=self.data.type))\n\n elif isinstance(item, Iterable):\n if not is_array_like(item):\n item = np.array(item)\n if is_integer_dtype(item):\n return self.take(item)\n elif is_bool_dtype(item):\n indices = np.array(item)\n indices = np.argwhere(indices).flatten()\n return self.take(indices)\n else:\n raise IndexError(\n \"Only integers, slices and integer or boolean \\\n arrays are valid indices.\"\n )\n\n elif is_integer(item):\n if item < 0:\n item += len(self)\n if item >= len(self):\n return None\n else:\n\n return self.data[item]\n\n value = self.data[item]\n return type(self)(value)",
"def __call__(self, *args):\n return args[self.i_dim]",
"def __getitem__(self, key: Tuple) -> np.array:\n # If the user has requested XYZ mode, the first thing to do is reverse\n # the array indices. Then you can continue this fn without any\n # additional changes.\n if self.axis_order == AxisOrder.XYZ:\n key = (key[2], key[1], key[0])\n\n # Next, we need to get the shape of the dataset. We do this currently\n # by getting the coordinate frame, which means that we need the\n # coordframe data and experiment data if we don't have it already. In\n # the future, we may also want to allow the user to specify general\n # shape information so that we can avoid calling the API.\n\n # Populate the experiment metadata if unset:\n if self._exp is None:\n self._populate_exp()\n\n # Populate the coordinate frame metadata if not yet set:\n if self._coord_frame is None:\n self._populate_coord_frame()\n\n # Now we can begin. There is a wide variety of indexing options\n # available, including single-integer indexing, tuple-of-slices\n # indexing, tuple-of-int indexing...\n\n # First we'll address if the user presents a single integer.\n # ```\n # my_array[500]\n # ```\n # In this case, the user is asking for a single Z slice (or single X\n # slice if in XYZ order... But that's a far less common use case.)\n # We will get the full XY extents and download a single 2D array:\n if isinstance(key, int):\n # Get the full Z slice:\n xs = (0, self.shape[2])\n ys = (0, self.shape[1])\n zs = (key, key + 1)\n else:\n # We also support indexing with units. For example, you can ask for\n # ```\n # my_array[0:10, 0:10, 0:10, \"nanometers\"]\n # ```\n # which will download as many pixels as are required in order to\n # download 10nm in each dimension. We do this by storing a\n # \"normalized units\" measure which is a rescale factor for each\n # dimension (in the same order, e.g. ZYX, as the array).\n _normalize_units = (1, 1, 1)\n if isinstance(key[-1], str) and len(key) == 4:\n if key[-1] != self._coord_frame.voxel_unit:\n raise NotImplementedError(\n \"Can only reference voxels in native size format which is \"\n f\"{self._coord_frame.voxel_unit} for this dataset.\"\n )\n _normalize_units = self.voxel_size\n\n # We will now do the following codeblock three times, for X,Y,Z:\n # First, we check to see if this index is a single integer. If so,\n # the user is requesting a 2D array with zero depth along this\n # dimension. 
For example, if the user asks for\n # ```\n # my_data[0:120, 0:120, 150]\n # ```\n # Then \"150\" suggests that the user just wants one single X slice.\n if isinstance(key[2], int):\n xs = (key[2], key[2] + 1)\n else:\n # If the key is a Slice, then it has .start and .stop attrs.\n # (The user is requesting an array with more than one slice\n # in this dimension.)\n start = key[2].start if key[2].start else 0\n stop = key[2].stop if key[2].stop else self.shape[0]\n\n start = int(start / _normalize_units[0])\n stop = int(stop / _normalize_units[0])\n\n # Cast the coords to integers (since Boss needs int coords)\n xs = (int(start), int(stop))\n\n # Do the same thing again for the next dimension: Either a single\n # integer, or a slice...\n if isinstance(key[1], int):\n ys = (key[1], key[1] + 1)\n else:\n start = key[1].start if key[1].start else 0\n stop = key[1].stop if key[1].stop else self.shape[1]\n\n start = start / _normalize_units[1]\n stop = stop / _normalize_units[1]\n\n ys = (int(start), int(stop))\n\n # Do the same thing again for the last dimension: Either a single\n # integer, or a slice...\n if isinstance(key[0], int):\n zs = (key[0], key[0] + 1)\n else:\n start = key[0].start if key[0].start else 0\n stop = key[0].stop if key[0].stop else self.shape[2]\n\n start = start / _normalize_units[2]\n stop = stop / _normalize_units[2]\n\n zs = (int(start), int(stop))\n\n # Finally, we can perform the cutout itself, using the x, y, and z\n # coordinates that we computed in the previous step.\n cutout = self.volume_provider.get_cutout(\n self._channel, self.resolution, xs, ys, zs\n )\n\n # Data are returned in ZYX order:\n if self.axis_order == AxisOrder.XYZ:\n data = np.rollaxis(np.rollaxis(cutout, 1), 2)\n elif self.axis_order == AxisOrder.ZYX:\n data = cutout\n\n # If any of the dimensions are of length 1, it's because the user\n # requested a single slice in their key; flatten the array in that\n # dimension. For example, if you request `[10, 0:10, 0:10]` then the\n # result should be 2D (no Z component).\n _shape = data.shape\n if _shape[0] == 1:\n data = data[0, :, :]\n if _shape[1] == 1:\n data = data[:, 0, :]\n if _shape[2] == 1:\n data = data[:, :, 0]\n return data",
"def __getitem__(self, index):\n if self._list_like(index):\n len_var = len(index)\n if len_var==0:\n raise IndexError(\"Received empty index.\")\n elif len_var==1:\n return self._points[index[0]]\n elif len_var==2:\n return self._points[index[0]][index[1]]\n else:\n raise IndexError(\"Received too long index.\")\n return self._points[index]",
"def _get_axis(array, axis_num, i):\n if array.ndim == 2:\n if axis_num == 0:\n return array[i, :]\n elif axis_num == 1:\n return array[:, i]\n else:\n return None\n elif array.ndim == 3:\n if axis_num == 0:\n return array[i, :, :]\n elif axis_num == 1:\n return array[:, i, :]\n elif axis_num == 2:\n return array[:, :, i]\n else:\n return None\n else:\n return None",
"def _get_axis(array, axis_num, i):\n if array.ndim == 2:\n if axis_num == 0:\n return array[i, :]\n elif axis_num == 1:\n return array[:, i]\n else:\n return None\n elif array.ndim == 3:\n if axis_num == 0:\n return array[i, :, :]\n elif axis_num == 1:\n return array[:, i, :]\n elif axis_num == 2:\n return array[:, :, i]\n else:\n return None\n else:\n return None",
"def __getitem__(self, index: int) -> float:\n if index == 0:\n return self.x\n elif index == 1:\n return self.y\n else:\n raise IndexError",
"def __getitem__(self, key):\r\n T=type(key)\r\n if T!=types.IntType and T!=types.LongType:\r\n raise TypeError, \"index must be integer\"\r\n\r\n if key==0: return self.x\r\n elif key==1: return self.y\r\n elif key==2: return self.z\r\n elif key==3: return self.w\r\n else:\r\n raise IndexError,\"index out of range\"",
"def __getitem__(self, index):\n if not (type(index) in MATRIX_VALID_INTS):\n return NotImplemented\n return self._value[index]"
] | [
"0.6713357",
"0.6549222",
"0.6549222",
"0.65480006",
"0.6530882",
"0.6258413",
"0.6224236",
"0.6155776",
"0.5970968",
"0.59563416",
"0.592938",
"0.59288377",
"0.59217894",
"0.5912077",
"0.5912077",
"0.5891601",
"0.58608264",
"0.5762777",
"0.5749256",
"0.57162493",
"0.5684842",
"0.56829166",
"0.5677998",
"0.56763166",
"0.5668597",
"0.5664107",
"0.5664107",
"0.5656688",
"0.56387156",
"0.5591888"
] | 0.69117695 | 0 |
Gets information about the surrounding locations for a specified coordinate. Returns a tuple of the locations clockwise starting from the top. | def _get_surrounding(self, x, y):
coords = (
(x, y - 1),
(x + 1, y),
(x, y + 1),
(x - 1, y),
)
return filter(lambda i: bool(i[0]), [
(self._get_none(a, b), (a, b))
for a, b in coords
]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_allowed_positions(coordXY, grid):\n\n\tsurrounding_coord = []\n\ttesting_coord = []\n\n\t# Get the coordinates of the external square\n\tfor i in range(coordXY[0] - 1, coordXY[0] + 2, 2):\n\t\tfor j in range(coordXY[1] - 1, coordXY[1] +2, 1):\n\t\t\tif (i,j) == coordXY:\n\t\t\t\tpass\n\t\t\telif i < 0 or j < 0:\n\t\t\t\tsurrounding_coord.append('None')\n\t\t\telse:\n\t\t\t\tsurrounding_coord.append((i,j))\n\n\t# Get the coordinates of the internal square\n\tfor i in range(coordXY[0] - 2, coordXY[0] + 3, 4):\n\t\tfor j in range(coordXY[1] - 2, coordXY[1] + 3, 2):\n\t\t\tif i < 0 or j < 0 or i > 7 or j > 7:\n\t\t\t\ttesting_coord.append('None')\n\t\t\telse:\n\t\t\t\ttesting_coord.append((i,j))\n\n\t# Get the position of Bottom and Top of the 2 squares\n\tTC = [(coordXY[0], coordXY[1] + 2), (coordXY[0], coordXY[1] - 2)]\n\tfor elem in TC:\n\n\t\tif elem[0] not in range(8) or elem[1] not in range(8):\n\t\t\ttesting_coord.append('None')\n\t\telse:\n\t\t\ttesting_coord.append(elem)\n\n\n\tSC = [(coordXY[0], coordXY[1] + 1), (coordXY[0], coordXY[1] - 1)]\n\tfor elem in SC:\n\t\tif elem[0] not in range(8) or elem[1] not in range(8):\n\t\t\tsurrounding_coord.append('None')\n\t\telse:\n\t\t\tsurrounding_coord.append(elem)\n\n\treturn testing_coord, surrounding_coord",
"def get_surroundings(matrix, coord):\n width = np.shape(matrix)[0]\n height = np.shape(matrix)[1]\n coordinates = []\n\n # top\n (\n coordinates.append((coord[0], coord[1] - 1))\n if coord[1] - 1 >= 0\n else None\n )\n # bottom\n (\n coordinates.append((coord[0], coord[1] + 1))\n if coord[1] + 1 < height\n else None\n )\n # left\n (\n coordinates.append((coord[0] - 1, coord[1]))\n if coord[0] - 1 >= 0\n else None\n )\n # right\n (\n coordinates.append((coord[0] + 1, coord[1]))\n if coord[0] + 1 < width\n else None\n )\n\n return coordinates",
"def neighbours_of_position(coords):\n row = coords[0]\n col = coords[1]\n \n #assign each of neighbours corrds\n #top left to top rigt\n top_left = (row - 1, col - 1)\n top_center = (row - 1, col)\n top_right = (row - 1, col + 1)\n \n # left to right (center)\n left = (row, col - 1)\n # the (row, col) cordinates passed into this function are situated here\n right = (row, col + 1)\n \n #bottom-left to bottom-right\n bottom_left = (row +1, col -1)\n bottom_center = (row +1, col)\n bottom_right = (row +1, col +1)\n \n return [top_left, top_center, top_right,\n left , right ,\n bottom_left, bottom_center, bottom_right]",
"def get_corners(self):\n lon, lat = self.coordinates\n \n ll = (lon[0][0],lat[0][0])\n ul = (lon[-1][0],lat[-1][0])\n ur = (lon[-1][-1],lat[-1][-1])\n lr = (lon[0][-1],lat[0][-1])\n\n return (ll, ul, ur, lr)",
"def neighbours_of_position(coords):\n row = coords[0]\n col = coords[1]\n \n #Assign each of the neighbours\n # Top-left to the top-right\n top_left = (row - 1, col - 1)\n top_center = (row - 1, col)\n top_right = (row - 1, col + 1)\n \n # Left to right\n left = (row, col - 1)\n # The '(row, col)' coordinates passed to this\n # function are situated here\n right = (row, col + 1)\n \n # Bottom-left to bottom-right\n bottom_left = (row + 1, col - 1)\n bottom_center = (row + 1, col)\n bottom_right = (row + 1, col + 1)\n \n return [top_left, top_center, top_right,\n left, right,\n bottom_left, bottom_center, bottom_right]",
"def _get_neighbours(point):\n # Pull coords out of point.\n x = point[0]\n y = point[1]\n z = point[2]\n return ((x-1, y, z), (x+1, y, z), (x, y-1, z), (x, y+1, z), (x, y, z-1), (x, y, z+1))",
"def sw_corner(self):\n return (self.min_lat, self.min_lon)",
"def _coord(self, x, y):\n gridEdge = 7 # originally 5\n y = gridEdge - y\n cx = 100 * (x - 1) + 50\n cy = 100 * (y - 1) + 50\n r = 20\n return (cx - r, cy - r, cx + r, cy + r)",
"def get_neighbours(coords):\n\n dxdy = [(-1,-2),(0,-2),(1,-2),(-2,-1),(-1,-1),(0,-1),(1,-1),(2,-1),\n (-2,0),(-1,0),(1,0),(2,0),(-2,1),(-1,1),(0,1),(1,1),(2,1),\n (-1,2),(0,2),(1,2),(0,0)]\n neighbours = []\n for dx, dy in dxdy:\n neighbour_coords = coords[0] + dx, coords[1] + dy\n if not (0 <= neighbour_coords[0] < nx and\n 0 <= neighbour_coords[1] < ny):\n # We're off the grid: no neighbours here.\n continue\n neighbour_cell = cells[neighbour_coords]\n if neighbour_cell is not None:\n # This cell is occupied: store this index of the contained point.\n neighbours.append(neighbour_cell)\n return neighbours",
"def get_near_positions(self, position: tuple):\n\n return ((x, y) for x, y in (\n (position[0], position[1] + 1),\n (position[0], position[1] - 1),\n (position[0] + 1, position[1]),\n (position[0] - 1, position[1])\n ) if 0 <= x < self._map_height and 0 <= y < self._map_width)",
"def get_neighbours_8(x, y):\n return [(x - 1, y - 1), (x, y - 1), (x + 1, y - 1), \\\n (x - 1, y), (x + 1, y), \\\n (x - 1, y + 1), (x, y + 1), (x + 1, y + 1)]",
"def get_neighbours(x, y, board):\n return [get_left(x, y, board), get_upper(x, y, board), get_right(x, y, board), get_lower(x, y, board)]",
"def find_obstacle_loc(self, obstacle_list):\n\n x_obst = []\n y_obst = []\n #x_obst_append = x_obst.append\n #y_obst_append = y_obst.append\n locs = []\n\n for x in obstacle_list:\n if x < self.width:\n x_obst.append(x*self.resolution + self.resolution/2)\n else:\n x_obst.append((x % self.width)*self.resolution + self.resolution/2)\n\n for y in obstacle_list:\n y_obst.append((y/self.width)*self.resolution + self.resolution/2)\n\n locs = map(lambda x: x, zip(x_obst, y_obst))\n\n return(locs)",
"def inside_square(self, x, y):\n square_centers = self.get_square_centers()\n for i, row in enumerate(square_centers):\n for j, (square_x, square_y) in enumerate(row):\n\n if (square_x - self.square_width_half < x < square_x + self.square_width_half and\n square_y - self.square_width_half < y < square_y + self.square_width_half):\n\n return (i, j), (float(square_x), float(square_y))\n\n return None, None",
"def check_nieghbours(self, coordinate):\n row = coordinate[0]\n cols = coordinate[1]\n member = self.cells[row, cols]\n top = self.cells[self.d_index(row), cols]\n bottom = self.cells[self.i_index(row), cols]\n right = self.cells[row, self.i_index(cols)]\n left = self.cells[row, self.d_index(cols)]\n nieghbours = [left, right, top, bottom]\n return member, nieghbours",
"def which_position(location, some_number):\n xpos, ypos = location\n # Is this location in a corner?\n upper_r = xpos == 4 and ypos == 4\n lower_l = xpos == 1 and ypos == 1\n others = (1 in location and some_number in location)\n if upper_r or lower_l or others:\n return 2 # CORNER. 2- Neighbors.\n # Is this location a non-edge (by priority) corner?\n elif 1 in location or some_number in location:\n return 3 # EDGE. 3- Neighbors.\n # This is not a corner or an edge.\n else:\n return 4 # Else 4- Neighbors.",
"def get_coordinates(self):\n x_houses = []\n y_houses = []\n\n x_batt = []\n y_batt = []\n\n # turn dict to list so we can iterate through\n houses_list = list(self.houses.values())\n batteries_list = list(self.batteries.values())\n\n # for every house save coordinates to lists\n for house in houses_list:\n x_houses.append(house.x)\n y_houses.append(house.y)\n\n # for every battery save coordinates to lists\n for battery in batteries_list:\n x_batt.append(battery.x)\n y_batt.append(battery.y)\n\n return x_houses, y_houses, x_batt, y_batt",
"def get_current_edges(self) -> Tuple[int, int, int, int]:\n top = int(self.tile_rows[0], 2)\n bottom = int(self.tile_rows[-1], 2)\n left = int(''.join([r[0] for r in self.tile_rows]), 2)\n right = int(''.join([r[-1] for r in self.tile_rows]), 2)\n\n return (top, bottom, left, right)",
"def locationByCoordinate(latitude, longitude) :\n geoLoc = Nominatim(user_agent=\"GetLoc\")\n coordinateString = f\"{latitude}, {longitude}\"\n locationCoordinates = geoLoc.reverse(coordinateString)\n return locationCoordinates.address",
"def neighbors8(point):\n x, y = point\n return ((x + 1, y), (x - 1, y), (x, y + 1), (x, y - 1),\n (x + 1, y + 1), (x - 1, y - 1), (x + 1, y - 1), (x - 1, y + 1))",
"def get_neighbours(lat, long):\n # ns = north east, ew = east west (ratio between 1 feet and degree) \n # its different on diferent places on earth (sphere)!!\n ns = 0.0025\n ew = 0.0025\n walk = []\n for i in range(-2, 3):\n for j in range(-2, 3):\n thiscell = CellId.from_lat_lng(LatLng.from_degrees(lat + ns*i, long + ew*j)).parent(S2_CELL_LEVEL)\n if abs(i * j) < 4:\n walk.append(thiscell.id())\n return sorted(walk)",
"def get_cell(self, business):\n x = self.longitudes.searchsorted(business.longitude) - 1\n y = self.latitudes.searchsorted(business.latitude) - 1\n return x, y",
"def check_neighbours(coordinates):\n x_coord = coordinates[0]\n y_coord = coordinates[1]\n coordinates_value = 0\n for x_move in [-1, 0, 1]:\n x = x_coord + x_move\n for y_move in [-1, 0, 1]:\n y = y_coord + y_move\n try:\n value = grid[(x,y)]\n coordinates_value += value\n except KeyError:\n pass\n\n grid[coordinates] = coordinates_value\n # print(coordinates_value)\n return coordinates_value",
"def locate_point(self, coord):\n lowest_lat = self.lower_left[0]\n leftmost_lng = self.lower_left[1]\n dist_lat = utils.haversine((coord[0], leftmost_lng), self.lower_left)*1000 # in meters\n dist_lng = utils.haversine((lowest_lat, coord[1]), self.lower_left)*1000 # in meters\n grid_coord = (floor(dist_lng/self.distance), floor(dist_lat/self.distance))\n if grid_coord in self.cells:\n return grid_coord\n return None",
"def pixelcoord(coordx: float, coordy: float) -> Tuple[int, int]:\n ox, oy = origin()\n x, y = int(round(ox+coordx)), int(round(oy-coordy))\n return (x, y)",
"def get_coordinates(num: int) -> tuple:\r\n return num * math.sin(num), num * math.cos(num)",
"def ne_corner(self):\n return (self.max_lat, self.max_lon)",
"def iter_coords():\n yield (0, 0)\n incr = 0\n x = 1\n y = 0\n\n while True:\n incr += 2\n\n top = y + incr - 1\n bot = y - 1\n left = x - incr\n right = x\n\n yield (x, y)\n while y < top:\n y += 1\n yield (x, y)\n\n while x > left:\n x -= 1\n yield (x, y)\n\n while y > bot:\n y -= 1\n yield (x, y)\n\n while x < right:\n x += 1\n yield (x, y)\n\n x += 1",
"def iter_neighbors(x: int, y: int) -> t.Generator[COORDINATE, None, None]:\n yield x - 1, y\n yield x + 1, y\n yield x, y - 1\n yield x, y + 1",
"def neighbor(self, x, y, direction):\n if direction == Compass.NORTH:\n if y > 0:\n return (x, y-1)\n elif direction == Compass.EAST:\n if x < self.width - 1:\n return (x+1, y)\n elif direction == Compass.SOUTH:\n if y < self.height - 1:\n return (x, y+1)\n else:\n if x > 0:\n return (x-1, y)"
] | [
"0.59596604",
"0.58594525",
"0.582257",
"0.5809534",
"0.57916594",
"0.577245",
"0.56088185",
"0.55754143",
"0.5539363",
"0.55352014",
"0.5510887",
"0.5510782",
"0.54977924",
"0.548646",
"0.5474126",
"0.54684633",
"0.5423838",
"0.5408791",
"0.53864664",
"0.53790015",
"0.5370212",
"0.5353954",
"0.5352728",
"0.534967",
"0.53405565",
"0.5328307",
"0.5321325",
"0.53180593",
"0.5298207",
"0.5286894"
] | 0.6087281 | 0 |
Recursively traverses adjacent locations of the same color to find all locations which are members of the same group. | def _get_group(self, x, y, traversed):
loc = self[x, y]
# Get surrounding locations which have the same color and whose
# coordinates have not already been traversed
locations = [
(p, (a, b))
for p, (a, b) in self._get_surrounding(x, y)
if p is loc and (a, b) not in traversed
]
# Add current coordinates to traversed coordinates
traversed.add((x, y))
# Find coordinates of similar neighbors
if locations:
return traversed.union(*[
self._get_group(a, b, traversed)
for _, (a, b) in locations
])
else:
return traversed | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def grasps_within_pile(color_mask):\n hue_counts, hue_pixels = get_hsv_hist(color_mask)\n\n individual_masks = []\n\n #color to binary\n focus_mask = color_to_binary(color_mask)\n\n #segment by hsv\n for block_color in hue_counts.keys():\n #same threshold values for number of objects\n if hue_counts[block_color] > cfg.SIZE_TOL:\n valid_pix = hue_pixels[block_color]\n obj_mask = focus_mask.mask_by_ind(np.array(valid_pix))\n individual_masks.append(obj_mask)\n if len(individual_masks) > 0:\n obj_focus_mask = individual_masks[0]\n for im in individual_masks[1:]:\n obj_focus_mask += im\n\n #for each hsv block, again separate by connectivity\n all_groups = []\n for i, obj_mask in enumerate(individual_masks):\n groups = get_cluster_info(obj_mask)\n\n for group in groups:\n #matches endpoints of line in visualization\n cm = group.cm\n d = group.dir\n\n grasp_top = cm + d * cfg.LINE_SIZE/2\n grasp_bot = cm - d * cfg.LINE_SIZE/2\n if is_valid_grasp(grasp_top, obj_focus_mask) and is_valid_grasp(grasp_bot, obj_focus_mask):\n all_groups.append(group)\n\n return all_groups",
"def solve(arr, pos, color):\n i = 0\n same_color = [pos]\n while i < len(same_color):\n for j in get_neighbors(arr, same_color[i], arr[pos[0]][pos[1]]):\n if j not in same_color:\n same_color.append(j)\n i += 1\n for i in same_color:\n arr[i[0]][i[1]] = color\n return arr",
"def locate_neighbors(grouped, row, column, width, height, reach):\n neighbors = []\n for row_val in range(2*int(reach) + 1):\n for col_val in range(2*int(reach) + 1):\n row_final = row - int(reach) + row_val\n col_final = column - int(reach) + col_val\n if col_final == column and row_final == row:\n continue\n if col_final >= width or col_final < 0:\n continue\n if row_final >= height or row_final < 0:\n continue\n row_num = (row_final * width) + col_final\n final_int = grouped[row_num][0]\n neighbors.append(final_int)\n return neighbors",
"def get_groups(board: numpy.ndarray, player: int) -> List[Group]:\n # Generate couples\n # Array of (p1, p2, x) where x = -1 if p1 == p2, 0 if p1 and p2 are close and 1 if they are close\n couples = []\n size = board.shape[0]\n for i in range(1, size - 1):\n for j in range(1, size - 1):\n if board[i, j] == player:\n l0 = [(i + x, j + y) for x, y in NEIGHBORS_1]\n l1 = [(i + x, j + y) for x, y in NEIGHBORS_2]\n for p in l0 + l1 + [(i, j)]:\n corner = all([x in [0, size - 1] for x in p])\n if 0 <= p[0] < size and 0 <= p[1] < size and board[p] == player and not corner:\n if p == (i, j):\n couples.append(((i, j), p, -1))\n elif p in l0:\n couples.append(((i, j), p, 0))\n else:\n p1, p2 = get_common_neighbours((i, j), p)\n if player not in [board[p1], board[p2]] and (board[p1] == -1 and board[p2] == -1):\n couples.append(((i, j), p, 1))\n\n # Group couples\n groups = [[k] for k in couples]\n\n def fusion(f_groups):\n for group1 in f_groups:\n for group2 in f_groups:\n if group1 != group2:\n for c1 in group1:\n for c2 in group2:\n if c1[0] == c2[0] or c1[0] == c2[1] or c1[1] == c2[0] or c1[1] == c2[1]:\n group1.extend(group2)\n f_groups.remove(group2)\n return True\n return False\n\n while fusion(groups):\n pass\n\n return groups",
"def group_adjacents(group, board, filter_by=None):\n liberties = set([])\n for location in group:\n if filter_by == \"None\":\n liberties |= xy_adjacents(location, board, filter_by=\"None\")\n elif filter_by == \"friend\":\n liberties |= xy_adjacents(location, board, filter_by=\"friend\")\n elif filter_by == \"foe\":\n liberties |= xy_adjacents(location, board, filter_by=\"foe\")\n else:\n liberties |= xy_adjacents(location, board)\n liberties -= group\n return liberties",
"def get_groups(nodes):\n return list(set([node.color for node in nodes]))",
"def get_neighbors(loc):\n dim = len(loc)\n offsets = product((-1, 0, 1), repeat=dim)\n neighbors = set()\n for offset in offsets:\n if offset == (0,) * dim:\n continue\n neighbors.add(tuple(a + b for a, b in zip(loc, offset)))\n return neighbors",
"def get_neighbors(arr, pos, color):\n neighbors = []\n try:\n if arr[pos[0] + 1][pos[1]] == color:\n neighbors.append((pos[0] + 1, pos[1]))\n except IndexError:\n pass\n try:\n if arr[pos[0] - 1][pos[1]] == color:\n neighbors.append((pos[0] - 1, pos[1]))\n except IndexError:\n pass\n try:\n if arr[pos[0] + 1][pos[1]] == color:\n neighbors.append((pos[0] + 1, pos[1]))\n except IndexError:\n pass\n try:\n if arr[pos[0]][pos[1] - 1] == color:\n neighbors.append((pos[0], pos[1] - 1))\n except IndexError:\n pass\n return neighbors",
"def xy_to_group(xy, board):\n group = {xy}\n inspected = set([])\n to_inspect = group - inspected\n while to_inspect:\n for stone in to_inspect:\n inspected.add(stone)\n group |= xy_adjacents(stone, board, filter_by=\"friend\")\n to_inspect = group - inspected\n return group",
"def group_connected(triggers):\n groups = []\n ungrouped = set((x, y, difficulty) for (x, y), difficulty in triggers.items())\n while ungrouped:\n x, y, difficulty = ungrouped.pop()\n pos = x, y\n locs = [pos]\n queue = [pos]\n while queue:\n pos = queue.pop()\n for x, y in HexGrid.neighbours(pos):\n if (x, y, difficulty) in ungrouped:\n ungrouped.discard((x, y, difficulty))\n c = x, y\n locs.append(c)\n queue.append(c)\n groups.append(TriggerArea(locs, difficulty))\n\n groups_by_loc = {}\n for g in groups:\n for l in g.locs:\n groups_by_loc[l] = g\n return groups, groups_by_loc",
"def getGroups(self):\n groups_ = {'black': [], 'white': []}\n for color, stones in self.stones.items():\n if not stones: continue\n # (group_labels) is a parallel array to (stones). Where each value is an\n # int and each int value represents a group. Examples:\n # [1, 1] = 1 group: 1 group of 2 stones\n # [1, 1, 2] = 2 groups: 1 group of 2 stones and 1 group of 1 stone\n # [1, 1, 2, 3] = 3 groups: 1 group of 2 stones, 1 group of 1 stone, and 1 group of 1 stone\n group_labels = [0] * len(stones)\n\n new_label = 1\n for i, stone in enumerate(stones):\n # Assign new label to stone, if stone has yet to be labelled.\n if group_labels[i] == 0:\n group_labels[i] = new_label\n new_label += 1\n # Inner loop compares outer loop (stone) with all other (stones).\n for other_i, other_stone in enumerate(stones):\n if i == other_i: continue\n if stone.isNeighbor(other_stone):\n # If inner loop stone has yet to be labelled, then inner loop stone is\n # labelled with outer loop stones label.\n if group_labels[other_i] == 0:\n group_labels[other_i] = group_labels[i]\n # If inner loop stone has already been labelled, then all stones previously\n # labelled with outer loop stone's label, get their labels reassigned to the\n # inner loop stone's label.\n else:\n new_labels = []\n for ga in group_labels:\n if ga == group_labels[i]: new_labels += [ group_labels[other_i] ]\n else: new_labels += [ ga ]\n group_labels = new_labels\n # (groups_) are created now that (group_labels) has been generated.\n for master_label in range(max(group_labels)):\n master_label += 1\n stones_to_group = []\n for i, label in enumerate(group_labels):\n if master_label == label:\n stones_to_group += [ self.stones[color][i] ]\n groups_[color] += [ Group(self, stones_to_group) ]\n return groups_",
"def xy_to_captures(xy, color, board):\n captures = set([])\n for adj in xy_adjacents(xy, board, \"foe\", color):\n potential_captured_group = xy_to_group(adj, board)\n captured_groups_adjacents = group_adjacents(potential_captured_group, board, filter_by=\"None\")\n if len(captured_groups_adjacents) <= 1:\n captures |= potential_captured_group\n return captures",
"def nearby():\n for i in ids:\n for j in ids:\n if i != j:\n if sum([1 for x,y in zip(i,j) if x!=y]) == 1:\n print(\"\".join([x for x,y in zip(i,j) if x==y]))\n return",
"def locations_adjacent_to(loc):\n return [(loc[0] + direction[0], loc[1] + direction[1]) for direction in [(0,-1),(0,1),(-1,0),(1,0)]]",
"def color_group(max_range):\n\n color = []\n\n for _ in range(0, max_range):\n col = []\n col.append(random.random() % 1)\n col.append(random.random() % 1)\n col.append(random.random() % 1)\n color.append(col)\n\n dist_table = []\n\n for idx in range(0, max_range):\n dist_table.append([color_distance(color[idx], x) for x in color[:]])\n\n for _ in range(0, 50):\n for idx_start in range(0, max_range):\n global_point_distance = sum(dist_table[idx_start])\n tmp_dist_table = dist_table[idx_start][:]\n tmp_table = color[:]\n for idx_end in range(0, max_range):\n tmp_table[idx_end] = mutate_color(color[idx_end])\n tmp_dist_table[idx_end] = color_distance(\n color[idx_start],\n color[idx_end])\n if sum(tmp_dist_table) > global_point_distance:\n dist_table[idx_start] = tmp_dist_table[:]\n color = tmp_table[:]\n\n #for index in range(0, len(color)):\n # color[index] = hls_to_rgb(\n # color[index][0],\n # color[index][1],\n # color[index][2])\n\n return color",
"def get_neighbors(self, node):\r\n neighbors = set()\r\n for neighbor in ORTHOGONAL_POSITIONS[(node.pos[0], node.pos[1])]:\r\n if self.board[neighbor[0]][neighbor[1]].color == node.color:\r\n neighbors.add(neighbor)\r\n else:\r\n continue\r\n return neighbors",
"def coords_reachable(self, start, distance): # TODO: Accept a lambda that\n # determines blocked or not\n visited = set() # set of hexes\n visited.add(start)\n fringes = list() # array of arrays of hexes\n fringes.append([start])\n\n for idx in range(1, distance+1):\n fringes.append([])\n for coord in fringes[idx-1]:\n for direction in self.dirs:\n neighbor = coord+direction\n if neighbor not in visited: # TODO: add exemptions (impassable)\n #or mandatory neighbors (direct\n #connections)\n visited.add(neighbor)\n fringes[idx].append(neighbor)\n\n return visited",
"def find_image(grouped):\n for _i in grouped:\n _i[0] = _i[0] * 10 #increases value of red components\n if _i[0] > 225:\n _i[0] = 225\n _i[1] = _i[0] #sets green components equal to red\n _i[2] = _i[0] #sets blue components equal to red\n return grouped",
"def nearby_cells(self, cell):\n cells = set()\n\n for i in range(cell[0] - 1, cell[0] + 2):\n for j in range(cell[1] - 1, cell[1] + 2):\n\n if (i, j) == cell:\n continue\n\n if 0 <= i < self.height and 0 <= j < self.width:\n cells.add((i, j))\n\n return cells",
"def fin_pos_all(my_color, opp_color):\r\n for y_pos in range(SIZE):\r\n for x_pos in range(SIZE):\r\n if fin_pos(x_pos, y_pos, my_color, opp_color, False):\r\n return True\r\n return False",
"def searchDeadEnd(self):\n boundaries = []\n if not self.red:\n i = self.midWidth - 1\n else:\n i = self.midWidth + 1\n boudaries = [(i, j) for j in range(self.height)]\n validPositions = []\n for i in boudaries:\n if not (i[0], i[1]) in self.walls:\n validPositions.append(i)\n\n dangerPos = []\n\n toExpand = self.scanmap.twoEntryPoints()\n for (x,y) in toExpand:\n adjacent = self.scanmap.adjacentValidPoints(x, y)\n if not (x,y) in dangerPos:\n for (u, w) in adjacent:\n visited = []\n visited.append((x, y))\n safe = False\n danger = False\n DFS = util.Stack()\n DFS.push((u,w))\n while not safe and not danger:\n (i,j) = DFS.pop()\n visited.append((i,j))\n adjacents = self.scanmap.adjacentValidPoints(i,j)\n for position in adjacents:\n if not position in visited:\n DFS.push(position)\n if DFS.isEmpty():\n danger = True\n dangerPos = list(set(dangerPos) | set(visited))\n\n if (i,j) in validPositions:\n safe = True\n oneEntry = self.scanmap.oneEntryPoints()\n dangerPos = list(set(oneEntry).union(set(dangerPos)))\n dangerPos.sort()\n return dangerPos",
"def find_groups_from_ctypes(self, mesh, ctypes):\n raise NotImplementedError",
"def get_valid_locations(location_list, grid, shape):",
"def _check_neighbors(self):\n for direction, dir_info in self.DIRECTIONS.items():\n pos = Point(\n self.position.x + dir_info[\"mask\"][0],\n self.position.y + dir_info[\"mask\"][1]\n )\n status = self.move(direction)\n self.grid[status].add(pos)\n if status in (1, 2):\n # moved\n self.move(dir_info[\"opposite\"])\n yield pos",
"def get_neighbors(start_square, visited=[]):\n neighbors = []\n\n # loop over possible x values\n for i in [start_square.x - 1, start_square.x, start_square.x + 1]:\n\n # drop neighbors outside of our region of interest\n if i < 0 or i > MAX_X:\n continue\n\n # loop over possible y values\n for j in [start_square.y - 1, start_square.y, start_square.y + 1]:\n\n # drop neighbors outside of our region of interest\n if j < 0 or j > MAX_Y:\n continue\n\n # Ignore ourself\n if i == start_square.x and j == start_square.y:\n continue\n\n # Ignore corner pieces\n if i == start_square.x - 1 and j != start_square.y:\n continue\n if i == start_square.x + 1 and j != start_square.y:\n continue\n\n # Deal with barriers\n found = False\n for square in visited:\n if square.pos == [i, j]:\n found = True\n break\n if found:\n continue\n\n neighbors.append(Square(i, j))\n\n return neighbors",
"def test_iter_color_groups(self):\r\n\r\n obs = iter_color_groups(self.mapping, self.prefs)\r\n obs1 = list(obs)\r\n obs_label = obs1[0][0]\r\n obs_groups = obs1[0][1]\r\n obs_colors = obs1[0][2]\r\n obs_data_colors = obs1[0][3]\r\n obs_data_color_order = obs1[0][4]\r\n\r\n data_colors = color_dict_to_objects(self.data_color_hsv)\r\n\r\n self.assertEqual(obs_label, self.labelname)\r\n self.assertEqual(obs_groups, self.dict)\r\n self.assertEqual(obs_colors, self.colors)\r\n self.assertEqual(obs_data_colors.keys(), data_colors.keys())\r\n\r\n # Need to iterate through color object, since they has different ids\r\n # assigned each time using color_dict_to_objects\r\n for key in data_colors:\r\n self.assertEqual(obs_data_colors[key].toHex(),\r\n data_colors[key].toHex())\r\n\r\n self.assertEqual(obs_data_color_order, self.data_color_order)",
"def find_next_moves(self):\n # iterate through all cells, and group them with upper cells and left\n # cells\n\n # generate separated cells then merge the them with same neighbours\n matrix_rows = len(self.status)\n if matrix_rows == 0:\n matrix_cols = 0\n else:\n matrix_cols = len(self.status[0])\n matrix = []\n for i in range(matrix_rows):\n matrix.append([[(i, j)] for j in range(matrix_cols)])\n # merge coordinations\n for i in range(matrix_rows):\n for j in range(matrix_cols):\n if self.status[i][j] != '':\n # is same with right cell?\n if j < matrix_cols - 1 and self.status[i][j] == self.status[i][j + 1]:\n new_item = matrix[i][j] + matrix[i][j + 1]\n matrix[i][j] = matrix[i][j + 1] = new_item\n # is same with down cell?\n if i < matrix_rows - 1 and self.status[i][j] == self.status[i + 1][j]:\n new_item = matrix[i][j] + matrix[i + 1][j]\n matrix[i][j] = matrix[i + 1][j] = new_item\n\n # filter out all unvalid results\n result = []\n # filter out all single-cell groups\n for i in range(matrix_rows):\n for j in range(matrix_cols):\n if (len(matrix[i][j]) > 1 and\n matrix[i][j] not in result):\n result.append(matrix[i][j])\n\n # filter sublists\n result = sorted(result, key=len, reverse=True)\n changed = True\n while changed:\n changed = False\n for i in range(len(result)):\n for j in range(i + 1, len(result)):\n if set(result[i]).issuperset(set(result[j])):\n result.remove(result[j])\n changed = True\n break\n if changed:\n break\n\n if result:\n for i in result:\n yield (self.convert_coordinations(i),\n len(i) * len(i) * 5,\n self.calc_new_status(i))\n else:\n left_cells = sum([len(i) - i.count('') for i in self.status])\n left_cells_score = 2000 - 20 * left_cells * left_cells\n if left_cells_score < 0:\n left_cells_score = 0\n for i in self.parents:\n i.children[self] = [(i.children[self][0][0] + left_cells_score,\n i.children[self][0][1],\n i.children[self][0][2])]",
"def find_grid(image, frame=False, possible_colors=None):\n grid_color = -1\n size = [1, 1]\n\n if possible_colors is None:\n possible_colors = list(range(10))\n\n for color in possible_colors:\n for i in range(size[0] + 1, image.shape[0] // 2 + 1):\n if (image.shape[0] + 1) % i == 0:\n step = (image.shape[0] + 1) // i\n if (image[(step - 1) :: step] == color).all():\n size[0] = i\n grid_color = color\n for i in range(size[1] + 1, image.shape[1] // 2 + 1):\n if (image.shape[1] + 1) % i == 0:\n step = (image.shape[1] + 1) // i\n if (image[:, (step - 1) :: step] == color).all():\n size[1] = i\n grid_color = color\n\n if grid_color == -1 and not frame:\n color_candidate = image[0, 0]\n if (\n (image[0] == color_candidate).all()\n and (image[-1] == color_candidate).all()\n and (image[:, -1] == color_candidate).all()\n and (image[:, 0] == color_candidate).all()\n ):\n grid_color, size, _ = find_grid(\n image[1 : image.shape[0] - 1, 1 : image.shape[1] - 1], frame=True, possible_colors=[color_candidate]\n )\n return grid_color, size, frame\n else:\n return grid_color, size, frame\n\n return grid_color, size, frame",
"def color_groups(groups, colors, data_color_order):\r\n group_num = -1\r\n for g in natsort(groups):\r\n if g not in colors:\r\n group_num += 1\r\n if group_num == len(data_color_order):\r\n group_num = 0\r\n colors[g] = data_color_order[group_num]",
"def recursive_check(self, current):\n grid = self.ids.grid\n children = grid.children\n own_color = children[current].background_color\n children[current].visited = True\n own_list = [current]\n\n # Get all children next to the current one\n child_top = current - grid.cols\n if child_top < 0:\n child_top = None\n child_bot = current + grid.cols\n if child_bot >= grid.rows * grid.cols:\n child_bot = None\n child_left = None\n child_right = None\n if current % grid.cols > 0:\n child_left = current - 1\n if current % grid.cols < grid.cols - 1:\n child_right = current + 1\n children_next = [child_top, child_bot, child_left, child_right]\n\n # Check if children need to get added to the list\n for child in children_next:\n if child is not None:\n if children[child].background_color == own_color and not children[child].visited:\n own_list.extend(self.recursive_check(child))\n\n return own_list"
] | [
"0.62518436",
"0.6219405",
"0.5994733",
"0.5941772",
"0.57764816",
"0.57504255",
"0.57399064",
"0.56628907",
"0.56418866",
"0.5627706",
"0.56172466",
"0.5478914",
"0.5477339",
"0.54491454",
"0.5443947",
"0.5414975",
"0.536038",
"0.5335464",
"0.5306588",
"0.53047246",
"0.5282622",
"0.5279627",
"0.5275146",
"0.52476746",
"0.52393866",
"0.5216152",
"0.52042645",
"0.51804006",
"0.517497",
"0.5163882"
] | 0.7293468 | 0 |
Kills a group of black or white pieces and returns its size for scoring. | def _kill_group(self, x, y):
if self[x, y] not in self.TURNS:
raise BoardError('Can only kill black or white group')
group = self.get_group(x, y)
score = len(group)
for x1, y1 in group:
self[x1, y1] = self.EMPTY
return score | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def remove(self, pieces):\n for piece in pieces:\n self.board[piece.row][piece.col] = None\n if piece.get_player() is Player.white:\n self.num_white_pieces -= 1\n if piece.is_king():\n self.num_white_kings -= 1\n\n elif piece.get_player() is Player.black:\n self.num_black_pieces -= 1\n if piece.is_king():\n self.num_black_kings -= 1",
"def _take_pieces(self, x, y):\n scores = []\n for p, (x1, y1) in self._get_surrounding(x, y):\n # If location is opponent's color and has no liberties, tally it up\n if p is self._next_turn and self.count_liberties(x1, y1) == 0:\n score = self._kill_group(x1, y1)\n scores.append(score)\n self._tally(score)\n return sum(scores)",
"def get_num_black_pieces(self):\n return self.num_black_pieces",
"def shatter(self):\n self.delete()\n if self.size==0:\n #if this rock is a small rock, then dont spawn any new rocks when its shattered\n return\n numberOfRocksLeftToSpawn=numberOfNewRocksToSpawnOnShatter\n while (numberOfRocksLeftToSpawn>0):\n Rock(canvas=self.canvasIGetDrawnOn,xPos=self.xPos,yPos=self.yPos,size=self.size-1)\n numberOfRocksLeftToSpawn-=1",
"def check_removal(self, multiplier=1):\n children = self.ids.grid.children\n groups = []\n points = 0\n\n # Recursively check all children and creates groups with them\n for i, child in enumerate(children):\n if not child.visited:\n groups.append(self.recursive_check(i))\n\n # Reset visit status for the next pass\n for child in children:\n child.visited = False\n\n # Get the groups that contain more than 3 blocks of the same colour, calculate points and let new blocks fall\n high_groups = [x for x in groups if len(x) > 3]\n for g in high_groups:\n # I sort the blocks by reversed id, this helps in the implementation of how blocks fall\n # If this was unsorted, a block might get the colour of the block above that actually should get removed\n g.sort(reverse=True)\n points += multiplier * len(g)\n multiplier += 1\n for button_id in g:\n self.fall(button_id)\n if len(high_groups) > 0:\n return self.check_removal(multiplier) + points\n else:\n return 0",
"def deduct_from_rack(self, old_word, new_word):\r\n chrs_used = new_word.replace(old_word, \"\")\r\n print(\"Rack previous size = {}\".format(len(self.rack)))\r\n for chr_val in chrs_used:\r\n used_tile = Tile(chr_val, self.game_bag.letter_freq_and_val[chr_val][1])\r\n self.rack.remove(used_tile)\r\n\r\n if len(new_word) - len(old_word) >= 7: #used all 7 tiles\r\n self.cur_score += 50\r\n \r\n print(\"Rack new size = {}\".format(len(self.rack)))",
"def submit_kill(self, showpoints: bool = True) -> None:\n # FIXME Clean this up.\n # pylint: disable=too-many-statements\n from ba._lang import Lstr\n from ba._general import Call\n self._multi_kill_count += 1\n stats = self._stats()\n assert stats\n if self._multi_kill_count == 1:\n score = 0\n name = None\n delay = 0.0\n color = (0.0, 0.0, 0.0, 1.0)\n scale = 1.0\n sound = None\n elif self._multi_kill_count == 2:\n score = 20\n name = Lstr(resource='twoKillText')\n color = (0.1, 1.0, 0.0, 1)\n scale = 1.0\n delay = 0.0\n sound = stats.orchestrahitsound1\n elif self._multi_kill_count == 3:\n score = 40\n name = Lstr(resource='threeKillText')\n color = (1.0, 0.7, 0.0, 1)\n scale = 1.1\n delay = 0.3\n sound = stats.orchestrahitsound2\n elif self._multi_kill_count == 4:\n score = 60\n name = Lstr(resource='fourKillText')\n color = (1.0, 1.0, 0.0, 1)\n scale = 1.2\n delay = 0.6\n sound = stats.orchestrahitsound3\n elif self._multi_kill_count == 5:\n score = 80\n name = Lstr(resource='fiveKillText')\n color = (1.0, 0.5, 0.0, 1)\n scale = 1.3\n delay = 0.9\n sound = stats.orchestrahitsound4\n else:\n score = 100\n name = Lstr(resource='multiKillText',\n subs=[('${COUNT}', str(self._multi_kill_count))])\n color = (1.0, 0.5, 0.0, 1)\n scale = 1.3\n delay = 1.0\n sound = stats.orchestrahitsound4\n\n def _apply(name2: Lstr, score2: int, showpoints2: bool,\n color2: Tuple[float, float, float, float], scale2: float,\n sound2: Optional[ba.Sound]) -> None:\n from bastd.actor.popuptext import PopupText\n\n # Only award this if they're still alive and we can get\n # a current position for them.\n our_pos: Optional[ba.Vec3] = None\n if self._sessionplayer:\n if self._sessionplayer.activityplayer is not None:\n try:\n our_pos = self._sessionplayer.activityplayer.position\n except NotFoundError:\n pass\n if our_pos is None:\n return\n\n # Jitter position a bit since these often come in clusters.\n our_pos = _ba.Vec3(our_pos[0] + (random.random() - 0.5) * 2.0,\n our_pos[1] + (random.random() - 0.5) * 2.0,\n our_pos[2] + (random.random() - 0.5) * 2.0)\n activity = self.getactivity()\n if activity is not None:\n PopupText(Lstr(\n value=(('+' + str(score2) + ' ') if showpoints2 else '') +\n '${N}',\n subs=[('${N}', name2)]),\n color=color2,\n scale=scale2,\n position=our_pos).autoretain()\n if sound2:\n _ba.playsound(sound2)\n\n self.score += score2\n self.accumscore += score2\n\n # Inform a running game of the score.\n if score2 != 0 and activity is not None:\n activity.handlemessage(PlayerScoredMessage(score=score2))\n\n if name is not None:\n _ba.timer(\n 0.3 + delay,\n Call(_apply, name, score, showpoints, color, scale, sound))\n\n # Keep the tally rollin'...\n # set a timer for a bit in the future.\n self._multi_kill_timer = _ba.Timer(1.0, self._end_multi_kill)",
"def get_kills_overall(self, uuid):\n\n return self.template(uuid, \"kills\")",
"def test_consumed_cards_longer(self):\n game = TestGames.replay(9, [3, 1, 0, 0, 1, 2, 2, 0, 6, 3,\n 0, 0, 1, 2, 6, 0, 0, 0, 0, 0])\n consumed_cards = game.consumed_cards()\n self.assertEqual(len(consumed_cards), 8)\n\n self.assertListEqual(list(consumed_cards),\n [3 / 5, # guards\n 0 / 2, # priest\n 1 / 2, # baron\n 1 / 2, # handmaid\n 1 / 2, # prince\n 1 / 1, # king\n 0 / 1, # countess\n 0 / 1]) # princess",
"def get_num_white_pieces(self):\n return self.num_white_pieces",
"def kill(self):\n for piece in self.board.pieces:\n piece.destroyed = True",
"def computer_plays():\n global piles\n global num_piles\n\n print('Your move was MEDIOCRE at best MY TURN!!!!')\n opt = opt_play()\n print('I shall remove '+str(opt[1])+' from pile '+str(opt[0]))\n piles[opt[0]] -= opt[1]",
"def pick(self, inv, pl, group, sc):\r\n if self.rect.colliderect(pl) and not self.used:\r\n group.remove(self)\r\n inv += ['score {}'.format(id(self))]\r\n sc += [sc[len(sc) - 1] + 100]\r\n self.used = True",
"def numSprites(self, key):\n deleted = len([s for s in self.kill_list if key in s.stypes])\n if key in self.sprite_groups:\n return len(self.sprite_groups[key])-deleted\n else: \n return len([s for s in self if key in s.stypes])-deleted",
"def calculatePieces(self):\n pass;",
"def cal_kill_turn(deck): \n #Init board/game state\n goldfish_life = 20\n turn = 0 \n \n #lands in hand\n lands_in_hand = []\n #spell count in hand\n spells_in_hand = []\n #lands in play\n lands_in_play = []\n #creatures in play\n spells_in_play = []\n #creatures' in play power\n #creature_pwr = 1\n \n #shuffle and draw 7 cards, mulls if hand bad\n hand = None\n keep_hand = False\n hand_count = 8\n while keep_hand == False:\n hand_count = hand_count - 1\n deck.shuffle()\n hand = deck.peep(hand_count)\n keep_hand = Mull.keep_or_mull(hand)\n hand = deck.draw_hand(num = hand_count) \n \n #Init Hand state\n for card in hand:\n if card.is_land == True:\n lands_in_hand.append(card)\n else:\n spells_in_hand.append(card) \n \n #SIMULATE GOLDFISH KILL\n while(goldfish_life >= 0 and deck.size() > 0): \n if VERBOSE:\n print(\"+++++++++++++ Turn \" + str(turn) + \"++++++++++++++\") \n print(\" Goldfish life = \" + str(goldfish_life))\n \n print(\" Lands in play\")\n for card in lands_in_play:\n print(card)\n print(\" Spells in play\")\n for card in spells_in_play:\n print(card)\n print(\" Lands in hand\")\n for card in lands_in_hand:\n print(card)\n print(\" Creatures in hand\")\n for card in spells_in_hand:\n print(card) \n \n # Draw a card if not first turn\n if turn > 0:\n card_to_draw = deck.draw() \n if card_to_draw.is_land == True: \n lands_in_hand.append(copy.deepcopy(card_to_draw))\n else: \n spells_in_hand.append(copy.deepcopy(card_to_draw)) \n\n #MAIN PHASE 1 play land card if we have any\n if len(lands_in_hand) > 0: \n lowest_cost = None\n land_to_play = None\n #Play the land card that has the lowest cost creature in hand\n for land in lands_in_hand[:]:\n for creature in spells_in_hand[:]:\n if land.manaEachTurn == creature.manaCost:\n # this land card has a playable creature\n if land_to_play != None:\n temp_cost = creature.manaCost\n if temp_cost < lowest_cost:\n if len(np.where(temp_cost > 0)[0]) <= \\\n len(np.where(lowest_cost > 0)[0]): \n # play the land that corresponds to\n # the creatures that require the \n # least different types\n land_to_play = land\n lowest_cost = temp_cost\n else:\n #first land card, we store it to play\n land_to_play = land\n lowest_cost = creature.manaCost\n if land_to_play == None: #No spell cards in hand\n land_to_play = lands_in_hand[0] #play first land card\n \n lands_in_play.append(copy.deepcopy(land_to_play)) \n lands_in_hand.pop(lands_in_hand.index(land_to_play)) \n \n #ATTACK GOLDFISH \n for creature in spells_in_play:\n goldfish_life = goldfish_life - creature.damageEachTurn\n if goldfish_life <= 0:\n if VERBOSE:\n print('Goldfish killed on turn ' + str(turn))\n return turn\n \n #MAIN PHASE 2 play spells\n if len(spells_in_hand) > 0 and len(lands_in_play) > 0: \n #Spells in hand and mana available --> play a creature\n #GOLDFISH LOGIC\n if p_goldfish:\n if goldfish_interactions > 0:\n pass\n if q_goldfish:\n if r.random(1) < q_goldfish_prob:\n if goldfish_interactions > 0:\n pass\n #Check mana pool\n mana_pool = np.array([0] * Mana.MANA_TYPES)\n for card in lands_in_play:\n mana_pool += card.manaEachTurn\n \n for creature in spells_in_hand:\n temp_pool = np.array(mana_pool - \\\n np.array(creature.manaCost))\n if len(np.where(temp_pool < 0)[0]) == 0: \n #can afford to play card\n mana_pool = temp_pool[:]\n spells_in_play.append(copy.deepcopy(creature))\n spells_in_hand.remove(creature)\n if VERBOSE:\n print(\"++++++++++++ End Turn \" + str(turn) + \"++++++++++\") \n turn += 1 \n #End Gold Fish kill \n \n if VERBOSE:\n print('Goldfish killed on 
turn ' + str(turn))\n return turn",
"def eat_fruit(self):\r\n self.app.fruit.remove(self.grid_pos)\r\n self.current_score += 5",
"def test_discard(self):\r\n deck_size = 3\r\n d = Deck(deck_size)\r\n for _ in range(deck_size):\r\n d.draw()\r\n d.discard([1, 3])\r\n drawn = d.draw(2)\r\n self.assertEqual(len(drawn), 2)\r\n self.assertIn(1, drawn)\r\n self.assertIn(3, drawn)",
"def destroy(explosions,inkblots,hero,deaths,stats):\n explosion_hits_inkblot = pygame.sprite.groupcollide(explosions,inkblots,False,True,pygame.sprite.collide_mask)\n explosion_hits_hero = pygame.sprite.spritecollideany(hero,explosions,pygame.sprite.collide_mask)\n explosion_hits_death = pygame.sprite.groupcollide(explosions,deaths,False,True,pygame.sprite.collide_mask)\n \n if explosion_hits_inkblot:\n stats.inkblot_killed()\n if explosion_hits_death:\n stats.death_killed()\n if explosion_hits_hero != None:\n hero.alive = False",
"def _layer_size_score(size, hole_count, hole_area):\r\n board_area = size[0] * size[1]\r\n if board_area == 0:\r\n return 0\r\n \r\n hole_percentage = hole_area / board_area\r\n hole_score = (hole_percentage - 0.25) ** 2\r\n size_score = (board_area - 8) **2\r\n return hole_score * size_score",
"def select_trump(self, rnd: PlayerRound) -> int:\n # select the trump with the largest number of cards\n # print(rnd.hand)\n # print(rnd.hand.shape)\n\n trump = 0\n max_number_in_color = 0\n for c in range(4):\n number_in_color = (rnd.hand * color_masks[c]).sum()\n if number_in_color > max_number_in_color:\n max_number_in_color = number_in_color\n trump = c\n return trump",
"def test_consumed_cards(self):\n game = TestGames.replay(9, [3, 1, 0, 0])\n consumed_cards = game.consumed_cards()\n self.assertEqual(len(consumed_cards), 8)\n\n self.assertListEqual(list(consumed_cards),\n [2 / 5, # guards\n 0 / 2, # priest\n 1 / 2, # baron\n 0 / 2, # handmaid\n 1 / 2, # prince\n 0 / 1, # king\n 0 / 1, # countess\n 0 / 1]) # princess",
"def get_card(self, card):\n\n\t\tself.add_card_to_grps(card)\n\n\t\tself.grps = sorted(self.grps, key = lambda x: -len(x))\n\n\n\t\t# check if # of cards forming sets is more than 5; if yes, then break the set to allow computer to form runs\n\t\tnum_set_cards = 0\n\t\tpos = -1\n\t\tfor i in range(len(self.grps)):\n\t\t\tif len(self.grps[i]) > 1 and self.grps[i][0] == self.grps[i][1]:\n\t\t\t\tnum_set_cards += len(self.grps[i])\n\t\t\t\tpos = i\n\n\t\tif num_set_cards > 5:\n\t\t\tcard = self.grps[pos][-1]\n\t\t\tself.grps[pos].remove(card)\n\t\t\tlogger.info(f\"In computer.py/get_card: computer returned {card} to break too many set, computer = {self}\")\n\t\t\treturn card\n\n\n\t\t# if # of sets is fine, then remove a card from the group with least size\n\t\tcard = self.grps[-1][-1]\n\n\t\t\n\t\tif len(self.grps[-1]) == 1:\n\t\t\tself.grps.remove(self.grps[-1])\n\t\telse:\n\t\t\tself.grps[-1].remove(self.grps[-1][-1])\n\n\t\tlogger.info(f\"In computer.py/get_card: computer returned {card}, computer = {self}\")\n\n\t\treturn card",
"def Get_empty_cells(difficulty, size):\n if(difficulty == 'beginner'):\n return size**2 - 50\n elif (difficulty == 'easy'):\n return size**2 - 40\n elif (difficulty == 'medium'):\n return size**2 - 33\n elif (difficulty == 'hard'):\n return size**2 - 26\n elif (difficulty == 'hell'):\n return size**2 - 17",
"def _team_sizes(rating_groups):\n team_sizes = [0]\n for group in rating_groups:\n team_sizes.append(len(group) + team_sizes[-1])\n del team_sizes[0]\n return team_sizes",
"def drop_curr_piece(self):\n if self.over: return\n delta = (0, 0) # now make this as big as possible\n while True:\n new_delta = tuple_add(delta, (0, 1))\n if self.can_move_curr_piece(new_delta):\n delta = new_delta\n else:\n break\n self.increment_score(delta[1])\n self.move_curr_piece(delta)\n self.lock_curr_piece()\n self.queue_draw()",
"def _evaluate_num_pieces(self, player):\n evaluation = 0\n if player is Player.black:\n evaluation += self.num_black_pieces * 10\n evaluation -= self.num_white_pieces * 10\n evaluation += self.num_black_kings * 10\n evaluation -= self.num_white_kings * 10\n elif player is Player.white:\n evaluation -= self.num_black_pieces * 10\n evaluation += self.num_white_pieces * 10\n evaluation -= self.num_black_kings * 10\n evaluation += self.num_white_kings * 10\n\n return evaluation",
"def num_pieces_left(self):\n return self.num_white_pieces + self.num_black_pieces",
"def drop(self):\r\n\t\t#print \"drop_list: {0}\".format(\" \".join(self.gb.drop_list))\r\n\t\tresult = []\r\n\t\tall_cards = [self.wang_list, self.tube_list, self.bamb_list, self.word_list, self.wind_list]\r\n\t\tprevious = \"\"\r\n\t\tfor cards in all_cards:\r\n\t\t\tfor i in range(len(cards)):\r\n\t\t\t\t\"\"\" avoid running same card \"\"\"\r\n\t\t\t\tif (cards[i] == previous): continue\r\n\t\t\t\tc = cards.pop(i)\r\n\t\t\t\tprevious = c\r\n\t\t\t\tmini, useful_amount, score = self.count_steps()\r\n\t\t\t\tcards.insert(i, c)\r\n\t\t\t\tresult.append([mini, useful_amount, score, c])\r\n\t\t\t\t#print \"min: {0}, useful_amount: {1}, score: {2}, dcard: {3}\".format(mini, useful_amount, score, c)\r\n\r\n\t\tdcard = self.sorting_by_criteria(result)\r\n\t\t#print \"\\tGeniusAgent drop: {0}\".format(dcard)\r\n\t\tctype = GameBoard.CardType(dcard)\r\n\t\tall_cards[ctype-1].remove(dcard)\r\n\t\tself.card_count -= 1\r\n\t\treturn dcard",
"def no_kills(self) -> int:\n return sum([e.deaths for e in self.enemies])"
] | [
"0.60348535",
"0.57435024",
"0.54608136",
"0.5445019",
"0.542634",
"0.54196596",
"0.5302651",
"0.5200759",
"0.5154724",
"0.5072169",
"0.50539815",
"0.5046292",
"0.49920136",
"0.49912956",
"0.49609512",
"0.4869226",
"0.4835175",
"0.48231968",
"0.479942",
"0.47956777",
"0.47943616",
"0.47836682",
"0.4780049",
"0.47785857",
"0.47759408",
"0.47660708",
"0.47637597",
"0.47636244",
"0.47578245",
"0.47566885"
] | 0.6882954 | 0 |
Recursively traverses adjacent locations of the same color to find all surrounding liberties for the group at the given coordinates. | def _get_liberties(self, x, y, traversed):
loc = self[x, y]
if loc is self.EMPTY:
# Return coords of empty location (this counts as a liberty)
return set([(x, y)])
else:
# Get surrounding locations which are empty or have the same color
# and whose coordinates have not already been traversed
locations = [
(p, (a, b))
for p, (a, b) in self._get_surrounding(x, y)
if (p is loc or p is self.EMPTY) and (a, b) not in traversed
]
# Mark current coordinates as having been traversed
traversed.add((x, y))
# Collect unique coordinates of surrounding liberties
if locations:
return set.union(*[
self._get_liberties(a, b, traversed)
for _, (a, b) in locations
])
else:
return set() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_group(self, x, y, traversed):\n loc = self[x, y]\n\n # Get surrounding locations which have the same color and whose\n # coordinates have not already been traversed\n locations = [\n (p, (a, b))\n for p, (a, b) in self._get_surrounding(x, y)\n if p is loc and (a, b) not in traversed\n ]\n\n # Add current coordinates to traversed coordinates\n traversed.add((x, y))\n\n # Find coordinates of similar neighbors\n if locations:\n return traversed.union(*[\n self._get_group(a, b, traversed)\n for _, (a, b) in locations\n ])\n else:\n return traversed",
"def locate_neighbors(grouped, row, column, width, height, reach):\n neighbors = []\n for row_val in range(2*int(reach) + 1):\n for col_val in range(2*int(reach) + 1):\n row_final = row - int(reach) + row_val\n col_final = column - int(reach) + col_val\n if col_final == column and row_final == row:\n continue\n if col_final >= width or col_final < 0:\n continue\n if row_final >= height or row_final < 0:\n continue\n row_num = (row_final * width) + col_final\n final_int = grouped[row_num][0]\n neighbors.append(final_int)\n return neighbors",
"def _get_surrounding(self, x, y):\n coords = (\n (x, y - 1),\n (x + 1, y),\n (x, y + 1),\n (x - 1, y),\n )\n return filter(lambda i: bool(i[0]), [\n (self._get_none(a, b), (a, b))\n for a, b in coords\n ])",
"def group_adjacents(group, board, filter_by=None):\n liberties = set([])\n for location in group:\n if filter_by == \"None\":\n liberties |= xy_adjacents(location, board, filter_by=\"None\")\n elif filter_by == \"friend\":\n liberties |= xy_adjacents(location, board, filter_by=\"friend\")\n elif filter_by == \"foe\":\n liberties |= xy_adjacents(location, board, filter_by=\"foe\")\n else:\n liberties |= xy_adjacents(location, board)\n liberties -= group\n return liberties",
"def grasps_within_pile(color_mask):\n hue_counts, hue_pixels = get_hsv_hist(color_mask)\n\n individual_masks = []\n\n #color to binary\n focus_mask = color_to_binary(color_mask)\n\n #segment by hsv\n for block_color in hue_counts.keys():\n #same threshold values for number of objects\n if hue_counts[block_color] > cfg.SIZE_TOL:\n valid_pix = hue_pixels[block_color]\n obj_mask = focus_mask.mask_by_ind(np.array(valid_pix))\n individual_masks.append(obj_mask)\n if len(individual_masks) > 0:\n obj_focus_mask = individual_masks[0]\n for im in individual_masks[1:]:\n obj_focus_mask += im\n\n #for each hsv block, again separate by connectivity\n all_groups = []\n for i, obj_mask in enumerate(individual_masks):\n groups = get_cluster_info(obj_mask)\n\n for group in groups:\n #matches endpoints of line in visualization\n cm = group.cm\n d = group.dir\n\n grasp_top = cm + d * cfg.LINE_SIZE/2\n grasp_bot = cm - d * cfg.LINE_SIZE/2\n if is_valid_grasp(grasp_top, obj_focus_mask) and is_valid_grasp(grasp_bot, obj_focus_mask):\n all_groups.append(group)\n\n return all_groups",
"def get_neighbors(start_square, visited=[]):\n neighbors = []\n\n # loop over possible x values\n for i in [start_square.x - 1, start_square.x, start_square.x + 1]:\n\n # drop neighbors outside of our region of interest\n if i < 0 or i > MAX_X:\n continue\n\n # loop over possible y values\n for j in [start_square.y - 1, start_square.y, start_square.y + 1]:\n\n # drop neighbors outside of our region of interest\n if j < 0 or j > MAX_Y:\n continue\n\n # Ignore ourself\n if i == start_square.x and j == start_square.y:\n continue\n\n # Ignore corner pieces\n if i == start_square.x - 1 and j != start_square.y:\n continue\n if i == start_square.x + 1 and j != start_square.y:\n continue\n\n # Deal with barriers\n found = False\n for square in visited:\n if square.pos == [i, j]:\n found = True\n break\n if found:\n continue\n\n neighbors.append(Square(i, j))\n\n return neighbors",
"def searchDeadEnd(self):\n boundaries = []\n if not self.red:\n i = self.midWidth - 1\n else:\n i = self.midWidth + 1\n boudaries = [(i, j) for j in range(self.height)]\n validPositions = []\n for i in boudaries:\n if not (i[0], i[1]) in self.walls:\n validPositions.append(i)\n\n dangerPos = []\n\n toExpand = self.scanmap.twoEntryPoints()\n for (x,y) in toExpand:\n adjacent = self.scanmap.adjacentValidPoints(x, y)\n if not (x,y) in dangerPos:\n for (u, w) in adjacent:\n visited = []\n visited.append((x, y))\n safe = False\n danger = False\n DFS = util.Stack()\n DFS.push((u,w))\n while not safe and not danger:\n (i,j) = DFS.pop()\n visited.append((i,j))\n adjacents = self.scanmap.adjacentValidPoints(i,j)\n for position in adjacents:\n if not position in visited:\n DFS.push(position)\n if DFS.isEmpty():\n danger = True\n dangerPos = list(set(dangerPos) | set(visited))\n\n if (i,j) in validPositions:\n safe = True\n oneEntry = self.scanmap.oneEntryPoints()\n dangerPos = list(set(oneEntry).union(set(dangerPos)))\n dangerPos.sort()\n return dangerPos",
"def get_surroundings(matrix, coord):\n width = np.shape(matrix)[0]\n height = np.shape(matrix)[1]\n coordinates = []\n\n # top\n (\n coordinates.append((coord[0], coord[1] - 1))\n if coord[1] - 1 >= 0\n else None\n )\n # bottom\n (\n coordinates.append((coord[0], coord[1] + 1))\n if coord[1] + 1 < height\n else None\n )\n # left\n (\n coordinates.append((coord[0] - 1, coord[1]))\n if coord[0] - 1 >= 0\n else None\n )\n # right\n (\n coordinates.append((coord[0] + 1, coord[1]))\n if coord[0] + 1 < width\n else None\n )\n\n return coordinates",
"def group_connected(triggers):\n groups = []\n ungrouped = set((x, y, difficulty) for (x, y), difficulty in triggers.items())\n while ungrouped:\n x, y, difficulty = ungrouped.pop()\n pos = x, y\n locs = [pos]\n queue = [pos]\n while queue:\n pos = queue.pop()\n for x, y in HexGrid.neighbours(pos):\n if (x, y, difficulty) in ungrouped:\n ungrouped.discard((x, y, difficulty))\n c = x, y\n locs.append(c)\n queue.append(c)\n groups.append(TriggerArea(locs, difficulty))\n\n groups_by_loc = {}\n for g in groups:\n for l in g.locs:\n groups_by_loc[l] = g\n return groups, groups_by_loc",
"def solve(arr, pos, color):\n i = 0\n same_color = [pos]\n while i < len(same_color):\n for j in get_neighbors(arr, same_color[i], arr[pos[0]][pos[1]]):\n if j not in same_color:\n same_color.append(j)\n i += 1\n for i in same_color:\n arr[i[0]][i[1]] = color\n return arr",
"def identify_dbs(image):\n locations = {\"red\": Point(), \"green\": Point(), \"blue\": Point()}\n masks = {\"red\": [], \"green\": [], \"blue\": []}\n\n bridge = cv_bridge.CvBridge()\n image = bridge.imgmsg_to_cv2(image, \"bgr8\")\n hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\n\n # upper and lower bounds for red\n # using python 3 bgr [0,0,188] = hsv [0, 255, 188]\n lower_red = numpy.array([0, 100, 100]) \n upper_red = numpy.array([10, 255, 255])\n masks[\"red\"] = cv2.inRange(hsv, lower_red, upper_red)\n\n # upper and lower bounds for green\n # using python 3 bgr [0,175,0] = hsv [60, 255, 175]\n lower_green = numpy.array([50, 100, 100]) \n upper_green = numpy.array([70, 255, 255])\n masks[\"green\"] = cv2.inRange(hsv, lower_green, upper_green)\n\n # upper and lower bounds for blue\n # using python 3 bgr [176, 0, 17] = hsv [123, 255, 176]\n lower_blue = numpy.array([113, 100, 100])\n upper_blue = numpy.array([133, 255, 255])\n masks[\"blue\"] = cv2.inRange(hsv, lower_blue, upper_blue)\n\n x, y, w, h = 0, 0, image.shape[1]//3, image.shape[0]\n\n for color, mask in masks.items():\n pixels = {\"left\": 0, \"middle\": 0, \"right\": 0}\n \n # define section of image to use for left, middle and right\n left = mask[y:y+h, x:x+w]\n middle = mask[y:y+h, x+w:x+w+w]\n right = mask[y:y+h, x+w+w:x+3*w]\n\n # count the number of pixels in each section\n pixels[\"left\"] = cv2.countNonZero(left)\n pixels[\"middle\"] = cv2.countNonZero(middle)\n pixels[\"right\"] = cv2.countNonZero(right)\n location = max(pixels, key=pixels.get)\n\n # map the relative position of the db (left, middle, right) to the correct Point()\n locations[color] = db_locations[location]\n \n return locations",
"def neighbours(self):\n\n neighbours = []\n root = self.root\n if self == root:\n return neighbours\n\n ########################\n # IMMEDIATELY ADJACENT #\n sizes = [self.maxs[0] - self.mins[0], self.maxs[1] - self.mins[1]]\n coords = [(self.mins[0] + sizes[0] / 2, self.maxs[1] + sizes[1] / 2,),\n (self.maxs[0] + sizes[0] / 2, self.mins[1] + sizes[1] / 2,),\n (self.mins[0] + sizes[0] / 2, self.mins[1] - sizes[1] / 2,),\n (self.maxs[0] - sizes[0] / 2, self.mins[1] + sizes[1] / 2,),]\n # loop through top, right, bottom, left\n for i in range(4):\n x, y = coords[i]\n query_quad = root.query_xy(x, y)\n if query_quad is not None:\n same_size_idx = query_quad.location[: self.tree_depth]\n same_size_quad = root[same_size_idx]\n neighbours += list(self._get_border_children(same_size_quad, i))\n\n #############\n # DIAGONALS #\n root_sizes = [root.maxs[0] - root.mins[0], root.maxs[1] - root.mins[1]]\n xs, ys = (root_sizes / 2 ** root.max_tree_depth) / 2\n neighbours += [\n root.query_xy(self.mins[0] - xs, self.mins[1] - ys), # TL\n root.query_xy(self.maxs[0] + xs, self.mins[1] - ys), # TR\n root.query_xy(self.mins[0] - xs, self.maxs[1] + ys), # BL\n root.query_xy(self.maxs[0] + xs, self.maxs[1] + ys), # BR\n ]\n\n unique_neighbours = list(set(neighbours))\n try:\n unique_neighbours.remove(self)\n except ValueError:\n pass\n\n return unique_neighbours",
"def find_color_boundaries(array, color):\n if (array == color).any() == False:\n return None\n ind_0 = np.arange(array.shape[0])\n ind_1 = np.arange(array.shape[1])\n\n temp_0 = ind_0[(array == color).max(axis=1)] # axis 0\n min_0, max_0 = temp_0.min(), temp_0.max()\n\n temp_1 = ind_1[(array == color).max(axis=0)] # axis 1\n min_1, max_1 = temp_1.min(), temp_1.max()\n\n return min_0, max_0, min_1, max_1",
"def framewise_inside_polygon_roi(\n bp_location: np.ndarray, roi_coords: np.ndarray\n ) -> np.ndarray:\n\n results = np.full((bp_location.shape[0]), 0)\n for i in prange(0, results.shape[0]):\n x, y, n = bp_location[i][0], bp_location[i][1], len(roi_coords)\n p2x, p2y, xints, inside = 0.0, 0.0, 0.0, False\n p1x, p1y = roi_coords[0]\n for j in prange(n + 1):\n p2x, p2y = roi_coords[j % n]\n if (\n (y > min(p1y, p2y))\n and (y <= max(p1y, p2y))\n and (x <= max(p1x, p2x))\n ):\n if p1y != p2y:\n xints = (y - p1y) * (p2x - p1x) / (p2y - p1y) + p1x\n if p1x == p2x or x <= xints:\n inside = not inside\n p1x, p1y = p2x, p2y\n if inside:\n results[i] = 1\n\n return results",
"def find_all_nearest_neighbours(point_cloud:np.ndarray) -> np.ndarray:\n pass",
"def get_neighbors(arr, pos, color):\n neighbors = []\n try:\n if arr[pos[0] + 1][pos[1]] == color:\n neighbors.append((pos[0] + 1, pos[1]))\n except IndexError:\n pass\n try:\n if arr[pos[0] - 1][pos[1]] == color:\n neighbors.append((pos[0] - 1, pos[1]))\n except IndexError:\n pass\n try:\n if arr[pos[0] + 1][pos[1]] == color:\n neighbors.append((pos[0] + 1, pos[1]))\n except IndexError:\n pass\n try:\n if arr[pos[0]][pos[1] - 1] == color:\n neighbors.append((pos[0], pos[1] - 1))\n except IndexError:\n pass\n return neighbors",
"def get_groups(board: numpy.ndarray, player: int) -> List[Group]:\n # Generate couples\n # Array of (p1, p2, x) where x = -1 if p1 == p2, 0 if p1 and p2 are close and 1 if they are close\n couples = []\n size = board.shape[0]\n for i in range(1, size - 1):\n for j in range(1, size - 1):\n if board[i, j] == player:\n l0 = [(i + x, j + y) for x, y in NEIGHBORS_1]\n l1 = [(i + x, j + y) for x, y in NEIGHBORS_2]\n for p in l0 + l1 + [(i, j)]:\n corner = all([x in [0, size - 1] for x in p])\n if 0 <= p[0] < size and 0 <= p[1] < size and board[p] == player and not corner:\n if p == (i, j):\n couples.append(((i, j), p, -1))\n elif p in l0:\n couples.append(((i, j), p, 0))\n else:\n p1, p2 = get_common_neighbours((i, j), p)\n if player not in [board[p1], board[p2]] and (board[p1] == -1 and board[p2] == -1):\n couples.append(((i, j), p, 1))\n\n # Group couples\n groups = [[k] for k in couples]\n\n def fusion(f_groups):\n for group1 in f_groups:\n for group2 in f_groups:\n if group1 != group2:\n for c1 in group1:\n for c2 in group2:\n if c1[0] == c2[0] or c1[0] == c2[1] or c1[1] == c2[0] or c1[1] == c2[1]:\n group1.extend(group2)\n f_groups.remove(group2)\n return True\n return False\n\n while fusion(groups):\n pass\n\n return groups",
"def get_surround(xy, dim_x=10, dim_y=10, radius=1, exclude_self=True):\n laterals = []\n for dx in range(-int(radius), int(radius)+1, 1):\n for dy in range(-int(radius), int(radius)+1, 1):\n if dx**2 + dy**2 > radius**2:\n continue\n if (xy[0]+dx >= 0) and (xy[0]+dx < dim_x) and (xy[1]+dy >= 0) and (xy[1]+dy < dim_y):\n if not (exclude_self and dx == 0 and dy == 0):\n laterals.append((xy[0]+dx, xy[1]+dy))\n return laterals",
"def leftright_neighbors(j, chain) :\n i, k = find_neighbors(j, chain.bridges_dict)\n if chain.lumens_dict[i].pos <= chain.lumens_dict[k].pos :\n return [i, k]\n else :\n return [k, i]",
"def neighbours_of_position(coords):\n row = coords[0]\n col = coords[1]\n \n #assign each of neighbours corrds\n #top left to top rigt\n top_left = (row - 1, col - 1)\n top_center = (row - 1, col)\n top_right = (row - 1, col + 1)\n \n # left to right (center)\n left = (row, col - 1)\n # the (row, col) cordinates passed into this function are situated here\n right = (row, col + 1)\n \n #bottom-left to bottom-right\n bottom_left = (row +1, col -1)\n bottom_center = (row +1, col)\n bottom_right = (row +1, col +1)\n \n return [top_left, top_center, top_right,\n left , right ,\n bottom_left, bottom_center, bottom_right]",
"def box_line_coords(self):\n not_visited = set()\n for shape in self.shapes:\n for r, c in shape:\n # get points next to the numbers in one shape\n # and check which of them are inside the same shape too\n neighbors = [(r-1, c), (r+1, c), (r, c-1), (r, c+1)]\n for next_r, next_c in neighbors:\n if (0 <= next_r < 9 and 0 <= next_c < 9 and\n (next_r, next_c) not in shape):\n \n # maximum of the point and its neighbor will be\n # the beginning of the line part that builds shape \n x0, y0 = max(next_c, c), max(next_r, r)\n x1 = (next_c == c) and c + 1 or x0\n y1 = (next_r == r) and r + 1 or y0\n not_visited.add((x0, y0, x1, y1))\n not_visited.add((x1, y1, x0, y0))\n return not_visited",
"def update_filled(self, filled_edges, filled_surrounded):\n surrounded_cells = []\n for cell in filled_edges:\n coord_x = cell[1]\n coord_y = cell[0]\n color = self.get_color(cell)\n surrounded = True\n\n # up\n if coord_y - 1 >= 0:\n surrounded &= self.check_if_filled((coord_y-1, coord_x), color, filled_edges, filled_surrounded)\n\n # down\n if coord_y + 1 < self.height:\n surrounded &= self.check_if_filled((coord_y+1, coord_x), color, filled_edges, filled_surrounded)\n\n # left\n if coord_x - 1 >= 0:\n surrounded &= self.check_if_filled((coord_y, coord_x-1), color, filled_edges, filled_surrounded)\n\n # right\n if coord_x + 1 < self.width:\n surrounded &= self.check_if_filled((coord_y, coord_x+1), color, filled_edges, filled_surrounded)\n\n if surrounded:\n surrounded_cells.append(cell)\n\n for cell in surrounded_cells:\n filled_surrounded.append(cell)\n filled_edges.remove(cell)",
"def findNeighbours(self):\n neighbours = []\n\n for i in range(self.xCoordinate - 1, self.xCoordinate + 2):\n for j in range(self.yCoordinate - 1, self.yCoordinate + 2):\n if (not (i == self.xCoordinate and j == self.yCoordinate)) and (0 <= i <= 394 and 0 <= j <= 499):\n neighbours.append(PixelPosition(i, j))\n\n return neighbours",
"def get_boundary_layers(cell_cent, el, num_lyrs, bc_loc, struct_grd):\n dim = len(el)\n bound_range = np.zeros(2*dim, dtype=float)\n bound_nodes = {} #dict to store the node numbers of centroids that lie within bound_range\n if(struct_grd):\n fctr = 1\n corr = 0\n lyrs = float(num_lyrs-1)+ 0.0001\n else:\n fctr = 2\n corr = 1\n lyrs = float(num_lyrs)+ 0.0001\n\n lyrs = 1.0001*float(num_lyrs-1)\n for d in range(dim):\n bound_range[2*d] = np.min(cell_cent[:,d]) + corr*np.diff(np.unique(cell_cent[:,d])[0:2])[0] + lyrs*el[d]\n bound_range[2*d+1] = np.max(cell_cent[:,d]) - corr*np.diff(np.unique(cell_cent[:,d])[0:2])[0] - lyrs*el[d]\n\n bound_nodes[2*d] = np.where(cell_cent[:,d] <= bound_range[2*d])\n bound_nodes[(2*d+1)] = np.where(cell_cent[:,d] >= bound_range[2*d+1])\n\n #store only those key value pair that are in the bc_loc\n #this in the end returns mesh with ghost layer cells, \n #if they've been applied already\n keys = bound_nodes.keys()\n keys_temp = [kk for kk in keys]\n for kk in keys_temp:\n if kk not in bc_loc:\n bound_nodes.pop(kk, None)\n \n return bound_nodes",
"def iter_neighbors(x: int, y: int) -> t.Generator[COORDINATE, None, None]:\n yield x - 1, y\n yield x + 1, y\n yield x, y - 1\n yield x, y + 1",
"def get_nest_avoid_points_around(point):\n x, y = point[0], point[1]\n return {(x, y - 2),\n (x - 1, y - 1), (x, y - 1), (x + 1, y - 1),\n (x - 2, y), (x - 1, y), point, (x + 1, y), (x + 2, y),\n (x - 1, y + 1), (x, y + 1), (x + 1, y + 1),\n (x, y + 2)}",
"def _get_containing_blocks(size, point):\n i, j = point\n block_inds = []\n if i > 0:\n if j > 0:\n block_inds.append((i - 1, j - 1))\n if j < size - 1:\n block_inds.append((i - 1, j))\n if i < size - 1:\n if j > 0:\n block_inds.append((i, j - 1))\n if j < size - 1:\n block_inds.append((i, j))\n \n return block_inds",
"def _take_pieces(self, x, y):\n scores = []\n for p, (x1, y1) in self._get_surrounding(x, y):\n # If location is opponent's color and has no liberties, tally it up\n if p is self._next_turn and self.count_liberties(x1, y1) == 0:\n score = self._kill_group(x1, y1)\n scores.append(score)\n self._tally(score)\n return sum(scores)",
"def fill_blockgroups(sf, df,geoids, colors):\n color_ids = []\n for i in geoids:\n color_ids.append(df[df.GEOID==i].index[0])\n \n i = 0\n for bg in color_ids:\n shape_ex = sf.shape(bg)\n x_lon = np.zeros((len(shape_ex.points),1))\n y_lat = np.zeros((len(shape_ex.points),1))\n for ip in range(len(shape_ex.points)):\n x_lon[ip] = shape_ex.points[ip][0]\n y_lat[ip] = shape_ex.points[ip][1]\n plt.fill(x_lon,y_lat, colors[i])\n i = i +1",
"def getArea(rob):\r\n def dfs(visit, i, j):\r\n visit.add((i, j))\r\n for k in range(4):\r\n newi, newj = i + x[k], j + y[k]\r\n if (newi, newj) in visit or not rob.move(k):\r\n continue\r\n dfs(visit, newi, newj)\r\n rob.move((k + 2) % 4)\r\n visit = set()\r\n dfs(visit, 0, 0)\r\n return len(visit)"
] | [
"0.70040894",
"0.5795467",
"0.5597785",
"0.5448913",
"0.54436564",
"0.54422367",
"0.54193276",
"0.5357743",
"0.53455037",
"0.5341227",
"0.53019035",
"0.5227822",
"0.52242845",
"0.5224116",
"0.519014",
"0.5165992",
"0.51538765",
"0.51497793",
"0.51192147",
"0.51059824",
"0.51009536",
"0.50929034",
"0.50852716",
"0.5077564",
"0.50689983",
"0.50654185",
"0.5060827",
"0.5060543",
"0.5048282",
"0.5045247"
] | 0.63835096 | 1 |
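Several of the negative snippets above (neighbours_of_position, findNeighbours, iter_neighbors) enumerate grid neighbours with explicit bounds checks. A minimal, self-contained sketch of that shared pattern; the function name and the 9x9 grid size below are illustrative assumptions, not taken from any snippet:

from typing import Iterator, Tuple

def neighbours_in_bounds(row: int, col: int, n_rows: int, n_cols: int) -> Iterator[Tuple[int, int]]:
    # Yield the 8-neighbourhood of (row, col), clipped to an n_rows x n_cols grid.
    for dr in (-1, 0, 1):
        for dc in (-1, 0, 1):
            if dr == 0 and dc == 0:
                continue  # skip the centre cell itself
            r, c = row + dr, col + dc
            if 0 <= r < n_rows and 0 <= c < n_cols:
                yield (r, c)

# A corner cell of a 9x9 grid keeps only 3 of its 8 potential neighbours.
assert len(list(neighbours_in_bounds(0, 0, 9, 9))) == 3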
Gets the number of liberties surrounding the group at the given coordinates. | def count_liberties(self, x, y):
return len(self.get_liberties(x, y)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getArea(rob):\r\n def dfs(visit, i, j):\r\n visit.add((i, j))\r\n for k in range(4):\r\n newi, newj = i + x[k], j + y[k]\r\n if (newi, newj) in visit or not rob.move(k):\r\n continue\r\n dfs(visit, newi, newj)\r\n rob.move((k + 2) % 4)\r\n visit = set()\r\n dfs(visit, 0, 0)\r\n return len(visit)",
"def __get_contour_num(self,coord_tuple):\r\n boundRect = self.boundRect\r\n x,y = coord_tuple\r\n res = -1\r\n for i in range(len(self.top_contours)):\r\n if ((self.top_contours[i] == True) and (x >= boundRect[i][0]) and (x <= boundRect[i][0]+boundRect[i][2]) and\r\n (y >= boundRect[i][1]) and (y <= boundRect[i][1]+boundRect[i][2])):\r\n res = i\r\n\r\n break\r\n return res",
"def getGroupLibCountEval(self, _group):\n eval_ = []\n for pos in _group.liberty_pos:\n temp_board = deepcopy(self.board)\n temp_board.grid[pos[0]][pos[1]] = Stone(temp_board, self.color, pos)\n temp_board.updateAllBoardData()\n temp_group = temp_board.getGroupByPos(pos)\n eval_ += [[ pos, temp_group.liberty_count ]]\n eval_.sort(key=lambda x: x[1], reverse=True)\n return eval_",
"def numCoords(self):\n return self.nCoords",
"def _get_liberties(self, x, y, traversed):\n loc = self[x, y]\n\n if loc is self.EMPTY:\n # Return coords of empty location (this counts as a liberty)\n return set([(x, y)])\n else:\n # Get surrounding locations which are empty or have the same color\n # and whose coordinates have not already been traversed\n locations = [\n (p, (a, b))\n for p, (a, b) in self._get_surrounding(x, y)\n if (p is loc or p is self.EMPTY) and (a, b) not in traversed\n ]\n\n # Mark current coordinates as having been traversed\n traversed.add((x, y))\n\n # Collect unique coordinates of surrounding liberties\n if locations:\n return set.union(*[\n self._get_liberties(a, b, traversed)\n for _, (a, b) in locations\n ])\n else:\n return set()",
"def get_num_landmarks(self):\n return len(self.landmarks_info)",
"def _calc_refinement_level(coords, well_loc, radius_per_level, max_level):\n if len(coords) != len(well_loc):\n raise ValueError('Unmatching dimensions for cell or well coordinates.')\n\n diff = (np.abs(np.array(coords) - np.array(well_loc))) // radius_per_level\n return max_level - np.max(diff)",
"def get_num_tiles(grid_bbox, dxy): \r\n xmin, xmax, ymin, ymax = grid_bbox\r\n return (int(np.abs(ymax-ymin)/dxy), int(np.abs(xmax-xmin)/dxy))",
"def n_rings(self) -> ir.IntegerValue:\n return ops.GeoNRings(self).to_expr()",
"def ncoordinates(self):\n return _coordsys.coordsys_ncoordinates(self)",
"def number_of_patches(width, height, patch_size):\n n_patches_x = width // patch_size\n n_patches_y = height // patch_size\n return n_patches_x, n_patches_y",
"def distance_from_boundaries_openbc(xy, boundaries, interp_n=None, check=False):\n dists = []\n for boundary in boundaries:\n these_dists = distance_from_boundary(xy, boundary, interp_n=interp_n, check=check)\n dists.append(these_dists)\n\n return tuple(dists)",
"def count_alive_cells(self, x, y):\n\n # indices of surrounding cells.\n ul = max(y - 1, 0) # upper left\n ur = min(y + 2, self.f_shape[1]) # upper right\n bl = max(x - 1, 0) # bottom left\n br = min(x + 2, self.f_shape[0]) # bottom right\n\n # slice\n cells = self.cells[bl:br, ul:ur]\n n_cells = np.count_nonzero(cells)\n\n return n_cells - self.cells[x][y]",
"def count_neighboors(self, x: int, y: int) -> int :\n\n cpt : int = 0\n min_x : int = max(0, x - 1)\n max_x : int = min(x + 1, self.width-1)\n min_y : int = max(0, y - 1)\n max_y : int = min(y + 1, self.height-1)\n\n x_tmp : int\n y_tmp : int\n for x_tmp in range(min_x, max_x+1):\n for y_tmp in range(min_y, max_y+1):\n if self.is_alive(x_tmp, y_tmp) and not (x_tmp == x and y_tmp == y):\n cpt += 1\n return cpt",
"def nGlobalCoords( self ):\n return self._nGlobalCoords",
"def getLength(self):\n lons = self._toplons\n lats = self._toplats\n seg = self._group_index\n groups = np.unique(seg)\n ng = len(groups)\n rlength = 0\n for i in range(ng):\n group_segments = np.where(groups[i] == seg)[0]\n nseg = len(group_segments) - 1\n for j in range(nseg):\n ind = group_segments[j]\n P0 = Point(lons[ind], lats[ind])\n P1 = Point(lons[ind + 1], lats[ind + 1])\n dist = P0.distance(P1)\n rlength = rlength + dist\n return rlength",
"def nspatials(self):\n return int(len(self)/2)",
"def grid_point_count(self):\n return pytools.product(self.grid_point_counts())",
"def n_points(self) -> ir.IntegerValue:\n return ops.GeoNPoints(self).to_expr()",
"def get_referenced_floor_area() -> np.ndarray:\n\n return envelope.get_referenced_floor_area()",
"def get_num_mines_around_position(self, x, y):\n mines = 0\n for row in range(y-1, y+2):\n for col in range(x-1, x+2):\n if row >= 0 and col >= 0 and row < len(self.mine_map) and col < len(self.mine_map[row]): # Don't check spaces that are outside of the array\n if self.mine_map[row][col]:\n mines += 1\n return mines",
"def count_lorentz(fit_range, lorentz_array_2d):\n counter = 0\n for i in range(0, lorentz_array_2d.shape[0]):\n f0 = lorentz_array_2d[i][1]\n if f0 > fit_range[1] and f0 < fit_range[2]:\n counter += 1\n return counter",
"def lon_grid_size(self):\n lon_diff = np.unique(np.diff(self.data_array.lon))\n\n if len(lon_diff) != 1:\n lon_diff = np.mean(lon_diff)\n\n return np.abs(lon_diff)",
"def get_location_count(self):\n return len(self.matrix)",
"def patch_areas(patch_ids):\n\n return np.bincount(patch_ids.reshape((-1,)))[1:]",
"def cell_containing(self,xy,neighbors_to_test=4): \n hit = self.select_cells_nearest(xy, count=neighbors_to_test, inside=True)\n if hit is None:\n return -1\n else:\n return hit",
"def _ring_area(coordinates):\n\n assert isinstance(coordinates, (list, tuple))\n\n total_area = 0\n coordinates_length = len(coordinates)\n\n if coordinates_length > 2:\n for i in range(0, coordinates_length):\n if i == (coordinates_length - 2):\n lower_index = coordinates_length - 2\n middle_index = coordinates_length - 1\n upper_index = 0\n elif i == (coordinates_length - 1):\n lower_index = coordinates_length - 1\n middle_index = 0\n upper_index = 1\n else:\n lower_index = i\n middle_index = i + 1\n upper_index = i + 2\n\n p1 = coordinates[lower_index]\n p2 = coordinates[middle_index]\n p3 = coordinates[upper_index]\n\n total_area += (_rad(p3[0]) - _rad(p1[0])) * sin(_rad(p2[1]))\n\n total_area = total_area * WGS84_RADIUS * WGS84_RADIUS / 2\n\n return total_area",
"def query_region_count(self, coordinates, radius=0.2*u.deg, pagesize=None, page=None):\n\n # build the coordinates string needed by Mast.Caom.Filtered.Position\n coordinates = commons.parse_coordinates(coordinates)\n\n # if radius is just a number we assume degrees\n if isinstance(radius, (int, float)):\n radius = radius * u.deg\n radius = coord.Angle(radius)\n\n # turn coordinates into the format\n position = ', '.join([str(x) for x in (coordinates.ra.deg, coordinates.dec.deg, radius.deg)])\n\n service = \"Mast.Caom.Filtered.Position\"\n params = {\"columns\": \"COUNT_BIG(*)\",\n \"filters\": [],\n \"position\": position}\n\n return int(self.service_request(service, params, pagesize, page)[0][0])",
"def total_crimes_in_bounds(user_coords):\n\n crimes_coords = {'crimes': []}\n\n # takes in user_coords point a and point b\n # in order to determine top left and bottom right coordinates.\n point_a = user_coords['point_a']\n point_b = user_coords['point_b']\n\n # compare latitude to see what's the top coord, tupleize\n # add 0.005 to latitude, and subtract 0.02 to longitude\n top_left_coord = {'lat': max(point_a['lat'], point_b['lat']) + 0.005,\n 'lng': min(point_a['lng'], point_b['lng']) - 0.02}\n\n # subtract 0.005 to latitude, and add 0.02 to longitude\n bottom_right_coord = {'lat': min(point_a['lat'], point_b['lat']) - 0.005,\n 'lng': max(point_a['lng'], point_b['lng']) + 0.02}\n\n # once the bounds are generated, we will want to do a query for all of the\n # geohashes that are within those bounds. Let's do that now.\n # some raw sql to get the center coords of geohash\n geohash_in_bounds_sql = \"SELECT *, \" + \\\n \"ST_AsText(ST_PointFromGeoHash(geohash)) AS lat_lng \" + \\\n \"FROM nyc_crimes_by_geohash \" + \\\n \"WHERE ST_Contains(\" + \\\n \"ST_MakeBox2D(\" + \\\n \"ST_Point(%f, %f), ST_Point(%f, %f)), ST_PointFromGeoHash(geohash));\" \\\n % (top_left_coord['lat'], top_left_coord['lng'],\n bottom_right_coord['lat'], bottom_right_coord['lng'])\n # execute the raw sql, there should be many\n geohash_in_bounds_query = db.engine.execute(geohash_in_bounds_sql).fetchall()\n\n for row in geohash_in_bounds_query:\n # strip the lat, lngs before putting them in\n # some string splitting to extract data\n location = row[4].strip(\"POINT(\").rstrip(\")\").split()\n latitude = location[0]\n longitude = location[1]\n\n format_loc_dict = {'latitude': latitude, 'longitude': longitude,\n 'total_crimes': row[2]}\n\n # append to crimes_coords inner list\n crimes_coords['crimes'].append(format_loc_dict)\n\n return crimes_coords",
"def count_paper_streets(x_intercepts: list, y_intercepts: list, homes: list) -> int:\n homes = __get_homes_not_on_intersections(x_intercepts, y_intercepts, homes)\n paper_streets = [street for street in __generate_streets(x_intercepts, y_intercepts) if street.isdisjoint(homes)]\n paper_groups = __group_streets(paper_streets)\n return len(paper_groups)"
] | [
"0.5703384",
"0.56963116",
"0.5542732",
"0.55277985",
"0.5469277",
"0.5424882",
"0.53873706",
"0.53670955",
"0.536483",
"0.5318496",
"0.5308205",
"0.5293582",
"0.5269632",
"0.5248836",
"0.5221797",
"0.52110815",
"0.51962906",
"0.5194603",
"0.516675",
"0.51180094",
"0.511654",
"0.5109159",
"0.5097142",
"0.5096693",
"0.5075644",
"0.506299",
"0.50456125",
"0.50453043",
"0.5045129",
"0.5039319"
] | 0.62602663 | 0 |
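A hedged usage sketch for the two-line count_liberties document above; the toy Board class and its precomputed liberty map are illustrative assumptions, since the real class (and its get_liberties traversal) is only partially shown in this row:

class Board:
    # Toy stand-in exposing only what count_liberties relies on.
    def __init__(self, liberties_by_point):
        self._liberties = liberties_by_point  # assumed precomputed {point: set-of-liberties}

    def get_liberties(self, x, y):
        # Assumption: returns the set of empty coordinates adjacent to the group at (x, y).
        return self._liberties.get((x, y), set())

    def count_liberties(self, x, y):
        return len(self.get_liberties(x, y))

board = Board({(3, 3): {(2, 3), (4, 3), (3, 2)}})
print(board.count_liberties(3, 3))  # 3
print(board.count_liberties(0, 0))  # 0 -- no group recorded at that point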
Makes requests to retrieve all resources for `res_ids`, yielding each batch. | def gen_resources_for_ids(
resource: Callable, res_ids: List[str], **list_params
) -> Generator[List, None, None]:
print("Generating resources for ids.")
total = len(res_ids)
res_counter = 0
if "maxResults" not in list_params.keys():
list_params["maxResults"] = DEFAULT_MAX_RESULTS
max_results = DEFAULT_MAX_RESULTS
else:
max_results = list_params["maxResults"]
_res_ids = res_ids.copy()
while len(_res_ids) > 0:
request_ids = []
for _ in range(max_results):
request_ids.append(_res_ids.pop(0))
if len(_res_ids) == 0:
break
print(
f"\tRequesting {res_counter}-{res_counter + len(request_ids)} of {total}."
)
list_params["id"] = ",".join(request_ids)
request = resource().list(**list_params)
response = request.execute()
yield response["items"]
res_counter += max_results
print("\tFinished requesting resources.")
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def batch(self, reqs):\n return self.connection.batch_(reqs)",
"async def run_requests(self):\n loop = asyncio.get_event_loop()\n tasks = []\n async with aiohttp.ClientSession(connector=self.connector) as session:\n\n for index, id in enumerate(self.ids):\n if id not in self.processed_ids:\n url = self.base_url + id\n auth_token = base64.b64encode(id.encode('ascii'))\n header = {\"Authorization\": auth_token.decode('UTF-8')}\n tasks.append(asyncio.ensure_future(self._request_one(url=url, header=header, id=id, index = index, session = session)))\n\n _ = await asyncio.gather(*tasks)",
"def _get_multiple_resources(\n self, resource_ids: List[str], kwargs: Dict[str, Any]\n ) -> List[Any]:\n more_than_one_resource = len(resource_ids) > 1\n\n resources = []\n for resource_id in resource_ids:\n try:\n resource = self.get_resource_by_id(resource_id, **kwargs)\n except ResourceNotFoundError:\n self.print_not_found(resource_id)\n\n if not more_than_one_resource:\n # For backward compatibility reasons and to comply with common \"get one\"\n # behavior, we only fail if a single source is requested\n raise ResourceNotFoundError(\"Resource %s not found.\" % resource_id)\n\n continue\n\n resources.append(resource)\n return resources",
"async def run(product_ids):\n url = \"http://challenge-api.luizalabs.com/api/product/{}\"\n tasks = []\n\n # Fetch all responses within one Client session,\n # keep connection alive for all requests.\n async with ClientSession() as session:\n for product_id in product_ids:\n task = asyncio.ensure_future(utils.fetch(url.format(product_id), session))\n tasks.append(task)\n\n self.responses = await asyncio.gather(*tasks)",
"def get_all(self, *ids):",
"def bulk_get_documents():\n ids = flask.request.json\n if not ids:\n raise UserError(\"No ids provided\")\n if not isinstance(ids, list):\n raise UserError(\"ids is not a list\")\n\n with blueprint.index_driver.session as session:\n # Comment it out to compare against the eager loading option.\n # query = session.query(IndexRecord)\n # query = query.filter(IndexRecord.did.in_(ids)\n\n # Use eager loading.\n query = session.query(IndexRecord)\n query = query.options(\n joinedload(IndexRecord.urls).joinedload(IndexRecordUrl.url_metadata)\n )\n query = query.options(joinedload(IndexRecord.acl))\n query = query.options(joinedload(IndexRecord.authz))\n query = query.options(joinedload(IndexRecord.hashes))\n query = query.options(joinedload(IndexRecord.index_metadata))\n query = query.options(joinedload(IndexRecord.aliases))\n query = query.filter(IndexRecord.did.in_(ids))\n\n docs = [q.to_document_dict() for q in query]\n return flask.Response(json.dumps(docs), 200, mimetype=\"application/json\")",
"def batch(self):\n return self._client.batch()",
"def get_objects(self, ids, **args):\n args[\"ids\"] = \",\".join(ids)\n return self.request(\"\", args)",
"def download_results(client, response_ready, id_list, **kwargs) -> list:\n # set optional arguments\n server = kwargs.get(\"server\", \"/v3/serp/google/organic/task_get/advanced/\")\n if response_ready['status_code'] == 20000:\n results = []\n # this loop ensure that results are collected when they are ready\n count = 0\n while id_list and (count < 1000) :\n if count >= 1:\n print(f\"...this might take a while(x {count})... \")\n print(f\"...still {len(id_list)} items to go! \")\n count += 1\n for id in id_list:\n temp_res = client.get(server + id)\n if temp_res['tasks'][0]['result']:\n results.append(temp_res['tasks'][0]['result'][0])\n id_list.remove(id)\n break\n time.sleep(0.2)\n if (count == 999) and id_list:\n raise ConnectionError(\"could not load all results!!!\")\n return results\n else:\n print(\"error. Code: %d Message: %s\" % (response_ready[\"status_code\"], response_ready[\"status_message\"]))",
"def get_objects(self, ids, **args):\n args[\"ids\"] = \",\".join(ids)\n return self.request(self.version + \"/\", args)",
"def get_results(self, ids):\n self.join()\n return [self.results[id] for id in ids]",
"def batch(\n arguments: List[BatchArgument],\n *,\n uniform_mime_type: str,\n include_output: bool = True,\n drive_service: Optional[discovery.Resource] = None,\n) -> List[drive_api.ResourceID]:\n\n # callback will append resulting IDs in order\n result: List[drive_api.ResourceID] = []\n\n def batch_response(request_id, response, exception) -> None:\n nonlocal result\n result.append(response.get(\"id\"))\n\n requests = [\n request(\n name=argument.name,\n mime_type=uniform_mime_type,\n parent_folder_id=argument.parent_folder_id,\n drive_service=drive_service,\n )\n for argument in arguments\n ]\n kwargs = {\"requests\": requests, \"drive_service\": drive_service}\n if include_output:\n kwargs[\"callback\"] = batch_response\n drive_api.batch_command(**kwargs)\n return result",
"def _batch_request(self, jobs):\n return generate_batch_request(jobs, self._batch_request_size)",
"async def fetch_all(self, urls):\n async with ClientSession() as session:\n tasks = []\n for url in urls:\n task = asyncio.create_task(self.fetch(session, url))\n tasks.append(task)\n results = await asyncio.gather(*tasks)\n return results",
"def test_get_resource_ids(self):\n for i in range(11):\n self.app.post(f'/v1/resource/{self.test_resource}/id/test{i}', headers=admin_headers)\n self._test_paging(f'/v1/resource/{self.test_resource}/ids', admin_headers, 10, 'resource_ids')",
"async def run():\n sem = asyncio.Semaphore(DEFAULT_SEMAPHORE_LIMIT)\n tasks = []\n\n async with ClientSession() as session:\n for u in [ROOT_URL.format(jid) for jid in DEFAULT_RANGE_IDS]:\n task = asyncio.ensure_future(bound_fetch(sem, u, session))\n tasks.append(task)\n responses = asyncio.gather(*tasks)\n await responses",
"def gen_resources(resource: Callable, **list_params) -> Generator[List, None, None]:\n print(\"Generating resources.\")\n if \"maxResults\" not in list_params.keys():\n list_params[\"maxResults\"] = DEFAULT_MAX_RESULTS\n\n next_page_token = None\n while True:\n if next_page_token:\n list_params[\"pageToken\"] = next_page_token\n\n request = resource().list(**list_params)\n # print(\"\\t\\tRequest made successfully.\")\n response = request.execute()\n # print(f\"\\t\\tRaw response: {response}\")\n\n data = response[\"items\"]\n print(f\"\\tRetrieved {len(data)}\")\n\n yield data\n\n if \"nextPageToken\" in response.keys():\n next_page_token = response[\"nextPageToken\"]\n else:\n print(\"\\tReached last page.\")\n break\n\n return None",
"def stream(self, batch):\n response = self.post(batch)\n try:\n for i, result_data in grouped(response):\n result = JobResult.hydrate(assembled(result_data), batch)\n log.info(\"< %s\", result)\n yield result\n finally:\n response.close()",
"def get_batches(auth, base_url='https://api.cratejoy.com/v1/'):\n \n batch_endpoint = '{}shipment_batches/'.format(base_url)\n\n resp = requests.get(\n batch_endpoint,\n auth=auth\n )\n\n print('GET request to {} responded with status '\n 'code: {}'.format(batch_endpoint,\n resp.status_code))\n print(resp.content)",
"def batch_query(url, headers=None, timeout=299):\n\n offset = 0\n count = 0\n\n proxies = {\n 'http': ARGS.proxy_string,\n 'https': ARGS.proxy_string\n }\n\n options = {\n \"headers\": headers,\n \"verify\": False,\n \"timeout\": timeout,\n \"proxies\": proxies,\n \"params\": {}\n }\n\n while True: # do - while offset < count\n options[\"params\"][\"offset\"] = offset\n req = requests.get(url, **options)\n\n if not req.status_code == 200:\n errmsg = \"status_code: {0.status_code}: {0.content}\"\n raise UnknownResult(errmsg.format(req))\n\n res = req.json()\n data = res[\"data\"]\n count = res.get(\"count\", 0)\n\n yield from data\n\n offset += len(data)\n\n if offset >= count:\n break",
"def bulk_process(self):\n\n def actions():\n try:\n task = self.queue.get(block=False, timeout=None)\n\n if task['action'] == 'index':\n yield {\n '_op_type': 'index',\n '_index': self.ensure_index(task),\n '_id': task['id'],\n 'doc': task['properties']\n }\n elif task['action'] == 'delete':\n yield {\n '_op_type': 'delete',\n '_index': self.ensure_index(task),\n '_id': task['id'],\n 'doc': task['properties']\n }\n else:\n raise NotImplementedError\n\n except Empty:\n pass\n\n for success, info in streaming_bulk(self.es_client, actions()):\n if success:\n self.queue.task_done()",
"def _batch(self, batch_request_entries):\n necessary_keys = [\"id\", \"version\", \"method\", \"params\"]\n\n results = []\n\n for (idx, request) in enumerate(batch_request_entries):\n error = None\n result = None\n\n # assert presence of important details\n for necessary_key in necessary_keys:\n if not necessary_key in request.keys():\n raise FakeBitcoinProxyException(\"Missing necessary key {} for _batch request number {}\".format(necessary_key, idx))\n\n if isinstance(request[\"params\"], list):\n method = getattr(self, request[\"method\"])\n result = method(*request[\"params\"])\n else:\n # matches error message received through python-bitcoinrpc\n error = {\"message\": \"Params must be an array\", \"code\": -32600}\n\n results.append({\n \"error\": error,\n \"id\": request[\"id\"],\n \"result\": result,\n })\n\n return results",
"def gen_multi(self, namespace, countspace, count):\n conn = self.pick_conn()\n retries = self.max_retries\n url = \"/gen?ns=%s&cs=%s&count=%d\" % (namespace, countspace, count)\n while 1:\n try:\n r = conn.request(\"GET\", url)\n content = r.data\n assert r.status == 200, \"http status(%d) != 200 : %s\" % (\n r.status, content\n )\n return [int(i) for i in content.split(\",\")]\n except Exception as e:\n logger.warn(\"%s %s %s\", conn, url, e)\n conn = self.pick_conn(new=True)\n retries -= 1\n if retries < 0:\n raise",
"def kegg_download_manager_synchronous(list_of_ids, wait=1):\n urls = ['http://rest.kegg.jp/get/%s' % '+'.join(chunk) for chunk in chunks(list(list_of_ids), 10)]\n num_urls = len(urls)\n print(f\"Total urls to download: {num_urls}. Progress will be shown below.\")\n results = []\n for url in tqdm(urls):\n results.append(download_synchronous(url))\n time.sleep(wait)\n\n return [raw_record for raw_records in results for raw_record in raw_records.split('///')[:-1]]",
"def all(cls, resq, start=0, count=1):\n first = MultipleBackend.classes[0]\n return first.all(resq, start, count)",
"def getDataBatch(self, batch_size):\n for i in range(batch_size):\n params.offset = params.offset+i #increment by 1 for the next set of batch\n url = 'https://api.nytimes.com/svc/search/v2/articlesearch.json'\n url_params = {'q': self.args.query.replace(' ', '+'),'api-key': self.args.api_key,'page': params.offset}\n response = requests.get(url, params=url_params)\n r = response.json()\n\n #start by checking call was successful\n if response.ok:\n if r['status'] != 'OK':\n log.error(\"Error with API call, NYT status not ok\")\n return None\n\n # TODO: implement - this dummy implementation returns one batch of data\n list_of_art = []\n for art in r['response']['docs']:\n list_of_art.append(functions.flatten_json(art)) #attach to list returned in call\n yield list_of_art\n else:\n log.error(\"Error during API call on request side\")",
"def batch(self, requests):\n return AlgoliaUtils_request(self.headers, self.write_hosts, \"POST\", \"/1/indexes/*/batch\", self.timeout, {\"requests\": requests})",
"def _get_batch(self):\n url = self._base_url + urlConfig.URLS['Project'] + '/' + self._project_id + '/batch'\n response = apiCall.get(self._get_token(), url,self._proxy, {}, 10)\n logging.debug(response)\n return response",
"def fetch_images(client, images):\n return [fetch_image(client, image) for image in images]",
"def get_documents(self, batch=None):\n\t\t\n\t\tfiles = None\n\t\tif not batch:\n\t\t\t# no batch = all the batches\n\t\t\tfiles = self._get_batch_files()\n\t\telif batch == \"random\":\n\t\t\t# get all the batches and pick one from random\n\t\t\tbatches = self._get_batches()\n\t\t\tfiles = [ self._get_batch_file(batch=random.randint(1, len(batches))) ]\n\t\telse:\n\t\t\t# get the specified batch\n\t\t\tfiles = [ self._get_batch_file(batch=batch) ]\n\t\t\t\n\t\t# loop through all the batch files\n\t\tfor f in files:\n\t\t\twith gzip.open(f, \"rb\") as infile:\n\t\t\t\tfor line in infile:\n\t\t\t\t\t# parse the JSON for each line\n\t\t\t\t\tyield json.loads(line)"
] | [
"0.6522907",
"0.64874965",
"0.6366794",
"0.6343513",
"0.6326583",
"0.62781847",
"0.6265821",
"0.61405367",
"0.61308616",
"0.6080425",
"0.6056786",
"0.59915185",
"0.5963366",
"0.5945424",
"0.5931578",
"0.5926348",
"0.5803143",
"0.5787222",
"0.5737144",
"0.57251173",
"0.5688712",
"0.56585985",
"0.56354964",
"0.56345373",
"0.5624251",
"0.5621077",
"0.56177384",
"0.56141347",
"0.5584427",
"0.5571676"
] | 0.75606394 | 0 |
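Caller-side sketch for the batching generator documented above. The fake resource below only mimics the shape the generator expects (a callable whose .list(**params) returns a request object with .execute() -> {"items": [...]}); the class names are assumptions for illustration.

class _FakeRequest:
    def __init__(self, ids):
        self._ids = ids
    def execute(self):
        # Mirror the {"items": [...]} payload the generator unpacks.
        return {"items": [{"id": i} for i in self._ids]}

class _FakeResource:
    def list(self, **params):
        # The generator joins each batch of ids into params["id"] as a comma-separated string.
        return _FakeRequest(params["id"].split(","))

# Assuming gen_resources_for_ids from the row above is in scope:
# for batch in gen_resources_for_ids(_FakeResource, ["a", "b", "c"], maxResults=2):
#     print([item["id"] for item in batch])   # ["a", "b"] then ["c"]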
Generates `commentThreads` for the `videos`, yielding the threads for each video. | def gen_comment_threads_for_videos(
self, videos: List
) -> Generator[List, None, None]:
print("Requesting comment threads for videos.")
for video in videos:
threads = self.get_comment_threads_for_video(video["id"])
yield threads
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_comments(comments):\n API_KEY = secrets.YT_KEY\n youtube = build('youtube', 'v3', developerKey=API_KEY)\n request = youtube.commentThreads().list(\n part='replies',\n videoId=comments,\n textFormat=\"plainText\"\n )\n\n response = request.execute()\n\n video = response['items'][0]['replies']['comments']\n\n\n for i in video:\n print('\\n')\n print(i['snippet']['textDisplay'])\n # print(response['items'][0].keys())",
"def scrape_comments(video_list, driver_path=\"C:/WebDriver/bin/chromedriver.exe\", csv_path=\"../comments.csv\"):\n \n csv_file = open(csv_path,'w', encoding=\"UTF-8\", newline=\"\")\n writer = csv.writer(csv_file) \n \n writer.writerow(['query', 'url', 'title', 'upload_date', 'channel', 'no_of_views', 'likes', 'dislikes', 'comment', 'author', 'comment_date', 'no_of_replies','upvotes']) \n driver = webdriver.Chrome(executable_path=driver_path)\n\n for video in video_list:\n \n url = video['url']\n title = video['title']\n upload_date = video['date']\n query = video['query']\n \n # Scrape basic video data\n print(\"=\" * 40)\n print(\"video title : \", title)\n driver.get(url)\n v_channel = WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.CSS_SELECTOR,\"div#upload-info yt-formatted-string\"))).text\n print(\"channel : \",v_channel) \n v_views = WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.CSS_SELECTOR,\"div#count span.view-count\"))).text\n print(\"no. of views : \",v_views)\n v_timeUploaded = WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.CSS_SELECTOR,\"div#date yt-formatted-string\"))).text\n print(\"time uploaded : \",v_timeUploaded)\n w = WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.CSS_SELECTOR,\"div#top-level-buttons yt-formatted-string\")))\n w = driver.find_elements_by_css_selector(\"div#top-level-buttons yt-formatted-string\")\n v_likes = w[0].text\n v_dislikes = w[1].text\n print(\"video has \", v_likes, \"likes and \", v_dislikes, \" dislikes\")\n \n youtube_dict ={}\n \n print(\"+\" * 40)\n print(\"Scraping child links \")\n \n # Load comments section\n driver.execute_script('window.scrollTo(0,390);')\n time.sleep(2)\n \n try:\n # Sort by top comments\n print(\"sorting by top comments\")\n sort= WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.CSS_SELECTOR,\"div#icon-label\")))\n sort.click()\n topcomments =driver.find_element_by_xpath(\"\"\"//*[@id=\"menu\"]/a[1]/paper-item/paper-item-body/div[1]\"\"\")\n topcomments.click()\n \n # Loads more comments\n for i in range(0,5):\n driver.execute_script(\"window.scrollTo(0,Math.max(document.documentElement.scrollHeight,document.body.scrollHeight,document.documentElement.clientHeight))\")\n print(\"scrolling to load more comments\")\n time.sleep(4)\n \n # Count total number of comments and set index to number of comments if less than 50 otherwise set as 50. 
\n totalcomments= len(driver.find_elements_by_xpath(\"\"\"//*[@id=\"content-text\"]\"\"\"))\n \n if totalcomments < 100:\n index= totalcomments\n else:\n index= 100 \n \n # Loop through each comment and scrape info\n print(\"scraping through comments\")\n ccount = 0\n while ccount < index: \n try:\n comment = driver.find_elements_by_xpath('//*[@id=\"content-text\"]')[ccount].text\n except:\n comment = \"\"\n try:\n authors = driver.find_elements_by_xpath('//a[@id=\"author-text\"]/span')[ccount].text\n except:\n authors = \"\"\n try:\n comment_date = driver.find_elements_by_xpath('//*[@id=\"published-time-text\"]/a')[ccount].text\n except:\n comment_date = \"\"\n try:\n replies = driver.find_elements_by_xpath('//*[@id=\"more-text\"]')[ccount].text \n if replies ==\"View reply\":\n replies= 1\n else:\n replies =replies.replace(\"View \",\"\")\n replies =replies.replace(\" replies\",\"\")\n except:\n replies = \"\"\n try:\n upvotes = str(driver.find_elements_by_xpath('//*[@id=\"vote-count-middle\"]')[ccount].text)\n except:\n upvotes = \"\"\n \n \n # Write scraped data to csv file\n youtube_dict['query'] = query\n youtube_dict['url'] = url\n youtube_dict['title'] = title\n youtube_dict['upload_date'] = upload_date\n youtube_dict['channel'] = v_channel\n youtube_dict['no_of_views'] = v_views\n youtube_dict['likes'] = v_likes\n youtube_dict['dislikes'] = v_dislikes\n youtube_dict['comment'] = comment\n youtube_dict['author'] = authors\n youtube_dict['comment_date'] = comment_date\n youtube_dict['no_of_replies'] = replies\n youtube_dict['upvotes'] = upvotes\n writer.writerow(youtube_dict.values())\n \n ccount = ccount + 1\n \n # If video errors out, move onto the next one\n except TimeoutException as e:\n print(title, \" errored out: \",str(e))\n print(\"moving onto next video\")",
"def get_comments(yt_id):\n\n client = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,developerKey=DEVELOPER_KEY)\n\n video_comments = client.commentThreads().list(\n videoId = yt_id,\n part=\"snippet,replies\").execute()\n\n comment_items = video_comments['items']\n\n class MLStripper(HTMLParser):\n def __init__(self):\n self.reset()\n self.strict = False\n self.convert_charrefs= True\n self.fed = []\n def handle_data(self, d):\n self.fed.append(d)\n def get_data(self):\n return ''.join(self.fed)\n\n def strip_tags(html):\n s = MLStripper()\n s.feed(html)\n return s.get_data()\n\n comments = []\n for sub_block in comment_items:\n comments.append(strip_tags(sub_block['snippet']['topLevelComment']['snippet']['textDisplay']))\n\n comments_all = ' '.join(comments)\n\n print(\"YouTube comments scanned\")\n return comments_all",
"def request_comments(video_list, API_KEY, csv_path=\"../comments.csv\", as_df=False):\n columns = ['query', 'url', 'title', 'upload_date', 'channel', 'views', 'likes', 'dislikes', 'comment_count', 'comment_text', 'comment_author', 'comment_date', 'comment_likes']\n df = pd.DataFrame(columns=columns)\n \n # If video list is empty, return empty\n \n for video in video_list:\n \n # Grab all comments for video\n comments = yt_comments(video['id'], API_KEY)\n \n # Skip video if comments are disabled\n if not comments:\n continue\n \n for comment in comments:\n youtube_dict = {}\n \n # Write scraped data to csv file\n youtube_dict['query'] = video['query']\n youtube_dict['url'] = \"https://www.youtube.com/watch?v=\" + video['id']\n youtube_dict['title'] = video['title']\n youtube_dict['upload_date'] = video['date']\n youtube_dict['channel'] = video['channel']\n youtube_dict['views'] = video['views']\n youtube_dict['likes'] = video['likes']\n youtube_dict['dislikes'] = video['dislikes']\n youtube_dict['comment_count'] = video['comment_count']\n youtube_dict['comment_text'] = comment['text']\n youtube_dict['comment_author'] = comment['author']\n youtube_dict['comment_date'] = comment['date']\n youtube_dict['comment_likes'] = comment['likes']\n df = df.append(youtube_dict, ignore_index=True)\n \n if as_df:\n return df\n \n df.to_csv(csv_path, encoding=\"UTF-8\", index=False)\n return",
"def get_video_comments(self, video_id, count = 30, page = 1):\n uri = 'videos/' + video_id + '/comments'\n options = { 'per_page': count, 'page': page }\n return self.make_request(uri, options)",
"def get_comments(youtube, video_id, channel_id):\n global nextPageToken\n \n results = youtube.commentThreads().list(\n part=\"snippet\", \n videoId=video_id, \n allThreadsRelatedToChannelId=AUTH_USER_CHANNEL_ID\n ).execute()\n\n nextPageToken = results.get(\"nextPageToken\")\n\n for item in results[\"items\"]:\n comment = item[\"snippet\"][\"topLevelComment\"]\n \tauthor = comment[\"snippet\"][\"authorDisplayName\"]\n \ttry:\n \t authorChannelId = comment[\"snippet\"][\"authorChannelId\"]\n \texcept KeyError:\n \t pass\n \tchannel = authorChannelId.get(\"value\")\n \t\n \tchannel_list.append(channel)\n \t\n return results[\"items\"]",
"def get_comment_obj(self, videoId):\n response = self.build_service()\n\n # you only need to build the service once\n # collect all comments\n \n response2 = response.commentThreads().list(\n part='snippet',\n maxResults=100,\n textFormat='plainText',\n order='time',\n videoId=videoId,\n\n ).execute()\n\n return response2",
"def fetch_comments(item):\n # pylint: disable=R0912\n # pylint: disable=R0914\n cw, ch, _ = getxy()\n ch = max(ch, 10)\n ytid, title = item.ytid, item.title\n dbg(\"Fetching comments for %s\", c.c(\"y\", ytid))\n writestatus(\"Fetching comments for %s\" % c.c(\"y\", title[:55]))\n qs = {'textFormat': 'plainText',\n 'videoId': ytid,\n 'maxResults': 50,\n 'part': 'snippet'}\n\n # XXX should comment threads be expanded? this would require\n # additional requests for comments responding on top level comments\n\n jsdata = call_gdata('commentThreads', qs)\n\n coms = jsdata.get('items', [])\n coms = [x.get('snippet', {}) for x in coms]\n coms = [x.get('topLevelComment', {}) for x in coms]\n # skip blanks\n coms = [x for x in coms if len(x.get('snippet', {}).get('textDisplay', '').strip())]\n if not len(coms):\n g.message = \"No comments for %s\" % item.title[:50]\n g.content = generate_songlist_display()\n return\n\n items = []\n\n for n, com in enumerate(coms, 1):\n snippet = com.get('snippet', {})\n poster = snippet.get('authorDisplayName')\n _, shortdate = yt_datetime(snippet.get('publishedAt', ''))\n text = snippet.get('textDisplay', '')\n cid = (\"%s/%s\" % (n, len(coms)))\n out = (\"%s %-35s %s\\n\" % (cid, c.c(\"g\", poster), shortdate))\n out += c.c(\"y\", text.strip())\n items.append(out)\n\n cw = Config.CONSOLE_WIDTH.get\n\n def plain(x):\n \"\"\" Remove formatting. \"\"\"\n return x.replace(c.y, \"\").replace(c.w, \"\").replace(c.g, \"\")\n\n def linecount(x):\n \"\"\" Return number of newlines. \"\"\"\n return sum(1 for char in x if char == \"\\n\")\n\n def longlines(x):\n \"\"\" Return number of oversized lines. \"\"\"\n return sum(len(plain(line)) // cw for line in x.split(\"\\n\"))\n\n def linecounter(x):\n \"\"\" Return amount of space required. \"\"\"\n return linecount(x) + longlines(x)\n\n pagenum = 0\n pages = paginate(items, pagesize=ch, delim_fn=linecounter)\n\n while 0 <= pagenum < len(pages):\n pagecounter = \"Page %s/%s\" % (pagenum + 1, len(pages))\n page = pages[pagenum]\n pagetext = (\"\\n\\n\".join(page)).strip()\n content_length = linecount(pagetext) + longlines(pagetext)\n blanks = \"\\n\" * (-2 + ch - content_length)\n g.content = pagetext + blanks\n screen_update(fill_blank=False)\n xprint(\"%s : Use [Enter] for next, [p] for previous, [q] to return:\"\n % pagecounter, end=\"\")\n v = input()\n\n if v == \"p\":\n pagenum -= 1\n\n elif not v:\n pagenum += 1\n\n else:\n break\n\n g.content = generate_songlist_display()",
"def user_videos(username):\n for page_index in count():\n entry_list = download_video_feed(\n create_feed_url(username, page_index)\n )\n\n for entry in entry_list:\n yield entry\n\n if len(entry_list) < MAX_RESULTS:\n break",
"def get_comments(video_id, CLIENT_SECRETS_FILE):",
"def comment_extraction(self, part, Identity, limit=None, order=None, nextPageToken=None, searchTerms=None):\n key = self.keylist[self.keyindex]\n url_ct = \"https://www.googleapis.com/youtube/v3/commentThreads\"\n comment_details = {}\n\n if Identity.startswith(\"UC\"):\n channelId = Identity\n ct_id = None\n videoId = None\n\n elif Identity.startswith(\"Ug\"):\n ct_id = Identity\n channelId = None\n videoId = None\n\n elif len(Identity) == 11:\n videoId = Identity\n ct_id = None\n channelId = None\n\n else:\n return \"Invalid input to Identity Parameter\" \n \n if limit != None and limit >= 1 and limit <= 100:\n maxResults = limit\n else:\n maxResults = 100\n \n comment_count = initial = 0\n \n try:\n while nextPageToken or initial == 0:\n querystring = {\"part\": part,\n \"channelId\": channelId,\n \"id\": ct_id,\n \"videoId\": videoId,\n \"maxResults\": maxResults,\n \"key\": key,\n \"order\": order,\n \"pageToken\": nextPageToken,\n \"searchTerms\": searchTerms\n }\n\n response=request_handler(self, url_ct, params=querystring, wait=5)\n #print(response) \n if response.get('error'):\n while response['error']['errors'][0]['reason'] == 'quotaExceeded' or \\\n response['error']['errors'][0]['reason'] == 'dailyLimitExceeded':\n key = keychange(self)\n querystring = {\"part\": part,\n \"channelId\": channelId,\n \"id\": ct_id,\n \"videoId\": videoId,\n \"key\": key,\n \"maxResults\": maxResults,\n \"order\": order,\n \"pageToken\": nextPageToken,\n \"searchTerms\": searchTerms\n }\n \n response = request_handler(self, url_ct, params=querystring, wait=5)\n if response.get('error'):\n continue\n else:\n break\n # print(response)\n if response.get('error'):\n comment_details.update({Identity: [str(response)]})\n if response['error']['errors'][0]['reason'] == 'keyInvalid':\n return [{Identity: [str(response), response.text]}]\n break\n \n if response.get('Interneterror'):\n comment_details.update({Identity: response})\n break\n # print(response) \n # if limit == -1:\n # limit = response['pageInfo']['totalResults']\n nextPageToken = response.get(\"nextPageToken\")\n \n try:\n comment_count = comment_count + len(response['items'])\n # print(\"total comment extracted\",comment_count)\n if comment_details.get(Identity):\n comment_details[Identity].extend(response['items'])\n else:\n comment_details[Identity] = response['items']\n if nextPageToken==None or (comment_count>= limit and limit!=-1):\n break\n \n\n except:\n pass\n\n initial += 1\n\n # try:\n # comment_details[Identity] = response['items']\n # except:\n # pass\n\n except Exception as e:\n print(e,traceback.format_exc())\n\n return comment_details",
"def make_video_grid(videos, rows, cols):\n\n N, C, T, H, W = videos.shape\n assert N == rows*cols\n \n videos = videos.transpose(1, 2, 0, 3, 4)\n videos = videos.reshape(C, T, rows, cols, H, W)\n videos = videos.transpose(0, 1, 2, 4, 3, 5)\n videos = videos.reshape(C, T, rows * H, cols * W)\n if C == 1:\n videos = np.tile(videos, (3, 1, 1, 1))\n videos = videos[None]\n\n return videos",
"def handle_comments(self):\r\n comments = Comment.objects.all()\r\n for c in comments:\r\n new = ThreadedComment(\r\n content_type = c.content_type,\r\n object_id = c.object_id,\r\n comment = c.comment,\r\n user = c.user,\r\n date_submitted = c.submit_date,\r\n date_modified = c.submit_date,\r\n date_approved = c.submit_date,\r\n is_public = c.is_public,\r\n ip_address = c.ip_address,\r\n is_approved = not c.is_removed\r\n )\r\n new.save()",
"def youtube_import_comments(video_pk):\n from .models import Video, VideoComment # avoid circular imports\n try:\n video = Video.objects.get(pk=video_pk)\n except Video.DoesNotExist:\n logger.info('Video {} no longer exists! Cant import comments')\n return\n\n try:\n client = youtube.Client()\n comments = client.get_video_comments(video.youtube_id)\n except Exception:\n logger.exception(\n 'Error importing comments for video %r', video.youtube_id)\n return\n if comments:\n for c in comments:\n data = c['snippet']['topLevelComment']['snippet']\n updated = parser.parse(data['updatedAt'])\n published = parser.parse(data['publishedAt'])\n comment = VideoComment.objects.create(\n video=video,\n youtube_id=c['snippet']['topLevelComment']['id'],\n author_display_name=data['authorDisplayName'],\n author_profile_image_url=data['authorProfileImageUrl'],\n comment_raw=data['textOriginal'],\n comment_rich=data['textDisplay'],\n published=published,\n updated=updated)\n deferred.defer(\n cloudnlp_analyze_comment, comment.pk, _queue='analyze')\n logger.info('Finished importing comment for video %r', video.youtube_id)",
"def get_video_comments(self):\n other_user_email = request.args.get('other_user_email')\n video_title = request.args.get('video_title')\n if not other_user_email or not video_title:\n self.logger.debug(messages.MISSING_FIELDS_ERROR % \"query params\")\n return messages.ERROR_JSON % messages.MISSING_FIELDS_ERROR % \"query params\", 400\n users_data, comments = self.video_database.get_comments(other_user_email, video_title)\n response = [{\"user\": u,\n \"comment\": {\"content\":c.content, \"timestamp\": c.timestamp.isoformat()}}\n for u,c in zip(users_data, comments)]\n return json.dumps(response), 200",
"def comments(number):\n if g.browse_mode == \"normal\":\n item = g.model.songs[int(number) - 1]\n fetch_comments(item)\n\n else:\n g.content = generate_songlist_display()\n g.message = \"Comments only available for video items\"",
"def comments_to_csv(query, API_KEY, publishedBefore, publishedAfter, maxResults=49, driver_path=\"C:/WebDriver/bin/chromedriver.exe\", csv_path=\"./youtube_comments.csv\", useAPI=True):\n\n \n video_list = request_videos(query, API_KEY, publishedBefore, publishedAfter, maxResults=maxResults)\n \n if (useAPI):\n request_comments(video_list, API_KEY, csv_path)\n else:\n scrape_comments(video_list, driver_path, csv_path)",
"def __create_consumer_threads(\n self, num_threads, rate_limiter, experiment_end_time, reported_outcome_generator\n ):\n\n def consume_token_blocking():\n \"\"\"consumer threads will keep acquiring tokens until experiment end time\"\"\"\n while self._fake_clock.time() < experiment_end_time:\n # A simple loop that acquires token, updates a counter, then releases token with an outcome\n # provided by reported_outcome_generator()\n t1 = self._fake_clock.time()\n token = rate_limiter.acquire_token()\n # update test state\n self._test_state_lock.acquire()\n try:\n self._test_state[\"count\"] += 1\n self._test_state[\"times\"].append(int(t1))\n finally:\n self._test_state_lock.release()\n\n self._outcome_generator_lock.acquire()\n try:\n outcome = next(reported_outcome_generator)\n rate_limiter.release_token(token, outcome)\n finally:\n self._outcome_generator_lock.release()\n\n return [\n threading.Thread(target=consume_token_blocking) for _ in range(num_threads)\n ]",
"def sub_processor(lock, pid, video_list):\r\n text = 'processor %d' % pid\r\n with lock:\r\n progress = tqdm.tqdm(\r\n total=len(video_list),\r\n position=pid,\r\n desc=text\r\n )\r\n for i in range(len(video_list)):\r\n video_name = video_list[i]\r\n \"\"\" Read result csv file \"\"\"\r\n df = pd.read_csv(os.path.join(config.post_csv_load_dir, video_name + \".csv\"))\r\n \"\"\" Calculate final score of proposals \"\"\"\r\n df['score'] = df.iou.values[:] * df.start.values[:] * df.end.values[:]\r\n if len(df) > 1:\r\n df = softNMS(df)\r\n df = df.sort_values(by=\"score\", ascending=False)\r\n video_info = video_dict[video_name]\r\n video_duration = video_info[\"duration_second\"]\r\n proposal_list = []\r\n\r\n for j in range(min(top_number, len(df))):\r\n tmp_proposal = {}\r\n tmp_proposal[\"score\"] = df.score.values[j]\r\n tmp_proposal[\"segment\"] = [max(0, df.xmin.values[j]) * video_duration,\r\n min(1, df.xmax.values[j]) * video_duration]\r\n tmp_proposal[\"label\"] = \"行走\"\r\n # tmp_proposal[\"label\"] = \"Fun sliding down\"\r\n proposal_list.append(tmp_proposal)\r\n result_dict[video_name] = proposal_list\r\n with lock:\r\n progress.update(1)\r\n\r\n with lock:\r\n progress.close()",
"def Threads():\n for i in range(0, idc.get_thread_qty()):\n yield idc.getn_thread(i)",
"def test_plenty_of_video_files():\n # make sure that there is one sequence per video file\n pipe = VideoPipe(\n batch_size=BATCH_SIZE, data=PLENTY_VIDEO_FILES, step=1000000, sequence_length=1)\n pipe.build()\n iters = math.ceil(len(os.listdir(PLENTY_VIDEO_DIRECTORY)) / BATCH_SIZE)\n for i in range(iters):\n print(\"Iter \" + str(i))\n pipe.run()",
"def save_comments(self, videoId):\n comm_obj = self.get_comment_obj(videoId)# need to get the id \n\n file_exists = os.path.isfile(self.path)\n f = open(self.path, 'a', encoding='utf-8-sig')\n writer_top = csv.writer(f, delimiter=',', quoting=csv.QUOTE_MINIMAL)\n if not file_exists:\n writer_top.writerow(['etag'] + ['videoId'] + ['commentId'] + ['text'] + ['author'] + ['like'] + ['time'])\n f.close()\n\n f = open(self.path, 'a', encoding='utf-8-sig')\n writer_top = csv.writer(f, delimiter=',', quoting=csv.QUOTE_MINIMAL)\n\n for i in comm_obj['items']:\n\n result_row = [[i['etag'], i['snippet']['videoId'], i['snippet']['topLevelComment']['id'], i['snippet']['topLevelComment']['snippet']['textDisplay'], i['snippet']['topLevelComment']['snippet']['authorDisplayName'], i['snippet']['topLevelComment']['snippet']['likeCount'], i['snippet']['topLevelComment']['snippet']['publishedAt']]]\n writer_top.writerows(result_row)\n f.close()",
"def downloadAllVideos(train_csv_path, train_data_dir):\n\n vid2genre = {}\n with open(train_csv_path, 'r') as fin:\n lines = [line for line in fin.readlines() if not line.startswith('#')]\n print('start downloading %d videos' % len(lines))\n # use multiprocessing pool\n pool = multiprocessing.Pool(16)\n for i, line in enumerate(lines):\n # Extract the words consisting of video_id, start_time, end_time, list of video_tags\n words = [word.replace(\"\\n\", \"\").replace('\"', '') for word in line.replace(\" \", \"\").split(\",\")]\n words = words[0:3] + [words[3:]]\n video_id = words[0]\n vid2genre[video_id] = words[-1]\n pool.apply_async(download_vid, (line, train_data_dir))\n\n pool.close()\n pool.join()\n return vid2genre",
"def make_comments(post, comments):\n for comment in comments:\n try:\n com = RedditComment(reddit_post=post, **comment)\n com.save()\n except Exception as ex:\n print 'comment could not be created'\n print ex",
"def multi(video, processes):\n if processes < 0:\n processes = cpu_count() + processes\n elif processes == 0:\n raise ValueError('Number of processes must not be zero.')\n\n points = video.points\n points_split = tools.split_points(points, processes=processes)\n \n idi_kwargs = {\n 'cih_file': video.cih_file,\n }\n \n method_kwargs = {\n 'roi_size': video.method.roi_size, \n 'pad': video.method.pad, \n 'max_nfev': video.method.max_nfev, \n 'tol': video.method.tol, \n 'verbose': video.method.verbose, \n 'show_pbar': video.method.show_pbar\n }\n \n pool = Pool(processes=processes)\n results = [pool.apply_async(worker, args=(p, idi_kwargs, method_kwargs)) for p in points_split]\n pool.close()\n pool.join()\n\n out = []\n for r in results:\n _r = r.get()\n for i in _r:\n out.append(i)\n \n return np.asarray(out)",
"def build_pipeline_november_comments(subreddit, limit):\n data_file_name = subreddit + '_november_comments'\n cleaned_file_name = data_file_name + '_cleaned'\n standardized_file_name = cleaned_file_name + '_standardized'\n vader_file_name = standardized_file_name + '_vader'\n flair_file_name = vader_file_name + '_flair'\n ibm_tone_file_name = flair_file_name + '_tones'\n\n # get historical data\n comment_data = get_november_historical_comments(subreddit, limit)\n\n # save to csv\n save_historical_submission_comments(comment_data, data_file_name + '.csv')\n\n # sanitize characters\n print('sanitizing characters')\n sanitize_characters(data_file_name + '.csv', cleaned_file_name + '.csv')\n\n # standardize comments\n generic_run_standardize_comments(cleaned_file_name + '.csv', standardized_file_name + '.csv')\n\n # add vader sentiment scores\n generic_run_vader_sentiment_scores(standardized_file_name + '.csv', vader_file_name + '.csv')\n\n # add flair sentiment score\n add_flair_sentiment_to_csv(vader_file_name + '.csv', flair_file_name + '.csv')",
"def post_video(self, comment):\n\t\tpass",
"def getMovieShortComments(movieid, pages=1, proxy=1):\n\n commentList = []\n\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_0) AppleWebKit/537.36 (KHTML, like Gecko) '\n 'Chrome/71.0.3578.98 Safari/537.36',\n 'Cookie': 'bid=PFXqD9SdoDo; douban-fav-remind=1; gr_user_id=0f03311e-0e28-4e2f-a8fd-3a272d2a525f; _vwo_uuid_v2=D54BE21A153A50F178B1EEA3EE252805F|d0f6410ffbf6226399de9cd1715afb86; viewed=\"1148282_30329536_25815142\"; ll=\"118172\"; push_doumail_num=0; douban-profile-remind=1; __yadk_uid=7QS0r1GHatoz4fkcP2sh8IWeD8YWzQ4u; push_noty_num=0; __utmv=30149280.18600; _ga=GA1.2.449624121.1587021337; __utmc=30149280; __utmz=30149280.1589694675.4.3.utmcsr=m.douban.com|utmccn=(referral)|utmcmd=referral|utmcct=/movie/; __utmc=223695111; __utmz=223695111.1589694675.4.3.utmcsr=m.douban.com|utmccn=(referral)|utmcmd=referral|utmcct=/movie/; __gads=ID=352a53130bca4285:T=1589699239:S=ALNI_MYKpXBWoi1resUvUVMC-9bRu-CuSw; _pk_ref.100001.4cf6=%5B%22%22%2C%22%22%2C1589784625%2C%22https%3A%2F%2Fm.douban.com%2Fmovie%2F%22%5D; _pk_ses.100001.4cf6=*; ap_v=0,6.0; __utma=30149280.449624121.1587021337.1589694675.1589784731.5; __utma=223695111.299663224.1587002697.1589694675.1589784731.5; __utmb=223695111.0.10.1589784731; __utmt=1; __utmb=30149280.1.10.1589784731; dbcl2=\"186000836:vB8x8LL+q3k\"; ck=kTW_; _pk_id.100001.4cf6=ffb676b0890cad74.1587002697.6.1589786159.1589699369.'\n }\n session = requests.Session()\n\n proxies = None\n if proxy == 1:\n proxies = get_proxy.get_workable_ip()\n\n # First, try to get the total of comments.\n r = session.get(\n \"https://movie.douban.com/subject/\" + str(movieid) + \"/comments?limit=20&sort=new_score&status=P&start=\",\n headers=headers, proxies=proxies)\n bsObj = bs4.BeautifulSoup(r.text, \"html.parser\")\n numstr = bsObj.body.find('div', {'id': 'wrapper'}).find('ul', {'class': 'fleft CommentTabs'}) \\\n .find('li', {'class': 'is-active'}).span.get_text()\n num = re.match(r'(\\D+)(\\d+)', numstr)\n total = int(num.group(2))\n print(total)\n\n # To avoid the situation that the total of comments is less than the number we set.\n if pages * 20 > total:\n pages = int(total / 20 + 1)\n\n # Get comments.\n try:\n for i in range(0, pages):\n r = session.get(\n \"https://movie.douban.com/subject/\" + str(\n movieid) + \"/comments?limit=20&sort=new_score&status=P&start=\" +\n str(i * 20), headers=headers)\n bsObj = bs4.BeautifulSoup(r.text, \"html.parser\")\n comment_tags = bsObj.body.find('div', {'id': 'comments'}).find_all('div', {'class': 'comment-item'})\n pattern = re.compile('\\d{2}')\n for tag in comment_tags:\n temp = {}\n t = tag.find('span', {'class': re.compile('(.*) rating')})\n if t is not None:\n star = int(pattern.findall(t['class'][0])[0])\n # print(star)\n temp['comment'] = tag.find('p').span.get_text()\n temp['star'] = star\n commentList.append(temp)\n except AttributeError as e:\n print(\"Limited by website, please change your proxy.爬虫好像受到网站的限制,请更换代理。\")\n return commentList",
"def comments(\n self, **stream_options: Any\n ) -> Generator[praw.models.Comment, None, None]:\n return stream_generator(self.subreddit.comments, **stream_options)",
"def comments(self, q=None, sort=None):\n params = {}\n if sort is not None:\n params[\"sort\"] = sort\n if q is not None:\n params[\"q\"] = q\n for comment in self._get_paged(\"comments\", params=params):\n yield Comment(comment, **self._new_session_args)"
] | [
"0.6485952",
"0.63381004",
"0.61707836",
"0.6121679",
"0.59488356",
"0.59478843",
"0.5864268",
"0.57775325",
"0.57679206",
"0.57674754",
"0.57524395",
"0.57088053",
"0.56520087",
"0.55044687",
"0.5464832",
"0.54541576",
"0.53520346",
"0.52295196",
"0.521179",
"0.51633495",
"0.5158391",
"0.515181",
"0.5127983",
"0.5084821",
"0.5077224",
"0.5060407",
"0.50408614",
"0.5035596",
"0.50288707",
"0.49945945"
] | 0.8944976 | 0 |
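The generator documented above simply fans out one comment-thread request per video and yields each result. A caller-side sketch with the client and its per-video fetch mocked; the names below are assumptions, not the real API:

class FakeCommentClient:
    def get_comment_threads_for_video(self, video_id):
        # Assumption: the real method returns a list of thread dicts for one video.
        return [{"videoId": video_id, "text": "example thread"}]

    def gen_comment_threads_for_videos(self, videos):
        # Same yield-per-video loop as the document above, minus the progress prints.
        for video in videos:
            yield self.get_comment_threads_for_video(video["id"])

client = FakeCommentClient()
for threads in client.gen_comment_threads_for_videos([{"id": "vid1"}, {"id": "vid2"}]):
    print(len(threads), "thread(s) fetched")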
Function to take the output of the ArcGIS catchment delineation polygon shapefile and catchment sites csv and return a shapefile with appropriately delineated polygons. | def agg_catch(catch_del_shp, catch_sites_csv, catch_sites_col=['GRIDCODE', 'SITE'], catch_col='GRIDCODE'):
## Catchment areas shp
catch = read_file(catch_del_shp)[[catch_col, 'geometry']]
## dissolve the polygon
catch3 = catch.dissolve(catch_col)
## Determine upstream catchments
catch_df, singles_df = catch_net(catch_sites_csv, catch_sites_col)
base1 = catch3[in1d(catch3.index, singles_df)].geometry
for i in catch_df.index:
t1 = append(catch_df.loc[i, :].dropna().values, i)
t2 = GeoSeries(catch3[in1d(catch3.index, t1)].unary_union, index=[i])
base1 = GeoSeries(concat([base1, t2]))
## Convert to GeoDataFrame (so that all functions can be applied to it)
base2 = GeoDataFrame(base1.index, geometry=base1.geometry.values, crs=catch.crs)
base2.columns = ['site', 'geometry']
return(base2) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def combine_catchments(catchmentfile, flowfile, elevationfile, comid, \n output = None, overwrite = False, verbose = True):\n\n t0 = time.time()\n numpy.seterr(all = 'raise')\n\n if output is None: output = os.getcwd() + r'\\combined'\n\n if os.path.isfile(output + '.shp') and not overwrite:\n if verbose: print('combined catchment shapefile %s exists' % output)\n return\n \n if verbose: print('combining catchments from %s\\n' % catchmentfile)\n\n # start by copying the projection files\n\n shutil.copy(catchmentfile + '.prj', output + '.prj')\n\n # load the catchment and flowline shapefiles\n\n c = Reader(catchmentfile, shapeType = 5)\n f = Reader(flowfile, shapeType = 3)\n\n # make lists of the comids and featureids\n\n featureid_index = c.fields.index(['FEATUREID', 'N', 9, 0]) - 1\n comid_index = f.fields.index(['COMID', 'N', 9, 0]) - 1\n\n featureids = [r[featureid_index] for r in c.records()]\n comids = [r[comid_index] for r in f.records()]\n\n # check that shapes are traceable--don't have multiple points and start\n # and end at the same place--then make an appropriate list of shapes\n # and records--note it's more memory efficient to read one at a time\n\n n = len(c.records())\n shapes = []\n records = [] \n bboxes = []\n\n try: \n for i in range(n):\n catchment = c.shape(i)\n record = c.record(i)\n\n shape_list = format_shape(catchment.points)\n for s in shape_list:\n shapes.append(s)\n records.append(record)\n bboxes.append(catchment.bbox)\n\n try: combined = combine_shapes(shapes, bboxes, verbose = verbose)\n except: combined = combine_shapes(shapes, bboxes, skip = True, \n verbose = verbose)\n\n except: \n shapes = []\n records = [] \n bboxes = []\n for i in range(n):\n catchment = c.shape(i)\n record = c.record(i)\n\n shape_list = format_shape(catchment.points, omit = True)\n for s in shape_list:\n shapes.append(s)\n records.append(record)\n bboxes.append(catchment.bbox)\n\n try: combined = combine_shapes(shapes, bboxes, verbose = verbose)\n except: combined = combine_shapes(shapes, bboxes, skip = True,\n verbose = verbose)\n\n # iterate through the catchments and get the elevation data from NED\n # then estimate the value of the overland flow plane length and slope\n\n lengths = numpy.empty((n), dtype = 'float')\n slopes = numpy.empty((n), dtype = 'float')\n\n for i in range(n):\n catchment = c.shape(i)\n flowline = f.shape(comids.index(featureids[i]))\n\n catchpoints = get_raster_on_poly(elevationfile, catchment.points,\n verbose = verbose)\n catchpoints = numpy.array([p for p in catchpoints])\n\n zs = get_raster(elevationfile, flowline.points)\n\n flowpoints = numpy.array([[p[0], p[1], z] \n for p, z in zip(flowline.points, zs)])\n\n # iterate through the raster values and find the closest flow point\n\n closest = numpy.empty((len(catchpoints), 3), dtype = 'float')\n\n for point, j in zip(catchpoints, range(len(catchpoints))):\n closest[j] = flowpoints[numpy.dot(flowpoints[:, :2], \n point[:2]).argmin()]\n\n # estimate the slope and overland flow plane length\n\n length, slope = get_overland_vector(catchpoints, closest)\n\n if verbose: print('avg slope and length =', slope.mean(), length.mean())\n\n lengths[i], slopes[i] = length.mean(), slope.mean()\n\n if verbose: print('\\nfinished overland flow plane calculations\\n')\n\n # get area of the subbasin from the catchment metadata\n\n areasq_index = c.fields.index(['AreaSqKM', 'N', 19, 6]) - 1\n areas = numpy.array([r[areasq_index] for r in c.records()])\n\n # take the area weighted average of the slopes and flow lengths\n\n 
tot_area = round(areas.sum(), 2)\n avg_length = round(1000 * numpy.sum(areas * lengths) / tot_area, 1)\n avg_slope = round(numpy.sum(areas * slopes) / tot_area, 4)\n\n # get the centroid and the average elevation\n\n combined = [[float(x), float(y)] for x, y in combined]\n centroid = get_centroid(numpy.array(combined))\n\n Cx, Cy = round(centroid[0], 4), round(centroid[1], 4)\n\n elev_matrix, origin = get_raster_in_poly(elevationfile, combined, \n verbose = verbose)\n\n elev_matrix = elev_matrix.flatten()\n elev_matrix = elev_matrix[elev_matrix.nonzero()]\n \n avg_elev = round(elev_matrix.mean() / 100., 2)\n\n # write the data to the shapefile\n\n w = Writer(shapeType = 5)\n\n fields = [['ComID', 'N', 9, 0],\n ['PlaneLenM', 'N', 8, 2],\n ['PlaneSlope', 'N', 9, 6],\n ['AreaSqKm', 'N', 10, 2],\n ['CenX', 'N', 12, 6],\n ['CenY', 'N', 12, 6],\n ['AvgElevM', 'N', 8, 2]]\n\n record = [comid, avg_length, avg_slope, tot_area, Cx, Cy, avg_elev]\n\n for field in fields: w.field(*field)\n \n w.record(*record)\n \n w.poly(shapeType = 5, parts = [combined])\n\n w.save(output)\n\n if verbose: print('\\ncompleted catchment combination in %.1f seconds\\n' % \n (time.time() - t0))",
"def rec_catch_del(sites_shp, sites_col='site', catch_output=None):\n\n ### Parameters\n server = 'SQL2012PROD05'\n db = 'GIS'\n streams_table = 'MFE_NZTM_REC'\n streams_cols = ['NZREACH', 'NZFNODE', 'NZTNODE']\n catch_table = 'MFE_NZTM_RECWATERSHEDCANTERBURY'\n catch_cols = ['NZREACH']\n\n ### Modifications {NZREACH: {NZTNODE/NZFNODE: node # to change}}\n mods = {13053151: {'NZTNODE': 13055874}, 13048353: {'NZTNODE': 13048851}, 13048498: {'NZTNODE': 13048851}}\n\n ### Load data\n rec_streams = rd_sql(server, db, streams_table, streams_cols, geo_col=True)\n rec_catch = rd_sql(server, db, catch_table, catch_cols, geo_col=True)\n pts = select_sites(sites_shp)\n\n ### make mods\n for i in mods:\n rec_streams.loc[rec_streams['NZREACH'] == i, mods[i].keys()] = mods[i].values()\n\n ### Find closest REC segment to points\n pts_seg = closest_line_to_pts(pts, rec_streams, line_site_col='NZREACH', dis=400)\n nzreach = pts_seg.copy().NZREACH.unique()\n\n ### Find all upstream reaches\n reaches = find_upstream_rec(nzreach, rec_shp=rec_streams)\n\n ### Extract associated catchments\n rec_catch = extract_rec_catch(reaches, rec_catch_shp=rec_catch)\n\n ### Aggregate individual catchments\n rec_shed = agg_rec_catch(rec_catch)\n rec_shed.columns = ['NZREACH', 'geometry', 'area']\n rec_shed1 = rec_shed.merge(pts_seg.drop('geometry', axis=1), on='NZREACH')\n\n ### Export and return\n rec_shed1.to_file(catch_output)\n return(rec_shed1)",
"def to_dxf_file(geo_file_path, output_file_path):\n doc = ezdxf.new(dxfversion='R2010')\n msp = doc.modelspace()\n\n points = get_points(geo_file_path)\n elements = get_elements(geo_file_path)\n\n for element in elements:\n element_type = element[0]\n\n if element_type.startswith(ELEMENT_LIN):\n type_, p_id_1, p_id_2 = element\n p1 = points[p_id_1]\n p2 = points[p_id_2]\n\n msp.add_line(p1, p2)\n\n if element_type.startswith(ELEMENT_ARC):\n type_, center_id, start_id, end_id, direction = element\n center = points[center_id]\n start = points[start_id]\n end = points[end_id]\n\n radius = distance_2d(center, start)\n angle_start = math.degrees(math.atan2(start[1]-center[1], start[0]-center[0]))\n angle_end = math.degrees(math.atan2(end[1]-center[1], end[0]-center[0]))\n\n if int(direction) > 0:\n msp.add_arc(center, radius, angle_start, angle_end)\n else:\n msp.add_arc(center, radius, angle_end, angle_start)\n\n if element_type.startswith(ELEMENT_CIR):\n type_, center_id, radius = element\n center = points[center_id]\n\n msp.add_circle(center, radius)\n\n doc.saveas(output_file_path)",
"def get_district_file(state=48, district=7, leg_body='US-REP'):\r\n\r\n district_file = get_district_geojson_filename(\r\n state=state, district=district, leg_body=leg_body)\r\n geojson_path = 'static/geojson/' \r\n state = \"{0:0>2}\".format(state)\r\n district = \"{0:0>2}\".format(district)\r\n \r\n if not os.path.isfile(district_file):\r\n print( \"Downloading district file\" )\r\n # TODO download the most recent districts file\r\n # currently it downloads the 2016 district\r\n # 'http://www2.census.gov/geo/tiger/GENZ2016/shp/cb_2016_us_cd115_500k.zip'\r\n \r\n if leg_body == 'US-REP':\r\n district_url = 'http://www2.census.gov/geo/tiger/GENZ2016/shp/cb_2016_us_cd115_500k.zip'\r\n if leg_body == 'STATE-REP':\r\n district_url = 'ftp://ftpgis1.tlc.state.tx.us/DistrictViewer/House/PlanH358.zip'\r\n if leg_body == 'STATE-SEN':\r\n district_url = 'ftp://ftpgis1.tlc.state.tx.us/DistrictViewer/Senate/PlanS172.zip'\r\n \r\n district_dl_file = geojson_path + 'district.zip'\r\n download_file(district_url, district_dl_file)\r\n extract_all(district_dl_file, geojson_path)\r\n \r\n if len(glob(geojson_path + '*shp')) > 0:\r\n districts_shapefile = glob(geojson_path + '*shp')[0]\r\n else:\r\n for p in glob(geojson_path + '*'):\r\n if os.path.isdir(p):\r\n shapefile_path = p\r\n districts_shapefile = glob(p + '/*shp')[0]\r\n \r\n print( \"Converting district file to GEOJSON\" )\r\n districts = gpd.read_file(districts_shapefile)\r\n \r\n if leg_body == 'US-REP':\r\n d_index = districts[districts.GEOID == (state + district) ].index\r\n if leg_body == 'STATE-REP' or leg_body == 'STATE-SEN':\r\n d_index = districts[districts.District == int(district) ].index\r\n\r\n district_shape = districts.loc[d_index]\r\n district_shape = district_shape.to_crs({'init': u'epsg:4326'})\r\n district_shape.to_file(district_file, driver='GeoJSON')\r\n\r\n # cleanup geojson dir\r\n if len(glob(geojson_path + '*shp')) > 0:\r\n shapefile_prefix = glob(geojson_path + '*shp')[0].split(\r\n geojson_path)[1].split('.')[0]\r\n shapefiles = glob(geojson_path + shapefile_prefix + '*')\r\n for f in shapefiles:\r\n os.remove(f)\r\n else:\r\n shapefile_prefix = glob(shapefile_path + '/*shp')[0].split(\r\n shapefile_path)[1].split('.')[0]\r\n shapefiles = glob(shapefile_path + shapefile_prefix + '*')\r\n for f in shapefiles:\r\n os.remove(f)\r\n os.rmdir(shapefile_path)\r\n os.remove(district_dl_file)",
"def construct_polygon(self, polygon_longs: List, polygon_lats: List) -> gpd.GeoDataFrame:\n\n polygon_geom = Polygon(zip(polygon_longs, polygon_lats))\n\n crs = {'init': 'epsg:4326'}\n polygon = gpd.GeoDataFrame(index=[0], crs=crs, geometry=[polygon_geom])\n\n polygon.to_file(filename=f'{self.polygon_path}/polygon_{self.postfix}.geojson', driver='GeoJSON')\n polygon.to_file(filename=f'{self.polygon_path}/polygon_{self.postfix}.shp', driver=\"ESRI Shapefile\")\n\n self.monitor.info(\"-> Created area polygon.\")\n return polygon",
"def extractpolylinefromdxf():\r\n d={}\r\n for readfile in readfilelist: #将readfilelist中的文件逐个按照程序进行读取分析\r\n filetoread=open(readfile,'r')\r\n layername=filetoread.name.split(\".\")[0]\r\n #newfilename=filetoread.name.split('.')[0]+'.txt'\r\n #readme.write(newfilename)\r\n #filetowrite=file(newfilename,'w')\r\n #writefilelist.append(newfilename) \r\n x=0 #x坐标\r\n y=0 #y坐标\r\n dataset=[] #多段线坐标数组\r\n counter=0\r\n xflag=0 #以下x、y、poly、end flag表示下一次读取行是否进入表示该变量的行。1为是,0为否。\r\n yflag=0\r\n polyflag=0 \r\n endflag=0\r\n polyline=[] #多段线各顶点坐标构成的数组\r\n \r\n \r\n for line in filetoread.readlines():\r\n counter += 1\r\n pattern1=re.compile('AcDbPolyline') #pattern1~5正则表达式判断是否进入标志行\r\n pattern2=re.compile('\\s{1}10')\r\n pattern3=re.compile('\\s{1}20')\r\n pattern4=re.compile('\\s{2}0')\r\n pattern5=re.compile('ENDSEC')\r\n polymatch=pattern1.match(line)\r\n xmatch=pattern2.match(line)\r\n ymatch=pattern3.match(line)\r\n endmatch=pattern4.match(line)\r\n finalmatch=pattern5.match(line)\r\n if finalmatch and polyflag==1 and endflag==1: #实体定义部分结束,将最后一组多段线的顶点坐标数组加入dataset,dataset是该图形中所有多段线的集合\r\n polyflag=0\r\n dataset.append(polyline)\r\n #print(dataset) #打印测试,输出坐标\r\n #readme.write('polyline has ended!!!') \r\n if polyflag==1 and xflag==1 and endflag==0: #读取X坐标\r\n x=float(line)\r\n xflag=0\r\n if polyflag==1 and yflag==1 and endflag==0: #读取Y坐标\r\n y=float(line)\r\n yflag=0\r\n polyline.append([x,y])\r\n if polyflag==1 and len(polyline)>1 and endflag==1: #读取所有多段线坐标后,将坐标数组加入dataset内\r\n dataset.append(polyline)\r\n polyline=[]\r\n endflag=0\r\n if endmatch: \r\n endflag=1\r\n if polymatch: #进入多段线部分,重置其他flag为0。\r\n polyflag=1\r\n endflag=0\r\n xflag=0\r\n yflag=0\r\n if xmatch:\r\n xflag=1\r\n if ymatch:\r\n yflag=1 \r\n \r\n d[layername]=dataset \r\n d[\"Outline\"]=[[[globalconfig.X_LENGTH/2,globalconfig.Y_LENGTH/2],[globalconfig.X_LENGTH/2,-globalconfig.Y_LENGTH/2],[-globalconfig.X_LENGTH/2,-globalconfig.Y_LENGTH/2],[-globalconfig.X_LENGTH/2,globalconfig.Y_LENGTH/2]]]\r\n return d",
"def write_shapefile_branch1(self, shpname):\r\n inarrays = self.read_traveltime()\r\n \r\n Narrays = len(inarrays) \r\n \r\n \r\n westlats = []\r\n westlons = []\r\n eastlats = []\r\n eastlons = [] \r\n lines1 = []\r\n for i in range(len(self.westPnts1)):\r\n westlat, westlon = utm.to_latlon(self.westPnts1[i,0], self.westPnts1[i,1], 14, 'U')\r\n eastlat, eastlon = utm.to_latlon(self.eastPnts1[i,0], self.eastPnts1[i,1], 14, 'U')\r\n lines1.append([[westlon, westlat], [eastlon, eastlat]])\r\n westlats.append(westlat)\r\n westlons.append(westlon)\r\n eastlats.append(eastlat)\r\n eastlons.append(eastlon)\r\n \r\n # Create the projection\r\n spatialReference = osgeo.osr.SpatialReference()\r\n spatialReference.ImportFromProj4('+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs')\r\n \r\n # Create the shape file\r\n outfile = r'ArcGIS_online\\%s'%shpname\r\n driver = osgeo.ogr.GetDriverByName('ESRI Shapefile')\r\n shapeData = driver.CreateDataSource(outfile)\r\n \r\n # Create the layer\r\n layer = shapeData.CreateLayer('Contour', spatialReference, osgeo.ogr.wkbLineString)\r\n layerDefinition = layer.GetLayerDefn()\r\n \r\n # Create fields containing segment infos\r\n field_def = osgeo.ogr.FieldDefn('BranchID', osgeo.ogr.OFTInteger)\r\n layer.CreateField(field_def)\r\n \r\n field_def = osgeo.ogr.FieldDefn('Density', osgeo.ogr.OFTInteger)\r\n layer.CreateField(field_def)\r\n \r\n field_def = osgeo.ogr.FieldDefn('SegID', osgeo.ogr.OFTInteger)\r\n layer.CreateField(field_def)\r\n \r\n field_def = osgeo.ogr.FieldDefn('Lon_west', osgeo.ogr.OFTReal)\r\n layer.CreateField(field_def)\r\n \r\n field_def = osgeo.ogr.FieldDefn('Lat_west', osgeo.ogr.OFTReal)\r\n layer.CreateField(field_def)\r\n \r\n field_def = osgeo.ogr.FieldDefn('Lon_east', osgeo.ogr.OFTReal)\r\n layer.CreateField(field_def)\r\n \r\n field_def = osgeo.ogr.FieldDefn('Lat_east', osgeo.ogr.OFTReal)\r\n layer.CreateField(field_def)\r\n \r\n field_def = osgeo.ogr.FieldDefn('Travel_T', osgeo.ogr.OFTReal)\r\n layer.CreateField(field_def)\r\n \r\n \r\n def add_feature(layer, branchID, density, lines, segs, westlon, westlat, eastlon, eastlat, Ttime):\r\n \"\"\"\r\n function that adds feature to layer\r\n \"\"\" \r\n ctr=0\r\n for i in range(len(lines)):\r\n ctr+=1\r\n line = osgeo.ogr.Geometry(osgeo.ogr.wkbLineString)\r\n # Add points individually to the line\r\n xy = lines[i]\r\n \r\n line.AddPoint_2D(xy[0][0],xy[0][1])\r\n line.AddPoint_2D(xy[1][0],xy[1][1])\r\n # Update the feature with the line data\r\n featureIndex = ctr\r\n feature = osgeo.ogr.Feature(layerDefinition)\r\n #feature.SetStyleString(\"PEN(c:r,w:5px)\") \r\n feature.SetGeometry(line)\r\n feature.SetFID(featureIndex)\r\n feature.SetGeometryDirectly(line)\r\n \r\n # Set the attribute table\r\n feature.SetField('BranchID', int(branchID)) \r\n feature.SetField('Density', int(density[i]))\r\n feature.SetField('SegID', int(segs[i])) # convert to int() is necessary, osgeo cannot recognize numpy int32 type\r\n feature.SetField('Travel_T', \"{:.1f}\".format(Ttime[i]))\r\n feature.SetField('Lon_west', \"{:.3f}\".format(westlon[i]))\r\n feature.SetField('Lat_west', \"{:.3f}\".format(westlat[i]))\r\n feature.SetField('Lon_east', \"{:.3f}\".format(eastlon[i]))\r\n feature.SetField('Lat_east', \"{:.3f}\".format(eastlat[i]))\r\n \r\n layer.CreateFeature(feature)\r\n \r\n \r\n Ttime = inarrays[0][:,2]\r\n ind0 = np.nonzero(Ttime)[0][0]\r\n ind = np.arange(ind0, Ttime.shape[0])\r\n \r\n lines1 = [lines1[i] for i in ind]*Narrays\r\n westlats = [westlats[i] for i in ind]*Narrays\r\n westlons = 
[westlons[i] for i in ind]*Narrays\r\n eastlats = [eastlats[i] for i in ind]*Narrays\r\n eastlons = [eastlons[i] for i in ind]*Narrays\r\n \r\n inarrays_new = [inarrays[i][ind,:] for i in range(Narrays)]\r\n inarrays_stack = np.vstack(inarrays_new)\r\n \r\n add_feature(layer, 1, inarrays_stack[:,3], np.asarray(lines1), inarrays_stack[:,1], \r\n np.asarray(westlons), np.asarray(westlats), \r\n np.asarray(eastlats), np.asarray(eastlons), inarrays_stack[:,2])",
"def dem_generation(lastoolsdir, lidardir, ground_poly, cores, units_code, keep_orig_pts, coarse_step,\n coarse_bulge, coarse_spike, coarse_down_spike,\n coarse_offset, fine_step, fine_bulge, fine_spike,\n fine_down_spike, fine_offset, aoi_shp,\n dem_resolution, dem_method, tri_meth, void_meth):\n\n # We carry input spatial ref over from the above process, but we should still convert from shp to ref object\n print('Processing LiDAR to remove vegetation points...')\n las_folder = lidardir + '\\\\las_files\\\\'\n process_lidar(lastoolsdir + '\\\\', las_folder, ground_poly, cores, units_code, keep_orig_pts,\n coarse_step,\n coarse_bulge, coarse_spike, coarse_down_spike,\n coarse_offset, fine_step, fine_bulge, fine_spike,\n fine_down_spike, fine_offset)\n print('Done')\n\n print('Generating a %sm resolution DEM...' % dem_resolution)\n dem = lidar_to_raster(lidardir, ground_poly, aoi_shp, dem_method, tri_meth, void_meth,\n m_cell_size=float(dem_resolution))\n print('Done')\n\n print('Generating hillshade raster for the DEM...')\n hill_out = lidardir + '\\\\hillshade.tif'\n arcpy.HillShade_3d(dem, hill_out)\n print('Done')",
"def process(sources, output, force):\n logging.basicConfig(level=logging.INFO,\n format='%(asctime)s [%(levelname)s] - %(message)s', datefmt=\"%H:%M:%S\")\n\n logging.getLogger('shapely.geos').setLevel(logging.WARNING)\n logging.getLogger('Fiona').setLevel(logging.WARNING)\n logging.getLogger('requests.packages.urllib3.connectionpool').setLevel(logging.WARNING)\n requests.packages.urllib3.disable_warnings()\n # logging.getLogger('processing').setLevel(logging.DEBUG)\n\n catalog_features = []\n failures = []\n path_parts_to_skip = utils.get_path_parts(sources).index(\"sources\") + 1\n success = True\n for path in utils.get_files(sources):\n try:\n logging.info(\"Processing \" + path)\n pathparts = utils.get_path_parts(path)[path_parts_to_skip:]\n pathparts[-1] = pathparts[-1].replace('.json', '.geojson')\n\n outdir = os.path.join(output, *pathparts[:-1], pathparts[-1].replace('.geojson', ''))\n outfile = os.path.join(output, *pathparts)\n\n source = utils.read_json(path)\n urlfile = urlparse(source['url']).path.split('/')[-1]\n \n if not hasattr(adapters, source['filetype']):\n logging.error('Unknown filetype ' + source['filetype'])\n failures.append(path)\n continue\n \n read_existing = False\n if os.path.isfile(outfile):\n logging.info(\"Output file exists\")\n if os.path.getmtime(outfile) > os.path.getmtime(path):\n logging.info(\"Output file is up to date\")\n if not force:\n read_existing = True\n logging.warning('Skipping ' + path + ' since generated file exists. Use --force to regenerate.') \n else:\n logging.info(\"Output is outdated, {} < {}\".format(\n datetime.datetime.fromtimestamp(os.path.getmtime(outfile)),\n datetime.datetime.fromtimestamp(os.path.getmtime(path))))\n\n if read_existing:\n with open(outfile, \"rb\") as f:\n geojson = json.load(f)\n properties = geojson['properties']\n else:\n logging.info('Downloading ' + source['url'])\n \n try:\n fp = utils.download(source['url'])\n except IOError:\n logging.error('Failed to download ' + source['url'])\n failures.append(path)\n continue\n \n logging.info('Reading ' + urlfile)\n \n if 'filter' in source:\n filterer = BasicFilterer(source['filter'], source.get('filterOperator', 'and'))\n else:\n filterer = None\n \n try:\n geojson = getattr(adapters, source['filetype'])\\\n .read(fp, source['properties'],\n filterer=filterer,\n layer_name=source.get(\"layerName\", None),\n source_filename=source.get(\"filenameInZip\", None))\n except IOError as e:\n logging.error('Failed to read ' + urlfile + \" \" + str(e))\n failures.append(path)\n continue\n except zipfile.BadZipfile as e:\n logging.error('Unable to open zip file ' + source['url'])\n failures.append(path)\n continue\n finally:\n os.remove(fp.name)\n if(len(geojson['features'])) == 0:\n logging.error(\"Result contained no features for \" + path)\n continue\n excluded_keys = ['filetype', 'url', 'properties', 'filter', 'filenameInZip']\n properties = {k:v for k,v in list(source.items()) if k not in excluded_keys}\n properties['source_url'] = source['url']\n properties['feature_count'] = len(geojson['features'])\n logging.info(\"Generating demo point\")\n properties['demo'] = geoutils.get_demo_point(geojson)\n \n geojson['properties'] = properties\n \n utils.make_sure_path_exists(os.path.dirname(outfile))\n\n #cleanup existing generated files\n if os.path.exists(outdir):\n rmtree(outdir)\n filename_to_match, ext = os.path.splitext(pathparts[-1])\n output_file_dir = os.sep.join(utils.get_path_parts(outfile)[:-1])\n logging.info(\"looking for generated files to delete in \" + 
output_file_dir)\n for name in os.listdir(output_file_dir):\n base, ext = os.path.splitext(name)\n if base == filename_to_match:\n to_remove = os.path.join(output_file_dir, name)\n logging.info(\"Removing generated file \" + to_remove)\n os.remove(to_remove)\n\n utils.write_json(outfile, geojson)\n\n logging.info(\"Generating label points\")\n label_geojson = geoutils.get_label_points(geojson)\n label_path = outfile.replace('.geojson', '.labels.geojson')\n utils.write_json(label_path, label_geojson)\n\n logging.info('Done. Processed to ' + outfile)\n \n if not \"demo\" in properties:\n properties['demo'] = geoutils.get_demo_point(geojson)\n\n properties['path'] = \"/\".join(pathparts)\n catalog_entry = {\n 'type': 'Feature',\n 'properties': properties,\n 'geometry': geoutils.get_union(geojson)\n }\n catalog_features.append(catalog_entry)\n\n if not os.path.exists(outdir) or not os.path.exists(os.path.join(outdir, \"units.json\")):\n logging.info(\"Generated exploded GeoJSON to \" + outdir)\n if not os.path.exists(outdir):\n os.makedirs(outdir)\n # .json instead of .geojson, incase there is a unit named \"source\"\n utils.write_json(os.path.join(outdir, \"source.json\"), catalog_entry) \n units = []\n for feature in geojson['features']:\n feature_id = str(feature['properties']['id'])\n feature_id = feature_id.replace('/', '')\n feature_filename = os.path.join(outdir, feature_id + \".geojson\")\n utils.write_json(feature_filename, feature)\n units.append(feature['properties'])\n utils.write_json(os.path.join(outdir, \"units.json\"), units)\n else:\n logging.debug(\"exploded GeoJSON already exists, not generating\")\n\n except Exception as e:\n logging.error(str(e))\n logging.exception(\"Error processing file \" + path)\n failures.append(path)\n success = False\n\n catalog = {\n 'type': 'FeatureCollection',\n 'features': catalog_features\n }\n utils.write_json(os.path.join(output,'catalog.geojson'), catalog)\n\n if not success:\n logging.error(\"Failed sources: \" + \", \".join(failures))\n sys.exit(-1)",
"def process_country_shapes():\n path_processed = os.path.join(\n SHAPEFILE_DIR, 'national_outline_{}.shp'.format(COUNTRY_ABBRV))\n\n single_country = None\n if not os.path.exists(path_processed):\n print('Working on national outline')\n path_raw = os.path.join(BASE_DIR, 'data', 'gadm36_levels_shp', 'gadm36_0.shp')\n countries = geopandas.read_file(path_raw)\n\n for name in countries.GID_0.unique():\n if not name == COUNTRY_ABBRV:\n continue\n\n print('Working on {}'.format(name))\n single_country = countries[countries.GID_0 == name]\n\n print('Excluding small shapes')\n single_country['geometry'] = single_country.apply(\n exclude_small_shapes,axis=1)\n\n print('Simplifying geometries')\n single_country['geometry'] = single_country.simplify(\n tolerance = 0.005, preserve_topology=True\n ).buffer(0.01).simplify(tolerance = 0.005,\n preserve_topology=True)\n\n print('Writing national outline to file')\n single_country.to_file(path_processed, driver='ESRI Shapefile')\n found = True\n break\n \n if not found:\n raise ValueError(f'country abbrv {COUNTRY_ABBRV} does not exist')\n\n else:\n single_country = geopandas.read_file(path_processed)\n\n return single_country",
"def intersect_csv_and_shapefiles(in_csv_filename, geodata1, geodata2,\n ancillary_path, out_csv_filename, from_gbif):\n pth, basefname = os.path.split(out_csv_filename)\n logbasename, _ = os.path.splitext(basefname)\n logfname = os.path.join(pth, '{}.log'.format(logbasename))\n logger = get_logger(logbasename, logfname)\n bf = BisonFiller(log=logger)\n # Pass 4 of CSV transform, final step, point-in-polygon intersection\n bf.update_point_in_polygons(\n geodata1, geodata2, ancillary_path, in_csv_filename, out_csv_filename,\n from_gbif=from_gbif)\n # Do intersection here\n sleep(randint(0, 10))\n print(' - {}'.format(out_csv_filename))",
"def read_coastal(filename, plotregion):\n\n # Initialize all variables\n coast_x = []\n coast_y = []\n poly_x = []\n poly_y = []\n segnum = 0\n segments = 0\n\n # Read in file\n polygons = open(filename)\n\n # Parse polygons\n for line in polygons:\n tokens = line.split()\n if (tokens[0] == 'P') or (tokens[0] == 'L'):\n if (len(poly_x) > 0):\n coast_x.append(poly_x)\n coast_y.append(poly_y)\n poly_x = []\n poly_y = []\n segnum = 0\n segments = int(tokens[2])\n else:\n if (segnum >= segments):\n print(\"Invalid number of segments in \" +\n \"polygon from file %s\" % (file))\n return([], [])\n segnum = segnum + 1\n x = float(tokens[0])\n y = float(tokens[1])\n if (in_region([x, y], plotregion)):\n poly_x.append(x)\n poly_y.append(y)\n else:\n if (len(poly_x) > 0):\n coast_x.append(poly_x)\n coast_y.append(poly_y)\n poly_x = []\n poly_y = []\n\n # Remember to close file\n polygons.close()\n\n return coast_x, coast_y",
"def makeLocationPtShapefile(config,locationData):\n\n\n # set up the shapefile driver\n driver = ogr.GetDriverByName(\"ESRI Shapefile\")\n\n num_years = config.EndYear\n \n for iteration in range(config.MinimumIteration, config.MaximumIteration + 1):\n for year in range(1, num_years + 1):\n\n shapeFilename = config.getOutputFilePath(cc.COLLAR_VALUES_SHAPEFILE_FILENAME.format(iteration,year))\n\n # delete the shapefile if it already exists\n if os.path.exists(shapeFilename):\n driver.DeleteDataSource(shapeFilename)\n if os.path.exists(shapeFilename):\n sys.exit(\"Unable to delete existing Shapefile '{0}'\".format(shapeFilename))\n\n # create the data source\n data_source = driver.CreateDataSource(shapeFilename)\n\n # create the spatial reference, WGS84\n srs = osr.SpatialReference()\n srs.ImportFromEPSG(4326)\n\n # create the layer\n layer = data_source.CreateLayer(\"location\", srs, ogr.wkbPoint)\n\n # Add the fields we're interested in\n # ITERATION_ID,YEAR_ID,JULIAN_DAY,STRATUM_ID,HARVEST_ZONE,LAT, LON,OUT_OF_BOUNDS,DISTANCE\n # DEVNOTE: Shapefiles seem bound to 10 character limit\n layer.CreateField(ogr.FieldDefn(\"ITER_ID\", ogr.OFTInteger))\n layer.CreateField(ogr.FieldDefn(\"YEAR_ID\", ogr.OFTInteger))\n layer.CreateField(ogr.FieldDefn(\"JULIAN_DAY\", ogr.OFTInteger))\n layer.CreateField(ogr.FieldDefn(\"STRATUM_ID\", ogr.OFTInteger))\n layer.CreateField(ogr.FieldDefn(\"LAT\", ogr.OFTReal))\n layer.CreateField(ogr.FieldDefn(\"LON\", ogr.OFTReal))\n layer.CreateField(ogr.FieldDefn(\"DIST_KM\", ogr.OFTReal))\n layer.CreateField(ogr.FieldDefn(\"REL_ZOI\", ogr.OFTString))\n layer.CreateField(ogr.FieldDefn(\"RAA\", ogr.OFTString))\n\n # Process the text file and add the attributes and features to the shapefile\n for row in locationData:\n \n # Filter by iteration and timestep\n if row['ITERATION_ID'] == iteration:\n if row['YEAR_ID'] == year:\n # create the feature\n feature = ogr.Feature(layer.GetLayerDefn())\n # Set the attributes using the values from the delimited text file\n feature.SetField(\"ITER_ID\", row['ITERATION_ID'])\n feature.SetField(\"YEAR_ID\", row['YEAR_ID'])\n feature.SetField(\"JULIAN_DAY\", row['JULIAN_DAY'])\n feature.SetField(\"STRATUM_ID\", row['STRATUM_ID'])\n feature.SetField(\"LAT\", row['LAT'])\n feature.SetField(\"LON\", row['LON'])\n feature.SetField(\"DIST_KM\", row['DISTANCE'])\n feature.SetField(\"REL_ZOI\", row['RELATION_TO_ZOI'])\n feature.SetField(\"RAA\", row['RANGE_ASSESSMENT_AREA'])\n\n # create the WKT for the feature using Python string formatting\n wkt = \"POINT(%f %f)\" % (float(row['LON']) , float(row['LAT']))\n\n # Create the point from the Well Known Txt\n point = ogr.CreateGeometryFromWkt(wkt)\n\n # Set the feature geometry using the point\n feature.SetGeometry(point)\n # Create the feature in the layer (shapefile)\n layer.CreateFeature(feature)\n # Destroy the feature to free resources\n feature.Destroy()\n\n # Destroy the data source to free resources\n data_source.Destroy()\n\n print (\"\\n\\tConverted Collar Points Values into Shapefile for Iteration/Year {0}/{1}. Output file:'{2}'\".format(iteration, year, shapeFilename))",
"def read_polygon_shapefile(filename):\n result = cpp_read_polygon_shapefile(filename)\n f_pos = Series(result[0], name=\"f_pos\")\n r_pos = Series(result[1], name=\"r_pos\")\n return (\n f_pos,\n r_pos,\n DataFrame({\"x\": result[2], \"y\": result[3]}),\n )",
"def __load_dxf(self):\n print('Loading file: %s' % self.__fname)\n dwg = dxfgrabber.readfile(self.__fname)\n lines = [item for item in dwg.entities if item.dxftype == 'LINE']\n arcs = [item for item in dwg.entities if item.dxftype == 'ARC']\n if self.__layer > -1:\n lines = [item for item in lines if item.layer == self.__layer]\n arcs = [item for item in arcs if item.layer == self.__layer]\n print('File read.')\n print('Loaded %i lines' % len(lines))\n print('Loaded %i arcs' % len(arcs))\n print('Loaded %i line segments, lines or arcs' %\n (len(lines)+len(arcs)))\n # get all points and Line and Arc using pycalculix entities\n print('Converting to pycalculix lines arcs and points ...')\n all_points, all_lines = self.__get_pts_lines(lines, arcs)\n print('Loaded %i line segments, lines or arcs' % len(all_lines))\n print('Loaded %i points' % len(all_points))\n # for point in all_points:\n # print('%s %s' % (point, point.lines))\n # for line in all_lines:\n # print('%s %s' % (line, line.points))\n\n # remove all lines that are not part of areas\n dangling_points = self.__dangling_points(all_points)\n pruned_geometry = bool(dangling_points)\n while dangling_points:\n for point in dangling_points:\n all_points.remove(point)\n print('Removed point= %s' % point)\n dangling_line = list(point.lines)[0]\n point.unset_line(dangling_line)\n if dangling_line in all_lines:\n all_lines.remove(dangling_line)\n print('Removed line= %s' % dangling_line)\n dangling_points = self.__dangling_points(all_points)\n if pruned_geometry:\n print('Remaining line segments: %i' % len(all_lines))\n print('Remaining points: %i' % len(all_points))\n\n # make line all_loops now\n all_loops = []\n line = all_lines[0]\n this_loop = geometry.LineLoop()\n while len(all_lines) > 0:\n this_loop.append(line)\n all_lines.remove(line)\n if this_loop.closed == True:\n all_loops.append(this_loop)\n this_loop = geometry.LineLoop()\n if all_lines:\n line = all_lines[0]\n continue\n point = line.pt(1)\n other_lines = point.lines - set([line])\n if len(other_lines) > 1:\n # note: one could exclude connected segment nodes\n # make disconnected line all_loops, then have another\n # loop to connect those disconnected line all_loops\n print('One point was connected to > 2 lines.')\n print('Only import simple part all_loops, or surfaces.')\n raise Exception('Import geometry is too complex')\n next_line = list(other_lines)[0]\n if line.pt(1) != next_line.pt(0):\n next_line.reverse()\n line = next_line\n\n # find exterior loops\n exterior_loops = []\n for ind, loop in enumerate(all_loops):\n other_loops = all_loops[ind+1:]\n other_loops.extend(exterior_loops)\n is_exterior = True\n for other_loop in other_loops:\n if loop.inside(other_loop):\n is_exterior = False\n break\n if is_exterior:\n # exterior must be clockwise\n if loop.ccw:\n loop.reverse()\n exterior_loops.append(loop)\n # remove the found part exterior loops from all_loops\n for exterior_loop in exterior_loops:\n all_loops.remove(exterior_loop)\n # each part in parts is a list of line all_loops\n # [exterior, hole1, hole2]\n parts = [[exterior_loop] for exterior_loop in exterior_loops]\n # now place the child hole loops after the part exterior loop\n for part_loops in parts:\n exterior_loop = part_loops[0]\n # find child holes\n for hole_loop in all_loops:\n if hole_loop.inside(exterior_loop):\n hole_loop.hole = True\n # holes must be ccw\n if not hole_loop.ccw:\n hole_loop.reverse()\n part_loops.append(hole_loop)\n # remove child holes from loop list\n for hole_loop in 
part_loops[1:]:\n all_loops.remove(hole_loop)\n\n # make parts\n parts_list = []\n for part_loops in parts:\n this_part = partmodule.Part(self.__fea)\n for ind, loop in enumerate(part_loops):\n is_hole = loop.hole\n start = loop[0].pt(0)\n this_part.goto(start.x, start.y, is_hole)\n for item in loop:\n if isinstance(item, geometry.Line):\n end = item.pt(1)\n this_part.draw_line_to(end.x, end.y)\n elif isinstance(item, geometry.Arc):\n end = item.pt(1)\n center = item.actr\n this_part.draw_arc(end.x, end.y, center.x, center.y)\n parts_list.append(this_part)\n print('Parts created: %i' % len(parts_list))\n return parts_list",
"def read_area_shapes(path_ew, path_s):\n output = []\n\n with fiona.open(path_ew, 'r') as reader:\n for lsoa in reader:\n output.append({\n 'type': lsoa['type'],\n 'geometry': lsoa['geometry'],\n 'properties': {\n 'code': lsoa['properties']['LSOA11CD'],\n # 'LSOA11NM': lsoa['properties']['LSOA11NM'],\n }\n })\n\n with fiona.open(path_s, 'r') as reader:\n for datazone in reader:\n output.append({\n 'type': datazone['type'],\n 'geometry': datazone['geometry'],\n 'properties': {\n 'code': datazone['properties']['DataZone'],\n # 'LSOA11NM': lsoa['properties']['LSOA11NM'],\n }\n })\n\n return output",
"def generatePolygons():",
"def create_wrs_to_mgrs_lookup(wrs_shapefile):\n\n shapefile_driver = ogr.GetDriverByName(\"ESRI Shapefile\")\n\n grid_ds = shapefile_driver.Open(wrs_shapefile, 0)\n\n layer = grid_ds.GetLayer()\n\n\n path_row_list = []\n\n total_features = layer.GetFeatureCount()\n\n for idx, f in enumerate(layer):\n\n print(f'{idx} of {total_features}')\n\n footprint = f.GetGeometryRef().ExportToWkt()\n pathrow = f.GetField('PR')\n\n\n mgrs_list = find_mgrs_intersection_large(footprint)\n print(mgrs_list)\n mgrs_list_fine = []\n\n mgrs_list_fine += find_mgrs_intersection_100km(footprint, mgrs_list)\n\n print('for path row')\n print(pathrow)\n print(mgrs_list)\n print(mgrs_list_fine)\n print('\\n\\n')\n path_row_list.append((str(pathrow), ' '.join(mgrs_list_fine)))\n\n with open('wrs_to_mgrs.csv','w', newline='') as out:\n csv_out = csv.writer(out)\n csv_out.writerow(['pathrow','mgrs_list'])\n\n for row in path_row_list:\n csv_out.writerow(row)",
"def plan2shape(plan_id):\n exportFile = None\n plan = Plan.objects.get(id=plan_id)\n status = DistrictFile.get_file_status(plan, True)\n while status == 'pending':\n time.sleep(15)\n status = DistrictFile.get_file_status(plan, True)\n if status == 'none':\n pending = DistrictFile.get_file_name(plan, True) + '_pending.zip'\n archive = open(pending, 'w')\n try:\n # Create a named temporary file\n exportFile = tempfile.NamedTemporaryFile(\n suffix='.shp', mode='w+b')\n exportFile.close()\n\n # Get the districts in the plan\n districts = plan.district_set.filter(\n id__in=plan.get_district_ids_at_version(plan.version))\n\n # Generate metadata\n meta = DistrictShapeFile.generate_metadata(plan, districts)\n\n # Open a driver, and create a data source\n driver = 'ESRI Shapefile'\n\n # Set up mappings of field names for export, as well as shapefile\n # column aliases (only 8 characters!)\n district_fieldnames = [\n 'id', 'district_id', 'short_label', 'long_label',\n 'version', 'num_members'\n ]\n subject_names = list(Subject.objects.all().values_list(\n 'name', flat=True))\n aliases = {\n 'district_id': 'dist_num',\n 'num_members': 'nmembers',\n 'short_label': 'label',\n 'long_label': 'descr'\n }\n # Map fields to types where the default is incorrect\n mapped_fields = {\n 'id': 'int',\n 'district_id': 'int',\n 'short_label': 'str:10',\n 'long_label': 'str:254',\n 'version': 'int',\n 'num_members': 'int'\n }\n\n # set the district attributes\n record_properties = DistrictShapeFile.make_record_properties(\n district_fieldnames + subject_names,\n overrides=mapped_fields,\n aliases=aliases)\n\n # Add record metadata to meta\n for fieldname in district_fieldnames + subject_names:\n\n # default to double data types, unless the field type is defined\n ftype = mapped_fields.get(fieldname, 'float')\n\n # customize truncated field names\n fieldname = aliases.get(fieldname, fieldname)\n\n if fiona.prop_type(ftype) == unicode:\n domain = {'udom': 'User entered value.'}\n elif fiona.prop_type(ftype) == int:\n rdommin = 0\n rdommax = '+Infinity'\n if fieldname == 'id':\n rdommin = 1\n elif fieldname == 'district_id':\n rdommax = plan.legislative_body.max_districts\n elif fieldname == 'num_members':\n if plan.legislative_body.multi_members_allowed:\n rdommax = plan.legislative_body.max_multi_district_members\n rdommin = plan.legislative_body.min_multi_district_members\n else:\n rdommin = 1\n rdommax = 1\n\n domain = {\n 'rdom': {\n 'rdommin': rdommin,\n 'rdommax': rdommax\n }\n }\n elif fiona.prop_type(ftype) == float:\n # fieldname = Subject.objects.get(\n # name=fieldname).get_label()\n domain = {\n 'rdom': {\n 'rdommin': 0.0,\n 'rdommax': '+Infinity'\n }\n }\n\n attr = {\n 'attrlabl': fieldname,\n 'attrdef': fieldname,\n 'attrdomv': domain\n }\n\n meta['eainfo']['detailed']['attr'].append(attr)\n\n # Create the schema for writing out the shapefile\n schema = {\n 'geometry': 'Polygon',\n 'properties': record_properties\n }\n # begin exporting districts\n with fiona.open(\n exportFile.name,\n 'w',\n driver=driver,\n crs=crs.from_string(districts[0].geom.crs.wkt),\n schema=schema) as sink:\n for district in districts:\n # create a feature\n feature = DistrictShapeFile.district_to_record(\n district, district_fieldnames, subject_names,\n aliases)\n\n sink.write(feature)\n\n # write metadata\n DistrictShapeFile.meta2xml(meta, exportFile.name[:-4] + '.xml')\n\n # Zip up the file\n zipwriter = zipfile.ZipFile(archive, 'w', zipfile.ZIP_DEFLATED)\n exportedFiles = glob(exportFile.name[:-4] + '*')\n for exp in 
exportedFiles:\n zipwriter.write(exp, '%sv%d%s' % (plan.get_friendly_name(),\n plan.version, exp[-4:]))\n zipwriter.close()\n archive.close()\n os.rename(archive.name,\n DistrictFile.get_file_name(plan, True) + '.zip')\n except ValueError as e:\n os.unlink(archive.name)\n logger.warn('The plan \"%s\" was empty, so I bailed out')\n except Exception, ex:\n os.unlink(archive.name)\n logger.warn('The plan \"%s\" could not be saved to a shape file',\n plan.name)\n logger.debug('Reason: %s', ex)\n # delete the temporary file\n finally:\n if not exportFile is None:\n exportedFiles = glob(exportFile.name[:-4] + '*')\n for exp in exportedFiles:\n os.remove(exp)\n\n return DistrictFile.get_file(plan, True)",
"def main(name, line1, line2, orbital_filename):\n #name = \"TERRA\"\n #line1 = \"1 25994U 99068A 16048.43680378 .00000258 00000-0 67198-4 0 9999\"\n #line2 = \"2 25994 98.1982 124.4247 0001352 105.3907 254.7441 14.57126067859938\"\n satellite = ephem.readtle(name, line1, line2)\n \n\n # Landsat 8\n #name = \"Landsat8\"\n #line1=\"1 39084U 13008A 16051.82349873 .00000188 00000-0 51829-4 0 9999\"\n #line2=\"2 39084 98.1988 123.2603 0001265 89.4360 270.6984 14.57110027160810\"\n #LD8 = ephem.readtle(name, line1, line2)\n \n\n sun = ephem.Sun()\n fov = np.radians(68.6)\n\n \"\"\"\n Make pandas dataframe to store swath information\n \"\"\"\n import pandas as pd\n data = {\"DateTime\": [],\"DOY\":[],\"Month\": [],\n \"orbit_id\":[], \"ground_lat\": [], \n \"ground_lon\": [], \"swath_width\": []}\n swaths = pd.DataFrame(data)\n swaths.set_index(keys=\"DateTime\")\n # generate shapefile\n\n orbit_id = 0\n # need to do splitted by hemisphere unfortunately..\n for orbit in make_an_orbit(satellite):\n #import pdb; pdb.set_trace()\n if len(orbit) > 1:\n \"\"\"\n So worth doing processing on orbit...\n\n \"\"\"\n sun = ephem.Sun()\n\n print(orbit[0].datetime)\n\n for overpass in orbit:\n overpass.only_daytime_overpasses(sun)\n overpass.derive_swath_width(fov)\n \"\"\"\n Create a tempoary dataframe for this orbit\n \"\"\"\n epoch = datetime.datetime(1970, 1, 1)\n #import pdb; pdb.set_trace()\n tmp_d = {\"DateTime\": [(o.datetime - epoch).total_seconds() for o in orbit],\n \"DOY\":[int(o.datetime.strftime('%j')) for o in orbit],\n \"Month\": [o.datetime.month for o in orbit],\n \"orbit_id\": orbit_id * np.ones(len(orbit)),\n \"ground_lat\": [o.lat for o in orbit],\n \"ground_lon\": [o.long for o in orbit],\n \"swath_width\": [o.swath_width for o in orbit]}\n tmp = pd.DataFrame(tmp_d)\n tmp.set_index(keys=\"DateTime\")\n #import pdb; pdb.set_trace()\n orbit_id +=1 \n \"\"\"\n Append to main dataframe\n \"\"\"\n swaths = swaths.append(tmp)\n #swaths.set_index(keys=\"DateTime\")\n\n \"\"\"\n Save the DataFrame to a file\n \"\"\"\n swaths = swaths.set_index(keys=\"DateTime\")\n #swaths.set_index(keys=\"DateTime\")\n #import pdb; pdb.set_trace()\n swaths.to_csv(orbital_filename, header=True)",
"def write_result_shapefile(lad_geometry_shp, out_shape, field_names, csv_results):\n # Read in our existing shapefile\n lad_geometry_shp_name = lad_geometry_shp[:-3]\n myshp = open(lad_geometry_shp_name + \"shp\", \"rb\")\n mydbf = open(lad_geometry_shp_name + \"dbf\", \"rb\")\n\n record = shapefile.Reader(shp=myshp, dbf=mydbf)\n\n # Create a new shapefile in memory\n writer = shapefile.Writer()\n\n # Copy over the existing fields\n writer.fields = list(record.fields)\n\n # --------------\n # Add new fields\n # --------------\n for field_name in field_names:\n writer.field(field_name, \"F\", decimal=10) #Float\n\n # Get position of field 'name' \n position = 0\n for field_name in record.fields[1:]:\n if field_name[0] == 'name': #corresponds to LAD Geocode\n position_field_name = position\n break\n else:\n position += 1\n\n # --------------------------\n # Join fields programatically\n # --------------------------\n missing_recors = set()\n\n # Loop through each record, add a column and get results\n for rec in record.records():\n\n # Get geocode for row\n geo_code = rec[position_field_name]\n\n # Iterate result entries in list\n for result_per_field in csv_results:\n\n # Iterate result entries and add\n try:\n result_csv = result_per_field[geo_code]\n except KeyError:\n # No results\n result_csv = 0\n missing_recors.add(geo_code)\n\n # Add specific fuel result\n rec.append(result_csv)\n\n # Add the modified record to the new shapefile\n writer.records.append(rec)\n\n if missing_recors != []:\n logging.warning(\n \"No result value for regions '%s' in joining shapefile\",\n missing_recors)\n else:\n pass\n\n # Copy over the geometry without any changes\n writer._shapes.extend(record.shapes())\n\n # Save as a new shapefile (or write over the old one)\n writer.save(out_shape)\n logging.info(\"... finished writing shp\")\n return",
"def send_arcgis():\n\n dtypes = OrderedDict([\n ('roadsegid', 'str'),\n ('rd20full','str'),\n ('xstrt1','str'),\n ('xstrt2','str'),\n ('llowaddr','str'),\n ('lhighaddr','str'),\n ('rlowaddr','str'),\n ('rhighaddr','str'),\n ('zip','str'),\n ('pve_id', 'str'),\n ('seg_id', 'str'),\n ('project_id', 'str'),\n ('title','str'),\n ('status','str'),\n ('type','str'),\n ('date_start','str'),\n ('date_end','str'),\n ('pav_mi','float'),\n ('mi_comp','float'),\n ('mi_plan','float'),\n ('date_cy','str'),\n ('date_fy','str'),\n ('oci_11','float'),\n ('oci11_des','str'),\n ('oci_15','float'),\n ('oci15_des','str')\n ])\n\n logging.info(\"Reading geojson\")\n geojson = gpd.read_file(f\"{conf['prod_data_dir']}/sd_paving_segs_datasd.geojson\")\n geojson = geojson.rename(columns={'geometry':'geom'})\n \n logging.info(\"Reading repair data\")\n df = pd.read_csv(prod_file['sdif'],low_memory=False,parse_dates=['date_end','date_start'])\n\n df['date_end'] = df['date_end'].dt.date\n\n df['date_cy'] = df['date_end'].apply(lambda x: x.year)\n df['date_fy'] = df['date_end'].apply(lambda x: x.year+1 if x.month > 6 else x.year )\n\n df['date_cy'] = number_str_cols(df['date_cy'])\n df['date_fy'] = number_str_cols(df['date_fy'])\n \n df = df.rename(columns={'address_street':'addr_st',\n 'street_from':'street_fr',\n 'street_to':'street_to',\n 'paving_miles':'pav_mi'})\n\n df.loc[df['status'] == 'post construction','mi_comp'] = df.loc[df['status'] == 'post construction','pav_mi']\n df.loc[df['status'] != 'post construction','mi_plan'] = df.loc[df['status'] != 'post construction','pav_mi']\n\n df_sub = df[['pve_id',\n 'seg_id',\n 'project_id',\n 'title',\n 'status',\n 'type',\n 'date_start',\n 'date_end',\n 'pav_mi',\n 'date_cy',\n 'date_fy',\n 'mi_comp',\n 'mi_plan'\n ]]\n\n logging.info(\"Merging data\")\n\n\n df_merge = pd.merge(geojson,\n df_sub,\n how='outer',\n right_on='seg_id',\n left_on='sapid')\n\n df_merge.loc[df_merge['sapid'].isnull(),\n 'sapid'] = df_merge.loc[df_merge['sapid'].isnull(),\n 'seg_id'] \n\n\n df_gis = df_merge.drop(columns={'seg_id'})\n df_gis = df_gis.rename(columns={'sapid':'seg_id'})\n\n date_cols = ['date_end','date_cy','date_fy', 'date_start']\n for dc in date_cols:\n logging.info(f\"Converting {dc} from date to string\")\n df_gis[dc] = df_gis[dc].fillna('')\n df_gis[dc] = df_gis[dc].astype(str)\n\n df_gis['type'] = df_gis['type'].fillna('None')\n\n na_cols = ['pve_id','seg_id','project_id','title','status']\n for nc in na_cols:\n df_gis[nc] = df_gis[nc].fillna('')\n\n logging.info(\"Reading in OCI data\")\n\n oci_11 = pd.read_csv(f\"{conf['prod_data_dir']}/oci_2011_datasd.csv\")\n oci_15 = pd.read_csv(f\"{conf['prod_data_dir']}/oci_2015_datasd.csv\")\n\n merge_oci = pd.merge(df_gis,oci_11[['seg_id','oci','oci_desc']],how='left',on='seg_id')\n merge_oci = merge_oci.rename(columns={'oci':'oci_11','oci_desc':'oci11_des'})\n \n final_pave_gis = pd.merge(merge_oci,oci_15[['seg_id','oci','oci_desc']],how='left',on='seg_id')\n final_pave_gis = final_pave_gis.rename(columns={'oci':'oci_15','oci_desc':'oci15_des'})\n\n final_pave_gis.to_csv(f\"{conf['prod_data_dir']}/streets_map_data.csv\",index=False)\n\n #df_gis = gpd.GeoDataFrame(df_merge,geometry='geom')\n\n logging.info(\"Writing shapefile\")\n\n with fiona.collection(\n f\"{conf['prod_data_dir']}/sd_paving_gis_datasd.shp\",\n 'w',\n driver='ESRI Shapefile',\n crs=crs.from_epsg(2230),\n schema={'geometry': 'LineString', 'properties': dtypes}\n ) as shpfile:\n for index, row in final_pave_gis.iterrows():\n try:\n geometry = row['geom']\n 
props = {}\n for prop in dtypes:\n props[prop] = row[prop]\n shpfile.write({'properties': props, 'geometry': mapping(geometry)})\n except Exception as e:\n logging.info(f\"Problem with {index} because {e}\")\n\n shp2zip('sd_paving_gis_datasd')\n\n return \"Successfully sent GIS version to ESRI\"",
"def get_polygon_coordinates(self) -> Tuple[List, List]:\n\n polygon_query = f\"https://nominatim.openstreetmap.org/\" \\\n f\"search?city={self.location.replace(' ', '+')}&polygon_geojson=1&format=json\"\n r = requests.get(polygon_query)\n js = ast.literal_eval(r.text)\n\n self.monitor.info(\"-> Downloaded area polygon data points.\")\n clean_polygon_coords = js[0]['geojson']['coordinates'][0]\n\n polygon_lats = [float(i[1]) for i in clean_polygon_coords]\n polygon_longs = [float(i[0]) for i in clean_polygon_coords]\n\n self.monitor.info(\"-> Created lat/long vectors.\")\n return polygon_lats, polygon_longs",
"def draw_des(self,**kwargs):\n defaults=dict(color='red', lw=2)\n for k,v in defaults.items():\n kwargs.setdefault(k,v)\n\n #filename = expandvars('$MAGLITESDIR/maglites/data/round13-poly.txt')\n filename = 'data/round13-poly.txt'\n self.draw_polygon(filename,**kwargs)",
"def format_porteurs(filepath):\n fieldnames, rows = get_header_rows(filepath)\n\n if \"statut\" in fieldnames:\n fieldnames.append(\"situation_societariat_entrance\")\n fieldnames.append(\"situation_situation\")\n for row in rows:\n statut = row['statut']\n row['situation_societariat_entrance'] = \"\"\n if statut == \"Associé\":\n row['situation_societariat_entrance'] = \"01/01/2015\"\n row['situation_situation'] = PORTEUR_STATUS_MATCH.get(statut)\n\n\n if 'coordonnees_address1' in fieldnames and 'coordonnees_address2' in fieldnames:\n fieldnames.append('coordonnees_address')\n for row in rows:\n row['coordonnees_address'] = row['coordonnees_address1'] + \\\n '\\n' + row['coordonnees_address2']\n\n if \"coordonnees_civilite\" in fieldnames:\n fieldnames.append('coordonnees_sex')\n for row in rows:\n if row['coordonnees_civilite'].lower() == u\"mademoiselle\":\n row['coordonnees_civilite'] = u\"Madame\"\n\n if row['coordonnees_civilite'] == u'Madame':\n row['coordonnees_sex'] = 'F'\n else:\n row['coordonnees_sex'] = 'M'\n\n if \"zus\" in fieldnames:\n fieldnames.append(\"coordonnees_zone_qual\")\n for row in rows:\n if row['zus'] == '1':\n row['coordonnees_zone_qual'] = 'zus'\n\n write_csv_file(filepath, fieldnames, rows)",
"def _write_polygons(\n self,\n shapes: Iterable[Polygon],\n emissions: Iterable[float],\n info: EmissionInfo,\n source_group: int,\n ):\n\n # Rasterize the polygon on a grid\n shapes_serie = gpd.GeoSeries(shapes)\n # get polygon bounds\n minx, miny, maxx, maxy = shapes_serie.total_bounds\n # Create a grid for the rasterization\n x = np.arange(minx, maxx, self.polygon_raster_size)\n y = np.arange(miny, maxy, self.polygon_raster_size)\n\n # Get the emission per cell\n average_cells_proportion = (self.polygon_raster_size**2) / shapes_serie.area\n cell_emissions = np.array(emissions) * average_cells_proportion\n\n # WARNING: this might be not exactly mass convserving\n rasterized_emissions = rasterize(\n shapes=zip(shapes, cell_emissions),\n out_shape=(len(x), len(y)),\n transform=from_bounds(minx, miny, maxx, maxy, len(x), len(y)),\n all_touched=False,\n merge_alg=MergeAlg.add,\n )[\n ::-1, :\n ] # flip the y axis\n\n # Get the coordinates of the rasterized polygon\n indices = np.array(np.where(rasterized_emissions)).T\n\n # Write the polygon\n with open(self.file_cadastre, \"a\") as f:\n for i_x, i_y in indices:\n f.write(\n f\"{x[i_x]},{y[i_y]},{info.height},\"\n f\"{self.polygon_raster_size},{self.polygon_raster_size},{info.vertical_extension},\"\n f\"{rasterized_emissions[i_x, i_y]},0,0,0,{source_group},\\n\"\n )",
"def createTerritoryGeometries(config, start_time):\n # get the correct names for all of the provinces within each territory\n file_name = config['shape_files_path'] + config['county_shape_file_name']\n names_df = gpd.read_file(file_name)\n names_df.rename(columns={'NAMELSAD':'NAME'})\n names_df = names_df[['GEOID', 'NAME']]\n\n df_holder = []\n # read in block files for the 4 excluded US territories\n for territory in ['60','66','69','78']:\n try:\n temp_time = time.localtime()\n # open the appropriate block file for the given territory\n file_name = config['shape_files_path'] +\\\n \"block/tl_%s_%s_tabblock%s.shp\" %\\\n (config['census_vintage'],territory,config['census_vintage'][2:])\n temp_df = gpd.read_file(file_name)\n # modify the column names so they match what we expect in the tract and \n # county geojson files\n change_columns = { 'STATEFP%s' % config['census_vintage'][2:]:'state_fips', \n 'COUNTYFP%s' % config['census_vintage'][2:]: 'county_fips',\n 'GEOID%s' % config['census_vintage'][2:]:'block_fips',\n 'ALAND%s' % config['census_vintage'][2:]:'aland'}\n temp_df.rename(columns=change_columns, inplace=True)\n\n # create the tract file for the given territory\n tract_df = temp_df[['block_fips', 'aland', 'geometry']]\n tract_df['GEOID'] = tract_df['block_fips'].str[:11]\n tract_df['NAME']=tract_df['GEOID'].str[5:11]\n tract_df['NAME'] = np.where(tract_df['NAME'].str[4:6] != '00', \n tract_df['NAME'].str[:4] + \".\" + tract_df['NAME'].str[4:6], \n tract_df['NAME'].str[:4])\n\n # dissolve the blocks into tract level detail\n tract_df=tract_df[['GEOID', 'NAME', 'geometry']].loc[tract_df['aland']>0].dissolve(by='GEOID')\n tract_df.reset_index(inplace=True)\n\n # save the newly created tracts for the territory into a shape file\n # for later use by processes\n file_name = config['shape_files_path'] +\\\n \"tract/gz_%s_%s_140_00_500k.shp\" %\\\n (config['census_vintage'],territory)\n tract_df.to_file(file_name)\n\n # provide status or data processing\n my_message = \"\"\"\n INFO - STEP 0 (MASTER): TASK 3 OF 13 - FINISHED WRITING TRACT SHAPE FILE\n FOR US TERRITORY %s\n \"\"\" % territory\n my_message = ' '.join(my_message.split()) \n print(nbmf.logMessage(my_message, temp_time, time.localtime(),\n time.mktime(time.localtime())-time.mktime(start_time)))\n except:\n # there was an error in processing. Capture the error and output the\n # stacktrace to the screen\n my_message = \"\"\"\n ERROR - STEP 0 (MASTER): TASK 3 OF 13 - FAILED WRITING TRACT SHAPE FILE\n FOR US TERRITORY %s\n \"\"\" % territory \n my_message += \"\\n\" + traceback.format_exc()\n print(nbmf.logMessage(my_message, temp_time, time.localtime(),\n time.mktime(time.localtime())-time.mktime(start_time)))\n return False\n\n try:\n # create the dataframe for capturing county level data\n temp_time = time.localtime()\n county_df = temp_df[['state_fips', 'county_fips', 'aland', 'geometry']]\n county_df['GEOID'] = county_df['state_fips'] + county_df['county_fips']\n\n # merge the block level data at the county level to get the geometry\n county_df=county_df[['GEOID', 'geometry']].loc[county_df['aland']>0].dissolve(by='GEOID')\n\n # the county records for US states include names. The names cannot\n # be easily constructed following a set of rules, so instead we just\n # merge the names of the territories that are listed in the tiger line\n # files with the geometries we just calculated. 
This ends up giving\n # us the information we need to create the equivalent of a fully \n # populated 2010 county cartographic file that includes territories\n county_df = county_df.merge(names_df, left_on='GEOID', right_on='GEOID')\n county_df = county_df[['GEOID', 'NAME', 'geometry']]\n\n # append the information to a list that we will process later\n df_holder.append(county_df)\n\n # provide the status on the data processing for this task\n my_message = \"\"\"\n INFO - STEP 0 (MASTER): TASK 3 OF 13 - PROCESSED COUNTY DATA FOR\n US TERRITORY %s\n \"\"\" % territory\n my_message = ' '.join(my_message.split()) \n print(nbmf.logMessage(my_message, temp_time, time.localtime(),\n time.mktime(time.localtime())-time.mktime(start_time)))\n except:\n # there was an error in processing. Capture the error and output the\n # stacktrace to the screen \n my_message = \"\"\"\n ERROR - STEP 0 (MASTER): TASK 3 OF 13 - FAILED PROCESSING COUNTY DATA\n FOR US TERRITORY %s\n \"\"\" % territory \n my_message += \"\\n\" + traceback.format_exc()\n print(nbmf.logMessage(my_message, temp_time, time.localtime(),\n time.mktime(time.localtime())-time.mktime(start_time)))\n return False \n\n try:\n # now that we have the county level data for the territories, we need to merge\n # it with the US county data and create a single file for subsequent processing\n # open the county cartographic bounday file\n file_name = config['shape_files_path'] + config['county_cb_shape_file_name']\n county = gpd.read_file(file_name)\n\n # the cartographic boundary files do not have full names, so concatenate the \n # name and lsad columns and overwrite the original name\n county['NAME']=county['NAME'] + ' ' + county['LSAD']\n\n # extract the county fips from the non-standard county fips identifier in the\n # 2010 cartographic boundary file and then preserve only the necessary columns\n county['GEOID']=county['GEO_ID'].str[9:]\n county = county[['GEOID', 'NAME','geometry']]\n\n # append the county data to the list to be used to build the single file\n df_holder.append(county)\n\n # merge all of the dataframes into a single dataframe, sort it, and then \n # write the file out as a shape file so it can be used later for subsequent\n # data processing\n counties = pd.concat([x for x in df_holder])\n counties.sort_values(by='GEOID',inplace=True)\n file_name = config['shape_files_path'] + config['county_gzm_shape_file_name']\n counties.to_file(file_name)\n \n # provide the status on the data processing for this task\n my_message = \"\"\"\n INFO - STEP 0 (MASTER): TASK 3 OF 13 - COMPLETED UPDATING COUNTY \n CARTOGRAPHIC SHAPE FILE\n \"\"\" \n my_message = ' '.join(my_message.split()) \n print(nbmf.logMessage(my_message, temp_time, time.localtime(),\n time.mktime(time.localtime())-time.mktime(start_time))) \n return True \n\n except:\n # there was an error in processing. Capture the error and output the\n # stacktrace to the screen \n my_message = \"\"\"\n ERROR - STEP 0 (MASTER): TASK 3 OF 13 - FAILED UPDATING COUNTY \n CARTOGRAPHIC SHAPE FILE\n \"\"\" \n my_message += \"\\n\" + traceback.format_exc()\n print(nbmf.logMessage(my_message, temp_time, time.localtime(),\n time.mktime(time.localtime())-time.mktime(start_time)))\n return False",
"def geotransform(street_address_column, borough_column, zip_code_column, in_csv_file_loc, out_csv_file_loc):\r\n with open(out_csv_file_loc, 'wb') as csv_new_file:\r\n fieldnames = ['2010 Census Block',\r\n '2010 Census Block Suffix',\r\n '2010 Census Tract',\r\n 'Assembly District',\r\n 'Atomic Polygon',\r\n 'B10SC First Borough and Street Code',\r\n 'Bike Lane',\r\n 'Borough Block Lot (BBL)',\r\n 'Building Identification Number (BIN) of Input Address or NAP',\r\n 'City Council District',\r\n 'Community District',\r\n 'Community School District',\r\n 'Congressional District',\r\n 'DSNY Snow Priority Code',\r\n 'Election District',\r\n 'First Borough Name',\r\n 'House Number Display Format',\r\n 'House Number Sort Format',\r\n 'Hurricane Evacuation Zone (HEZ)',\r\n 'Message',\r\n 'NTA Name',\r\n 'Neighborhood Tabulation Area (NTA)',\r\n 'Police Precinct',\r\n 'Roadway Type',\r\n 'Second Street Name Normalized',\r\n 'Spatial Coordinates of Segment',\r\n 'State Senatorial District',\r\n 'USPS Preferred City Name',\r\n 'X-Y Coordinates of Lot Centroid',\r\n 'Zip Code',\r\n 'Latitude',\r\n 'Longitude',\r\n 'Spatial X',\r\n 'Spatial Y']\r\n writer = csv.DictWriter(csv_new_file, fieldnames=fieldnames)\r\n writer.writeheader()\r\n \r\n with open(in_csv_file_loc, 'rb') as csvfile:\r\n csvreader = csv.DictReader(csvfile, delimiter = ',')\r\n for row in csvreader:\r\n full_address = row[street_address_column].strip()\r\n split_full_address = full_address.split(' ')\r\n house_number = split_full_address[0]\r\n borough = row[borough_column].strip()\r\n boro_code = borough_transform(borough)\r\n zip_code = row[zip_code_column].strip()\r\n street_name = ' '.join(split_full_address[1:])\r\n \r\n (wa1, wa2) = geo_coder(house_number, boro_code, street_name, zip_code)\r\n \r\n output = Parser(wa1, wa2)\r\n \r\n writer.writerow(output)",
"def export_poly(self, filename):\n mun = Geometry.merge_adjacent_features([f for f in self.getFeatures()])\n mun = Geometry.get_multipolygon(mun)\n with open(filename, \"w\") as fo:\n fo.write(\"admin_boundary\\n\")\n i = 0\n for part in mun:\n for j, ring in enumerate(part):\n i += 1\n prefix = \"!\" if j > 0 else \"\"\n fo.write(prefix + str(i) + \"\\n\")\n for p in ring:\n fo.write(\"%f %f\\n\" % (p.x(), p.y()))\n fo.write(\"END\\n\")\n fo.write(\"END\\n\")\n return",
"def flyc_nofly_extract(po, fwmdlfile):\n (po.nfzone_pos, po.nfzone_count) = flyc_nofly_zone_pos_search(po, fwmdlfile, 0, po.expect_func_align, po.expect_data_align, po.min_match_accepted)\n if po.nfzone_pos < 0:\n raise ValueError(\"Flight controller no fly zones array signature not detected in input file.\")\n (po.nfcord_pos, po.nfcord_count) = flyc_nofly_cord_pos_search(po, fwmdlfile, 0, po.expect_func_align, po.expect_data_align, po.min_match_accepted)\n if po.nfcord_pos < 0:\n raise ValueError(\"Flight controller no fly coords array signature not detected in input file.\")\n nfzones = flyc_nofly_merged_zones_array(po, fwmdlfile)\n if (po.verbose > 0):\n print(\"{}: Creating JSON file...\".format(po.mdlfile))\n inffile = open(po.inffile, \"w\")\n inffile.write(\"{\\\"release_limits\\\":[\\n\")\n i = 0\n for parprop in nfzones:\n inffile.write(\"{\")\n for ppname in ('area_id','type','shape',):\n inffile.write(\"\\\"{:s}\\\":{:d}\".format(ppname,parprop[ppname]))\n inffile.write(\",\")\n for ppname in ('lat','lng',):\n inffile.write(\"\\\"{:s}\\\":{:06f}\".format(ppname,parprop[ppname]))\n inffile.write(\",\")\n for ppname in ('radius','warning','level','disable','updated_at','begin_at','end_at',):\n inffile.write(\"\\\"{:s}\\\":{:d}\".format(ppname,parprop[ppname]))\n inffile.write(\",\")\n for ppname in ('name',):\n inffile.write(\"\\\"{:s}\\\":\\\"{:s}\\\"\".format(ppname,parprop[ppname]))\n inffile.write(\",\")\n for ppname in ('storage','country',):\n inffile.write(\"\\\"{:s}\\\":{:d}\".format(ppname,parprop[ppname]))\n inffile.write(\",\")\n for ppname in ('city',):\n inffile.write(\"\\\"{:s}\\\":\\\"{:s}\\\"\".format(ppname,parprop[ppname]))\n inffile.write(\",\")\n for ppname in ('points',):\n inffile.write(\"\\\"{:s}\\\":{:s}\".format(ppname,parprop[ppname] if parprop[ppname] is not None else \"null\"))\n if (i+1 < len(nfzones)):\n inffile.write(\"},\\n\")\n else:\n inffile.write(\"}\\n\")\n i += 1\n inffile.write(\"]}\\n\")\n inffile.close()\n if (po.verbose > 0):\n print(\"{}: Done exporting.\".format(po.mdlfile))"
] | [
"0.5526519",
"0.55115604",
"0.5385016",
"0.53638154",
"0.5314633",
"0.53080976",
"0.5173656",
"0.50681376",
"0.50139534",
"0.49680954",
"0.495339",
"0.49488038",
"0.49403173",
"0.49082854",
"0.49027666",
"0.48932567",
"0.48877347",
"0.4870424",
"0.48680896",
"0.48623064",
"0.48507988",
"0.48397648",
"0.48357353",
"0.48235127",
"0.4809229",
"0.47947738",
"0.4791602",
"0.47865826",
"0.4783175",
"0.47705236"
] | 0.5947495 | 0 |
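(For orientation between records: each row of this dump pairs a "query" string with a positive "document", a "metadata" dict whose objective.triplet entry lists [query, document, negatives], a list of 30 "negatives", their "negative_scores", and a trailing "document_score" and "document_rank".) The short Python sketch below is illustrative only and is not part of the dataset; it assumes a row has already been parsed into a dict keyed by exactly these column names (an assumption about downstream tooling, not something the dump specifies), and it treats a higher negative_score as a harder negative, which is also an assumption about what the scores encode.

# Illustrative sketch only -- not part of the dataset.
# Assumes `row` is a dict keyed by the column names shown in this dump;
# the toy values below are abridged stand-ins, not real records.
def hardest_negative_triplet(row):
    # scores appear as quoted strings in the dump, so cast before comparing
    scores = [float(s) for s in row["negative_scores"]]
    hardest = max(range(len(scores)), key=scores.__getitem__)
    return row["query"], row["document"], row["negatives"][hardest]

toy_row = {
    "query": "Catchment delineation using the REC streams and catchments. ...",
    "document": "def rec_catch_del(sites_shp, sites_col='site', catch_output=None): ...",
    "negatives": ["def agg_catch(...): ...", "def catch_net(...): ..."],
    "negative_scores": ["0.55", "0.49"],
}
query, positive, hard_negative = hardest_negative_triplet(toy_row)

Mapped over all parsed rows, this would yield (anchor, positive, negative) triplets consistent with the objective.triplet specification of [query, document, negatives] recorded in the metadata column.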
Catchment delineation using the REC streams and catchments. sites_shp Points shapefile of the sites along the streams.\n    sites_col The column name of the site numbers in the sites_shp.\n    catch_output The output polygon shapefile path of the catchment delineation. | def rec_catch_del(sites_shp, sites_col='site', catch_output=None):
### Parameters
server = 'SQL2012PROD05'
db = 'GIS'
streams_table = 'MFE_NZTM_REC'
streams_cols = ['NZREACH', 'NZFNODE', 'NZTNODE']
catch_table = 'MFE_NZTM_RECWATERSHEDCANTERBURY'
catch_cols = ['NZREACH']
### Modifications {NZREACH: {NZTNODE/NZFNODE: node # to change}}
mods = {13053151: {'NZTNODE': 13055874}, 13048353: {'NZTNODE': 13048851}, 13048498: {'NZTNODE': 13048851}}
### Load data
rec_streams = rd_sql(server, db, streams_table, streams_cols, geo_col=True)
rec_catch = rd_sql(server, db, catch_table, catch_cols, geo_col=True)
pts = select_sites(sites_shp)
### make mods
for i in mods:
rec_streams.loc[rec_streams['NZREACH'] == i, mods[i].keys()] = mods[i].values()
### Find closest REC segment to points
pts_seg = closest_line_to_pts(pts, rec_streams, line_site_col='NZREACH', dis=400)
nzreach = pts_seg.copy().NZREACH.unique()
### Find all upstream reaches
reaches = find_upstream_rec(nzreach, rec_shp=rec_streams)
### Extract associated catchments
rec_catch = extract_rec_catch(reaches, rec_catch_shp=rec_catch)
### Aggregate individual catchments
rec_shed = agg_rec_catch(rec_catch)
rec_shed.columns = ['NZREACH', 'geometry', 'area']
rec_shed1 = rec_shed.merge(pts_seg.drop('geometry', axis=1), on='NZREACH')
### Export and return
rec_shed1.to_file(catch_output)
return(rec_shed1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def agg_catch(catch_del_shp, catch_sites_csv, catch_sites_col=['GRIDCODE', 'SITE'], catch_col='GRIDCODE'):\n\n ## Catchment areas shp\n catch = read_file(catch_del_shp)[[catch_col, 'geometry']]\n\n ## dissolve the polygon\n catch3 = catch.dissolve(catch_col)\n\n ## Determine upstream catchments\n catch_df, singles_df = catch_net(catch_sites_csv, catch_sites_col)\n\n base1 = catch3[in1d(catch3.index, singles_df)].geometry\n for i in catch_df.index:\n t1 = append(catch_df.loc[i, :].dropna().values, i)\n t2 = GeoSeries(catch3[in1d(catch3.index, t1)].unary_union, index=[i])\n base1 = GeoSeries(concat([base1, t2]))\n\n ## Convert to GeoDataFrame (so that all functions can be applied to it)\n base2 = GeoDataFrame(base1.index, geometry=base1.geometry.values, crs=catch.crs)\n base2.columns = ['site', 'geometry']\n return(base2)",
"def combine_catchments(catchmentfile, flowfile, elevationfile, comid, \n output = None, overwrite = False, verbose = True):\n\n t0 = time.time()\n numpy.seterr(all = 'raise')\n\n if output is None: output = os.getcwd() + r'\\combined'\n\n if os.path.isfile(output + '.shp') and not overwrite:\n if verbose: print('combined catchment shapefile %s exists' % output)\n return\n \n if verbose: print('combining catchments from %s\\n' % catchmentfile)\n\n # start by copying the projection files\n\n shutil.copy(catchmentfile + '.prj', output + '.prj')\n\n # load the catchment and flowline shapefiles\n\n c = Reader(catchmentfile, shapeType = 5)\n f = Reader(flowfile, shapeType = 3)\n\n # make lists of the comids and featureids\n\n featureid_index = c.fields.index(['FEATUREID', 'N', 9, 0]) - 1\n comid_index = f.fields.index(['COMID', 'N', 9, 0]) - 1\n\n featureids = [r[featureid_index] for r in c.records()]\n comids = [r[comid_index] for r in f.records()]\n\n # check that shapes are traceable--don't have multiple points and start\n # and end at the same place--then make an appropriate list of shapes\n # and records--note it's more memory efficient to read one at a time\n\n n = len(c.records())\n shapes = []\n records = [] \n bboxes = []\n\n try: \n for i in range(n):\n catchment = c.shape(i)\n record = c.record(i)\n\n shape_list = format_shape(catchment.points)\n for s in shape_list:\n shapes.append(s)\n records.append(record)\n bboxes.append(catchment.bbox)\n\n try: combined = combine_shapes(shapes, bboxes, verbose = verbose)\n except: combined = combine_shapes(shapes, bboxes, skip = True, \n verbose = verbose)\n\n except: \n shapes = []\n records = [] \n bboxes = []\n for i in range(n):\n catchment = c.shape(i)\n record = c.record(i)\n\n shape_list = format_shape(catchment.points, omit = True)\n for s in shape_list:\n shapes.append(s)\n records.append(record)\n bboxes.append(catchment.bbox)\n\n try: combined = combine_shapes(shapes, bboxes, verbose = verbose)\n except: combined = combine_shapes(shapes, bboxes, skip = True,\n verbose = verbose)\n\n # iterate through the catchments and get the elevation data from NED\n # then estimate the value of the overland flow plane length and slope\n\n lengths = numpy.empty((n), dtype = 'float')\n slopes = numpy.empty((n), dtype = 'float')\n\n for i in range(n):\n catchment = c.shape(i)\n flowline = f.shape(comids.index(featureids[i]))\n\n catchpoints = get_raster_on_poly(elevationfile, catchment.points,\n verbose = verbose)\n catchpoints = numpy.array([p for p in catchpoints])\n\n zs = get_raster(elevationfile, flowline.points)\n\n flowpoints = numpy.array([[p[0], p[1], z] \n for p, z in zip(flowline.points, zs)])\n\n # iterate through the raster values and find the closest flow point\n\n closest = numpy.empty((len(catchpoints), 3), dtype = 'float')\n\n for point, j in zip(catchpoints, range(len(catchpoints))):\n closest[j] = flowpoints[numpy.dot(flowpoints[:, :2], \n point[:2]).argmin()]\n\n # estimate the slope and overland flow plane length\n\n length, slope = get_overland_vector(catchpoints, closest)\n\n if verbose: print('avg slope and length =', slope.mean(), length.mean())\n\n lengths[i], slopes[i] = length.mean(), slope.mean()\n\n if verbose: print('\\nfinished overland flow plane calculations\\n')\n\n # get area of the subbasin from the catchment metadata\n\n areasq_index = c.fields.index(['AreaSqKM', 'N', 19, 6]) - 1\n areas = numpy.array([r[areasq_index] for r in c.records()])\n\n # take the area weighted average of the slopes and flow lengths\n\n 
tot_area = round(areas.sum(), 2)\n avg_length = round(1000 * numpy.sum(areas * lengths) / tot_area, 1)\n avg_slope = round(numpy.sum(areas * slopes) / tot_area, 4)\n\n # get the centroid and the average elevation\n\n combined = [[float(x), float(y)] for x, y in combined]\n centroid = get_centroid(numpy.array(combined))\n\n Cx, Cy = round(centroid[0], 4), round(centroid[1], 4)\n\n elev_matrix, origin = get_raster_in_poly(elevationfile, combined, \n verbose = verbose)\n\n elev_matrix = elev_matrix.flatten()\n elev_matrix = elev_matrix[elev_matrix.nonzero()]\n \n avg_elev = round(elev_matrix.mean() / 100., 2)\n\n # write the data to the shapefile\n\n w = Writer(shapeType = 5)\n\n fields = [['ComID', 'N', 9, 0],\n ['PlaneLenM', 'N', 8, 2],\n ['PlaneSlope', 'N', 9, 6],\n ['AreaSqKm', 'N', 10, 2],\n ['CenX', 'N', 12, 6],\n ['CenY', 'N', 12, 6],\n ['AvgElevM', 'N', 8, 2]]\n\n record = [comid, avg_length, avg_slope, tot_area, Cx, Cy, avg_elev]\n\n for field in fields: w.field(*field)\n \n w.record(*record)\n \n w.poly(shapeType = 5, parts = [combined])\n\n w.save(output)\n\n if verbose: print('\\ncompleted catchment combination in %.1f seconds\\n' % \n (time.time() - t0))",
"def postprocess(sun=None,sun_dir=None,force=False):\n sun = sun or sunreader.SunReader(sun_dir)\n nprocs=sun.num_processors()\n dfm_dwaq_path=os.path.join(sun.datadir,'dwaq')\n\n\n for proc in range(nprocs):\n nc_fn=os.path.join(dfm_dwaq_path,\n \"DFM_DELWAQ_sun_%04d\"%proc,\n \"sun_%04d_flowgeom.nc\"%proc)\n\n if force or not os.path.exists( nc_fn ):\n sun_to_flowgeom(sun,proc,nc_fn)",
"def saveCLIPPED_DR4(fileOUT, pathOUT, time, flux, xPOS, yPOS, temperature, exposureTIME, numberSTACKS, PSFC1, PSFC2, RTSC, **kwargs):\n # Checking if the last character of pathOUT is an '/'\n if not(pathOUT[-1] == '/'):\n pathOUT += '/'\n # Checking if the suffix of the file is given\n if not fileOUT[-4:] in ['.txt', '.dat']:\n fileOUT += '.dat' \n \n # Preparing the header of the output file\n headerSTRING = 'BRITE photometry, which was clipped for outliers on ' + strftime(\"%Y-%m-%d %H:%M:%s\") + '.'\n headerSTRING +='\\n----------------------------------------'\n headerSTRING +='\\nColumn1: time measurements [d]'\n headerSTRING +='\\nColumn2: flux [adu]'\n headerSTRING +='\\nColumn3: CCD centroid x-position [pixel]'\n headerSTRING +='\\nColumn4: CCD centroid y-position [pixel]'\n headerSTRING +='\\nColumn5: CCD temperature [deg]'\n headerSTRING +='\\nColumn6: exposure time of the observations [s]'\n headerSTRING +='\\nColumn7: number of stacked observations corresponding to one datapoint []'\n headerSTRING +='\\nColumn8: PSF blurring coeffient 1 []'\n headerSTRING +='\\nColumn9: PSF blurring coeffient 2 []'\n headerSTRING +='\\nColumn10: RTS estimate coeffient []'\n headerSTRING +='\\n----------------------------------------'\n \n # Constructing the matrix\n dtOUT = np.dtype([('time', np.float), ('flux', np.float), ('xPOS', np.float), ('yPOS', np.float), ('temperature', np.float), ('exposureTIME', np.float), ('numberSTACKS', np.float), ('PSFC1', np.float), ('PSFC2', np.float), ('RTSC', np.float)])\n matrixOUT = np.zeros(len(time), dtype=dtOUT)\n matrixOUT['time'] = time; matrixOUT['flux'] = flux; matrixOUT['xPOS'] = xPOS; matrixOUT['yPOS'] = yPOS; matrixOUT['temperature'] = temperature; matrixOUT['exposureTIME'] = exposureTIME; matrixOUT['numberSTACKS'] = numberSTACKS; matrixOUT['PSFC1'] = PSFC1; matrixOUT['PSFC2'] = PSFC2; matrixOUT['RTSC'] = RTSC\n \n # The actual saving using a numpy.savetxt \n np.savetxt(pathOUT + fileOUT, matrixOUT, fmt=('%.12e %.7f %.4f %.4f %.4f %.2f %i %.6f %.6f %.2f'), delimiter=' ', header=headerSTRING, comments='#')",
"def FlowMapTwoColourComparisonWithCatchmentsHelper(self,ref_flowmap_filename,data_flowmap_filename,\n ref_catchment_filename,data_catchment_filename,\n ref_rdirs_filename,data_rdirs_filename,\n reference_rmouth_outflows_filename,\n data_rmouth_outflows_filename,\n lsmask_filename=None,minflowcutoff=100,flip_data=False,\n rotate_data=False,flip_ref=False,rotate_ref=False,\n lsmask_has_same_orientation_as_ref=True,\n flip_lsmask=False,rotate_lsmask=False,\n invert_ls_mask=False,matching_parameter_set='default',\n rivers_to_plot=None,\n rivers_to_plot_alt_color=None,\n rivers_to_plot_secondary_alt_color=None,\n use_single_color_for_discrepancies=True,\n use_only_one_color_for_flowmap=False,\n additional_matches_list_filename=None,\n additional_truesink_matches_list_filename=None,\n catchment_and_outflows_mods_list_filename=None,\n first_datasource_name=\"Reference\",\n second_datasource_name=\"Data\",use_title=True,\n remove_antartica=False,\n difference_in_catchment_label=\"Discrepancy\",\n glacier_mask_filename=None,\n extra_lsmask_filename=None,\n show_true_sinks=False,\n fig_size=(12,5),\n grid_type='HD',\n glacier_mask_grid_type='LatLong10min',\n glacier_mask_grid_kwargs={},\n flip_glacier_mask=False,\n rotate_glacier_mask=False,\n **grid_kwargs):\n if grid_type == \"LatLong10min\":\n scale_factor = 3\n else:\n scale_factor = 1\n if (rivers_to_plot_secondary_alt_color is not None):\n if (rivers_to_plot is None) or (rivers_to_plot_alt_color is None):\n raise RuntimeError(\"Invalid options - Secondary alternative color set when primary and/or\"\n \"secondary colors unused\")\n else:\n rivers_to_plot_alt_color.extend(rivers_to_plot_secondary_alt_color)\n else:\n rivers_to_plot_secondary_alt_color = []\n flowmap_grid=grid.makeGrid(grid_type)\n ref_flowmaps_filepath = os.path.join(self.plots_data_dir,ref_flowmap_filename)\n data_flowmaps_filepath = os.path.join(self.plots_data_dir,data_flowmap_filename)\n ref_catchment_filepath = os.path.join(self.plots_data_dir,\n ref_catchment_filename)\n data_catchment_filepath = os.path.join(self.plots_data_dir,\n data_catchment_filename)\n flowmap_ref_field = iodriver.load_field(ref_flowmaps_filepath,\n file_type=iodriver.get_file_extension(ref_flowmaps_filepath),\n field_type='Generic',\n grid_type=grid_type,**grid_kwargs)\n flowmap_data_field = iodriver.load_field(data_flowmaps_filepath,\n file_type=iodriver.get_file_extension(data_flowmaps_filepath),\n field_type='Generic',\n grid_type=grid_type,**grid_kwargs)\n data_catchment_field = iodriver.load_field(data_catchment_filepath,\n file_type=iodriver.get_file_extension(data_catchment_filepath),\n field_type='Generic',\n grid_type=grid_type,**grid_kwargs)\n ref_catchment_field = iodriver.load_field(ref_catchment_filepath,\n file_type=iodriver.get_file_extension(ref_catchment_filepath),\n field_type='Generic',\n grid_type=grid_type,**grid_kwargs)\n if data_rdirs_filename:\n data_rdirs_filepath = os.path.join(self.plots_data_dir,\n data_rdirs_filename)\n ref_rdirs_filepath = os.path.join(self.plots_data_dir,ref_rdirs_filename)\n if data_rdirs_filename:\n data_rdirs_field = iodriver.load_field(data_rdirs_filepath,\n file_type=iodriver.get_file_extension(data_rdirs_filepath),\n field_type='Generic',\n grid_type=grid_type,**grid_kwargs)\n else:\n data_rdirs_field = None\n ref_rdirs_field = iodriver.load_field(ref_rdirs_filepath,\n file_type=iodriver.get_file_extension(ref_rdirs_filepath),\n field_type='RiverDirections',\n grid_type=grid_type,**grid_kwargs)\n if lsmask_filename:\n lsmask_field = 
iodriver.load_field(lsmask_filename,\n file_type=iodriver.get_file_extension(lsmask_filename),\n field_type='Generic', grid_type=grid_type,**grid_kwargs)\n else:\n lsmask_field = field.Field(ref_rdirs_field.get_lsmask(),grid=\"LatLong10min\")\n if extra_lsmask_filename:\n extra_lsmask_field = iodriver.load_field(extra_lsmask_filename,\n file_type=iodriver.\n get_file_extension(extra_lsmask_filename),\n field_type='Generic',\n grid_type=grid_type,**grid_kwargs)\n if catchment_and_outflows_mods_list_filename:\n catchment_and_outflows_mods_list_filepath = os.path.join(self.catchment_and_outflows_mods_list_directory,\n catchment_and_outflows_mods_list_filename)\n if additional_matches_list_filename:\n additional_matches_list_filepath = os.path.join(self.additional_matches_list_directory,\n additional_matches_list_filename)\n if additional_truesink_matches_list_filename:\n additional_truesink_matches_list_filepath = os.path.join(self.additional_truesink_matches_list_directory,\n additional_truesink_matches_list_filename)\n if glacier_mask_filename:\n glacier_mask_field = iodriver.load_field(glacier_mask_filename,\n file_type=iodriver.\\\n get_file_extension(glacier_mask_filename),\n fieldname='sftgif',\n field_type='Generic',\n grid_type=glacier_mask_grid_type,\n **glacier_mask_grid_kwargs)\n if glacier_mask_grid_type != grid_type:\n glacier_mask_field = utilities.upscale_field(glacier_mask_field,\n output_grid_type=grid_type,\n method=\"Mode\",\n output_grid_kwargs=grid_kwargs,\n scalenumbers=False)\n else:\n glacier_mask_field=None\n if flip_data:\n flowmap_data_field.flip_data_ud()\n data_catchment_field.flip_data_ud()\n if data_rdirs_filename:\n data_rdirs_field.flip_data_ud()\n if rotate_data:\n flowmap_data_field.rotate_field_by_a_hundred_and_eighty_degrees()\n data_catchment_field.rotate_field_by_a_hundred_and_eighty_degrees()\n if data_rdirs_filename:\n data_rdirs_field.rotate_field_by_a_hundred_and_eighty_degrees()\n if flip_ref:\n flowmap_ref_field.flip_data_ud()\n ref_catchment_field.flip_data_ud()\n ref_rdirs_field.flip_data_ud()\n if lsmask_filename and lsmask_has_same_orientation_as_ref:\n lsmask_field.flip_data_ud()\n if rotate_ref:\n flowmap_ref_field.rotate_field_by_a_hundred_and_eighty_degrees()\n ref_catchment_field.rotate_field_by_a_hundred_and_eighty_degrees()\n ref_rdirs_field.rotate_field_by_a_hundred_and_eighty_degrees()\n if lsmask_filename and lsmask_has_same_orientation_as_ref:\n lsmask_field.rotate_field_by_a_hundred_and_eighty_degrees()\n if invert_ls_mask:\n lsmask_field.invert_data()\n if extra_lsmask_filename:\n extra_lsmask_field.invert_data()\n if flip_lsmask and not lsmask_has_same_orientation_as_ref:\n lsmask_field.flip_data_ud()\n if rotate_lsmask and not lsmask_has_same_orientation_as_ref:\n lsmask_field.rotate_field_by_a_hundred_and_eighty_degrees()\n if glacier_mask_filename:\n if flip_glacier_mask:\n glacier_mask_field.flip_data_ud()\n if rotate_glacier_mask:\n glacier_mask_field.rotate_field_by_a_hundred_and_eighty_degrees()\n plt.figure(figsize=fig_size)\n ax = plt.subplot(111)\n if extra_lsmask_filename:\n image_array,extra_lsmask =fmp_pts.\\\n make_basic_flowmap_comparison_plot(ax,flowmap_ref_field.get_data(),\n flowmap_data_field.get_data(),\n minflowcutoff,\n first_datasource_name,\n second_datasource_name,\n lsmask_field.get_data(),\n return_image_array_instead_of_plotting=True,\n glacier_mask=glacier_mask_field,\n second_lsmask = extra_lsmask,\n scale_factor=scale_factor)\n else:\n image_array =fmp_pts.\\\n 
make_basic_flowmap_comparison_plot(ax,flowmap_ref_field.get_data(),\n flowmap_data_field.get_data(),\n minflowcutoff,\n first_datasource_name,\n second_datasource_name,\n lsmask_field.get_data(),\n return_image_array_instead_of_plotting=True,\n glacier_mask=glacier_mask_field.get_data()\n if glacier_mask_field is not None else None,\n scale_factor=scale_factor)\n temp_file_list = []\n if catchment_and_outflows_mods_list_filename:\n ref_outflow_field = iodriver.load_field(reference_rmouth_outflows_filename,\n file_type=iodriver.\\\n get_file_extension(reference_rmouth_outflows_filename),\n field_type='Generic', grid_type=grid_type,**grid_kwargs)\n data_outflow_field = iodriver.load_field(data_rmouth_outflows_filename,\n file_type=iodriver.\\\n get_file_extension(data_rmouth_outflows_filename),\n field_type='Generic', grid_type=grid_type,**grid_kwargs)\n if flip_data:\n data_outflow_field.flip_data_ud()\n if rotate_data:\n data_outflow_field.rotate_field_by_a_hundred_and_eighty_degrees()\n ref_catchment_field, ref_outflow_field, data_catchment_field, data_outflow_field =\\\n rc_pts.modify_catchments_and_outflows(ref_catchment_field,ref_outflow_field,flowmap_ref_field,\n ref_rdirs_field,data_catchment_field,data_outflow_field,\n catchment_and_outflows_modifications_list_filename=\\\n catchment_and_outflows_mods_list_filepath,\n grid_type=grid_type)\n if flip_data:\n data_outflow_field.flip_data_ud()\n if rotate_data:\n data_outflow_field.rotate_field_by_a_hundred_and_eighty_degrees()\n reference_rmouth_outflows_filename=os.path.join(self.scratch_dir,\n self.temp_label + os.path.\\\n basename(reference_rmouth_outflows_filename))\n data_rmouth_outflows_filename=os.path.join(self.scratch_dir,\n self.temp_label + os.path.\\\n basename(reference_rmouth_outflows_filename))\n temp_file_list.append(reference_rmouth_outflows_filename)\n temp_file_list.append(data_rmouth_outflows_filename)\n iodriver.write_field(reference_rmouth_outflows_filename,\n field=ref_outflow_field,\n file_type=iodriver.\\\n get_file_extension(reference_rmouth_outflows_filename))\n iodriver.write_field(data_rmouth_outflows_filename,\n field=data_outflow_field,\n file_type=iodriver.\\\n get_file_extension(data_rmouth_outflows_filename))\n #Using get data to convert field type causes confusion... 
possibly rewrite\n if lsmask_filename:\n lsmask = lsmask_field.get_data()\n else:\n lsmask = None\n if extra_lsmask_filename:\n extra_lsmask = extra_lsmask_field.get_data()\n flowmap_ref_field = flowmap_ref_field.get_data()\n flowmap_data_field = flowmap_data_field.get_data()\n data_catchment_field = data_catchment_field.get_data()\n ref_catchment_field = ref_catchment_field.get_data()\n if data_rdirs_filename:\n data_rdirs_field = data_rdirs_field.get_data()\n ref_rdirs_field = ref_rdirs_field.get_data()\n if glacier_mask_filename:\n glacier_mask_field = glacier_mask_field.get_data()\n matchedpairs,_ = mtch_rm.main(reference_rmouth_outflows_filename=\\\n reference_rmouth_outflows_filename,\n data_rmouth_outflows_filename=\\\n data_rmouth_outflows_filename,\n flip_data_field=flip_data,\n rotate_data_field=rotate_data,\n flip_ref_field=flip_ref,\n rotate_ref_field=rotate_ref,\n param_set=matching_parameter_set,\n grid_type=grid_type,**grid_kwargs)\n if additional_matches_list_filename:\n additional_matches = mtch_rm.load_additional_manual_matches(additional_matches_list_filepath,\n reference_rmouth_outflows_filename,\n data_rmouth_outflows_filename,\n flip_data_field=flip_data,\n rotate_data_field=rotate_data,\n grid_type=grid_type,**grid_kwargs)\n matchedpairs.extend(additional_matches)\n if additional_truesink_matches_list_filename:\n additional_matches = mtch_rm.load_additional_manual_truesink_matches(additional_truesink_matches_list_filepath,\n reference_rmouth_outflows_filename,\n data_rmouth_outflows_filename,\n ref_flowmap_filename,\n data_flowmap_filename,\n flip_data_rmouth_outflow_field=\\\n flip_data,\n rotate_data_rmouth_outflow_field=\\\n rotate_data,\n flip_data_flowmap_field=\\\n flip_data,\n rotate_data_flowmap_field=\\\n rotate_data,\n grid_type=grid_type,\n **grid_kwargs)\n matchedpairs.extend(additional_matches)\n for pair in matchedpairs:\n if pair[0].get_lat() > 310*scale_factor:\n continue\n alt_color_num = 8\n if (rivers_to_plot is not None) and (rivers_to_plot_alt_color is not None):\n if ((not (pair[0].get_lat(),pair[0].get_lon()) in rivers_to_plot) and\n (not (pair[0].get_lat(),pair[0].get_lon()) in rivers_to_plot_alt_color)):\n continue\n elif (((pair[0].get_lat(),pair[0].get_lon()) in rivers_to_plot) and\n ((pair[0].get_lat(),pair[0].get_lon()) in rivers_to_plot_alt_color)):\n raise RuntimeError(\"Cannot plot a catchment in both original and alternative colors - check for duplicate\")\n elif ((pair[0].get_lat(),pair[0].get_lon()) in rivers_to_plot):\n alt_color=False\n elif ((pair[0].get_lat(),pair[0].get_lon()) in rivers_to_plot_secondary_alt_color):\n alt_color=True\n alt_color_num = 9\n else:\n alt_color=True\n elif rivers_to_plot is not None:\n alt_color = False\n if not (pair[0].get_lat(),pair[0].get_lon()) in rivers_to_plot:\n continue\n elif rivers_to_plot_alt_color is not None:\n alt_color = True\n if not (pair[0].get_lat(),pair[0].get_lon()) in rivers_to_plot_alt_color:\n continue\n else:\n alt_color = False\n print(\"Ref Point: \" + str(pair[0]) + \"Matches: \" + str(pair[1]))\n image_array = fmp_pts.add_selected_catchment_to_existing_plot(image_array,data_catchment_field,\n ref_catchment_field,data_catchment_field,\n flowmap_data_field, ref_rdirs_field,\n data_rdirs_field, pair=pair,\n catchment_grid_changed=False,\n use_alt_color=alt_color,\n alt_color_num=alt_color_num,\n use_single_color_for_discrepancies=\\\n use_single_color_for_discrepancies,\n use_only_one_color_for_flowmap=\\\n use_only_one_color_for_flowmap,\n 
allow_new_sink_points=show_true_sinks,\n grid_type=grid_type,\n data_original_scale_grid_type=grid_type)\n if extra_lsmask_filename:\n image_array = fmp_pts.add_extra_flowmap(image_array,extra_lsmask)\n if show_true_sinks:\n image_array[np.logical_and(ref_rdirs_field == 5,\n data_rdirs_field == 5)] = -4\n image_array[np.logical_and(ref_rdirs_field == 5,\n data_rdirs_field != 5)] = -5\n image_array[np.logical_and(ref_rdirs_field != 5,\n data_rdirs_field == 5)] = -6\n if remove_antartica:\n image_array = image_array[:320*scale_factor]\n fmp_pts.plot_composite_image(ax,image_array,minflowcutoff,first_datasource_name,second_datasource_name,\n use_single_color_for_discrepancies,use_only_one_color_for_flowmap,use_title,\n colors=self.colors,difference_in_catchment_label=difference_in_catchment_label,\n flowmap_grid=flowmap_grid,plot_glaciers=True if glacier_mask_filename else False,\n second_ls_mask=True if extra_lsmask_filename else False,\n show_true_sinks=show_true_sinks)\n for temp_file in temp_file_list:\n if os.path.basename(temp_file).startswith(\"temp_\"):\n print(\"Deleting File: {0}\".format(temp_file))\n os.remove(temp_file)",
"def catch_net(catch_sites_csv, catch_sites_col=['GRIDCODE', 'SITE']):\n\n ## Read in data\n catch_sites_names=['catch', 'site']\n catch_sites = read_csv(catch_sites_csv)[catch_sites_col]\n catch_sites.columns = catch_sites_names\n\n ## Reorganize and select intial catchments\n catch_sites1 = catch_sites[catch_sites.site != 0]\n catch_sites2 = catch_sites1[catch_sites1.catch != catch_sites1.site]\n\n catch_unique = catch_sites2.catch.unique()\n\n singles = catch_sites1.catch[~catch_sites1.catch.duplicated(keep=False)]\n\n ## Network processing\n df = catch_sites2\n index1 = catch_unique\n catch_set2 = []\n for i in index1:\n catch1 = df.loc[df.catch == i, 'site'].values\n catch_set1 = catch1\n check1 = in1d(df.catch, catch1)\n while sum(check1) >= 1:\n# if sum(check1) > len(catch1):\n# print('Index numbering is wrong!')\n catch2 = df[check1].site.values.flatten()\n catch3 = catch2[~isnan(catch2)]\n catch_set1 = append(catch_set1, catch3)\n check1 = in1d(df.catch, catch3)\n catch1 = catch3\n catch_set2.append(catch_set1.tolist())\n\n df2 = DataFrame(catch_set2, index=index1)\n return([df2, singles.values])",
"def main(ancillary_ws, zero_elev_nodata_flag=False, overwrite_flag=False):\n logging.info('\\nProcess DAYMET ancillary rasters')\n\n # Site URL\n # ancillary_url = 'http://daymet.ornl.gov/files/ancillary_files.tgz'\n\n # Build output workspace if it doesn't exist\n if not os.path.isdir(ancillary_ws):\n os.makedirs(ancillary_ws)\n\n # Input paths\n # ancillary_targz = os.path.join(\n # ancillary_ws, os.path.basename(ancillary_url))\n # dem_nc = os.path.join(ancillary_ws, 'dem_data.nc')\n # mask_nc = os.path.join(ancillary_ws, 'mask_data.nc')\n\n # Output paths\n dem_raster = os.path.join(ancillary_ws, 'daymet_elev.img')\n lat_raster = os.path.join(ancillary_ws, 'daymet_lat.img')\n lon_raster = os.path.join(ancillary_ws, 'daymet_lon.img')\n # mask_raster = os.path.join(ancillary_ws, 'daymet_mask.img')\n\n # Spatial reference parameters\n daymet_proj4 = (\n \"+proj=lcc +datum=WGS84 +lat_1=25 n \"\n \"+lat_2=60n +lat_0=42.5n +lon_0=100w\")\n daymet_osr = drigo.proj4_osr(daymet_proj4)\n daymet_osr.MorphToESRI()\n daymet_proj = daymet_osr.ExportToWkt()\n daymet_cs = 1000\n # daymet_nodata = -9999\n\n # For now, hardcode the DAYMET extent/geo\n snap_xmin, snap_ymin = -4560750, -3090500\n daymet_rows, daymet_cols = 8075, 7814\n # snap_xmin, snap_ymin = -4659000, -3135000\n # daymet_rows, daymet_cols = 8220, 8011\n # daymet_geo = (\n # snap_xmin, daymet_cs, 0.,\n # snap_ymin + daymet_cs * daymet_rows, 0., -daymet_cs)\n daymet_extent = drigo.Extent([\n snap_xmin, snap_ymin,\n snap_xmin + daymet_cs * daymet_cols,\n snap_ymin + daymet_cs * daymet_rows])\n daymet_geo = daymet_extent.geo(daymet_cs)\n logging.debug(\" Extent: {}\".format(daymet_extent))\n logging.debug(\" Geo: {}\".format(daymet_geo))\n # logging.debug(\" Cellsize: {}\".format(daymet_cs))\n # logging.debug(\" Shape: {}\".format(daymet_extent.shape(daymet_cs)))\n\n # # Download the ancillary raster tar.gz\n # if overwrite_flag or not os.path.isfile(ancillary_targz):\n # logging.info('\\nDownloading ancillary tarball files')\n # logging.info(\" {}\".format(os.path.basename(ancillary_url)))\n # logging.debug(\" {}\".format(ancillary_url))\n # logging.debug(\" {}\".format(ancillary_targz))\n # url_download(ancillary_url, ancillary_targz)\n # try:\n # urllib.urlretrieve(ancillary_url, ancillary_targz)\n # except:\n # logging.error(\" ERROR: {}\\n FILE: {}\".format(\n # sys.exc_info()[0], ancillary_targz))\n # os.remove(ancillary_targz)\n\n # # Extract the ancillary rasters\n # ancillary_list = [dem_nc]\n # # ancillary_list = [dem_nc, mask_nc]\n # if (os.path.isfile(ancillary_targz) and\n # (overwrite_flag or\n # not all([os.path.isfile(os.path.join(ancillary_ws, x))\n # for x in ancillary_list]))):\n # logging.info('\\nExtracting ancillary rasters')\n # logging.debug(\" {}\".format(ancillary_targz))\n # tar = tarfile.open(ancillary_targz)\n # for member in tar.getmembers():\n # print member.name\n # member.name = os.path.basename(member.name)\n # # Strip off leading numbers from ancillary raster name\n # member.name = member.name.split('_', 1)[1]\n # member_path = os.path.join(ancillary_ws, member.name)\n # if not member.name.endswith('.nc'):\n # continue\n # elif member_path not in ancillary_list:\n # continue\n # elif os.path.isfile(member_path):\n # continue\n # logging.debug(\" {}\".format(member.name))\n # tar.extract(member, ancillary_ws)\n # tar.close()\n\n # # Mask\n # if ((overwrite_flag or\n # not os.path.isfile(mask_raster)) and\n # os.path.isfile(mask_nc)):\n # logging.info('\\nExtracting mask raster')\n # mask_nc_f = 
netCDF4.Dataset(mask_nc, 'r')\n # logging.debug(mask_nc_f)\n # # logging.debug(mask_nc_f.variables['image'])\n # mask_array = mask_nc_f.variables['image'][:]\n # mask_array[mask_array == daymet_nodata] = 255\n # drigo.array_to_raster(\n # mask_array, mask_raster,\n # output_geo=daymet_geo, output_proj=daymet_proj,\n # output_nodata=255)\n # mask_nc_f.close()\n\n # # DEM\n # if ((overwrite_flag or not os.path.isfile(dem_raster)) and\n # os.path.isfile(dem_nc)):\n # logging.info('\\nExtracting DEM raster')\n # dem_nc_f = netCDF4.Dataset(dem_nc, 'r')\n # logging.debug(dem_nc_f)\n # # logging.debug(dem_nc_f.variables['image'])\n # dem_array = dem_nc_f.variables['image'][:]\n # # Rounding issues of the nodata value when converting to float32\n # dem_array[dem_array == daymet_nodata] -= 1\n # dem_array = dem_array.astype(np.float32)\n # if zero_elev_nodata_flag:\n # dem_array[dem_array <= daymet_nodata] = 0\n # else:\n # dem_array[dem_array <= daymet_nodata] = np.nan\n # drigo.array_to_raster(\n # dem_array, dem_raster,\n # output_geo=daymet_geo, output_proj=daymet_proj)\n # dem_nc_f.close()\n\n # Latitude/Longitude\n if (os.path.isfile(dem_raster) and\n (overwrite_flag or\n not os.path.isfile(lat_raster) or\n not os.path.isfile(lon_raster))):\n logging.info('\\nDAYMET Latitude/Longitude')\n logging.debug(' {}'.format(lat_raster))\n lat_array, lon_array = drigo.raster_lat_lon_func(\n dem_raster, gcs_cs=0.05)\n drigo.array_to_raster(\n lat_array.astype(np.float32), lat_raster,\n output_geo=daymet_geo, output_proj=daymet_proj)\n logging.debug(' {}'.format(lon_raster))\n drigo.array_to_raster(\n lon_array.astype(np.float32), lon_raster,\n output_geo=daymet_geo, output_proj=daymet_proj)\n del lat_array, lon_array\n\n logging.debug('\\nScript Complete')",
"def _change_seg_stop(self, seg_img, depth_img, stop_signs, cam, _region_size=6): \r\n for stop in stop_signs:\r\n\r\n _dist = self._get_distance(stop.get_transform().location)\r\n \r\n _region = np.abs(depth_img - _dist)\r\n\r\n seg_img[(_region < _region_size) & (seg_img == 12)] = 26\r\n\r\n # lane markings\r\n trigger = stop.trigger_volume\r\n\r\n _trig_loc_world = self._trig_to_world(np.array([[0], [0], [0], [1.0]]).T, stop, trigger)\r\n _x = self._world_to_sensor(_trig_loc_world, self._get_sensor_position(cam))[0,0]\r\n\r\n if _x > 0: # stop is in front of camera\r\n\r\n bb = self._create_2d_bb_points(trigger, 4)\r\n trig_loc_world = self._trig_to_world(bb, stop, trigger)\r\n cords_x_y_z = self._world_to_sensor(trig_loc_world, self._get_sensor_position(cam), True)\r\n\r\n #if cords_x_y_z.size: \r\n cords_x_y_z = cords_x_y_z[:3, :]\r\n cords_y_minus_z_x = np.concatenate([cords_x_y_z[1, :], -cords_x_y_z[2, :], cords_x_y_z[0, :]])\r\n bbox = (self._sensor_data['calibration'] @ cords_y_minus_z_x).T\r\n\r\n camera_bbox = np.concatenate([bbox[:, 0] / bbox[:, 2], bbox[:, 1] / bbox[:, 2], bbox[:, 2]], axis=1)\r\n\r\n if np.any(camera_bbox[:,2] > 0):\r\n\r\n camera_bbox = np.array(camera_bbox)\r\n\r\n polygon = [(camera_bbox[i, 0], camera_bbox[i, 1]) for i in range(len(camera_bbox))]\r\n\r\n img = Image.new('L', (self._sensor_data['width'], self._sensor_data['height']), 0)\r\n ImageDraw.Draw(img).polygon(polygon, outline=1, fill=1)\r\n _region = np.array(img)\r\n\r\n seg_img[(_region == 1) & (seg_img == 6)] = 27",
"def copy_to_scratch_from_coordinates(coords_csv, production_info, scratch_info, output_report_path):\n handler = logging.StreamHandler(sys.stdout)\n logger.setLevel(logging.INFO)\n logging.getLogger().addHandler(handler)\n\n coords_xyz = pd.read_csv(coords_csv, header=None, names=['x', 'y', 'z'], dtype=np.int32).values\n \n coords_xyz, copy_infos = copy_to_scratch(production_info, scratch_info, coords_xyz)\n scratch_split_svs = np.array([info.split_sv for info in copy_infos])\n cleave_info = cleave_supervoxels_as_isolated_bodies( scratch_info, scratch_split_svs )\n\n logger.info(\"Preparing output CSV\")\n table = []\n for coord_xyz, copy_info, cleave_info in zip(coords_xyz, copy_infos, cleave_info):\n x, y, z = coord_xyz\n production_sv = copy_info.src_sv\n scratch_sv = cleave_info[0]\n scratch_body = cleave_info[1]\n scratch_cleaved_body = cleave_info[2]\n table.append( (x,y,z,production_sv,scratch_sv,scratch_body,scratch_cleaved_body) )\n \n df = pd.DataFrame(table, columns=['x','y','z','production_sv','scratch_sv','scratch_body','scratch_cleaved_body'])\n df.to_csv(output_report_path, index=False)\n logger.info(\"DONE!\")",
"def DegViewshed (FLOOR, HEIGHT):\n\n #Select Record\n arcpy.SelectLayerByAttribute_management(PointsFL,\"NEW_SELECTION\",SQL)\n \n #Set Observer Height (OffSETA)\n arcpy.CalculateField_management(PointsFL,\"OFFSETA\",HEIGHT,\"PYTHON_9.3\")\n \n #perform viewshed analysis\n arcpy.SetProgressorLabel(\"Performing Viewshed Analysis for point \"+str(value))\n outViewshed = IntermediateFiles+\"\\\\vs_\"+str(FLOOR)+\"_\"+str(value).split(\".\")[0]\n arcpy.Viewshed_3d(outCon,PointsFL,outViewshed)\n\n #convert viewshed to polygon\n arcpy.SetProgressorLabel(\"Converting viewshed\"+str(value)+\" on floor \"+str(FLOOR)+\" to polygon.\")\n OutPoly = IntermediateFiles+\"\\\\\"+os.path.basename(outViewshed).split(\".\")[0]+\"_poly.shp\"\n arcpy.RasterToPolygon_conversion(outViewshed,OutPoly)\n\n #Intersect viewshed polygon with buffer clip\n #This will allow the viewshed poly to inherit attribute fields needed for later analysis\n FinalView = Final_Floor_Viewsheds+\"\\\\FinalViewshed_\"+str(FLOOR)+\"_\"+str(value)+\".shp\"\n arcpy.Intersect_analysis([BufferClip,OutPoly],FinalView)\n \n #Select features in viewshed polygon with Gridcode = 1\n #If no records with grid = 1 exist, scriptwill skip to setting viewshed in degrees to 0\n \n #Convert viewshed polygon to layer\n ViewshedLayer = outName(FinalView,\"lyr\")\n arcpy.MakeFeatureLayer_management(FinalView,ViewshedLayer)\n\n #Select records with gridcode = 1\n arcpy.SelectLayerByAttribute_management(ViewshedLayer,\"NEW_SELECTION\",\"GRIDCODE =\"+str(1)+\"\") \n\n #Get count of the # of records selected in viewshed poly layer\n VsLyrCount = int(arcpy.GetCount_management(ViewshedLayer).getOutput(0))\n \n NoView = SummaryTables+\"\\\\summary_\"+str(FLOOR)+\"_\"+str(value)+\".dbf\"\n YesView = SummaryTables+\"\\\\summary_\"+str(FLOOR)+\"_\"+str(value)+\".dbf\"\n StatsField0 = [[\"GRIDCODE\",\"SUM\"]]\n CaseField0 = [\"ID\",\"SPOT\",FloorField] \n StatsField1 = [[\"LENGTH\",\"SUM\"]]\n CaseField1 = [\"GRIDCODE\",\"ID\",\"SPOT\",FloorField]\n VsArcLengths = ArcLengths+\"\\\\ArcLength_\"+str(FLOOR)+\"_\"+str(value)+\".shp\"\n \n if VsLyrCount == 0: #no viewable areas exist\n arcpy.SelectLayerByAttribute_management(ViewshedLayer,\"CLEAR_SELECTION\")\n arcpy.SetProgressorLabel(\"Calculating viewshed statistics for parcel \"+str(value))\n arcpy.Statistics_analysis(ViewshedLayer,NoView, StatsField0,CaseField0)\n\n #Add field to summary table to hold viewshed value of 0\n #Add field to note which floor viewshed corresponds to\n arcpy.AddField_management(NoView, \"FLR_RAN\",\"SHORT\")\n arcpy.AddField_management(NoView, \"VIEW_\"+Year,\"DOUBLE\")\n arcpy.AddField_management(NoView,\"OFFSETA\",\"SHORT\")\n arcpy.CalculateField_management(NoView,\"FLR_RAN\",FLOOR)\n arcpy.CalculateField_management(NoView,\"VIEW_\"+Year,0)\n arcpy.CalculateField_management(NoView,\"OFFSETA\",HEIGHT)\n\n else: #Calculate viewshed, in degrees, for selected records\n arcpy.SetProgressorLabel(\"Getting arc length for parcel\"+str(value)+\" at the \"+str(FLOOR)+\" floor.\")\n arcpy.Intersect_analysis([BufferLine,ViewshedLayer],VsArcLengths,\"\",10,\"LINE\")#Intersect with any line within 10 ft. 
\n arcpy.AddField_management(VsArcLengths, \"Length\",\"DOUBLE\")\n arcpy.CalculateField_management(VsArcLengths,\"Length\",\"!SHAPE.length@miles!\",\"PYTHON_9.3\")\n arcpy.Statistics_analysis(VsArcLengths,YesView,StatsField1,CaseField1)\n\n #Add fields to output summary table\n arcpy.AddField_management(YesView,\"FLR_RAN\",\"SHORT\")\n arcpy.AddField_management(YesView,\"VIEW_\"+Year,\"DOUBLE\")\n arcpy.AddField_management(YesView,\"OFFSETA\",\"SHORT\")\n arcpy.CalculateField_management(YesView,\"FLR_RAN\",FLOOR)\n arcpy.CalculateField_management(YesView,\"OFFSETA\",HEIGHT)\n arcpy.CalculateField_management(YesView,\"VIEW_\"+Year,\"((!SUM_LENGTH!/3.14)*180)\",\"PYTHON_9.3\")\n arcpy.SelectLayerByAttribute_management(ViewshedLayer,\"CLEAR_SELECTION\")",
"def extractpolylinefromdxf():\r\n d={}\r\n for readfile in readfilelist: #将readfilelist中的文件逐个按照程序进行读取分析\r\n filetoread=open(readfile,'r')\r\n layername=filetoread.name.split(\".\")[0]\r\n #newfilename=filetoread.name.split('.')[0]+'.txt'\r\n #readme.write(newfilename)\r\n #filetowrite=file(newfilename,'w')\r\n #writefilelist.append(newfilename) \r\n x=0 #x坐标\r\n y=0 #y坐标\r\n dataset=[] #多段线坐标数组\r\n counter=0\r\n xflag=0 #以下x、y、poly、end flag表示下一次读取行是否进入表示该变量的行。1为是,0为否。\r\n yflag=0\r\n polyflag=0 \r\n endflag=0\r\n polyline=[] #多段线各顶点坐标构成的数组\r\n \r\n \r\n for line in filetoread.readlines():\r\n counter += 1\r\n pattern1=re.compile('AcDbPolyline') #pattern1~5正则表达式判断是否进入标志行\r\n pattern2=re.compile('\\s{1}10')\r\n pattern3=re.compile('\\s{1}20')\r\n pattern4=re.compile('\\s{2}0')\r\n pattern5=re.compile('ENDSEC')\r\n polymatch=pattern1.match(line)\r\n xmatch=pattern2.match(line)\r\n ymatch=pattern3.match(line)\r\n endmatch=pattern4.match(line)\r\n finalmatch=pattern5.match(line)\r\n if finalmatch and polyflag==1 and endflag==1: #实体定义部分结束,将最后一组多段线的顶点坐标数组加入dataset,dataset是该图形中所有多段线的集合\r\n polyflag=0\r\n dataset.append(polyline)\r\n #print(dataset) #打印测试,输出坐标\r\n #readme.write('polyline has ended!!!') \r\n if polyflag==1 and xflag==1 and endflag==0: #读取X坐标\r\n x=float(line)\r\n xflag=0\r\n if polyflag==1 and yflag==1 and endflag==0: #读取Y坐标\r\n y=float(line)\r\n yflag=0\r\n polyline.append([x,y])\r\n if polyflag==1 and len(polyline)>1 and endflag==1: #读取所有多段线坐标后,将坐标数组加入dataset内\r\n dataset.append(polyline)\r\n polyline=[]\r\n endflag=0\r\n if endmatch: \r\n endflag=1\r\n if polymatch: #进入多段线部分,重置其他flag为0。\r\n polyflag=1\r\n endflag=0\r\n xflag=0\r\n yflag=0\r\n if xmatch:\r\n xflag=1\r\n if ymatch:\r\n yflag=1 \r\n \r\n d[layername]=dataset \r\n d[\"Outline\"]=[[[globalconfig.X_LENGTH/2,globalconfig.Y_LENGTH/2],[globalconfig.X_LENGTH/2,-globalconfig.Y_LENGTH/2],[-globalconfig.X_LENGTH/2,-globalconfig.Y_LENGTH/2],[-globalconfig.X_LENGTH/2,globalconfig.Y_LENGTH/2]]]\r\n return d",
"def postIdealizedAnalysis(inpath, outpath, member,\n refpath='/lustre/research/bancell/aucolema/HWT2016runs/2016050800/wrfoutREF'):\n # SENSvals file naming conventions\n sensval_varstrings = [\"GPH_300\", \"GPH_500\", \"GPH_700\", \"GPH_850\", \"SKIP\",\n \"T_300\", \"T_500\", \"T_700\", \"T_850\", \"T_925\",\n \"U_300\", \"U_500\", \"U_700\", \"U_850\", \"U_925\",\n \"V_300\", \"V_500\", \"V_700\", \"V_850\", \"V_925\",\n \"SKIP\", \"SKIP\", \"SKIP\", \"SKIP\", \"SKIP\", \"SKIP\",\n \"SKIP\", \"SKIP\", \"Q_850\", \"SKIP\", \"SLP\", \"T2\",\n \"TD2\", \"U10\", \"V10\"]\n # Post-processed new file naming conventions\n sensstringslist = [\"300 hPa GPH\",\"500 hPa GPH\",\"700 hPa GPH\",\n \"850 hPa GPH\",\"925 hPa GPH\",\"300 hPa T\",\"500 hPa T\",\n \"700 hPa T\",\"850 hPa T\",\"925 hPa T\",\"300 hPa U-Wind\",\n \"500 hPa U-Wind\",\"700 hPa U-Wind\",\"850 hPa U-Wind\",\n \"925 hPa U-Wind\",\"300 hPa V-Wind\",\"500 hPa V-Wind\",\n \"700 hPa V-Wind\",\"850 hPa V-Wind\",\"925 hPa V-Wind\",\n \"300 hPa Dewpt\", \"500 hPa Dewpt\", \"700 hPa Dewpt\",\n \"850 hPa Dewpt\", \"925 hPa Dewpt\", \"300 hPa Q\",\n \"500 hPa Q\", \"700 hPa Q\", \"850 hPa Q\", \"925 hPa Q\",\n \"SLP\",\"2m Temp\",\"2m Dewpt\",\n \"10m U-Wind\",\"10m V-Wind\"]\n\n # Get more dimensions/geographical info\n wrf_d1 = Dataset(refpath)\n lons, lats = wrf_d1.variables['XLONG'][0], wrf_d1.variables['XLAT'][0]\n wrf_idim = len(lons[0,:])\n wrf_jdim = len(lats[:,0])\n\n # Write interpolated variables to netCDF\n new_analysis = Dataset(outpath, \"w\", format=\"NETCDF4\")\n new_analysis.createDimension('lat', wrf_jdim)\n new_analysis.createDimension('lon', wrf_idim)\n new_analysis.createDimension('time', None)\n xlat = new_analysis.createVariable(\"XLAT\", float, dimensions=('lat','lon'))\n xlat[:,:] = lats\n xlon = new_analysis.createVariable(\"XLONG\", float, dimensions=('lat','lon'))\n xlon[:,:] = lons\n\n # Open dataset and start pulling member fields\n member_fields = np.zeros((len(sensval_varstrings), wrf_jdim, wrf_idim))\n sensvar_dat = Dataset(inpath)\n for ind, var in enumerate(sensval_varstrings):\n # print(\"SENSvals variable:\", var, \"New variable string\", sensstringslist[ind])\n if var != \"SKIP\":\n member_fields[ind] = sensvar_dat[var][member-1][:]\n newvar = new_analysis.createVariable(\n sensstringslist[ind].replace(\" \",\"_\"),\n member_fields[ind].dtype,\n dimensions=('lat','lon'))\n newvar[:,:] = member_fields[ind]\n new_analysis.close()\n return",
"def postIdealizedAnalysis(inpath, outpath, member,\n refpath='/lustre/research/bancell/aucolema/HWT2016runs/2016050800/wrfoutREF'):\n # SENSvals file naming conventions\n sensval_varstrings = [\"GPH_300\", \"GPH_500\", \"GPH_700\", \"GPH_850\", \"SKIP\",\n \"T_300\", \"T_500\", \"T_700\", \"T_850\", \"T_925\",\n \"U_300\", \"U_500\", \"U_700\", \"U_850\", \"U_925\",\n \"V_300\", \"V_500\", \"V_700\", \"V_850\", \"V_925\",\n \"SKIP\", \"SKIP\", \"SKIP\", \"SKIP\", \"SKIP\", \"SKIP\",\n \"SKIP\", \"SKIP\", \"Q_850\", \"SKIP\", \"SLP\", \"T2\",\n \"TD2\", \"U10\", \"V10\"]\n # Post-processed new file naming conventions\n sensstringslist = [\"300 hPa GPH\",\"500 hPa GPH\",\"700 hPa GPH\",\n \"850 hPa GPH\",\"925 hPa GPH\",\"300 hPa T\",\"500 hPa T\",\n \"700 hPa T\",\"850 hPa T\",\"925 hPa T\",\"300 hPa U-Wind\",\n \"500 hPa U-Wind\",\"700 hPa U-Wind\",\"850 hPa U-Wind\",\n \"925 hPa U-Wind\",\"300 hPa V-Wind\",\"500 hPa V-Wind\",\n \"700 hPa V-Wind\",\"850 hPa V-Wind\",\"925 hPa V-Wind\",\n \"300 hPa Dewpt\", \"500 hPa Dewpt\", \"700 hPa Dewpt\",\n \"850 hPa Dewpt\", \"925 hPa Dewpt\", \"300 hPa Q\",\n \"500 hPa Q\", \"700 hPa Q\", \"850 hPa Q\", \"925 hPa Q\",\n \"SLP\",\"2m Temp\",\"2m Dewpt\",\n \"10m U-Wind\",\"10m V-Wind\"]\n\n # Get more dimensions/geographical info\n wrf_d1 = Dataset(refpath)\n lons, lats = wrf_d1.variables['XLONG'][0], wrf_d1.variables['XLAT'][0]\n wrf_idim = len(lons[0,:])\n wrf_jdim = len(lats[:,0])\n\n # Write interpolated variables to netCDF\n new_analysis = Dataset(outpath, \"w\", format=\"NETCDF4\")\n new_analysis.createDimension('lat', wrf_jdim)\n new_analysis.createDimension('lon', wrf_idim)\n new_analysis.createDimension('time', None)\n xlat = new_analysis.createVariable(\"XLAT\", float, dimensions=('lat','lon'))\n xlat[:,:] = lats\n xlon = new_analysis.createVariable(\"XLONG\", float, dimensions=('lat','lon'))\n xlon[:,:] = lons\n\n # Open dataset and start pulling member fields\n member_fields = np.zeros((len(sensval_varstrings), wrf_jdim, wrf_idim))\n sensvar_dat = Dataset(inpath)\n for ind, var in enumerate(sensval_varstrings):\n # print(\"SENSvals variable:\", var, \"New variable string\", sensstringslist[ind])\n if var != \"SKIP\":\n member_fields[ind] = sensvar_dat[var][member-1][:]\n newvar = new_analysis.createVariable(\n sensstringslist[ind].replace(\" \",\"_\"),\n member_fields[ind].dtype,\n dimensions=('lat','lon'))\n newvar[:,:] = member_fields[ind]\n new_analysis.close()\n return",
"def process(sources, output, force):\n logging.basicConfig(level=logging.INFO,\n format='%(asctime)s [%(levelname)s] - %(message)s', datefmt=\"%H:%M:%S\")\n\n logging.getLogger('shapely.geos').setLevel(logging.WARNING)\n logging.getLogger('Fiona').setLevel(logging.WARNING)\n logging.getLogger('requests.packages.urllib3.connectionpool').setLevel(logging.WARNING)\n requests.packages.urllib3.disable_warnings()\n # logging.getLogger('processing').setLevel(logging.DEBUG)\n\n catalog_features = []\n failures = []\n path_parts_to_skip = utils.get_path_parts(sources).index(\"sources\") + 1\n success = True\n for path in utils.get_files(sources):\n try:\n logging.info(\"Processing \" + path)\n pathparts = utils.get_path_parts(path)[path_parts_to_skip:]\n pathparts[-1] = pathparts[-1].replace('.json', '.geojson')\n\n outdir = os.path.join(output, *pathparts[:-1], pathparts[-1].replace('.geojson', ''))\n outfile = os.path.join(output, *pathparts)\n\n source = utils.read_json(path)\n urlfile = urlparse(source['url']).path.split('/')[-1]\n \n if not hasattr(adapters, source['filetype']):\n logging.error('Unknown filetype ' + source['filetype'])\n failures.append(path)\n continue\n \n read_existing = False\n if os.path.isfile(outfile):\n logging.info(\"Output file exists\")\n if os.path.getmtime(outfile) > os.path.getmtime(path):\n logging.info(\"Output file is up to date\")\n if not force:\n read_existing = True\n logging.warning('Skipping ' + path + ' since generated file exists. Use --force to regenerate.') \n else:\n logging.info(\"Output is outdated, {} < {}\".format(\n datetime.datetime.fromtimestamp(os.path.getmtime(outfile)),\n datetime.datetime.fromtimestamp(os.path.getmtime(path))))\n\n if read_existing:\n with open(outfile, \"rb\") as f:\n geojson = json.load(f)\n properties = geojson['properties']\n else:\n logging.info('Downloading ' + source['url'])\n \n try:\n fp = utils.download(source['url'])\n except IOError:\n logging.error('Failed to download ' + source['url'])\n failures.append(path)\n continue\n \n logging.info('Reading ' + urlfile)\n \n if 'filter' in source:\n filterer = BasicFilterer(source['filter'], source.get('filterOperator', 'and'))\n else:\n filterer = None\n \n try:\n geojson = getattr(adapters, source['filetype'])\\\n .read(fp, source['properties'],\n filterer=filterer,\n layer_name=source.get(\"layerName\", None),\n source_filename=source.get(\"filenameInZip\", None))\n except IOError as e:\n logging.error('Failed to read ' + urlfile + \" \" + str(e))\n failures.append(path)\n continue\n except zipfile.BadZipfile as e:\n logging.error('Unable to open zip file ' + source['url'])\n failures.append(path)\n continue\n finally:\n os.remove(fp.name)\n if(len(geojson['features'])) == 0:\n logging.error(\"Result contained no features for \" + path)\n continue\n excluded_keys = ['filetype', 'url', 'properties', 'filter', 'filenameInZip']\n properties = {k:v for k,v in list(source.items()) if k not in excluded_keys}\n properties['source_url'] = source['url']\n properties['feature_count'] = len(geojson['features'])\n logging.info(\"Generating demo point\")\n properties['demo'] = geoutils.get_demo_point(geojson)\n \n geojson['properties'] = properties\n \n utils.make_sure_path_exists(os.path.dirname(outfile))\n\n #cleanup existing generated files\n if os.path.exists(outdir):\n rmtree(outdir)\n filename_to_match, ext = os.path.splitext(pathparts[-1])\n output_file_dir = os.sep.join(utils.get_path_parts(outfile)[:-1])\n logging.info(\"looking for generated files to delete in \" + 
output_file_dir)\n for name in os.listdir(output_file_dir):\n base, ext = os.path.splitext(name)\n if base == filename_to_match:\n to_remove = os.path.join(output_file_dir, name)\n logging.info(\"Removing generated file \" + to_remove)\n os.remove(to_remove)\n\n utils.write_json(outfile, geojson)\n\n logging.info(\"Generating label points\")\n label_geojson = geoutils.get_label_points(geojson)\n label_path = outfile.replace('.geojson', '.labels.geojson')\n utils.write_json(label_path, label_geojson)\n\n logging.info('Done. Processed to ' + outfile)\n \n if not \"demo\" in properties:\n properties['demo'] = geoutils.get_demo_point(geojson)\n\n properties['path'] = \"/\".join(pathparts)\n catalog_entry = {\n 'type': 'Feature',\n 'properties': properties,\n 'geometry': geoutils.get_union(geojson)\n }\n catalog_features.append(catalog_entry)\n\n if not os.path.exists(outdir) or not os.path.exists(os.path.join(outdir, \"units.json\")):\n logging.info(\"Generated exploded GeoJSON to \" + outdir)\n if not os.path.exists(outdir):\n os.makedirs(outdir)\n # .json instead of .geojson, incase there is a unit named \"source\"\n utils.write_json(os.path.join(outdir, \"source.json\"), catalog_entry) \n units = []\n for feature in geojson['features']:\n feature_id = str(feature['properties']['id'])\n feature_id = feature_id.replace('/', '')\n feature_filename = os.path.join(outdir, feature_id + \".geojson\")\n utils.write_json(feature_filename, feature)\n units.append(feature['properties'])\n utils.write_json(os.path.join(outdir, \"units.json\"), units)\n else:\n logging.debug(\"exploded GeoJSON already exists, not generating\")\n\n except Exception as e:\n logging.error(str(e))\n logging.exception(\"Error processing file \" + path)\n failures.append(path)\n success = False\n\n catalog = {\n 'type': 'FeatureCollection',\n 'features': catalog_features\n }\n utils.write_json(os.path.join(output,'catalog.geojson'), catalog)\n\n if not success:\n logging.error(\"Failed sources: \" + \", \".join(failures))\n sys.exit(-1)",
"def generateSDFitsFromHipsr(filename_in, path_in, filename_out, path_out, write_stokes=0, cal=None):\n \n # Open h5 file\n print \"\\nOpening files\"\n print \"-------------\"\n h5file = os.path.join(path_in, filename_in)\n out_file = os.path.join(path_out, filename_out)\n h6 = Hipsr6(h5file)\n pointing = h6.tb_pointing.cols\n obs = h6.tb_observation.cols\n obs_mode = obs.obs_mode[0].strip()\n ref_beams= obs.ref_beam[:]\n\n freqs = h6.freqs\n freqs_cal = h6.freqs_cal\n \n firmware = h6.tb_firmware_config.cols.firmware[0]\n \n print \"Input file: %s\"%h6.h5.filename\n print h6\n\n if cal == None:\n abspath = os.path.abspath( __file__ ).replace('sdfits.pyc', '').replace('sdfits.py', '')\n #diode_cal_file_x = \"%s/diode_jy_x.cal\"%abspath\n #diode_cal_file_y = \"%s/diode_jy_y.cal\"%abspath\n diode_cal_file = \"%s/diode_jy.cal\"%abspath\n else:\n diode_cal_file = cal\n\n print \"Using calibration %s\"%cal\n diode_temps_x, diode_temps_y, rx_temps_x, rx_temps_y = loadDiodeTemp(h6, diode_cal_file)\n\n scan_pointing_len = h6.tb_scan_pointing.shape[0]\n \n tb_lengths = []\n for beam in h6.h5.root.raw_data:\n if beam.shape[0] != scan_pointing_len:\n beam_id = int(beam.name.lstrip('beam_'))\n print \"WARNING: beam %i len: %i, scan_pointing len: %i\"%(beam_id, beam.shape[0], scan_pointing_len)\n tb_lengths.append(np.min([beam.shape[0], scan_pointing_len]))\n \n \n num_acc = np.max(tb_lengths) \n num_rows = num_acc * 13\n\n if num_acc == 0:\n print \"No data in %s. Skipping.\"%h5file\n return -1\n \n print \"No accumulations: %s, no rows: %s\"%(num_acc, num_rows)\n\n # We now need to generate a blank SD-FITS file, with the same number of rows\n print \"\\nGenerating blank SD-FITS file with %i rows...\"%num_rows\n\n path = findLibraryPath()\n if obs_mode == 'MXCAL':\n header_primary = os.path.join(path, 'header_primaryHDU.txt')\n header_tbl = os.path.join(path, 'header_dataHDU_mxcal.txt')\n coldef_file = os.path.join(path, 'coldefs_dataHDU_mxcal.txt')\n elif write_stokes == 2:\n print \"Stokes flag found - writing I,Q,U,V\"\n header_primary = os.path.join(path, 'header_primaryHDU.txt')\n header_tbl = os.path.join(path, 'header_dataHDU_stokes.txt')\n coldef_file = os.path.join(path, 'coldefs_dataHDU_stokes.txt')\n elif write_stokes == 0:\n print \"Writing XX, YY\"\n header_primary = os.path.join(path, 'header_primaryHDU.txt')\n header_tbl = os.path.join(path, 'header_dataHDU.txt')\n coldef_file = os.path.join(path, 'coldefs_dataHDU.txt')\n else:\n print \"Writing XX, YY, XY, YX\"\n header_primary = os.path.join(path, 'header_primaryHDU.txt')\n header_tbl = os.path.join(path, 'header_dataHDU_xpol.txt')\n coldef_file = os.path.join(path, 'coldefs_dataHDU_xpol.txt')\n \n if '200_16384' in firmware:\n coldef_file = os.path.join(path, 'coldefs_dataHDU_200_16384.txt')\n \n hdulist = generateBlankSDFits(num_rows, header_primary, header_tbl, coldef_file)\n print hdulist.info()\n \n # Next, we copy over observation data \n print \"Filling new SD-FITS with HIPSR data...\"\n sdtab = hdulist[1].data\n sdhead = hdulist[1].header\n\n # Fill in header values\n sdhead[\"OBSERVER\"] = obs.observer[0]\n sdhead[\"PROJID\"] = obs.project_id[0]\n \n # Fill in common values\n # NEW METHOD OF TIMESTAMPING - AUG 27 2013\n ref_time = int(h6.h5.root.raw_data.beam_01.cols.timestamp[0])\n ref_id = int(h6.h5.root.raw_data.beam_01.cols.id[0])\n ref_clk = np.abs(h6.h5.root.observation.cols.bandwidth[0]) * 1e6\n num_chans = h6.h5.root.raw_data.beam_01.cols.xx[0].shape[0]\n acc_len = h6.h5.root.firmware_config.cols.acc_len[0]\n # OLD - 
BEFORE MAR 2018 ref_delta = num_chans * acc_len * 2 / ref_clk\n # NEW - post MAR 2018\n fs = 800e6\n ref_delta = 4 * num_chans * acc_len / fs\n \n f = h6.freqs\n\n print \"Filling in common values... \",\n sdtab[\"SCAN\"][:] = 1\n sdtab[\"EXPOSURE\"][:] = ref_delta\n sdtab[\"OBJECT\"][:] = pointing.source[0]\n sdtab[\"OBJ-RA\"][:] = pointing.ra[0]\n sdtab[\"OBJ-DEC\"][:] = pointing.dec[0]\n sdtab[\"RESTFRQ\"][:] = obs.frequency[0] * 1e6\n sdtab[\"FREQRES\"][:] = np.abs(obs.bandwidth[0])*1e6 / num_chans\n sdtab[\"BANDWID\"][:] = np.abs(obs.bandwidth[0]) * 1e6\n sdtab[\"CRPIX1\"][:] = num_chans/2 + 1\n sdtab[\"CRVAL1\"][:] = obs.frequency[0] * 1e6\n sdtab[\"CDELT1\"][:] = np.abs(obs.bandwidth[0])*1e6 / num_chans\n sdtab[\"FLAGGED\"][:] = 0\n sdtab[\"SCANRATE\"][:] = obs.scan_rate[0] / 60 # Deg/min to deg/s\n\n\n # TCS INFO\n sdtab[\"OBSMODE\"][:] = obs.obs_mode[0] \n sdtab[\"IF\"][:] = 1\n print \"OK.\"\n \n row_sd = 0\n cycle_id = 0\n \n flipped = False\n if obs.bandwidth[0] < 0:\n flipped = True\n \n print \"Filling in unique values... \"\n num_cycles = np.min([scan_pointing_len, num_acc])\n for row_h5 in range(num_acc):\n cycle_id += 1 # Starts at 1 in SD-FITS file\n\n for beam in h6.h5.root.raw_data:\n beam_id = int(beam.name.lstrip('beam_'))\n LinePrint(\"%i of %i\"%(row_sd, num_rows))\n \n if cycle_id <= num_cycles:\n raj_id = \"mb%s_raj\"%beam.name.lstrip('beam_')\n dcj_id = \"mb%s_dcj\"%beam.name.lstrip('beam_')\n \n sdtab[\"CYCLE\"][row_sd] = cycle_id\n\n # Fix beam mapping (remove after fixing mapping)\n sdtab[\"BEAM\"][row_sd] = beam_id\n \n sdtab[\"CRVAL3\"][row_sd] = h6.tb_scan_pointing.col(raj_id)[cycle_id-1]\n sdtab[\"CRVAL4\"][row_sd] = h6.tb_scan_pointing.col(dcj_id)[cycle_id-1]\n\n # AZ, EL and PARANGLE should be stored for beam 1 only\n if beam_id == 1:\n sdtab[\"AZIMUTH\"][row_sd] = h6.tb_scan_pointing.col(\"azimuth\")[cycle_id-1]\n sdtab[\"ELEVATIO\"][row_sd] = h6.tb_scan_pointing.col(\"elevation\")[cycle_id-1]\n sdtab[\"PARANGLE\"][row_sd] = h6.tb_scan_pointing.col(\"par_angle\")[cycle_id-1]\n\n #sdtab[\"FOCUSAXI\"][row_sd] = h6.tb_scan_pointing.col(\"focus_axi\")[cycle_id-1]\n sdtab[\"FOCUSTAN\"][row_sd] = h6.tb_scan_pointing.col(\"focus_tan\")[cycle_id-1]\n\n # This is confusing - but it looks like FOCUSROT should be 15.0, which is sent as feed_angle\n # Likewise, focusaxi is probably supposed to be what we receive as focus_rot\n focus_rot = h6.tb_scan_pointing.col(\"focus_rot\")[cycle_id-1]\n sdtab[\"FOCUSROT\"][row_sd] = focus_rot\n sdtab[\"FOCUSAXI\"][row_sd] = h6.tb_observation.col(\"feed_angle\")[0]\n\n try:\n\n # OLD - 27 Aug 2013\n #timestamp = beam.cols.timestamp[row_h5]\n # New - based off integration length\n if beam_id == 1:\n new_id = beam.cols.id[row_h5]\n timestamp = (new_id - ref_id) * ref_delta + ref_time\n date_obs, time = timestamp2dt(timestamp)\n\n sdtab[\"DATE-OBS\"][row_sd] = date_obs\n sdtab[\"TIME\"][row_sd] = time\n\n ref_beam = ref_beams[np.argmin(np.abs(timestamp - obs.date[:]))]\n \n # Compute T_sys for each beam\n T_d_x = diode_temps_x[beam_id-1]\n T_d_y = diode_temps_y[beam_id-1]\n\n T_sys_x, T_sys_y = computeTsys(beam, row_h5, T_d_x, T_d_y)\n S_sys_x, S_sys_y = computeTsysSpec(h6, beam, row_h5, T_d_x, T_d_y)\n\n\n #print T_sys_x, T_sys_y\n sdtab[\"TSYS\"][row_sd] = (T_sys_x, T_sys_y)\n sdtab[\"TCAL\"][row_sd] = (np.average(extractMid(T_d_x)), np.average(extractMid(T_d_y)))\n #sdtab[\"CALFCTR\"][row_sd] = (1, 1)\n\n xx = beam.cols.xx[row_h5].astype('float32')\n yy = beam.cols.yy[row_h5].astype('float32')\n xx[0], yy[0] = 0, 0\n \n # See if 
there is cross corr \n if write_stokes in (1, 2):\n re_xy = beam.cols.re_xy[row_h5].astype('float32')\n im_xy = beam.cols.im_xy[row_h5].astype('float32')\n re_xy[0], im_xy[0] = 0, 0\n \n if flipped:\n xx, yy = xx[::-1], yy[::-1]\n if write_stokes in (1, 2):\n re_xy, im_xy = re_xy[::-1], im_xy[::-1]\n\n # DCP 2019.01 - Adding refbeam to all file types\n sdtab[\"REFBEAM\"][row_sd] = ref_beam\n #if obs_mode == 'MXCAL':\n # sdtab[\"REFBEAM\"][row_sd] = ref_beam\n\n if write_stokes == 2:\n xx = xx / fitLine(f, xx, num_chans) * S_sys_x\n yy = yy / fitLine(f, yy, num_chans) * S_sys_y\n\n re_xy = re_xy / fitLine(f, re_xy, num_chans)* np.sqrt(S_sys_x * S_sys_y)\n im_xy = im_xy / fitLine(f, im_xy, num_chans) * np.sqrt(S_sys_x * S_sys_y)\n \n # Ettore tells me Parkes uses this definition\n # i.e. that I is the average of xx + yy\n ii = (xx + yy) / 2\n qq = (xx - yy) / 2\n uu = re_xy\n vv = im_xy\n \n # Form one data vector\n data1 = np.append(ii, qq)\n data2 = np.append(uu, vv)\n data = np.append(data1, data2)\n data = data.reshape([1,1,4,num_chans])\n else:\n\n if write_stokes == 1:\n re_xy = re_xy / fitLine(f, re_xy, num_chans) * np.sqrt(S_sys_x * S_sys_y)\n im_xy = im_xy / fitLine(f, re_im, num_chans) * np.sqrt(S_sys_x * S_sys_y)\n re_xy[0], im_xy[0] = 0, 0\n\n #print \"cal factor: %2.3f\"%cf\n #print \"Diode temp: %s\"%T_d\n #xx, yy = applyCal(beam, row_h5, freqs, freqs_cal, cf, T_d_x, T_d_y)\n \n xx = xx / fitLine(f, xx, num_chans) * S_sys_x\n yy = yy / fitLine(f, yy, num_chans) * S_sys_y\n\n # Multibeam stats screws up if it encounters division by 1\n xx[xx <= 1 ] = 1\n yy[yy <= 1 ] = 1\n \n do_flagger = True\n if do_flagger:\n flags = np.zeros(len(xx))\n flags[xx > 1000] = 1\n flags[yy > 1000] = 1\n flags[xx==1] = 1\n flags[yy==1] = 1\n flags = np.append(flags, flags)\n flags = flags.reshape([1,1,2,num_chans])\n \n sdtab[\"FLAGGED\"][row_sd] = flags\n \n data = np.append(xx, yy)\n data = data.reshape([1,1,2,num_chans])\n \n sdtab[\"DATA\"][row_sd] = data\n\n if write_stokes == 1:\n sdtab[\"XPOLDATA\"][row_sd] = np.row_stack((re_xy, im_xy)).flatten()\n \n except:\n if beam.name != 'beam_02':\n print \"\\nWARNING: missing row in %s\"%beam.name\n print \"Current index: %i\"%row_h5\n print \"Row length: %i\"%beam.shape[0]\n raise\n try:\n sdtab[\"FLAGGED\"][row_sd] = np.ones_like([1,1,2,num_chans])\n except ValueError:\n pass\n row_sd += 1\n else:\n print \"WARNING: scan_pointing table is not complete.\"\n print \"%s table length: %i\"%(beam.name, beam.shape[0])\n print \"scan_pointing table length: %i\"%scan_pointing_len\n\n \n h6.h5.close()\n \n if os.path.exists(out_file):\n print \"\\nInfo: File exists, deleting...\"\n os.remove(out_file)\n\n print \"\\nInfo: Saving to file\"\n hdulist.writeto(out_file)\n hdulist.close()",
"def disaggregate(self, sitecol, ruptures, iml4, truncnorm, epsilons,\n monitor=Monitor()):\n acc = AccumDict(accum=[])\n ctx_mon = monitor('disagg_contexts', measuremem=False)\n pne_mon = monitor('disaggregate_pne', measuremem=False)\n clo_mon = monitor('get_closest', measuremem=False)\n for rupture in ruptures:\n with ctx_mon:\n orig_dctx = DistancesContext(\n (param, get_distances(rupture, sitecol, param))\n for param in self.REQUIRES_DISTANCES)\n self.add_rup_params(rupture)\n with clo_mon: # this is faster than computing orig_dctx\n closest_points = rupture.surface.get_closest_points(sitecol)\n cache = {}\n for r, gsim in self.gsim_by_rlzi.items():\n dctx = orig_dctx.roundup(gsim.minimum_distance)\n for m, imt in enumerate(iml4.imts):\n for p, poe in enumerate(iml4.poes_disagg):\n iml = tuple(iml4.array[:, r, m, p])\n try:\n pne = cache[gsim, imt, iml]\n except KeyError:\n with pne_mon:\n pne = gsim.disaggregate_pne(\n rupture, sitecol, dctx, imt, iml,\n truncnorm, epsilons)\n cache[gsim, imt, iml] = pne\n acc[poe, str(imt), r].append(pne)\n acc['mags'].append(rupture.mag)\n acc['dists'].append(getattr(dctx, self.filter_distance))\n acc['lons'].append(closest_points.lons)\n acc['lats'].append(closest_points.lats)\n return acc",
"def spathy_run_sve(pgen, pcpy, pbu, ptop, ncf=True, ave_outputs=True, flatten=True):\n\n gisdata = create_catchment(pgen['catchment_id'], fpath=pgen['gis_folder'],\n plotgrids=False, plotdistr=False)\n gisdata['LAI_conif'] *= pcpy['lai_multip']\n gisdata['LAI_decid'] *= pcpy['lai_multip']\n \n \"\"\" greate SpatHy object \"\"\"\n spa = SpatHy(pgen, pcpy, pbu, ptop, gisdata, ave_outputs=ave_outputs, flatten=True)\n Nsteps = spa.Nsteps\n\n \"\"\" create netCDF output file \"\"\"\n if ncf:\n ncf, _= initialize_netCDF(spa.id, spa.GisData, spa.FORC,\n fpath=spa.pgen['output_folder'],\n fname=pgen['outname'])\n \n #3d array indexing: dim1=time, dim2=rows(lat), dim3=cols(lon). W[1,:,:] --> grid at 1st timestep. \n\n \"\"\" ----- MAIN CALCULATION LOOP ----- \"\"\"\n\n print '******* Running Spathy ********'\n spa._run(0, Nsteps, calibr=False, ncf=ncf)\n \n print '********* done *********'\n\n return spa",
"def _post_process_route_fcs(self):\r\n # Create the final output feature class\r\n desc = arcpy.Describe(self.route_fcs[0])\r\n helpers.run_gp_tool(\r\n LOGGER,\r\n arcpy.management.CreateFeatureclass, [\r\n os.path.dirname(self.out_routes),\r\n os.path.basename(self.out_routes),\r\n \"POLYLINE\",\r\n self.route_fcs[0], # template feature class to transfer full schema\r\n \"SAME_AS_TEMPLATE\",\r\n \"SAME_AS_TEMPLATE\",\r\n desc.spatialReference\r\n ]\r\n )\r\n\r\n # Insert the rows from all the individual output feature classes into the final output\r\n fields = [\"SHAPE@\"] + [f.name for f in desc.fields]\r\n with arcpy.da.InsertCursor(self.out_routes, fields) as cur: # pylint: disable=no-member\r\n for fc in self.route_fcs:\r\n for row in arcpy.da.SearchCursor(fc, fields): # pylint: disable=no-member\r\n cur.insertRow(row)",
"def GEEicPts(ptsFile,yr,buf,poly,username,folderOut, scalePix = 30):\n # load required libraries\n import ee\n \n # Initialize the Earth Engine object, using the authentication credentials.\n ee.Initialize()\n\n ID_field = \"geeID\"\n\n #load pts or poly file\n pts1 = ee.FeatureCollection('users/' + username + '/' + str(ptsFile))\n\n #define landcover images\n tc = ee.Image(\"USGS/NLCD/NLCD\" + str(yr)).select('impervious')\n\n if buf > 0:\n bufL = [buf]\n def bufferPoly(feature):\n return feature.buffer(bufL[0])\n\n ptsB = pts1.map(bufferPoly)\n \n #reduce regions, filter out null values, remove geometry and export table\n table_tc_pts = tc.reduceRegions(collection = ptsB.select([ID_field]),\n reducer = ee.Reducer.mean(),\n scale = scalePix)\n task_tc = ee.batch.Export.table.toDrive(collection = table_tc_pts\n .filter(ee.Filter.neq('mean', None))\n .select(['.*'],None,False),\n description = 's_ic_'+str(yr)+'_ptsB',\n folder = folderOut,\n fileFormat = 'CSV')\n task_tc.start()\n\n #print (\"buffered pts by:\" + str(buf))\n\n elif poly > 0:\n \n #reduce regions, filter out null values, remove geometry and export table\n table_tc_pts = tc.reduceRegions(collection = pts1.select([ID_field]),\n reducer = ee.Reducer.mean(),\n scale = scalePix)\n task_tc = ee.batch.Export.table.toDrive(collection = table_tc_pts\n .filter(ee.Filter.neq('mean', None))\n .select(['.*'],None,False),\n description = 's_ic_'+str(yr)+'_poly1',\n folder = folderOut,\n fileFormat = 'CSV')\n task_tc.start()\n\n #print (\"spatial mean in poly: no buffer\")\n\n else:\n \n #reduce regions, filter out null values, remove geometry and export table\n table_tc_pts = tc.reduceRegions(collection = pts1.select([ID_field]),\n reducer = ee.Reducer.mean(),\n scale = scalePix)\n task_tc = ee.batch.Export.table.toDrive(collection = table_tc_pts\n .filter(ee.Filter.neq('mean', None))\n .select(['.*'],None,False),\n description = 's_ic_'+str(yr)+'_pts1',\n folder = folderOut,\n fileFormat = 'CSV')\n task_tc.start()\n\n #print(\"value at point: no buffer\")",
"def GEElcPts(ptsFile,yr,buf,poly,username,folderOut, scalePix = 30):\n \n # load required libraries\n import ee\n \n # Initialize the Earth Engine object, using the authentication credentials.\n ee.Initialize()\n\n ID_field = \"geeID\"\n\n #load pts or poly file\n pts1 = ee.FeatureCollection('users/' + username + '/' + str(ptsFile))\n\n #define landcover images\n tc = ee.Image(\"USGS/NLCD/NLCD\" + str(yr)).select('landcover')\n\n if buf > 0:\n bufL = [buf]\n def bufferPoly(feature):\n return feature.buffer(bufL[0])\n\n ptsB = pts1.map(bufferPoly)\n \n #reduce regions, filter out null values, remove geometry and export table\n table_tc_pts = tc.reduceRegions(collection = ptsB.select([ID_field]),\n reducer = ee.Reducer.frequencyHistogram(),\n scale = scalePix)\n task_tc = ee.batch.Export.table.toDrive(collection = table_tc_pts\n .filter(ee.Filter.neq('histogram', None))\n .select(['.*'],None,False),\n description = 'f_lc_'+str(yr)+'_ptsB',\n folder = folderOut,\n fileFormat = 'CSV')\n task_tc.start()\n\n #print (\"buffered pts by:\" + str(buf))\n\n elif poly > 0:\n \n #reduce regions, filter out null values, remove geometry and export table\n table_tc_pts = tc.reduceRegions(collection = pts1.select([ID_field]),\n reducer = ee.Reducer.frequencyHistogram(),\n scale = scalePix)\n task_tc = ee.batch.Export.table.toDrive(collection = table_tc_pts\n .filter(ee.Filter.neq('histogram', None))\n .select(['.*'],None,False),\n description = 'f_lc_'+str(yr)+'_poly1',\n folder = folderOut,\n fileFormat = 'CSV')\n task_tc.start()\n\n #print (\"spatial mean in poly: no buffer\")\n\n else:\n \n #reduce regions, filter out null values, remove geometry and export table\n table_tc_pts = tc.reduceRegions(collection = pts1.select([ID_field]),\n reducer = ee.Reducer.mean(),\n scale = scalePix)\n task_tc = ee.batch.Export.table.toDrive(collection = table_tc_pts\n .filter(ee.Filter.neq('mean', None))\n .select(['.*'],None,False),\n description = 's_lc_'+str(yr)+'_pts1',\n folder = folderOut,\n fileFormat = 'CSV')\n task_tc.start()\n\n #print(\"value at point: no buffer\")",
"def direct(ctx, tech, sam_files, res_file, out_fpath, points, lat_lon_fpath,\n lat_lon_coords, regions, region, region_col, sites_per_worker,\n logdir, output_request, site_data, mem_util_lim,\n curtailment, gid_map, verbose):\n ctx.obj['TECH'] = tech\n ctx.obj['POINTS'] = points\n ctx.obj['SAM_FILES'] = sam_files\n ctx.obj['RES_FILE'] = res_file\n ctx.obj['SITES_PER_WORKER'] = sites_per_worker\n ctx.obj['OUT_FPATH'] = out_fpath\n ctx.obj['LOGDIR'] = logdir\n ctx.obj['OUTPUT_REQUEST'] = output_request\n ctx.obj['SITE_DATA'] = site_data\n ctx.obj['MEM_UTIL_LIM'] = mem_util_lim\n ctx.obj['CURTAILMENT'] = curtailment\n ctx.obj['GID_MAP'] = gid_map\n\n ctx.obj['LAT_LON_FPATH'] = lat_lon_fpath\n ctx.obj['LAT_LON_COORDS'] = lat_lon_coords\n ctx.obj['REGIONS'] = regions\n ctx.obj['REGION'] = region\n ctx.obj['REGION_COL'] = region_col\n\n verbose = any([verbose, ctx.obj['VERBOSE']])",
"def write_shapefile_branch1(self, shpname):\r\n inarrays = self.read_traveltime()\r\n \r\n Narrays = len(inarrays) \r\n \r\n \r\n westlats = []\r\n westlons = []\r\n eastlats = []\r\n eastlons = [] \r\n lines1 = []\r\n for i in range(len(self.westPnts1)):\r\n westlat, westlon = utm.to_latlon(self.westPnts1[i,0], self.westPnts1[i,1], 14, 'U')\r\n eastlat, eastlon = utm.to_latlon(self.eastPnts1[i,0], self.eastPnts1[i,1], 14, 'U')\r\n lines1.append([[westlon, westlat], [eastlon, eastlat]])\r\n westlats.append(westlat)\r\n westlons.append(westlon)\r\n eastlats.append(eastlat)\r\n eastlons.append(eastlon)\r\n \r\n # Create the projection\r\n spatialReference = osgeo.osr.SpatialReference()\r\n spatialReference.ImportFromProj4('+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs')\r\n \r\n # Create the shape file\r\n outfile = r'ArcGIS_online\\%s'%shpname\r\n driver = osgeo.ogr.GetDriverByName('ESRI Shapefile')\r\n shapeData = driver.CreateDataSource(outfile)\r\n \r\n # Create the layer\r\n layer = shapeData.CreateLayer('Contour', spatialReference, osgeo.ogr.wkbLineString)\r\n layerDefinition = layer.GetLayerDefn()\r\n \r\n # Create fields containing segment infos\r\n field_def = osgeo.ogr.FieldDefn('BranchID', osgeo.ogr.OFTInteger)\r\n layer.CreateField(field_def)\r\n \r\n field_def = osgeo.ogr.FieldDefn('Density', osgeo.ogr.OFTInteger)\r\n layer.CreateField(field_def)\r\n \r\n field_def = osgeo.ogr.FieldDefn('SegID', osgeo.ogr.OFTInteger)\r\n layer.CreateField(field_def)\r\n \r\n field_def = osgeo.ogr.FieldDefn('Lon_west', osgeo.ogr.OFTReal)\r\n layer.CreateField(field_def)\r\n \r\n field_def = osgeo.ogr.FieldDefn('Lat_west', osgeo.ogr.OFTReal)\r\n layer.CreateField(field_def)\r\n \r\n field_def = osgeo.ogr.FieldDefn('Lon_east', osgeo.ogr.OFTReal)\r\n layer.CreateField(field_def)\r\n \r\n field_def = osgeo.ogr.FieldDefn('Lat_east', osgeo.ogr.OFTReal)\r\n layer.CreateField(field_def)\r\n \r\n field_def = osgeo.ogr.FieldDefn('Travel_T', osgeo.ogr.OFTReal)\r\n layer.CreateField(field_def)\r\n \r\n \r\n def add_feature(layer, branchID, density, lines, segs, westlon, westlat, eastlon, eastlat, Ttime):\r\n \"\"\"\r\n function that adds feature to layer\r\n \"\"\" \r\n ctr=0\r\n for i in range(len(lines)):\r\n ctr+=1\r\n line = osgeo.ogr.Geometry(osgeo.ogr.wkbLineString)\r\n # Add points individually to the line\r\n xy = lines[i]\r\n \r\n line.AddPoint_2D(xy[0][0],xy[0][1])\r\n line.AddPoint_2D(xy[1][0],xy[1][1])\r\n # Update the feature with the line data\r\n featureIndex = ctr\r\n feature = osgeo.ogr.Feature(layerDefinition)\r\n #feature.SetStyleString(\"PEN(c:r,w:5px)\") \r\n feature.SetGeometry(line)\r\n feature.SetFID(featureIndex)\r\n feature.SetGeometryDirectly(line)\r\n \r\n # Set the attribute table\r\n feature.SetField('BranchID', int(branchID)) \r\n feature.SetField('Density', int(density[i]))\r\n feature.SetField('SegID', int(segs[i])) # convert to int() is necessary, osgeo cannot recognize numpy int32 type\r\n feature.SetField('Travel_T', \"{:.1f}\".format(Ttime[i]))\r\n feature.SetField('Lon_west', \"{:.3f}\".format(westlon[i]))\r\n feature.SetField('Lat_west', \"{:.3f}\".format(westlat[i]))\r\n feature.SetField('Lon_east', \"{:.3f}\".format(eastlon[i]))\r\n feature.SetField('Lat_east', \"{:.3f}\".format(eastlat[i]))\r\n \r\n layer.CreateFeature(feature)\r\n \r\n \r\n Ttime = inarrays[0][:,2]\r\n ind0 = np.nonzero(Ttime)[0][0]\r\n ind = np.arange(ind0, Ttime.shape[0])\r\n \r\n lines1 = [lines1[i] for i in ind]*Narrays\r\n westlats = [westlats[i] for i in ind]*Narrays\r\n westlons = 
[westlons[i] for i in ind]*Narrays\r\n eastlats = [eastlats[i] for i in ind]*Narrays\r\n eastlons = [eastlons[i] for i in ind]*Narrays\r\n \r\n inarrays_new = [inarrays[i][ind,:] for i in range(Narrays)]\r\n inarrays_stack = np.vstack(inarrays_new)\r\n \r\n add_feature(layer, 1, inarrays_stack[:,3], np.asarray(lines1), inarrays_stack[:,1], \r\n np.asarray(westlons), np.asarray(westlats), \r\n np.asarray(eastlats), np.asarray(eastlons), inarrays_stack[:,2])",
"def process_image(overviews, db_graph, input_filename, color, out_raster_srs):\n if verbose > 0:\n print(\"~~~process_image\")\n input_image = gdal.Open(input_filename)\n stem = Path(input_filename).stem\n if not(\"dataSet\" in overviews):\n overviews['dataSet'] = {}\n overviews['dataSet']['boundingBox'] = {}\n overviews['dataSet']['limits'] = {}\n\n tile_limits = get_tile_limits(input_filename)\n\n if not(\"LowerCorner\" in overviews['dataSet']['boundingBox']):\n overviews['dataSet']['boundingBox'] = tile_limits\n else:\n if tile_limits['LowerCorner'][0] < overviews['dataSet']['boundingBox']['LowerCorner'][0]:\n overviews['dataSet']['boundingBox']['LowerCorner'][0] = tile_limits['LowerCorner'][0]\n if tile_limits['LowerCorner'][1] < overviews['dataSet']['boundingBox']['LowerCorner'][1]:\n overviews['dataSet']['boundingBox']['LowerCorner'][1] = tile_limits['LowerCorner'][1]\n if tile_limits['UpperCorner'][0] > overviews['dataSet']['boundingBox']['UpperCorner'][0]:\n overviews['dataSet']['boundingBox']['UpperCorner'][0] = tile_limits['UpperCorner'][0]\n if tile_limits['UpperCorner'][1] > overviews['dataSet']['boundingBox']['UpperCorner'][1]:\n overviews['dataSet']['boundingBox']['UpperCorner'][1] = tile_limits['UpperCorner'][1]\n\n # for z in tiles:\n for tile_z in range(overviews['level']['min'], overviews['level']['max'] + 1):\n print('Niveau de zoom : ', tile_z)\n\n resolution = overviews['resolution'] * 2 ** (overviews['level']['max'] - tile_z)\n\n MinTileCol = \\\n math.floor(round((tile_limits['LowerCorner'][0] - overviews['crs']['boundingBox']['xmin'])/(resolution*overviews['tileSize']['width']),8))\n MinTileRow = \\\n math.floor(round((overviews['crs']['boundingBox']['ymax']-tile_limits['UpperCorner'][1])/(resolution*overviews['tileSize']['height']),8))\n MaxTileCol = \\\n math.ceil(round((tile_limits['UpperCorner'][0] - overviews['crs']['boundingBox']['xmin'])/(resolution*overviews['tileSize']['width']),8)) - 1\n MaxTileRow = \\\n math.ceil(round((overviews['crs']['boundingBox']['ymax']-tile_limits['LowerCorner'][1])/(resolution*overviews['tileSize']['height']),8)) - 1\n\n if not( str(tile_z) in overviews['dataSet']['limits'] ):\n overviews['dataSet']['limits'][str(tile_z)] = {\n 'MinTileCol': MinTileCol,\n 'MinTileRow': MinTileRow,\n 'MaxTileCol': MaxTileCol,\n 'MaxTileRow': MaxTileRow,\n }\n\n else:\n if MinTileCol < overviews['dataSet']['limits'][str(tile_z)]['MinTileCol']:\n overviews['dataSet']['limits'][str(tile_z)]['MinTileCol'] = MinTileCol\n if MinTileRow < overviews['dataSet']['limits'][str(tile_z)]['MinTileRow']:\n overviews['dataSet']['limits'][str(tile_z)]['MinTileRow'] = MinTileRow\n if MaxTileCol > overviews['dataSet']['limits'][str(tile_z)]['MaxTileCol']:\n overviews['dataSet']['limits'][str(tile_z)]['MaxTileCol'] = MaxTileCol\n if MaxTileRow > overviews['dataSet']['limits'][str(tile_z)]['MaxTileRow']:\n overviews['dataSet']['limits'][str(tile_z)]['MaxTileRow'] = MaxTileRow\n\n for tile_x in range(MinTileCol, MaxTileCol + 1): \n for tile_y in range(MinTileRow, MaxTileRow + 1):\n # on cree une image 3 canaux pour la tuile\n opi = create_blank_tile(overviews, {'x': tile_x, 'y': tile_y, 'resolution': resolution}, 3, out_raster_srs)\n # on reech l'OPI dans cette image\n gdal.Warp(opi, input_image)\n # si necessaire on cree le dossier de la tuile\n tile_dir = args.cache+'/'+str(tile_z)+'/'+str(tile_y)+'/'+str(tile_x)\n Path(tile_dir).mkdir(parents=True, exist_ok=True)\n # on export en jpeg (todo: gerer le niveau de Q)\n 
PNG_DRIVER.CreateCopy(tile_dir+\"/\"+stem+\".png\", opi)\n # on cree une image mono canal pour la tuile\n mask = create_blank_tile(overviews, {'x': tile_x, 'y': tile_y, 'resolution': resolution}, 3, out_raster_srs)\n # on rasterise la partie du graphe qui concerne ce cliche\n gdal.Rasterize(mask, db_graph,\n SQLStatement='select geom from ' + args.table + ' where cliche = \\''+stem+'\\' ')\n img_mask = mask.GetRasterBand(1).ReadAsArray()\n # si le mask est vide, on a termine\n val_max = np.amax(img_mask)\n if val_max > 0:\n # on cree le graphe et l'ortho\n ortho = create_blank_tile(overviews, {'x': tile_x, 'y': tile_y, 'resolution': resolution}, 3, out_raster_srs)\n graph = create_blank_tile(overviews, {'x': tile_x, 'y': tile_y, 'resolution': resolution}, 3, out_raster_srs)\n if Path(tile_dir+\"/ortho.png\").is_file():\n existing_ortho = gdal.Open(tile_dir+\"/ortho.png\")\n existing_graph = gdal.Open(tile_dir+\"/graph.png\")\n else:\n existing_ortho = False\n existing_graph = False\n for i in range(3):\n opi_i = opi.GetRasterBand(i+1).ReadAsArray()\n if existing_ortho:\n ortho_i = existing_ortho.GetRasterBand(i+1).ReadAsArray()\n else:\n ortho_i = ortho.GetRasterBand(i+1).ReadAsArray()\n opi_i[(img_mask == 0)] = 0\n ortho_i[(img_mask != 0)] = 0\n ortho.GetRasterBand(i+1).WriteArray(np.add(opi_i, ortho_i))\n if existing_graph:\n graph_i = existing_graph.GetRasterBand(i+1).ReadAsArray()\n else:\n graph_i = graph.GetRasterBand(i+1).ReadAsArray()\n graph_i[(img_mask != 0)] = color[i]\n graph.GetRasterBand(i+1).WriteArray(graph_i)\n existing_ortho = None\n existing_graph = None\n PNG_DRIVER.CreateCopy(tile_dir+\"/ortho.png\", ortho)\n PNG_DRIVER.CreateCopy(tile_dir+\"/graph.png\", graph)",
"def _insert_stops_many_to_many(self):\r\n # Store data of the relevant origins and destinations in dictionaries for quick lookups and reuse\r\n o_data = {} # {Origin ID: [Shape, transferred fields]}\r\n for row in arcpy.da.SearchCursor( # pylint: disable=no-member\r\n self.input_origins_layer,\r\n [self.origin_id_field, \"SHAPE@\"] + self.origin_transfer_fields\r\n ):\r\n o_data[row[0]] = row[1:]\r\n d_data = {} # {Destination ID: [Shape, transferred fields]}\r\n for row in arcpy.da.SearchCursor( # pylint: disable=no-member\r\n self.input_destinations_layer,\r\n [self.dest_id_field, \"SHAPE@\"] + self.destination_transfer_fields\r\n ):\r\n d_data[row[0]] = row[1:]\r\n\r\n # Insert origins from each OD pair into the Route analysis\r\n with self.rt_solver.insertCursor(\r\n arcpy.nax.RouteInputDataType.Stops,\r\n [\"RouteName\", \"Sequence\", self.origin_unique_id_field_name, \"SHAPE@\"] + self.origin_transfer_fields\r\n ) as icur:\r\n for od_pair in self.od_pairs:\r\n origin_id, dest_id = od_pair\r\n try:\r\n origin_data = o_data[origin_id]\r\n except KeyError:\r\n # This should never happen because we should have preprocessed this out.\r\n self.logger.debug(\r\n f\"Origin from OD Pairs not found in inputs. Skipped pair {od_pair}.\")\r\n continue\r\n route_name = f\"{origin_id} - {dest_id}\"\r\n icur.insertRow((route_name, 1, origin_id) + origin_data)\r\n\r\n # Insert destinations from each OD pair into the Route analysis\r\n with self.rt_solver.insertCursor(\r\n arcpy.nax.RouteInputDataType.Stops,\r\n [\"RouteName\", \"Sequence\", self.dest_unique_id_field_name, \"SHAPE@\"] + self.destination_transfer_fields\r\n ) as icur:\r\n for od_pair in self.od_pairs:\r\n origin_id, dest_id = od_pair\r\n try:\r\n dest_data = d_data[dest_id]\r\n except KeyError:\r\n # This should never happen because we should have preprocessed this out.\r\n self.logger.debug(\r\n f\"Destination from OD Pairs not found in inputs. Skipped pair {od_pair}.\")\r\n continue\r\n route_name = f\"{origin_id} - {dest_id}\"\r\n icur.insertRow((route_name, 2, dest_id) + dest_data)",
"def read_sediment_thickness(self, infname='sedthk.xyz'):\n inArr = np.loadtxt(infname)\n lonArr = inArr[:, 0]\n lonArr = lonArr.reshape(lonArr.size/360, 360)\n latArr = inArr[:, 1]\n latArr = latArr.reshape(latArr.size/360, 360)\n depthArr= inArr[:, 2]\n depthArr= depthArr.reshape(depthArr.size/360, 360)\n stalst = self.waveforms.list()\n if len(stalst) == 0:\n print 'Inversion with surface wave datasets only, not added yet!'\n return\n for staid in stalst:\n netcode, stacode = staid.split('.')\n staid_aux = netcode+'_'+stacode\n stla, elev, stlo = self.waveforms[staid].coordinates.values()\n if stlo > 180.:\n stlo -= 360.\n whereArr= np.where((lonArr>=stlo)*(latArr>=stla))\n ind_lat = whereArr[0][-1]\n ind_lon = whereArr[1][0]\n # check\n lon = lonArr[ind_lat, ind_lon]\n lat = latArr[ind_lat, ind_lon]\n if abs(lon-stlo) > 1. or abs(lat - stla) > 1.:\n print 'ERROR!',lon,lat,stlo,stla\n depth = depthArr[ind_lat, ind_lon]\n header = {'sedi_depth': depth, 'data_source': 'crust_1.0'}\n self.add_auxiliary_data(data=np.array([]), data_type='SediDepth', path=staid_aux, parameters=header)\n return",
"def flyc_nofly_extract(po, fwmdlfile):\n (po.nfzone_pos, po.nfzone_count) = flyc_nofly_zone_pos_search(po, fwmdlfile, 0, po.expect_func_align, po.expect_data_align, po.min_match_accepted)\n if po.nfzone_pos < 0:\n raise ValueError(\"Flight controller no fly zones array signature not detected in input file.\")\n (po.nfcord_pos, po.nfcord_count) = flyc_nofly_cord_pos_search(po, fwmdlfile, 0, po.expect_func_align, po.expect_data_align, po.min_match_accepted)\n if po.nfcord_pos < 0:\n raise ValueError(\"Flight controller no fly coords array signature not detected in input file.\")\n nfzones = flyc_nofly_merged_zones_array(po, fwmdlfile)\n if (po.verbose > 0):\n print(\"{}: Creating JSON file...\".format(po.mdlfile))\n inffile = open(po.inffile, \"w\")\n inffile.write(\"{\\\"release_limits\\\":[\\n\")\n i = 0\n for parprop in nfzones:\n inffile.write(\"{\")\n for ppname in ('area_id','type','shape',):\n inffile.write(\"\\\"{:s}\\\":{:d}\".format(ppname,parprop[ppname]))\n inffile.write(\",\")\n for ppname in ('lat','lng',):\n inffile.write(\"\\\"{:s}\\\":{:06f}\".format(ppname,parprop[ppname]))\n inffile.write(\",\")\n for ppname in ('radius','warning','level','disable','updated_at','begin_at','end_at',):\n inffile.write(\"\\\"{:s}\\\":{:d}\".format(ppname,parprop[ppname]))\n inffile.write(\",\")\n for ppname in ('name',):\n inffile.write(\"\\\"{:s}\\\":\\\"{:s}\\\"\".format(ppname,parprop[ppname]))\n inffile.write(\",\")\n for ppname in ('storage','country',):\n inffile.write(\"\\\"{:s}\\\":{:d}\".format(ppname,parprop[ppname]))\n inffile.write(\",\")\n for ppname in ('city',):\n inffile.write(\"\\\"{:s}\\\":\\\"{:s}\\\"\".format(ppname,parprop[ppname]))\n inffile.write(\",\")\n for ppname in ('points',):\n inffile.write(\"\\\"{:s}\\\":{:s}\".format(ppname,parprop[ppname] if parprop[ppname] is not None else \"null\"))\n if (i+1 < len(nfzones)):\n inffile.write(\"},\\n\")\n else:\n inffile.write(\"}\\n\")\n i += 1\n inffile.write(\"]}\\n\")\n inffile.close()\n if (po.verbose > 0):\n print(\"{}: Done exporting.\".format(po.mdlfile))",
"def run(self):\n if self.part == 'a':\n cond_latitude = \"b < 0\"\n else:\n cond_latitude = \"b >= 0\"\n\n if self.mode == 'full':\n extracmd = \"\"\"delcols \"pSaturated \\\n rErrBits iErrBits haErrBits errBits \\\n rPlaneX rPlaneY iPlaneX iPlaneY \\\n haPlaneX haPlaneY rAxis primaryID \\\n vignetted truncated badPix\" \"\"\"\n else:\n # select \"nBands == 3\"; \\\n extracmd = \"\"\"keepcols \"name ra dec \\\n r rErr \\\n i iErr \\\n ha haErr \\\n mergedClass errBits\";\"\"\"\n\n instring = ''\n for field in self.fieldlist:\n path = os.path.join(self.datapath,\n 'strip{0:.0f}'.format(self.strip),\n '{0}.fits'.format(field))\n instring += 'in={0} '.format(path)\n\n output_filename = self.get_output_filename()\n output_filename_gzip = self.get_output_filename(gzip=True)\n log.info('Writing data to {0}'.format(output_filename))\n\n version = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S')\n\n # A bug in stilts causes long fieldIDs to be truncated if -utype S15 is not set\n # We also replace a bunch of column descriptions because they cannot be longer than 73 chars.\n param = {'stilts': constants.STILTS,\n 'in': instring,\n 'icmd': \"\"\"'clearparams *; \\\n setparam NAME \"IPHAS DR2 Source Catalogue (part \"\"\"+self.get_partname()+\"\"\")\"; \\\n setparam ORIGIN \"www.iphas.org\"; \\\n setparam AUTHOR \"Geert Barentsen, Hywel Farnhill, Janet Drew\"; \\\n setparam VERSION \\\"\"\"\"+version+\"\"\"\"; \\\n select \"(errBits < 64) \\\n & ! (r<12.5 & i<11.5 & ha<12) \\\n & (rErr < 0.198 || iErr < 0.198 || haErr < 0.198) \\\n & (pStar > 0.2 || pGalaxy > 0.2) \\\n & (NULL_rErrBits || NULL_iErrBits || NULL_haErrBits || ((rErrbits & iErrBits & haErrBits & 8) == 0))\n & l >= \"\"\"+str(self.lon1)+\"\"\" \\\n & l < \"\"\"+str(self.lon2)+\"\"\" \\\n & \"\"\"+str(cond_latitude)+\"\"\" \\\n & sourceID == primaryID\"; \\\n addcol -before ra \\\n -desc \"Source designation (JHHMMSS.ss+DDMMSS.s) without IPHAS2 prefix.\" \\\n name \\\n \"concat(\\\\\"J\\\\\", \n replaceAll(degreesToHms(ra, 2),\n \\\\\":\\\\\", \\\\\"\\\\\"), \n replaceAll(degreesToDms(dec, 1),\n \\\\\":\\\\\", \\\\\"\\\\\")\n )\"; \\\n addcol -before rMJD -desc \"True if source was blended with a nearby neighbour in the r-band.\" \\\n rDeblend \"NULL_rErrBits ? false : (rErrBits & 2) > 0\";\n addcol -before rMJD -desc \"True i the peak pixel count exceeded 55000 in r.\" \\\n rSaturated \"r<13 ? true : NULL_rErrBits ? false : (rErrBits & 8) > 0\";\n addcol -before iMJD -desc \"True if source was blended with a nearby neighbour in the i-band.\" \\\n iDeblend \"NULL_iErrBits ? false : (iErrBits & 2) > 0\";\n addcol -before iMJD -desc \"True if the peak pixel count exceeded 55000 in i.\" \\\n iSaturated \"i<12 ? true : NULL_iErrBits ? false : (iErrBits & 8) > 0\";\n addcol -before haMJD -desc \"True if source was blended with a nearby neighbour in H-alpha.\" \\\n haDeblend \"NULL_haErrBits ? false : (haErrBits & 2) > 0\";\n addcol -before haMJD -desc \"True if the peak pixel count exceeded 55000 in H-alpha.\" \\\n haSaturated \"ha<12.5 ? true : NULL_haErrBits ? false : (haErrBits & 8) > 0\";\n replacecol saturated \"rSaturated || iSaturated || haSaturated\";\n colmeta -name a10 reliable;\n replacecol a10 \"! saturated & nBands == 3 & rErr<0.1 & iErr<0.1 & haErr<0.1 & (abs(r-rAperMag1) < 3*hypot(rErr,rAperMag1Err)+0.03) & (abs(i-iAperMag1) < 3*hypot(iErr,iAperMag1Err)+0.03) & (abs(ha-haAperMag1) < 3*hypot(haErr,haAperMag1Err)+0.03)\";\n addcol -before fieldID -desc \"True if (a10 & pStar > 0.9 & ! deblend & ! 
brightNeighb)\" \\\n a10point \"a10 & pStar > 0.9 & ! deblend & ! brightNeighb\";\n replacecol -utype S15 fieldID \"fieldID\";\n replacecol -utype S1 fieldGrade \"toString(fieldGrade)\";\n colmeta -desc \"True if detected in all bands at 10-sigma plus other criteria.\" a10;\n colmeta -desc \"J2000 RA with respect to the 2MASS reference frame.\" ra;\n colmeta -desc \"Unique source identification string (run-ccd-detectionnumber).\" sourceID;\n colmeta -desc \"Astrometric fit error (RMS) across the CCD.\" posErr;\n colmeta -desc \"1=galaxy, 0=noise, -1=star, -2=probableStar, -3=probableGalaxy.\" mergedClass;\n colmeta -desc \"N(0,1) stellarness-of-profile statistic.\" mergedClassStat;\n colmeta -desc \"1=galaxy, 0=noise, -1=star, -2=probableStar, -3=probableGalaxy.\" rClass;\n colmeta -desc \"1=galaxy, 0=noise, -1=star, -2=probableStar, -3=probableGalaxy.\" iClass;\n colmeta -desc \"1=galaxy, 0=noise, -1=star, -2=probableStar, -3=probableGalaxy.\" haClass;\n colmeta -desc \"Unique r-band detection identifier (run-ccd-detectionnumber).\" rDetectionID;\n colmeta -desc \"Unique i-band detection identifier (run-ccd-detectionnumber).\" iDetectionID;\n colmeta -desc \"Unique H-alpha detection identifier (run-ccd-detectionnumber).\" haDetectionID;\n colmeta -desc \"CCD pixel coordinate in the r-band exposure.\" rX;\n colmeta -desc \"CCD pixel coordinate in the r-band exposure.\" rY;\n colmeta -desc \"CCD pixel coordinate in the i-band exposure.\" iX;\n colmeta -desc \"CCD pixel coordinate in the i-band exposure.\" iY;\n colmeta -desc \"CCD pixel coordinate in the H-alpha exposure.\" haX;\n colmeta -desc \"CCD pixel coordinate in the H-alpha exposure.\" haY;\n colmeta -desc \"Survey field identifier.\" fieldID;\n colmeta -desc \"Probability the source is extended.\" pGalaxy;\n colmeta -desc \"Default r mag (Vega) using the 2.3 arcsec aperture.\" r;\n colmeta -desc \"Default i mag (Vega) using the 2.3 arcsec aperture.\" i;\n colmeta -desc \"Default H-alpha mag (Vega) using the 2.3 arcsec aperture.\" ha;\n colmeta -desc \"r mag (Vega) derived from peak pixel height.\" rPeakMag;\n colmeta -desc \"i mag (Vega) derived from peak pixel height.\" iPeakMag;\n colmeta -desc \"H-alpha mag (Vega) derived from peak pixel height.\" haPeakMag;\n colmeta -desc \"r mag (Vega) using the 1.2 arcsec aperture.\" rAperMag1;\n colmeta -desc \"i mag (Vega) using the 1.2 arcsec aperture.\" iAperMag1;\n colmeta -desc \"H-alpha mag (Vega) using the 1.2 arcsec aperture.\" haAperMag1;\n colmeta -desc \"r mag (Vega) using the 3.3 arcsec aperture.\" rAperMag3;\n colmeta -desc \"i mag (Vega) using the 3.3 arcsec aperture.\" iAperMag3;\n colmeta -desc \"H-alpha mag (Vega) using the 3.3 arcsec aperture.\" haAperMag3;\n colmeta -desc \"Internal quality control score of the field. 
One of A, B, C or D.\" fieldGrade;\n colmeta -desc \"Number of repeat observations of this source in the survey.\" nObs;\n colmeta -desc \"SourceID of the object in the partner exposure.\" sourceID2;\n colmeta -desc \"FieldID of the partner detection.\" fieldID2;\n colmeta -desc \"r mag (Vega) in the partner field, obtained within 10 minutes.\" r2;\n colmeta -desc \"Uncertainty for r2.\" rErr2;\n colmeta -desc \"i mag (Vega) in the partner field, obtained within 10 minutes.\" i2;\n colmeta -desc \"Uncertainty for i2.\" iErr2;\n colmeta -desc \"H-alpha mag (Vega) in the partner field, obtained within 10 minutes.\" ha2;\n colmeta -desc \"Uncertainty for ha2.\" haErr2;\n colmeta -desc \"flag brightNeighb (1), deblend (2), saturated (8), vignetting (64)\" errBits2;\n {0}\n '\"\"\".format(extracmd),\n 'out': output_filename}\n\n cmd = '{stilts} tcat {in} icmd={icmd} countrows=true lazy=true out={out}'\n mycmd = cmd.format(**param)\n log.info(mycmd)\n status = os.system(mycmd)\n log.info('concat: '+str(status))\n\n # zip\n mycmd = 'gzip --stdout {0} > {1}'.format(output_filename, output_filename_gzip)\n log.debug(mycmd)\n status = os.system(mycmd)\n log.info('gzip: '+str(status))\n\n return status",
"def conRFMixAndMaskToBeagle(indfile_name, rephasedhaps_pref, em_iters, win_size, chroms):\n\t### First get individual information\n\twindow_id = 0\n\tem_iter = em_iters\n\tindfile = open(indfile_name, \"r\")\t\n\tinds = []\n\tfor line in indfile:\n\t\tsplits = line.strip(\"\\r\\n\").split()\n\t\tinds.append(splits[1] + \"_A\")\n\t\tinds.append(splits[1] + \"_B\")\n\n\tallloci = []\n\toutfilename = rephasedhaps_pref + \"_w\" + str(win_size) + \".beagle\"\n\toutfile = open(outfilename, \"w\")\n\toutfile.write(\"I\\tid\\t\" + \"\\t\".join(inds) + \"\\n\")\n\t## Write genotype data out to file\n\n\tvitout = open(rephasedhaps_pref + \".vit\", \"w\")\n\twinout = open(rephasedhaps_pref + \".windows\", \"w\")\n\tfbkout = rephasedhaps_pref + \".fbk\"\n\tif os.path.exists(fbkout):\n\t\tos.remove(fbkout)\n\tvitlist = []\n\tfor chrom in chroms:\n\t\tprint chrom\n\t\tshapeitfilename = rephasedhaps_pref + \"_chr\" + str(chrom) + \"_shapeout.allelesRephased\" + str(em_iters) + \".txt\"\n\t\tshapeitfile = open(shapeitfilename, \"rb\")\n\t\tfbkin_name = rephasedhaps_pref + \"_chr\" + str(chrom) + \"_shapeout.\" + str(em_iters) + \".ForwardBackward.txt\"\n\t\tos.system('cat ' + fbkin_name + \" >> \" + fbkout) # Concatenate files together\n\t\tmarkerin = rephasedhaps_pref + \"_chr\" + str(chrom) + \"_shapeout.amaps\"\n\t\tmarkerfile = open(markerin, \"r\")\n\t\tloci=[]\n\t\talleles = {}\n\t\tfor mline in markerfile:\n\t\t\tmsplit = mline.strip().split()\n\t\t\tloci.append(msplit[1])\n\t\t\talleles[msplit[1]] = [msplit[3], msplit[4] ]\n\n\t\tallloci.extend(loci)\n\t\tfor j,line in enumerate(shapeitfile):\n\t\t\tsline = line.strip(\"\\r\\n\")\n\t\t\tzero, ones = alleles[loci[j]]\n\t\t\tfixed = [ recodeAllele(k, zero, ones) for k in sline ]\n\t\t\toutfile.write(\"M\\t\" + loci[j] + \"\\t\" + \"\\t\".join(fixed) + \"\\n\")\n\t\tvitfile = open(rephasedhaps_pref + \"_chr\" + str(chrom) + \"_shapeout.\" + str(em_iters) + \".Viterbi.txt\", \"r\")\n\t\tvitlist.extend([x.strip().split() for x in vitfile])\n\t\tshapeitfile.close()\n\t\tvitfile.close()\n\t\t\n\t# This will transpose the whole Viterbi file\n\t# Yikes this may take a lot of memory\n\tfor i,x in enumerate(zip(*vitlist)):\n\t\tvitout.write(inds[i] + \"\\t\")\n\t\tfor y in x:\n\t\t\tvitout.write(y+\"\\t\")\n\t\tvitout.write(\"\\n\")\n\t\t### This doesn't quite work yet so make sure to fix it next time\n\tfor l in allloci:\n\t\twinout.write(\"window\" + str(window_id) + \"\\t\" + l + \"\\n\")\n\t\twindow_id += 1\n\treturn([outfile.name, vitout.name, winout.name, fbkout])",
"def extract(self, files):\n for i in range(len(files)):\n print(files[i])\n img = cv2.imread('{}/{}'.format('{}/{}/{}'.format(DIR_2DST_Mask, self.patient, self.plan), files[i]), 0)\n\n \"\"\"\n Find the indices of array elements that are non-zero, i.e,\n find the pixels' positions that represents the respiratory\n functions (pixels in the respiratory function are brighter).\n \"\"\"\n color_pts = np.argwhere(img > 70)\n\n \"\"\"\n Sorts the pixels according to their x coordenate.\n Obs: np.argwhere inverts x and y, it's like (y, x), because of it,\n the parameter of itemgetter is 1 (to get x coordinate)\n \"\"\"\n lcolor_pts = sorted(color_pts.tolist(), key=itemgetter(1))\n\n \"\"\"\n If there is no pixel representing the respiratory function\n (i.e., lighter pixel) it creates an empty image (without any\n respiratory function)\n \"\"\"\n if len(lcolor_pts) == 0:\n diaphragmatic_lvl = np.zeros((256, 50, 3), np.uint8)\n\n cv2.imwrite('{}/{}/{}/{}'.format(\n DIR_2DST_Diaphragm, patient, plan, files[i]), diaphragmatic_lvl)\n\n # file = open(\n # '{}/{}/{}/points.txt'.format(DIR_2DST_Diaphragm, self.patient, self.plan), 'a')\n # file.write(\"{}:{}\\n\".format(files[i], []))\n # file.close()\n\n continue\n\n # Reverse the coordinates and store the result in lordered_pts list\n lordered_pts = []\n for j in range(len(lcolor_pts)):\n lordered_pts.append(lcolor_pts[j][::-1])\n\n \"\"\"\n Convert pixels coordinates into a tuples and check which column\n has pixels that corresponding to diaphragmatic level\n Obs. There are some columns that doesnt have any pixel that\n correpond to diaphragmatic level.\n \"\"\"\n # Columns that have a pixel corresponding diaphragmatic level\n lcolumn_available = []\n for j in range(len(lordered_pts)):\n lordered_pts[j] = tuple(lordered_pts[j])\n lcolumn_available.append(lordered_pts[j][0])\n lcolumn_available = list(set(lcolumn_available))\n # print(\"Ordered points: \", lordered_pts)\n # print(\"Columns available: \", lcolumn_available)\n\n \"\"\"\n If there is not enough columns to build a respiratory pattern,\n create a blank image\n \"\"\"\n if len(lcolumn_available) < 20:\n diaphragmatic_lvl = np.zeros((256, 50, 3), np.uint8)\n cv2.imwrite('{}/{}/{}/{}'.format(\n DIR_2DST_Diaphragm, patient, plan, files[i]), diaphragmatic_lvl)\n continue\n\n \"\"\"\n If there are no pixel that corresponding diaphragmatic level in the\n first column, assign to it the value of the second y coordinate\n \"\"\"\n if lcolumn_available[0] is not 0:\n y = max(\n [x for x in lordered_pts if x[0] == lcolumn_available[0]],\n key=itemgetter(1))[1]\n lordered_pts.insert(0, (0, y))\n lcolumn_available.insert(0, 0)\n\n \"\"\"\n If there are no pixel that corresponding diaphragmatic level in the\n last column, assign to it the value of the penultimate y coordinate\n available\n \"\"\"\n if lcolumn_available[-1] is not 49:\n lordered_pts.append(\n (49, lordered_pts[len(lcolumn_available)][1]))\n lcolumn_available.append(49)\n\n \"\"\"\n Get the biggest y value in each column that represents the\n diaphragmatic level\n \"\"\"\n column = 0\n lcolumn = []\n ldiaphragm_pts = []\n for j in range(50):\n # Get the column's points\n lcolumn = [x for x in lordered_pts if x[0] == column]\n # print('{}: {}'.format(j, lcolumn))\n\n if len(lcolumn) > 0:\n ldiaphragm_pts.append(\n max(lcolumn, key=itemgetter(1))) # Get the biggest y\n else:\n # Get the y value from the previous column\n lcolumn_available.insert(column, column)\n ldiaphragm_pts.append((column, ldiaphragm_pts[-1][1]))\n column += 1\n lcolumn 
= []\n\n # Draw diaphragmatic level\n diaphragmatic_lvl = np.zeros((256, 50, 3), np.uint8)\n j = 0\n while(j < len(lcolumn_available) - 1):\n cv2.line(\n diaphragmatic_lvl,\n ldiaphragm_pts[j], ldiaphragm_pts[j + 1],\n (0, 0, 255), 1)\n j = j + 1\n\n lcolumn_available = []\n\n print(\"Diaphragmatic's points: \", ldiaphragm_pts)\n cv2.imshow('Diaphragmatic level', diaphragmatic_lvl)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n cv2.imwrite('{}/{}/{}/{}'.format(\n DIR_2DST_Diaphragm, patient, plan, files[i]), diaphragmatic_lvl)\n\n # file = open('{}/{}/{}/points.txt'.format(DIR_2DST_Diaphragm, self.patient, self.plan), 'a')\n # file.write(\"{}:{}\\n\".format(files[i], ldiaphragm_pts))\n # file.close()\n\n # return ldiaphragm_pts",
"def GEEtcPts(ptsFile,yr,buf,poly,username,folderOut, scalePix = 30):\n # load required libraries\n import ee\n \n # Initialize the Earth Engine object, using the authentication credentials.\n ee.Initialize()\n\n ID_field = \"geeID\"\n\n #load pts or poly file\n pts1 = ee.FeatureCollection('users/' + username + '/' + str(ptsFile))\n\n #define landcover images\n tc = ee.Image(\"USGS/NLCD/NLCD\" + str(yr)).select('percent_tree_cover')\n\n if buf > 0:\n bufL = [buf]\n def bufferPoly(feature):\n return feature.buffer(bufL[0])\n\n ptsB = pts1.map(bufferPoly)\n \n #reduce regions, filter out null values, remove geometry and export table\n table_tc_pts = tc.reduceRegions(collection = ptsB.select([ID_field]),\n reducer = ee.Reducer.mean(),\n scale = scalePix)\n task_tc = ee.batch.Export.table.toDrive(collection = table_tc_pts\n .filter(ee.Filter.neq('mean', None))\n .select(['.*'],None,False),\n description = 's_tc_'+str(yr)+'_ptsB',\n folder = folderOut,\n fileFormat = 'CSV')\n task_tc.start()\n\n #print (\"buffered pts by:\" + str(buf))\n\n elif poly > 0:\n \n #reduce regions, filter out null values, remove geometry and export table\n table_tc_pts = tc.reduceRegions(collection = pts1.select([ID_field]),\n reducer = ee.Reducer.mean(),\n scale = scalePix)\n task_tc = ee.batch.Export.table.toDrive(collection = table_tc_pts\n .filter(ee.Filter.neq('mean', None))\n .select(['.*'],None,False),\n description = 's_tc_'+str(yr)+'_poly1',\n folder = folderOut,\n fileFormat = 'CSV')\n task_tc.start()\n\n #print (\"spatial mean in poly: no buffer\")\n\n else:\n \n #reduce regions, filter out null values, remove geometry and export table\n table_tc_pts = tc.reduceRegions(collection = pts1.select([ID_field]),\n reducer = ee.Reducer.mean(),\n scale = scalePix)\n task_tc = ee.batch.Export.table.toDrive(collection = table_tc_pts\n .filter(ee.Filter.neq('mean', None))\n .select(['.*'],None,False),\n description = 's_tc_'+str(yr)+'_pts1',\n folder = folderOut,\n fileFormat = 'CSV')\n task_tc.start()\n\n #print(\"value at point: no buffer\")"
] | [
"0.62439525",
"0.57997125",
"0.48219526",
"0.46345586",
"0.4631852",
"0.46315864",
"0.4526034",
"0.45056954",
"0.44907907",
"0.44516155",
"0.44467697",
"0.44328094",
"0.44328094",
"0.43942088",
"0.43738577",
"0.43644437",
"0.43510073",
"0.42912412",
"0.428687",
"0.4282901",
"0.4261443",
"0.42265213",
"0.4210979",
"0.41797775",
"0.41612858",
"0.4158562",
"0.4158322",
"0.41553676",
"0.41441244",
"0.41423294"
] | 0.776218 | 0 |
Return prob(chisq >= chi, with df degrees of freedom). df must be even. | def chi2P(chi, df):
assert df & 1 == 0
# If chi is very large, exp(-m) will underflow to 0.
m = chi / 2.0
sum = term = exp(-m)
for i in range(1, df//2):
term *= m / i
sum += term
# With small chi and large df, accumulated
# roundoff error, plus error in
# the platform exp(), can cause this to spill
# a few ULP above 1.0. For
# example, chi2P(100, 300) on my box
# has sum == 1.0 + 2.0**-52 at this
# point. Returning a value even a teensy
# bit over 1.0 is no good.
return min(sum, 1.0) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def achisqprob(chisq,df):\r\n BIG = 200.0\r\n def ex(x):\r\n BIG = 200.0\r\n exponents = N.where(N.less(x,-BIG),-BIG,x)\r\n return N.exp(exponents)\r\n\r\n if type(chisq) == N.ndarray:\r\n arrayflag = 1\r\n else:\r\n arrayflag = 0\r\n chisq = N.array([chisq])\r\n if df < 1:\r\n return N.ones(chisq.shape,N.float)\r\n probs = N.zeros(chisq.shape,N.float_)\r\n probs = N.where(N.less_equal(chisq,0),1.0,probs) # set prob=1 for chisq<0\r\n a = 0.5 * chisq\r\n if df > 1:\r\n y = ex(-a)\r\n if df%2 == 0:\r\n even = 1\r\n s = y*1\r\n s2 = s*1\r\n else:\r\n even = 0\r\n s = 2.0 * azprob(-N.sqrt(chisq))\r\n s2 = s*1\r\n if (df > 2):\r\n chisq = 0.5 * (df - 1.0)\r\n if even:\r\n z = N.ones(probs.shape,N.float_)\r\n else:\r\n z = 0.5 *N.ones(probs.shape,N.float_)\r\n if even:\r\n e = N.zeros(probs.shape,N.float_)\r\n else:\r\n e = N.log(N.sqrt(N.pi)) *N.ones(probs.shape,N.float_)\r\n c = N.log(a)\r\n mask = N.zeros(probs.shape)\r\n a_big = N.greater(a,BIG)\r\n a_big_frozen = -1 *N.ones(probs.shape,N.float_)\r\n totalelements = N.multiply.reduce(N.array(probs.shape))\r\n while asum(mask)<>totalelements:\r\n e = N.log(z) + e\r\n s = s + ex(c*z-a-e)\r\n z = z + 1.0\r\n# print z, e, s\r\n newmask = N.greater(z,chisq)\r\n a_big_frozen = N.where(newmask*N.equal(mask,0)*a_big, s, a_big_frozen)\r\n mask = N.clip(newmask+mask,0,1)\r\n if even:\r\n z = N.ones(probs.shape,N.float_)\r\n e = N.ones(probs.shape,N.float_)\r\n else:\r\n z = 0.5 *N.ones(probs.shape,N.float_)\r\n e = 1.0 / N.sqrt(N.pi) / N.sqrt(a) * N.ones(probs.shape,N.float_)\r\n c = 0.0\r\n mask = N.zeros(probs.shape)\r\n a_notbig_frozen = -1 *N.ones(probs.shape,N.float_)\r\n while asum(mask)<>totalelements:\r\n e = e * (a/z.astype(N.float_))\r\n c = c + e\r\n z = z + 1.0\r\n# print '#2', z, e, c, s, c*y+s2\r\n newmask = N.greater(z,chisq)\r\n a_notbig_frozen = N.where(newmask*N.equal(mask,0)*(1-a_big),\r\n c*y+s2, a_notbig_frozen)\r\n mask = N.clip(newmask+mask,0,1)\r\n probs = N.where(N.equal(probs,1),1,\r\n N.where(N.greater(a,BIG),a_big_frozen,a_notbig_frozen))\r\n return probs\r\n else:\r\n return s",
"def lchisqprob(chisq,df):\r\n BIG = 20.0\r\n def ex(x):\r\n BIG = 20.0\r\n if x < -BIG:\r\n return 0.0\r\n else:\r\n return math.exp(x)\r\n\r\n if chisq <=0 or df < 1:\r\n return 1.0\r\n a = 0.5 * chisq\r\n if df%2 == 0:\r\n even = 1\r\n else:\r\n even = 0\r\n if df > 1:\r\n y = ex(-a)\r\n if even:\r\n s = y\r\n else:\r\n s = 2.0 * zprob(-math.sqrt(chisq))\r\n if (df > 2):\r\n chisq = 0.5 * (df - 1.0)\r\n if even:\r\n z = 1.0\r\n else:\r\n z = 0.5\r\n if a > BIG:\r\n if even:\r\n e = 0.0\r\n else:\r\n e = math.log(math.sqrt(math.pi))\r\n c = math.log(a)\r\n while (z <= chisq):\r\n e = math.log(z) + e\r\n s = s + ex(c*z-a-e)\r\n z = z + 1.0\r\n return s\r\n else:\r\n if even:\r\n e = 1.0\r\n else:\r\n e = 1.0 / math.sqrt(math.pi) / math.sqrt(a)\r\n c = 0.0\r\n while (z <= chisq):\r\n e = e * (a/float(z))\r\n c = c + e\r\n z = z + 1.0\r\n return (c*y+s)\r\n else:\r\n return s",
"def pchisq(x, df):\n \n if df % 2 == 0:\n dchi = 0.5 * math.exp(-0.5 * x)\n f = 1.0 - 2.0 * dchi\n for i in range(4, df + 1, 2):\n dchi *= x / (i - 2)\n f -= 2.0 * dchi\n \n else:\n f = 2.0 * pnorm(math.sqrt(x), 0.0, 1.0) - 1.0\n dchi = math.exp(-0.5 * x) / math.sqrt(2.0 * math.pi * x)\n for i in range(3, df + 1, 2):\n dchi *= x / (i - 2)\n f -= 2.0 * dchi\n \n return f",
"def chisq_test(observed):\n\tn, k = observed.shape\n\trow = observed.sum(axis=0).reshape(1,-1)\n\tcol = observed.sum(axis=1).reshape(-1,1)\n\texpected = np.dot(col, row)/observed.sum()\n\t#chi2, pvalue = scipy.stats.mstats.chisquare(observed.ravel(), expected.ravel(), ddof = n+k-2)\n\tchi2 = (((observed-expected)**2)/expected).sum()\n\tpvalue = 1-scipy.stats.chi2.cdf(chi2, (n-1)*(k-1))\n\tmessage = \"\"\"\n\tPerforming the test of independence in\ta contingency table.\n\ttest statistic: %(chi2)s\n\tdegrees of freedom: %(df)s\n\tp-value: %(pvalue)s\n\t\"\"\" % {'chi2': chi2, 'df': (n-1)*(k-1), 'pvalue': pvalue}\n\tprint(message)\n\twarning = \"\"\"\n\tWarning message:\n\tChi-squared approximation may be incorrect\n\t\"\"\"\n\tif expected.min() < 5:\n\t\tprint(warning)\n\treturn chi2, pvalue",
"def f_test(chi1,df1,chi2,df2,red_chi = True):\n\n# if chi1/df1 > chi2/df2:\n#\tprob = 2. * f.cdf(chi1/df1, chi2/df2, df1, df2)\n# else:\n#\tprob = 2. * f.cdf(chi2/df2, chi1/df1, df2, df1)\n if red_chi:\n\tfval = (chi1/df1) / (chi2/df2)\n else:\n\tfval = chi1 / chi2\n prob = 2. * f.cdf((chi1/df1) / (chi2/df2), df1, df2)\n if prob > 1.: \n\treturn 2. - prob\n else:\n\treturn prob",
"def find_confidence(self, chi2, df):\n chi2_table = self.chi2_table\n nearest_df = round(find_nearest(chi2_table.index, df), 0)\n nearest_chi2 = round(find_nearest(chi2_table.loc[nearest_df], chi2), 6)\n for col in list(chi2_table):\n if nearest_chi2 == round(chi2_table[col][nearest_df], 6):\n # Subtract from one to get confidence.\n confidence = (1.0 - float(col))\n return confidence",
"def _two_sided_p_value(t, df):\n return 2 * scipy.stats.t.cdf(-np.abs(t), df=df)",
"def chi2_contingency(observed, correction=True, lambda_=None):\n observed = np.asarray(observed)\n if np.any(observed < 0):\n raise ValueError(\"All values in `observed` must be nonnegative.\")\n if observed.size == 0:\n raise ValueError(\"No data; `observed` has size 0.\")\n\n expected = expected_freq(observed)\n if np.any(expected == 0):\n # Include one of the positions where expected is zero in\n # the exception message.\n zeropos = list(zip(*np.nonzero(expected == 0)))[0]\n raise ValueError(\"The internally computed table of expected \"\n \"frequencies has a zero element at %s.\" % (zeropos,))\n\n # The degrees of freedom\n dof = expected.size - sum(expected.shape) + expected.ndim - 1\n\n if dof == 0:\n # Degenerate case; this occurs when `observed` is 1D (or, more\n # generally, when it has only one nontrivial dimension). In this\n # case, we also have observed == expected, so chi2 is 0.\n chi2 = 0.0\n p = 1.0\n else:\n if dof == 1 and correction:\n # Adjust `observed` according to Yates' correction for continuity.\n observed = observed + 0.5 * np.sign(expected - observed)\n\n chi2, p = power_divergence(observed, expected,\n ddof=observed.size - 1 - dof, axis=None,\n lambda_=lambda_)\n\n return chi2, p, dof, expected",
"def max_chi_value(df=1, start_chi=25):\n if df == 1:\n return start_chi\n\n start_p_value = 1 - chi2.cdf(start_chi, 1)\n max_chi = start_chi\n p_value = 1 - chi2.cdf(max_chi, df)\n\n while p_value >= start_p_value:\n max_chi += 1\n p_value = 1 - chi2.cdf(max_chi, df)\n\n return max_chi",
"def lambda_test(p_values, df=1):\n from scipy.stats import chi2\n assert np.max(p_values) <= 1 and np.min(p_values) >= 0, 'These do not appear to be p-values'\n\n chi_sq_scores = chi2.ppf(1 - p_values, df)\n return np.median(chi_sq_scores) / chi2.ppf(0.5, df)",
"def prob4():\n#raise NotImplementedError(\"Problem 4 Incomplete\")\n h = lambda x : x[0] < -1 and x[1] > 1\n f = lambda x : stats.multivariate_normal.pdf(x,mean=np.array([0,0]),cov=np.eye(2))\n g = lambda x : stats.multivariate_normal.pdf(x,mean=np.array([-1,1]),cov=np.eye(2))\n X = np.random.multivariate_normal(mean=np.array([-1,1]),cov=np.eye(2),size=10000)\n return 1./10000*np.sum(np.apply_along_axis(h,1,X)*np.apply_along_axis(f,1,X)/np.apply_along_axis(g,1,X))",
"def test_chi2(y0, y1, level):\n if len(y0) == 0 or len(y1) == 0:\n return True\n l0 = np.argmax(y0, axis=1)\n l1 = np.argmax(y1, axis=1)\n v, c = np.unique(np.append(l0,l1), return_counts=True)\n v0, c0 = np.unique(l0, return_counts=True)\n v1, c1 = np.unique(l1, return_counts=True)\n p = np.zeros(len(y0[0]))\n p0 = p.copy()\n p1 = p.copy()\n p[v] = c / np.sum(c)\n p0[v0] = c0 / np.sum(c0)\n p1[v1] = c1 / np.sum(c1)\n p0[p0==0] = 0.05\n p1[p1 == 0] = 0.05\n p[p==0] = 0.05\n _, p0_value = stat.chisquare(p0, p)\n _, p1_value = stat.chisquare(p1, p)\n if 1-p0_value > level or 1-p1_value > level:\n return False\n else:\n return True",
"def chi2(data, fdata, err):\n return sum(((data-fdata)/err)**2)",
"def my_chisq(ydata,ymod,deg=2,sd=None): \n # Chi-square statistic \n if sd==None:\n chisq=np.sum((ydata-ymod)**2) \n else:\n chisq=np.sum( ((ydata-ymod)/sd)**2 ) \n\n # Number of degrees of freedom assuming 2 free parameters \n nu=ydata.size-1-deg \n return chisq/nu",
"def P(phi, phib, df):\n\tif f(0,phi,phib,df)*f(1,phi,phib,df) < 0:\n\t\treturn opt.bisect(f, 0, 1, args=(phi,phib,df), maxiter=500) # Bisection method\n\telse:\n\t\treturn opt.newton(f, 1.0, args=(phi,phib,df), maxiter=5000) # Newton-Raphson",
"def find_chi2(self, df, confidence=0.95):\n chi2_table = self.chi2_table\n nearest_confidence = round(find_nearest(list(chi2_table), 1.0-confidence), 4)\n nearest_df = round(find_nearest(chi2_table.index, df), 0)\n chi2 = round(chi2_table[str(nearest_confidence)][nearest_df], 4)\n return chi2",
"def model_value(likelihood, df, significance):\n v_m = 2 * np.log(likelihood)\n if df > 0:\n v_m -= chi2.cdf(1-significance, df)\n return v_m",
"def cohensd2problarger(d):\n\n return stats.norm.cdf(d / np.sqrt(2))",
"def fisher(probs):\r\n stat = -2 * log(array(probs)).sum()\r\n if isnan(stat):\r\n return nan\r\n else:\r\n try:\r\n return chi_high(stat, 2 * len(probs))\r\n except OverflowError as e:\r\n return nan",
"def calculate_chi_square_p_value(A):\n nonzero_columns = np.where(A.any(axis=0))[0]\n A_nonzero_columns = A[:, nonzero_columns]\n _, p_value, _, _ = scipy.stats.chi2_contingency(A_nonzero_columns)\n return p_value",
"def lchisquare(f_obs,f_exp=None):\r\n k = len(f_obs) # number of groups\r\n if f_exp == None:\r\n f_exp = [sum(f_obs)/float(k)] * len(f_obs) # create k bins with = freq.\r\n chisq = 0\r\n for i in range(len(f_obs)):\r\n chisq = chisq + (f_obs[i]-f_exp[i])**2 / float(f_exp[i])\r\n return chisq, chisqprob(chisq, k-1)",
"def chi_square_test(data, var1, var2, pairwise = False, alpha = 0.05): \n cont_table = pd.crosstab(data[var1], data[var2])\n if not pairwise:\n missing_indices = find_indices_with_value(cont_table, 0)\n cont_table = cont_table.drop(missing_indices)\n chi2, p, df, exp = stats.chi2_contingency(cont_table)\n if (p < alpha): \n print(\"statistically significant: %s\" % (tuple([var1, var2]), ))\n print(\"p-value is \" + str(p))\n display(cont_table.apply(lambda r: 100 * r/r.sum(), axis=0))\n\n return chi2, p, df, exp\n else:\n pairs = [\",\".join(map(str, comb)).split(\",\") for comb in combinations(cont_table.columns, 2)]\n for pair in pairs:\n cont_table2 = cont_table[pair]\n missing_indices = find_indices_with_value(cont_table2, 0)\n cont_table2 = cont_table2.drop(missing_indices)\n \n chi2, p, df, exp = stats.chi2_contingency(cont_table2)\n if (p < alpha): \n print(\"statistically significant: %s\" % (tuple(pair), ))\n print(\"p-value is \" + str(p))\n display(cont_table2.apply(lambda r: 100 * r/r.sum(), axis=0))",
"def compare_distributions(self, alpha = 0.05, cdf = None, args=(), freq = False):\n\n if freq:\n if self.y is not None:\n stat, p = chisquare(self.x, f_exp = self.y)\n else:\n return None\n else:\n if cdf is not None:\n stat, p = kstest(self.x, cdf = cdf, args=args)\n else:\n if self.y is not None:\n stat, p = kstest(self.x, cdf = self.y)\n else:\n return None\n\n return self._result(p,alpha)",
"def chi_test_goodness_of_fit(dimension1, dimension2):\n total_len = (dimension1 + dimension2) / 2\n\n X = (dimension1 - total_len) ** 2 / total_len + (dimension2 - total_len) ** 2 / total_len\n return 1 - chi2.cdf(X, df=1)",
"def chisq_and_posthoc_corrected(df: pd.DataFrame, correction: str = 'bonferroni') -> pd.DataFrame:\n\n # perform chi-square omnibus test on full data\n chi2, p, dof, ex = chi2_contingency(df, correction=True)\n print('Chi-Square Omnibus Test Results: Test statistic: {}, df: {}, p-value: {}'.format(chi2, dof, p))\n\n # post-hoc analysis\n print('Performing post hoc testing using: {} p-value correction method'.format(correction))\n p_values, all_combinations = [], list(combinations(df.index, 2)) # gathering all combinations for post-hoc chi2\n\n for comb in all_combinations:\n new_df = df[(df.index == comb[0]) | (df.index == comb[1])]\n chi2, p, dof, ex = chi2_contingency(new_df, correction=True)\n p_values.append(p)\n\n # checking significance and application of correction for multiple testing\n reject_list, corrected_p_vals = multipletests(p_values, method=correction)[:2]\n\n # save results to a pandas df\n post_hoc_results = pd.DataFrame({'comparison': ['-'.join(x) for x in all_combinations],\n 'original_pvalue': p_values,\n 'corrected_pvalue': list(corrected_p_vals),\n 'reject_h0': list(reject_list)})\n\n return post_hoc_results",
"def compute(real_data, synthetic_data):\n f_obs, f_exp = get_frequencies(real_data, synthetic_data)\n if len(f_obs) == len(f_exp) == 1:\n pvalue = 1.0\n else:\n _, pvalue = chisquare(f_obs, f_exp)\n\n return pvalue",
"def calculate_chi_squared(self):\n chi = 0\n obsVals, expVals = self.calculate_obs_and_exp()\n for i in range(4):\n if expVals[i] != 0:\n chi += (obsVals[i] - expVals[i])**2 / expVals[i]\n return chi",
"def P(phi, phib, df):\n\tprint 'P'\n\tif f(0,phi,phib,df)*f(1,phi,phib,df) < 0:\n\t\tprint 'brent'\n\t\t#return opt.brentq(f, 0, 1, args=(phi,df)) # Brent's method\n\t\t#return opt.brenth(f, 0, 1, args=(phi,df)) # Brent's method\n\t\treturn opt.bisect(f, 0, 1, args=(phi,phib,df)) # Bisection method\n\t\t#x,r = opt.bisect(f, 0, 1, args=(phi,df), full_output=True) # Bisection method\n\t\t#print r.iterations\n\t\t#return x\n\telse:\n\t\tprint 'newton'\n\t\treturn opt.newton(f, 0.5, args=(phi,phib,df)) # Newton-Raphson\n\t#print 'newtonpre'\n #return opt.newton(f, 1.0, args=(phi,phib,df)) # Newton-Raphson\n\t#print 'newtonpost'",
"def chisquare(obs, exp=None):\n obs = N.array(obs)\n\n # get total number of observations\n nobs = N.sum(obs)\n\n # if no expected value are supplied assume equal distribution\n if exp == None:\n exp = N.ones(obs.shape) * nobs / N.prod(obs.shape)\n\n # make sure to have floating point data\n exp = exp.astype(float)\n\n # compute chisquare value\n chisq = N.sum((obs - exp )**2 / exp)\n\n # return chisq and probability (upper tail)\n return chisq, stats.chisqprob(chisq, N.prod(obs.shape) - 1)",
"def lfprob (dfnum, dfden, F):\r\n p = betai(0.5*dfden, 0.5*dfnum, dfden/float(dfden+dfnum*F))\r\n return p"
] | [
"0.74559987",
"0.724271",
"0.69441646",
"0.6661619",
"0.6657019",
"0.60960495",
"0.6026556",
"0.59603435",
"0.5947283",
"0.5885562",
"0.5863677",
"0.5858906",
"0.5848407",
"0.58128613",
"0.581115",
"0.580568",
"0.57811344",
"0.57709396",
"0.572794",
"0.5714124",
"0.56916016",
"0.5616916",
"0.56051636",
"0.5598142",
"0.55731696",
"0.5562697",
"0.55598766",
"0.5556161",
"0.5554857",
"0.5549409"
] | 0.7435911 | 1 |
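The negatives listed above are all variants of the same core operation: a chi-square test on a contingency table (scipy.stats.chi2_contingency), sometimes followed by post-hoc pairwise comparisons. A minimal, self-contained sketch of that shared pattern is shown below; the observed counts and the 0.05 threshold are purely illustrative and are not taken from any entry above.

import numpy as np
from scipy.stats import chi2_contingency

# Illustrative 2x3 contingency table of observed counts (rows: group, cols: outcome).
observed = np.array([[30, 14, 16],
                     [35, 22, 13]])

chi2, p, dof, expected = chi2_contingency(observed)
print(f"chi2={chi2:.3f}, dof={dof}, p={p:.4f}")
if p < 0.05:
    print("statistically significant association between the two variables")
else:
    print("no evidence against independence at alpha = 0.05")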
Make a pair of functions flatten(tree) -> x, unflatten(x) -> tree | def flatten_and_unflatten(input_tree) -> Tuple[Callable, Callable]:
tree_structure = tree_util.tree_structure(input_tree)
leaf_shapes = [get_shape(leaf) for leaf in tree_util.tree_leaves(input_tree)]
def flatten(tree):
leaves = tree_util.tree_leaves(tree)
flattened_leaves = [reshape(leaf, num_elements(get_shape(leaf))) for leaf in leaves]
x = jnp.hstack(flattened_leaves)
assert len(x.shape) == 1
return x
def unflatten(x):
leaves = []
i = 0
for shape in leaf_shapes:
n = num_elements(shape)
leaves.append(reshape(x[i : i + n], shape))
i += n
tree = tree_util.tree_unflatten(tree_structure, leaves)
return tree
return flatten, unflatten | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def flatten():",
"def flatten(node: ir.Node) -> ir.Node:\n\n def visitor(node: ir.Node, args=None) -> ir.Node:\n if isinstance(node, ir.BinaryOp):\n\n # Flatten singleton BinaryOp\n if len(node.operand) == 1:\n return flatten(node.operand[0])\n\n # Flatten BinaryOp with reduction operators\n new_operator: List[str] = []\n new_operand: List[ir.Expr] = []\n for child_operator, child_operand in zip((None, *node.operator),\n node.operand):\n if child_operator is not None:\n new_operator.append(child_operator)\n # The first operator can always be flattened if two operations has the\n # same type.\n if child_operator in (None, '||', '&&', *'|&+*') and \\\n type(child_operand) is type(node):\n new_operator.extend(child_operand.operator)\n new_operand.extend(child_operand.operand)\n else:\n new_operand.append(child_operand)\n # At least 1 operand is flattened.\n if len(new_operand) > len(node.operand):\n return flatten(type(node)(operator=new_operator, operand=new_operand))\n\n # Flatten compound Operand\n if isinstance(node, ir.Operand):\n for attr in node.ATTRS:\n val = getattr(node, attr)\n if val is not None:\n if isinstance(val, ir.Node):\n return flatten(val)\n break\n else:\n raise util.InternalError('undefined Operand')\n\n # Flatten identity unary operators\n if isinstance(node, ir.Unary):\n minus_count = node.operator.count('-')\n if minus_count % 2 == 0:\n plus_count = node.operator.count('+')\n if plus_count + minus_count == len(node.operator):\n return flatten(node.operand)\n not_count = node.operator.count('!')\n if not_count % 2 == 0 and not_count == len(node.operator):\n return flatten(node.operand)\n\n # Flatten reduction functions\n if isinstance(node, ir.Call):\n operator = getattr(node, 'name')\n if operator in ir.REDUCTION_FUNCS:\n operands: List[ir.Expr] = []\n for operand in getattr(node, 'arg'):\n if (isinstance(operand, ir.Call) and\n getattr(operand, 'name') == operator):\n operands.extend(getattr(operand, 'arg'))\n else:\n operands.append(operand)\n if len(operands) > len(getattr(node, 'arg')):\n return flatten(ir.Call(name=operator, arg=operands))\n\n return node\n\n if not isinstance(node, ir.Node):\n return node\n\n return node.visit(visitor)",
"def flatten(self, root: TreeNode) -> None:\r\n # Divide and Conquer\r\n self.helper(root)",
"def flatten(self, root: TreeNode) -> None:\n '''\n use stack !!!\n '''\n prev = None\n stack = [root]\n while stack:\n cur = stack.pop()\n if cur is None:\n continue\n if prev is not None:\n prev.right = cur\n prev.left = None\n stack.append(cur.right)\n stack.append(cur.left)\n prev = cur\n if prev is not None:\n prev.right = None\n prev.left = None\n return root",
"def local_flatten_lift(node):\r\n if (isinstance(node.op, T.Flatten) and\r\n node.inputs[0].owner and\r\n isinstance(node.inputs[0].owner.op, T.Elemwise) and\r\n len(node.inputs[0].owner.inputs) == 1):\r\n f = node.op(node.inputs[0].owner.inputs[0])\r\n e = node.inputs[0].owner.op(f)\r\n return [e]",
"def flatten(*args):\n return _flatten(args)",
"def flatten(self, root: TreeNode) -> None:\n def helper(root):\n if not root.left and not root.right:\n return root, root\n L, R = None, None\n if root.left:\n L, llast = helper(root.left)\n if root.right:\n R, rlast = helper(root.right)\n if L and R:\n root.right = L\n root.left = None\n llast.left = None\n llast.right = R\n return root, rlast\n elif L:\n root.left = None\n root.right = L\n return root, llast\n else:\n root.left = None\n root.right = R\n return root, rlast\n helper(root)[0]",
"def convert_flatten(node, **kwargs):\n return create_basic_op_node('Flatten', node, kwargs)",
"def flatten(self, root) -> None:\n def flat(node):\n if node is None:\n return None\n if node.left is None and node.right is None:\n return node\n lefttail = flat(node.left) # postorder first flatten left and right and get end element\n righttail = flat(node.right) # postorder flatten right\n\n if lefttail:\n lefttail.right = node.right # put right in lefttail\n node.right = node.left # put left in right\n node.left = None # left becomes None\n\n if righttail:\n return righttail\n else:\n return lefttail\n flat(root)",
"def flatten_tree(tree):\n if isinstance(tree[1],str):\n return [tree[1]]\n if isinstance(tree[1],Terminal):\n return [tree[1]]\n s = []\n for subtree in tree[1]:\n s += flatten_tree(subtree)\n return s",
"def flatten(BST):\r\n leaves = ()\r\n if isinstance(BST,tuple):\r\n return flatten(BST[0]) + flatten(BST[1])\r\n else:\r\n leaves = leaves + (BST,)\r\n return leaves",
"def flatten(self, root: TreeNode) -> None:\n def flatL(node):\n #only need to return the tail.\n if node == None:\n return None\n if node.left == None and node.right == None:\n return node\n nodeR = node.right\n nodeLW = flatL(node.left)\n nodeRW = flatL(node.right)\n \n if nodeLW:\n node.right= node.left\n node.left = None\n nodeLW.right = nodeR\n if nodeRW == None:\n return nodeLW\n return nodeRW\n\n \n flatL(root)",
"def flatten(self, root: TreeNode) -> None:\n pre_list = list()\n stack = list()\n node = root\n while node or stack:\n while node:\n pre_list.append(node)\n stack.append(node)\n node = node.left\n node = stack.pop()\n node = node.right\n for i in range(1, len(pre_list)):\n pre, next = pre_list[i-1], pre_list[i]\n pre.left = None\n pre.right = next",
"def flatten(self, root) -> None:\n node = root\n stack = []\n while node:\n if node.left:\n if node.right:\n stack.append(node.right)\n node.right = node.left\n node.left = None\n if not node.left and not node.right and stack:\n node.right = stack.pop()\n node = node.right",
"def flatten(self, root: TreeNode) -> None: \n stack = []\n prev = root\n \n if not root:\n return root\n \n if root.right:\n stack.append(root.right)\n \n if root.left:\n stack.append(root.left)\n\n root.left = None\n \n while stack: \n tn = stack.pop()\n if tn.right:\n stack.append(tn.right)\n if tn.left:\n stack.append(tn.left)\n\n tn.left = None\n prev.right = tn\n prev = prev.right",
"def flatten(self, root: TreeNode) -> None:\n if not root:\n return\n left = root.left\n right = root.right\n root.left = None\n self.flatten(left)\n self.flatten(right)\n root.right = left\n cur = root\n while cur.right:\n cur = cur.right\n cur.right = right",
"def flatten(self, root: TreeNode) -> None:\n def flatL(node):\n if node == None:\n return None\n returnNode = node\n nodeR = node.right\n nodeL = node.left\n if nodeL:\n leftT = flatL(node.left)\n node.right = node.left\n node.left = None\n returnNode = leftT\n if nodeR:\n rightT = flatL(nodeR)\n if nodeL:\n leftT.right = nodeR\n returnNode = rightT\n return returnNode\n flatL(root)\n return root",
"def flatten(self, root: TreeNode) -> None:\n # User must pass a node\n if root:\n root_flatten = TreeNode(root.val)\n leaf = inOrderTreeWalk(root, root_flatten)\n root.left = None\n root.right = root_flatten.right.right",
"def flatten(self, root: TreeNode) -> None:\n if root is None:\n return\n def f(root):\n old_right = root.right\n if root.left is not None:\n left_last = f(root.left)\n root.right = root.left\n root.left = None\n else:\n left_last = root\n left_last.right = old_right\n \n if old_right is not None:\n last = f(old_right)\n else:\n last = left_last\n \n return last\n f(root)",
"def flatten(self, root: TreeNode) -> None:\n helper = [] ## 需要额外存储空间\n def traverse(node):\n if not node: return\n helper.append(node)\n traverse(node.left)\n traverse(node.right)\n \n traverse(root)\n node = root\n for i in range(1,len(helper)):\n node.right = helper[i]\n node.left = None\n node = node.right\n return",
"def flatten(self, root: TreeNode) -> None:\n if not root:\n return\n \n node = root\n while node:\n if node.left:\n rightmost = node.left\n while rightmost.right:\n rightmost = rightmost.right\n rightmost.right = node.right\n node.right = node.left\n node.left = None\n node = node.right",
"def _tree_flatten_with_names(tree):\n vals, tree_def = jax.tree_flatten(tree)\n\n # \"Fake\" token tree that is use to track jax internal tree traversal and\n # adjust our custom tree traversal to be compatible with it.\n tokens = range(len(vals))\n token_tree = tree_def.unflatten(tokens)\n val_names, perm = zip(*_traverse_with_names(token_tree))\n inv_perm = np.argsort(perm)\n\n # Custom traversal should visit the same number of leaves.\n assert len(val_names) == len(vals)\n\n return [(val_names[i], v) for i, v in zip(inv_perm, vals)], tree_def",
"def flatten(tree,tag):\n List=[]\n Queue=deque([tree])\n try:\n while Queue:\n current=Queue.popleft()\n if type(current)==list: \n if isinstance(current[0],basestring):\n List.append([current[1],tag,current])\n elif isinstance(current[0][0],basestring) and isinstance(current[0][1],(int, long, float, complex)) and type(current[0][2])==list:\n List.append([current[0][1],tag,current])\n Queue.extend(current[1:])\n else:\n return []\n else:\n return []\n except Exception as error:\n print 'error: current=', str(current)\n return []\n List=sorted(List,key=lambda x: x[0])\n return List",
"def _unflatten(updates, flat):\n updates_flat, treedef = tree_flatten(updates)\n offsets = []\n for update in updates_flat:\n size = np.prod(update.shape)\n if offsets:\n offsets.append(size + offsets[-1])\n else:\n offsets.append(size)\n del offsets[-1]\n flat_split = jnp.split(flat, offsets)\n reshaped = [\n jnp.reshape(flat_update, update.shape)\n for flat_update, update in zip(flat_split, updates_flat)\n ]\n return tree_unflatten(treedef, reshaped)",
"def flatten(self, root: TreeNode) -> None:\n if not root or (not root.left and not root.right):\n return\n if root.left:\n self.flatten(root.left)\n temp = root.right\n root.right = root.left\n root.left = None\n while root.right:\n root = root.right\n root.right = temp\n self.flatten(root.right)",
"def flatten(self, root: TreeNode) -> None:\n if not root:\n return\n if not root.left and not root.right:\n return\n left = root.left\n right = root.right\n root.right = left\n root.left = None\n self.flatten(root.right)\n node = root\n while node.right:\n node = node.right\n node.right = right\n self.flatten(right)",
"def flatten(self, root: TreeNode) -> None:\n if not root: return\n self.flatten(root.right)\n self.flatten(root.left)\n root.right = self.last\n root.left = None\n self.last = root",
"def flatten(self, root: TreeNode) -> None:\n if not root:\n return\n \n self.flatten(root.left)\n self.flatten(root.right)\n \n left, right = root.left, root.right\n \n root.left = None\n root.right = left\n \n node = root\n while node.right:\n node = node.right\n node.right = right",
"def _create_flatten(cls, onnx_node, inputs, opset_version):\n factor = onnx_node.getattr('axis', 1)\n if factor < 0:\n # in order to support the negative axis\n factor = len(inputs[0].shape) + factor\n\n _, forward = cls._common_onnx_node_to_singa_op(onnx_node, inputs,\n opset_version)\n return None, forward(axis=factor)",
"def flatten(self, root: TreeNode) -> None:\n\n def helper(node):\n \"\"\"\n :param node: root node\n :return: rightmost node\n \"\"\"\n if not node:\n return None\n\n if not node.left and not node.right:\n return node\n\n left = helper(node.left)\n right = helper(node.right)\n\n if left:\n left.right = node.right\n node.right = node.left\n node.left = None\n\n return right if right else left\n\n helper(root)"
] | [
"0.7424356",
"0.7009518",
"0.673188",
"0.6717321",
"0.6695242",
"0.6640111",
"0.66385573",
"0.6636192",
"0.6581992",
"0.6532039",
"0.6504157",
"0.65039814",
"0.6488403",
"0.6451589",
"0.643888",
"0.6405306",
"0.6398433",
"0.63705224",
"0.6344484",
"0.62895566",
"0.6286946",
"0.6277937",
"0.625556",
"0.622861",
"0.62187356",
"0.6208127",
"0.6204769",
"0.61912787",
"0.6169062",
"0.6158178"
] | 0.7750934 | 0 |
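The flatten_and_unflatten document in the record above leans on helpers (get_shape, num_elements, reshape) defined elsewhere in its source repository. The sketch below reimplements the same idea using only numpy, jax.numpy, and jax.tree_util so it can run on its own; the function and variable names here are illustrative, not the original repo's.

import numpy as np
import jax.numpy as jnp
from jax import tree_util

def make_flatten_unflatten(tree):
    # Record the structure and per-leaf shapes of the reference pytree.
    treedef = tree_util.tree_structure(tree)
    shapes = [np.shape(leaf) for leaf in tree_util.tree_leaves(tree)]

    def flatten(t):
        # Concatenate all leaves into a single 1-D vector.
        leaves = tree_util.tree_leaves(t)
        return jnp.concatenate([jnp.reshape(leaf, (-1,)) for leaf in leaves])

    def unflatten(x):
        # Slice the vector back into leaves and rebuild the original structure.
        leaves, i = [], 0
        for shape in shapes:
            n = int(np.prod(shape))
            leaves.append(jnp.reshape(x[i:i + n], shape))
            i += n
        return tree_util.tree_unflatten(treedef, leaves)

    return flatten, unflatten

# Round-trip check on a small pytree.
params = {"w": jnp.ones((2, 3)), "b": jnp.zeros(3)}
flatten, unflatten = make_flatten_unflatten(params)
x = flatten(params)            # 1-D vector of length 9
restored = unflatten(x)        # same structure and shapes as params
assert x.shape == (9,)
assert restored["w"].shape == (2, 3)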
load all training data into a dictionary stored in order of X, u, L, W, k | def load_all():
training_data = dict()
for i in range(7):
training_data[i+1] = load_data(i+1)
return training_data | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _load_training_data(self):\n self._save_training_data()",
"def training_data(self):\n if self._training_data is None:\n self._load_training_data()\n if self._swapped_training_data is None:\n self._swapped_training_data = {}\n for key, value in self._training_data.items():\n self._swapped_training_data[key] = value\n return self._swapped_training_data",
"def load_data():\n print(\"PARSING TRAIN\")\n ys_train, x_train, ids_train = load_pickle_data(\"ys_train\"), load_pickle_data(\"x_train\"), load_pickle_data(\n \"ids_train\")\n if ys_train is None or x_train is None or ids_train is None:\n ys_train, x_train, ids_train = load_csv_data(\"{}/train.csv\".format(DATA_DIR))\n dump_pickle_data(ys_train, \"ys_train\")\n dump_pickle_data(x_train, \"x_train\")\n dump_pickle_data(ids_train, \"ids_train\")\n\n print(\"PARSING TEST\")\n x_test, ids_test = load_pickle_data(\"x_test\"), load_pickle_data(\"ids_test\")\n if x_test is None or ids_test is None:\n _, x_test, ids_test = load_csv_data(\"{}/test.csv\".format(DATA_DIR))\n dump_pickle_data(x_test, \"x_test\")\n dump_pickle_data(ids_test, \"ids_test\")\n\n return ys_train, x_train, ids_train, x_test, ids_test",
"def load_training_set():\n global training_set\n f = gzip.open('mnist.pkl.gz', 'rb')\n train, valid, test = cPickle.load(f)\n [training_set, training_labels] = train\n [validation_set, validation_labels] = valid\n [testing_set, testing_labels] = test\n training_set = np.concatenate((training_set, validation_set))\n f.close()\n np.random.shuffle(training_set)",
"def _load_data(self):\n self.mapper = Mapper()\n self.mapper.generate_vocabulary(self.review_summary_file)\n self.X_fwd, self.X_bwd, self.Y = self.mapper.get_tensor(reverseflag=True)\n # Store all the mapper values in a dict for later recovery\n self.mapper_dict = dict()\n self.mapper_dict['seq_length'] = self.mapper.get_seq_length()\n self.mapper_dict['vocab_size'] = self.mapper.get_vocabulary_size()\n self.mapper_dict['rev_map'] = self.mapper.get_reverse_map()\n # Split into test and train data\n self._split_train_tst()",
"def load_data():\n f = gzip.open('../data/mnist.pkl.gz', 'rb')\n training_data, validation_data, test_data = cPickle.load(f)\n f.close()\n return (training_data, validation_data, test_data)",
"def load_train_data():\r\n X_train = np.load('data/train/X_train.npy')\r\n scaling_train = np.load('data/train/scaling_train.npy')\r\n ids_train = np.load('data/train/ids_train.npy')\r\n y_train = np.load('data/train/y_train.npy')\r\n\r\n seed = np.random.randint(1, 10e6)\r\n np.random.seed(seed)\r\n np.random.shuffle(X_train)\r\n np.random.seed(seed)\r\n np.random.shuffle(scaling_train)\r\n np.random.seed(seed)\r\n np.random.shuffle(ids_train)\r\n np.random.seed(seed)\r\n np.random.shuffle(y_train)\r\n\r\n return X_train, scaling_train, ids_train, y_train",
"def load_data():\n data = gzip.open(\"mnist.pkl.gz\", \"rb\")\n train_set, valid_set, test_set = cPickle.load(data)\n data.close()\n\n # Combine validation and train folds to recreate the master 60k set.\n new_images = numpy.concatenate((train_set[0], valid_set[0]))\n new_labels = numpy.concatenate((train_set[1], valid_set[1]))\n\n train_set = (new_images, new_labels)\n \n return (train_set, test_set)",
"def load_data_pkl(self):\n pkl_name = '{}/data/mini-imagenet-cache-{}.pkl'.format(self.root_dir, self.split)\n print('Loading pkl dataset: {} '.format(pkl_name))\n\n try:\n with open(pkl_name, \"rb\") as f:\n data = pkl.load(f, encoding='bytes')\n image_data = data[b'image_data']\n class_dict = data[b'class_dict']\n except:\n with open(pkl_name, \"rb\") as f:\n data = pkl.load(f)\n image_data = data['image_data']\n class_dict = data['class_dict']\n\n print(data.keys(), image_data.shape, class_dict.keys())\n data_classes = sorted(class_dict.keys()) # sorted to keep the order\n\n n_classes = len(data_classes)\n print('n_classes:{}, n_label:{}, n_unlabel:{}'.format(n_classes,self.n_label,self.n_unlabel))\n dataset_l = np.zeros([n_classes, self.n_label, self.im_height, self.im_width, self.channels], dtype=np.float32)\n if self.n_unlabel>0:\n dataset_u = np.zeros([n_classes, self.n_unlabel, self.im_height, self.im_width, self.channels], dtype=np.float32)\n else:\n dataset_u = []\n\n for i, cls in enumerate(data_classes):\n idxs = class_dict[cls] \n np.random.RandomState(self.seed).shuffle(idxs) # fix the seed to keep label,unlabel fixed\n dataset_l[i] = image_data[idxs[0:self.n_label]]\n if self.n_unlabel>0:\n dataset_u[i] = image_data[idxs[self.n_label:]]\n print('labeled data:', np.shape(dataset_l))\n print('unlabeled data:', np.shape(dataset_u))\n \n self.dataset_l = dataset_l\n self.dataset_u = dataset_u\n self.n_classes = n_classes\n\n del image_data",
"def load_training_data(self) -> Tuple[List[np.ndarray], np.ndarray]:\n return self._load_set(config.TRAIN_DIR, True)",
"def load_data():\r\n f = gzip.open('mnist.pkl.gz', 'rb')\r\n training_data, validation_data, test_data = pickle.load(f,encoding='bytes')\r\n f.close()\r\n return (training_data, validation_data, test_data)",
"def set_data():\r\n #if not os.path.exists(filepath):\r\n #download_data()\r\n metadata = read(filepath + flist[-1])\r\n ndata = metadata['num_cases_per_batch']\r\n ndim = metadata['num_vis']\r\n\r\n data, train, test = {}, {}, {}\r\n data['labels'] = metadata['label_names']\r\n data['ntraindata'] = metadata['num_cases_per_batch'] * (len(flist) - 2)\r\n data['ntestdata'] = metadata['num_cases_per_batch']\r\n data['ndim'] = metadata['num_vis']\r\n\r\n train['x'], train['y'] = convert_train(data['ntraindata'], data['ndim'])\r\n\r\n testdata = read(filepath + flist[-2])\r\n test['x'] = testdata['data']\r\n test['y'] = testdata['labels']\r\n\r\n data['train'], data['test'] = train, test\r\n save_pkl(data)",
"def _load_data(self):\n pickle_in = open(\"X_train.pickle\", \"rb\")\n self.X = pickle.load(pickle_in)\n pickle_in = open(\"y_train.pickle\", \"rb\")\n self.Y = pickle.load(pickle_in)\n\n pickle_in = open(\"X_test.pickle\", \"rb\")\n self.X_final = pickle.load(pickle_in)\n pickle_in = open(\"y_test.pickle\", \"rb\")\n self.Y_final = pickle.load(pickle_in)\n\n # Set input shape:\n if K.image_data_format() == 'channels_first':\n self.input_shape = (3, self.img_rows, self.img_cols)\n else:\n self.input_shape = (self.img_rows, self.img_cols, 3)\n\n self.X = self.X.astype('float32')\n self.X /= 255\n self.X_final = self.X_final.astype('float32')\n self.X_final /= 255\n print('X shape:', self.X.shape)\n print(self.X.shape[0], 'Samples')\n\n num_datapoints = 3000\n self.X = self.X[0:num_datapoints]\n self.Y = self.Y[0:num_datapoints]\n\n num_datapoints = 2000\n self.X_final = self.X_final[0:num_datapoints]\n self.Y_final = self.Y_final[0:num_datapoints]\n\n self.Y_final = to_categorical(self.Y_final, self.num_classes)\n\n # Initialize Data\n kfold = StratifiedKFold(n_splits=self.nFolds, shuffle=True)\n\n if self.b_eval_advanced:\n # Loop through the indices the split() method returns\n for index, (train_indices, test_indices) in enumerate(kfold.split(self.X, self.Y)):\n if index == 0:\n self.Y = to_categorical(self.Y, self.num_classes)\n\n # Generate batches from indices\n xtrain, xtest = self.X[train_indices], self.X[test_indices]\n ytrain, ytest = self.Y[train_indices], self.Y[test_indices]\n\n self.data.append(tuple([xtrain, xtest, ytrain, ytest]))\n\n if not self.b_eval_advanced:\n self.Y = to_categorical(self.Y, self.num_classes)\n\n #print(np.asarray(self.data).shape)\n #print(self.data)\n print(\"Y_final Shape\", self.Y_final.shape)",
"def process_training_data(train_page_names):\n\n print('Reading data')\n images_train = []\n labels_train = []\n for page_name in train_page_names:\n images_train = utils.load_char_images(page_name, images_train)\n labels_train = utils.load_labels(page_name, labels_train)\n labels_train = np.array(labels_train)\n\n print('Extracting features from training data')\n bbox_size = get_bounding_box_size(images_train)\n fvectors_train_full = images_to_feature_vectors(images_train, bbox_size)\n\n model_data = dict()\n model_data['labels_train'] = labels_train.tolist()\n model_data['bbox_size'] = bbox_size\n\n print('Reducing to 10 dimensions')\n fvectors_train = reduce_dimensions(fvectors_train_full, model_data)\n\n model_data['fvectors_train'] = fvectors_train.tolist()\n\n with open('ListOfOneHundredThousandWords.txt') as word_file:\n words_list = [words.replace(\" \", \"\").strip('\\n').upper() for words in word_file]\n model_data['words'] = words_list\n\n return model_data",
"def load_data():\n global X, Y, X_final, Y_final, input_shape\n\n pickle_in = open(\"X_train.pickle\", \"rb\")\n X = pickle.load(pickle_in)\n pickle_in = open(\"y_train.pickle\", \"rb\")\n Y = pickle.load(pickle_in)\n\n pickle_in = open(\"X_test.pickle\", \"rb\")\n X_final = pickle.load(pickle_in)\n pickle_in = open(\"y_test.pickle\", \"rb\")\n Y_final = pickle.load(pickle_in)\n\n if K.image_data_format() == 'channels_first':\n input_shape = (3, img_rows, img_cols)\n else:\n input_shape = (img_rows, img_cols, 3)\n\n X = X.astype('float32')\n X /= 255\n X_final = X_final.astype('float32')\n X_final /= 255\n print('X shape:', X.shape)\n print(X.shape[0], 'Samples')\n\n Y_final = to_categorical(Y_final, num_classes)\n\n if not b_eval_advanced:\n Y = to_categorical(Y, num_classes)\n\n print(\"Y_final Shape\",Y_final.shape)",
"def load_data():\n train = pd.read_csv(\"../input/train.csv\", dtype={\"Age\": np.float64}, )\n test = pd.read_csv(\"../input/test.csv\", dtype={\"Age\": np.float64}, )\n\n train = train.set_index('PassengerId')\n test = test.set_index('PassengerId')\n\n train = train.apply(preprocess, axis=1)\n test = test.apply(preprocess, axis=1)\n\n x_train = train.drop(['Survived'], axis=1)\n y_train = train['Survived']\n x_test = test\n return {'train': {'x': x_train, 'y': y_train},\n 'test': {'x': x_test},\n 'full_features': pd.concat([x_train, x_test])}",
"def load_data(tetrode_number=TETRODE_NUMBER):\n print(\"Loading data...\")\n X_train, X_valid, X_test, y_train_labels, y_valid_labels, y_test_labels = formatData(tetrode_number,BASENAME,CONV)\n print(\"Done!\")\n\n X_train = X_train.reshape(X_train.shape[0],1,X_train.shape[1],X_train.shape[2])\n X_valid = X_valid.reshape(X_valid.shape[0],1,X_valid.shape[1],X_valid.shape[2])\n X_test = X_test.reshape(X_test.shape[0],1,X_test.shape[1],X_test.shape[2])\n\n\n y_train = X_train\n y_valid = X_valid\n y_test = X_test\n\n r={}\n for x,y in zip(X_test,y_test_labels):\n # print(\"x: {}\".format(x))\n # print(\"y: {}\".format(y))\n _y = list(y)\n if int(_y.index(1.0)) not in r:\n r[int(_y.index(1.0))]=[x]\n else:\n r[int(_y.index(1.0))].append(x)\n\n for key in r:\n r[key] = np.asarray(r[key])\n\n\n return dict(\n X_train=X_train,\n y_train=y_train,\n X_valid=X_valid,\n y_valid=y_valid,\n X_test=X_test,\n y_test=y_test,\n labeled_test=r,\n caswells_dim = y_train_labels.shape[-1],\n num_examples_train=X_train.shape[0],\n num_examples_valid=X_valid.shape[0],\n num_examples_test=X_test.shape[0],\n input_shape=X_train.shape,\n output_dim=y_train.shape[-1],\n )",
"def load_dataset():\n temp = gzip.open('mnist.pkl.gz')\n train, val , test = pickle.load(temp,encoding='latin1')\n temp.close()\n train_inp = [np.reshape(x, (784,1)) for x in train[0]]\n train_outp = [one_hot(y) for y in train[1]]\n training_data = zip(train_inp, train_outp)\n validation_inp = [np.reshape(x, (784, 1)) for x in val[0]]\n validation_data = zip(validation_inp, val[1])\n test_inp = [np.reshape(x, (784, 1)) for x in test[0]]\n test_data = zip(test_inp, test[1])\n return (training_data,validation_data,test_data)",
"def load_data():\n f = gzip.open('../data/mnist.pkl.gz', mode='rb')\n\n # NOTE: I get errors when I don't use encoding='latin1' because of Python 2 vs Python 3 compatibility issues\n # training_data, validation_data, test_data = pickle.load(f, encoding='latin1')\n training_data, validation_data, test_data = pickle.load(f)\n\n f.close()\n\n return training_data, validation_data, test_data",
"def load_data(self):\n params = self.params\n catg = params.data_category\n langs = ['en', params.target_lang]\n data = {lang: {splt: {} for splt in (['train', 'valid'] if lang == 'en' else ['test'])} for lang in langs}\n clf_dataset_path = {\n lang: {\n splt: {\n 'x': os.path.join(params.data_path, '%s_%s_%s_x.bpe.pth' % (splt, lang, catg)),\n 'y': os.path.join(params.data_path, '%s_%s_%s_y.txt' % (splt, lang, catg)),\n } for splt in (['train', 'valid'] if lang == 'en' else ['test'])\n } for lang in langs\n }\n for splt in ['train', 'valid', 'test']:\n for lang in langs:\n if lang == 'en' and splt in ['train', 'valid'] or lang != 'en' and splt == 'test':\n # load data and dictionary\n data1 = load_binarized(clf_dataset_path[lang][splt]['x'], params)\n data['dico'] = data.get('dico', data1['dico'])\n # set dictionary parameters\n set_dico_parameters(params, data, data1['dico'])\n # create dataset\n data[lang][splt]['x'] = Dataset(data1['sentences'], data1['positions'], params)\n # load labels\n with open(clf_dataset_path[lang][splt]['y'], 'r') as f:\n labels = [int(l) for l in f]\n data[lang][splt]['y'] = torch.LongTensor(labels)\n assert len(data[lang][splt]['x']) == len(data[lang][splt]['y'])\n\n return data",
"def load_data(self):\n\n self._load_train_data()\n self._load_test_data()",
"def load_dataset(self):\n\n train_path = os.path.join(self.dataset_path, 'images_background')\n validation_path = os.path.join(self.dataset_path, 'images_evaluation')\n\n # First let's take care of the train alphabets\n for alphabet in os.listdir(train_path):\n if alphabet[0] == '.':\n continue\n alphabet_path = os.path.join(train_path, alphabet)\n\n current_alphabet_dictionary = {}\n\n for character in os.listdir(alphabet_path):\n if character[0] == '.':\n continue\n character_path = os.path.join(alphabet_path, character)\n\n current_alphabet_dictionary[character] = os.listdir(\n character_path)\n\n self.train_dictionary[alphabet] = current_alphabet_dictionary\n\n # Now it's time for the validation alphabets\n for alphabet in os.listdir(validation_path):\n alphabet_path = os.path.join(validation_path, alphabet)\n if alphabet[0] == '.':\n continue\n\n current_alphabet_dictionary = {}\n\n for character in os.listdir(alphabet_path):\n if character[0] == '.':\n continue\n character_path = os.path.join(alphabet_path, character)\n\n current_alphabet_dictionary[character] = os.listdir(\n character_path)\n\n self.evaluation_dictionary[alphabet] = current_alphabet_dictionary",
"def loadUnitaryFeatures(trainingObject):\n\n ID = trainingObject.attrs['ID']\n\n training = {}\n training[ID] = trainingObject.data\n \n return training",
"def load_cifa_10():\n train_set_x = np.ndarray([ 50000, 3072 ])\n train_set_y = np.ndarray( [50000] )\n\n batch_size = 10000\n for i in xrange(5):\n batch = open( datapath + \"data_batch_\"+str(i+1), 'rb')\n map = cPickle.load( batch )\n batch.close()\n train_set_x[ i*batch_size : (i+1)*batch_size , : ] = np.asarray( map[ 'data' ], dtype = 'float32' )\n train_set_y[ i*batch_size : (i+1)*batch_size ] = np.asarray( map[ 'labels' ], dtype = 'float32' )\n\n test_file = open( datapath + 'test_batch', 'rb')\n map = cPickle.load( test_file )\n test_file.close()\n \n test_set_x = np.asarray( map['data'], dtype = 'float32' )\n test_set_y = np.asarray( map['labels'], dtype = 'float32' )\n \n\n return train_set_x, train_set_y, test_set_x, test_set_y",
"def load_training(self):\n path = \"./training/\" + self.training + \".json\"\n\n data = {}\n\n with open(path, \"r\") as infile:\n data = json.load(infile)\n\n self.states = data[\"states\"]\n self.transitions = data[\"transitions\"]\n self.matrix = data[\"matrix\"]",
"def prepare_data(self):\n # Set up the path\n self.path_target_train = os.path.join(self.data_dir, self.train_path_file_target + \".pkl\")\n self.path_target_test = os.path.join(self.data_dir, self.test_path_file_target + \".pkl\")\n\n if not os.path.exists(self.path_target_train) or not os.path.exists(self.path_target_test):\n # Create vocabularies of the appropriate sizes.\n self.create_vocabulary(self.train_path_file)\n\n # Create token ids for the training data.\n input_train_path = self.train_path_file\n target_train_path = self.train_path_file_target\n train_input, train_input_length, train_labels = self.data_to_token_ids(input_train_path, target_train_path)\n\n # Create token ids for the validation data.\n input_test_path = self.test_path_file\n target_test_path = self.test_path_file_target\n test_input, test_input_length, _ = self.data_to_token_ids(input_test_path, target_test_path, train=False)\n\n # Collect data into a list\n training_data = [train_input, train_input_length, train_labels]\n test_data = [test_input, test_input_length]\n\n # Save all the data\n with open(self.path_target_train, 'wb') as f:\n pickle.dump(training_data,f)\n with open(self.path_target_test, 'wb') as f:\n pickle.dump(test_data, f)\n else:\n # Load data\n with open(self.path_target_train, 'rb') as f:\n training_data = pickle.load(f)\n with open(self.path_target_test, 'rb') as f:\n test_data = pickle.load(f)\n\n # Initialize vocabulary\n self.initialize_vocabulary()\n\n # Convert list into a numpy array - train data\n train_input = pd.DataFrame(training_data[0]).fillna(value=0).astype(int).values\n train_length_input = np.array(training_data[1], dtype=int)\n train_labels = np.array(training_data[2], dtype=int)\n\n # Convert list into a numpy array - test data\n test_input = pd.DataFrame(test_data[0]).fillna(value=0).astype(int).values\n test_length_input = pd.DataFrame(test_data[1]).fillna(value=0).astype(int).values\n\n # Printing maximum length\n print(\"Shape of the input training matrix {}\".format(str(train_input.shape)))\n print(\"Shape of the input test matrix {}\".format(str(test_input.shape)))\n\n # Copy the files\n self.copy_files()\n\n # Return output\n return train_input, train_length_input, train_labels, test_input, test_length_input",
"def parse_train_data(training_set, language):\n print \"Reading training set: \" + training_set\n xmldoc = minidom.parse(training_set)\n lex_list = xmldoc.getElementsByTagName('lexelt')\n training_output = {}\n\n print \"Processing training set and training models...\"\n for node in lex_list:\n lexelt = node.getAttribute('item')\n training_output[lexelt] = {}\n inst_list = node.getElementsByTagName(\"instance\")\n # setup the neighbor_word_list within k distance of the word\n neighbor_word_list = []\n senseid_set = set()\n for inst in inst_list:\n sentence = inst.getElementsByTagName('context')[0]\n senseid_set.add(inst.getElementsByTagName('answer')[0].getAttribute('senseid'))\n neighbor_word_list = list(set(neighbor_word_list + get_neighbor_words_list(sentence, language)))\n senseid_list = list(senseid_set)\n training_output[lexelt][\"neighbor_word_list\"] = neighbor_word_list\n _4c_4d_feature = extract_4c_4d_feature(neighbor_word_list, senseid_list, inst_list, language)\n training_output[lexelt][\"4c_4d_feature\"] = _4c_4d_feature\n x_list = []\n y_list = []\n for inst in inst_list:\n y = inst.getElementsByTagName('answer')[0].getAttribute('senseid')\n if ignore_U_activated and y.__eq__('U'):\n continue\n y_list.append(str(replace_accented(y)))\n x = extract_vector(inst, neighbor_word_list, _4c_4d_feature, language)\n x_list.append(x)\n # for each node, build a classifier\n if language.__eq__(\"English\"):\n #clf = RandomForestClassifier(n_estimators=10) 58.9\n #clf = SGDClassifier() 61.1\n #clf = MultinomialNB() 62.9\n #clf = BernoulliNB() 55.8\n #clf = Perceptron() 60.4\n #clf = PassiveAggressiveClassifier() 62.1\n #clf = RidgeClassifier() 62.7\n #clf = svm.LinearSVC() 62.5\n #clf = KNeighborsClassifier()\n #clf = GaussianNB()\n clf = MultinomialNB(alpha=0.95) #+ alpha=0.95 + k=13 + left_right_order + vector_0_1 off = 64.7\n elif language.__eq__(\"Spanish\"):\n #clf = svm.LinearSVC() 82.0\n #clf = MultinomialNB() 82.2\n #clf = RidgeClassifier() 81.5\n #clf = PassiveAggressiveClassifier() 81.9\n #clf = BernoulliNB() 72.4\n clf = MultinomialNB(alpha=0.50) #0.25:82.6 0.4:83.1 0.45:83.2 0.5: 83.2 0.55:83.2 0.6:82.8 0.75:82.7\n elif language.__eq__(\"Catalan\"):\n #clf = svm.LinearSVC() # 82.8\n #clf = MultinomialNB() # 80.8\n #clf = RidgeClassifier() 82.6\n #clf = svm.LinearSVC(C=1.5) 82.9\n clf = MultinomialNB(alpha=0.25) # 0.5:84.3 0.35:84.6 0.3:84.8 0.25:85.4 0.2:85.3\n else:\n clf = svm.LinearSVC()\n clf.fit(x_list, y_list)\n training_output[lexelt][\"Classifier\"] = clf\n print \"Models trained.\"\n return training_output",
"def load_training():\n for can in candidates:\n trainings[can] = []\n for subdir, dirs, files in os.walk(os.path.join(corpus_dir, can)):\n for doc in files:\n trainings[can].append(doc)",
"def load_data():\n\n \"\"\"The ``training_data`` is returned as a tuple with two entries.\n The first entry contains the actual training images. This is a\n numpy ndarray with 50,000 entries. Each entry is, in turn, a\n numpy ndarray with 784 values, representing the 28 * 28 = 784\n pixels in a single MNIST image.\"\"\"\n\n \"\"\"The second entry in the ``training_data`` tuple is a numpy ndarray\n containing 50,000 entries. Those entries are just the digit\n values (0...9) for the corresponding images contained in the first\n entry of the tuple.\"\"\"\n\n \"\"\"The ``validation_data`` and ``test_data`` are similar, except\n each contains only 10,000 images.\"\"\"\n f = gzip.open('MNIST/data/mnist.pkl.gz', 'rb')\n training_data, validation_data, test_data = Pickle.load(f, encoding='bytes'\n )\n f.close()\n return (training_data, validation_data, test_data)",
"def get_classification_training_data() -> Iterable[Tuple[str, Dict[str, Any]]]:\n return (_create_training_entry(*pair) for pair in TRAINING_DATA) # type: ignore"
] | [
"0.72685313",
"0.6999833",
"0.6843773",
"0.6823378",
"0.6724388",
"0.6672758",
"0.6670386",
"0.6663503",
"0.6590046",
"0.65890056",
"0.6577699",
"0.6511327",
"0.6481927",
"0.6450511",
"0.64183396",
"0.6417053",
"0.6415652",
"0.6405644",
"0.63804287",
"0.6376085",
"0.6344903",
"0.6338779",
"0.6311472",
"0.6276298",
"0.6262592",
"0.6258867",
"0.6251929",
"0.6250866",
"0.6242375",
"0.62412393"
] | 0.8075104 | 0 |
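The load_all document in the record above assumes a load_data(i) helper defined elsewhere in its source repository, with each dictionary value holding an experiment's arrays in the order X, u, L, W, k. The sketch below mocks that helper with random (X, u) arrays purely for illustration, and shows how the resulting dictionary is typically indexed and stacked; this is the same concatenation pattern used by the make_training_set entry in the next record.

import numpy as np

def load_data(i):
    # Hypothetical stand-in for the repository's loader: experiment i as (X, u).
    rng = np.random.default_rng(i)
    return rng.normal(size=(50, 4)), rng.normal(size=(50, 2))

training_data = {i + 1: load_data(i + 1) for i in range(7)}

# Stack experiments 1-3 into a single training set.
X_train = np.concatenate([training_data[i][0] for i in (1, 2, 3)], axis=0)
u_train = np.concatenate([training_data[i][1] for i in (1, 2, 3)], axis=0)
print(X_train.shape, u_train.shape)   # (150, 4) (150, 2)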
compile the training set corresponding to experiments listed in ind_list | def make_training_set(ind_list, training_data):
exp = training_data[ind_list[0]]
X_train = exp[0]
u_train = exp[1]
for i in ind_list[1:]:
exp = training_data[i]
X_train = np.append(X_train, exp[0], axis=0)
u_train = np.append(u_train, exp[1], axis=0)
return X_train, u_train | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def train(self, input_vects):\n \n #Training iterations\n for iter_no in range(self._n_iterations):\n #Train with each vector one by one\n if iter_no % 20 == 0:\n print(iter_no)\n for input_vect in input_vects:\n self._sess.run(self._training_op,\n feed_dict={self._vect_input: input_vect,\n self._iter_input: iter_no})\n \n #Store a centroid grid for easy retrieval later on\n centroid_grid = [[] for i in range(self._m)]\n self._weightages = list(self._sess.run(self._weightage_vects))\n self._locations = list(self._sess.run(self._location_vects))\n for i, loc in enumerate(self._locations):\n centroid_grid[loc[0]].append(self._weightages[i])\n self._centroid_grid = centroid_grid\n \n self._trained = True",
"def train(self, input_vects):\n \n #Training iterations\n for iter_no in range(self._n_iterations):\n print(iter_no)\n if (iter_no % 1==0) & (iter_no>0) :\n \n self.map_plot(iter_no)\n centroid_grid = [[] for i in range(self._m)]\n self._weightages = list(self._sess.run(self._weightage_vects))\n self._locations = list(self._sess.run(self._location_vects))\n \n for i, loc in enumerate(self._locations):\n centroid_grid[loc[0]].append(self._weightages[i])\n self._centroid_grid = centroid_grid \n \n #Train with each vector one by one\n for input_vect in input_vects:\n self._sess.run(self._training_op,\n feed_dict={self._vect_input: input_vect,\n self._iter_input: iter_no})\n print(iter_no)\n self.map_plot(iter_no) \n self._trained = True\n gif.build_gif(imgs, saveto='exoplaneta005s6 .gif')",
"def train(self, absList, modelFilename):\n pass",
"def train_loop(train_per_list, cut_off_list, C_list,\n factors, non_factors, data_path, executable_path, \n trial_factors_list=None): \n if trial_factors_list is None:\n trial_factors_list=[factors]\n sql_table = 'aggregated_ctr' #Data table\n # remove cross terms\n sql_features = list(set(sum([fs.split('*') for fs in factors], [])))\n# factors+=['campaign_id','ad_account_id','pub_account_id', \n# 'campaign_id*site', 'ad*pub_account_id']\n con_dict_dse={'host':'db.lqm.io','db':'dse',\n 'user':'dse','passwd':'dSe@lQm'}\n con_dict_mad={'host':'db.lqm.io','db':'madvertise_production',\n 'user':'readonly','passwd':'z0q909TVZj'}\n \n rtb_flag=[0,1]\n model_type=0\n has_intercept = True # bias term in LR\n tol = 0.00000001\n # NB these filenames are HARDCODED in write_sparse routines\n weights_file = 'train_ais.txt'\n train_file = 'train_svm.txt'\n test_file = 'test_svm.txt'\n probability_file = 'preds_SummModel_py.txt'\n results = []\n for train_per in train_per_list:\n test_per = ( add_hour(train_per[1], 1), add_hour(train_per[1], 3))\n # DATA RANGE IS INCLUSIVE => 00:00-02:00 = 3 HOURS\n train_df=mysql_lqm.MySQL_getdata(con_dict_dse,\n sql_table, train_per, sql_features, rtb_flag)\n train_df=mysql_lqm.add_features( train_df)\n test_df= mysql_lqm.MySQL_getdata(con_dict_dse,\n sql_table, test_per, sql_features, rtb_flag)\n test_df = mysql_lqm.add_features(test_df)\n \n sc, click_no_click_df, weights, targets \\\n = libLinear_functions.create_sparse_cat(train_df, factors, non_factors)\n\n \n for cut_off in cut_off_list:\n sparse_train_all = libLinear_functions.create_sparse(sc, cut_off, click_no_click_df)\n sparse_test_all = sc.transform(test_df)\n for trial_factors in trial_factors_list:\n trial_factors=trial_factors[:] # copy\n trial_factors.sort(key=lambda x: sc.factors.index(x))\n # libsvm expects the indices in ascending order\n print (trial_factors) \n sparse_train=sc.select_factors(sparse_train_all, trial_factors)\n sparse_test=sc.select_factors(sparse_test_all, trial_factors)\n libLinear_functions.write_sparse(sc, sparse_train, weights, targets, data_path, len(trial_factors))\n libLinear_functions.write_sparse_test(sc, sparse_test, data_path, n_columns_used= len(trial_factors))\n\n\n for C in C_list:\n model_file = \\\n '{start}_{stop}_cut_{cut_off}_C_{C:0.3}.model'.format(\n start=date_name(train_per[0]),\n stop=date_name(train_per[1]),\n cut_off=cut_off, C=C)\n fit(executable_path, data_path, train_file,\n model_file, weights_file, model_type, reg_param=C, tol=tol,\n has_intercept=has_intercept)\n \n \n pCTR = libLinear_functions.predict(executable_path, data_path, test_file,\n model_file, probability_file)\n if type(pCTR) is pd.Series:\n amounts = pd.DataFrame({\n 'no_clicks':test_df['instances' ]-test_df['clicks'],\n 'clicks':test_df['clicks']})\n mean_log_loss, weighted_log_loss = log_loss_weighted(pCTR, amounts)\n results.append([train_per[:],trial_factors[:],\n cut_off,C,amounts.clicks.sum(),amounts.no_clicks.sum(), mean_log_loss])\n results_df=pd.DataFrame(results,columns=['date','features','cutoff','C','clicks','no_clicks','lloss'])\n results_df.to_csv(data_path+'resultsX.txt',index=False, sep='|')\n # what to do if ERROR?\n return results_df, weighted_log_loss",
"def run(num_epochs, encoded_dim):\n # for patient_ in get_patient_ids():\n for patient_ in ['16']:\n print(\"Starting on index: \" + str(patient_))\n training_ae(num_epochs, encoded_dim, patient_, True)\n print(\"Completed \" + str(patient_) + \" reconstruction and encoding, saved test data to assess performance\")",
"def train(self, absList, modelFilename):\n pass",
"def compile_train(self, epochs):\n self.model.compile(\n optimizer=Adam(0.001),\n loss=SparseCategoricalCrossentropy(from_logits=True),\n metrics='accuracy'\n )\n\n self.model.fit(\n train_x,\n train_y,\n batch_size=50,\n epochs=epochs,\n validation_data=(val_x, val_y),\n verbose=1\n )",
"def train(self, absList, modelFilename):\n raise NotImplementedError(\"Need to implement train()\")",
"def execute(self):\n for csnn_config in self.__config[\"csnnConfigs\"]:\n csnn_name = csnn_config[\"modelName\"]\n\n try:\n for dataset_config in self.__config[\"datasetConfigs\"]:\n provider = getDatasetProvider(dataset_config)\n if not dataset_config[\"nameOfDataset\"] in csnn_config[\"batchSizes\"].keys():\n continue\n for i in range(0, csnn_config[\"xFoldCrossValidation\"]):\n model_dir = \"/\" + csnn_name + \"/\" + dataset_config[\"nameOfDataset\"] + \"/xFoldCrossVal\" + str(i)\n self.__logger.info(\"Starting to train: \" + model_dir, \"CsnnVisualizationExperiment:execute\")\n\n if csnn_config[\"xFoldCrossValidation\"] <= 1:\n xseed = None\n else:\n xseed = 42 + i\n\n dataset, dataset_generator = prepareDataset(provider, dataset_config, xfold_seed=xseed,\n augment_data=csnn_config[\"augmentData\"])\n\n dataset_max_div, _ = prepareDataset(provider, dataset_config, xfold_seed=xseed,\n augment_data=csnn_config[\"augmentData\"],\n normalize_data=\"maxDiv\")\n\n self.__logger.info(\"Starting to create dataset encoding with: \" + model_dir,\n \"CsnnVisualizationExperiment:execute\")\n encoding_provider, encoding = prepareEncoding(csnn_config, dataset_generator, dataset,\n dataset_config, csnn_name, self.__num_gpus,\n model_dir + \"/Csnn\", zero_mean_unit_variance=\n csnn_config[\"zeroMeanUnitVarianceEncoding\"],\n return_with_encoding=dataset_max_div)\n\n self.__logger.info(\"Finished to create dataset encoding with: \" + model_dir,\n \"CsnnVisualizationExperiment:execute\")\n\n self.__logger.info(\"Starting to create mean activities for: \" + model_dir,\n \"CsnnVisualizationExperiment:execute\")\n enc_for_label = {}\n for i in range(0, len(encoding[\"y_test\"])):\n enc_for_label.setdefault(np.argmax(encoding[\"y_test\"][i]), []).append(encoding[\"x_test\"][i])\n\n mean_ecs_for_label = []\n for key, value in sorted(enc_for_label.items()):\n mean_ec_for_label = np.mean(np.mean(np.mean(value, axis=0), axis=0), axis=0)\n grid = csnn_config[\"layers\"][-1][\"somGrid\"]\n mean_ec_for_label = np.reshape(mean_ec_for_label, [grid[0], grid[1], grid[2]])\n mean_ecs_for_label.append(mean_ec_for_label)\n\n # Dir to save and reload model.\n save_path = os.path.dirname(\n sys.modules['__main__'].__file__) + \"/experimentResults\" + model_dir\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n np.save(save_path + \"/test_mean_acts\", np.array(mean_ecs_for_label))\n self.__logger.info(\"Finised to create mean activities for: \" + model_dir,\n \"CsnnVisualizationExperiment:execute\")\n\n self.__logger.info(\"Starting to create mean sub activities for: \" + model_dir,\n \"CsnnVisualizationExperiment:execute\")\n sub_encs = []\n for enc in mean_ecs_for_label:\n sub_encs_for_label = []\n for kenc2 in mean_ecs_for_label:\n sub_enc = enc - kenc2\n sub_encs_for_label.append(sub_enc)\n sub_encs.append(sub_encs_for_label)\n\n np.save(save_path + \"/test_sub_mean_acts\", np.array(sub_encs))\n self.__logger.info(\"Finished to create mean sub activities for: \" + model_dir,\n \"CsnnVisualizationExperiment:execute\")\n\n except Exception:\n print(traceback.format_exc())",
"def create_training_examples(statement_list: List[List[str]], trees, training_spans,\n training: bool, max_span_length, num_sentence_words,\n args, pos_tags, constituents, k=1):\n print('building features...')\n\n word_frequencies = FreqDist([w.lower() for w in reuters.words()])\n # list of science tokens & science multiword expressions\n science_tokens = get_science_terms(args.data_path)\n science_expressions = get_science_terms(args.data_path, False)\n stop_words = set(stopwords.words('english'))\n\n\n\n true_examples, false_examples, examples_per_sentence = [], [], []\n span_indexes = []\n\n # loop over all statements\n for i, statement in enumerate(statement_list):\n print(i)\n\n span_index = [] #enumerating all spans for this sentence.\n pos_tags_this_statement = pos_tag(statement)\n sentence_word_frequencies = [word_frequencies.freq(token.lower()) \\\n for token in statement]\n tree = trees[i]\n false_examples_this_instance = []\n sentence_candidate_span_examples = [] # when training.\n\n # loop across different spans for given sentence\n for span in legal_spans(num_sentence_words, max_span_length): #globally legal\n if span[1] > len(statement):\n continue\n if span[0] > len(statement):\n break\n span_index.append(span)\n\n # extract scalar features for this span. [position will not be used]\n f_bias = 1\n f_length = span[1] - span[0]\n #f_begin = span[0]\n #f_end = span[1]\n #f_dist_to_end0 = len(statement) - span[0]\n #f_dist_to_end1 = len(statement) - span[1]\n\n # list of tokens of this span\n span_tokens = statement[span[0]: span[1]]\n\n # feature: span contains at least one science token\n f_science_token = bool( \\\n set(span_tokens).intersection(science_expressions))\n\n\n\n f_science_token_count = 0 # counting # of science tokens in span\n max_token_length = 0 # in this span.\n for token in span_tokens:\n f_science_token_count += int(token in science_tokens)\n max_token_length = max(max_token_length, len(token))\n\n f_max_token_length = np.log(max_token_length)\n\n # feature: relative word frequency average\n # with numerical stability/ avoiding -inf\n f_avg_word_frequency = 1e-10+np.mean(sentence_word_frequencies[span[0]: span[1]])\n f_avg_word_frequency = np.log(f_avg_word_frequency)\n\n # feature: begin with stop word?\n f_stop_word_begin = bool(span_tokens[0] in stop_words)\n\n # POS indicator (one-hot)\n f_pos = np.zeros([len(pos_tags)])\n\n # Bag-of-POS-tags for this span.\n for token, tag in pos_tags_this_statement[span[0]:span[1]]:\n f_pos[pos_tags.index(tag)] += 1.0\n\n # feature: POS indicator for span beginning\n f_pos_beginning = np.zeros([len(pos_tags)])\n f_pos_beginning[pos_tags.index(pos_tags_this_statement[span[0]][1])] = 1.0\n\n # feature: POS indicator for span end\n f_pos_end = np.zeros([len(pos_tags)])\n f_pos_end[pos_tags.index(pos_tags_this_statement[span[1]-1][1])] = 1.0\n\n # feature: POS bigram indicator\n # define extended POS tag set with additional begin and end symbols for bigrams.\n # pos_tags_bigram = pos_tags + [\"POS_BEGIN\", \"POS_END\"]\n\n # for POS bigrams.\n # pos_tags_square = [x for x in product(pos_tags_bigram, pos_tags_bigram)]\n\n # f_pos_bigram = np.zeros([len(pos_tags_square)])\n\n # obtaining the POS bigram\n # for position in range(-1, f_length):\n # boundary cases: start of span and end of span.\n # if position == -1:\n # tag1 = 'POS_BEGIN'\n # _, tag2 = pos_tags_this_statement[span[0]]\n # elif position == f_length -1:\n # _, tag1 = pos_tags_this_statement[span[0]+position]\n # tag2 = 'POS_END'\n # #normal case: inside 
span.\n # else:\n # _, tag1 = pos_tags_this_statement[span[0] + position]\n # _, tag2 = pos_tags_this_statement[span[0] + position + 1]\n #\n # f_pos_bigram[pos_tags_square.index( ( tag1, tag2 ) )] += 1.0\n\n # constituent tree features\n\n tree_position = tree.treeposition_spanning_leaves(span[0], span[1])\n\n # smallest subtree in constituent parse, containing this span.\n smallest_subtree = tree[tree_position[:-1]]\n constituent_tag = smallest_subtree.label()\n\n # feature: is this span a constituent parse subtree span?\n f_span_match = bool(span[1]-span[0] == len(smallest_subtree))\n\n # constituency parse label indicator\n f_span_constituent = np.zeros([len(constituents)])\n f_span_constituent[constituents.index(constituent_tag)] = 1.0\n\n # constituency parse label indicator with indication for large spans.\n f_span_constituent_big = np.zeros([len(constituents)])\n f_span_constituent_big[constituents.index(constituent_tag)] = (f_length > 2)\n\n\n # leave out position features:\n #### f_begin, f_end, f_dist_to_end0, f_dist_to_end1,\n\n #now collect all features:\n f_scalars = np.array([f_bias, f_span_match, f_length,\n f_science_token, f_avg_word_frequency,\n f_stop_word_begin,\n f_max_token_length,\n f_science_token_count])\n\n # these are all features for this span, in a np array.\n feature_vector = np.concatenate((f_scalars, f_pos, f_pos_beginning,\n f_pos_end, f_span_constituent,\n f_span_constituent_big))\n\n\n\n # provide True/False annotation in case the data is used for training.\n if training:\n if span == training_spans[i]:\n #positive example\n true_examples.append(feature_vector)\n sentence_candidate_span_examples.append((feature_vector, True))\n else:\n #negative example\n false_examples_this_instance.append(feature_vector)\n sentence_candidate_span_examples.append((feature_vector, False))\n else:\n sentence_candidate_span_examples.append(feature_vector)\n\n span_indexes.append(span_index)\n examples_per_sentence.append(sentence_candidate_span_examples)\n\n\n # select at random k negative spans as training examples. default 1:1\n if training:\n for random_index in np.random.randint(0, len(false_examples_this_instance), k):\n false_examples.append(false_examples_this_instance[random_index])\n\n print(len(true_examples), 'True span examples.')\n print(len(false_examples), 'False span examples.')\n\n # collect true and false examples [inputs]\n all_examples = np.concatenate((np.asarray(false_examples), np.asarray(true_examples)))\n\n # collect annotations for each example (True/False target outputs)\n false_span_labels = np.zeros([len(false_examples)])\n true_span_labels = np.ones([len(true_examples)])\n all_labels = np.concatenate((false_span_labels, true_span_labels))\n\n return all_examples, all_labels, examples_per_sentence, span_indexes",
"def train(self):\n acc_time = []\n data_test = self.val_data[0][0][0]\n labels_test = self.val_data[0][0][1]\n for i, train_batch in enumerate(self.dataset):\n \n writerDIM = SummaryWriter('runs/experiment_DIM'+str(i))\n data,labels, t = train_batch\n\n index_tr,index_cv,coreset = data_split(data.shape[0],777)\n\n # adding eventual replay patterns to the current batch\n if i == 0:\n ext_mem = [data[coreset], labels[coreset]]\n dataC = np.concatenate((data[index_tr], data[index_cv]),axis=0)\n labC = np.concatenate((labels[index_tr],labels[index_cv]),axis=0)\n else:\n dataP = ext_mem[0]\n labP = ext_mem[1]\n\n ext_mem = [\n np.concatenate((data[coreset], ext_mem[0])),\n np.concatenate((labels[coreset], ext_mem[1]))]\n if self.replay:\n dataC = np.concatenate((data[index_tr], data[index_cv],dataP),axis=0)\n labC = np.concatenate((labels[index_tr],labels[index_cv],labP),axis=0)\n else:\n dataC = np.concatenate((data[index_tr], data[index_cv]),axis=0)\n labC = np.concatenate((labels[index_tr],labels[index_cv]),axis=0)\n\n\n\n print(\"----------- batch {0} -------------\".format(i))\n print(\"Task Label: \", t)\n trC,cvC = data_split_Tr_CV(dataC.shape[0],777)\n\n train_set = LoadDataset(dataC,labC,transform=self.tr,indices=trC)\n val_set = LoadDataset(dataC,labC,transform=self.tr,indices=cvC)\n print('Training set: {0} \\nValidation Set {1}'.format(train_set.__len__(),val_set.__len__()))\n batch_size=32\n train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True)\n valid_loader = DataLoader(val_set, batch_size=batch_size, shuffle=False)\n dataloaders = {'train':train_loader,'val':valid_loader}\n \n ####### Set hyperparameters for the training\n if i ==0: \n prior = False\n ep=40\n dim_model = DIM_model(batch_s=32,num_classes =128,feature=True) \n dim_model.to(self.device)\n classifierM = _classifier(n_input=128,n_class=50,n_neurons=[256,256,128])\n classifierM = classifierM.to(self.device)\n writer = SummaryWriter('runs/experiment_C'+str(i))\n lr_new = 0.00001\n lrC=0.0001\n \n else:\n prior = True\n ep=6\n \n lr_new =0.000005\n lrC = 0.00005\n\n optimizer = torch.optim.Adam(dim_model.parameters(),lr=lr_new)\n scheduler = lr_scheduler.StepLR(optimizer,step_size=40,gamma=0.1) #there is also MultiStepLR\n\n tr_dict_enc = {'ep':ep,'writer':writerDIM,'best_loss':1e10,'t_board':True,\n 'gamma':.5,'beta':.5,'Prior_Flag':prior,'discriminator':classifierM} \n tr_dict_cl = {'ep':40,'writer':writer,'best_loss':1e10,'t_board':True,'gamma':1}#40\n\n if i==0 and self.load:\n print('Load DIM model weights first step')\n dim_model.load_state_dict(torch.load(self.path + 'weights/weightsDIM_T0.pt'))\n else:\n ############################## Train Encoder########################################\n dim_model,self.stats = trainEnc_MI(self.stats,dim_model, optimizer, scheduler,dataloaders,self.device,tr_dict_enc)\n ####################################################################################\n if i==0:\n torch.save(dim_model.state_dict(), self.path + 'weights/weightsDIM_T'+str(i)+'.pt')\n\n ####\n #Conversion of image into latent space representation for classifier training\n ####\n dim_model.requires_grad_(False)\n for phase in ['train','val']:\n dataF = None\n labF = None\n for inputs, labels in dataloaders[phase]:\n torch.cuda.empty_cache()\n if len(inputs.shape)==5:\n\n inputs = inputs[:,:,:,:,0].to(self.device)\n else:\n inputs = inputs.to(self.device)\n\n _,_,pred = dim_model(inputs)\n pred_l = pred.data.cpu().numpy()\n if dataF is None:\n dataF = pred_l\n labF = 
labels.data.cpu().numpy()\n else:\n dataF = np.concatenate((dataF,pred_l),axis=0)\n labF = np.concatenate((labF,labels.data.cpu().numpy()),axis=0)\n\n if phase == 'train':\n dataTr_f = dataF\n labTr_f = labF\n else:\n dataCv_f = dataF\n labCv_f = labF\n \n dim_model.requires_grad_(True)\n train_set = LoadFeat(dataTr_f,labTr_f)\n val_set = LoadFeat(dataCv_f,labCv_f)\n batch_size=32\n\n train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True)\n valid_loader = DataLoader(val_set, batch_size=batch_size, shuffle=False)\n dataloaderC = {'train':train_loader,'val':valid_loader}\n\n optimizerC = torch.optim.Adam(classifierM.parameters(),lr=lrC)\n schedulerC = lr_scheduler.StepLR(optimizerC,step_size=40,gamma=0.1)\n classifierM.requires_grad_(True)\n\n ############################## Train Classifier ########################################\n classifierM,self.stats = train_classifier(self.stats,classifierM, optimizerC, schedulerC,dataloaderC,self.device,tr_dict_cl) \n #################################### #################################### ##############\n\n torch.save(classifierM.state_dict(), self.path + 'weights/weightsC_T'+str(i)+'.pt')\n dim_model.eval()\n classifierM.eval()\n #### Cross_val Testing\n \n test_set = LoadDataset(data_test,labels_test,transform=self.trT)\n batch_size=32\n test_loader = DataLoader(test_set, batch_size=batch_size, shuffle=False)\n score= []\n\n for inputs, labels in test_loader:\n torch.cuda.empty_cache()\n inputs = inputs.to(self.device)\n labels = labels.to(self.device) \n _,_,ww =dim_model(inputs)\n pred = classifierM(ww)\n pred_l = pred.data.cpu().numpy()\n score.append(np.sum(np.argmax(pred_l,axis=1)==labels.data.cpu().numpy())/pred_l.shape[0])\n print('TEST PERFORMANCES:', np.asarray(score).mean())\n acc_time.append(np.asarray(score).mean())\n del test_set,test_loader\n \n self.dim_model = dim_model\n self.classifierM = classifierM\n acc_time = np.asarray(acc_time)\n return self.stats,acc_time",
"def train_candidates(nb_layers = 4, nb_neurons = 300, p_drop = 0.15, nb_exp = 50):\n\n print(\"Calculation will be performed on {}\".format(device))\n \n # custom data loader, automatically sent to device\n ds = imelt.data_loader()\n\n for i in range(nb_exp):\n\n print(\"Training model {}\".format(i))\n print(\"...\\n\")\n name = \"./model/candidates/l\"+str(nb_layers)+\"_n\"+str(nb_neurons)+\"_p\"+str(p_drop)+\"_m\"+str(i)+\".pth\"\n\n # declaring model\n neuralmodel = imelt.model(ds.x_visco_train.shape[1],nb_neurons,nb_layers,ds.nb_channels_raman,\n activation_function = torch.nn.GELU(), p_drop=p_drop\n )\n\n # criterion for match\n criterion = torch.nn.MSELoss(reduction='mean')\n criterion.to(device) # sending criterion on device\n\n # we initialize the output bias and send the neural net on device\n neuralmodel.output_bias_init()\n neuralmodel = neuralmodel.float()\n neuralmodel.to(device)\n\n optimizer = torch.optim.Adam(neuralmodel.parameters(), lr = 0.0003, weight_decay=0.00) # optimizer\n neuralmodel, record_train_loss, record_valid_loss = imelt.training(neuralmodel, ds, criterion, optimizer, \n save_switch=True, save_name=name, nb_folds=1, train_patience=400, min_delta=0.05, verbose=True)",
"def main(params):\n params = run_train.prepare_experiment_folder(params, FOLDER_EXPERIMENT)\n\n # run_train.check_pathes_patterns(paths)\n tl_expt.set_experiment_logger(params['path_expt'])\n logging.info('COMPUTER: \\n%r', platform.uname())\n logging.info(tl_expt.string_dict(params, desc='PARAMETERS'))\n\n tl_expt.create_subfolders(params['path_expt'], LIST_SUBFOLDER)\n\n path_csv = os.path.join(params['path_expt'], NAME_CSV_TRIPLES)\n df_paths = get_csv_triplets(\n params['path_list'], path_csv, params['path_images'], params['path_segms'], force_reload=FORCE_RERUN\n )\n\n dict_classif = seg_clf.load_classifier(params['path_classif'])\n params_clf = dict_classif['params']\n params_clf.update(params)\n logging.info(tl_expt.string_dict(params, desc='UPDATED PARAMETERS'))\n\n # perform on new images\n df_stat = pd.DataFrame()\n _wrapper_detection = partial(\n load_compute_detect_centers,\n params=params_clf,\n path_classif=params['path_classif'],\n path_output=params['path_expt'],\n )\n iterate = tl_expt.WrapExecuteSequence(_wrapper_detection, df_paths.iterrows(), nb_workers=params['nb_workers'])\n for dict_center in iterate:\n df_stat = df_stat.append(dict_center, ignore_index=True)\n df_stat.to_csv(os.path.join(params['path_expt'], NAME_CSV_TRIPLES_TEMP))\n\n df_stat.set_index(['image'], inplace=True)\n df_stat.to_csv(os.path.join(params['path_expt'], NAME_CSV_TRIPLES))\n logging.info('STATISTIC: \\n %r', df_stat.describe())",
"def run_sweep(nets, critics, loss_fns, exp_name, **kwargs):\n grid = itertools.product(nets, critics, loss_fns)\n data_frames = []\n results_with_singular_values = []\n for nets_name, critic_name, loss_name in grid:\n print(\"[New experiment] encoder: {}, critic: {}, loss: {}\".format(\n nets_name, critic_name, loss_name))\n with tf.Graph().as_default():\n g1, g2 = nets[nets_name]()\n critic = critics[critic_name]()\n loss_fn = loss_fns[loss_name]\n results_per_run = []\n for n in range(NRUNS):\n try:\n print(\"{:d}th run, loss: {}\".format(n, loss_name))\n if loss_name == \"drfc\" and TFDS_NAME == \"cifar10\":\n results = train(g1, g2, critic, loss_fn, **kwargs, learning_rate=LEARNING_RATE, n_iter=n, loss_name=loss_name)\n #results = train(g1, g2, critic, loss_fn, **kwargs, learning_rate=1e-4, n_iter=n, loss_name=loss_name)\n else:\n results = train(g1, g2, critic, loss_fn, **kwargs, learning_rate=LEARNING_RATE, n_iter=n, loss_name=loss_name)\n results_per_run.append(results)\n except Exception as ex:\n print(\"Run {} failed! Error: {}\".format(n, ex))\n for i, result in enumerate(results_per_run):\n data_frames.append(convert_to_data_frame(\n result, exp_name, nets_name, critic_name, loss_name, i))\n if kwargs.get('compute_jacobian', False):\n results_with_singular_values.append((\n ResultsConfig(nets_name, critic_name, loss_name), results_per_run\n ))\n\n return {\n \"df\": pd.concat(data_frames),\n \"singular_values\": results_with_singular_values\n }",
"def train(self):\n\n # Step 1 - Obtain optimized weights for final model ------------------------------------------------------------\n\n t0 = time()\n\n # Check the training data for potential hazardous problems\n self.check_training_samples()\n\n opt_results = pd.DataFrame()\n kf_opt = StratifiedKFold(n_splits=self.kfold_cv, shuffle=True)\n rep_str, opt_str = '', ''\n\n if self.verbose:\n print('\\n\\n__ TRAINING STEP 1/2 \\_______________________________')\n print(' \\ Train with reverse %d-fold CV - %d time(s) /\\n' % (self.kfold_cv, self.n_repeat))\n\n for i_rep in range(self.n_repeat):\n\n if self.verbose:\n rep_str = '\\n_/--- Rep %d/%d' % (i_rep + 1, self.n_repeat)\n\n # Sample clf-net parameters to test\n param = [\n np.random.normal(loc=self.n_estimators,\n scale=self.n_estimators*self.param_tune_scale,\n size=self.kfold_cv),\n np.random.normal(loc=self.min_impurity_decrease,\n scale=self.min_impurity_decrease*self.param_tune_scale,\n size=self.kfold_cv),\n np.random.normal(loc=self.min_sample_leaf,\n scale=np.ceil(self.min_sample_leaf*self.param_tune_scale),\n size=self.kfold_cv),\n ]\n scores = list()\n\n for j_fold, (opt_idxs, cv_train_idxs) in enumerate(kf_opt.split(\n X=self.datas[self.train_idx].nidx_train,\n y=self.datas[self.train_idx].gen_labels(condense_labels=True))):\n\n if self.verbose:\n print(rep_str + ' - CV %d/%d ---\\_____\\n' % (j_fold + 1, self.kfold_cv))\n\n # set clf-net parameters\n self.n_estimators = param[0][j_fold]\n self.min_impurity_decrease = param[1][j_fold]\n self.min_sample_leaf = param[2][j_fold]\n self.clf_net = self.gen_rfc()\n\n # Split data\n opt_nidxs = np.array([self.datas[self.train_idx].nidx_train[i] for i in opt_idxs])\n cv_train_nidxs = np.array([self.datas[self.train_idx].nidx_train[i] for i in cv_train_idxs])\n\n # Partition train/eval nidx for reverse k-fold CV training\n _, _, opt_eval_nidxs, opt_train_nidxs = train_test_split(\n np.zeros(len(opt_nidxs)),\n opt_nidxs,\n test_size=1/(self.kfold_cv - 1),\n shuffle=True,\n stratify=self.datas[self.train_idx].gen_labels(nidxs=opt_nidxs, condense_labels=True))\n\n # Train clfs\n if self.verbose:\n print('\\n> Training base classifiers ...')\n self._train_clfs(train_nidxs=cv_train_nidxs)\n\n # Evaluate train with cv_train data\n if self.verbose:\n print('\\n> Evaluating base classifiers with cv_train partition ...')\n self.clfs_predict(nidxs_target=cv_train_nidxs, data=self.datas[self.train_idx], to_eval=True,\n eval_idx=self.train_idx)\n\n # Evaluate pre-optimization with opt_train data\n if self.verbose:\n print('\\n> Evaluating base classifiers with cv_eval partition ...')\n cv_res = self.clfs_predict(nidxs_target=opt_train_nidxs, data=self.datas[self.train_idx], to_eval=True,\n nidxs_train=cv_train_nidxs, eval_idx=self.train_idx)\n\n # Train clf-opt with opt_train partition results\n if self.verbose:\n print('\\n> Training clf-opt ...')\n self._train_clf_opt(predictions=cv_res)\n\n # Evaluate clf-opt with opt_eval partition\n if self.verbose:\n print('\\n> Evaluating optimized classifier with opt_test partition ...')\n opt_res = self.clfs_predict(nidxs_target=opt_eval_nidxs, data=self.datas[self.train_idx], to_eval=True,\n nidxs_train=cv_train_nidxs, eval_idx=self.train_idx)\n opt_results = opt_results.append(opt_res, ignore_index=True)\n\n # Append score to optimize clf-net parameter\n r = self.scores(opt_res['ytruth'], opt_res['ynet'])\n if not self.aim:\n scores.append(r['aucroc'])\n else:\n aim = self.aim.replace('hard', '')\n scores.append(r[aim])\n\n # reset link2featidx\n 
self.datas[self.train_idx].link2featidx = {}\n\n # Aggregate results from clf-net parameter search\n self._set_clf_net_param(param, scores)\n\n # STEP 2 - Train final model -----------------------------------------------------------------------------------\n # .clf_opt is already trained through previous iterations by using warm_start\n\n if self.verbose:\n print('\\n__ TRAINING STEP 2/2 \\_______________________________')\n print(' \\ Train final model with all train data /\\n')\n\n # Train clfs with all the data\n self._train_clfs()\n\n # Evaluate final clf-opt with all data\n print('\\n> Evaluating final classifier ...')\n self.clfs_predict(nidxs_target=self.datas[self.train_idx].nidx_train, to_eval=True, eval_idx=self.train_idx)\n print('** Because this is evaluating with the training data, classifier performances should be very high.')\n\n # Assign model ID - this is here so that if retrained, it would be known that it is not the same model anymore\n self.id = 'm_%s' % gen_id()\n\n if self.verbose:\n te = (time() - t0) / 60\n print('\\n Training took %.1f minutes on %d processors' % (te, os.cpu_count()))\n print('\\n__ __________')\n print(' \\ Training complete! /\\n')\n\n return opt_results",
"def train(self, absList, modelfilename):\n raise NotImplementedError(\"Need to implement train()\")",
"def train_all(X_train_fuse, Y_train, X_dev_fuse, Y_dev, R_train, R_dev, hyperparams):",
"def main(args, base_dir):\n for i in range(args.n_training):\n # value of the next seed\n seed = args.seed + i\n\n # The time when the current experiment started.\n now = strftime(\"%Y-%m-%d-%H:%M:%S\")\n\n # Create a save directory folder (if it doesn't exist).\n if args.log_dir is not None:\n dir_name = args.log_dir\n else:\n dir_name = os.path.join(base_dir, '{}/{}'.format(\n args.env_name, now))\n ensure_dir(dir_name)\n\n # Get the policy class.\n if args.alg == \"TD3\":\n from hbaselines.multiagent.td3 import MultiFeedForwardPolicy\n elif args.alg == \"SAC\":\n from hbaselines.multiagent.sac import MultiFeedForwardPolicy\n elif args.alg == \"PPO\":\n from hbaselines.multiagent.ppo import MultiFeedForwardPolicy\n elif args.alg == \"TRPO\":\n from hbaselines.multiagent.trpo import MultiFeedForwardPolicy\n else:\n raise ValueError(\"Unknown algorithm: {}\".format(args.alg))\n\n # Get the hyperparameters.\n hp = get_hyperparameters(args, MultiFeedForwardPolicy)\n\n # add the seed for logging purposes\n params_with_extra = hp.copy()\n params_with_extra['seed'] = seed\n params_with_extra['env_name'] = args.env_name\n params_with_extra['policy_name'] = \"MultiFeedForwardPolicy\"\n params_with_extra['algorithm'] = args.alg\n params_with_extra['date/time'] = now\n\n # Add the hyperparameters to the folder.\n with open(os.path.join(dir_name, 'hyperparameters.json'), 'w') as f:\n json.dump(params_with_extra, f, sort_keys=True, indent=4)\n\n run_exp(\n env=args.env_name,\n policy=MultiFeedForwardPolicy,\n hp=hp,\n dir_name=dir_name,\n evaluate=args.evaluate,\n seed=seed,\n eval_interval=args.eval_interval,\n log_interval=args.log_interval,\n save_interval=args.save_interval,\n initial_exploration_steps=args.initial_exploration_steps,\n ckpt_path=args.ckpt_path,\n )",
"def experiment(**config):\n from ..training.train import training\n \n training(config)",
"def train_classifiers(params):\n # Create result dataframe\n out = pd.DataFrame(\n columns=[\"Dataset\", \"Classifier\", \"Accuracy\", \"F1\", \"Precision\", \"Recall\"])\n\n for model_type, all_languages in params.items():\n print(\"Classifier: \", str(model_type))\n\n for language, all_targets in all_languages.items():\n print(language)\n for target, model_params in all_targets.items():\n print(target)\n print(model_params)\n\n datasets = sample_datasets(\n language, target, SAMPLING, TFIDF, model_params['top_k_words'], SUB_SAMPLE_RERUNS)\n\n # Iterate the datasets\n for data_id, dataset in enumerate(datasets):\n dataset_name = dataset[0]\n data = dataset[1]\n y = np.array(dataset[2])\n val_data = dataset[3]\n val_y = np.array(dataset[4])\n\n acc_scores = []\n pre_scores = []\n rec_scores = []\n f1_scores = []\n \n global X_train\n X_train, X_test = data, val_data\n y_train, y_test = y, val_y\n y_pred = None\n\n # Create model instance.\n model = mlp_model(layers=model_params['hidden_layers'], units=model_params['hidden_units'], dropout_rate=model_params['dropout_rate'],\n input_shape=X_train.shape[1:], num_classes=2)\n optimizer = tf.keras.optimizers.Adam(\n lr=model_params['learning_rate'])\n model.compile(optimizer=optimizer,\n loss='binary_crossentropy', metrics=['acc'])\n\n # Stop training is validation loss doesnt decrease for 3 steps\n callbacks = [tf.keras.callbacks.EarlyStopping(\n monitor='val_loss', patience=3)]\n\n # Train and validate model.\n history = model.fit(\n X_train,\n y_train,\n epochs=model_params['epochs'],\n callbacks=callbacks,\n validation_data=(X_test, y_test),\n verbose=0,\n batch_size=512)\n\n acc_scores.append(\n history.history['val_acc'][-1])\n y_pred = [round(a[0])\n for a in model.predict(X_test)]\n\n # Compute the results\n prfs = precision_recall_fscore_support(\n y_test, y_pred, warn_for=[])\n\n pre_scores.append(prfs[0].mean())\n rec_scores.append(prfs[1].mean())\n f1_scores.append(prfs[2].mean())\n\n # Append average scores\n clf_acc = np.array(acc_scores).mean()\n clf_pre = np.array(pre_scores).mean()\n clf_rec = np.array(rec_scores).mean()\n clf_f1 = np.array(f1_scores).mean()\n\n out = out.append(pd.DataFrame(\n [[dataset_name, model_type, clf_acc, clf_f1, clf_pre, clf_rec]], columns=out.columns), ignore_index=True)\n\n return out",
"def train_model(self, X, y, nlist=None, n_jobs=None):\n self._X = X\n self._y = y\n if nlist is None:\n nlist = self.nlist\n else:\n self.nlist = nlist\n # Result dataframes\n self._df_train = pd.DataFrame()\n self._df_test = pd.DataFrame()\n with tqdm(total=len(self.simulations)*len(nlist)) as pb:\n for seed in self.simulations:\n ds = train_test_split(X, y, test_size=self.test_size,\n random_state=seed)\n pb.set_description(f'Iter: {seed + 1}')\n X_train = ds[0]\n X_test = ds[1]\n y_train = ds[2]\n y_test = ds[3]\n accuracy_train = []\n accuracy_test = []\n for n in nlist:\n # Create KNN classifier for each n\n cls = KNeighborsClassifier(n,\n n_jobs=-1\n ).fit(X_train, y_train)\n # Store result of each n\n accuracy_train.append(cls.score(X_train, y_train))\n accuracy_test.append(cls.score(X_test, y_test))\n pb.update(1)\n\n # Store result for each simulation\n self._df_train[seed] = accuracy_train\n self._df_test[seed] = accuracy_test\n self._df_train.index = nlist\n self._df_test.index = nlist\n return self._df_train, self._df_test",
"def train():\n ### DO NOT CHANGE SEEDS!\n # Set the random seeds for reproducibility\n np.random.seed(42)\n\n ## Prepare all functions\n # Get number of units in each hidden layer specified in the string such as 100,100\n if FLAGS.dnn_hidden_units:\n dnn_hidden_units = FLAGS.dnn_hidden_units.split(\",\")\n dnn_hidden_units = [int(dnn_hidden_unit_) for dnn_hidden_unit_ in dnn_hidden_units]\n else:\n dnn_hidden_units = []\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n\n model = MLP(n_hidden=dnn_hidden_units,n_classes=10,batch_size=FLAGS.batch_size, input_dim=32*32*3, \n weight_decay=FLAGS.weight_reg_strength, weight_scale=FLAGS.weight_init_scale)\n\n Datasets = utils.get_cifar10(data_dir = DATA_DIR_DEFAULT, one_hot = True, validation_size = 0)\n \n for i in range(1500): #(FLAGS.max_steps):\n train_batch = Datasets.train.next_batch(batch_size = FLAGS.batch_size)\n #Get the model output\n logits = model.inference(x=train_batch[0].reshape([FLAGS.batch_size,32*32*3]))\n #Get the loss and let the model set the loss derivative.\n loss = model.loss(logits=logits, labels=train_batch[1])\n #Perform training step\n model.train_step(loss=loss, flags=FLAGS)\n\n #Every 100th iteratin print accuracy on the whole test set.\n if i % 100 == 0:\n # for layer in model.layers:\n test_batch = Datasets.test.next_batch(batch_size = 200) #Datasets.test.num_examples\n logits = model.inference(x=test_batch[0].reshape([200,32*32*3]))\n print('-- Step: ', i, \" accuracy: \",model.accuracy(logits=logits,labels=test_batch[1]),'loss', loss )\n\n ########################\n # END OF YOUR CODE #\n #######################",
"def _run(self, index_list: List[np.ndarray]) -> Iterator[XData]:\n da_it = task_list(index_list, IdReader(), self.worker, self.nworkers)\n xdata_it = (dataarrays_to_xdata(d, self.meta) for d in da_it)\n return xdata_it",
"def train_all_individual_models(dropout=0.998, hidden_layers=27, verbosity=2):\n for i, unit in enumerate(UNITS):\n print(\"Training the model for {} ({}/{})\".format(unit, i+1, len(UNITS)))\n train_model(load_enrolment_matrix(unit, from_pickle=True), dropout, hidden_layers, verbosity, save=unit)",
"def training(self) -> None:\n self.compile_model()\n self.train_epoch()\n self.agent.save()",
"def eval_list(self, exps: ExperimentList):\n for e in exps:\n self.eval(e)",
"def train_build(df):\n print(\"Constructing training set...\")\n recent_labels = pr.labels.get_last_keypresses() #List of strings\n labeled_df = pr.labels.apply_labels_all(df, recent_labels)\n X, y = pr.build_model.make_training_set(labeled_df)\n\n return X, y",
"def train(self, trainFilenames):\n\n\t\tstartIndex = len(self.documents)\n\t\tendIndex = startIndex + len(trainFilenames)\n\t\tself.documents += trainFilenames\n\n\t\tX = [[i] for i in range(startIndex, endIndex)]\n\t\tY = [isAroused(f) for f in trainFilenames]\n\n\t\tself.knn.fit(np.array(X), np.array(Y))",
"def run_experiments() :\n #%%\n target_size=(32,32)\n g_specs = {\n \"batch_size\" : [ 30 , 60, 100 ],\n \"learning_rate\" : [ 0.0002, 0.0003, 0.0005 ],\n \"drop_out_rate\" : [ 0.2, 0.25, 0.3 ],\n \"rescale_mode\" : [ \"max_q\" , \"max\", \"\" ]\n }\n\n model_traits = MODEL_TRAITS[\"model2\"].copy()\n tt_obj = model_traits[\"trainer_tester_class\"]( model_traits )\n del model_traits[\"trainer_tester_class\"]\n\n cnt = 0\n for batchs, lrate, do_rate, resc_mode in product( g_specs[\"batch_size\"],\n g_specs[\"learning_rate\"],\n g_specs[\"drop_out_rate\"],\n g_specs[\"rescale_mode\"] ) :\n\n tt_obj.model_traits.update( {\"batch_size\" : batchs,\n \"learning_rate\" : lrate,\n \"rescale_mode\" : resc_mode,\n \"drop_out_rate\" : do_rate } )\n\n train_4d, train_gt = tu.make_4d_arrays( images_dir=\"images/train\",\n target_size=target_size )\n\n test_4d, test_gt = tu.make_4d_arrays( images_dir=\"images/test\",\n target_size=target_size )\n\n data = {\"train_4d\" : train_4d,\n \"test_4d\" : test_4d,\n \"train_y\" : train_gt,\n \"test_y\" : test_gt}\n\n valid_accu_log, train_accu_log = tt_obj.train( model_traits, data,\n logl=100 )\n idx_v = int(np.argmax( valid_accu_log))\n idx_t = int(np.argmax( train_accu_log))\n\n model_traits.update({\"valid_accu_log\" : valid_accu_log,\n \"train_accu_log\" : train_accu_log,\n \"best_valid\" : max(valid_accu_log),\n \"best_valid_at\" : idx_v,\n \"train_at_best_valid\" : train_accu_log[idx_v],\n \"best_train\" : max(train_accu_log),\n \"best_train_at\": idx_t })\n\n #print(cnt, pformat(model_traits) )\n print( \"%d : best_train = %.4f, best_valid = %.4f\" % \\\n (cnt, max(train_accu_log), max(valid_accu_log) ))\n\n with open( \"exp_results_%d.json\" % cnt,\n \"wt\" , encoding=\"utf8\" ) as f_out :\n print( json.dumps( model_traits ), file=f_out)\n\n\n cnt += 1\n #%%",
"def train_and_evaluate(config, workdir):\n if config.dataset.batch_size % jax.device_count() != 0:\n raise ValueError(\"Batch size must be divisible by the number of devices.\")\n\n file_utils.makedirs(workdir)\n # Deterministic training.\n rng = jax.random.PRNGKey(config.seed)\n # Shift the numpy random seed by process_index() to shuffle data loaded\n # by different hosts\n np.random.seed(20201473 + jax.process_index())\n\n # ----------------------------------------------------------------------------\n # Build input pipeline.\n # ----------------------------------------------------------------------------\n rng, data_rng = jax.random.split(rng)\n data_rng = jax.random.fold_in(data_rng, jax.process_index())\n\n train_iter, _, _ = datasets.create_datasets(config)\n example_batch = train_iter.peek()\n train_iter = flax.jax_utils.prefetch_to_device(train_iter, 6)\n\n config.dataset.image_height = example_batch[\"rgb\"].shape[-3]\n config.dataset.image_width = example_batch[\"rgb\"].shape[-2]\n\n # ----------------------------------------------------------------------------\n # Learning rate schedule.\n num_train_steps = config.train.max_steps\n if num_train_steps == -1:\n raise ValueError\n\n steps_per_epoch = num_train_steps // config.train.num_epochs\n logging.info(\n \"num_train_steps=%d, steps_per_epoch=%d\", num_train_steps, steps_per_epoch\n )\n\n learning_rate_fn = train_utils.create_learning_rate_fn(config)\n\n # ----------------------------------------------------------------------------\n # Initialize model.\n rng, model_rng = jax.random.split(rng)\n model, state, metric_collector = models.create_train_state(\n config,\n model_rng,\n learning_rate_fn=learning_rate_fn,\n example_batch=example_batch,\n )\n\n # ----------------------------------------------------------------------------\n # Set up checkpointing of the model and the input pipeline.\n\n # check if the job was stopped and relaunced\n state = checkpoints.restore_checkpoint(workdir, state)\n\n initial_step = int(state.step) + 1\n if config.dev_run:\n jnp.set_printoptions(precision=2)\n np.set_printoptions(precision=2)\n\n # ----------------------------------------------------------------------------\n # Get the multiplier dictionary.\n alpha_dict = dict(config.loss)\n alpha_fn_dict = {}\n for key, value in alpha_dict.items():\n if key in [\n \"fg_mask_alpha\",\n \"mask_layer_alpha\",\n \"shadow_smooth_alpha\",\n ]:\n alpha_fn_dict[key] = schedule_utils.cons_then_decay(\n value, config.train.switch_steps, config.train.max_steps\n )\n elif key in [\n \"disp_layer_alpha\",\n \"mask_l0_alpha\",\n \"disp_smooth_alpha\",\n ]: # \"disp_layer_alpha\"]:\n alpha_fn_dict[key] = schedule_utils.cons(value)\n else:\n alpha_fn_dict[key] = schedule_utils.warmup_then_cons(\n value, config.train.switch_steps\n )\n\n # Distribute training.\n state = flax_utils.replicate(state)\n p_train_step = jax.pmap(\n functools.partial(\n train_step,\n model=model,\n learning_rate_fn=learning_rate_fn,\n alpha_fn_dict=alpha_fn_dict,\n weight_decay=config.train.weight_decay,\n metric_collector=metric_collector,\n ),\n axis_name=\"batch\",\n )\n\n # ----------------------------------------------------------------------------\n # Prepare Metric Writers\n writer = metric_writers.create_default_writer(\n workdir, just_logging=jax.process_index() > 0\n )\n if initial_step == 1:\n writer.write_hparams(dict(config))\n\n logging.info(\"Starting training loop at step %d.\", initial_step)\n hooks = []\n report_progress = periodic_actions.ReportProgress(\n 
num_train_steps=num_train_steps, writer=writer\n )\n if jax.process_index() == 0:\n hooks += [\n report_progress,\n ]\n train_metrics = None\n\n n_local_devices = jax.local_device_count()\n rng = rng + jax.process_index() # Make random seed separate across hosts.\n keys = jax.random.split(rng, n_local_devices) # For pmapping RNG keys.\n\n with metric_writers.ensure_flushes(writer):\n for step in range(initial_step, num_train_steps + 1):\n # `step` is a Python integer. `state.step` is JAX integer on the GPU/TPU\n # devices.\n is_last_step = step == num_train_steps\n with jax.profiler.StepTraceAnnotation(\"train\", step_num=step):\n batch = jax.tree_map(np.asarray, next(train_iter))\n state, metrics_update, keys = p_train_step(\n rng=keys, state=state, batch=batch\n )\n metric_update = flax_utils.unreplicate(metrics_update)\n train_metrics = (\n metric_update\n if train_metrics is None\n else train_metrics.merge(metric_update)\n )\n # Quick indication that training is happening.\n logging.log_first_n(logging.INFO, \"Finished training step %d.\", 5, step)\n for h in hooks:\n h(step)\n if step % config.train.log_loss_every_steps == 0 or is_last_step:\n writer.write_scalars(step, train_metrics.compute())\n train_metrics = None\n\n if (jax.process_index() == 0) and (\n step % config.train.checkpoint_every_steps == 0 or is_last_step\n ):\n with report_progress.timed(\"checkpoint\"):\n state_to_save = jax.device_get(jax.tree_map(lambda x: x[0], state))\n checkpoints.save_checkpoint(workdir, state_to_save, step, keep=100)\n\n logging.info(\"Finishing training at step %d\", num_train_steps)"
] | [
"0.5979961",
"0.58931327",
"0.5842986",
"0.5816581",
"0.5816086",
"0.5768412",
"0.575343",
"0.5698102",
"0.5657766",
"0.5611364",
"0.55773675",
"0.5553645",
"0.5535842",
"0.55307597",
"0.5524448",
"0.5512024",
"0.5494072",
"0.5475803",
"0.54679567",
"0.5410617",
"0.5362356",
"0.53378415",
"0.53329515",
"0.5330724",
"0.5319719",
"0.53126925",
"0.530407",
"0.5302737",
"0.5301527",
"0.5301406"
] | 0.64535296 | 0 |
Lowers a list of TealBlocks into a list of TealComponents. | def flattenBlocks(blocks: List[TealBlock]) -> List[TealComponent]:
codeblocks = []
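    # references[i] counts how many branch instructions target block i; it decides which blocks need a label later.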
references: DefaultDict[int, int] = defaultdict(int)
indexToLabel = lambda index: "l{}".format(index)
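    # First pass: emit each block's ops, appending explicit branch ops wherever control flow cannot simply fall through to the next block in the list.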
for i, block in enumerate(blocks):
code = list(block.ops)
codeblocks.append(code)
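        # Terminal blocks end execution, so no outgoing branch is emitted for them.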
if block.isTerminal():
continue
if type(block) is TealSimpleBlock:
simpleBlock = cast(TealSimpleBlock, block)
assert simpleBlock.nextBlock is not None
nextIndex = blocks.index(simpleBlock.nextBlock, i+1)
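            # Branch only when the successor does not immediately follow this block; otherwise fall through.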
if nextIndex != i + 1:
references[nextIndex] += 1
code.append(TealOp(None, Op.b, indexToLabel(nextIndex)))
elif type(block) is TealConditionalBlock:
conditionalBlock = cast(TealConditionalBlock, block)
assert conditionalBlock.trueBlock is not None
assert conditionalBlock.falseBlock is not None
trueIndex = blocks.index(conditionalBlock.trueBlock, i+1)
falseIndex = blocks.index(conditionalBlock.falseBlock, i+1)
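            # Prefer fall-through: when one successor immediately follows, a single conditional branch to the other successor suffices.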
if falseIndex == i + 1:
references[trueIndex] += 1
code.append(TealOp(None, Op.bnz, indexToLabel(trueIndex)))
continue
if trueIndex == i + 1:
references[falseIndex] += 1
code.append(TealOp(None, Op.bz, indexToLabel(falseIndex)))
continue
references[trueIndex] += 1
code.append(TealOp(None, Op.bnz, indexToLabel(trueIndex)))
references[falseIndex] += 1
code.append(TealOp(None, Op.b, indexToLabel(falseIndex)))
else:
raise TealInternalError("Unrecognized block type: {}".format(type(block)))
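    # Second pass: concatenate the code blocks, prefixing a label onto every block that is the target of at least one branch.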
teal: List[TealComponent] = []
for i, code in enumerate(codeblocks):
if references[i] != 0:
teal.append(TealLabel(None, indexToLabel(i)))
teal += code
return teal | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def block(self, block: tuple) -> list:\n b = []\n for j in range(3):\n index = block * 3 + j * 9 + 18 * (block // 3)\n for val in self.grid[index:index+3]:\n b.append(val)\n return b",
"def get_blocks(self) -> list:\n self.clingo = ClingoBridge() # reset clingo\n\n base = ('base', '')\n self.clingo.add_file('initial-states.lp')\n self.clingo.run([base], n=1)\n output = self.clingo.output[0]\n\n blocks = []\n for atom in output:\n if atom.name == 'block':\n blocks.append(atom)\n\n return blocks",
"def mergeCalibBlocks(blocks: Iterable[Any]) -> List[Any]:\n blocks = list(blocks)\n\n newBlocks = []\n while blocks:\n mergeables = [0]\n for i in range(1, len(blocks)):\n if _mergeCalibBlocks_isMergeable(blocks[0], blocks[i]):\n mergeables.append(i)\n newBlocks.append(_mergeCalibBlocks_merge(blocks[i] for i in mergeables))\n for i in reversed(mergeables):\n del blocks[i]\n\n return newBlocks",
"def get_party_blocks_list():\n return get_pro_independence_parties(), [\"C's\", 'PSC', 'CatSíqueesPot', 'PP']",
"def component_lists_to_blocks(components_dict, *args, **kwargs):\n\n # track block index\n index = 0\n ancestries = defaultdict(list)\n blocks = list()\n for k, cs in components_dict.items():\n for vs in cs:\n # check for parent, and add current block to node ancestry list\n # should be impossible for a block to have multiple parents,\n # i.e. all nodes in block should have same parent ancestor\n parent=None\n for v in vs:\n if parent is None and ancestries[v]:\n parent = ancestries[v][-1]\n ancestries[v].append(index)\n\n blk = Block(idx=index, parent=parent, vertices=vs, klevel=k, kconn=k)\n blocks.append(blk)\n index += 1\n \n return (blocks)",
"def _makeBlockPinPatches(block, cold):\n patches = []\n data = []\n names = []\n if isinstance(block.spatialGrid, grids.HexGrid):\n largestPitch, comp = block.getPitch(returnComp=True)\n\n elif isinstance(block.spatialGrid, grids.ThetaRZGrid):\n raise TypeError(\n \"This plot function is not currently supported for ThetaRZGrid grids.\"\n )\n else:\n largestPitch, comp = block.getPitch(returnComp=True)\n if block.getPitch()[0] != block.getPitch()[1]:\n raise ValueError(\"Only works for blocks with equal length and width.\")\n\n sortedComps = sorted(block, reverse=True)\n\n derivedComponents = block.getComponentsOfShape(DerivedShape)\n if len(derivedComponents) == 1:\n derivedComponent = derivedComponents[0]\n sortedComps.remove(derivedComponent)\n cName = derivedComponent.name\n\n if isinstance(derivedComponent.material, custom.Custom):\n material = derivedComponent.p.customIsotopicsName\n else:\n material = derivedComponent.material.name\n\n location = comp.spatialLocator\n if isinstance(location, grids.MultiIndexLocation):\n location = location[0]\n x, y, _ = location.getLocalCoordinates()\n if isinstance(comp, Hexagon):\n derivedPatch = matplotlib.patches.RegularPolygon(\n (x, y), 6, largestPitch / math.sqrt(3)\n )\n elif isinstance(comp, Square):\n derivedPatch = matplotlib.patches.Rectangle(\n (x - largestPitch[0] / 2, y - largestPitch[0] / 2),\n largestPitch[0],\n largestPitch[0],\n )\n else:\n raise TypeError(\n \"Shape of the pitch-defining element is not a Square or Hex it is {}, cannot plot for this type of block\".format(\n comp.shape\n )\n )\n patches.append(derivedPatch)\n data.append(material)\n names.append(cName)\n for component in sortedComps:\n locs = component.spatialLocator\n if not isinstance(locs, grids.MultiIndexLocation):\n # make a single location a list to iterate.\n locs = [locs]\n for loc in locs:\n x, y, _ = loc.getLocalCoordinates()\n\n # goes through each location\n # want to place a patch at that location\n blockPatches = _makeComponentPatch(component, (x, y), cold)\n for element in blockPatches:\n patches.append(element)\n\n if isinstance(component.material, custom.Custom):\n material = component.p.customIsotopicsName\n else:\n material = component.material.name\n\n data.append(material)\n names.append(component.name)\n\n return patches, data, names",
"def mk_lst_trans_met(self):\n\t\telem_rnge_I = [[21,30],[39,44],[46,48],[74,76],[78,80]]\n\t\telem_rnge=[]\n\t\tfor i in elem_rnge_I:\n\t\t\tel_strt=i[0]\n\t\t\tel_end=i[1]\n\t\t\trnge_sect=range(el_strt,el_end+1)\n\t\t\telem_rnge.extend(rnge_sect)\n\t\telements=[]\n\t\tfor i in elem_rnge:\n\t\t\telement=Element.from_Z(i)\t# Indice -> pymatgen element object\n\t\t\telements.append(element)\n\t\treturn elements",
"def _split_block(block: PruningBlock, list_output_channels: List[int]) -> List[PruningBlock]:\n if len(list_output_channels) == 1:\n raise RuntimeError\n\n dot_product = reduce((lambda x, y: x * y), list_output_channels)\n\n current_size = dot_product\n new_blocks = []\n divided_shapes = filter(lambda x: x != 1, list_output_channels)\n for divided_shape in divided_shapes:\n offset = int(current_size % dot_product)\n current_size /= divided_shape\n new_block = copy.copy(block)\n new_block.size = int(current_size)\n new_block.offset = offset\n new_blocks.append(new_block)\n return new_blocks",
"def create_source_blocks_from_list(self, list_powers, assign_material=True, default_material=\"Ceramic_material\"):\n oObjects = self.modeler.primitives.solid_names\n listmcad = []\n num_power = None\n for row in list_powers:\n if not num_power:\n num_power = len(row) - 1\n self[\"P_index\"] = 0\n if row[0] in oObjects:\n listmcad.append(row)\n if num_power > 1:\n self[row[0] + \"_P\"] = str(row[1:])\n out = self.create_source_block(row[0], row[0] + \"_P[P_index]\", assign_material, default_material)\n\n else:\n out = self.create_source_block(row[0], str(row[1]) + \"W\", assign_material, default_material)\n if out:\n listmcad.append(out)\n\n return listmcad",
"def convert_blockages(self):\n debug.info(1,\"Converting blockages.\") \n for blockage in self.blockages:\n debug.info(3,\"Converting blockage {}\".format(str(blockage)))\n blockage_list = self.convert_blockage(blockage)\n self.blocked_grids.update(blockage_list)",
"def get_block_feature(self):\n vector_str = Gumtree.gumtree.getBlockFeature()\n vector = vector_str[1:-1].split(\",\")\n vector = [int(i) for i in vector]\n return vector",
"def _serialise(block):\n out = []\n for type in ['points', 'curves', 'loops', 'surfaces']:\n if type not in block:\n continue\n for element in block[type]:\n out += [str(component) for component in block[type][element]]\n return out",
"def _flatten(block: Block) -> List[List[Tuple[int, int, int]]]:\r\n return_list = []\r\n unit = 2 ** (block.max_depth - block.level)\r\n unit_size = block.size / unit\r\n for i in range(unit):\r\n temp_list = []\r\n for j in range(unit):\r\n temp_list.append(_get_colour(block, (i, j), unit_size))\r\n return_list.append(temp_list)\r\n return return_list",
"def getModifiedBlockList(self):\n if self.binary:\n return []\n block_list = []\n for child in self.children:\n old_line_list = [line.strip() for line, color in child.getOldCodeList()\n if line is not None and color in (MODIFIED_DIFF_COLOR,\n DELETED_DIFF_COLOR)]\n new_line_list = [line.strip() for line, color in child.getNewCodeList()\n if line is not None and color in (MODIFIED_DIFF_COLOR,\n ADDITION_DIFF_COLOR)]\n if old_line_list or new_line_list:\n block_list.append((child,(old_line_list, new_line_list)))\n return block_list",
"def blocks2modules(blocks, net_info, device=\"cpu\"):\n modules = torch.nn.ModuleList()\n\n # Track number of channels (filters) in the output of each layer; this\n # is necessary to determine layer input/output shapes for various layers.\n curr_out_channels = None\n prev_layer_out_channels = net_info[\"channels\"]\n out_channels_list = []\n\n for i, block in enumerate(blocks):\n module = torch.nn.Sequential()\n\n if block[\"type\"] == \"convolutional\":\n batch_normalize = \"batch_normalize\" in block\n bias = not batch_normalize\n kernel_size = block[\"size\"]\n padding = (kernel_size - 1) // 2 if \"pad\" in block else 0\n in_channels = prev_layer_out_channels\n out_channels = block[\"filters\"]\n\n conv = torch.nn.Conv2d(\n in_channels=in_channels, out_channels=out_channels,\n kernel_size=kernel_size, stride=block[\"stride\"],\n padding=padding, bias=bias\n )\n module.add_module(\"conv_{}\".format(i), conv)\n\n if batch_normalize:\n bn = torch.nn.BatchNorm2d(num_features=out_channels)\n module.add_module(\"batch_norm_{}\".format(i), bn)\n\n if block[\"activation\"] == \"leaky\":\n acti = torch.nn.LeakyReLU(negative_slope=0.1, inplace=True)\n module.add_module(\"leaky_{}\".format(i), acti)\n elif block[\"activation\"] == \"linear\":\n # NOTE: Darknet src files call out \"linear\" vs \"relu\" but we\n # use ReLU here.\n acti = torch.nn.ReLU(inplace=True)\n\n # Update the number of current (output) channels.\n curr_out_channels = out_channels\n\n elif block[\"type\"] == \"maxpool\":\n stride = block[\"stride\"]\n maxpool = MaxPool2d(\n kernel_size=block[\"size\"], stride=stride\n )\n module.add_module(\"maxpool_{}\".format(i), maxpool)\n\n elif block[\"type\"] == \"route\":\n # Route layer concatenates outputs along channel dim; add dummy\n # layer and handle the actual logic in Darknet.forward().\n module.add_module(\"route_{}\".format(i), DummyLayer())\n\n out_channels = sum(\n out_channels_list[layer_idx] for layer_idx in block[\"layers\"]\n )\n\n curr_out_channels = out_channels\n\n elif block[\"type\"] == \"shortcut\":\n # Shortcut layer sums outputs from previous layers; add dummy\n # layer and handle the actual logic in Darknet.forward().\n module.add_module(\"shortcut_{}\".format(i), DummyLayer())\n\n if \"activation\" in block:\n if block[\"activation\"] == \"leaky\":\n acti = torch.nn.LeakyReLU(negative_slope=0.1, inplace=True)\n module.add_module(\"leaky_{}\".format(i), acti)\n elif block[\"activation\"] == \"linear\":\n acti = torch.nn.ReLU(inplace=True)\n\n assert out_channels == out_channels_list[i + block[\"from\"]]\n curr_out_channels = out_channels\n\n elif block[\"type\"] == \"upsample\":\n # NOTE: torch.nn.Upsample is deprecated in favor of Interpolate;\n # consider using this and/or other interpolation methods?\n upsample = torch.nn.Upsample(\n scale_factor=block[\"stride\"], mode=\"nearest\"\n )\n module.add_module(\"upsample_{}\".format(i), upsample)\n\n elif block[\"type\"] == \"yolo\":\n yolo = YOLOLayer(block[\"anchors\"], block[\"mask\"], device=device)\n module.add_module(\"yolo_{}\".format(i), yolo)\n\n modules.append(module)\n prev_layer_out_channels = curr_out_channels\n out_channels_list.append(curr_out_channels)\n\n return modules",
"def update_blocks(self, block_weight: BlockWeight) -> Union[Iterable[netEncapsulation], netEncapsulation, None]:\n pass",
"def update_blocks(self, block_weight: BlockWeight) -> Union[Iterable[netEncapsulation], netEncapsulation, None]:\n pass",
"def transform_basis(self, values):\n block_len = len(values)/self.base\n blocks = [values[i*block_len:(i+1)*block_len] for i in range(self.base)]\n return blocks",
"def transform_basis(self, values):\n block_len = len(values)/self.base\n blocks = [values[i*block_len:(i+1)*block_len] for i in range(self.base)]\n return blocks",
"def _flatten(block: Block) -> List[List[Tuple[int, int, int]]]:\r\n lst = []\r\n for i in range(2**(block.max_depth - block.level)):\r\n lst.append([])\r\n\r\n if len(block.children) == 0:\r\n for item in lst:\r\n for i in range(len(lst)):\r\n item.append(block.colour)\r\n else:\r\n child0 = _flatten(block.children[0])\r\n child1 = _flatten(block.children[1])\r\n child2 = _flatten(block.children[2])\r\n child3 = _flatten(block.children[3])\r\n for i in range(len(child1)):\r\n lst[i].extend(child1[i])\r\n lst[i].extend(child2[i])\r\n for j in range(len(child0)):\r\n lst[j+len(child1)].extend(child0[j])\r\n lst[j+len(child1)].extend(child3[j])\r\n\r\n return lst",
"def _set_components(self, components: List[base_node.BaseNode]) -> None:\n self._check_mutable()\n\n deduped_components = set(components)\n for upstream_component, component in enumerate_implicit_dependencies(\n list(deduped_components),\n registry=self.dsl_context_registry,\n pipeline=self,\n ):\n component.add_upstream_node(upstream_component)\n\n layers = topsort.topsorted_layers(\n list(deduped_components),\n get_node_id_fn=lambda c: c.id,\n get_parent_nodes=lambda c: c.upstream_nodes,\n get_child_nodes=lambda c: c.downstream_nodes)\n self._components = []\n for layer in layers:\n for component in layer:\n self._components.append(component)\n\n if self.beam_pipeline_args:\n for component in self._components:\n add_beam_pipeline_args_to_component(component, self.beam_pipeline_args)",
"def add_entanglement_layer(self, entanglement_blocks:Optional[Union[str, cirq.Gate, Callable, 'TemplateCircuitBlock',\n List[str],List[cirq.Gate],List[Callable],\n List['TemplateCircuitBlock']]] =None,\n entangle_strategy:Optional[Union[str,List[str], Callable[[int,int],List[Tuple[int]]],\n List[Callable[[int,int],List[Tuple[int]]]]]]=None): \n entangle_strategy = entangle_strategy or 'full' \n entanglement_blocks = self._parse_entanglement_blocks(entanglement_blocks)\n for i, block in enumerate(entanglement_blocks):\n if self._reuse_param_per_layer:\n self.reset_index() \n if isinstance(block, TemplateCircuitBlock):\n interaction_graphs = self.get_interaction_graphs(i, block.num_block_qubits,\n entangle_strategy)\n for qubits in interaction_graphs:\n if self._reuse_param_per_template:\n self.reset_index() \n block.build(self, qubits)\n else:\n interaction_graphs = self.get_interaction_graphs(i, 2, entangle_strategy)\n for qubits in interaction_graphs:\n gate = self.parameterise_gate(block)\n self.apply_gate_operation(gate, qubits)",
"def differential_coding(blocks: np.ndarray):\n dc_comps = [transform.dc_component(b) for b in blocks]\n return utils.differences(dc_comps)",
"def __init__(self, in_channel_list, out_channel_list, block, n_blocks,\n use_transform=False):\n super(HighResolutionBlock, self).__init__()\n self.out_chs_list = out_channel_list\n self.branches = nn.ModuleList()\n\n for in_chs, out_chs in zip(in_channel_list, out_channel_list):\n branch = Branch(in_chs, out_chs, block, n_blocks)\n self.branches.append(branch)\n if len(out_channel_list) > 1:\n self.mul_resolution_layer = MultiResolutionLayer(out_channel_list)\n else:\n self.mul_resolution_layer = None\n pass",
"def components(self):\r\n return list(self._components)",
"def map_blit(self):\n for l in self.blocks:\n for b in range(len(l)):\n l[b].blit()",
"def components(self):\n return [Equity(t, self) for t in self.component_tickers]",
"def GetBlocks(state):\n result = []\n last_pos = 0\n for entry in state:\n pos = entry['pos']\n # Calculate block start points from the beginning of individual lines.\n blocks = [(s[0]-last_pos, s[1]-s[0]) for s in entry['blocks']]\n # Add one end marker block.\n blocks.append((pos-last_pos, 0))\n result.append(blocks)\n last_pos = pos\n return result",
"def components(self):\n skel = self.clone()\n forest = self._compute_components(skel)\n \n if len(forest) == 0:\n return []\n elif len(forest) == 1:\n return [ skel ]\n\n skeletons = []\n for edge_list in forest:\n edge_list = np.array(edge_list, dtype=np.uint32)\n vert_idx = fastremap.unique(edge_list)\n\n vert_list = skel.vertices[vert_idx]\n radii = skel.radii[vert_idx]\n vtypes = skel.vertex_types[vert_idx]\n\n remap = { vid: i for i, vid in enumerate(vert_idx) }\n edge_list = fastremap.remap(edge_list, remap, in_place=True)\n\n skeletons.append(\n Skeleton(vert_list, edge_list, radii, vtypes, skel.id)\n )\n\n return skeletons",
"def _new_components_containing_blocks(self):\n return ActiveConfigHolder._check_for_added_blocks(self._cached_components, self._components)"
] | [
"0.57208264",
"0.56381387",
"0.5355571",
"0.5295688",
"0.5244981",
"0.52368444",
"0.5234634",
"0.51819783",
"0.5135695",
"0.50644815",
"0.5015755",
"0.4994735",
"0.49924207",
"0.4943743",
"0.49335623",
"0.49311888",
"0.49311888",
"0.4920625",
"0.4920625",
"0.4903881",
"0.48911953",
"0.48727617",
"0.4840654",
"0.48398975",
"0.48284256",
"0.4814315",
"0.48128307",
"0.47995114",
"0.4797684",
"0.4785351"
] | 0.68299204 | 0 |
If the object_id is found in the db within the last 5 minutes, the existing job's information is retrieved and processing is skipped. If the object_id cannot be found in the db within the last 5 minutes, a new job is saved and sent to a queue to be processed. | def process(object_id: str) -> Job:
jobs = db.Jobs().get_by_object_id(object_id)
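    # Keep only this object's jobs that were recorded within the last five minutes.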
job_processed_in_last_five_minutes = list(
filter(
lambda x: (
datetime.datetime.utcnow() - x.timestamp < datetime.timedelta(minutes=5)
),
jobs,
)
)
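    # A recent job already exists: return the oldest of those recent jobs instead of enqueueing a duplicate.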
if job_processed_in_last_five_minutes:
return sorted(job_processed_in_last_five_minutes, key=lambda x: x.timestamp)[0]
# todo: add error handling
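    # No recent job found: persist a new job record and publish its id to the queue for asynchronous processing.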
new_job = db.Jobs().create(object_id)
publisher = queues.Publisher()
publisher.publish(new_job.id)
return new_job | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def on_tick(self):\n if ((len(self._queue) >= self.config.batchsize) or\n (time.time() - self._last_get > self.config.batchtime and self._queue)):\n self._get()",
"def run(self):\n assert self.queue is not None, \"Must specify queue or override run()\"\n\n while not self.terminated():\n qs = self.queue.objects.filter(status=self.queue.UNSUBMITTED,).order_by(\n \"-seq\"\n )[: django.conf.settings.DAEMONS_MAX_BATCH_SIZE]\n if not qs:\n self.sleep(django.conf.settings.DAEMONS_IDLE_SLEEP)\n continue\n\n for task_model in qs:\n try:\n self.do_task(task_model)\n task_model.status = self.queue.SUCCESS\n except AsyncProcessingIgnored:\n task_model.status = self.queue.IGNORED\n except Exception as e:\n if isinstance(e, AsyncProcessingRemoteError):\n # This is a bit messy. Do not log a trace when the\n # error is due to the remote service rejecting the request.\n # Such an error is still permanent for the task though.\n self.log.error(e)\n else:\n self.log.error('#' * 100)\n self.log.exception(f'Exception when handling task \"{task_model}\"')\n\n task_model.error = str(e)\n # if self.is_permanent_error(e):\n task_model.status = self.queue.FAILURE\n task_model.errorIsPermanent = True\n # raise\n else:\n task_model.submitTime = self.now_int()\n\n task_model.save()\n\n self.sleep(django.conf.settings.DAEMONS_BATCH_SLEEP)\n self.log.info(\"Exiting run loop.\")",
"def get_polling(self, obj):\n try:\n del self._cache[obj.pk]\n except KeyError:\n pass\n return self.get_polling_many((obj,))[0]",
"def peek(self):\n record = self.db.crawl_queue.find_and_modify(\n query={'status': self.WAITING},\n update={'$set': {'status': self.PROCESSING, 'timestamp': datetime.now()}}\n )\n if record:\n return record",
"def _mq_callback(self, message):\n try:\n raw_data = RawData(message.body)\n try:\n session = self.ss_dao.get_one(raw_data.key[0], raw_data.session_id)\n\n # update the click_xxx info\n session = self.update_session_body(raw_data, session)\n duration = raw_data.key[1] - time_helper.session_to_epoch(session.key[1])\n session.total_duration = duration\n\n index = session.number_of_entries\n self.add_entry(session, index, raw_data)\n self.performance_ticker.update.increment_success()\n except LookupError:\n # insert the record\n session = SingleSession()\n\n # input data constraints - both session_id and user_id must be present in MQ message\n session.key = (raw_data.key[0], time_helper.raw_to_session(raw_data.key[1]))\n session.session_id = raw_data.session_id\n session.ip = raw_data.ip\n session.total_duration = 0\n\n session = self.update_session_body(raw_data, session)\n self.add_entry(session, 0, raw_data)\n self.performance_ticker.insert.increment_success()\n\n if time.time() - self._last_safe_save_time < self.SAFE_SAVE_INTERVAL:\n is_safe = False\n else:\n is_safe = True\n self._last_safe_save_time = time.time()\n\n self.ss_dao.update(session, is_safe)\n self.consumer.acknowledge(message.delivery_tag)\n except AutoReconnect as e:\n self.logger.error('MongoDB connection error: %r\\nRe-queueing message & exiting the worker' % e)\n self.consumer.reject(message.delivery_tag)\n raise e\n except (KeyError, IndexError) as e:\n self.logger.error('Error is considered Unrecoverable: %r\\nCancelled message: %r' % (e, message.body))\n self.consumer.cancel(message.delivery_tag)\n except Exception as e:\n self.logger.error('Error is considered Recoverable: %r\\nRe-queueing message: %r' % (e, message.body))\n self.consumer.reject(message.delivery_tag)",
"def save(self, *args, **kwargs):\n super(News, self).save(*args, **kwargs)\n pigeonpost_queue.send(sender=self, defer_for=6*60*60)",
"def process_job():\n r = redis.StrictRedis()\n while True:\n curr_job = r.blpop('job_queue', 0)[1]\n r.hset('status', curr_job, 'processing')\n print('current job ID:', curr_job)\n # convert byte to string\n url = r.hget('urls', curr_job).decode(\"utf-8\")\n print('Current URL:', url)\n\n # if this url has not been requested before/is not in the db\n if Site.query.filter_by(url=url).first():\n r.hset('status', curr_job, 'complete')\n print('Job', curr_job, 'Completed')\n else:\n # fetches url page source\n try:\n html = str(get_html(url))\n print('Successfully retrieved HTML')\n # add results to database\n db.session.add(Site(url=url, html=html))\n db.session.commit()\n print('Added to database')\n r.hset('status', curr_job, 'complete')\n print('Job', curr_job, 'Completed')\n except ValueError:\n r.hset('status', curr_job, 'abort')\n print('Job', curr_job, 'Aborted')\n except TimeoutError:\n r.hset('status', curr_job, 'timeout')\n print('Job', curr_job, 'Timed Out')\n return",
"def get_cached_polling(self, obj):\n return self._cache[obj.pk]",
"def process( self, message ) :\n try: \n spot_master_msg = SpotMasterMsg( raw_json=message.get_body() )\n spot_master_uuid = spot_master_msg.spot_master_uuid \n logger.info( fmt_master_uuid_msg_hdr( spot_master_uuid ) + 'process_resubmit_failed_request')\n dynamodb_conn = boto.dynamodb2.connect_to_region( self.region_name, profile_name=self.profile_name )\n spot_master_table = Table( self.spot_master_table_name, connection=dynamodb_conn ) \n spot_master_item = spot_master_table.get_item( spot_master_uuid=spot_master_uuid )\n spot_request_table = Table( self.spot_request_table_name, connection=dynamodb_conn ) \n failed_spot_request_item = spot_request_table.get_item( spot_request_uuid=spot_master_msg.spot_request_uuid )\n \n # Request spot instance\n spot_instance_request = self.resubmit_failed_request_spot_instance( spot_master_item, failed_spot_request_item, dynamodb_conn )\n \n # Queue up a SpotRequestMsg \n if spot_instance_request != None:\n spot_request_uuid = str(uuid.uuid1())\n spot_request_msg = SpotRequestMsg( spot_request_uuid=spot_request_uuid, \n spot_master_uuid=spot_master_item[ TableSpotMaster.spot_master_uuid ], \n spot_request_msg_type=SpotRequestMsg.TYPE_SPOT_REQUEST_INITIATED, \n spot_request_id=spot_instance_request.id )\n spot_request_msg.name_value_pairs[ SpotRequestMsg.PAIR_NAME_SPOT_PRICE ] = str( spot_instance_request.price )\n spot_request_msg.name_value_pairs[ SpotRequestMsg.PAIR_NAME_INSTANCE_USERNAME ] = spot_master_item[ TableSpotMaster.instance_username ]\n spot_request_msg.name_value_pairs[ SpotRequestMsg.PAIR_NAME_ATTEMPT_NUMBER ] = int( failed_spot_request_item[ TableSpotRequest.attempt_number ] + 1 )\n \n spot_request_sqs_message_durable = SqsMessageDurable( self.spot_request_queue_name, self.region_name, profile_name=self.profile_name )\n spot_request_sqs_message_durable.send_message( spot_request_msg.to_json(), message_attributes=create_microsvc_message_attributes( awsspotbatch.common.const.MICROSVC_REQUEST_CLASSNAME_SpotRequestMessageSpotRequestInitiated ) )\n self.spot_master_sqs_message_durable.delete_message(message) \n # No instances available - resubmit this message with a delay timer so it will get reprocessed in future\n else:\n logger.warning( fmt_master_uuid_msg_hdr( spot_master_uuid ) + 'No spot instances available, will try again in ' + str(awsspotbatch.common.const.NO_SPOT_INSTANCES_AVAILABLE_RECHECK_MINUTES) + ' minutes')\n delay_seconds = awsspotbatch.common.const.NO_SPOT_INSTANCES_AVAILABLE_RECHECK_MINUTES * 60\n self.spot_master_sqs_message_durable.send_message( message.get_body(), \n message_attributes=create_microsvc_message_attributes( awsspotbatch.common.const.MICROSVC_MASTER_CLASSNAME_SpotMasterMessageResubmitFailedRequest ), \n delay_seconds=delay_seconds )\n self.spot_master_sqs_message_durable.delete_message(message)\n\n except StandardError as e:\n logger.error( fmt_master_item_msg_hdr( spot_master_item ) + str(e) )\n logger.error( fmt_master_item_msg_hdr( spot_master_item ) + traceback.format_exc() )",
"def _run_one_off_job(self, query_id):\n job_id = user_query_jobs_one_off.UserQueryOneOffJob.create_new()\n params = {\n 'query_id': query_id\n }\n user_query_jobs_one_off.UserQueryOneOffJob.enqueue(\n job_id, additional_job_params=params)\n self.assertEqual(\n self.count_jobs_in_taskqueue(\n taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1)\n with self.swap(feconf, 'CAN_SEND_EMAILS', True):\n self.process_and_flush_pending_tasks()",
"def pop(self):\n record = self.db.crawl_queue.find_and_modify(\n query={'status': self.WAITING},\n update={'$set': {'status': self.PROCESSING, 'timestamp': datetime.now()}}\n )\n if record:\n return record\n else:\n self.repair()\n raise KeyError()",
"def get_latest_job_tick(self, job_origin_id):",
"def monitor_queue(self):\n\n while True:\n job = self.queue.next()\n if job:\n # print(\"found %s\" % (job.job_id))\n\n job_name = job.payload[\"job_name\"]\n\n if job_name in self.mul_func_map:\n\n t = self.mul_func_map[job_name]\n p = multiprocessing.Process(target=t, args=(job,))\n p.daemon = True\n p.start()\n\n elif job_name in self.th_func_map:\n\n t = self.th_func_map[job_name]\n # create a thread to process the job\n p = threading.Thread(target=t, args=(job,))\n p.daemon = True\n # start the thread, going into the worker function\n p.start()\n\n elif job_name in self.fk_func_map:\n t = self.fk_func_map[job_name]\n if not os.fork():\n os.setsid()\n t(job)\n exit()\n else:\n # jobs in this queue that are unknown are presently being skipped\n # however they could probably get moved to a 'dead letter' queue\n # for closer examination\n print(\"unknown job name %s, skipping\" % (job_name))\n\n # throttle so that other worker subscribers get a chance\n time.sleep(self.queue_delay)\n else:\n time.sleep(self.poll_delay)\n\n # prints the number of threads\n # print len(threading.enumerate())",
"def worker(self, queue):\n with sa.create_engine(dsn).connect() as dbcon:\n while True:\n if queue.qsize() == 0:\n sleep(1)\n if queue.qsize() == 0:\n break\n continue\n item = queue.get()\n try:\n if hash(item['title']) in self.exist_products:\n dbcon.execute(Product.update().values(**item).where(Product.c.id == self.get_id(item)))\n else:\n result = dbcon.execute(Product.insert().values(**item))\n self.exist_products[hash(item['title'])] = result.inserted_primary_key[0]\n except Exception as e:\n print(type(e), e)",
"def process(self, job_id, job_service):\n print('Monitoring job %s' % job_id)\n local_job = Job.query.get(job_id)\n remote_job = job_service.get_job(local_job.remote_job_id)\n\n # TODO: catch saga.IncorrectState\n remote_job_state = remote_job.state\n\n if local_job.last_status != remote_job_state:\n self.send_notifications(local_job, remote_job)\n self.download_files(local_job, remote_job, job_service)\n self.update_state(local_job, remote_job)\n\n # Add task back to the queue if still running\n if remote_job_state not in (saga.FAILED,\n saga.DONE,\n saga.CANCELED,\n saga.FINAL,\n saga.EXCEPTION):\n self.send((job_id, job_service))",
"def _queue_job(jid):\n q.put(jid)",
"def _queue_job(jid):\n q.put(jid)",
"def _timeout(self):\n if self._store_timeout > 0 and (not self._messages.empty()):\n \n # Update Timestamp\n timestamp = 0\n t = datetime.datetime.today()\n timestamp = t.microsecond/1000 + t.second*1000 + \\\n t.minute*60*1000 + t.hour*60*60*1000 + t.day*24*60*60*1000\n while timestamp > 4294967295: timestamp -= 4294967295\n \n # Remove Timeout Messages\n while (not self._messages.empty()):\n msg_time = self._messages.queue[0][0]\n if (timestamp - msg_time >= self._store_timeout) or\\\n (timestamp < msg_time and 4294967295 - \\\n msg_time + timestamp >= self._store_timeout):\n logger.warning(\"%s: message store timeout occurred.\" %\\\n (self.__class__.__name__))\n self._messages.get()\n else:\n break",
"def queue_retrieve(model_admin, request, queryset):\n for img in queryset:\n img.queue_retrieve_data()",
"def requeue(self, job_id):\n def handle_error(failure):\n r = failure.trap(NoSuchJobError, UnpickleError)\n return self.remove(r.job_id)\n \n def requeue_job(job):\n job.status = Status.QUEUED\n job.exc_info = None\n q = Queue(job.origin, connection=job.connection)\n return q.enqueue_job(job, timeout=job.timeout)\n \n d = Job.fetch(job_id, connection=self.connection)\n d.addErrback(handle_error)\n d.addCallback(self.remove)\n d.addCallback(requeue_job)\n return d",
"def _on_batch_cache_timeout(self, meta, timestamp, batch):\n assert isinstance(meta, Message)\n assert isinstance(timestamp, float)\n assert isinstance(batch, list)\n assert len(batch) > 0\n if __debug__:\n dprint(\"processing \", len(batch), \"x \", meta.name, \" batched messages\")\n\n if meta in self._batch_cache and id(self._batch_cache[meta][2]) == id(batch):\n if __debug__: dprint(\"pop batch cache for \", len(batch), \"x \", meta.name)\n self._batch_cache.pop(meta)\n\n if not self._communities.get(meta.community.cid, None) == meta.community:\n if __debug__: \n dprint(\"dropped \", len(batch), \"x \", meta.name, \" packets (community no longer loaded)\", level=\"warning\")\n self._statistics.dict_inc(self._statistics.drop, \"on_batch_cache_timeout: community no longer loaded\", len(batch))\n self._statistics.drop_count += len(batch)\n return 0\n\n if meta.batch.enabled and timestamp > 0.0 and meta.batch.max_age + timestamp <= time():\n if __debug__:\n dprint(\"dropped \", len(batch), \"x \", meta.name, \" packets (can not process these messages on time)\", level=\"warning\")\n self._statistics.dict_inc(self._statistics.drop, \"on_batch_cache_timeout: can not process these messages on time\", len(batch))\n self._statistics.drop_count += len(batch)\n return 0\n\n return self._on_batch_cache(meta, batch)",
"def run(self):\n self.timer.start()\n \n while not Status.is_final(self.status):\n if self.request:\n self.handle_request()\n \n if self.status == Status.RUNNING:\n # Clean up orphaned schedules and undead schedulers.\n # Schedule.objects.orphaned().update(scheduler=None)\n # CronSchedule.objects.orphaned().update(scheduler=None)\n \n cron = CronSchedule.objects.unclaimed()[:SCHEDULER_LIMIT]\n simple = Schedule.objects.unclaimed()[:SCHEDULER_LIMIT]\n for schedule in itertools.chain(cron, simple):\n self.log.info('Claiming %s.' % schedule)\n schedule.scheduler = self\n schedule.save()\n self.add(schedule)\n if not Status.is_final(self.status):\n self.wait()\n self.request = Scheduler.objects.get(pk=self.pk).request",
"def poll(self) -> None:\n assert not self.__closed\n\n if self.__batch is not None and (\n len(self.__batch.results) >= self.__max_batch_size\n or time.time() > self.__batch.created + self.__max_batch_time / 1000.0\n ):\n self.__flush()",
"def requeue_job(job_id: str, connection: 'Redis', serializer=None) -> 'Job':\n job = Job.fetch(job_id, connection=connection, serializer=serializer)\n return job.requeue()",
"def need_update(self):\n five_minutes_ago = datetime.now() - timedelta(minutes=5)\n if (\n self.fetch_status != self.FetchStatus.NONE\n and self.collected_at > five_minutes_ago\n ):\n return False\n return True",
"def _enqueue(self, schedule):\n updated_schedule = get_object(type(schedule), pk=schedule.pk)\n self.set.remove(schedule)\n if updated_schedule == None or updated_schedule.deleted:\n self.log.info('%s was removed.' % schedule)\n if updated_schedule != None:\n updated_schedule.scheduler = None\n updated_schedule.save()\n return\n schedule = updated_schedule\n \n if not schedule.scheduler == self:\n self.log.info(\"%s is no longer tied to this Scheduler.\" %\n schedule)\n # self.set.remove(schedule)\n return\n instance = Instance.objects.create(\n task=schedule.task, schedule=schedule)\n self.log.info('Enqueuing %s.' % instance)\n schedule.queue.push(instance)\n schedule.enqueued()\n if not schedule.finished():\n self.add(schedule)\n else:\n schedule.scheduler = None\n schedule.save()",
"def jobs(self):\n if len(self.ips) == 0:\n return self\n for ip in self.ips:\n dt = datetime.datetime.now()\n time_spent = ping3.ping(ip, unit=\"ms\")\n if time_spent is not None:\n logging.info(\"for \" + ip + \" time is \" + str(time_spent))\n self.save(ip, str(time_spent), dt)\n else:\n self.save(ip, \"-50\", dt)\n return self",
"def killQueueOrder(self):\n # CHECK ALL QUEUE ORDERS AND CANCEL ORDER IF GREATER THAN TWO HOURS OLD\n queue_orders = self.queue.find(\n {\"Trader\": self.user[\"Name\"], \"Asset_Type\": self.asset_type, \"Account_ID\": self.account_id})\n\n dt = datetime.now(tz=pytz.UTC).replace(microsecond=0)\n\n dt_central = dt.astimezone(pytz.timezone('US/Central'))\n\n two_hours_ago = datetime.strptime(datetime.strftime(\n dt_central - timedelta(hours=2), \"%Y-%m-%d %H:%M:%S\"), \"%Y-%m-%d %H:%M:%S\")\n\n ten_minutes_ago = datetime.strptime(datetime.strftime(\n dt_central - timedelta(minutes=10), \"%Y-%m-%d %H:%M:%S\"), \"%Y-%m-%d %H:%M:%S\")\n\n for order in queue_orders:\n\n order_date = order[\"Date\"]\n\n order_type = order[\"Order_Type\"]\n\n id = order[\"Order_ID\"]\n\n forbidden = [\"REJECTED\", \"CANCELED\", \"FILLED\"]\n\n if two_hours_ago > order_date and (order_type == \"BUY\" or order_type == \"BUY_TO_OPEN\") and id != None and order[\"Order_Status\"] not in forbidden:\n\n # FIRST CANCEL ORDER\n resp = self.tdameritrade.cancelOrder(id)\n\n if resp.status_code == 200 or resp.status_code == 201:\n\n other = {\n \"Symbol\": order[\"Symbol\"],\n \"Order_Type\": order[\"Order_Type\"],\n \"Order_Status\": \"CANCELED\",\n \"Strategy\": order[\"Strategy\"],\n \"Account_ID\": self.account_id,\n \"Aggregation\": order[\"Aggregation\"],\n \"Trader\": self.user[\"Name\"],\n \"Date\": getDatetime()\n }\n\n if self.asset_type == \"OPTION\":\n\n other[\"Pre_Symbol\"] = order[\"Pre_Symbol\"]\n\n other[\"Exp_Date\"] = order[\"Exp_Date\"]\n\n self.other.insert_one(other)\n\n self.queue.delete_one(\n {\"Trader\": self.user[\"Name\"], \"Symbol\": order[\"Symbol\"], \"Strategy\": order[\"Strategy\"], \"Asset_Type\": self.asset_type})\n\n self.logger.INFO(\n f\"CANCELED ORDER FOR {order['Symbol']} - TRADER: {self.user['Name']}\", True)\n\n # IF QUEUE ORDER DATE GREATER THAN 10 MINUTES OLD AND ORDER ID EQUALS NONE, SEND ALERT\n if ten_minutes_ago > order_date and order[\"Order_ID\"] == None and order[\"Account_ID\"] == self.account_id:\n\n if order[\"Symbol\"] not in self.no_ids_list:\n\n self.logger.ERROR(\n \"QUEUE ORDER ID ERROR\", f\"ORDER ID FOR {order['Symbol']} NOT FOUND - TRADER: {self.user['Name']} - ACCOUNT ID: {self.account_id}\")\n\n self.no_ids_list.append(order[\"Symbol\"])\n\n else:\n\n if order[\"Symbol\"] in self.no_ids_list:\n\n self.no_ids_list.remove(order[\"Symbol\"])",
"def run(self):\n while True :\n try :\n instance_id = self.queue.get()\n db.hset(application_name,instance_id,1)\n except:\n pass\n finally:\n pass",
"def _lock(self):\n sql = (\"SELECT id FROM %s WHERE ID = %%s FOR UPDATE NOWAIT\" %\n self.model._table)\n try:\n self.session.cr.execute(sql, (self.binding_id, ),\n log_exceptions=False)\n except psycopg2.OperationalError:\n _logger.info('A concurrent job is already exporting the same '\n 'record (%s with id %s). Job delayed later.',\n self.model._name, self.binding_id)\n raise RetryableJobError(\n 'A concurrent job is already exporting the same record '\n '(%s with id %s). The job will be retried later.' %\n (self.model._name, self.binding_id))"
] | [
"0.55987585",
"0.54896456",
"0.54685956",
"0.54629576",
"0.54253185",
"0.54224366",
"0.53269327",
"0.52808934",
"0.52751964",
"0.527179",
"0.52460194",
"0.5220358",
"0.52187735",
"0.52122515",
"0.5173813",
"0.51726663",
"0.51726663",
"0.5172634",
"0.51683325",
"0.51590395",
"0.5139636",
"0.51369303",
"0.51281315",
"0.5128064",
"0.51009095",
"0.50376797",
"0.50310034",
"0.50270146",
"0.5016648",
"0.5007809"
] | 0.7468584 | 0 |
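The first two requeue snippets in the negatives above (requeue and requeue_job) share one fetch-then-requeue pattern: load the failed job by id, reset its state, and put it back on its origin queue. Below is a minimal sketch of that pattern, assuming an RQ-style API with the same Job.fetch / job.requeue calls used in those snippets; the Redis URL and job id are placeholders, not values from the records.

from redis import Redis
from rq.job import Job

def requeue_failed(job_id: str, redis_url: str = "redis://localhost:6379/0"):
    # Placeholder connection; swap in the application's Redis instance.
    connection = Redis.from_url(redis_url)
    # Load the job by id, exactly as requeue_job() above does.
    job = Job.fetch(job_id, connection=connection)
    # requeue() resets the job and enqueues it on its origin queue,
    # mirroring the manual status/exc_info reset in the first snippet.
    return job.requeue()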
Detects the immersion level based on the given features. | def detect(self, features):
pass # TODO | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def detect(self, detect_img):\n features = self.classifier.detectMultiScale(detect_img,1.3,5)\n self.features = features\n self.features_detected = True",
"def get_basic_feature(image, featurelist=['entropy']):\n features = {'min': image.min(), 'max': image.max(), 'variance': 0, 'mean': 0, 'std_dev': 0, 'skewness': 0,\n 'kurtosis': 0, 'entropy': 0, 'energy': 0, 'smoothness': 0, 'coefficient': 0}\n # hist, - = np.histogram(image.flatten(), bins=256, range=[0, 255], density=False)\n\n # hist = cv2.calcHist(image,[0],None,[256],[0,256])\n hist, _ = np.histogram(image.flatten(), bins=256, range=[0, 255], density=False)\n hx = hist.ravel() / hist.sum()\n # mean = np.mean(image.flatten())\n x = np.arange(256)\n mean = hx.dot(x)\n variance = ((x - mean) ** 2).dot(hx)\n std = np.sqrt(variance)\n\n features['mean'] = mean\n features['variance'] = variance\n features['std_dev'] = std\n\n for name in featurelist:\n if name in features.keys():\n if name is 'skewness':\n features[name] = ((x - mean) ** 3).dot(hx) / std ** 3 # different with lecture notes\n elif name is 'kurtosis':\n features[name] = (((x - mean) ** 4) * hx).sum() / std ** 4 - 3 # different with lecture notes\n elif name is 'energy':\n features[name] = (hx * hx).sum()\n elif name is 'smoothness':\n features[name] = 1 - 1 / (1 + variance)\n elif name is 'coefficient':\n features[name] = float(std) / mean\n elif name is 'entropy':\n # ref: https://stackoverflow.com/questions/16647116/faster-way-to-analyze-each-sub-window-in-an-image\n log_h = np.log2(hx + 0.00001)\n features[name] = -1 * (log_h * hx).sum()\n\n return features",
"def _multi_descriptors(self, features, mode):\n # Illumination variant, rotation variant\n illum_var_rot_var_head = self.relu(self.conv_illum_var_rot_var_1(\n features))\n illum_var_rot_var_head = self.bn_illum_var_rot_var_1(\n illum_var_rot_var_head)\n illum_var_rot_var_head = self.conv_illum_var_rot_var_2(\n illum_var_rot_var_head)\n\n # Illumination variant, rotation invariant\n illum_var_rot_invar_head = self.relu(self.conv_illum_var_rot_invar_1(\n features))\n illum_var_rot_invar_head = self.bn_illum_var_rot_invar_1(\n illum_var_rot_invar_head)\n illum_var_rot_invar_head = self.conv_illum_var_rot_invar_2(\n illum_var_rot_invar_head)\n\n # Illumination invariant, rotation variant\n illum_invar_rot_var_head = self.relu(self.conv_illum_invar_rot_var_1(\n features))\n illum_invar_rot_var_head = self.bn_illum_invar_rot_var_1(\n illum_invar_rot_var_head)\n illum_invar_rot_var_head = self.conv_illum_invar_rot_var_2(\n illum_invar_rot_var_head)\n\n # Illumination invariant, rotation invariant\n illum_invar_rot_invar_head = self.relu(\n self.conv_illum_invar_rot_invar_1(features))\n illum_invar_rot_invar_head = self.bn_illum_invar_rot_invar_1(\n illum_invar_rot_invar_head)\n illum_invar_rot_invar_head = self.conv_illum_invar_rot_invar_2(\n illum_invar_rot_invar_head)\n\n outputs = {'raw_rot_var_illum_var': illum_var_rot_var_head,\n 'raw_rot_invar_illum_var': illum_var_rot_invar_head,\n 'raw_rot_var_illum_invar': illum_invar_rot_var_head,\n 'raw_rot_invar_illum_invar': illum_invar_rot_invar_head}\n return outputs",
"def supported_features(self):\n if self._is_dimmable:\n return SUPPORT_BRIGHTNESS\n return 0",
"def get_feature(glcm, featurelist=['contrast']):\n measure_list = dict(max_prob=0, contrast=0, dissimilarity=0, homogeneity=0, ASM=0, energy=0, entropy=0,\n correlation=0, cluster_shade=0, variance_i=0, variance_j=0, mean_i=0, mean_j=0)\n\n M, N = glcm.shape\n\n np.seterr(divide='ignore', invalid='ignore')\n\n flat_glcm = glcm.flatten()\n index_i = np.arange(0, M) # row index\n index_j = np.arange(0, N) # column index = row\n\n sum_v = np.sum(glcm, axis=0) # sum column[] , vertical\n sum_h = np.sum(glcm, axis=1) # sum row[] , horizontal\n\n max_prob = np.max(flat_glcm)\n mean_i = np.dot(index_i, sum_h.flatten())\n mean_j = np.dot(index_j, sum_v.flatten())\n var_i = np.dot((index_i - mean_i) ** 2, sum_h.flatten())\n var_j = np.dot((index_j - mean_j) ** 2, sum_v.flatten())\n\n measure_list['max_prob'] = max_prob\n measure_list['variance_i'] = var_i\n measure_list['variance_j'] = var_j\n measure_list['mean_i'] = mean_i\n measure_list['mean_j'] = mean_j\n\n for name in featurelist:\n if name in measure_list.keys():\n if name is 'max_prob':\n measure_list[name] = np.max(flat_glcm)\n elif name is 'ASM':\n measure_list[name] = np.dot(flat_glcm, flat_glcm)\n elif name is 'energy':\n ASM = np.dot(flat_glcm, flat_glcm)\n measure_list[name] = np.sqrt(ASM)\n elif name is 'cluster_shade':\n cluster_weights = np.zeros([M, N])\n for i in range(M):\n for j in range(N):\n cluster_weights[i, j] = (i + j - mean_i - mean_j) ** 3\n measure_list[name] = np.dot(flat_glcm, cluster_weights.flatten())\n elif name is 'correlation':\n stdev_i = np.sqrt(var_i)\n stdev_j = np.sqrt(var_j)\n correl_weights = np.outer((index_i - mean_i), (index_j - mean_j)) / (stdev_i * stdev_j)\n measure_list[name] = np.dot(flat_glcm, correl_weights.flatten())\n elif name is 'contrast':\n contrast_weights = np.zeros([M, N])\n for i in range(M):\n for j in range(N):\n contrast_weights[i, j] = (i - j) ** 2\n measure_list[name] = np.dot(flat_glcm, contrast_weights.flatten())\n elif name is 'entropy':\n # ln = np.log(flat_glcm) here, log(0) = -inf, will have some problem, using np.ma.log instead\n # np.ma.log(0) = -- : not -inf. ? can pass\n ln = np.ma.log(flat_glcm)\n measure_list[name] = -np.dot(flat_glcm, ln)\n elif name is 'dissimilarity':\n dissi_weights = np.zeros([M, N])\n for i in range(M):\n for j in range(N):\n dissi_weights[i, j] = abs(i - j)\n measure_list[name] = np.dot(flat_glcm, dissi_weights.flatten())\n elif name is 'homogeneity':\n homo_weights = np.zeros([M, N])\n for i in range(M):\n for j in range(N):\n homo_weights[i, j] = 1 / (1 + (i - j) ** 2)\n measure_list[name] = np.dot(flat_glcm, homo_weights.flatten())\n\n return measure_list",
"def feature_extraction(img, feature):\n\n if feature == 'HoG':\n # HoG parameters\n\n # In the case of the Hog Feature, we already given the base parameters for using hog feature function.\n # TA - You can just use that parameter with each subdivide image (which has image grid size * image grid size)\n # Thank you for the reply. Does it mean to divide the image into 20x20 size sub-images and perform the feature extraction on each image??\n # TA - Yes. In the SIFT, image grid size is different.\n\n win_size = (32, 32)\n block_size = (32, 32)\n block_stride = (16, 16)\n cell_size = (16, 16)\n\n nbins = 9\n deriv_aperture = 1\n win_sigma = 4\n histogram_norm_type = 0\n l2_hys_threshold = 2.0000000000000001e-01\n gamma_correction = 0\n nlevels = 64\n\n # Your code here. You should also change the return value.\n\n # sample visualizing\n # cv2.imshow('img', img)\n\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n\n hog = cv2.HOGDescriptor(win_size,\n block_size,\n block_stride,\n cell_size,\n nbins,\n deriv_aperture,\n win_sigma,\n histogram_norm_type,\n l2_hys_threshold,\n gamma_correction,\n nlevels)\n\n # additional parameters\n\n #hist = hog.compute(gray,winStride,padding,locations)\n\n #TODO: Check if this is valid???\n\n hist = hog.compute(gray)\n hist_resized = np.resize(hist, (int(len(hist)/36), 36))\n hist_resized\n return hist_resized\n\n elif feature == 'SIFT':\n\n # Your code here. You should also change the return value.\n\n #input image size 240 * 200 ==> divide H, W by 20 ==> 12 * 10 = 120\n #in case of this input image, the number of feature is 120.\n #So the number of feature is changed according to input image size.\n\n #IF PROBLEMS WITH DEPENDENCIES: pip3 install opencv-contrib-python==3.4.2.16\n\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n sift = cv2.xfeatures2d.SIFT_create()\n kp, des = sift.detectAndCompute(gray, None)\n\n return des",
"def supported_features(self) -> int:\n return self._support_flags",
"def supported_features(self):\n return MEURAL_SUPPORT",
"def supported_features(self):\n return SUPPORT_ARLO",
"def compare_logistic_features(self):\n\n\t\tlogistic_coefs = self.model.coef_[0]\n\t\tfeature_importances = np.exp(logistic_coefs)\n\t\tmin_feature_importance = np.min(feature_importances[feature_importances > 0])\n\t\trelative_importances = feature_importances / min_feature_importance\n\n\t\treturn reative_importances",
"def add_new_features(self):\r\n curr_img = self.cam0_curr_img_msg.image\r\n grid_height, grid_width = self.get_grid_size(curr_img)\r\n\r\n # Create a mask to avoid redetecting existing features.\r\n mask = np.ones(curr_img.shape[:2], dtype='uint8')\r\n\r\n for feature in chain.from_iterable(self.curr_features):\r\n x, y = map(int, feature.cam0_point)\r\n mask[y-3:y+4, x-3:x+4] = 0\r\n\r\n # Detect new features.\r\n new_features = self.detector.detect(curr_img, mask=mask)\r\n\r\n # Collect the new detected features based on the grid.\r\n # Select the ones with top response within each grid afterwards.\r\n new_feature_sieve = [[] for _ in range(self.config.grid_num)]\r\n for feature in new_features:\r\n row = int(feature.pt[1] / grid_height)\r\n col = int(feature.pt[0] / grid_width)\r\n code = row * self.config.grid_col + col\r\n new_feature_sieve[code].append(feature)\r\n\r\n new_features = []\r\n for features in new_feature_sieve:\r\n if len(features) > self.config.grid_max_feature_num:\r\n features = sorted(features, key=lambda x:x.response, \r\n reverse=True)[:self.config.grid_max_feature_num]\r\n new_features.append(features)\r\n new_features = list(chain.from_iterable(new_features))\r\n\r\n # Find the stereo matched points for the newly detected features.\r\n cam0_points = [kp.pt for kp in new_features]\r\n cam1_points, inlier_markers = self.stereo_match(cam0_points)\r\n\r\n cam0_inliers, cam1_inliers, response_inliers = [], [], []\r\n for i, inlier in enumerate(inlier_markers):\r\n if not inlier:\r\n continue\r\n cam0_inliers.append(cam0_points[i])\r\n cam1_inliers.append(cam1_points[i])\r\n response_inliers.append(new_features[i].response)\r\n # if len(cam0_inliers) < max(5, len(new_features) * 0.1):\r\n\r\n # Group the features into grids\r\n grid_new_features = [[] for _ in range(self.config.grid_num)]\r\n for i in range(len(cam0_inliers)):\r\n cam0_point = cam0_inliers[i]\r\n cam1_point = cam1_inliers[i]\r\n response = response_inliers[i]\r\n\r\n row = int(cam0_point[1] / grid_height)\r\n col = int(cam0_point[0] / grid_width)\r\n code = row*self.config.grid_col + col\r\n\r\n new_feature = FeatureMetaData()\r\n new_feature.response = response\r\n new_feature.cam0_point = cam0_point\r\n new_feature.cam1_point = cam1_point\r\n grid_new_features[code].append(new_feature)\r\n\r\n # Sort the new features in each grid based on its response.\r\n # And collect new features within each grid with high response.\r\n for i, new_features in enumerate(grid_new_features):\r\n for feature in sorted(new_features, key=lambda x:x.response, \r\n reverse=True)[:self.config.grid_min_feature_num]:\r\n self.curr_features[i].append(feature)\r\n self.curr_features[i][-1].id = self.next_feature_id\r\n self.curr_features[i][-1].lifetime = 1\r\n self.next_feature_id += 1",
"def findFeatures(self):\n\t\tpass",
"def getFeaturesAttack(agent,node):\r\n gameState = node.gameState\r\n lastGameState = node.parentNode.gameState\r\n features = util.Counter()\r\n\r\n features['getFood'] = gameState.getAgentState(agent.index).numCarrying - lastGameState.getAgentState(agent.index).numCarrying\r\n \r\n # if features['getFood'] == 0:\r\n # if len(agent.getFood(gameState).asList()) > 0:\r\n features['minDistToFood'] = agent.getMinDistToFood(gameState)\r\n\r\n return features",
"def feature_simulator(self, function, x):\n if function == 'bitmap_count':\n return utils.bitmap_count(x.member, BITMAP_THRESHOLD)\n if function == 'move_distance':\n return utils.move_distance(x.member)\n if function == 'orientation_calc':\n return utils.orientation_calc(x.member, 0)",
"def supported_features(self) -> int:\n return self._supported_features",
"def get_light_state(self):\n\n cv_image = self.bridge.imgmsg_to_cv2(self.camera_image, \"bgr8\")\n\n #Get classification\n tl_image_rgb, color_index = self.light_classifier.get_classification(cv_image)\n tl_cv_image = cv2.cvtColor(tl_image_rgb, cv2.COLOR_RGB2BGR)\n try:\n self.tl_detected_image_pub.publish(self.bridge.cv2_to_imgmsg(tl_cv_image, \"bgr8\"))\n except CvBridgeError as e:\n print(e)",
"def run(layers):\n\n # Depth above which people are regarded affected [m]\n threshold = 0.1\n thresholds = [0.1, 0.2, 0.3, 0.5, 0.8, 1.0]\n\n # Identify hazard and exposure layers\n inundation = get_hazard_layer(layers) # Flood inundation [m]\n\n # Get population and gender ratio\n population = gender_ratio = None\n for layer in get_exposure_layers(layers):\n keywords = layer.get_keywords()\n\n if 'datatype' not in keywords:\n population = layer\n else:\n datatype = keywords['datatype']\n\n if 'population' in datatype and 'density' in datatype:\n population = layer\n\n if 'female' in datatype and 'ratio' in datatype:\n gender_ratio_unit = keywords['unit']\n\n msg = ('Unit for gender ratio must be either '\n '\"percent\" or \"ratio\"')\n assert gender_ratio_unit in ['percent', 'ratio'], msg\n\n gender_ratio = layer\n\n msg = 'No population layer was found in: %s' % str(layers)\n assert population is not None, msg\n\n # Extract data as numeric arrays\n D = inundation.get_data(nan=0.0) # Depth\n\n # Calculate impact as population exposed to depths > threshold\n if population.get_resolution(native=True, isotropic=True) < 0.0005:\n # Keep this for backwards compatibility just a little while\n # This uses the original custom population set and\n # serves as a reference\n\n P = population.get_data(nan=0.0) # Population density\n pixel_area = 2500\n I = numpy.where(D > threshold, P, 0) / 100000.0 * pixel_area\n else:\n # This is the new generic way of scaling (issue #168 and #172)\n P = population.get_data(nan=0.0, scaling=True)\n I = numpy.where(D > threshold, P, 0)\n\n if gender_ratio is not None:\n # Extract gender ratio at each pixel (as ratio)\n G = gender_ratio.get_data(nan=0.0)\n if gender_ratio_unit == 'percent':\n G /= 100\n\n # Calculate breakdown\n P_female = P * G\n P_male = P - P_female\n\n I_female = I * G\n I_male = I - I_female\n\n\n # Generate text with result for this study\n total = str(int(sum(P.flat) / 1000))\n count = str(int(sum(I.flat) / 1000))\n\n # Create report\n caption = ('<table border=\"0\" width=\"320px\">'\n ' <tr><td><b>%s:</b></td>'\n '<td align=\"right\"><b>%s</b></td></tr>'\n % ('Jumlah Penduduk', total))\n if gender_ratio is not None:\n total_female = str(int(sum(P_female.flat) / 1000))\n total_male = str(int(sum(P_male.flat) / 1000))\n\n\n caption += (' <tr><td>%s:</td>'\n '<td align=\"right\">%s</td></tr>'\n % (' - Wanita', total_female))\n caption += (' <tr><td>%s:</td>'\n '<td align=\"right\">%s</td></tr>'\n % (' - Pria', total_male))\n caption += '<tr><td> </td></tr>' # Blank separation row\n\n caption += (' <tr><td><b>%s:</b></td>'\n '<td align=\"right\"><b>%s</b></td></tr>'\n % ('Perkiraan Jumlah Terdampak (> %.1fm)' % threshold,\n count))\n\n if gender_ratio is not None:\n affected_female = str(int(sum(I_female.flat) / 1000))\n affected_male = str(int(sum(I_male.flat) / 1000))\n\n\n caption += (' <tr><td>%s:</td>'\n '<td align=\"right\">%s</td></tr>'\n % (' - Wanita', affected_female))\n caption += (' <tr><td>%s:</td>'\n '<td align=\"right\">%s</td></tr>'\n % (' - Pria', affected_male))\n\n caption += '</table>'\n\n caption += '<br>' # Blank separation row\n caption += 'Catatan: Semua nomor x 1000'\n\n # Create raster object and return\n R = Raster(I,\n projection=inundation.get_projection(),\n geotransform=inundation.get_geotransform(),\n name='People affected',\n keywords={'caption': caption})\n return R",
"def classify_feature_image(input_img, feature_colors, pix_cutoff=50):\n result = 'negative'\n for pic_val, num in pic_val_count(input_img):\n for min_rgb, max_rgb in feature_colors:\n if (((min_rgb[0] <= pic_val[0] <= max_rgb[0])\n &(min_rgb[1] <= pic_val[1] <= max_rgb[1])\n &(min_rgb[2] <= pic_val[2] <= max_rgb[2])) & (num > pix_cutoff)):\n result = \"positive\"\n return result",
"def nrc_affect_intensity(self, tokens): \n num_features = 10 # 'anger', 'anticipation', 'disgust', 'fear', 'joy', 'negative', 'positive', 'sadness', 'surprise', 'trust'\n sum_vec = [0.0] * num_features\n for token in tokens:\n if token in self.nrc_affect_intensity_map:\n sum_vec = [a + b for a, b in zip(sum_vec, self.nrc_affect_intensity_map[token])]\n \n feature_names = ['anger', 'anticipation', 'disgust', 'fear', 'joy', 'negative', 'positive', 'sadness', 'surprise', 'trust']\n feature_names = ['nrc_affect_intensity_' + name for name in feature_names]\n return dict(zip(feature_names, sum_vec))",
"def evaluate(labels, predictions):\n positive_count = 0\n positive = 0\n negative_count = 0\n negative = 0\n for i in range(len(labels)):\n if labels[i] == 1:\n positive_count+=1\n if predictions[i] == 1:\n positive +=1\n else:\n negative_count+=1\n if predictions[i] == 0:\n negative +=1\n\n sensitivity = positive / positive_count\n specificity = negative / negative_count\n\n return (sensitivity, specificity)",
"def detect_features(self):\n # P.S. the features and descriptors of frame A are calculated beforehand\n self.featureFrameB, self.featureDesB = self.orb.detectAndCompute(self.frameB, None)",
"def test_changeIlluminationLevel(self):\n fade_to_black = \"Your environs fade to black due to Ineffable Spooky Magic.\"\n no_change = \"You do it. Swell.\"\n dark_to_light = \"Your environs are suddenly alight.\"\n brighten = \"Your environs seem slightly brighter.\"\n endarken = \"Your environs seem slightly dimmer.\"\n Manipulator.createFor(self.playerWrapper.actor)\n\n self._test(\n \"illuminate 0\",\n [fade_to_black],\n [fade_to_black])\n\n ll = self.store.findUnique(\n objects.LocationLighting,\n objects.LocationLighting.thing == self.location)\n self.assertEquals(ll.candelas, 0)\n\n self._test(\n \"illuminate 0\",\n [no_change])\n self.assertEquals(ll.candelas, 0)\n\n self._test(\n \"illuminate 100\",\n [dark_to_light],\n [dark_to_light])\n self.assertEquals(ll.candelas, 100)\n\n self._test(\n \"illuminate 110\",\n [brighten],\n [brighten])\n self.assertEquals(ll.candelas, 110)\n\n self._test(\n \"illuminate 100\",\n [endarken],\n [endarken])\n self.assertEquals(ll.candelas, 100)\n\n self._test(\n \"illuminate 0\",\n [fade_to_black],\n [fade_to_black])\n self.assertEquals(ll.candelas, 0)",
"def detect_scenes(\n clip=None, luminosities=None, luminosity_threshold=10, logger=\"bar\", fps=None\n):\n if luminosities is None:\n luminosities = [\n f.sum() for f in clip.iter_frames(fps=fps, dtype=\"uint32\", logger=logger)\n ]\n\n luminosities = np.array(luminosities, dtype=float)\n if clip is not None:\n end = clip.duration\n else:\n end = len(luminosities) * (1.0 / fps)\n luminosity_diffs = abs(np.diff(luminosities))\n avg = luminosity_diffs.mean()\n luminosity_jumps = (\n 1 + np.array(np.nonzero(luminosity_diffs > luminosity_threshold * avg))[0]\n )\n timings = [0] + list((1.0 / fps) * luminosity_jumps) + [end]\n cuts = [(t1, t2) for t1, t2 in zip(timings, timings[1:])]\n return cuts, luminosities",
"def recognize():\n return 0",
"def feature_selection_information_gain(df, string_cols, threshold = 0.01, label_col = 'label', pcg = 1.0):\n\n df = df.select(string_cols + [label_col]).sample(withReplacement=False, fraction=pcg)\n\n df = only_categorical_columns(df, label_col=label_col)\n\n df.cache()\n\n print \"[Info] Number of rows in the DF: \" + str(df.count())\n\n string_cols = list(set(df.columns) - set([label_col]))\n\n # First pipeline: string indexer variables -> necessary to use them in models\n print('[INFO] Indexing categorical variables: ' + str(len(string_cols)))\n\n ig_df = information_gain(df=df, var_list=string_cols, label_col = label_col)\n\n cat_cols = ig_df\\\n .filter(col('ig') >= (threshold)*col('init_entropy'))\\\n .select('feature').rdd.map(lambda r: r['feature']).collect()\n\n # [ig[0] for ig in ig_results if (ig[1] >= threshold_abs)]\n\n return cat_cols",
"def classify(self, features):\n node = self.tree\n answer = node.right_label + node.left_label\n while len(answer)>1:\n if node.model.classify(features)==+1:\n answer=node.left_label\n node=node.left\n else:\n answer=node.right_label\n node=node.right \n return answer[0]",
"def main():\n import sys\n if len(sys.argv) < 3:\n print(\"Please list database and sobel threshold.\")\n return\n import numpy as np\n import cv2\n import glob\n import age_class\n # store all image paths in list\n img_list = glob.glob(sys.argv[1] + '*')\n # threshold for edge detection\n sobel_thres = int(sys.argv[2])\n feature_list = []\n for i in img_list:\n print(i)\n if i != \"face_dataset/.jpg\":\n img_color = cv2.imread(i)\n img_color = cv2.resize(img_color, (150, 200))\n height, width = 200, 150\n img_gray = cv2.cvtColor(img_color, cv2.COLOR_BGR2GRAY)\n img_gray = age_class.dynamic_range(img_gray)\n\n # location phase\n locs = age_class.location_phase(img_gray.copy(), sobel_thres)\n eye_pos, left_eye, right_eye, nose_pos, mouth_pos, mouth_area = locs\n # feature extraction phase\n feats = age_class.feature_extraction(img_gray.copy(), eye_pos, \n left_eye, right_eye, nose_pos, mouth_pos, mouth_area, 40)\n # place all feature data into feature matrix\n l = []\n for n in range(len(feats[0])):\n for key in feats[0][n].keys():\n l.append(feats[0][n][key])\n l.append(feats[1])\n l.append(feats[2])\n feature_list.append(l)\n\n feature_list = np.array(feature_list)\n print(feature_list.shape)",
"def check_supported_features(self):",
"def haar_feature(i, x, y, f, s):\n features = np.array([[2, 1], [1, 2], [3, 1], [1, 3], [2, 2]])\n h = features[f][0]*s\n w = features[f][1]*s\n\n if f == 0:\n bright = (i[int(x+h/2-1), y+w-1] + i[x-1, y-1]) - (i[x-1, y+w-1] + i[int(x+h/2-1), y-1])\n dark = (i[x+h-1, y+w-1] + i[int(x+h/2-1), y-1]) - (i[int(x+h/2-1), y+w-1] + i[x+h-1, y-1])\n elif f == 1:\n bright = (i[x+h-1, int(y+w/2-1)] + i[x-1, y-1]) - (i[x-1, int(y+w/2-1)] + i[x+h-1, y-1])\n dark = (i[x+h-1, y+w-1] + i[x-1, int(y+w/2-1)]) - (i[x+h-1, int(y+w/2-1)] + i[x-1, y+w-1])\n #print(bright)\n #print(dark)\n haar_feature_val = bright-dark\n #print(haar_feature_val)\n return haar_feature_val",
"def classify(self, features):\n\n # TODO: finish this.\n features = np.array(features)\n return self.classifier.classify(features)"
] | [
"0.56744266",
"0.5453125",
"0.54530686",
"0.5328636",
"0.53202724",
"0.5282828",
"0.5223401",
"0.5216018",
"0.5200532",
"0.51810616",
"0.5085944",
"0.50755256",
"0.50647336",
"0.50630873",
"0.50608903",
"0.5057752",
"0.5049103",
"0.5029467",
"0.5027702",
"0.49991143",
"0.4995177",
"0.49910012",
"0.4963685",
"0.49461764",
"0.49438226",
"0.4937277",
"0.49348068",
"0.49254546",
"0.49201903",
"0.4899727"
] | 0.6558402 | 0 |
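The document for the record above is only a stub (pass # TODO), so the behaviour described by the query is never implemented there. Purely as an illustrative sketch of one possible shape, with the feature format, thresholds, and level names below being assumptions rather than part of the record:

class ImmersionDetector:
    # Hypothetical detector mirroring the stubbed detect() in the record above.

    def detect(self, features):
        # Assumption: features is a dict of signals normalized to [0, 1].
        if not features:
            return "unknown"
        # Reduce the signals to a single score and bucket it into coarse levels.
        score = sum(features.values()) / len(features)
        if score > 0.75:
            return "high"
        if score > 0.4:
            return "medium"
        return "low"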
Optimizes the EmotionsDetector model, trying to find the SVM parameters that would yield better results. | def optimize(self, args):
############################
# Get the data
############################
# Read the CSV file ignoring the header and the first column (which
# contains the file name of the image used for extracting the data in
# a row)
try:
data = np.genfromtxt(args.featuresFile, delimiter=',',
skip_header=1)
data = data[:, 1:]
except:
print('Could not read CSV file: {}'.format(args.featuresFile))
return -1
x = data[:, :-1]
y = np.squeeze(data[:, -1:])
############################
# Execute the optimization
############################
tunningParams = [
{
'kernel': ['linear'],
'C': [1e-3, 1e-2, 1e-1, 1, 1e+1, 1e+2, 1e+3]
},
{
'kernel': ['rbf'],
'gamma': [1e-3, 1e-2, 1e-1, 1, 1e+1, 1e+2, 1e+3],
'C': [1e-3, 1e-2, 1e-1, 1, 1e+1, 1e+2, 1e+3]
},
]
scores = ['precision', 'recall']
for score in scores:
print('# Tuning hyper-parameters for {}\n'.format(score))
clf = GridSearchCV(svm.SVC(C=1), tunningParams, cv=5,
scoring=format('{}_macro'.format(score)))
clf.fit(x, y)
print('Best parameters set found on development set:\n')
print(clf.best_params_)
print('\nGrid scores on development set:\n')
means = clf.cv_results_['mean_test_score']
stds = clf.cv_results_['std_test_score']
for mean, std, params in zip(means, stds, clf.cv_results_['params']):
print('{:.3f} (+/-{:.3f}) for {}'.format(mean, std * 2, params))
#print('\nDetailed classification report:\n')
#print('The model is trained on the full development set.')
#print('The scores are computed on the full evaluation set.\n')
#y_true, y_pred = y_test, clf.predict(X_test)
#print(classification_report(y_true, y_pred))
#print()
return 0 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def svm():",
"def optimize(self):\n self.vbe_step()\n self.compute_responsibilities()\n self.compute_sufficient_stats()\n self.vbmstep()",
"def svm_clf_training(max_features, data):\r\n X_train, y_train, X_test, y_test = data\r\n clf = Pipeline([('feature_selection', SelectKBest(score_func=chi2, k=max_features)),\r\n ('clf', svm.SVC(C=1., kernel='linear'))])\r\n\r\n vectorizer = CountVectorizer(ngram_range=(1, 2), lowercase=True) # unigrams and bigrams\r\n X_matrix_tr = vectorizer.fit_transform(X_train)\r\n # parameters = [{'clf__kernel': ['linear'], 'clf__C': [0.1, 1, 10, 100]},\r\n # {'clf__kernel': ['rbf'], 'clf__C': [0.1, 1, 10, 100], 'clf__gamma': [0.001, 0.01, 0.1]},\r\n # {'clf__kernel': ['poly'], 'clf__C': [0.1, 1, 10, 100], 'clf__degree': [2, 3, 4, 5]}]\r\n # clf = GridSearchCV(svc, parameters, scoring='accuracy')\r\n clf.fit(X_matrix_tr, y_train)\r\n # print(\"Best parameters set found on development set:\")\r\n # print()\r\n # print(clf.best_estimator_)\r\n # print()\r\n # print(\"Grid scores on development set:\")\r\n # print()\r\n # for params, mean_score, scores in clf.grid_scores_:\r\n # print(\"%0.3f (+/-%0.03f) for %r\"\r\n # % (mean_score, scores.std() / 2, params))\r\n # print()\r\n voc = vectorizer.get_feature_names()\r\n # vectorizer1 = CountVectorizer(ngram_range=(1, 2), lowercase=True, vocabulary=voc)\r\n # X_matrix_val = vectorizer1.fit_transform(X_test)\r\n # y_pred = clf.predict(X_matrix_val)\r\n\r\n # for i in range(len(X_test)):\r\n # if y_test[i] != y_pred[i]:\r\n # print(X_test[i], y_test[i], y_pred[i])\r\n # print(classification_report(y_test, y_pred))\r\n return clf, voc",
"def optimize_parameters(self):\n pass",
"def optimize_parameters(self):\n pass",
"def optimize_parameters(self):\n pass",
"def hyper_parameter_tuning(X, y, classifier, models, sntypes_map, feature_names, fig_dir='.', remove_models=(), name=''):\n\n # Hyperparameter grid\n n_estimators = [int(x) for x in np.linspace(start=200, stop=2000, num=10)]\n max_features = ['auto', 'sqrt']\n max_depth = [int(x) for x in np.linspace(10, 110, num=11)]\n max_depth.append(None)\n min_samples_split = [2, 5, 10]\n min_samples_leaf = [1, 2, 4]\n bootstrap = [True, False]\n random_grid = {'n_estimators': n_estimators,\n 'max_features': max_features,\n 'max_depth': max_depth,\n 'min_samples_split': min_samples_split,\n 'min_samples_leaf': min_samples_leaf,\n 'bootstrap': bootstrap}\n\n # Get data\n num_features = X.shape[1]\n model_names = [sntypes_map[model] for model in models]\n X, y, models, remove_models = remove_redundant_classes(X, y, models, remove_models)\n\n # Get best features\n n = 50\n num_features, feature_names, X = get_n_best_features(n, X, y, classifier, feature_names, num_features, fig_dir, name, models, model_names)\n\n # Randomised Search\n clf_random = RandomizedSearchCV(estimator=classifier, param_distributions=random_grid, n_iter=7, cv=3, verbose=2,\n random_state=42, n_jobs=2)\n clf_random.fit(X, y)\n print(clf_random.best_params_)\n\n def evaluate(model, test_features, test_labels):\n predictions = model.predict(test_features)\n errors = abs(predictions - test_labels)\n mape = 100 * np.mean(errors / test_labels)\n accuracy = 100 - mape\n print('Model Performance')\n print('Average Error: {:0.4f} degrees.'.format(np.mean(errors)))\n print('Accuracy = {:0.2f}%.'.format(accuracy))\n\n return accuracy\n\n best_random = clf_random.best_estimator_\n # random_accuracy = evaluate(best_random, test_features, test_labels)",
"def _optimise(self):\n pass",
"def time_SVM_tuned(X, y, sample_size):\n Xs, _, ys, _ = train_test_split(X,y, train_size=sample_size)\n clf = svm.SVC(kernel = 'rbf', gamma = 1e-5)\n start_fit = time.time()\n clf.fit(Xs,ys)\n end_fit = time.time()\n \n start_predict = time.time()\n clf.predict(Xs)\n end_predict = time.time()\n \n timings = {\"fit\": end_fit - start_fit,\n \"predict\": end_predict - start_predict}\n \n return timings",
"def __init__(self, train_x, train_y, test_x, test_y, Tunning_Cs=[0.001, 0.01, 0.1, 1, 10]): \n self.Cs = Tunning_Cs\n self.train_x = train_x\n self.train_y = train_y\n self.test_x = test_x \n self.test_y = test_y\n self.model = svm.SVR(kernel='rbf', gamma='auto')",
"def tune_parameters(self, model, param_set, train, predictor_var, target_var):\n \n grid_search = GridSearchCV(estimator = model, param_grid = param_set,n_jobs=-1, cv=5)\n grid_search.fit(train[predictor_var],train[target_var])\n \n print(grid_search.best_params_, grid_search.best_score_)\n \n return grid_search.best_params_",
"def add_objective(self): \n \n if \"CSS\" in self.algorithm:\n \n if self.num_hidden == 0:\n \n data_term = self.compute_energy(self.x, self.batch_size)\n \n else:\n \n data_term = self.compute_free_energy(self.x)\n \n normalizer_term = self.add_css_approximation(data_term)\n \n if \"CD\" in self.algorithm and self.num_hidden ==0:\n \n data_term = self.compute_energy(self.x, self.batch_size)\n \n normalizer_term = self.compute_energy(self.x_gibbs, \n self.batch_size)\n \n normalizer_term = -T.mean(normalizer_term)\n \n if \"CD\" in self.algorithm and self.num_hidden > 0:\n \n data_term = self.compute_free_energy(self.x)\n \n normalizer_term = self.compute_free_energy(self.rbm_cd_samples)\n \n normalizer_term = -T.mean(normalizer_term)\n \n # cost is negative log likelihood \n self.cost = T.mean(data_term) + normalizer_term",
"def main(argv):\n\n # Parse arguments and store in model_dict\n model_dict = svm_model_dict_create()\n DR = model_dict['dim_red']\n rev_flag = model_dict['rev']\n strat_flag = 1\n\n # Load dataset and create data_dict to store metadata\n print('Loading data...')\n dataset = model_dict['dataset']\n if (dataset == 'MNIST') or (dataset == 'GTSRB'):\n X_train, y_train, X_val, y_val, X_test, y_test = load_dataset(\n model_dict)\n img_flag = None\n elif dataset == 'HAR':\n X_train, y_train, X_test, y_test = load_dataset(model_dict)\n img_flag = None\n # TODO: 2 classes case\n # if model_dict['classes'] == 2:\n # X_train = X_train\n\n data_dict = get_data_shape(X_train, X_test)\n n_features = data_dict['no_of_features']\n\n # Reshape dataset to have dimensions suitable for SVM\n X_train_flat = X_train.reshape(-1, n_features)\n X_test_flat = X_test.reshape(-1, n_features)\n # Center dataset with mean of training set\n mean = np.mean(X_train_flat, axis=0)\n X_train_flat -= mean\n X_test_flat -= mean\n\n # Create a new model or load an existing one\n clf = model_creator(model_dict, X_train_flat, y_train)\n model_tester(model_dict, clf, X_test_flat, y_test)\n\n # Assign parameters\n n_mag = 25 # No. of deviations to consider\n dev_list = np.linspace(0.1, 2.5, n_mag) # A list of deviations mag.\n if dataset == 'MNIST':\n rd_list = [784, 331, 200, 100, 90, 80, 70, 60, 50, 40, 30, 20, 10] # Reduced dimensions to use\n # rd_list = [784]\n elif dataset == 'HAR':\n rd_list = [561, 200, 100, 90, 80, 70, 60, 50, 40, 30, 20, 10]\n # rd_list = [561]\n n_rd = len(rd_list)\n output_list = []\n clear_flag = None\n # Clear old output files\n if clear_flag ==1:\n abs_path_o = resolve_path_o(model_dict)\n _, fname = file_create(model_dict)\n os.remove(abs_path_o + fname + '.txt')\n _, fname = file_create(model_dict, rd=1, strat=strat_flag, rev=rev_flag)\n os.remove(abs_path_o + fname + '.txt')\n\n # Test clf against adv. 
samples\n print('Performing attack...')\n if model_dict['classes'] != 2:\n for i in range(n_mag):\n X_adv, y_ini = mult_cls_atk(clf, X_test_flat, mean, dev_list[i])\n output_list.append(acc_calc_all(clf, X_adv, y_test, y_ini))\n if img_flag != None:\n save_svm_images(model_dict, data_dict, X_test, X_adv,\n dev_list[i])\n fname = print_svm_output(model_dict, output_list, dev_list)\n # subprocess.call([\"gnuplot -e \\\"filename='{}.png'; in_name='{}.txt'\\\" gnu_in_loop.plg\".format(fname,fname)], shell=True)\n # else:\n # # TODO: 2 classes\n # print('TODO')\n\n # Retrain defense and strategic attack\n print('--------------Retrain Defense & Strategic Attack--------------')\n for rd in rd_list:\n output_list = []\n print('Reduced dimensions: {}'.format(rd))\n\n # Dimension reduce dataset and reshape\n X_train_dr, _, dr_alg = dr_wrapper(\n X_train_flat, X_test_flat, DR, rd, y_train, rev=rev_flag)\n\n # With dimension reduced dataset, create new model or load existing one\n clf = model_creator(model_dict, X_train_dr, y_train, rd, rev_flag)\n # Modify classifier to include transformation matrix\n clf = model_transform(model_dict, clf, dr_alg)\n\n model_tester(model_dict, clf, X_test_flat, y_test, rd, rev_flag)\n\n # rev_flag = 1\n # model_dict['rev'] = rev_flag\n # # Dimension reduce dataset and reshape\n # X_train_dr, _, dr_alg = dr_wrapper(\n # X_train_flat, X_test_flat, DR, rd, y_train, rev=rev_flag)\n #\n # # With dimension reduced dataset, create new model or load existing one\n # clf_1 = model_creator(model_dict, X_train_dr, y_train, rd, rev_flag)\n # # Modify classifier to include transformation matrix\n # clf_1 = model_transform(model_dict, clf_1, dr_alg)\n # # Test model on original data\n # model_tester(model_dict, clf_1, X_test_flat, y_test, rd, rev_flag)\n #\n # print clf_1.coef_[0]-clf.coef_[0]\n # print np.linalg.norm(clf_1.coef_[0]), np.linalg.norm(clf.coef_[0])\n # print np.dot(clf_1.coef_[0],clf.coef_[0])/(np.linalg.norm(clf_1.coef_[0])*np.linalg.norm(clf.coef_[0]))\n\n # Strategic attack: create new adv samples based on retrained clf\n print('Performing strategic attack...')\n for i in range(n_mag):\n X_adv, y_ini = mult_cls_atk(clf, X_test_flat, mean, dev_list[i])\n output_list.append(acc_calc_all(clf, X_adv, y_test, y_ini))\n if img_flag != None:\n save_svm_images(model_dict, data_dict, X_test_flat, X_adv,\n dev_list[i], rd, dr_alg, rev_flag)\n\n fname = print_svm_output(model_dict, output_list, dev_list, rd,\n strat_flag, rev_flag)\n\n # fname = dataset +'_' + fname\n subprocess.call(\n [\"gnuplot -e \\\"mname='{}'\\\" gnu_in_loop.plg\".format(fname)], shell=True)",
"def propose_optimize():\n pass",
"def find_svm_hyperparams():\n NUM_ITERS = 10\n # coefs = np.arange(-5, 5).astype(np.float)\n coefs = np.linspace(0.25, 1, 10)\n Cs = np.power(2, coefs)\n results = []\n\n for _ in range(NUM_ITERS):\n data = FaceDataset(\"embeddings/dev\", n=50)\n train_data, train_labels = data.train()\n test_data, test_labels = data.test()\n accs = []\n for c in tqdm(Cs):\n clf = svm.SVC(kernel=\"linear\", C=c)\n clf, _ = train(clf, train_data, train_labels)\n acc, _ = test(clf, test_data, test_labels)\n accs.append(acc)\n results.append(accs)\n\n results = np.mean(results, axis=0)\n s = plotly.graph_objs.Scatter(x=Cs, y=results)\n plotly.offline.plot([s], filename=\"svm_linear.html\")\n print(\"C={}\".format(Cs[np.argmax(results)]))",
"def _single_model_BayesianSearchCV(self, \n model_ID,\n model_dict, \n X_train, y_train, \n X_test, y_test,\n path_model_dir,\n refit=True,\n **kwargs):\n if self.verbose>=1:\n print('Fitting',self.cv,'folds for each of',self.max_evals,'candidates, totalling',self.cv*self.max_evals,'fits')\n \n model_dict = model_dict.copy()\n model = model_dict['model']\n type_model = str(type(model))\n model_type = str(type(model_dict['model']))\n param_grid = model_dict['param_grid'].copy()\n objective = _functools.partial(self._objective, \n model_ID = model_ID,\n model_dict = model_dict, \n X = X_train, y=y_train, \n **kwargs)\n \n space = self._build_space(param_grid)\n \n if self.verbose>=4:\n self._plot_space(space)\n \n best_params_bad_keys = _hyperopt.fmin(fn = objective, \n space = space, \n algo = _hyperopt.tpe.suggest, \n max_evals = self.max_evals, \n trials = _hyperopt.Trials(),\n verbose = self.verbose)\n # hyperopt doesn't return the best params dict with keys matching the 'space' keys.\n # This breaks handling of 'log10.' transformed parameters. Fix is implemented below\n best_params_ = {}\n for key in space.keys():\n best_params_[key] = best_params_bad_keys[key.replace('log10.','')]\n if self.verbose>=3:\n print('hyperopt_input_best_params_:',best_params_)\n \n best_score_ = self._objective(best_params_, \n model_ID,\n model_dict = model_dict, \n X = X_train, y=y_train)['loss']\n \n #transform params back to original model values\n best_params_, best_model_ = self._update_model_params(best_params_, model_ID, model, param_grid)\n \n if self.verbose>=3:\n print('model_input_best_params_:',best_params_)\n \n \n if refit:\n if 'sklearn' in type_model or 'xgboost' in type_model:\n if y_train.shape[1]==1:\n y_train = _np.array(y_train).reshape(-1,)\n best_model_.fit(X_train, y_train)\n else: #using neural net function\n import tensorflow as _tf\n \n if 'dataframe' in str(type(X_train)).lower():\n X_train = _np.array(X_train)\n X_test = _np.array(X_test)\n if 'dataframe' in str(type(y_train)).lower():\n y_train = _np.array(y_train)\n y_test = _np.array(y_test)\n \n #check for kwargs\n epochs = 100\n batch_size = 32\n callbacks = [_tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience =10)]\n for item in kwargs.items():\n if 'epochs' in item[0]: \n epochs = item[1]\n elif 'batch_size' in item[0]: \n batch_size = item[1]\n elif 'callbacks' in item[0]: \n callbacks = item[1]\n \n history = best_model_.fit(x= X_train, \n y= y_train, \n validation_data=(X_test, y_test),\n batch_size=batch_size, \n epochs = epochs, \n verbose= max(0,self.verbose-2), \n callbacks = callbacks)\n \n model_dict['best_params'] = best_params_\n model_dict['best_model'] = best_model_\n model_dict['best_cv_score'] = best_score_ \n \n if 'sklearn' in model_type or 'xgboost' in model_type:\n self.save(model_dict, 'model_dict', 'dill', path_model_dir)\n else:\n if _os.path.isdir(path_model_dir)==False:\n _os.makedirs(path_model_dir)\n best_model_.save(_os.path.join(path_model_dir, 'best_model.h5')) \n self.save(model_dict['best_params'], 'best_params', 'dill', path_model_dir)\n \n return model_dict",
"def explain(self):\n # build the 2 versions of the model\n model = self.build_model()\n last_conv_model = self.build_cut_model()\n\n for i, label_name in enumerate(self.label_names):\n # This is the algorithm for the last convolution layer's tensor image\n # Get the index of the image that was classified correctly with the most confidence for the class\n predicted_col_proba = np.array(self.predicted_labels)[0][:, i]\n predicted_col_argsort = predicted_col_proba.argsort()[::-1]\n predicted_col = (predicted_col_proba > 0.2).astype(int)\n true_col = self.true_labels[:, 0]\n\n representative_image_index = None\n for most_probable_arg_index in predicted_col_argsort:\n if predicted_col[most_probable_arg_index] == true_col[most_probable_arg_index]:\n representative_image_index = most_probable_arg_index\n break\n\n # Resize the image to fit the neural network and keep the original resized image\n original_img = io.imread('{}/{}/{}'.format(path_to_img_directory, self.ex_format, np.array(self.image_names)[representative_image_index]))\n original_img = cv2.normalize(original_img, None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)\n original_img = cv2.resize(original_img, dsize=(self.ex_input_size, self.ex_input_size), interpolation=cv2.INTER_CUBIC)\n img = np.expand_dims(original_img, axis=0)\n original_img = original_img[:, :, :3]\n\n # Get the output of the neural network for this image as a tensor\n model.predict(np.array(img))\n class_output = model.output[:, i]\n last_conv_layer = model.get_layer(self.ex_last_conv_layer_name1).output\n # if self.model_name == 'vit':\n # last_conv_layer = tf.nn.relu(tf.reshape(last_conv_layer[:, :256, :], (-1, 16, 16, 1024)))\n\n # Get the output for the cut model\n cut_img = last_conv_model.predict(np.array(img))[0]\n if self.model_name == 'vit':\n cut_img = np.reshape(cut_img[:256, :], (16, 16, 1024))\n cut_img = np.mean(cut_img, axis=-1)\n cut_img = cv2.normalize(cut_img, None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)\n if self.model_name == 'vit':\n cut_img[0, 0] = np.mean(cut_img)\n cut_img = cv2.normalize(cut_img, None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)\n cut_img = cv2.resize(cut_img, (self.ex_input_size, self.ex_input_size))\n\n # This is the algorithm of the Grad-CAM model\n # Refine the output of the last convolutional layer according to the class output\n grads = K.gradients(class_output, last_conv_layer)[0]\n if self.model_name == 'vit':\n last_conv_layer = tf.reshape(last_conv_layer[:, :256, :], (-1, 16, 16, 1024))\n last_conv_layer = last_conv_layer / tf.norm(last_conv_layer)\n\n grads = tf.reshape(grads[:, :256, :], (-1, 16, 16, 1024))\n grads = grads / tf.norm(grads)\n\n pooled_grads = K.mean(grads, axis=(0, 1, 2))\n iterate = K.function([model.input], [pooled_grads, last_conv_layer[0]])\n pooled_grads_value, conv_layer_output_value = iterate([img])\n for j in range(self.ex_last_conv_layer_filter_number):\n conv_layer_output_value[:, :, j] *= pooled_grads_value[j]\n\n # Create a 16x16 heatmap and scale it to the same size as the original image\n heatmap = np.mean(conv_layer_output_value, axis=-1)\n heatmap = np.maximum(heatmap, 0)\n heatmap /= np.max(heatmap)\n heatmap = cv2.resize(heatmap, (self.ex_input_size, self.ex_input_size))\n heatmap = np.uint8(255 * heatmap)\n heatmap = cv2.applyColorMap(heatmap, cv2.COLORMAP_JET)\n heatmap = cv2.normalize(heatmap, None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)\n superimposed_img = cv2.addWeighted(original_img, 0.7, 
heatmap, 0.4, 0)\n\n # save the original image\n plt.matshow(original_img)\n plt.axis('off')\n plt.title(label_name, fontdict={'fontsize': 18})\n plt.savefig('{}/{}/{}_{}.png'.format(path_to_explainable, 'original', self.model_name, label_name), bbox_inches='tight', pad_inches=0.1)\n\n # save the cut image\n plt.matshow(cut_img, cmap=plt.get_cmap('Spectral'))\n plt.colorbar(shrink=0.75, ticks=np.linspace(0, 1, 11).tolist())\n plt.axis('off')\n plt.title(label_name, fontdict={'fontsize': 18})\n plt.savefig('{}/{}/{}_{}.png'.format(path_to_explainable, 'cut', self.model_name, label_name), bbox_inches='tight', pad_inches=0.1)\n\n # save the superimposed gradcam image\n plt.matshow(superimposed_img, cmap=plt.get_cmap('Spectral'))\n plt.colorbar(shrink=0.75, ticks=np.linspace(0, 1, 11).tolist())\n plt.axis('off')\n plt.title(label_name, fontdict={'fontsize': 18})\n plt.savefig('{}/{}/{}_{}.png'.format(path_to_explainable, 'gradcam', self.model_name, label_name), bbox_inches='tight', pad_inches=0.1)",
"def optimize(self, model):\n model.optimize_params(\n max_iters=self.max_iters, max_beta_iters=self.max_beta_iters,\n max_U_iters=self.max_U_iters, rel_tol=self.rel_tol,\n optimize_beta=self.optimize_beta, optimize_U=self.optimize_U,\n compute_D=self.compute_D\n )\n return model",
"def solve_SVM_dual_SMO(x_train, y_train, x_test, C=1):\n n, d = x_train.shape[0], x_train.shape[1]\n alpha = np.zeros((n))\n count = 0\n while True:\n count += 1\n alpha_prev = np.copy(alpha)\n for j in range(0, n):\n # Getting random int i!=j\n i = j\n cnt=0\n while i == j and cnt<1000:\n i = rnd.randint(0,n-1)\n cnt=cnt+1\n x_i, x_j, y_i, y_j = x_train[i,:], x_train[j,:], y_train[i], y_train[j]\n k_ij = (np.dot(x_i, x_i.T)) + (np.dot(x_j, x_j.T) ) - (2 * np.dot(x_i, x_j.T))\n if k_ij <= 0:\n continue\n alpha_prime_j, alpha_prime_i = alpha[j], alpha[i]\n if(y_i != y_j):\n (L,H) = (max(0, alpha_prime_j - alpha_prime_i), min(C, C - alpha_prime_i + alpha_prime_j))\n else:\n (L,H) = (max(0, alpha_prime_i + alpha_prime_j - C), min(C, alpha_prime_i + alpha_prime_j))\n if(L==H):\n continue\n # Computing model parameters\n w = np.dot(x_train.T, np.multiply(alpha,y_train))\n b = np.mean(y_train - np.dot(w.T, x_train.T))\n E_i = np.sign(np.dot(w.T, x_i.T) + b).astype(int) - y_i\n E_j = np.sign(np.dot(w.T, x_j.T) + b).astype(int) - y_j\n # Setting new alpha values(Lagrange multipliers)\n alpha[j] = alpha_prime_j + float(y_j * (E_i - E_j))/k_ij\n alpha[j] = max(alpha[j], L)\n alpha[j] = min(alpha[j], H)\n alpha[i] = alpha_prime_i + y_i*y_j * (alpha_prime_j - alpha[j])\n # Checking for convergence\n diff = np.linalg.norm(alpha - alpha_prev)\n if diff < 0.000000001:\n break\n # Computing weights and bias\n b = np.mean(y_train-np.dot(w.T,x_train.T))\n w = np.dot(x_train.T, np.multiply(alpha,y_train))\n y_pred_test = (np.sign(np.dot(w.T, x_test.T) + b).astype(int))\n return (y_pred_test,alpha)",
"def optimize_parameters(self):\r\n # forward\r\n self.forward() # compute fake image/video and reconstruction image/video\r\n\r\n # D_A\r\n self.set_requires_grad([self.D_V], True)\r\n self.set_requires_grad([self.G_t, self.G_u, self.Att, self.classifier], False)\r\n self.optimizer_D.zero_grad() # set D_V's gradients to zero\r\n self.backward_D_V() # calculate graidents for D_V\r\n self.optimizer_D.step() # update D_A's weights\r\n\r\n # G_A and G_B\r\n self.set_requires_grad([self.D_V], False) # Ds require no gradients when optimizing Gs\r\n self.set_requires_grad([self.G_t, self.G_u, self.Att, self.classifier], True)\r\n self.optimizer_G.zero_grad() # set G_t,G_u,Att,classifier's gradients to zero\r\n self.backward_G() # calculate gradients for G_A and G_B\r\n self.optimizer_G.step() # update G_A and G_B's weights\r",
"def optimize(self, X, y):\n print(\"Performing TPOT genetic optimization.\")\n self.model.fit(X, y)\n self.optimized = True",
"def esvm(encs_test, encs_train, C=1000):\n\n\n # set up labels\n # TODO\n encs_test=np.array(encs_test).squeeze(1)\n encs_train=np.array(encs_train).squeeze(1)\n\n label=np.ones((encs_train.shape[0]+1,1))\n label[-1,0]=-1\n label *=-1\n label=label.ravel()\n print(label.shape)\n print(label)\n def loop(i):\n # compute SVM \n # and make feature transformation\n # TODO\n svc = LinearSVC(C=C,class_weight='balanced')\n test_added=encs_test[i].reshape(1,-1)\n encs_train_new=np.concatenate((encs_train,test_added),axis=0)\n svc.fit(encs_train_new, label)\n\n return svc.coef_\n\n # let's do that in parallel: \n # if that doesn't work for you, just exchange 'parmap' with 'map'\n # Even better: use DASK arrays instead, then everything should be\n # parallelized\n new_encs = list(map( loop, tqdm(range(len(encs_test)))))\n new_encs = np.concatenate(new_encs, axis=0)\n # return new encodings\n\n return new_encs",
"def hog_extra_and_svm_class(proposal, clf, resize = (64, 64)):\n img = cv2.cvtColor(proposal, cv2.COLOR_BGR2GRAY)\n img = cv2.resize(img, resize)\n bins = 9\n cell_size = (8, 8)\n cpb = (2, 2)\n norm = \"L2\"\n features = ft.hog(img, orientations=bins, pixels_per_cell=cell_size, \n cells_per_block=cpb, block_norm=norm, transform_sqrt=True)\n print \"feature = \", features.shape\n features = np.reshape(features, (1,-1))\n cls_prop = clf.predict_proba(features)\n print(\"type = \", cls_prop)\n print \"cls prop = \", cls_prop\n return cls_prop",
"def min_detection_strategy(self, init_model_infos):\n model_folder = 'min_detection_model_v2'\n result = self.detection(init_model_infos)\n # here add some methods to make select more 'clever'\n rank_hard_images = sorted(result.items(), key=lambda item:item[1], reverse=True)\n total_amount = 30\n trained_images = []\n # Select most hard images (30 as a step)\n # Start training with select images\n while total_amount < 150:\n al_model = TrainingProcess()\n al_model_data = []\n \"\"\"\n # CEAL to get better result pick 15 most hard and 15 most easy\n for item in rank_hard_images[:20]:\n al_model_data.append(item[0])\n trained_images.append(item[0])\n for item in rank_hard_images[-10:]:\n al_model_data.append(item[0])\n trained_images.append(item[0])\n print('select images are:', al_model_data)\n \"\"\"\n # To keep the distribution same, take the package that have the most hard images for training\n package_distrib = [0] * 11\n for item in rank_hard_images[:30]:\n package_distrib[(int(item[0].split('.')[0]) -1) // 30] += 1\n package_id = package_distrib.index(max(package_distrib))\n image_to_package_dir = os.path.join(DATA_DIR, \"package%s\" % package_id)\n al_model_data = os.listdir(image_to_package_dir)\n print('select package are:', package_id)\n print('select images are:', al_model_data)\n total_amount += 30\n if total_amount == 60:\n last_model_info = init_model_infos\n else:\n last_model_info = al_model_info\n last_model_path = os.path.join(last_model_info[0], last_model_info[1] + '.h5')\n last_model_weights = os.path.join(MODEL_DIR, last_model_path)\n al_model_info = [model_folder, '%s_images_model' % total_amount]\n al_model.train_model(al_model_data, al_model_info, self.dataset_val, cur_model_path=last_model_weights)\n al_model.mAP_of_model(al_model_info, self.dataset_val)\n result = self.detection(al_model_info, trained_images)\n rank_hard_images = sorted(result.items(), key=lambda item:item[1], reverse=True)\n del al_model\n print(\"Ending selection\")",
"def optimize(self, best_func):\n nb_clf = Pipeline(steps=[('vect', TfidfVectorizer()), ('clf', best_func)])\n parameters = {\n 'vect__stop_words': [None, 'english'],\n }\n gs_clf = GridSearchCV(nb_clf, parameters, scoring='accuracy')\n gs_clf = gs_clf.fit(self.train.text, self.train.gender)\n print(\"Best parameters: \" + str(gs_clf.best_params_))\n print('Best score: ' + str(gs_clf.best_score_))\n print('=' * 80)\n return gs_clf.best_params_",
"def tuned_for_ec():\n # TODO(theosanderson): update these to true SOTA values\n hparams = contrib_training.HParams()\n hparams.add_hparam('gradient_clipping_decay', 0.9999)\n hparams.add_hparam('batch_style', 'bucket')\n hparams.add_hparam('batch_size', 34)\n hparams.add_hparam('dilation_rate', 5)\n hparams.add_hparam('filters', 411)\n hparams.add_hparam('first_dilated_layer', 1) # This is 0-indexed\n hparams.add_hparam('kernel_size', 7)\n hparams.add_hparam('num_layers', 5)\n hparams.add_hparam('pooling', 'mean')\n hparams.add_hparam('resnet_bottleneck_factor', 0.88152)\n hparams.add_hparam('lr_decay_rate', 0.9977)\n hparams.add_hparam('learning_rate', 0.00028748)\n hparams.add_hparam('decision_threshold', 0.3746)\n hparams.add_hparam('denominator_power', 0.88)\n\n hparams.add_hparam('train_steps', 650000)\n return hparams",
"def optimization_parameters():\n param_distributions = {\n \"n_estimators\": list(range(50, 300, 50)),\n \"max_features\": [\"auto\", \"log2\"],\n \"max_depth\": list(range(1, 21, 2)),\n \"min_samples_leaf\": list(range(4, 22, 2)),\n \"min_samples_split\": list(range(5, 30, 5)),\n \"criterion\": [\"gini\", \"entropy\"],\n }\n param_grid = {\n \"n_estimators\": list(range(50, 300, 50)),\n \"max_depth\": list(range(1, 21, 2)),\n \"min_samples_leaf\": list(range(4, 22, 2)),\n \"min_samples_split\": list(range(5, 30, 5)),\n \"criterion\": [\"gini\", \"entropy\"],\n }\n\n rfc = RandomForestClassifier()\n\n # 5 * 10 * 9 * 5 * 2 = 4500 iterations\n # will take a lot of time\n model = GridSearchCV(\n estimator=rfc,\n param_grid=param_grid,\n scoring=\"accuracy\",\n verbose=10,\n n_jobs=1,\n cv=5,\n )\n # initiates Randomized Search \n model = RandomizedSearchCV(\n estimator=rfc,\n param_distributions=param_distributions,\n n_iter=20,\n scoring='accuracy',\n verbose=10,\n n_jobs=1,\n cv=5,\n )\n \n # fit and predict the model\n model.fit(x_train, y_train)\n pred = model.predict(x_test)\n \n # define evaluation metric as accuracy score\n acc = accuracy_score(y_test, pred) * 100\n print(f\"RandomForestClassifier with GridSearchCV: {acc:0.2f}%\")\n print(\"Best parameters set:\")\n\n # extract best parameters \n best_parameters = model.best_estimator_.get_params()\n for param_name in sorted(param_grid.keys()):\n print(f\"\\t{param_name}: {best_parameters[param_name]}\")",
"def tune_params(self, X_train, Y_train):\n return self.model # No hyper-parameter tuning",
"def estimate_params(self, thresh=1e-5, max_iter=15):\n em = EM(self.obs, self.theta, thresh=thresh, max_iter=max_iter)\n self.estimate = em.estimate_params()\n self.likelihood = em.lhood\n self.initial_likelihood = em.calculate_likelihood(theta=self.theta)",
"def tune(runner, kernel_options, device_options, tuning_options):\n\n #Bayesian Optimization strategy seems to need some hyper parameter tuning to\n #become better than random sampling for auto-tuning GPU kernels.\n\n #alpha, normalize_y, and n_restarts_optimizer are options to\n #https://scikit-learn.org/stable/modules/generated/sklearn.gaussian_process.GaussianProcessRegressor.html\n #defaults used by Baysian Optimization are:\n # alpha=1e-6, #1e-3 recommended for very noisy or discrete search spaces\n # n_restarts_optimizer=5,\n # normalize_y=True,\n\n #several exploration friendly settings are: (default is acq=\"ucb\", kappa=2.576)\n # acq=\"poi\", xi=1e-1\n # acq=\"ei\", xi=1e-1\n # acq=\"ucb\", kappa=10\n\n if not bayes_opt_present:\n raise ImportError(\"Error: optional dependency Bayesian Optimization not installed\")\n\n #defaults as used by Bayesian Optimization Python package\n acq = tuning_options.strategy_options.get(\"method\", \"poi\")\n kappa = tuning_options.strategy_options.get(\"kappa\", 2.576)\n xi = tuning_options.strategy_options.get(\"xi\", 0.0)\n init_points = tuning_options.strategy_options.get(\"popsize\", 5)\n n_iter = tuning_options.strategy_options.get(\"maxiter\", 25)\n\n tuning_options[\"scaling\"] = True\n\n results = []\n\n #function to pass to the optimizer\n def func(**kwargs):\n args = [kwargs[key] for key in tuning_options.tune_params.keys()]\n return -1.0 * minimize._cost_func(args, kernel_options, tuning_options, runner, results)\n\n bounds, _, _ = minimize.get_bounds_x0_eps(tuning_options)\n pbounds = OrderedDict(zip(tuning_options.tune_params.keys(),bounds))\n\n verbose=0\n if tuning_options.verbose:\n verbose=2\n\n optimizer = BayesianOptimization(f=func, pbounds=pbounds, verbose=verbose)\n\n optimizer.maximize(init_points=init_points, n_iter=n_iter, acq=acq, kappa=kappa, xi=xi)\n\n if tuning_options.verbose:\n print(optimizer.max)\n\n return results, runner.dev.get_environment()"
] | [
"0.6111361",
"0.5912575",
"0.5642402",
"0.5623126",
"0.5623126",
"0.5623126",
"0.55785716",
"0.5568076",
"0.5529505",
"0.55088013",
"0.54948574",
"0.5481435",
"0.5458506",
"0.5439299",
"0.5435268",
"0.54325897",
"0.5422759",
"0.5416318",
"0.5403779",
"0.5397329",
"0.53708524",
"0.53639776",
"0.53638685",
"0.5360692",
"0.5326519",
"0.5313918",
"0.5292927",
"0.5285315",
"0.528365",
"0.5280364"
] | 0.6573799 | 0 |
Compile and install the fake library used for testing | def fake(ctx, clean=False):
work_dir = join(PROJ_ROOT, "func", "dynlink")
build_dir = join(PROJ_ROOT, "build", "libfake")
clean_dir(build_dir, clean)
build_cmd = [
"cmake",
"-GNinja",
"-DFAASM_BUILD_SHARED=ON",
"-DFAASM_BUILD_TYPE=wasm",
"-DCMAKE_TOOLCHAIN_FILE={}".format(CMAKE_TOOLCHAIN_FILE),
"-DCMAKE_BUILD_TYPE=Release",
"-DCMAKE_INSTALL_PREFIX={}".format(WASM_SYSROOT),
work_dir,
]
run(" ".join(build_cmd), shell=True, cwd=build_dir, check=True)
run("ninja", shell=True, cwd=build_dir, check=True)
run("ninja install", shell=True, cwd=build_dir, check=True)
# Copy shared object into place
sysroot_files = join(WASM_SYSROOT, "lib", "wasm32-wasi", "libfake*.so")
runtime_lib_dir = join(FAASM_RUNTIME_ROOT, "lib")
if not exists(runtime_lib_dir):
makedirs(runtime_lib_dir)
run(
"cp {} {}".format(sysroot_files, runtime_lib_dir),
shell=True,
check=True,
)
# Update env
shell_env = copy(environ)
shell_env.update(
{
"LD_LIBRARY_PATH": "/usr/local/lib/",
}
)
# Run codegen
shared_objs = [
join(FAASM_RUNTIME_ROOT, "lib", "libfakeLibA.so"),
join(FAASM_RUNTIME_ROOT, "lib", "libfakeLibB.so"),
]
binary = find_codegen_shared_lib()
for so in shared_objs:
print("Running codegen for {}".format(so))
run("{} {}".format(binary, so), env=shell_env, shell=True, check=True) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_project_with_dependencies(self):\n self.make_project()\n # 'test_library.zip' is not currently compiled for diorite.\n self.project.app_platforms = \"aplite,basalt,chalk\"\n self.project.save()\n tempdir = tempfile.mkdtemp()\n try:\n # Extract a premade library to a temporary directory\n ZipFile(LIBRARY_PATH).extractall(tempdir)\n lib_path = os.path.join(tempdir, 'libname')\n\n # Include the library in the code and package.json\n self.add_file(\"main.c\", DEPENDENCY_MAIN)\n self.project.set_dependencies({\n 'libname': lib_path\n })\n\n # Compile and check\n self.compile()\n self.check_compile_success(num_platforms=3)\n finally:\n shutil.rmtree(tempdir)",
"def compile_package(self):\n build_package = [\n self.mock,\n '--root=%s' % self.root,\n '--arch=%s' % self.arch,\n '--shell',\n '/build_package.sh',\n \n ]\n output, errors = self._run_command(build_package)",
"def test_install(self):\n pass",
"def run(self):\n self._make_lib_file_symbolic_links()\n self._copy_each_include_files_to_include_dir()\n self._make_dep_lib_file_sym_links_and_copy_include_files()\n self.setup_py.add_patchs_to_build_without_pkg_config(\n self.rpm.lib_dir, self.rpm.include_dir\n )\n self.setup_py.apply_and_save()\n self._build_and_install()",
"def test_add(tmpdir):\n chdir(tmpdir)\n subprocess.run(['leanpkg', 'init', 'project'])\n fix_leanpkg_bug()\n subprocess.run(['leanproject', 'add-mathlib'])\n assert (tmpdir/'_target'/'deps'/'mathlib'/'src'/'algebra'/'free.olean').exists()",
"def main():\n\n util.protontricks('mf_install')",
"def setup_lib(CLIB):\n # {{ SETUP_LIB }}",
"def test_1_make(self):\n #We can compile all these modules together into a single shared library.\n writer = self.writers.values()[0]\n self.code = writer.make(remake=True, dependencies=self.dependencies)\n self.assertEqual(self.code, 0)",
"def install():\n build()\n sh(\"%s setup.py develop\" % PYTHON)",
"def test_native_SDK3_project(self):\n self.make_project()\n self.add_file(\"main.c\", SIMPLE_MAIN)\n self.compile()\n self.check_compile_success()\n\n # self.check_package_manifest(manifest, package_options={'dependencies': deps})",
"def test_project_with_interdependencies(self):\n self.make_project()\n # Build the package\n package = Project.objects.create(name='test', sdk_version='3', project_type='package', app_short_name='libname', owner_id=self.user_id)\n self.add_file(\"whatever.c\", LIBRARY_C, project=package)\n self.add_file(\"whatever.h\", LIBRARY_H, project=package)\n package_build_result = BuildResult.objects.create(project=package)\n run_compile(package_build_result.id)\n # Set up the project which depends on the package\n self.project.project_dependencies.add(package)\n self.add_file(\"main.c\", DEPENDENCY_MAIN)\n self.compile()\n self.check_compile_success()",
"def setUp(self):\n self.framework = FrameworkFactory.get_framework()\n self.framework.start()\n self.ipopo = install_ipopo(self.framework)\n\n # Install the test bundle\n self.module = install_bundle(self.framework)",
"def local(ctx):\n _do_codegen_user(\"demo\")\n _do_codegen_user(\"errors\")\n _do_codegen_user(\"mpi\")\n _do_codegen_user(\"omp\")\n _do_codegen_user(\"python\")\n\n # Do codegen for libfake\n for so in LIB_FAKE_FILES:\n _do_codegen_file(so)\n\n # Run the WAMR codegen required by the tests\n codegen(ctx, \"demo\", \"echo\", wamr=True)\n codegen(ctx, \"demo\", \"chain\", wamr=True)\n\n # Run the SGX codegen required by the tests\n codegen(ctx, \"demo\", \"hello\", wamr=True, sgx=True)\n codegen(ctx, \"demo\", \"chain_named_a\", wamr=True, sgx=True)\n codegen(ctx, \"demo\", \"chain_named_b\", wamr=True, sgx=True)\n codegen(ctx, \"demo\", \"chain_named_c\", wamr=True, sgx=True)",
"def test_install(ctx):\n ctx.run(\"pip uninstall {PROJECT_NAME} --yes\".format(PROJECT_NAME=PROJECT_NAME), warn=True)\n ctx.run(\"pip install --no-cache-dir --no-index --find-links=file:./dist {PROJECT_NAME}\".format(PROJECT_NAME=PROJECT_NAME))\n ctx.run(\"pip uninstall {PROJECT_NAME} --yes\".format(PROJECT_NAME=PROJECT_NAME))",
"def setUpClass(cls) -> None:\n helper_funcs.setup_mock_files() # setup mock files\n cls.module, cls.ffi = helper_funcs.load(cls._filenames, cls._function_names,\n header_includes=cls.helper_include,\n compiled_file_end=\"helper_func\")",
"def link_lib_test_fun(self):\n\tdef write_test_file(task):\n\t\ttask.outputs[0].write(task.generator.code)\n\n\trpath = []\n\tif getattr(self, 'add_rpath', True):\n\t\trpath = [self.bld.path.get_bld().abspath()]\n\tbld = self.bld\n\tbld(rule=write_test_file, target='test.c', code='int lib_func(void) { return 9; }\\n')\n\tbld(rule=write_test_file, target='main.c', code='int main(void) {return !(lib_func() == 9);}\\n')\n\tbld(features='c cshlib', source='test.c', target='test')\n\tbld(features='c cprogram test_exec', source='main.c', target='app', uselib_local='test', rpath=rpath)",
"def _Install(vm):\n nthreads = vm.NumCpusForBenchmark() * 2\n vm.Install('build_tools')\n vm.RemoteCommand('git clone {0} {1}'.format(GIT_REPO, SILO_DIR))\n vm.RemoteCommand('cd {0} && git checkout {1}'.format(SILO_DIR,\n GIT_TAG))\n # This is due to a failing clone command when executing behind a proxy.\n # Replacing the protocol to https instead of git fixes the issue.\n vm.RemoteCommand('git config --global url.\"https://\".insteadOf git://')\n # Disable -Wmaybe-uninitialized errors when GCC has the option to workaround\n # a spurious error in masstree.\n cxx = '\"g++ -std=gnu++0x \\\n $(echo | gcc -Wmaybe-uninitialized -E - >/dev/null 2>&1 && \\\n echo -Wno-error=maybe-uninitialized)\"'\n vm.RemoteCommand(\n 'cd {0} && CXX={2} MODE=perf DEBUG=0 CHECK_INVARIANTS=0 make -j{1} dbtest'\n .format(SILO_DIR, nthreads, cxx))",
"def test_manual_install_1(monkeypatch):\n\n monkeypatch.setattr(platform, 'system', lambda: 'Linux')\n monkeypatch.setattr(platform, 'machine', lambda: 'x86_64')\n monkeypatch.setattr(tempfile, 'mkdtemp', lambda: '/tmp/tempdir')\n monkeypatch.setattr(shutil, 'rmtree', lambda path: True)\n monkeypatch.setattr(shutil, 'copyfileobj', lambda src, dest: True)\n monkeypatch.setattr(os, 'listdir', lambda path: [\n 'terraform-provider-terraform_v0.11.2_x4', 'pkg1', 'pkg2'])\n monkeypatch.setattr(os, 'chmod', lambda path, permissions: True)\n\n def mp_zip_file(dest, mode):\n class MockedZipFile:\n def extractall(self, dest):\n return True\n\n def close(self):\n return True\n\n return MockedZipFile()\n\n def mp_url_open(url):\n class MockedUrlOpen:\n def __enter__(self):\n return 'content'\n\n def __exit__(self, type, value, traceback):\n pass\n\n return MockedUrlOpen()\n\n def mp_open(file, mode):\n class MockedOpen:\n def __enter__(self):\n return 'content'\n\n def __exit__(self, type, value, traceback):\n pass\n\n return MockedOpen()\n\n monkeypatch.setattr(urllib.request, 'urlopen', mp_url_open)\n monkeypatch.setattr(builtins, 'open', mp_open)\n\n monkeypatch.setattr(zipfile, 'ZipFile', mp_zip_file)\n\n manual_install(['[email protected]', 'pkg2'], '/tmp/stone-burner_plugins')",
"def set_up(dev=False):\n _install_dependencies()",
"def setUp(self):\n # After stage1:\n # TODO: use this form after implementing a fixer to consolidate\n # __future__ imports into a single line:\n # self.headers1 = \"\"\"\n # from __future__ import absolute_import, division, print_function\n # \"\"\"\n self.headers1 = reformat_code(\"\"\"\n from __future__ import absolute_import\n from __future__ import division\n from __future__ import print_function\n \"\"\")\n\n # After stage2 --all-imports:\n # TODO: use this form after implementing a fixer to consolidate\n # __future__ imports into a single line:\n # self.headers2 = \"\"\"\n # from __future__ import (absolute_import, division,\n # print_function, unicode_literals)\n # from future import standard_library\n # from future.builtins import *\n # \"\"\"\n self.headers2 = reformat_code(\"\"\"\n from __future__ import absolute_import\n from __future__ import division\n from __future__ import print_function\n from __future__ import unicode_literals\n from future import standard_library\n standard_library.install_aliases()\n from builtins import *\n \"\"\")\n self.interpreters = [sys.executable]\n self.tempdir = tempfile.mkdtemp() + os.path.sep\n pypath = os.getenv('PYTHONPATH')\n if pypath:\n self.env = {'PYTHONPATH': os.getcwd() + os.pathsep + pypath}\n else:\n self.env = {'PYTHONPATH': os.getcwd()}",
"def setUp(self):\n self.setUpPyfakefs()",
"def _install():\n download_file='http://www.ipol.im/pub/art/2015/136/inpaint_8.tgz'\n tools.download_and_extract(download_file) \n this_file_path=os.path.dirname(__file__)\n subprocess.call(' mkdir build; cd build; cmake ..; make', shell=True,cwd=exec_folder)",
"def test_universal64_executable(self):\n self.build(debug_info=\"dsym\")\n self.do_test()",
"def test_universal64_executable(self):\n self.build(debug_info=\"dsym\")\n self.do_test()",
"def setUp(self):\n trytond.tests.test_tryton.install_module('nereid_webshop')",
"def install_test_deps():\n workon = '.'\n if VENVWRAPPER:\n workon=os.getenv(\"WORKON_HOME\")\n cmd = '{workon}/{env}/bin/pip install nose-cov webtest mock'.format(\n envs=ENVS, env=VENV, workon=workon)\n print(cmd)\n subprocess.call(cmd.split())",
"def test_arm_c_lib(self):\n mock_target = mock.MagicMock()\n mock_target.core = \"Cortex-M4\"\n mock_target.supported_c_libs = {\"arm\": [\"small\"]}\n mock_target.c_lib = \"sMALL\"\n del mock_target.default_lib\n mock_target.default_toolchain = \"ARM\"\n mock_target.supported_toolchains = [\"ARM\", \"uARM\", \"ARMC5\", \"ARMC6\"]\n arm_std_obj = ARM_STD(mock_target)\n arm_micro_obj = ARM_MICRO(mock_target)\n\n mock_target.default_toolchain = \"ARMC6\"\n arm_c6_obj = ARMC6(mock_target)\n\n self.assertIn(\"-D__MICROLIB\", arm_std_obj.flags[\"common\"])\n self.assertIn(\"-D__MICROLIB\", arm_micro_obj.flags[\"common\"])\n self.assertIn(\"-D__MICROLIB\", arm_c6_obj.flags[\"common\"])\n\n self.assertIn(\"--library_type=microlib\", arm_std_obj.flags[\"ld\"])\n self.assertIn(\"--library_type=microlib\", arm_micro_obj.flags[\"ld\"])\n self.assertIn(\"--library_type=microlib\", arm_c6_obj.flags[\"ld\"]) \n self.assertIn(\"--library_type=microlib\", arm_c6_obj.flags[\"asm\"])",
"def setUpModule():\n # Create a temporary directory where we can create a fake notify-send\n # program that is guaranteed to exist and will run successfully, but\n # without actually bothering the user with interactive notifications.\n directory = tempfile.mkdtemp(prefix='rsync-system-backup-', suffix='-fake-path')\n TEMPORARY_DIRECTORIES.append(directory)\n fake_program = os.path.join(directory, 'notify-send')\n candidates = which('true')\n os.symlink(candidates[0], fake_program)\n # Add the directory to the $PATH.\n path = get_search_path()\n path.insert(0, directory)\n os.environ['PATH'] = os.pathsep.join(path)",
"def setUp(self):\r\n coloredlogs.install(level=logging.DEBUG)\r\n # Create a temporary working directory.\r\n self.working_directory = tempfile.mkdtemp()\r\n # Create a temporary build directory.\r\n self.build_directory = os.path.join(self.working_directory, 'build')\r\n # Create a temporary virtual environment.\r\n self.virtual_environment = os.path.join(self.working_directory, 'environment')\r\n python = 'python%i.%i' % (sys.version_info[0], sys.version_info[1])\r\n assert os.system('virtualenv --python=%s %s' % (pipes.quote(python), pipes.quote(self.virtual_environment))) == 0\r\n # Make sure pip-accel uses the pip in the temporary virtual environment.\r\n os.environ['PATH'] = '%s:%s' % (os.path.join(self.virtual_environment, 'bin'), os.environ['PATH'])\r\n os.environ['VIRTUAL_ENV'] = self.virtual_environment\r\n # Make pip and pip-accel use the temporary working directory.\r\n os.environ['PIP_DOWNLOAD_CACHE'] = os.path.join(self.working_directory, 'download-cache')\r\n os.environ['PIP_ACCEL_CACHE'] = self.working_directory\r\n # Initialize the required subdirectories.\r\n self.pip_accel = __import__('pip_accel')\r\n self.pip_accel.initialize_directories()",
"def _install(self):\n\n pass"
] | [
"0.6840887",
"0.62958485",
"0.61345935",
"0.60900086",
"0.60620475",
"0.60337037",
"0.6018115",
"0.60040337",
"0.60032463",
"0.59797657",
"0.5962672",
"0.5853615",
"0.5829548",
"0.5787476",
"0.5776545",
"0.5772395",
"0.577139",
"0.5742527",
"0.5735555",
"0.5723381",
"0.5722386",
"0.57017225",
"0.5696815",
"0.5696815",
"0.56465334",
"0.5637659",
"0.56274235",
"0.56209284",
"0.56167775",
"0.5616585"
] | 0.6825305 | 1 |
Create branches in the DAG. | def create_branches(branches, pcoll, provider_options):
logger.info('Branch count: %i' % len(branches))
pcoll_tuple = ()
for branch in branches:
logger.info('Adding branch')
output = create_graph(branch, pcoll, provider_options)
pcoll_tuple = pcoll_tuple + (output,)
logger.info('Transform: MergeBranches')
output = pcoll_tuple | 'MergeBranches' >> MergeBranches()
return output | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_branch(self):\n os.chdir(str(self.repository_path))\n sh.git.checkout('master')\n sh.git.checkout('-b', self.branch)\n logger.debug('Branch {} created', self.branch)",
"def main(github_token, branch_name, repository, sha):\n create_branch(github_token, branch_name, repository, sha)\n click.echo(f\"Successfully created branch {branch_name}\")",
"def __branch_factory(self, action, task_activities):\n branches = action.findall(\"./branches_Branch\")\n for branch in branches:\n branch_type = get_branch_type(branch=branch)\n if \"probabilistic\" == branch_type:\n return self.__add_probabilistic_branch(action=action, task_activities=self.task_activities)\n elif \"type\" == branch_type:\n return self.__add_type_branch(action=action, task_activities=task_activities)\n elif \"detailed\" == branch_type:\n return self.__add_detailed_branch(action=action, task_activities=task_activities)\n elif \"simple\" == branch_type:\n return self.__add_simple_branch(action=self.action, task_activities=self.task_activities)\n else:\n raise ValueError(\"Unknown branch_type. Abort Mission.\")",
"def _assign_branches(ctx, prl):\n heads = prl.set_heads\n if not heads:\n return None\n branch_dict = ctx.branch_dict()\n LOG.debug2('allowing branch creation: %s', ctx.branch_creation)\n # Assign branches to each of the received commits for pushed branches\n assigner = Assigner(branch_dict, heads, ctx)\n assigner.assign()\n return assigner",
"def create_branch(ctx, name, sha):\n\n try:\n\n gh = ctx.obj.github\n\n log.echo('Creating branch...', break_line=False)\n branch = gh.create_branch(name=name, sha=sha)\n log.checkmark()\n log.echo('Branch {} created at {}'.format(name, sha))\n return branch\n except BaseException as _:\n log.xmark()\n raise",
"def __add_simple_branch(self, action, task_activities):\n action_activity_name = create_activity_name_from_action(action=action)\n add_activity_to_task(task_activities=task_activities, activity_name=action_activity_name, hide_activity=True)\n\n post_or_element, stop_pre_or_tag = self.__add_pre_or_and_post_or(action=action,\n action_activity_name=action_activity_name,\n task_activities=task_activities)\n\n for branch in action.findall(\"./branches_Branch\"):\n branches_steps = branch.find(\"./branchBehaviour_BranchTransition\").findall(\".//steps_Behaviour\")\n branch_start_actions, final_successor_action = self.__get_final_successor_and_start(actions=branches_steps)\n # Create ActionFactory and add action\n self.__create_actions_for_all_branches(branches_steps, task_activities)\n\n branch_conditions = branch.findall(\".//branchCondition_GuardedBranchTransition\")\n for condition in branch_conditions:\n condition_types = self.__get_condition_types(condition)\n reference_name = self.__get_reference_name(condition_types=condition_types)\n variable_usage = get_element_by_identifier(attribute=\"referenceName\", search_string=reference_name,\n element_tree=self.xml_cache.get_xml_tree(\"usagemodel\"))\n\n for branch_start_action in branch_start_actions:\n parent = variable_usage.getparent()\n bool_exp = parent.find(\".//specification_VariableCharacterisation\").get(\"specification\")\n match_object = re.findall(r'true;+\\d\\.\\d*|false;+\\d\\.\\d*', bool_exp)\n # Get branch probability for post element\n branch_probability = \"0\"\n # First start action has false probability\n if \"NOT\" in condition_types:\n for matching_object in match_object:\n if \"false\" in matching_object:\n branch_probability = matching_object.split(\";\")[1]\n else:\n for matching_object in match_object:\n if \"true\" in matching_object:\n branch_probability = matching_object.split(\";\")[1]\n\n post_predecessor_activity_name = create_activity_name_from_action(action=branch_start_action)\n post_predecessor_activity = SubElement(post_or_element, 'activity')\n post_predecessor_activity.set(\"name\", post_predecessor_activity_name)\n post_predecessor_activity.set(\"prob\", branch_probability)\n\n self.__add_stop_action_precedences(final_successor_action, stop_pre_or_tag)",
"def create_builds(self):\n branches = self.search([('use_in_ci', '=', True)])\n branches.create_build()\n return True",
"def test_sql_branch_operator_postgres(self):\n branch_op = BranchSQLOperator(\n task_id=\"make_choice\",\n conn_id=\"postgres_default\",\n sql=\"SELECT 1\",\n follow_task_ids_if_true=\"branch_1\",\n follow_task_ids_if_false=\"branch_2\",\n dag=self.dag,\n )\n branch_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)",
"def _make_release_branch(self):\n user = getpass.getuser()\n if not user == self._user:\n raise Error('the command should only be run as user %s' % self._user)\n branch = self._branch\n # get the latest master updates\n subprocess.check_call('git remote update', shell=True)\n subprocess.check_call('git checkout master', shell=True)\n # does a git pull and updates the submodules\n GitUtil.update_submodules()\n # get the latest commit before the release is cut\n self._latest_commit = GitUtil.get_latest_commit()\n print 'Making release branch %s' % branch\n # create the new release branch\n GitUtil.create_branch(branch)\n print TermColor.ColorStr('Created remote branch %s' % branch, 'GREEN')",
"def __create_actions_for_all_branches(self, branches_steps, task_activities):\n for branch_action in branches_steps:\n action_factory = branch_factory.ActionFactory(xml_cache=self.xml_cache,\n mapping_cache=self.mapping_cache,\n input_data=self.input_data,\n action=branch_action,\n latency=self.latency,\n processor=self.processor\n ).create_action_factory(task_activities=task_activities)\n action_factory.add_action()",
"def test_heads_create_new_branch_at_another_branch(repository: Repository) -> None:\n main = repository.head\n branch1 = repository.heads.create(\"branch1\")\n\n repository.checkout(branch1)\n repository.commit()\n\n repository.checkout(main)\n branch2 = repository.heads.create(\"branch2\", branch1.commit)\n\n assert branch1.commit == branch2.commit",
"def create_branch_from_issue(jira_url, jira_username, jira_api_key, project_key, source_branch_name, issue_key):\n click.echo('Branch \"{}\" was created'.format(\n create_branch_func(\n source_branch_name, get_branch_name(jira_url, jira_username, jira_api_key, issue_key, project_key)\n )\n ))",
"def test_heads_create_new_branch_name(repository: Repository) -> None:\n branch = repository.heads.create(\"branch\", repository.head.commit)\n assert \"branch\" == branch.name",
"def test_heads_create_new_branch_commit(repository: Repository) -> None:\n branch = repository.heads.create(\"branch\", repository.head.commit)\n assert repository.head.commit == branch.commit",
"def branch(self, name, ref=\"HEAD\"):\n self._git.create_head(name, ref)\n self.checkout(name)",
"def create_topic_branch(self, topic_branch_name):\n print(\"Creating topic branch locally...\")\n self.git.checkout(self.base_branch)\n self.git.checkout('-b', topic_branch_name)\n print(\"Pushing topic branch to base branch's remote...\")\n self.git.push('-u', self.base_branch_remote(), topic_branch_name)",
"def create_branch(branch, orphaned=False, changeto=False, directory=None):\n current_branch = get_current_branch(directory)\n try:\n if orphaned:\n execute_command('git symbolic-ref HEAD refs/heads/' + branch,\n cwd=directory)\n execute_command('rm -f .git/index', cwd=directory)\n execute_command('git clean -fdx', cwd=directory)\n cmd = 'git commit --allow-empty -m \"Created orphaned branch '\\\n '{0}\"'.format(branch)\n execute_command(cmd, cwd=directory)\n if changeto:\n current_branch = None\n else:\n execute_command('git branch {0}'.format(branch), cwd=directory)\n if changeto:\n checkout(branch, directory=directory)\n current_branch = None\n finally:\n if current_branch is not None:\n checkout(current_branch, directory=directory)",
"def __add_detailed_branch(self, action, task_activities):\n add_activity_to_task(task_activities=task_activities,\n activity_name=self.activity_name,\n hide_activity=True)\n post_or_element, stop_pre_or_tag = self.__add_pre_or_and_post_or(action=self.action,\n action_activity_name=self.activity_name,\n task_activities=task_activities)\n branch_counter = 0\n branches = action.findall(\"./branches_Branch\")\n for branch in branches:\n branches_steps = branch.find(\"./branchBehaviour_BranchTransition\").findall(\"./steps_Behaviour\")\n branch_start_actions, final_successor_action = self.__get_final_successor_and_start(actions=branches_steps)\n self.__create_actions_for_all_branches(branches_steps, task_activities)\n # if branch is of type \"ProbabilisticBranchTransition\" it has entity \"branchProbability\":\n # use this to create or post tag\n branch_conditions = branch.findall(\".//branchCondition_GuardedBranchTransition\")\n for condition in branch_conditions:\n condition_types = self.__get_condition_types(condition)\n reference_name = self.__get_reference_name(condition_types=condition_types)\n variable_usage = get_element_by_identifier(attribute=\"referenceName\", search_string=reference_name,\n element_tree=self.xml_cache.get_xml_tree(\"usagemodel\"))\n for branch_start_action in branch_start_actions:\n parent = variable_usage.getparent()\n bool_exp = parent.find(\".//specification_VariableCharacterisation\").get(\"specification\")\n match_object = re.findall(r'\"t\";+\\d\\.\\d*|\"f\";+\\d\\.\\d*', bool_exp)\n # Get branch probability for post element\n branch_uuid, branch_probability = match_object[branch_counter].replace('\\\"', \"#\").split(\";\")\n post_predecessor_activity_name = create_activity_name_from_action(action=branch_start_action,\n uid_string=self.uid_string)\n post_predecessor_activity = SubElement(post_or_element, 'activity')\n post_predecessor_activity.set(\"name\", post_predecessor_activity_name)\n post_predecessor_activity.set(\"prob\", branch_probability)\n branch_counter += 1\n\n # Add precedence for stop actions\n self.__add_stop_action_precedences(final_successor_action, stop_pre_or_tag, uid=self.uid_string)",
"def test_branch_list_with_dag_run(self, mock_get_db_hook):\n branch_op = BranchSQLOperator(\n task_id=\"make_choice\",\n conn_id=\"mysql_default\",\n sql=\"SELECT 1\",\n follow_task_ids_if_true=[\"branch_1\", \"branch_2\"],\n follow_task_ids_if_false=\"branch_3\",\n dag=self.dag,\n )\n\n self.branch_1.set_upstream(branch_op)\n self.branch_2.set_upstream(branch_op)\n self.branch_3 = EmptyOperator(task_id=\"branch_3\", dag=self.dag)\n self.branch_3.set_upstream(branch_op)\n self.dag.clear()\n\n dr = self.dag.create_dagrun(\n run_id=\"manual__\",\n start_date=timezone.utcnow(),\n execution_date=DEFAULT_DATE,\n state=State.RUNNING,\n )\n\n mock_get_records = mock_get_db_hook.return_value.get_first\n mock_get_records.return_value = [[\"1\"]]\n\n branch_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)\n\n tis = dr.get_task_instances()\n for ti in tis:\n if ti.task_id == \"make_choice\":\n assert ti.state == State.SUCCESS\n elif ti.task_id == \"branch_1\":\n assert ti.state == State.NONE\n elif ti.task_id == \"branch_2\":\n assert ti.state == State.NONE\n elif ti.task_id == \"branch_3\":\n assert ti.state == State.SKIPPED\n else:\n raise ValueError(f\"Invalid task id {ti.task_id} found!\")",
"def create_branch(self, name, base_name, from_sha=False):\n\n logger.debug(\n 'GitHubAPI.create_branch: name={}, base_name={}'.format(\n name, base_name\n )\n )\n # raise an error if we can find the branch, continue if we get\n # a 404\n try:\n self.get_branch(name)\n except requests.exceptions.HTTPError:\n pass\n else:\n raise DuplicateBranchError(\n 'Branch already started. Run'\n '\\n\\tgit fetch --all && get checkout {}'.format(name)\n )\n\n if not from_sha:\n base = self.get_branch(base_name)\n base_sha = base['object']['sha']\n else:\n base_sha = base_name\n\n try:\n branch_info = {\n 'ref': 'refs/heads/{}'.format(name),\n 'sha': base_sha\n }\n except KeyError:\n logger.error('base repsonse: {}'.format(base))\n raise Exception(\n 'Could not locate the current SHA for '.format(base_name))\n\n resp = self.post('git/refs', json=branch_info)\n try:\n resp.raise_for_status()\n except Exception:\n logger.error(resp.json())\n raise\n\n return resp.json()",
"def make_branches(self, api_json=None):\n if api_json is None:\n return []\n\n obj = simplejson.loads(api_json)\n branches = [item[\"commit\"][\"sha\"] for item in obj]\n\n print branches\n\n return branches",
"def test_branch_true_with_dag_run(self, mock_get_db_hook):\n branch_op = BranchSQLOperator(\n task_id=\"make_choice\",\n conn_id=\"mysql_default\",\n sql=\"SELECT 1\",\n follow_task_ids_if_true=\"branch_1\",\n follow_task_ids_if_false=\"branch_2\",\n dag=self.dag,\n )\n\n self.branch_1.set_upstream(branch_op)\n self.branch_2.set_upstream(branch_op)\n self.dag.clear()\n\n dr = self.dag.create_dagrun(\n run_id=\"manual__\",\n start_date=timezone.utcnow(),\n execution_date=DEFAULT_DATE,\n state=State.RUNNING,\n )\n\n mock_get_records = mock_get_db_hook.return_value.get_first\n\n for true_value in SUPPORTED_TRUE_VALUES:\n mock_get_records.return_value = true_value\n\n branch_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)\n\n tis = dr.get_task_instances()\n for ti in tis:\n if ti.task_id == \"make_choice\":\n assert ti.state == State.SUCCESS\n elif ti.task_id == \"branch_1\":\n assert ti.state == State.NONE\n elif ti.task_id == \"branch_2\":\n assert ti.state == State.SKIPPED\n else:\n raise ValueError(f\"Invalid task id {ti.task_id} found!\")",
"def test_heads_create_new_branch_at_ancestor(repository: Repository) -> None:\n parent = repository.head.commit\n updatefile(repository.path / \"a\")\n branch = repository.heads.create(\"branch\", parent)\n assert parent == branch.commit",
"def create_network_postcommit(self, context):\n for _switch in self.switches:\n self._add_to_switch(_switch, context)",
"def test_instantiate_branch_node(self):\n try:\n BranchNode('my_name')\n except Exception:\n message = \"BranchNode instantiation failed\"\n self.fail(message)",
"def addBranches(self, values, nodes):\n \n if len(values) != len(nodes):\n raise ValueError('values and nodes must have the same length')\n \n for i in range(0, len(values)):\n self.branches[values[i]] = nodes[i]",
"def add_branch(self, branch, pidx=None, random_color=True):\n if random_color:\n rand_node_type = randrange(6, 257)\n\n new_branch = np.zeros((len(branch.pts), 8))\n id_start = 1 if self._data.shape[0] == 1 else self._data[:, 0].max() + 1\n\n for i in range(len(branch.pts)):\n p, r, c = branch.pts[i], branch.radius[i], branch.conf[i]\n id = id_start + i\n # 3 for basal dendrite; 4 for apical dendrite;\n # However now we cannot differentiate them automatically\n nodetype = 3\n\n if i == len(branch.pts) - 1: # The end of this branch\n pid = self._data[pidx, 0] if pidx is not None else -2\n if pid != -2 and pid != 0 and self._data.shape[0] != 1:\n # Its connected node is fork point\n self._data[self._data[:, 0] == pid, 1] = 5\n else:\n pid = id_start + i + 1\n if i == 0:\n nodetype = 6 # Endpoint\n\n assert pid != id\n new_branch[i] = np.asarray(\n [\n id,\n rand_node_type if random_color else nodetype,\n p[0],\n p[1],\n p[2],\n r,\n pid,\n c,\n ]\n )\n\n # Check if any tail should be connected to its tail\n tail = new_branch[0]\n matched, minidx = self.match(tail[2:5], tail[5])\n if matched and self._data[minidx, 6] == -2:\n self._data[minidx, 6] = tail[0]\n\n self._data = np.vstack((self._data, new_branch))",
"def test_heads_create_existing_branch_force(repository: Repository) -> None:\n head, heads = repository.head, repository.heads\n branch = heads.create(\"branch\", head.commit)\n updatefile(repository.path / \"a\")\n heads.create(branch.name, head.commit, force=True)\n assert head.commit == branch.commit",
"def branch_new(request, repo_id):\n repo = models.Repository.get_by_id(int(repo_id))\n if request.method != 'POST':\n form = BranchForm(initial={'url': repo.url,\n 'category': 'branch',\n })\n return respond(request, 'branch_new.html', {'form': form, 'repo': repo})\n form = BranchForm(request.POST)\n errors = form.errors\n if not errors:\n try:\n branch = models.Branch(\n repo_key=repo.key,\n category=form.cleaned_data.get('category'),\n name=form.cleaned_data.get('name'),\n url=form.cleaned_data.get('url'),\n )\n except (db.BadValueError, ValueError) as err:\n errors['__all__'] = unicode(err)\n if errors:\n return respond(request, 'branch_new.html', {'form': form, 'repo': repo})\n branch.repo_name = repo.name\n branch.put()\n return HttpResponseRedirect(reverse(repos))",
"def test_create_experiment_hit_branch(self):\n with OrionState(experiments=[config]) as cfg:\n experiment = create_experiment(\n config[\"name\"],\n space={\"y\": \"uniform(0, 10)\"},\n branching={\"enable\": True},\n storage=cfg.storage_config,\n )\n\n assert experiment.name == config[\"name\"]\n assert experiment.version == 2\n assert experiment.algorithm\n assert experiment.algorithm.configuration == config[\"algorithm\"]\n assert experiment.max_trials == config[\"max_trials\"]\n assert experiment.max_broken == config[\"max_broken\"]\n assert experiment.working_dir == config[\"working_dir\"]"
] | [
"0.71373373",
"0.6745019",
"0.6531341",
"0.64341766",
"0.62213826",
"0.6209696",
"0.6113299",
"0.60638833",
"0.6030111",
"0.60283864",
"0.60153705",
"0.59939706",
"0.59934443",
"0.5881487",
"0.5877085",
"0.5869261",
"0.5821324",
"0.58131206",
"0.579358",
"0.5737575",
"0.569943",
"0.5685671",
"0.56801224",
"0.5648519",
"0.56348234",
"0.55993706",
"0.55938625",
"0.5582505",
"0.55582386",
"0.5544529"
] | 0.6769689 | 1 |
given pymongo database and a regex object (or string) for geo_id, return (pubmed_id, geo_id) | def getPubmedIds(db, geo_id, limit=0):
pm_tups = []
for ds in db.datasets.find({ '$or' : [{'reference_series':geo_id} , {"geo_id" :geo_id }]}).limit(limit):
if 'pubmed_id' in ds:
pm_tups.append((ds['pubmed_id'], ds['geo_id']))
pm_tups.append((ds['pubmed_id'], ds['reference_series']))
return pm_tups | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def geonameid_from_location(text):\n if 'HASCORE_SERVER' in app.config:\n url = urljoin(app.config['HASCORE_SERVER'], '/1/geo/parse_locations')\n response = requests.get(url, params={'q': text}).json()\n geonameids = [field['geoname']['geonameid'] for field in response['result'] if 'geoname' in field]\n return set(geonameids)\n return None",
"def test_get_id_map__different_user_id(collection, user_id, media_item, repo):\n insert1 = collection.insert_one(media_item | {\"id\": \"id1\", \"userId\": user_id})\n insert2 = collection.insert_one(\n media_item | {\"id\": \"id2\", \"userId\": \"test-other-user-id\"}\n )\n\n result = repo.get_id_map([\"id1\", \"id2\"])\n assert \"id2\" not in result",
"def extract_obj_id_from_query(id_row: Text) -> Dict:\n pairs = id_row.split(\",\")\n _id = {}\n for pair in pairs:\n key, value = pair.split(\"=\")\n _id[key] = value\n return _id",
"def figure_id_filter(text):\n def fn(m):\n s = m.group(1) or \"\"\n s += m.group(2)\n body = m.group(4)\n s += replace_body(body, type=m.group(3))\n s += m.group(5) or \"\" # Close parens if any\n return s\n\n return __ref_pattern.sub(fn,text)",
"def _extract_identifier(self, publication):\n return self._parse_identifier(publication.metadata.identifier)",
"def get_publication_from_id_in_db(new_id: str) -> Union[None, Dict]:\n\n # db_publications = app.data.driver.db[\"publications\"]\n # try:\n # res = db_publications.find({\"id\": new_id}).limit(1).next()\n # except:\n # res = None\n # return res\n\n url = BaseConfig.DATAESR_PUBLICATIONS_URL\n url += '?where={{\"id\":\"{}\"}}'.format(new_id)\n r = requests.get(url)\n if r.status_code == 200:\n res = r.json()['data']\n else:\n res = []\n if len(res) > 1:\n print(\"ERROR more than one id - SHOULD NOT HAPPEN !!\")\n return res[0]\n elif len(res) == 1:\n return res[0]\n else:\n return None",
"def test_get_id_map__same_user_id(collection, user_id, media_item, repo):\n insert = collection.insert_one(media_item | {\"userId\": user_id})\n\n result = repo.get_id_map([media_item[\"id\"]])\n assert type(result) == dict\n assert result[media_item[\"id\"]].items() >= media_item.items()",
"def extract_gi_id(description):\n fields = description[1:].split('|')\n if 'gi' not in fields:\n return None\n return fields[1 + fields.index('gi')]",
"def query_by_id(doc_id):\n with Con(db = os.environ.get(\"MONGO_DB\"), host = os.environ.get(\"MONGO_URL\"), port = int(os.environ.get(\"MONGO_PORT\")), col = os.environ.get(\"MONGO_COL\")) as col:\n doc = col.find_one({'pmid': doc_id})\n doc_text = [v for k,v in doc['abstract'].iteritems()]\n doc_text = ' '.join(doc_text) + doc['title']\n return doc_text",
"def ids(self):\n return (x[\"_id\"] for x in self.document._meta.collection.find(self.spec, fields = (\"_id\",)))",
"def get_matches_id(match_number):\n database = TinyDB('db.json')\n matches_table = database.table('match')\n # recuperation de tous les tours\n match_id_list = []\n for i in range(1, match_number + 1):\n data = matches_table.all()[-i]\n match_id_list.append(data.doc_id)\n return match_id_list",
"def match_dataset_with_field_primary_reference(setup,conn,dataset_params,\n reduction_metadata,log):\n\n starlist = stage3_db_ingest.fetch_field_starlist(conn,dataset_params,log)\n\n primary_refimg_id = db_phot.find_primary_reference_image_for_field(conn)\n\n matched_stars = stage3_db_ingest.match_catalog_entries_with_starlist(conn,dataset_params,\n starlist,\n reduction_metadata,\n primary_refimg_id,log,\n verbose=True)\n\n (transform_xy, transform_sky) = stage3_db_ingest.calc_transform_to_primary_ref(setup,matched_stars,log)\n\n matched_stars = stage3_db_ingest.match_all_entries_with_starlist(setup,conn,dataset_params,\n starlist,reduction_metadata,\n primary_refimg_id,transform_sky,log,\n verbose=True)\n\n return transform, matched_stars",
"def id_extract(ids, db_location, database_file):\n # For exact searching, need to be adapted for ignored letter\n # SeqIO.write((seq for seq in seqiter if seq.id in ids), temp_seq_file[1]+'.txt', \"fasta\")\n\n os.chdir(db_location)\n temp_seq_file = tempfile.mkstemp()\n f_results = open(temp_seq_file[1] + '.txt', 'w')\n\n seqiter = SeqIO.parse(open(database_file), 'fasta')\n\n for id_to_find in ids:\n for sequence in seqiter:\n if sequence.id.find(id_to_find) is not -1:\n f_results.write('>' + sequence.id + '\\n' + str(sequence.seq) + '\\n')\n f_results.close()\n\n return temp_seq_file[1] + '.txt'",
"def read(self, data):\n query = { \"query\": {'$regex': '.*'+str(data)+'.*'} }\n mydoc = self.mycol.find(query)\n return mydoc",
"def get_pubmed(pub_query,conn):\n\n get_pm = ('SELECT DISTINCT dx.accession '\n 'FROM pub p, pub_dbxref pd, dbxref dx, db '\n 'WHERE p.pub_id = pd.pub_id '\n 'AND pd.dbxref_id = dx.dbxref_id AND dx.db_id = db.db_id '\n 'AND db.name = \\'pubmed\\' AND pd.is_current = TRUE AND p.uniquename = %s')\n \n pm = connect(get_pm, pub_query, conn)\n if pm:\n return(pm[0][0])\n else:\n return(None)",
"def identify_primary_reference_datasets(conn, log):\n\n primary_ref = {}\n\n primary_ref['refimg_id_ip'] = phot_db.find_primary_reference_image_for_field(conn)\n\n query = 'SELECT facility, filter, software FROM reference_images WHERE refimg_id=\"'+str(primary_ref['refimg_id_ip'])+'\"'\n t = phot_db.query_to_astropy_table(conn, query, args=())\n\n primary_ref['facility_id'] = t['facility'][0]\n primary_ref['software_id'] = t['software'][0]\n\n query = 'SELECT filter_id, filter_name FROM filters WHERE filter_name=\"ip\"'\n t = phot_db.query_to_astropy_table(conn, query, args=())\n primary_ref['ip'] = t['filter_id'][0]\n\n for f in ['rp', 'gp']:\n query = 'SELECT filter_id, filter_name FROM filters WHERE filter_name=\"'+f+'\"'\n t = phot_db.query_to_astropy_table(conn, query, args=())\n primary_ref[f] = t['filter_id'][0]\n\n query = 'SELECT refimg_id FROM reference_images WHERE facility=\"'+str(primary_ref['facility_id'])+\\\n '\" AND software=\"'+str(primary_ref['software_id'])+\\\n '\" AND filter=\"'+str(t['filter_id'][0])+'\"'\n qs = phot_db.query_to_astropy_table(conn, query, args=())\n\n if len(qs) > 0:\n primary_ref['refimg_id_'+f] = qs['refimg_id'][0]\n else:\n log.info('WARNING: Database contains no primary reference image data in filter '+f)\n\n log.info('Identified the primary reference datasets for this field as:')\n for key, value in primary_ref.items():\n log.info(str(key)+' = '+str(value))\n\n return primary_ref",
"def get_austria_crop_geopedia_idx_to_crop_id_mapping():\n gpd_session = GeopediaSession()\n to_crop_id = list(GeopediaFeatureIterator(layer='2032', gpd_session=gpd_session))\n to_crop_id = [{'crop_geopedia_idx': code['id'], **code['properties']} for code in to_crop_id]\n to_crop_id = pd.DataFrame(to_crop_id)\n to_crop_id['crop_geopedia_idx'] = pd.to_numeric(to_crop_id.crop_geopedia_idx)\n to_crop_id.rename(index=str, columns={\"SNAR_BEZEI\": \"SNAR_BEZEI_NAME\"}, inplace=True)\n to_crop_id.rename(index=str, columns={\"crop_geopedia_idx\": \"SNAR_BEZEI\"}, inplace=True)\n\n return to_crop_id",
"def process_identifier(identifier: str) -> Tuple[str, str]:\n graph_ns, graph_id = identifier.split(\":\", maxsplit=1)\n db_ns, db_id = identifiers.get_ns_id_from_identifiers(graph_ns, graph_id)\n db_id = identifiers.ensure_prefix_if_needed(db_ns, db_id)\n return db_ns, db_id",
"def get_slovenia_crop_geopedia_idx_to_crop_id_mapping():\n gpd_session = GeopediaSession()\n to_crop_id = list(GeopediaFeatureIterator(layer='2036', gpd_session=gpd_session))\n to_crop_id = [{'crop_geopedia_idx': code['id'], **code['properties']} for code in to_crop_id]\n to_crop_id = pd.DataFrame(to_crop_id)\n to_crop_id['crop_geopedia_idx'] = pd.to_numeric(to_crop_id.crop_geopedia_idx)\n\n return to_crop_id",
"def find_document(collection: str, query: dict = None, regex: list = None) -> dict:\n if query is not None:\n return DB[collection].find_one(query)\n if regex is not None:\n return DB[collection].find_one({regex[0]: {'$regex': regex[1]}})\n raise Exception('Didnt specify a query or a regex')",
"def convert_id(text):\n GOOGLE_API_KEY = os.environ['GOOGLE_API_KEY']\n geo = GoogleV3(api_key=GOOGLE_API_KEY)\n location = geo.geocode(place_id=text)\n # remove \", USA\" from end of location\n return location.latitude, location.longitude, location.address[:-5].strip()",
"def process_data(words,puncts,word_to_id):\n\tids = []\n\tp_ids = []\n\tfor i in range(len(words)):\n\t\tids.append(word_to_id[words[i]])\n\t\tp_ids.append(punct_to_id[puncts[i]])\n\treturn ids,p_ids",
"def parse_id_as_interval(id_string, regex):\n\n match = regex.match(id_string)\n genome = match.group(\"genome\")\n seqid = match.group(\"seqid\")\n start_tmp = int(match.group(\"start\"))\n end_tmp = int(match.group(\"end\"))\n\n start = min([start_tmp, end_tmp])\n end = max([start_tmp, end_tmp])\n del start_tmp\n del end_tmp\n\n return (genome, seqid, start, end)",
"def search_pubmed(keyword, area, start_year_range, end_year_range):\r\n handle = Entrez.esearch(db='pubmed', term=keyword, field = area, retmode='xml', idtype='acc', mindate=start_year_range, maxdate=end_year_range)\r\n data = Entrez.read(handle)\r\n \r\n UID = data['IdList']\r\n \r\n attributes_list = []\r\n \r\n for ID in range(len(UID)):\r\n record = Entrez.esummary(db='pubmed', id=UID[ID])\r\n attributes_list.append(Entrez.read(record))\r\n\r\n return attributes_list",
"def _parse_rec_id(rec_id: str) -> Tuple[int, bool, Optional[int]]:\n\n if m := re.match(ORIGINAL_RECORD_PATTERN, rec_id):\n return [int(m.group(1)), \"A\", None]\n elif m := re.match(DUPE_RECORD_PATTERN, rec_id):\n return [int(m.group(1)), \"B\", m.group(2)]\n else:\n raise Exception(f\"Unable to parse rec_id: {rec_id}\")",
"def get_danish_crop_geopedia_idx_to_crop_id_mapping():\n gpd_session = GeopediaSession()\n to_crop_id = list(GeopediaFeatureIterator(layer='2050', gpd_session=gpd_session))\n to_crop_id = [{'crop_geopedia_idx': code['id'], **code['properties']} for code in to_crop_id]\n to_crop_id = pd.DataFrame(to_crop_id)\n to_crop_id['crop_geopedia_idx'] = pd.to_numeric(to_crop_id.crop_geopedia_idx)\n\n return to_crop_id",
"def reference_to_id(value):\n m = re.search(r\"<@(U[A-Z0-9]+)>\", value)\n return m.group(1) if m else None",
"def extract_documents():\n client = MongoClient()\n conn = client.data\n coll = conn.germanwings\n\n query = {'text': {'$exists': 1}, 'exc': {'$exists': 0}}\n selection = {'text': 1, 'short_url': 1}\n for i, doc in enumerate(coll.find(query, selection)):\n short_url, text = tuple(doc[x] for x in (\"short_url\", \"text\"))\n print(\"Extracting {0} {1}\".format(i, short_url), file=stderr)\n filename = os.path.join(RAW_DIR, short_url)\n with open(filename, \"w\") as f:\n ascii = text.encode('ascii', 'ignore')\n f.write(ascii)",
"def documents(pmid_23982599, civic_aid6_document):\n return [pmid_23982599, civic_aid6_document]",
"def get_id_by_name(self, names):\n result = []\n name_field = 'name'\n synonym_field = 'synonyms'\n pos_0 = {name_field: {'$in': names}}\n pos_1 = {synonym_field: {'$in': names}}\n query = {'$or': [pos_0, pos_1]}\n projection = {'_id': 1}\n docs = self.collection.find(filter=query, projection=projection, collation=self.collation)\n for doc in docs:\n result.append(doc['_id'])\n return result"
] | [
"0.55480814",
"0.53312546",
"0.53300273",
"0.5205659",
"0.511528",
"0.5103334",
"0.50721294",
"0.50375956",
"0.50349665",
"0.49944767",
"0.4975143",
"0.49461344",
"0.49347347",
"0.49083653",
"0.48941147",
"0.48785365",
"0.48737317",
"0.48682904",
"0.4865054",
"0.48643872",
"0.4857458",
"0.4812101",
"0.479898",
"0.47771314",
"0.47643515",
"0.47639257",
"0.4750634",
"0.47154015",
"0.4710979",
"0.4702056"
] | 0.6269434 | 0 |
given a pubmed id, return a list of words from the given fields | def getWords(pubmed_id, fields=["MeshHeading" , "AbstractText", "ArticleTitle"]):
def findText(anode):
if anode.nodeType == anode.TEXT_NODE:
return anode.data
elif anode.hasChildNodes():
return ' '.join(map(findText, anode.childNodes))
else:
return ''
handle = Entrez.efetch(db="pubmed", id=pubmed_id, retmode='xml')
myfile = handle.read()
doc = parseString(myfile)
    myt = ' '.join( [' '.join(map( findText, doc.getElementsByTagName(tag))) for tag in fields] )
word_list = []
for word in myt.split():
clean_word = word.strip(r'.!?:;\'",)(%&').lower()
if len(clean_word) > 1:
word_list.append(clean_word)
return word_list | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def words(self, fields, normalizer_class):\n return sorted(set(itertools.chain.from_iterable(\n bib.raw_data(fields, normalizer_class)\n for bib in self.documents\n )))",
"def get_words(data):\n return data[\"words\"]",
"def get_page_words(parsed_hocr_page, pageid):\n page_words = []\n page_height = parsed_hocr_page.box.height\n page_width = parsed_hocr_page.box.width\n page_dim_string = \"%sx%s\" %(page_width, page_height)\n \n for word in parsed_hocr_page.words:\n this_word = {\n 'x0':word.box.left, 'x1':word.box.right, \n 'y0':page_height-word.box.bottom, 'y1':page_height-word.box.top,\n 'text':word.text, 'width':word.box.width,\n 'height':word.box.height, 'pageid':pageid,\n 'page_dim':page_dim_string,\n 'object_type':'word',\n 'lang':word.lang,\n }\n page_words.append(this_word)\n \n return page_words",
"def get_words(pid):\n try:\n #search for an existing state for the pid\n query = {'pid': pid}\n temp = {}\n cursor = database['ComplexWords'].find(query)\n if cursor is None:\n return {'status': 0, 'data': None}\n else:\n for document in cursor:\n temp[document['sentence_number']] = document['words']\n return {'status': 1, 'data': temp}\n except Exception as e:\n return {'status': -1, 'data': str(e)}",
"def get_words(results):\n return ' '.join([result['word'] for result in results])",
"def line_2_words(wordid_list, id2word):\n word_list = []\n for word_id in wordid_list:\n word_list.append(id2word[word_id])\n return word_list",
"def get_words():\n # words\n words_list = list()\n for i in range(1, 114+1):\n sura = quran.get_sura(i)\n for aya in sura:\n wordsList = aya.split(' ')\n for word in wordsList:\n words_list.append(word)\n\n return words_list",
"def get_words(self,data):\n f_words = []\n e_words = []\n for d in data:\n f_sent = d[\"fr\"] ## foreign sentence\n e_sent = d[\"en\"] ## English sentence\n f_words.extend(f_sent.split())\n d[\"fr\"] = f_sent.split()\n e_words.extend(e_sent.split())\n d[\"en\"] = e_sent.split()\n return list(set(f_words)),list(set(e_words))",
"def getWords(docstr):\n # get rid of digits and non-alphanumeric chars\n # and split on spaces\n wds = re.sub('\\d', ' ', docstr)\n wds = re.sub('[\\W_]', ' ', wds)\n wds = wds.split()\n\n # convert to lowercase and get rid of stop words\n wordlist = [w.lower() for w in wds]\n wordlist = [w for w in wordlist if w not in stopWords]\n wordlist = [w for w in wordlist if len(w) >= 3]\n\n return wordlist",
"def words(self) -> List[str]:\n return pulumi.get(self, \"words\")",
"def words(self) -> List[str]:\n return pulumi.get(self, \"words\")",
"def _ids_to_words(ids, dictionary):\n if not dictionary.id2token: # may not be initialized in the standard gensim.corpora.Dictionary\n setattr(dictionary, 'id2token', {v: k for k, v in dictionary.token2id.items()})\n\n top_words = set()\n for word_id in ids:\n word = dictionary.id2token[word_id]\n if isinstance(word, set):\n top_words = top_words.union(word)\n else:\n top_words.add(word)\n\n return top_words",
"def shakespeare_words():\n return itertools.chain.from_iterable(shakespeare.words(fileid) for fileid in shakespeare.fileids())",
"def get_words(self):\n return [self.id2word[idx] for idx in range(len(self))]",
"def word_ids_to_words(data, id_to_word):\n return [id_to_word[i] for i in data]",
"def get_pmid_by_term(cls, word, limit=40):\n\n print \"Getting all studies associated with \", word\n\n if isinstance(word, list):\n pmids = db.session.query(cls.pmid).filter(\n cls.word.in_(word)).group_by(\n cls.pmid).order_by(\n cls.frequency).limit(limit).all()\n\n else:\n pmids = db.session.query(cls.pmid).filter(\n cls.word == word).group_by(\n cls.pmid).order_by(\n cls.frequency).limit(limit).all()\n\n return [pmid[0] for pmid in pmids]",
"def process_data(words,puncts,word_to_id):\n\tids = []\n\tp_ids = []\n\tfor i in range(len(words)):\n\t\tids.append(word_to_id[words[i]])\n\t\tp_ids.append(punct_to_id[puncts[i]])\n\treturn ids,p_ids",
"def find_restricted_words(content):\n restricted_words_obj = db.engine.execute(\"select * from restricted_word;\")\n restricted_words_dict = []\n for row in restricted_words_obj:\n if ' ' + row[1].upper().strip() + ' ' in content:\n restricted_words_dict.append({'id': row[0], 'phrase': row[1].upper()})\n\n return restricted_words_dict",
"def fill_in_words(mad_lib, words, types):\n result = []\n # For word in the text...\n for word in mad_lib:\n # If it's a place holder...\n if partofspeech in types:\n # Go through each PoS in the word list...\n for inner_index in range(len(words)):\n # Once you find the correct PoS...\n if words[inner_index][0] == partofspeech:\n # Choice a random word from it's list\n to_append = random.choice(words[inner_index][1])\n \n else:\n # If it's not placerholder, then just append the word\n to_append = word\n \n result.append(to_append)\n \n return result",
"def get_random_words_from_wordnik(part_of_speech, limit):\n words = words_api.getRandomWords(includePartOfSpeech=part_of_speech, limit=limit)\n\n random_words = []\n for word in words:\n random_words.append(word.word)\n # pprint(random_words)\n return random_words",
"def get_words(self):\n words = self.wiki.get_words(cleaner=self.cleaner)\n df = pd.DataFrame({\"word\": words})\n df = df.drop_duplicates(\"word\")\n df = df.head(100)\n mask = df[\"word\"].isin(self.common[\"word\"])\n mask |= df[\"word\"].str.lower().isin(self.common[\"word\"])\n\n words = [ Word(word) for word in df[~mask][\"word\"] ]\n for word in words:\n word.get_definition(definer=self.definer)",
"def words(self, uncased=False):\n if uncased:\n return [t[self.TEXT].lower() for t in self.data]\n else:\n return [t[self.TEXT] for t in self.data]",
"def words(self, uncased=False):\n if uncased:\n return [t[self.TEXT].lower() for t in self.data]\n else:\n return [t[self.TEXT] for t in self.data]",
"def getWords(speech):\r\n return speech.split()",
"def get_words(doc):\n splitter = re.compile('\\\\W*')\n # Split the words by non-alpha characters\n words = [s.lower() for s in splitter.split(doc) \n if len(s)>2 and len(s)<20]\n # Return the unique set of words only\n return dict([(w,1) for w in words])",
"def words_to_word_ids(data, word_to_id):\n # if isinstance(data[0], six.string_types):\n # print(type(data[0]))\n # # exit()\n # print(data[0])\n # print(word_to_id)\n # return [word_to_id[str(word)] for word in data]\n # else:\n return [word_to_id[word] for word in data]\n\n # if isinstance(data[0], str):\n # # print('is a string object')\n # return [word_to_id[word] for word in data]\n # else:#if isinstance(s, bytes):\n # # print('is a unicode object')\n # # print(data[0])\n # return [word_to_id[str(word)] f",
"def getTerms(vocabulary_id, terms_id):\n return [getTerm(vocabulary_id, term_id) for term_id in terms_id]",
"def get_word_list_of_img_id(self, img_id, remove_stops):\n item = self.get_item_from_img_id(img_id)\n\n word_list = self.get_word_list_from_item(item, remove_stops=remove_stops)\n return word_list",
"def fetchWords(morph_type):\r\n with driver.session() as session:\r\n\r\n results = session.run(\"\"\"\r\n MATCH (d:Dictionary {{node:'Dictionary'}})-[rel:IS_{morph_type}]->(w:Word)\r\n RETURN d,type(rel),w,id(w)\"\"\".format(morph_type=morph_type))\r\n\r\n nodes = []\r\n for record in results:\r\n print(record)\r\n nodes.append({\r\n \"name\": record[\"w\"][\"name\"],\r\n \"translation\": record['w']['translation'],\r\n \"plural\": record['w']['plural'],\r\n \"type\": record['type(rel)'],\r\n \"id\": record['id(w)']\r\n })\r\n\r\n print(nodes)\r\n return nodes",
"def get_text(data):\n return \" \".join([item[\"words\"] for item in data])"
] | [
"0.6270164",
"0.62465376",
"0.62347436",
"0.6110164",
"0.5964306",
"0.58644086",
"0.5819131",
"0.58036935",
"0.5776748",
"0.57580763",
"0.57580763",
"0.5744212",
"0.57393175",
"0.56931",
"0.5689465",
"0.5641902",
"0.5632844",
"0.5622878",
"0.5604147",
"0.5599055",
"0.55986273",
"0.55972946",
"0.55972946",
"0.5594526",
"0.55874974",
"0.55796444",
"0.5549114",
"0.5514148",
"0.54968333",
"0.547838"
] | 0.78243196 | 0 |
given mongo db, geo_id and a list of words, insert into word2geo collection | def insertWords(db, geo_id, words):
def f( word):
return {'geo_id' : geo_id, 'word': word}
try:
db.word2geo.insert(map( f, words))
except:
print "error in " + geo_id
print map( f, words) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def _insert_words(self, dict_words: List[DictWordModel]) -> NoReturn:\n docs = [word.dict() for word in dict_words]\n is_inserted = await self._db_client.try_insert_many(self._db_name, self._db_collection_name, docs)\n if not is_inserted:\n raise DBError('Failed to save many words docs')",
"def insert_geo_to_mongo(collection):\n if not collection:\n log_utils.log_msg_error(logger=logger, key='INSERTGEOCODE0001', msg='Collection is None')\n return None\n\n cursor = collection.find()\n count = 1\n\n for each in cursor:\n location = each['location_desc']\n id = each['_id']\n lat, lon = geo_utils.get_location_from_geopy(location)\n\n collection.update({'_id': id}, {\"$set\" :{'geo_location': {'lat': lat, 'lon': lon}}}, True)\n\n print count\n count += 1",
"def insert_characterlist_mongo(characterList):\n #MONGO_URI, DB_NAME and COLLECTION_NAME is from creds file\n client = MongoClient(MONGO_URI)\n db = client[MONGO_DB_NAME]\n Names = db[MONGO_COLLECTION_NAME]\n result = Names.insert_many(characterList)",
"def populate_hateword_data():\n with open(\"./data/hate-speech-lexicons/refined_ngram_dict.csv\") as f:\n lst = [row.split(',', 1)[0] for row in f]\n lst = lst[1:]\n\n lst = [{\n 'word': word,\n 'category': [],\n 'similar_to': []\n } for word in lst]\n\n try:\n db = mongo_client.MongoClient(config.MONGO_URI).twitter\n db.hateword.delete_many({})\n result = db.hateword.insert_many(lst)\n print(\"Completed populating\", len(result.inserted_ids), \"hate words\")\n except pymongo.errors.BulkWriteError as e:\n print(e.details)",
"def to_mongo(orders_list:list, db_collection, live=False):\n client = MongoClient('localhost', 27017)\n if live: db = client.live\n else: db = client.papertrade\n\n collections = {'orders': db.orders,\n 'positions': db.positions}\n\n collection = collections[db_collection]\n return collection.insert_many(orders_list)",
"def linear(files):\n return list(map(insert_to_mongo, files))",
"def insert_to_collection(db, coll_name, docs):\n if isinstance(docs, list):\n db[coll_name].insert_many(docs)\n else:\n db[coll_name].insert_one(docs)",
"def save_to_mongodb(lst):\n # deaulft using host='localhost' and port=27107\n db_object = connect_mongodb.connect_mongodb()\n # mongodb'connect\n connect = db_object.get_connect()\n # whether or not db is None\n if connect is None:\n print 'connect to mongodb database error'\n return None\n # db.python of mongodb'database\n database = connect['python']\n # batch insert\n index = 0\n lt = []\n for item in lst:\n # index must to convert string\n dt = {str(index): item}\n lt.append(dt)\n index += 1\n database.activation_code.insert(lt)",
"def write_postings(docname, postings, dbcon):\n cur = dbcon.cursor()\n for word, posting in postings.items():\n # generate text of indexes\n indexes = \"\"\n for ix in posting[\"indexes\"]:\n indexes += \"{},\".format(ix)\n indexes = indexes.rstrip(\",\")\n # insert into database; nested try is needed to handle rollback\n # and commit properly\n try:\n try:\n cur.execute(\"INSERT INTO IndexWord VALUES (?)\", (word,))\n except sqlite3.IntegrityError: # word already in index\n pass\n cur.execute(\n \"INSERT INTO Posting VALUES (?, ?, ?, ?)\",\n (word, docname, posting[\"frequency\"], indexes)\n )\n except Exception as e:\n print(e)\n dbcon.rollback()\n else:\n dbcon.commit()",
"def AddWords(cls, word_list, words):\n entity = WordList.get_by_id(word_list)\n if not entity:\n return \"word list {} does not exist\".format(word_list)\n entity.words = list(set(entity.words) | set(words))\n entity.numWords = len(entity.words)\n entity.put()\n return None",
"def send_to_mongo(data_list):\n client = pymongo.MongoClient(DB_URL, ssl=True, ssl_cert_reqs=ssl.CERT_NONE)\n db = client.coreData\n result = db.allGames.insert_many(data_list)\n return result.inserted_ids",
"def insert(db_name, collection_name, docs):\n db = client[db_name]\n collection = db[collection_name]\n return collection.insert_many(docs)",
"def save_words_to_database(database_path: str, words_list: list):\n\n db = sqlite3.connect(database_path)\n with db:\n cursor = db.cursor()\n for word in words_list:\n # check is word in DB already\n sql = \"SELECT COUNT(*) FROM {} WHERE word='{}'\".format('words', word)\n cursor.execute(sql)\n count = cursor.fetchone()[0]\n\n if count > 0:\n sql = \"UPDATE {} SET {} = {} + 1 WHERE {} = '{}'\"\\\n .format('words', 'usage_count', 'usage_count', 'word', word)\n else:\n sql = \"INSERT INTO {}({}) VALUES('{}')\".format('words', 'word', word)\n\n # print(sql)\n cursor.execute(sql)\n\n print('Database save complete')\n\n if db is not None:\n db.close()",
"def save_words(csvf, word_set_id, orig_set_id=''):\n words = []\n headings = []\n\n with open(csvf, \"r\", encoding='utf-8-sig') as file:\n reader = csv.reader(file, delimiter=',')\n\n # Create dictionary keys\n for row in reader:\n i = 0\n while (i < len(row)):\n headings.append(row[i])\n i += 1\n break\n\n # Save STR values to each person\n for row in reader:\n i = 0\n word = {}\n\n while (i < len(row)):\n key = str(headings[i])\n value = row[i]\n word[key] = value\n i += 1\n words.append(word)\n\n # Get heading names\n lang1 = headings[0] # Original Language\n lang1p = headings[1] # Original transliteration\n lang2 = headings[2] # Translation Language\n lang2p = headings[3] # Translation transliteration\n wtype = headings[4] # Type of word (noun, verb)\n\n orig_lang_id = (db.execute(\n \"SELECT id FROM languages WHERE name = ?\", (lang1, )).fetchall())[0]['id']\n trans_lang_id = (db.execute(\n \"SELECT id FROM languages WHERE name = ?\", (lang2, )).fetchall())[0]['id']\n\n for w in words:\n word_type_id = (db.execute(\n \"SELECT id FROM word_type WHERE type = ?\", (w[wtype], )).fetchall())[0]['id']\n\n new_orig_word_id = (db.execute(\"INSERT INTO words ('wordstr', 'language_id', 'type', 'pronunciation') VALUES (?, ?, ?, ?)\",\n (w[lang1], orig_lang_id, word_type_id, w[lang1p])\n )).lastrowid\n con.commit()\n new_translated_word_id = (db.execute(\"INSERT INTO words ('wordstr', 'language_id', 'type', 'pronunciation') VALUES (?, ?, ?, ?)\",\n (w[lang2], trans_lang_id, word_type_id, w[lang2p])\n )).lastrowid\n con.commit()\n db.execute(\"INSERT INTO word_set_words (word_set_id, word_id) VALUES (?, ?)\",\n (word_set_id, new_translated_word_id))\n con.commit()\n # if orig_set_id is set\n if (orig_set_id != ''):\n db.execute(\"INSERT INTO word_set_words (word_set_id, word_id) VALUES (?, ?)\",\n (int(orig_set_id), new_orig_word_id))\n con.commit()\n # insert orig and its translation equivalent\n db.execute(\"INSERT INTO word_translation (orig_lang, trans_lang, orig_word, trans_word) VALUES (?, ?, ?, ?)\",\n (orig_lang_id, trans_lang_id, new_orig_word_id, new_translated_word_id))\n con.commit()\n # reverse orig & translation\n db.execute(\"INSERT INTO word_translation (orig_lang, trans_lang, orig_word, trans_word) VALUES (?, ?, ?, ?)\",\n (trans_lang_id, orig_lang_id, new_translated_word_id, new_orig_word_id))\n con.commit()\n file.close()\n return len(words)",
"def add_phosphosites_to_db(phosphosites, db_cursor):\n\n for phosphosite in phosphosites:\n residue = phosphosite.get_residue()\n position = phosphosite.get_position()\n uniprotid = phosphosite.get_uniprotid()\n fold_change = phosphosite.get_fold_change()\n db_cursor.execute(\\\n \"INSERT INTO phosphositetb (residue,position,uniprotid,foldchange) VALUES(?,?,?,?);\"\\\n ,(residue,position,uniprotid,fold_change))",
"def store_words(pid, words, sentence_number):\n try:\n database.ComplexWords.insert_one({'pid': pid, 'words': words, 'sentence_number': sentence_number})\n counter += 1\n print('counter: ', counter)\n return {'status': 1, 'data': None}\n except Exception as e:\n return {'status': -1, 'data': str(e)}",
"def add_words(self, words):\r\n for word in words:\r\n self.add(word)",
"def insertCollection(db, col, result, drop=True):\n\n # result = result.to_dict(\"records\")\n conn = MongoClient(\"localhost\", 27017)\n connObj = conn[db][col]\n if drop:\n connObj.drop()\n # connObj.insert_many(result)\n for x, row in result.iterrows():\n connObj.insert_one(row.to_dict())\n conn.close()",
"def write_data_to_mongo(self, db_name, collection_name, list_of_dicts):\n self.db_client.db_name = db_name\n self.db = self.db_client.affirm_client()\n collection = self.__write(collection_name, list_of_dicts)\n return collection",
"def store_eeg_in_mongodb(eeg_data):\n con, eeg = connect_to_eeg_db()\n for eeg_record in eeg_data:\n eeg.insert(eeg_record)\n con.close()",
"def to_db(self):\n bulk = conn_db().initialize_ordered_bulk_op()\n for fiction in self.fictions:\n bulk.find({'id': fiction.id}).upsert().update({'$set': fiction.__dict__})\n bulk.execute()",
"def _add_keyword(self, collection_id, name, doc, args):\n argstring = json.dumps(args)\n self.db.execute(\"\"\"\n INSERT INTO keyword_table\n (collection_id, name, doc, args)\n VALUES\n (?,?,?,?)\n \"\"\", (collection_id, name, doc, argstring))",
"def insert_tfs(connection: DBConnection, documents: Sequence[Document]) -> None:\n max_ = len(documents)\n current = 0\n print() # print an extra line, because we will delete lines with printing \\r\n for chunk in chunks(documents):\n rows = (d.get_tfs_rows() for d in chunk)\n connection.execute(\"BEGIN TRANSACTION\")\n for row in rows:\n connection.executemany(\n \"INSERT INTO tfs(did, term, tf) VALUES (?, ?, ?)\", row)\n connection.execute(\"COMMIT\")\n current += len(chunk)\n print(f\"\\r[{current}/{max_}] doc-tfs done\", end='')\n print()",
"def get_feature_collection(page):\n #print page['words']\n feature_array = []\n for i,word in enumerate(page['words']):\n # should line_num be required here? It's not supported by -bbox output... \n word_properties = {'text':word['text'], 'line_num':word['line_num']}\n # should we instead rely on the the word number for the id? \n feature_array.append(get_geojson_feature(i, word['bbox'], word_properties))\n \n featurecollection = geojson.FeatureCollection(feature_array)\n # todo: add page dimensions\n return geojson.dumps(featurecollection)",
"def insert_data(data, collec, many):\n db = client.get_database('tweetstorm')\n collection = db.get_collection(collec)\n if many:\n collection.insert_many(data)\n logger.info(f\"{ymdhms()} inserted {len(data)} tweets to {collec} collection\")\n else:\n collection.insert_one(data)\n logger.info(f\"{ymdhms()} inserted data {data} to {collec} collection\")",
"def insert_values(listingid_to_text):\n sql = \"INSERT INTO listingid_to_text_english VALUES (%s, %s)\"\n args = [(key, val) for key, val in listingid_to_text.iteritems()]\n conn = None\n try:\n # read database configuration\n params = config()\n # connect to the PostgreSQL database\n conn = psycopg2.connect(**params)\n # create a new cursor\n cur = conn.cursor()\n \n print(\"here\")\n # execute the INSERT statement\n cur.executemany(sql, args)\n # commit the changes to the database\n conn.commit()\n # close communication with the database\n cur.close()\n except (Exception, psycopg2.DatabaseError) as error:\n print(error)\n finally:\n if conn is not None:\n conn.close()",
"def save_list_mongo(listz):\t\n\tconnection = pymongo.Connection('localhost', 27017)\n\tdb = connection.database\n\tcollection = db.warez_collection",
"def store_word_ranks(pid, words):\n try:\n for wordpair in words:\n for key,value in wordpair.items():\n database.RankedWords.insert_one({'pid': pid, 'word': key, 'complexity': value})\n return {'status': 1, 'data': None}\n except Exception as e:\n return {'status': -1, 'data': str(e)}",
"async def bulk_insert(self, documents, alias=None):\n\n is_valid = True\n docs_to_insert = []\n\n for document_index, document in enumerate(documents):\n self.update_field_on_save_values(document, document._id is not None)\n try:\n is_valid = is_valid and self.validate_document(document)\n except Exception:\n err = sys.exc_info()[1]\n raise ValueError(\n \"Validation for document %d in the documents you are saving failed with: %s\"\n % (document_index, str(err))\n )\n\n if not is_valid:\n return\n\n docs_to_insert.append(document.to_son())\n\n if not is_valid:\n return\n\n doc_ids = await self.coll(alias).insert(docs_to_insert)\n\n for object_index, object_id in enumerate(doc_ids):\n documents[object_index]._id = object_id\n\n return documents",
"def add_words(self, goal_slug, num_words, timeout=10):\n url = DATAPOINT_ADD_URL.format(self.username, goal_slug, self.auth_token)\n data = { 'timestamp' : str(int(time.time())),\n 'value' : str(num_words),\n 'comment' : COMMENT_DEFAULT }\n urllib2.urlopen(url, urllib.urlencode(data), timeout)"
] | [
"0.671187",
"0.66561407",
"0.64611846",
"0.6321505",
"0.61196697",
"0.60452765",
"0.6039269",
"0.6009112",
"0.5792243",
"0.5741951",
"0.57339126",
"0.57302725",
"0.56968504",
"0.5630612",
"0.55702686",
"0.55599356",
"0.55106515",
"0.55084234",
"0.5503626",
"0.5496883",
"0.5466811",
"0.5462642",
"0.5462397",
"0.5435641",
"0.541866",
"0.5409155",
"0.5399856",
"0.53759784",
"0.53610826",
"0.5347672"
] | 0.8156848 | 0 |
Build a dictionary recording the min and max indices (indicating the position in a list) of documents for each review; | def build_indices(review_ids):
    review_indices = {}
    # Load qrel_abs_train txt file
    clef_data = pd.read_csv(config.TRAIN_QREL_LOCATION, sep=r"\s+", names=['review_id', 'q0', 'pmid', 'included'])
    # Get index of documents for each review
    for review_id in review_ids:
        index = clef_data.index[clef_data['review_id'] == review_id].tolist()
        # Get the range of index for all documents within each review
        review_indices[review_id] = (min(index), max(index) + 1)
    return review_indices | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def generate_dict(self):\n dict = defaultdict(list)\n for i in range(self.no_of_docs-1):\n doc_txt = self.doc_to_df(i)\n #assign key to index in dictionary and its locations as tuples(docid,line,wordpos) as the values\n for j in range(len(doc_txt)):\n for k in range(doc_txt.shape[1]):\n key = doc_txt[k][j]\n dict[key].append((i,j,k))",
"def pos():\n pos_list = []\n for token in doc:\n pos_list.append(token.pos_)\n setList = list(set(pos_list))\n my_dict = {i: pos_list.count(i) for i in setList}\n print(my_dict)",
"def build_index(text: Iterable) -> Dict[str, List[Tuple[int, int]]]:\n index = defaultdict(list)\n for line_no, line in enumerate(text, 1):\n for match in WORD_RE.finditer(line):\n word = match.group()\n column_no = match.start() + 1\n location = (line_no, column_no)\n index[word].append(location)\n return index",
"def build_ngram_index(tokenized_documents, ngrams):\n dictionary = {}\n\n doc_ngrams = {}\n for doc in tokenized_documents:\n ngrams_freq = {}\n\n measures = nltk.collocations.BigramAssocMeasures()\n finder = BigramCollocationFinder.from_words(tokenized_documents[doc])\n freqs = finder.ngram_fd\n for ngram in freqs:\n ngrams_freq[ngram] = freqs[ngram]\n \n measures = nltk.collocations.TrigramAssocMeasures()\n finder = TrigramCollocationFinder.from_words(tokenized_documents[doc])\n freqs = finder.ngram_fd\n for ngram in freqs:\n ngrams_freq[ngram] = freqs[ngram]\n\n doc_ngrams[doc] = ngrams_freq\n\n for ngram in ngrams:\n dictionary[ngram] = [0]\n for doc in doc_ngrams:\n if ngram in doc_ngrams[doc]:\n dictionary[ngram][0] += doc_ngrams[doc][ngram]\n dictionary[ngram].append((doc, doc_ngrams[doc][ngram]))\n \n return dictionary",
"def _create_id_map(self, word_list, max_list_length):\n\n ############ 1.5 TODO\n from collections import Counter\n \n # import pdb; pdb.set_trace()\n word_rank_list = Counter(word_list).most_common(max_list_length)\n \n id_map = {}\n for idx, (word,_) in enumerate(word_rank_list):\n id_map[word] = idx\n\n ############\n # raise NotImplementedError()\n return id_map",
"def build_dict(min_word_freq=0, train_dir=\"\", test_dir=\"\"):\n word_freq = collections.defaultdict(int)\n files = os.listdir(train_dir)\n for fi in files:\n with open(os.path.join(train_dir, fi), \"r\") as f:\n word_freq = word_count(f, word_freq)\n files = os.listdir(test_dir)\n for fi in files:\n with open(os.path.join(test_dir, fi), \"r\") as f:\n word_freq = word_count(f, word_freq)\n\n word_freq = [x for x in six.iteritems(word_freq) if x[1] > min_word_freq]\n word_freq_sorted = sorted(word_freq, key=lambda x: (-x[1], x[0]))\n words, _ = list(zip(*word_freq_sorted))\n word_idx = dict(list(zip(words, six.moves.range(len(words)))))\n return word_idx",
"def process_document(text):\n words = preprocess(text)\n postings = {}\n for word, ix in words:\n if word in postings:\n wordinfo = postings[word]\n else:\n wordinfo = {\"frequency\": 0, \"indexes\": []}\n postings[word] = wordinfo\n wordinfo[\"frequency\"] += 1\n wordinfo[\"indexes\"].append(ix)\n return postings",
"def create_dict(list, old_min, old_max, new_min, new_max):\n d = {}\n for row in list:\n tds = row.find_all(\"td\")\n letter = tds[0].string\n freq = tds[1].string[:-1]\n freq = float(freq.replace(',', '.'))\n d[letter] = map_to_range(freq, old_min, old_max, new_min, new_max)\n\n return d",
"def preprocess(self, documents):\n\n # A dict storing the frequency of each word\n word_freq = {}\n\n # Iterate for each document\n for doc in documents:\n # Split the document into a list of words and iterate on it\n for w in extract_words(doc):\n # Update word frequencies\n '''YOUR CODE HERE'''\n if w not in word_freq.keys():\n word_freq[w] = 1\n else:\n word_freq[w] += 1\n\n ''' END CODE FOR THIS LOOP '''\n\n\n # A set of words with frequency less than 'self.min_freq'\n remove_words = set()\n\n # Check frequency of each word and add to 'remove_words'\n # if it's frequency is below self.min_freq\n\n ''' YOUR CODE HERE '''\n for w in word_freq.keys():\n if word_freq[w] < self.min_freq:\n remove_words.add(w)\n\n # Delete the words in 'remove_words' from 'word_freq'\n for w in remove_words:\n del word_freq[w]\n\n # Fill 'self.word_to_idx' and 'self.idx_to_word' for\n # each word in 'word_freq' (dicts are explained above)\n\n i = 0\n for w in word_freq.keys():\n self.word_to_idx[w] = i\n self.idx_to_word[i] = w \n i += 1\n\n ''' END YOUR CODE HERE '''",
"def build_index(path, limit=None):\n\n documents = {}\n doc_lengths = {}\n index = {}\n j = 0 # Counter for articles\n for i in range(0, 22):\n if i >= 10:\n file = open(path + \"reut2-0\" + str(i) + \".sgm\", encoding='latin-1')\n else:\n file = open(path + \"reut2-00\" + str(i) + \".sgm\", encoding='latin-1')\n\n # Parsing html pages and getting reuters tagged once\n soup = BeautifulSoup(file, \"html.parser\")\n articles = soup.find_all('reuters')\n\n for article in articles:\n\n body = \"\"\n title = \"\"\n newid = int(article['newid'])\n\n try:\n body = article.body.get_text()\n except AttributeError:\n pass\n\n try:\n title = article.title.get_text()\n except AttributeError:\n pass\n\n words_list = title + \"\\n\" + body\n\n # Adding title+body to documents dictionary\n documents[newid] = words_list\n\n # Processing document and adding document lengths to dictionary\n processed_doc = preprocess(documents[newid])\n doc_lengths[newid] = len(processed_doc)\n\n # Adding word to index\n for term in processed_doc:\n if term in index:\n term_freq, docs_dict = index[term]\n\n term_freq += 1\n if newid in docs_dict:\n docs_dict[newid] += 1\n else:\n docs_dict[newid] = 1\n\n index[term] = (term_freq, docs_dict)\n else:\n docs_dict = {newid: 1}\n index[term] = (1, docs_dict)\n j += 1\n # Checking limit on articles\n if limit is not None:\n if j == limit:\n break\n\n # Checking limit on articles\n if limit is not None:\n if j == limit:\n break\n\n for term in index:\n term_freq, docs_dict = index[term]\n index[term] = [term_freq] + list(docs_dict.items())\n\n if limit is None:\n save_obj(index, \"reuters_index\")\n save_obj(documents, \"reuters_documents\")\n save_obj(doc_lengths, \"reuters_doc_length\")\n\n return index",
"def dictionary(cleaned_data,threshold):\n news = []\n for date in cleaned_data:\n for headlines in cleaned_data[date]:\n news.append(headlines)\n\n word_freq = nltk.FreqDist(itertools.chain(*news))\n id_to_word = ['<pad>'] + [word for word, cnt in word_freq.items() if cnt >= threshold] + ['<unk>']\n word_to_id = {word:idx for idx, word in enumerate(id_to_word)}\n \n return id_to_word, word_to_id",
"def _build_token_dict(self, corpus: List[List[str]], min_count: int = 3):\n token2idx = {\n self.token_pad: 0,\n self.token_unk: 1,\n self.token_bos: 2,\n self.token_eos: 3\n }\n\n token2count = {}\n for sentence in corpus:\n for token in sentence:\n count = token2count.get(token, 0)\n token2count[token] = count + 1\n\n # 按照词频降序排序\n sorted_token2count = sorted(token2count.items(),\n key=operator.itemgetter(1),\n reverse=True)\n token2count = collections.OrderedDict(sorted_token2count)\n\n for token, token_count in token2count.items():\n if token not in token2idx and token_count >= min_count:\n token2idx[token] = len(token2idx)\n\n self.token2idx = token2idx\n self.idx2token = dict([(value, key)\n for key, value in self.token2idx.items()])\n logging.debug(f\"build token2idx dict finished, contains {len(self.token2idx)} tokens.\")\n self.dataset_info['token_count'] = len(self.token2idx)",
"def getAlleleCountDict(rec,idx_list=None):\n alleles = defaultdict(int)\n total_sites = 0\n missing_inds = 0\n if idx_list is None:\n idx_list = range(len(rec.samples))\n for j in idx_list:\n samp = rec.samples[j]\n if None in samp.alleles:\n alleles['N'] += len(samp.alleles)\n #missing_inds += 1\n for k in range(len(samp.alleles)):\n b = samp.alleles[k]\n if b is not None:\n alleles[b] += 1\n total_sites+=1\n return alleles",
"def _vector_mapping(self) -> dict:\n words = set()\n for file in os.listdir(self.processed_path):\n doc_path = f\"{self.processed_path}/{file}\"\n with open(doc_path, 'r') as f:\n text_words = f.readline().split()\n words = words.union(set(text_words))\n words = list(words)\n words.sort()\n\n return dict(zip(words, range(len(words))))",
"def index_feats_dict(self):\n doc_features_dict = {}\n\n for index, doc in zip(self.index, self.series):\n # Sets for a doc and feature words\n doc_set = set(doc.split())\n feat_set = set(self.features)\n\n # Shared words between the two sets\n interset_words = doc_set.intersection(feat_set)\n\n # Append to doc_features_dict\n doc_features_dict[index] = list(interset_words)\n\n return doc_features_dict",
"def create_docs(text_sentences):\n doc_info = []\n\n ix = 0\n for sent in text_sentences:\n ix += 1\n count = count_words(sent)\n temp = {\n 'doc_id': ix,\n 'doc_length': count\n }\n doc_info.append(temp)\n\n return doc_info",
"def _create_dictionary(self, document_set):\n words = self._normalize_words(document_set.words)\n unique_words = frozenset(words)\n return dict((word, idx) for idx, word in enumerate(unique_words))",
"def compute_idfs(documents):\n idf={}\n words={}\n # idf= no.of doc/no. of doc in which it lies\n for doc in documents:\n for wrd in set(documents[doc]):\n if wrd.lower() not in words:\n words[wrd.lower()]=0\n words[wrd.lower()]+=1 \n for word in words:\n idf[word]=len(documents)/words[word]\n return idf",
"def _create_dictionary(self, document):\n words = self._normalize_words(document.words)\n unique_words = frozenset(words)\n return dict((word, idx) for idx, word in enumerate(unique_words))",
"def markov_analysis(text_list, num_pre=2):\n dictionary = dict()\n for i in range(len(text_list) - num_pre):\n\n prefix = tuple(text_list[i: i+num_pre])\n suffix = text_list[i+num_pre]\n\n if dictionary.get(prefix, 0) != 0:\n dictionary[prefix].append(suffix)\n else:\n dictionary[prefix] = [suffix]\n\n return dictionary",
"def get_word_to_ind (count_dict, cutoff = 0):\n word_to_ind = {}\n if (not cutoff):\n cutoff = len(count_dict.keys())\n #sorting the words by their count:\n sorted_tuples = list(reversed(sorted(count_dict.items(), key=operator.itemgetter(1))))\n for i in range(cutoff):\n cur_word = sorted_tuples[i][0]\n word_to_ind[cur_word] = i\n\n return word_to_ind",
"def analyze_reviews(reviews):\n\n good_reviews=reviews[reviews['rs_review_movie_score']>=9]\n bad_reviews=reviews[reviews['rs_review_movie_score']<=2]\n\n print 'len(good_reviews)=%s' % len(good_reviews)\n print 'len(bad_reviews)=%s' % len(bad_reviews)\n\n m = re.compile('\\d')\n\n english_stop_words=stopwords.words('english')\n\n\n def tokenize(text):\n tokens=nltk.word_tokenize(text)\n # strip out trailing puncutation\n tokens = [ token[:-1] if token[-1] in ['.',',','!','?'] else token for token in tokens]\n\n # lower case\n tokens = [token.lower() for token in tokens]\n\n # Take only relativly long characters\n tokens = [token for token in tokens if len(token)>=3]\n\n # remove words with numbers/digits\n tokens = [token for token in tokens if m.search(token) is None]\n\n # Remove stop words: http://nltk.googlecode.com/svn/trunk/doc/book/ch02.html\n tokens = [token for token in tokens if token not in english_stop_words]\n return tokens\n\n good_tokens_list = []\n for i,review in good_reviews.iterrows():\n text=review['rs_review_text']\n good_tokens_list.append(tokenize(text))\n\n bad_tokens_list = []\n for i,review in bad_reviews.iterrows():\n text=review['rs_review_text']\n bad_tokens_list.append(tokenize(text))\n\n all_words=Counter()\n for tokens in good_tokens_list + bad_tokens_list:\n for token in tokens:\n all_words[token]+=1\n\n most_common=all_words.most_common(2000)\n most_common=zip(*most_common)[0]\n\n print 'most_common_words = ',most_common[-20:]\n\n def document_features(tokens):\n return {word:word in tokens for word in most_common}\n\n good_set=[(document_features(tokens), 'pos') for tokens in good_tokens_list]\n bad_set=[(document_features(tokens), 'neg') for tokens in bad_tokens_list]\n\n train_set = good_set + bad_set\n random.shuffle(train_set) # dunno if this is necessary\n\n classifier = nltk.NaiveBayesClassifier.train(train_set)\n\n print 'accuracy',nltk.classify.accuracy(classifier, train_set)\n\n classifier.show_most_informative_features(300)\n\n return classifier",
"def map_docs(docs_file):\n word_map = defaultdict(int)\n doc_count = 0\n token_count = 0\n\n for _, doc_tokens in tokenize(docs_file):\n doc_count += 1 # count document\n token_count += len(doc_tokens) # count tokens\n for token in set(doc_tokens):\n word_map[token] += 1 # increase inverted index count\n\n docs_file.seek(0) # reset file pointer\n return doc_count, token_count, word_map",
"def relevant_docs_from_posting(self, query):\n relevant_docs = {}\n # postingLists = [self.FindPostingByTerm(term) for term in query] #list of posting file -->[idx,tweet id,tfi]\n for term in query:\n post = self.FindPostingByTerm_Binary(term)\n for p in post:\n tweet_id = p[1]\n if tweet_id not in relevant_docs.keys():\n relevant_docs[tweet_id] = {}\n relevant_docs[tweet_id][term] = p[2] * self.inverted_index[term][1] # wiq\n return relevant_docs",
"def createIndex(pages): \n index = defaultdict(list)\n for url, content, links in pages:\n counts = getNumberTerms(content)\n for term, count in counts.items():\n index[term].append((url, count))\n return index",
"def vec_to_dict(docVec):\n return {dimension:value for dimension, value in enumerate(docVec)}",
"def compute_idfs(documents):\n idfs = dict()\n total_num_documents = len(documents)\n words = set(word for sublist in documents.values() for word in sublist)\n \n for word in words:\n num_documents_containing_word = 0\n \n for document in documents.values():\n if word in document:\n num_documents_containing_word += 1\n \n idf = math.log(total_num_documents / num_documents_containing_word)\n idfs[word] = idf\n\n return idfs",
"def buildVocabToNumMapping(vocab):\n # Index starts at one so we reseve 0 as a padding character \n index = 1\n vocab_to_num = {}\n num_to_vocab = {}\n \n for word in vocab:\n if word not in vocab_to_num:\n vocab_to_num[word] = index\n num_to_vocab[index] = word\n index += 1\n print(\"Max index // length of vocab: %s\" % index)\n \n return (vocab_to_num, num_to_vocab)",
"def get_gold_pred_idx_dict(self, y_true, y_pred):\n gold_pred_idx_dict = defaultdict(lambda: defaultdict(list))\n gold_pred_ct_dict = defaultdict(lambda: defaultdict(int)) \n\n for gold_idx in range(3,self.nerTags.size):\n gold_filter = (y_true == gold_idx).astype(\"int\") # 1/0 all rows with that gold_idx\n for pred_idx in range(3,self.nerTags.size):\n pred_filter = (y_pred == pred_idx).astype(\"int\") # 1/0 all rows with that ner_idx\n match_ner_idx = np.nonzero(np.all([gold_filter, pred_filter],axis=0).astype(\"int\"))[0]\n gold_pred_idx_dict[gold_idx][pred_idx] = match_ner_idx \n gold_pred_ct_dict[gold_idx][pred_idx] = match_ner_idx.shape[0] \n\n return gold_pred_idx_dict, gold_pred_ct_dict",
"def parse_docs(data: np.ndarray, words: dict, doc_count: int, weight_func: typing.Any) -> dict:\n m = len(data)\n n = len(words.keys())\n docmatrix = {}\n wordref = {w:i for i, w in enumerate(sorted(words.keys()))}\n for i, doc in enumerate(data):\n for word in list(set(doc.split(' '))):\n if not is_stop_word(word):\n if word != '':\n docmatrix[(i, wordref[word])] = weight_func(doc_count,\n words[word]['doccount'],\n words[word]['freq'])\n return docmatrix"
] | [
"0.6648486",
"0.6142995",
"0.5936477",
"0.5701389",
"0.56871146",
"0.5630566",
"0.5610905",
"0.5560928",
"0.55124485",
"0.54871017",
"0.54758066",
"0.546961",
"0.54175454",
"0.5414213",
"0.53981483",
"0.5375703",
"0.5374007",
"0.5359983",
"0.53506577",
"0.53481126",
"0.53472376",
"0.5337356",
"0.53274447",
"0.53199077",
"0.5303088",
"0.5295425",
"0.52936536",
"0.52657497",
"0.526446",
"0.5248324"
] | 0.70010144 | 0 |
Redirect index to students page | def index() -> str:
    return redirect('/students') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def index():\n return redirect(url_for('second_page'))",
"def home_page():\n return redirect('/users')",
"def idx(_request):\n return HttpResponseRedirect('/home')",
"def second_page():\n return redirect(url_for('index'))",
"def index_file():\n return redirect(\"/\")",
"def index(request):\n return redirect('polls:index')",
"def homepage():\n return redirect('index.html')",
"def index():\n return redirect(url_for(\"home\"))",
"def redir_index():\n return redirect(url_for(\"index\"), code=301)",
"def index():\n redirect(URL('form'))",
"def entry_page():\n return redirect(url_for('index'))",
"def admin_search_student(request):\n\n if not validate_request(request): return redirect(reverse(URL_FORBIDDEN))\n if request.session['type'] == 'S' or request.session['type'] == 'R': return redirect(reverse(URL_FORBIDDEN))\n\n if request.method == \"GET\":\n return render(\n request,\n 'app/admin/admin_search_student.html',\n {\n 'title':'Student Info',\n 'layout_data' : get_layout_data(request),\n }\n )\n else:\n return redirect(reverse(URL_BAD_REQUEST))",
"def index():\n if current_user.is_authenticated:\n return redirect(url_for('home'))\n return render_template('index.html')",
"def index(request):\n\n\tif request.user.is_authenticated:\n\t\treturn HttpResponseRedirect('home')\n\treturn HttpResponseRedirect('login')",
"def index():\n if 'name' in session:\n return render_template('home.html')\n return redirect(url_for('log_in'))",
"def show_index():\r\n if 'username' in flask.session:\r\n return flask.redirect(flask.url_for('home')) # Need to fix redirect\r\n\r\n return flask.render_template(\"index.html\")",
"def view_students(request):\n\n\tcontext_dict = {\n\t\t'title': 'All Students',\n\t}\n\treturn render(request, \"viewStudent.html\", context_dict)",
"def index():\n return redirect('/client/index.html')",
"def homepage():\n if g.user:\n return redirect(f\"/user/{g.user.id}\")\n else:\n return redirect(\"/landing\")",
"def index(self):\n log.debug('index()')\n return redirect_to('/admin/dashboard')",
"def index():\n try:\n if current_user.is_administrator():\n return render_template('admin/index.html')\n return redirect(url_for('main.index'))\n except Exception as e:\n abort(500, e)",
"def homepage():\n return redirect(\"/posts\")",
"def get(self, request):\n return redirect('start:home')",
"def get(self, request):\n return redirect('start:home')",
"def get(self, request):\n return redirect('start:home')",
"def home_page():\n return redirect(url_for(_DEFAULT_ROUTE, _external=True))",
"def index(request):\n try:\n if request.user.is_authenticated:\n return render(request, \"pages/index.html\")\n else:\n return redirect('login')\n\n except:\n return redirect('login')",
"def get(self):\n self.redirect('/admin')",
"def get(self):\n self.redirect('/admin')",
"def index():\n return redirect(auth_flow.get_authorization_url())"
] | [
"0.6889249",
"0.685594",
"0.68496305",
"0.6730005",
"0.6604118",
"0.651976",
"0.6506114",
"0.6469121",
"0.6458482",
"0.64317304",
"0.640421",
"0.6387237",
"0.63767403",
"0.6375142",
"0.6348239",
"0.6342875",
"0.6328945",
"0.6312688",
"0.630708",
"0.63059723",
"0.6292399",
"0.6281294",
"0.6274253",
"0.6274253",
"0.6274253",
"0.6269788",
"0.6251427",
"0.6246372",
"0.6246372",
"0.6136182"
] | 0.8471828 | 0 |
Returns the total number of cards that given user owns of this card | def get_user_ownership_count(
        self, user: get_user_model(), prefetched: bool = False
    ) -> int:
        if prefetched:
            return sum(
                ownership.count
                for card_printing in self.printings.all()
                for localisation in card_printing.localisations.all()
                for ownership in localisation.ownerships.all()
                if ownership.owner_id == user.id
            )

        return self.printings.aggregate(
            card_count=Sum(
                Case(
                    When(
                        localisations__ownerships__owner=user,
                        then="localisations__ownerships__count",
                    ),
                    output_field=IntegerField(),
                    default=0,
                )
            )
        )["card_count"] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_user_ownership_count(\n self, user: get_user_model(), prefetched: bool = False\n ) -> int:\n if prefetched:\n return sum(\n ownership.count\n for localisation in self.localisations.all()\n for ownership in localisation.ownerships.all()\n if ownership.owner_id == user.id\n )\n\n return self.localisations.aggregate(\n card_count=Sum(\n Case(\n When(ownerships__owner=user, then=\"ownerships__count\"),\n output_field=IntegerField(),\n default=0,\n )\n )\n )[\"card_count\"]",
"def get_usage_count(request, user_id):\n board_count = Member.objects.filter(user_id=user_id, is_creator=True).count()\n return Response({\"board_count\": board_count})",
"def query_card_ownership(user_id, guild_id, card):\n definition = session.query(CardDefinition) \\\n .select_from(Card).join(CardDefinition) \\\n .filter(Card.guild_id == guild_id) \\\n .filter(or_(Card.card_id == card, func.lower(CardDefinition.name) == func.lower(card))) \\\n .filter(Card.owner_ids.contains(str(user_id))) \\\n .one_or_none()\n count = session.query(Card) \\\n .select_from(Card).join(CardDefinition) \\\n .filter(Card.owner_ids.endswith(str(user_id))) \\\n .filter(or_(Card.card_id == card, func.lower(CardDefinition.name) == func.lower(card))) \\\n .count() \\\n if definition else 0\n return definition, count",
"def total_cards(self):\n amount = 0\n for palo in self._cards:\n amount = amount + len(self._cards[palo])\n\n return amount",
"def get_amount(self): \n return len(self.get_cards())",
"def get_amount_users() -> User:\n return User.objects.all().count()",
"def _get_count(_khoros_object, _user_id, _object_type):\n _api_response = query_users_table_by_id(_khoros_object, f'{_object_type}.count(*)', _user_id)\n return int(_api_response['data']['items'][0][_object_type]['count'])",
"def total_users(user):\n user_count = User.objects.filter(is_active=True).count()\n\n return NumberResponse(user_count, 'Total number of users')",
"def get_number_of_pins_for_user(self, user):\n\t\treturn self.active_pins().filter(board__user=user).count()",
"def count(self):\n return(len(self.cards))",
"def total_oros(self):\n return len(self._cards[\"oro\"])",
"def count_users(self, session) -> int:\n\n users_quantity = session.query(User).count()\n return users_quantity",
"def get_num_railroads_owned(self, player_name):\n total = 0\n if self.get_owner(\"Reading Railroad\") == player_name:\n total += 1\n if self.get_owner(\"Pennsylvania Railroad\") == player_name:\n total += 1\n if self.get_owner(\"B. & O. Railroad\") == player_name:\n total += 1\n if self.get_owner(\"Short Line\") == player_name:\n total += 1\n return total",
"def count_deck(deck):\n return reduce(lambda x, y: x + y['quantity'], deck['cards'], 0)",
"async def get_player_total(user_id):\n return ex.first_result(await ex.conn.fetchrow(\"SELECT total FROM blackjack.currentstatus WHERE userid = $1\", user_id))",
"def num_cards(self):\n length=len(self.cards)\n return length",
"def sum_cards(self):\n has_ace = False\n sum = 0\n\n # Add up players cards\n for card in self.cards:\n if card.card_value == \"ace\":\n has_ace = True\n sum += card.game_value\n\n # Handle case where ace plays low\n if sum > 21 and has_ace:\n sum -= 10\n\n return sum",
"def get_num_cards(self):\n \n return self._hand.get_size()",
"def get_number_of_likes_for_user(self, user):\n\t\tfrom pins.models import Pin\n\t\tpin_ctype = ContentType.objects.get_for_model(Pin)\n\t\tpin_list = Pin.objects.active_pins().filter(board__user=user).values_list('pk', flat=True)\n\t\treturn self.filter(content_type=pin_ctype, object_id__in=pin_list).count()",
"def count_user():\r\n session = tables.get_session()\r\n if session is None:\r\n return 0\r\n count = 0\r\n try:\r\n user_account = UserAccount()\r\n uid = user_account.get_max_uid(session)\r\n if uid is None:\r\n return 0\r\n return uid + 1\r\n except SQLAlchemyError as err:\r\n LOGGER.error('Count user number failed: %s', err)\r\n return count\r\n finally:\r\n session.close()\r\n return count",
"def count_total_each_user():\r\n trans = transaction.begin()\r\n user_list = UserMgr.get_list(active=True)\r\n for user in user_list:\r\n StatBookmarkMgr.count_user_bookmarks(user.username)\r\n trans.commit()",
"def count_users(self):\n return self.get_session.query(func.count(self.user_model.id)).scalar()",
"def get_kudos_given_count(khoros_object, user_settings=None, user_id=None, login=None, email=None):\n user_settings = _process_settings_and_user_id(khoros_object, user_settings, user_id, login, email)\n return _get_sum_weight(khoros_object, user_settings['id'], 'kudos_given')",
"def get_count(username):\n return get_contributor(username)[\"count\"]",
"def utilization(user, ressource):\n if ressource == 'accounts':\n return Account.objects.filter(vhost__in=list(get_vhosts(user))).count()\n return None",
"def calculate_score(player_cards):\n score = sum(player_cards)\n return score",
"def get_total_issues_per_user(issues):\n return get_total_contributions_per_user(issues, 'user')",
"def getPoints(self):\n count = 0\n for card in self.cards:\n if card.rank > 9:\n count += 10\n elif card.rank == 1:\n count += 11\n else:\n count += card.rank\n # Deduct 10 if Ace is available and needed as 1\n for card in self.cards:\n if count <= 21:\n break\n elif card.rank == 1:\n count -= 10\n return count",
"def get_total_users(request):\n number_of_users = User.objects.count()\n res = {\n 'total_users': number_of_users,\n }\n return Response(res, status=status.HTTP_200_OK)",
"async def get_user_hw_action_list_count(\n request: Request,\n user_id: object = None,\n name=None) -> int:\n\n ret_val = 0\n query_str = get_user_hw_action_list_count_query\n try:\n\n async with request.app.pg.acquire() as connection:\n row = await connection.fetchval(query_str, user_id, name)\n if row is not None:\n ret_val = row\n except Exception as gclcerr:\n logger.error('get_user_hw_action_list_count service erred with: {}'.format(gclcerr))\n\n return ret_val"
] | [
"0.7715831",
"0.69351727",
"0.67096174",
"0.6708182",
"0.6541524",
"0.64215946",
"0.63916093",
"0.6306432",
"0.6229252",
"0.6144589",
"0.6107996",
"0.6098523",
"0.60861856",
"0.6067441",
"0.6061621",
"0.60230386",
"0.60217756",
"0.6010899",
"0.6005462",
"0.5943102",
"0.589071",
"0.5857916",
"0.58565456",
"0.5850169",
"0.5848177",
"0.58336544",
"0.5822212",
"0.5819246",
"0.5811677",
"0.58069324"
] | 0.7711362 | 1 |
Gets whether this card has another half (flip, split, transform etc) | def has_other_half(self) -> bool:
        return self.layout in (
            "flip",
            "split",
            "transform",
            "meld",
            "aftermath",
            "adventure",
            "modal_dfc",
        ) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def can_split(self) -> bool:\n if len(self.cards) == 2 and self.cards[0].value == self.cards[1].value:\n return True\n else:\n return False",
"def is_pair(hand):\n\tis_a_pair = False\n\ti = 0\n\twhile i < 13:\n\t\tif hand[i] == 2:\n\t\t\tis_a_pair = True\n\t\ti += 1 \n\thigh_card = 0\n\tj = 0\n\twhile j < 13 and is_a_pair == True:\n\t\tif hand[j] == 2 and j >= high_card:\n\t\t\thigh_card = j\n\t\tj += 1\n\tif is_a_pair:\n\t\treturn True, high_card\n\telse:\n\t\treturn False",
"def hasBlackjack(self):\n return len(self.cards) == 2 and self.getPoints() == 21",
"def is_full_house(hand):\n\tis_a_full_house = False\n\tnum_three_kind = 0\n\tnum_pair = 0\n\ti = 0\n\twhile i < 13:\n\t\tif hand[i] == 3:\n\t\t\tnum_three_kind += 1\n\t\telif hand[i] == 2:\n\t\t\tnum_pair += 1\n\t\ti += 1\n\tif num_three_kind ==1 and num_pair == 1:\n\t\tis_a_full_house = True\n\t\n\thigh_card = 0\n\tj = 0\n\twhile j < 13 and is_a_full_house == True:\n\t\tif (hand[j] == 2 or hand[j] == 3) and j >= high_card:\n\t\t\thigh_card = j\n\t\tj += 1\n\tif is_a_full_house:\n\t\treturn True, high_card\n\telse:\n\t\treturn False",
"def in_side(self, side):\n if side == \"U\":\n return self.z() == 1\n if side == \"D\":\n return self.z() == -1\n if side == \"F\":\n return self.y() == -1\n if side == \"B\":\n return self.y() == 1\n if side == \"R\":\n return self.x() == 1\n if side == \"L\":\n return self.x() == -1",
"def is_fullp(self):\n front = (self.front + 1) % self.length\n return front == self.rear",
"def is_card_in_other_hands(self, own_hand_index, card):\n for i, hand in enumerate(self.hands):\n if i == own_hand_index:\n continue\n if card in hand:\n return True\n return False",
"def isFull(self) -> bool:\n if self.move_forward(self.rear) == self.front:\n return True\n else:\n return False",
"def is_blackjack(self) -> bool:\n if self.score == 21 and len(self.cards) == 2:\n return True\n else:\n return False",
"def is_blackjack(self):\n if self.hand == 21 and len(list(self)) ==2:\n print '%s = Blackjack'%self\n return True",
"def still_in_hand(self):\n return len(self.hand.cards)!=0",
"def is_high_card(hand):\n\tis_a_high_card = True\n\ti = 0\n\twhile i < 13:\n\t\tif hand[i] > 1:\n\t\t\tis_high_card = False\n\t\ti += 1\n\t\t\n\thigh_card = 0\n\tj = 0\n\twhile j < 13 and is_a_high_card == True:\n\t\tif hand[j] == 1 and j >= high_card:\n\t\t\thigh_card = j\n\t\tj += 1\n\tif is_a_high_card:\n\t\treturn True, high_card\n\telse:\n\t\treturn False",
"def has_twopair(self):\n count = 0\n self.suit_hist()\n for val in self.ranks.values():\n if val == 2:\n count += 1\n if count >= 2:\n self.rank_per_hand['1'] = \"two pair\"\n return True\n return False",
"def has_fullhouse(self):\n if self.has_pair() & self.has_three_of_a_kind():\n self.rank_per_hand['5'] = \"full house\"\n return True\n return False",
"def has_crossing_len2_ob(self) -> bool:\n fcell = self.first_cell\n scell = self.second_cell\n if self._fuse_row:\n possible_obs = [\n GriddedPerm((0, 1), (fcell, scell)),\n GriddedPerm((1, 0), (scell, fcell)),\n ]\n else:\n possible_obs = [\n GriddedPerm((0, 1), (fcell, scell)),\n GriddedPerm((1, 0), (fcell, scell)),\n ]\n return any(ob in possible_obs for ob in self._tiling.obstructions)",
"def is_half_complete(self):\r\n from foldit.models import PuzzleComplete\r\n complete = PuzzleComplete.is_level_complete(\r\n self.system.anonymous_student_id,\r\n self.required_level_half_credit,\r\n self.required_sublevel_half_credit,\r\n self.due_time)\r\n return complete",
"def is_crossing_len2(self, gp: GriddedPerm) -> bool:\n return (\n len(gp) == 2\n and gp.occupies(self.first_cell)\n and gp.occupies(self.second_cell)\n )",
"def is_flush(hand):\n\tis_a_flush = False\n\ti = 16\n\twhile i >= 13:\n\t\tif hand[i] == 5:\n\t\t\tis_a_flush = True\n\t\ti -= 1\n \n\thigh_card = 0\n\tj = 0\n\twhile j < 13 and is_a_flush == True:\n\t\tif hand[j] == 1 and j >= high_card:\n\t\t\thigh_card = j\n\t\tj += 1\n\tif is_a_flush:\n\t\treturn True, high_card\n\telse:\n\t\treturn False",
"def has_cards(self):\n return self.hand.len() > 0",
"def at_last_stich(self):\n return len(self.cards) == 1",
"def has_pair(self):\n self.suit_hist()\n for val in self.ranks.values():\n if val == 2:\n self.rank_per_hand['0'] = \"pair\"\n return True\n return False",
"def is_two_pair(hand):\n\tfaces_of_pairs = []\n\tis_a_two_pair = False\n\ti = 0\n\twhile i < 13:\n\t\tif hand[i] == 2:\n\t\t\tfaces_of_pairs.append(i)\n\t\ti += 1\n\tif len(faces_of_pairs) == 2:\n\t\tis_a_two_pair = True\n\tfor fp in faces_of_pairs:\n\t\tprint(fp)\n\tif is_a_two_pair:\n\t\treturn True, faces_of_pairs[1]\n\telse:\n\t\treturn False",
"def is_straight(hand):\n\ti = 0\n\twhile i < 8:\n\t\tif hand[i] == 1 and hand[i+1] == 1 and hand[i+2] == 1 and hand[i+3] == 1 and hand[i+4] == 1:\n\t\t\treturn True, i + 4\n\t\ti += 1\n\treturn False",
"def is_single_riffle(half1, half2, shuffled_deck):\n # base case\n if len(shuffled_deck) == 0:\n return True\n\n # if the top of shuffled_deck is the same as the top of half1\n # (making sure first that we have a top card in half1)\n if len(half1) and half1[0] == shuffled_deck[0]:\n\n # take the top cards off half1 and shuffled_deck and recurse\n return is_single_riffle(half1[1:], half2, shuffled_deck[1:])\n\n # if the top of shuffled_deck is the same as the top of half2\n elif len(half2) and half2[0] == shuffled_deck[0]:\n\n # take the top cards off half2 and shuffled_deck and recurse\n return is_single_riffle(half1, half2[1:], shuffled_deck[1:])\n\n # top of shuffled_deck doesn't match top of half1 or half2\n # so we know it's not a single riffle\n else:\n return False",
"def isFull(self) -> bool:\n return (self.rear + 1) % self.capacity == self.front",
"def hasTwoSons(self):\n \n return self._leftSon is not None and self._rightSon is not None",
"def is_four_of_a_kind(hand):\n\tis_a_four_of_a_kind = False\n\ti = 0\n\twhile i < 13:\n\t\tif hand[i] == 4:\n\t\t\tis_a_four_of_a_kind = True\n\t\ti += 1 \n\t\t\n\thigh_card = 0\n\tj = 0\n\twhile j < 13 and is_a_four_of_a_kind == True:\n\t\tif hand[j] == 4 and j >= high_card:\n\t\t\thigh_card = j\n\t\tj += 1\n\tif is_a_four_of_a_kind:\n\t\treturn True, high_card\n\telse:\n\t\treturn False",
"def drone_has_flipped(self, current_orientation):\n has_flipped = True\n\n self.max_roll = rospy.get_param(\"/drone/max_roll\")\n self.max_pitch = rospy.get_param(\"/drone/max_pitch\")\n\n rospy.logwarn(\"#### HAS FLIPPED? ########\")\n rospy.logwarn(\"RPY current_orientation\"+str(current_orientation))\n rospy.logwarn(\"max_roll\"+str(self.max_roll) +\n \",min_roll=\"+str(-1*self.max_roll))\n rospy.logwarn(\"max_pitch\"+str(self.max_pitch) +\n \",min_pitch=\"+str(-1*self.max_pitch))\n rospy.logwarn(\"############\")\n\n if current_orientation.x > -1*self.max_roll and current_orientation.x <= self.max_roll:\n if current_orientation.y > -1*self.max_pitch and current_orientation.y <= self.max_pitch:\n has_flipped = False\n\n return has_flipped",
"def is_straight_flush(hand):\n\tis_a_local_flush = False\n\tis_a_local_straight = False\n\tlocal_high_card = 0\n\ti = 16\n\twhile i >= 13:\n\t\tif hand[i] == 5:\n\t\t\tis_a_local_flush = True\n\t\ti -= 1\n\tif is_a_local_flush:\n\t\tj = 0\n\t\twhile j < 8:\n\t\t\tif hand[j] == 1 and hand[j + 1] == 1 and hand[j + 2] == 1 and hand[j + 3] == 1 and hand[j + 4] == 1:\n\t\t\t\tis_a_local_straight = True\n\t\t\t\tlocal_high_card = j + 4\n\t\t\tj += 1\n\tif is_a_local_flush and is_a_local_straight:\n\t\treturn True, local_high_card\n\treturn False",
"def _unbalanced(self):\n if self.internal():\n if self.full():\n if abs(self._leftchild._height-self._rightchild._height) >= 2:\n return True\n elif self._leftchild and not self._rightchild:\n if self._leftchild._height >= 2:\n return True\n elif self._rightchild._height >= 2:\n return True\n return False"
] | [
"0.6733933",
"0.618208",
"0.61451954",
"0.61021394",
"0.60967654",
"0.6092608",
"0.60442275",
"0.60439914",
"0.6035215",
"0.59942883",
"0.5980618",
"0.5967132",
"0.5947805",
"0.58967525",
"0.5893755",
"0.5892925",
"0.5842676",
"0.5830995",
"0.5810103",
"0.5801732",
"0.57994264",
"0.5796336",
"0.5778259",
"0.5752251",
"0.57341695",
"0.57146007",
"0.5708322",
"0.5699208",
"0.568146",
"0.5630865"
] | 0.7691357 | 0 |
Gets the keyrune code that should be used for this printing In 99% of all cases, this will return the same value as printing.set.keyrune_code But for Guild Kit printings, the guild symbol should be used instead | def get_set_keyrune_code(self) -> str:
        if self.set.code in ("GK1", "GK2") and len(self.face_printings.all()) == 1:
            first_face = self.face_printings.all()[0]
            if first_face.watermark:
                return first_face.watermark
        return self.set.keyrune_code.lower() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_code_to_printings_key(printing):\n return (\n printing.set_integer or 0,\n str(printing.set_variant),\n printing.multiverseid or 0,\n printing.card_name,\n )",
"def getCode1Letter(self):\n dataDict = self.__dict__\n # NB must be done by direct access\n result = dataDict['code1Letter']\n return result",
"def get_char(cls, key, mods):\n return cls.__key_dict.key_to_char(key, mods)",
"def getkey(self) -> str:\n return self.screen.getkey()",
"def keypad_key(m) -> str:\n return f\"keypad_{m.digit}\"",
"def getCode1Letter(self):\n dataDict = self.__dict__\n cc = self.stdChemComp\n if cc is None:\n result = None\n else:\n result = cc.code1Letter\n return result",
"def _GetKeyString(self):",
"def _GetKeyString(self):",
"def get_char(self) -> str:\n return self._char",
"def _GetKeyString(self):\n return self.__key_string",
"def __GetKeyString(self):\n return self._GetKeyString()",
"def __GetKeyString(self):\n return self._GetKeyString()",
"def process_key(key):\n print(chr(key))",
"def _GetKeyString(self):\n return self.__key_string",
"def letter(self):\n return self._letter",
"def getChar(self,code):\r\n return chr(code)",
"def card_name_to_printing_key(printing):\n return (\n printing.set_code,\n printing.set_integer or 0,\n str(printing.set_variant),\n printing.multiverseid or 0,\n )",
"def _above128char_to_keycode(self, char: str) -> int:\n if ord(char) in self.HIGHER_ASCII:\n return self.HIGHER_ASCII[ord(char)]\n if char in self.HIGHER_ASCII:\n return self.HIGHER_ASCII[char]\n return 0",
"def letter(self) -> str:\n my_letter = None\n if self is LieType.A:\n my_letter = \"A\"\n elif self is LieType.B:\n my_letter = \"B\"\n elif self is LieType.C:\n my_letter = \"C\"\n elif self is LieType.D:\n my_letter = \"D\"\n else:\n raise ValueError(\n \"This is not in the enum of Lie types so this should be unreachable\")\n return my_letter",
"def character(self) -> str:\r\n return self.char if self.was_guessed else '_'",
"def getCode1Letter(self):\n dataDict = self.__dict__\n raise ApiError(\"\"\"%s.getCode1Letter:\n getCode1Letter should never be called - must be overridden in subclass\"\"\" % self.qualifiedName\n + \": %s\" % (self,)\n )",
"def __str__(self):\n return str(self.__alphabet)",
"def _get_key(self, key_column):\n return key_column.text.replace(u'\\xa0', u' ')",
"def getPlayerSymbol(self) -> str:\n return self.player.getSymbol()",
"def code(self) -> str:\n return self._code",
"def code(self) -> str:\n return self._code",
"def player_key(self):\n # type: () -> string_types\n return self._player_key",
"def getFENtileLetter(fen,letter,number):\n l2i = lambda l: ord(l)-ord('A') # letter to index\n piece_letter = fen[(8-number)*8+(8-number) + l2i(letter)]\n return ' KQRBNPkqrbnp'.find(piece_letter)",
"def _mode_key(guild_id: int) -> str:\n return f\"mode/{guild_id}\"",
"def gcode_text(self):\n return os.linesep.join(map(str, self.gcode))"
] | [
"0.5882872",
"0.5875788",
"0.57938766",
"0.5729844",
"0.57142025",
"0.5691378",
"0.5621861",
"0.5621861",
"0.55469465",
"0.5541225",
"0.55250955",
"0.55160975",
"0.55054325",
"0.5503266",
"0.54999036",
"0.54993963",
"0.549464",
"0.5456612",
"0.5454372",
"0.5450911",
"0.5397566",
"0.53921133",
"0.53608274",
"0.53599286",
"0.53340137",
"0.53340137",
"0.5333268",
"0.5329205",
"0.5312412",
"0.53095514"
] | 0.7367078 | 0 |
Returns the total number of cards that given user owns of this printing | def get_user_ownership_count(
        self, user: get_user_model(), prefetched: bool = False
    ) -> int:
        if prefetched:
            return sum(
                ownership.count
                for localisation in self.localisations.all()
                for ownership in localisation.ownerships.all()
                if ownership.owner_id == user.id
            )

        return self.localisations.aggregate(
            card_count=Sum(
                Case(
                    When(ownerships__owner=user, then="ownerships__count"),
                    output_field=IntegerField(),
                    default=0,
                )
            )
        )["card_count"] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_user_ownership_count(\n self, user: get_user_model(), prefetched: bool = False\n ) -> int:\n if prefetched:\n return sum(\n ownership.count\n for card_printing in self.printings.all()\n for localisation in card_printing.localisations.all()\n for ownership in localisation.ownerships.all()\n if ownership.owner_id == user.id\n )\n\n return self.printings.aggregate(\n card_count=Sum(\n Case(\n When(\n localisations__ownerships__owner=user,\n then=\"localisations__ownerships__count\",\n ),\n output_field=IntegerField(),\n default=0,\n )\n )\n )[\"card_count\"]",
"def get_amount(self): \n return len(self.get_cards())",
"def get_usage_count(request, user_id):\n board_count = Member.objects.filter(user_id=user_id, is_creator=True).count()\n return Response({\"board_count\": board_count})",
"def query_card_ownership(user_id, guild_id, card):\n definition = session.query(CardDefinition) \\\n .select_from(Card).join(CardDefinition) \\\n .filter(Card.guild_id == guild_id) \\\n .filter(or_(Card.card_id == card, func.lower(CardDefinition.name) == func.lower(card))) \\\n .filter(Card.owner_ids.contains(str(user_id))) \\\n .one_or_none()\n count = session.query(Card) \\\n .select_from(Card).join(CardDefinition) \\\n .filter(Card.owner_ids.endswith(str(user_id))) \\\n .filter(or_(Card.card_id == card, func.lower(CardDefinition.name) == func.lower(card))) \\\n .count() \\\n if definition else 0\n return definition, count",
"def total_cards(self):\n amount = 0\n for palo in self._cards:\n amount = amount + len(self._cards[palo])\n\n return amount",
"def get_num_railroads_owned(self, player_name):\n total = 0\n if self.get_owner(\"Reading Railroad\") == player_name:\n total += 1\n if self.get_owner(\"Pennsylvania Railroad\") == player_name:\n total += 1\n if self.get_owner(\"B. & O. Railroad\") == player_name:\n total += 1\n if self.get_owner(\"Short Line\") == player_name:\n total += 1\n return total",
"def total_oros(self):\n return len(self._cards[\"oro\"])",
"def count(self):\n return(len(self.cards))",
"def num_cards(self):\n length=len(self.cards)\n return length",
"def cards_per_hand(self):\n s = \"\"\n for id in self.player_id_list:\n name = self.players[id].name\n cards = len(self.players[id])\n s += \"{} has {} cards.\\n\".format(name, cards)\n return s[:-1]",
"def getPoints(self):\n count = 0\n for card in self.cards:\n if card.rank > 9:\n count += 10\n elif card.rank == 1:\n count += 11\n else:\n count += card.rank\n # Deduct 10 if Ace is available and needed as 1\n for card in self.cards:\n if count <= 21:\n break\n elif card.rank == 1:\n count -= 10\n return count",
"def sum_cards(self):\n has_ace = False\n sum = 0\n\n # Add up players cards\n for card in self.cards:\n if card.card_value == \"ace\":\n has_ace = True\n sum += card.game_value\n\n # Handle case where ace plays low\n if sum > 21 and has_ace:\n sum -= 10\n\n return sum",
"def _get_count(_khoros_object, _user_id, _object_type):\n _api_response = query_users_table_by_id(_khoros_object, f'{_object_type}.count(*)', _user_id)\n return int(_api_response['data']['items'][0][_object_type]['count'])",
"def count_deck(deck):\n return reduce(lambda x, y: x + y['quantity'], deck['cards'], 0)",
"def get_num_cards(self):\n \n return self._hand.get_size()",
"async def get_player_total(user_id):\n return ex.first_result(await ex.conn.fetchrow(\"SELECT total FROM blackjack.currentstatus WHERE userid = $1\", user_id))",
"def get_amount_users() -> User:\n return User.objects.all().count()",
"def get_kudos_given_count(khoros_object, user_settings=None, user_id=None, login=None, email=None):\n user_settings = _process_settings_and_user_id(khoros_object, user_settings, user_id, login, email)\n return _get_sum_weight(khoros_object, user_settings['id'], 'kudos_given')",
"def allocated_tickets(self, user=None):\n query = self.raffleticket_set.filter(raffle_prize=self)\n if user:\n query = query.filter(user=user)\n \n return query.count()",
"def get_number_of_pins_for_user(self, user):\n\t\treturn self.active_pins().filter(board__user=user).count()",
"def show_card_counts(self, faction_type):\n\n faction_list = mtg.Faction.get_factions(faction_type)\n print(\"{}Total cards in:{}\".format(Style.BRIGHT, Style.RESET_ALL))\n for f in sorted(faction_list):\n print(\"{:12}{}\".format(f, self.card_count(f)))",
"def total_users(user):\n user_count = User.objects.filter(is_active=True).count()\n\n return NumberResponse(user_count, 'Total number of users')",
"def CountSuits(hand):\r\n numtrump = 0\r\n numss = 0\r\n numos1 = 0\r\n numos2 = 0\r\n\r\n for card in hand:\r\n if card < 7:\r\n numtrump += 1\r\n elif card < 12:\r\n numss += 1\r\n elif card < 18:\r\n numos1 += 1\r\n else:\r\n numos2 += 1\r\n \r\n numsuits = 0\r\n if numtrump != 0:\r\n numsuits += 1\r\n if numss != 0:\r\n numsuits += 1\r\n if numos1 != 0:\r\n numsuits += 1\r\n if numos2 != 0:\r\n numsuits += 1\r\n return [numtrump,numss,numos1,numos2,numsuits]",
"def get_count(username):\n return get_contributor(username)[\"count\"]",
"def getTotalMancount(self, playerID):\n count=0\n for camp in self.__camps:\n if( camp.getOwner() == playerID ):\n count = count + camp.getMancount()\n for army in self.__armies:\n if( army.getOwner() == playerID ):\n count = count + army.getMancount()\n return count",
"def total(self):\n for card in self.cards:\n if not card.value:\n return 0\n t = 0\n for card in self.cards:\n t += card.value\n contains_ace = False\n for card in self.cards:\n if card.value == BJ_Card.ACE_VALUE:\n contains_ace = True\n if contains_ace and t <= 11:\n t += 10\n return t",
"def calculate_score(player_cards):\n score = sum(player_cards)\n return score",
"def get_total_prs_per_user(prs):\n return get_total_contributions_per_user(prs, 'user')",
"def get_total_number_of_buildings_for_user(request):\n buildings_count = get_buildings_for_user_count(request.user)\n\n return {'status': 'success', 'buildings_count': buildings_count}",
"def count(self):\n return len(self.deck)"
] | [
"0.7798705",
"0.6641717",
"0.6554687",
"0.65397143",
"0.65318733",
"0.62367505",
"0.62067574",
"0.6073335",
"0.59826356",
"0.5970287",
"0.59580576",
"0.5957845",
"0.5957355",
"0.5945392",
"0.5938073",
"0.5932106",
"0.5879058",
"0.57467246",
"0.5720436",
"0.56724894",
"0.5669512",
"0.56611",
"0.56502014",
"0.5641143",
"0.5590045",
"0.5589523",
"0.55645055",
"0.5562864",
"0.55623484",
"0.55596274"
] | 0.7298613 | 1 |
Applies a change of the number of cards a user owns (can add or subtract cards) | def apply_user_change(self, change_count: int, user: get_user_model()) -> bool:
        if user is None or change_count == 0:
            return False
        try:
            existing_card = UserOwnedCard.objects.get(
                card_localisation=self, owner=user
            )
            if change_count < 0 and abs(change_count) >= existing_card.count:
                # If the count is below 1 then there is no point thinking that the user "owns"
                # the card anymore, so just delete the record
                change_count = -existing_card.count
                existing_card.delete()
            else:
                existing_card.count += change_count
                existing_card.clean()
                existing_card.save()
        except UserOwnedCard.DoesNotExist:
            if change_count < 0:
                # You can't subtract cards when you don't have any
                return False
            new_ownership = UserOwnedCard(
                count=change_count, owner=user, card_localisation=self
            )
            new_ownership.clean()
            new_ownership.save()
        change = UserCardChange(
            card_localisation=self,
            owner=user,
            difference=change_count,
            date=datetime.datetime.now(),
        )
        change.clean()
        change.save()
        return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def calculate_cards(cards):\n if 11 in cards and sum(cards) > 21:\n cards.remove(11)\n cards.append(1)\n print('Changing 11 --> 1')\n print(f'Your hand is now {cards} and your total is {sum(cards)}')\n elif sum(cards) > 21:\n print('Sorry. Looks like you busted!')\n restart_game()\n elif sum(cards) == 21 and len(cards) == 2:\n print('You win with a blackjack!')\n restart_game()",
"async def changescore(self, ctx, num: int, *, user: discord.Member):\n self.data[ctx.guild.id]['score'][user.id] += num\n return await ctx.send(f\"{user}'s score has been changed to {self.data[ctx.guild.id]['score'][user.id]}.\")",
"async def set_aces_used(card_list, user_id):\n separator = ','\n cards = separator.join(card_list)\n await ex.conn.execute(\"UPDATE blackjack.currentstatus SET acesused = $1 WHERE userid = $2\", cards, user_id)",
"def update_collection_num(user_id, another_user_id, is_add):\n\n user = db_session.query(User).filter_by(user_id=user_id).scalar()\n another_user = db_session.query(User).filter_by(\n user_id=another_user_id).scalar()\n if is_add:\n user.follow_num += 1\n another_user.be_followed_num += 1\n else:\n user.follow_num -= 1\n another_user.be_followed_num -= 1\n db_session.commit()",
"def updateSuit(self, playersView: Player, ofPlayer: Player, suit: Suit):\n deck = [Card(suit, val) for val in range(2, 14 + 1)]\n\n playersProb = self[playersView, ofPlayer, :]\n\n for otherPlayer in Player:\n if otherPlayer != playersView and otherPlayer != ofPlayer:\n for card in deck:\n self[playersView, otherPlayer, card] += (playersProb[card.__hash__()] / 2)\n\n for card in deck:\n self[playersView, ofPlayer, card] = 0",
"def _update_value(self) -> int:\n\n value_list = [card.value if card.value <= 10 else 10 for card in self]\n hand_value = sum(value_list)\n\n # Checks to see if any Aces can be worth 11 points instead of 1 point\n while value_list.count(1) > 0 and (21 - hand_value) >= 10:\n value_list[value_list.index(1)] = 11\n hand_value = sum(value_list)\n\n self._value = hand_value",
"def hook_gain_this_card(self, game, player):\n empties = sum(1 for st in game.cardpiles if game[st].is_empty())\n for _ in range(empties):\n player.gain_card(\"Gold\")",
"def calculate_score(cards):\n if sum(cards) == 21 and len(cards) == 2:\n return 0\n \n if 11 in cards and sum(cards) > 21:\n cards.remove 11\n cards.append 1\n return sum(cards)",
"def _update_suspicion_1(self):\n\n for bucket in self.used_buckets:\n multiplier = 1 if bucket.attacked else 0\n for user in bucket.users:\n user.suspicion += multiplier",
"def get_amount(self): \n return len(self.get_cards())",
"def update_total_cards(self, screens_to_update, text):\n if type(self._frame) in screens_to_update:\n self._frame.snipe_form_component.lbl_total_players.configure(text=text)",
"def sum_hand(self, cards):\n self.totalValue = 0\n for card in cards:\n self.totalValue += DeckOfCards.value(self, card)\n\n for card in cards:\n if self.totalValue > 21 and 'A' in card:\n self.totalValue -= 10\n \n if self.totalValue > 21:\n self.keepGoing = False\n print(f\"{self.name} busted!\")",
"def _update_suspicion_1(self):\n\n for bucket in self.buckets:\n multiplier = 1 if bucket.attacked else 0\n for user in bucket.users:\n user.suspicion += multiplier",
"def profile_likes_count_decrease(target_user: User):\n try:\n with transaction.atomic():\n dillo.models.profiles.Profile.objects.filter(user=target_user).update(\n likes_count=F('likes_count') - 1\n )\n except IntegrityError:\n log.warning('Integrity error when incrementing likes count for user %i' % target_user.id)\n target_user.profile.recalculate_likes()",
"async def add_card(self, user_id):\n end_game = False\n check = 0\n\n separator = ','\n current_cards = await self.get_current_cards(user_id)\n game_id = await self.get_game_by_player(user_id)\n game = await self.get_game(game_id)\n channel = await ex.client.fetch_channel(game[5])\n stand = await self.check_player_standing(user_id)\n player1_score = await self.get_player_total(game[1])\n player2_score = await self.get_player_total(game[2])\n player1_cards = await self.get_current_cards(game[1])\n if not stand:\n available_cards = await self.get_available_cards(game_id)\n random_card = random.choice(available_cards)\n current_cards.append(str(random_card))\n cards = separator.join(current_cards)\n current_total = await self.get_player_total(user_id)\n random_card_value = await self.get_card_value(random_card)\n if current_total + random_card_value > 21:\n for card in current_cards: # this includes the random card\n if await self.check_if_ace(card, user_id) and check != 1:\n check = 1\n current_total = (current_total + random_card_value) - 10\n if check == 0: # if there was no ace\n current_total = current_total + random_card_value\n else:\n current_total = current_total + random_card_value\n await ex.conn.execute(\"UPDATE blackjack.currentstatus SET inhand = $1, total = $2 WHERE userid = $3\", cards, current_total, user_id)\n if current_total > 21:\n if user_id == game[2] and self.check_if_bot(game[2]):\n if player1_score > 21 and current_total >= 16:\n end_game = True\n await self.set_player_stand(game[1])\n await self.set_player_stand(game[2])\n elif player1_score > 21 and current_total < 16:\n await self.add_card(game[2])\n elif player1_score < 22 and current_total > 21:\n pass\n else:\n end_game = True\n elif self.check_if_bot(game[2]) and not self.check_if_bot(user_id): # if user_id is not the bot\n if player2_score < 16:\n await self.add_card(game[2])\n else:\n await self.set_player_stand(user_id)\n await self.set_player_stand(game[2])\n end_game = True\n else:\n if user_id == game[2] and self.check_if_bot(game[2]):\n if current_total < 16143478541328187392 and len(player1_cards) > 2:\n await self.add_card(game[2])\n if await self.check_player_standing(game[1]) and current_total >= 16:\n end_game = True\n if not self.check_if_bot(user_id):\n if self.check_if_bot(game[2]):\n await self.send_cards_to_channel(channel, user_id, random_card, True)\n else:\n await self.send_cards_to_channel(channel, user_id, random_card)\n else:\n await channel.send(f\"> **You already stood.**\")\n if await self.check_game_over(game_id):\n await self.finish_game(game_id, channel)\n if end_game:\n await self.finish_game(game_id, channel)",
"def withdraw_by_username(self,amount,username):\r\n pass",
"def score_up(self, increment_by):\n self.user_score += increment_by",
"def make_count_change():\n \"*** YOUR CODE HERE ***\"",
"def calculate_score(list_of_cards):\n if sum(list_of_cards) == 21 and len(list_of_cards) == 2:\n return 0\n if 11 in list_of_cards and sum(list_of_cards) > 21:\n list_of_cards.remove(11)\n list_of_cards.append(1)\n return sum(list_of_cards)",
"def increase_score(self):\n self.score += 1",
"def updateBotCounts(self, nextCard):\n nextVal = dnUtil.getValue(nextCard)\n state = self.getState()\n counts = self.getCounts(state)\n newCount = counts.copy()\n for value in dnUtil.valuesList:\n if counts[value][2] == 0:\n continue\n update = self.updateCount(value, nextVal, counts[value])\n newCount[value] = update\n self.setCounts(newCount)",
"def calculate_score(card_list):\n if sum(card_list) == 21 and len(card_list) == 2:\n return 0\n if sum(card_list) > 21 and 11 in card_list:\n card_list.remove(11)\n card_list.append(1)\n return sum(card_list)",
"def hook_trashThisCard(self, game, player):\n player.plr_gain_card(cost=self.cost - 1)",
"def incr_no_of_attacks(self):\n\t\tself.__anom += 1\n\t\tself.__anom_lbl.setText(str(self.__anom))",
"def increase(pot, amount, player):\r\n pot = pot + amount\r\n player.cash = player.cash - amount\r\n return pot",
"def deal(self, cards_num):\n\n cards = []\n while cards_num > 0:\n\n x = random.randint(0, 53)\n if self.in_use[x] == 0:\n self.in_use[x] += 1\n cards.append(x)\n cards_num -= 1\n\n return cards",
"def going_out(self, cards):\n for card in cards:\n self.out_of_use.append(int(card))\n # print(self.out_of_use)",
"def account_for_new_score(self):\n self.rolls += 1\n if self.continued is True:\n self.total_score += self.current_roll.score\n self.dice_remaining = self.current_roll.dice_remaining\n\n if self.dice_remaining == 0:\n self.resets += 1\n self.dice_remaining = 5",
"async def update_total(self, ctx: Context, override=0):\n\n guild = ctx.guild\n author = ctx.author\n\n old = await self.config.guild(guild).signed()\n\n if override:\n return await self.config.guild(guild).signed.set(old+override)\n\n player_id = await self.config.guild(guild).player_id()\n player_role = discord.utils.get(guild.roles, id=player_id)\n\n if player_role in author.roles:\n await self.config.guild(guild).signed.set(old-1)",
"def add_card(self, card):\n if not isinstance(card, Card):\n raise TypeError(\"'card' must be a card object.\")\n # append new card to list of cards in the hand\n self.cards.append(card)\n self.total = card + self.total\n # aces require a little more work\n if card.rank == 14:\n self.soft = True\n self.num_aces += 1\n self.num_hard_aces += 1\n # account for soft hands\n if self.total > 21 and self.soft:\n self.total -= 10\n self.num_hard_aces -= 1\n self.soft = False\n # catch the edge case where you're delt 12+ aces\n if self.total > 21:\n self.total -= 10\n self.num_hard_aces -= 1\n self.soft = False\n if self.num_hard_aces > 0:\n self.soft = True\n if self.total > 21:\n self.bust = True"
] | [
"0.6545522",
"0.63200855",
"0.6267624",
"0.61299944",
"0.60999966",
"0.5987832",
"0.59660965",
"0.5964261",
"0.5903642",
"0.58689475",
"0.5830519",
"0.58236086",
"0.58002126",
"0.5779088",
"0.5747298",
"0.5740831",
"0.57314503",
"0.57169497",
"0.57010615",
"0.56986016",
"0.56885403",
"0.5666678",
"0.5664653",
"0.56259894",
"0.56161463",
"0.5603928",
"0.5594685",
"0.5592917",
"0.559219",
"0.5587891"
] | 0.71913886 | 0 |
Gets the most fitting image path for this localisation (the first face if there are multiple) | def get_image_path(self) -> Optional[str]:
try:
return self.localised_faces.all()[0].get_image_path()
except IndexError:
logging.exception("Failed to find an image for %s", self)
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_best_face(self, image):\n\t\ttry:\n\t\t\treturn max(self.get_faces(image),\n\t\t\t key = (lambda f: f[1]))\n\t\texcept ValueError:\n\t\t\treturn None",
"def getFirst(self):\n if self.use_dic:\n data = sorted(self.dic.keys())[0]\n activity = sorted(self.dic[data].keys())[0]\n imsize = sorted(self.dic[data][activity].keys())[0]\n img = sorted(self.dic[data][activity][imsize].keys())[0]\n labels = self.dic[data][activity][imsize][img]\n path = join(self.home, 'data'+str(data), activity, imsize, str(img)+'.jpg')\n else:\n first = self.dic.keys()[0]\n path, labels = first, self.dic[first]\n return path, labels",
"def path(self):\n return self.alignment.matching_function_bestpath(self.idx)",
"def get_fomod_image(self, image_path):\n try:\n return self.normalized_imgpaths[image_path.lower()]\n except KeyError:\n return None",
"def getLargestFace(self, img):\n assert img is not None\n\n try:\n faces = self.faceLocator(img, 1)\n if len(faces) > 0:\n return max(faces, key=lambda rect: rect.width() * rect.height())\n else:\n raise ValueError(\"No faces detected!\")\n except Exception as e:\n print \"Warning: {}\".format(e)\n # In rare cases, exceptions are thrown.\n return []",
"def _getFullPath(self):\n\n if not self.plateifu:\n return None\n\n plate, ifu = self.plateifu.split('-')\n dir3d = self._get_image_dir()\n\n name = 'mangaimage'\n\n return super(Image, self)._getFullPath(name, ifu=ifu, dir3d=dir3d,\n drpver=self._drpver, plate=plate)",
"def find_best_face(faces_dict):\n\n prefix_msg_response = \"The best face is from:\"\n no_valid_urls_msg = \"Please insert valid URLs\"\n if faces_dict:\n max_face_item = max(faces_dict.values(), key=itemgetter(1)) # Finds the image that is the common one,\n # and that has the largest face.\n max_face_image = max_face_item[2]\n max_face_top = max_face_item[3]\n max_face_left = max_face_item[4]\n return f\"{prefix_msg_response} {max_face_image}. The face top is: {max_face_top} and left: {max_face_left}\"\n return no_valid_urls_msg",
"def imagePath(self):\n if self.use_dic:\n if self.imlist:\n paths = []\n for img in self.allimgs:\n paths.append(join(self.home, 'data'+str(self.data), self.activity, self.imsize, str(img)+'.jpg'))\n return paths\n else:\n path = join(self.home, 'data'+str(self.data), self.activity, self.imsize, str(self.img)+'.jpg')\n else:\n path = self.img\n return path",
"def find_name(face):\n if not face[\"MatchedFaces\"]:\n return \"\"\n confidence = face[\"MatchedFaces\"][0][\"Similarity\"]\n if confidence < CONFIDENCE_THRESHOLD:\n return \"\"\n return face[\"MatchedFaces\"][0][\"Face\"][\"ExternalImageId\"]",
"def locate_face(image, minNeighbors=5, scaleFactor=1.05):\n rects = cc_face.detectMultiScale(image, scaleFactor=scaleFactor, minNeighbors=minNeighbors)\n return max(rects, key=rect_area)",
"def get_path_bounding_box(self, image) -> BoundingBox:\n return NNManager.get_yolo_model(\"path\").predict(image)",
"def get_pathname(self):\n return self.image_data.path",
"def _find_model(model_chkp_dir, mode='last'):\n\n if mode == 'last':\n file_name = sorted(os.listdir(model_chkp_dir))[-1]\n model_path = os.path.join(model_chkp_dir, file_name)\n\n elif mode == 'best':\n raise NotImplementedError\n\n return model_path",
"def GetLocalPath(self):\n for priority_group in self._path_priority_groups:\n priority_group = [g for g in priority_group if os.path.exists(g)]\n if not priority_group:\n continue\n return max(priority_group, key=lambda path: os.stat(path).st_mtime)\n return None",
"def source_path(self, workspace):\n if self.file_name_method.value == FN_FROM_IMAGE:\n path_feature = \"%s_%s\" % (\n C_PATH_NAME,\n self.file_image_name.value,\n )\n assert workspace.measurements.has_feature(\"Image\", path_feature), (\n \"Image %s does not have a path!\" % self.file_image_name.value\n )\n return workspace.measurements.get_current_image_measurement(path_feature)\n\n # ... otherwise, chase the cpimage hierarchy looking for an image with a path\n cur_image = workspace.image_set.get_image(self.image_name.value)\n while cur_image.path_name is None:\n cur_image = cur_image.parent_image\n assert (\n cur_image is not None\n ), \"Could not determine source path for image %s' % (self.image_name.value)\"\n return cur_image.path_name",
"def getBestPath(self):\n if self._bestPathVertex.getNextWaypoint() is None:\n numWaypointsCompleted = len(self._waypoints)\n quality = 2\n if self._vertexQueue.isEmpty():\n quality += 1\n else:\n numWaypointsCompleted = self._bestPathVertex.getNextWaypoint().getIndex()\n quality = 1\n if self._vertexQueue.isEmpty():\n quality -= 1\n \n return outputPath.generatePath(self._bestPathVertex, self._params.waypointAcceptanceRadii, quality, numWaypointsCompleted)",
"def find_closest_path(self):\n\t\tclosest_distance = sys.maxint\n\t\tclosest_path = 0\n\t\tbike_position = (self.map_model.bike.xB, self.map_model.bike.yB)\n\t\tfor path_index in range(len(self.map_model.paths)):\n\t\t\tnearest_point = geometry.nearest_point_on_path(self.map_model.paths[path_index], bike_position)\n\t\t\tdistance_to_bike = geometry.distance(nearest_point, bike_position)\n\t\t\tif (closest_distance > distance_to_bike):\n\t\t\t\tclosest_distance = distance_to_bike\n\t\t\t\tclosest_path = path_index \n\t\tdisp_next = self.displacement_to_turn(target_path = (closest_path+1)%len(self.map_model.paths))\n\t\ttarget_path = (closest_path+1)%len(self.map_model.paths)\n\t\tdistance_next = geometry.distance_from_path(bike_position, self.map_model.paths[target_path])\n\t\tif disp_next - np.abs(distance_next)>-0.01:\n\t\t\tclosest_path = np.mod(closest_path + 1,len(self.map_model.paths))\n\t\treturn closest_path",
"def findReferenceImage(modelfile):\n\n try:\n\n dirname = op.dirname(modelfile)\n prefixes = [getFIRSTPrefix(modelfile)]\n except ValueError:\n return None\n\n if prefixes[0].endswith('_first'):\n prefixes.append(prefixes[0][:-6])\n\n for p in prefixes:\n try:\n return fslimage.addExt(op.join(dirname, p), mustExist=True)\n except fslimage.PathError:\n continue\n\n return None",
"def get_first_image(self):\n photos = GoodsPhotos.objects.filter(good=self)[:1]\n if photos:\n return photos[0]\n else:\n return None",
"def best_path(self, unlabeled_sequence):\n unlabeled_sequence = self._transform(unlabeled_sequence)\n return self._best_path(unlabeled_sequence)",
"def findShortestPath(self):\r\n pass",
"def _getface_hog_cnn(self,img,mode):\n faces = face_locations(img,number_of_times_to_upsample=1,model=self.model_name)\n if len(faces)==0:\n return None\n if mode == 1:\n out = faces[0]\n elif mode ==2 :\n top,right,bottom,left = faces[0]\n x,y,w,h = int(left), int(top), int(right-left+1), int(bottom-top+1)\n out = [x,y,w,h]\n return out",
"def fetch_last_model_file(self):\n try:\n filename = self.model_files[-1]\n return self.make_path(filename)\n except IndexError:\n return None",
"def firstPath(self, toNative=True):\n return self.paths(toNative=toNative)[0]",
"def get_top_down_image_env(self, env_id, egocentric=False):\n path = load_path(env_id)\n env_image_in = load_env_img(env_id, self.map_w, self.map_h)\n\n # If we need to return a bigger image resolution than we loaded\n if self.map_w != self.img_w or self.map_h != self.img_h:\n env_image = np.zeros([self.img_h, self.img_w, env_image_in.shape[2]])\n env_image[0:self.map_h, 0:self.map_w, :] = env_image_in\n else:\n env_image = env_image_in\n\n #path_img = cf_to_img(path, [env_image.shape[0], env_image.shape[1]])\n #self.plot_path_on_img(env_image, path_img)\n\n env_image = standardize_image(env_image)\n env_img_t = torch.from_numpy(env_image).unsqueeze(0).float()\n #presenter = Presenter()\n #presenter.show_image(env_img_t[0], \"data_img\", torch=True, scale=1)\n return env_img_t",
"def getFit(self):\n if self.fits.has_key('default'):\n return self.fits['default']\n else:\n return None",
"def image_path_at(self, i):\n return self.image_path_from_index(self.image_index[i])",
"def lastPath(self, toNative=True):\n return self.paths(toNative=toNative)[-1]",
"def get_best_known_model(self) -> Tuple[Optional[Path], int]:\n return self._get_first_model(sort='total_score', desc=False)",
"def _real_image_path(self, path):\r\n return osp.join(self.train_image_path, path)"
] | [
"0.61643314",
"0.60766",
"0.602109",
"0.5965046",
"0.592761",
"0.57702565",
"0.57653356",
"0.57164794",
"0.57031834",
"0.55963093",
"0.546135",
"0.5455214",
"0.5410387",
"0.5387471",
"0.5364005",
"0.53553",
"0.53441226",
"0.5299835",
"0.529249",
"0.5285781",
"0.52561975",
"0.5253615",
"0.52478015",
"0.523409",
"0.5233439",
"0.5226887",
"0.5208129",
"0.52023804",
"0.5199575",
"0.5198434"
] | 0.71636146 | 0 |
Parses the date and meal for a menu, both from CLI and function calls. This method only returns a non-None value if that's what the user specified, since making an automatic decision requires more information than is available at this point (namely, the menu date); otherwise it returns None so that whichever function called it can make the decision by itself. | def _parse_args(input_date, input_meal):
parser = ArgumentParser()
parser.add_argument('-d', '--date', type=str)
parser.add_argument('-m', '--meal', type=str)
args = parser.parse_args()
# Allows getting the args from either CLI or as the function parameters
query_date = args.date or input_date
query_meal = args.meal or input_meal
# Validate and sanitize the meal
if query_meal and query_meal not in constants.MEAL_CHOICES:
raise ValueError("Refeições suportadas são apenas 'almoço', 'jantar' e 'todas'.")
# Validate and sanitize the date
if query_date == constants.DATE_TOMORROW:
query_date = date.today() + timedelta(days=1)
else:
try:
query_date = parse_date(args.date if args.date else input_date or None)
except ValueError:
query_date = None
return query_date, query_meal | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def parseMenu(self):\n soup = BeautifulSoup(self.sourceText,\n markupMassage = Parser.SOURCE_FIXES,\n convertEntities = BeautifulStoneSoup.HTML_ENTITIES)\n\n keyTranslations = {'prato principal': 'principal',\n 'salada': 'salada',\n 'sobremesa': 'sobremesa',\n 'suco': 'suco'}\n menu = {}\n\n # The code here is not very smart or efficient, but we are trying\n # to be as much error-proof as possible.\n # Using the main meal and reading the other entries using it as a\n # reference point seems to be the best take.\n # We begin by finding the main meal, parsing the date and then the\n # meal items.\n mainMeal = soup.find(text = re.compile(r\"prato\\s+principal\", re.IGNORECASE)).parent.parent\n mealDate = mainMeal.findPreviousSibling('p')\n menu['data'] = self.__getTagText(mealDate)\n\n # The four remaining entries (main meal, dessert, salad and juice) are the 4 next siblings\n for item in mealDate.findNextSiblings('p', limit=4):\n match = re.match(r'\\s*([\\w\\s]+):\\s*(.+)', self.__getTagText(item))\n\n if match:\n key = keyTranslations[match.group(1).lower().strip()]\n menu[key] = match.group(2)\n\n # In the end, we shall have a dictionary with the following keys:\n # 'data', 'principal', 'salada', 'sobremesa', 'suco'\n if len(menu) != 5:\n raise ValueError, \"The menu in the site has an invalid format\"\n\n return Menu.Menu(menu)",
"def print_choice_menu():\n month1 = input(\"Give the month as 4, 5, 6, 7, 8, or 9: \")\n print()\n day1 = input(\"Give the day as 1, 2, ..., 29, 30, or 31: \")\n print()\n\n try:\n month1 = int(month1)\n day1 = int(day1)\n\n if month1 in [4, 5, 6, 7, 8, 9]:\n if day1 in list(range(1, 32)):\n if day1 == 31 and month1 in [4, 6, 9]:\n print(\"Invalid Date!\")\n sleep(2)\n return 0, 0\n elif month1 > datetime.now().month or (month1 == datetime.now().month and day1 > datetime.now().day):\n print(\"Date out of bounds\")\n sleep(2)\n return 0, 0\n else:\n sleep(2)\n return month1, day1\n else:\n print(\"Date out of bounds\")\n sleep(2)\n return 0, 0\n else:\n print(\"Date out of bounds\")\n sleep(2)\n return 0, 0\n\n except ValueError:\n print(\"User Error\")\n sleep(2)\n return 0, 0",
"def run(input_date=None, input_meal=None):\n query_date, query_meal = _parse_args(input_date, input_meal)\n # Get the data and instantiate the required classes\n html = scrapper.fetch_data(query_date)\n meal_date = scrapper.get_meal_date(html)\n available_dates = scrapper.get_available_dates(html)\n meals = scrapper.get_meals(html)\n result = \"\"\n result += format_for_terminal(\"Cardápio do dia {}\\n\".format(format_date(meal_date)),\n constants.TERMINAL_GREEN)\n # Automatically decide which meal should be shown if none was specified\n if not query_meal:\n if meal_date != date.today():\n query_meal = constants.MEAL_LUNCH\n elif (datetime.now(timezone(constants.BANDECO_TIMEZONE)).time() <\n time(hour=constants.LUNCH_END_HOUR)):\n query_meal = constants.MEAL_LUNCH\n else:\n query_meal = constants.MEAL_DINNER\n # Formats the meal menu\n if query_meal == constants.MEAL_LUNCH:\n result += meals[0].combine(meals[1]).format()\n elif query_meal == constants.MEAL_DINNER:\n result += meals[2].combine(meals[3]).format()\n else:\n for meal in meals:\n result += meal.format()\n # Show other dates available for fetching\n result += format_for_terminal(\n \"Datas disponíves: {}\".format(\", \".join(list(map(format_date, available_dates)))),\n constants.TERMINAL_BLUE)\n # Shows a warning if the menu is not from today\n today = date.today()\n if not query_date and meal_date != today:\n result += format_for_terminal(\"\\nAviso: este cardápio não é de hoje, mas para daqui {} dias\"\n .format((meal_date - today).days), constants.TERMINAL_RED)\n return result",
"def parse_menu(menufile_text):\n def make_dict(matches):\n data_dict = {}\n for key, value in matches:\n data_dict[key] = value\n return data_dict\n\n name_match = re.search(\"\\?package\\((.*?)\\)\", menufile_text)\n property_matches = re.findall(\"(\\w+)=\\\"(.*?)\\\"\", menufile_text)\n try:\n name = name_match.group(1)\n data = make_dict(property_matches)\n return name, data\n except AttributeError:\n return None, None",
"def search_method_menu(self):\n\n print()\n options = {'1': 'Employee Name', '2': 'Keyword', '3': 'Time Spent',\n '4': 'Date', '5': 'Date Range', '6': 'Exit to main menu'}\n\n while True:\n\n for k, v in options.items():\n print(k + \". \" + v)\n\n user_choice = input('\\nPlease enter the number of choice: ').lower().strip()\n\n if user_choice in options.keys():\n return options.get(user_choice)\n else:\n print('\\nInvalid choice! Please try again.\\n')",
"def get_menu() -> str:\n date = datetime.date.today()\n urls = generate_urls(date)\n menu_json = fetch_menu(urls)\n menu = extract_menu(menu_json, date)\n\n return menu",
"def extract_menu(menu_json: dict, date: datetime.date) -> str:\n\n inner_menu = menu_json[-1]\n acf = inner_menu.get(\"acf\")\n\n date_string = f\"*Menu for {date}*\"\n story = prettify(acf.get(\"story\")).strip()\n menu_items = parse_menu_items(acf.get(\"menu_items\"))\n\n return \"\\n\".join([date_string] + [story] + menu_items)",
"def get_menu(menu_name):\n\n pass",
"def UserMenu(self):\n prompt = \"\"\"\n (CD) Certificate of Deposit\n (MM) Money Market\n (MS) Money Savings\n (C) Checking\n Enter Account Type: \"\"\"\n done = 0\n while not done:\n choice = 0\n while not choice:\n try:\n option = raw_input(prompt).strip().upper()\n m = re.search(r'CD|MM|MS|C',option)\n if m:\n print \" Your preferred account type is \",option\n prompt2 = \"\"\"\n (=>) WithDrawal\n (<=) Deposit\n (-) Debit\n (+) Credit\n Enter Choice :\"\"\"\n else:\n print \"Invalid Transaction\"\n except(EOFError, KeyboardInterrupt):\n option = 'C'\n if option == 'E':\n choice = 1\n try:\n option1 = raw_input(prompt2).strip().upper()\n except(KeyboardInterrupt, EOFError):\n option1 = 'E'\n m1 = re.search(r'=>|<=|-|+',option1)\n if not m1:\n print \"Invalid option.. Try again\"\n else:\n choice = 1\n if option1 == '=>': self.Deposit()\n if option1 == '<=': self.Withdrawal()\n if option1 == '-': self.Debit()\n if option1 == '+': self.Credit()\n if option1 == 'E': done = 1",
"def menu():\n\twhile True:\n\t\t# Loop till user quits.\n\t\tsel = 0\n\t\twhile ( sel < 1 or sel > len(restaurants)):\n\t\t\t# Input validation\n\t\t\tprint pretty_header.format(curtime=ctime())\n\t\t\tsel = raw_input(\"Enter your menu choice [1-6 or q]: \")\n\t\t\tif sel.lower() == \"q\":\n\t\t\t\tsys.exit(0)\n\t\t\ttry:\n\t\t\t\tsel = int(sel)\n\t\t\texcept:\n\t\t\t\tsel = 0\n\t\t\tos.system(\"clear\")\n\n\t\t# Load meals from desired restaurant.\n\t\thtml = urlopen(restaurants[sel-1])\n\t\tsoup = BeautifulSoup(html, convertEntities = BeautifulSoup.HTML_ENTITIES)\n\t\tmeals = soup.findAll(id=re.compile(\"meal_\\d\"))\n\t\ttabs = soup.findAll(id=re.compile(\"tab_\\d\"))\n\n\t\t# get the name of the restaurant, minus the \"RIT Dining Services\" bs.\n\t\tprint (\"\\nOn the menu at \" + re.sub(\"^[\\w\\W]*\\s?:\\s?\", \"\",\n\t\t\tstr(soup.title.string)) + \" today is:\")\n\t\tmeal_num = 0\n\t\tfor meal in meals:\n\t\t\tif meal:\n\t\t\t\t# print all meals served + meal name / subrestaurant name\n\t\t\t\tprint (\"=====================\")\n\t\t\t\tprint tabs[meal_num].contents[0].string\n\t\t\t\tprint (\"=====================\\n\")\n\t\t\t\tmeal_num += 1\n\t\t\t\tfor item in meal.findAll(\"li\"):\n\t\t\t\t\tif item.string and str(item.string) != \"\":\n\t\t\t\t\t\tprint item.string\n\t\t\t\tprint (\"\\n\")\n\t\traw_input(\"Press any key to continue...\")\n\t\tos.system(\"clear\")",
"def formatMenu(day) :\n\n # Get the html from the webpage\n headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36 Edg/87.0.664.66'}\n r = requests.get(\"https://studentlife.yale-nus.edu.sg/dining-experience/daily-dining-menu/\", headers = headers)\n soup = BeautifulSoup(r.text, \"html.parser\")\n\n day_to_tab_dict = {1 : \"tab1\", 2 : \"tab2\", 3 : \"tab3\", 4 : \"tab4\", 5 : \"tab5\", 6 : \"tab6\", 7 : \"tab7\"}\n\n menu = \"\"\n meals = soup.find(\"div\", {\"id\" : day_to_tab_dict[day]}).findAll(class_ = \"menu-list\")\n\n # If day is not weekend (ie the name of first meal is not brunch)\n first_meal_name = meals[0].find(\"h4\").text\n \n if first_meal_name != \"brunch\" :\n\n # Loop through breakfast, lunch & dinner\n for index in range(3) :\n # Bold the meal text\n menu = menu + \"<b>\" + meals[index].find(\"h4\").get_text() + \"</b>\" + \"\\n\"\n \n # Find the stations (ie. salad bar, yakimix etc.) and the dishes (ie. kung pao chicken etc)\n stations = meals[index].findAll(\"u\")\n dishes = meals[index].findAll(\"p\")\n\n # Convert to text \n stations = [station.get_text() for station in stations]\n dishes = [dish.get_text() for dish in dishes]\n print(stations)\n print(dishes)\n\n # Format the dishes\n for dish in dishes :\n\n # If dish is station name, print dish text with underline\n if dish in stations :\n menu = menu + \"<u>\" + dish + \"</u>\" + \"\\n\"\n \n else :\n menu = menu + dish + \"\\n\"\n \n # Add new lines to separate the meals from another\n menu = menu + \"\\n\" + \"\\n\"\n \n return menu\n \n\n else :\n for index in range(2) :\n\n menu = menu + \"<b>\" + meals[index].find(\"h4\").get_text() + \"</b>\" + \"\\n\"\n \n stations = meals[index].findAll(\"strong\")\n dishes = meals[index].findAll(\"td\")\n\n stations = [station.get_text() for station in stations]\n dishes = [dish.get_text() for dish in dishes]\n\n for dish in dishes :\n\n if dish in stations :\n menu = menu + \"<u>\" + dish + \"</u>\" + \"\\n\"\n \n else :\n menu = menu + dish + \"\\n\"\n \n menu = menu + \"\\n\" + \"\\n\"\n \n return menu",
"def getMenuOption():\n return menu_option",
"def get_user_data():\n parser = argparse.ArgumentParser(description='Type your data with whitespace in following format')\n parser.add_argument('depart_iata', type=lambda x: x.upper(), help='AAA')\n parser.add_argument('dest_iata', type=lambda x: x.upper(), help='AAA')\n parser.add_argument('depart_date', type=lambda x: datetime.strptime(x, \"%Y-%m-%d\"), help='YYYY-MM-DD')\n parser.add_argument('return_date', nargs='?', default='', help='YYYY-MM-DD - optional')\n args = parser.parse_args()\n validate_iata(args.depart_iata, args.dest_iata)\n validate_date(args.depart_date.date(), args.return_date)\n return args.depart_iata, args.dest_iata, args.depart_date.date(), args.return_date",
"def get_one_menu_option():",
"def invoke(self):\n print(\"\\nEnter Published Date (YYYY-MM-DD): \", end=\"\")\n # get option from user, and strip whitespace\n str_option = input().strip()\n # try parse as datetime object\n try:\n publish_date = parse(str_option)\n self.sbh.display_books(\n self.db.query_book_by_publish_date(publish_date)\n )\n except:\n print(\"Invalid Input!\")\n return",
"def test_date_valid_data(self):\n selected_menu_form = SelectedMenuForm(self.possible_meals_choices)\n\n self.assertTrue(selected_menu_form)",
"def menu(self, venue_id, date):\n query = \"&date=\" + date\n response = self._request(V2_ENDPOINTS['MENUS'] + venue_id + query)\n return response",
"def MolecularToolsMainMenu(DataUser):\r\n\r\n import MenuTools\r\n\r\n MMName = 'MAIN MENU'\r\n MMOptions = [['A','Update Status'],['B','Compare Methods'],['C','Model Molecular Data'],\r\n ['D','Model Health Data'],\r\n ['Q','Quit']]\r\n if DataUser.Update:\r\n MMOptions.append(['U','Update Molecule Database'])\r\n MMOptions.append(['V','Update Health Database'])\r\n MMString = MenuTools.CreateMenu(MMName,MMOptions)\r\n\r\n # User prompt string connected to Main Menu\r\n PromptString = 'Please select an option from Main Menu: '\r\n\r\n # Control code for Main Menu\r\n Option = ''\r\n while (Option != 'Q') and (Option != 'q'):\r\n # Output menu and prompt\r\n print(MMString)\r\n Option = input(PromptString)\r\n # Option A: Update status\r\n if Option == 'A':\r\n UpdateStatus.UpdateStatus(DataUser)\r\n elif Option == 'B':\r\n CompareMethods.CompareMethods(DataUser)\r\n elif Option == 'C':\r\n MolecularModelData.ModelData(DataUser)\r\n elif Option == 'D':\r\n HealthModelData.ModelData(DataUser)\r\n elif (Option == 'Q') or (Option == 'q'):\r\n print('Exiting Data Tools')\r\n elif (Option == 'U') and DataUser.Update:\r\n MolecularDatabase.UpdateMenu(DataUser)\r\n elif (Option == 'V') and DataUser.Update:\r\n HealthDatabase.UpdateMenu(DataUser)\r\n # if the user does not select a valid option, a warning is\r\n # printed and the menu and command prompt is reprinted.\r\n else:\r\n print('Input not recognized.')",
"def menu(data):\n print('-' * 80)\n while True:\n print(\"Please choose which section you would like to review.\")\n print(\"\"\"\n 1. Sales Review\n 2. Item Review\n 3. Advisor Review\n 4. Exit/Quit\n \"\"\")\n\n review = input(\"Please type your choice NUMBER and press ENTER: \\n\")\n if validate_input(review):\n if review == '1':\n print(f\"You typed '{review}',\")\n print(\"Sales Data will be compiled...\\n\")\n exit_call = sale_call(data)\n if exit_call:\n print(\"Reload Complete!\")\n else:\n return False\n elif review == '2':\n print(f\"You typed '{review}',\")\n print(\"We are now loading the Items Data...\\n\")\n exit_call = item_call(data)\n if exit_call:\n print(\"Reload Complete!\")\n else:\n return False\n elif review == '3':\n print(f\"You typed '{review}',\")\n print(\"We are now taking you to the Advisor Data...\\n\")\n exit_call = adv_call(data)\n if exit_call:\n print(\"Reload Complete!\")\n else:\n return False\n else:\n print(f\"You typed '{review}'.\")\n print(\"You have chosen to leave the programme...\\n\")\n print(\"The programme will now terminate...\\n\")\n print(\"Have a nice day! :-)\")\n print('-' * 80)\n return False",
"def display_menu():\n print(\"Press 1 to purchase stocks\\n\")\n print(\"\\nPress 2 to visualize the total prices of selected stocks over the period of time imported from a json file\\n\")\n print(\"\\nPress 0 to quit\\n\")\n try:\n response = int(input(\"\\nwaiting for Input: \"))\n if response < 0 or response > 2:\n return \"Please input a value between 0 and 2\"\n except:\n print(\"Please enter the numeric values specified in the menu\")\n else:\n return response",
"def Infor_menu():\n \n import sys\n d = ''\n msg = '' \n while d == '':\n print('\\nINFORMATION MENU')\n print('1. Display coordinate sequence')\n print('2. Display SEQRES sequence')\n print('3. Display Alignment sequence')\n print('4. Display all non-water ligands in the protein(if any)')\n print('q. Quit')\n option = input('Select an option: ')\n if option.lower() == 'q':\n sys.exit()\n elif option == '1':\n msg = 'Option 1'\n d = display_cord_seq()\n elif option == '2':\n msg = 'Option 2'\n d = display_seqres_seq()\n elif option == '3':\n msg = 'Option 3'\n d = display_algn_seq()\n elif option == '4':\n msg = 'Option 4'\n d = display_all_nonwater_L()\n else:\n print ('Invalid selection!')\n return msg, d",
"def decider_menu():\n menu_value = -1\n while menu_value < 0 or menu_value > 6:\n print(\"Spending Categories:\\n\\t1. Food\\n\\t2. Travel\\n\\t3. Transit\"\n \"\\n\\t4. Gas\\n\\t5. Online Shopping\\n\\t6. Other\\n\")\n menu_value = input(\"Please enter one of the above values or 0 \"\n \"to cancel: \")\n try:\n menu_value = int(menu_value)\n except ValueError:\n print(\"Error: Not an integer! Please enter a valid input!\")\n continue\n if menu_value == 0:\n break\n elif menu_value < 0 or menu_value > 6:\n print(\"Error: Not a valid integer! Please enter a valid number!\")\n continue\n elif menu_value == 1:\n print(\"You have selected Food! Please select a subcategory:\\n\\t\"\n \"1. Dining\\n\\t2. Groceries\")\n subcategory = input(\"Please enter an above values, anything else \"\n \"to leave this category: \")\n try:\n subcategory = int(subcategory)\n if subcategory == 1:\n return \"dining\"\n elif subcategory == 2:\n amazon = \"\"\n while amazon != \"N\" and amazon != \"Y\":\n amazon = input(\n \"Are you shopping at Whole Foods? (Y/N): \")\n if amazon == \"N\":\n return \"grocery\"\n elif amazon == \"Y\":\n return \"grocery(Whole Foods)\"\n else:\n print(\"Invalid input\")\n else:\n print(\"Exiting the food category...\")\n continue\n except ValueError:\n print(\"Exiting the food category...\")\n continue\n elif menu_value == 2:\n print(\"You have selected Travel! Please select a subcategory:\\n\"\n \"\\t1. General\\n\\t2. IHG Hotel\\n\\t3. Chase\\n\\t4. AMEX\")\n subcategory = input(\"Please enter an above value, anything else \"\n \"to leave this category: \")\n try:\n subcategory = int(subcategory)\n if subcategory == 1:\n return \"travel\"\n elif subcategory == 2:\n return \"hotel(IHG)\"\n elif subcategory == 3:\n return \"travel(Chase)\"\n elif subcategory == 4:\n return \"travel(AMEX)\"\n else:\n print(\"Exiting the Travel category...\")\n continue\n except ValueError:\n print(\"Error! Exiting the Travel category...\")\n continue\n elif menu_value == 3:\n return \"transit\"\n elif menu_value == 4:\n return \"gas\"\n elif menu_value == 5:\n print(\"You have selected Online Shopping! Please select a \"\n \"subcategory:\\n\\t1. Amazon\\n\\t2. Walmart\\n\\t3. Elsewhere\")\n subcategory = input(\n \"Please enter an above value, anything else to \"\n \"leave this category: \")\n try:\n subcategory = int(subcategory)\n if subcategory == 1:\n return \"online shopping(Amazon)\"\n elif subcategory == 2:\n return \"online shopping(Walmart)\"\n elif subcategory == 3:\n return \"online shopping\"\n else:\n print(\"Exiting the Online Shopping category...\")\n continue\n except ValueError:\n print(\"Error! Exiting the Online Shopping category...\")\n continue\n elif menu_value == 6:\n print(\"You have selected Other! Please select a subcategory:\\n\\t1.\"\n \" Streaming\\n\\t2. Utilities\\n\\t3. Drugstores\\n\\t4. Other\")\n subcategory = input(\"Please enter an above value, anything else to\"\n \" leave this category: \")\n try:\n subcategory = int(subcategory)\n if subcategory == 1:\n return \"streaming\"\n elif subcategory == 2:\n return \"utilities\"\n elif subcategory == 3:\n return \"drugstores\"\n elif subcategory == 4:\n return \"else\"\n else:\n print(\"Exiting the Other category...\")\n continue\n except ValueError:\n print(\"Error! Exiting the Other category...\")\n continue",
"def test_date_entry_returns_correct_value_for_date(self):\n date_string = \"2018-01-21\"\n date_format = settings.DATE_FORMATS['iso 8601']\n self.menu.OPTIONS['date format'] = date_format\n\n user_input = [date_string]\n\n with patch('builtins.input', side_effect=user_input):\n result = self.menu.date_entry()\n\n expected_result = (\n None,\n datetime.datetime.strptime(date_string,\n date_format['datetime format'])\n )\n\n self.assertEqual(result, expected_result)",
"def showMenu():\n print '''\\nIndica una opció:\n 1 Afegir contacte\n 2 Modificar contacte\n 3 Eliminar contacte\n 4 Cercar contacte\n 5 Info de l'agenda\n 0 Sortir\\n'''\n\n try:\n global menu_option\n menu_option = int(raw_input('Opció escollida: '))\n except ValueError:\n print 'Error al escollir l\\'opció'",
"def mainMenuChoice():\r\n print(\"What would you like to do?\")\r\n print(\" n) Add a class\")\r\n print(\" d) Delete a class\")\r\n print(\" e) Edit a class\")\r\n print(\" s) Show ongoing list of classes\")\r\n print(\" p) Print Schedule to Terminal\")\r\n print(\" g) Generate schedule in csv and print to Terminal\")\r\n print(\" q) Save and Exit program\\n\")\r\n \r\n choice = input(\"Input choice here: \")\r\n \r\n if choice.lower() in [\"n\",\"d\",\"e\",\"s\",\"g\",\"q\", \"p\"]:\r\n return choice\r\n else:\r\n print(\"\\nPlease enter a valid menu choice\")\r\n return None",
"def search_menu():\n clear_screen()\n print(\"What would you like to search by?\")\n print(\" d: Date (Default)\")\n print(\" t: Time spent\")\n print(\" e: Exact\")\n print(\" p: Pattern (Regex)\")\n user_input = input(\"> \").lower()\n if user_input == 't':\n search_by_time_spent()\n elif user_input == 'e':\n search_by_string()\n elif user_input == 'p':\n search_by_pattern()\n else:\n search_by_date()",
"def _holiday_parser(item):\n\n return item['summary'], item.get('start',{}).get('date'), item.get('end',{}).get('date')",
"def menu(self):\n print('1) Today\\'s tasks')\n print('2) Week\\'s tasks')\n print('3) All tasks')\n print('4) Missed tasks')\n print('5) Add task')\n print('6) Delete task')\n print('0) Exit')\n self.menu_choice = input()",
"def menu():\n\tuser_input = input()\n\tif user_input == 'QUIT' :\n\t\tquit_game()\n\telif user_input == 'HELP':\t #According to user input it\n\t\tmenu_help() #Performs appropriate functions\n\t\tmenu()\n\telif user_input[0:5] == 'START':\n\t\tmenu_start_game(user_input[6:len(user_input)])\n\telse :\n\t\tprint(\"\\n\",end=\"\")\n\t\tprint(\"No menu item\")\n\t\tprint(\"\\n\",end=\"\")\n\tpass",
"def retirement_main(g:argparse.Namespace) -> tuple: \n now = datetime.date.today()\n\n # Only the most cursory checking.\n if not g.birthday < g.hiredate < now: \n print(\"\\n\".join([\"you must have been born before you were hired,\",\n \"and you must have been hired before today.\"]))\n sys.exit(os.EX_DATAERR)\n \n current_age = now - g.birthday\n current_service = now - g.hiredate\n\n old_enough_on = g.birthday + rdelta(years=g.min_age)\n old_enough_now = old_enough_on < now\n long_enough_on = g.hiredate + rdelta(years=g.req_years)\n long_enough_now = long_enough_on < now\n\n if long_enough_now and old_enough_now:\n return now, 0, old_enough_on, long_enough_on, current_age.days, current_service.days\n \n no_earlier_than = max(long_enough_on, old_enough_on)\n magic_date = now + rdelta(rdelta(years=g.magic_number) - rdelta(current_age) - rdelta(current_service))/2\n quit_on = max(magic_date, no_earlier_than)\n\n if old_enough_on < g.hiredate: \n old_enough_on = 'when you started.'\n elif old_enough_on < now: \n old_enough_on = 'now'\n if long_enough_on < now: \n long_enough_on = 'now'\n\n return quit_on, (quit_on - now).days, old_enough_on, long_enough_on, current_age.days, current_service.days"
] | [
"0.63029546",
"0.62325466",
"0.6204037",
"0.5833007",
"0.57940876",
"0.5668976",
"0.5628858",
"0.55757457",
"0.5570373",
"0.5545508",
"0.5543758",
"0.55230284",
"0.5504087",
"0.5475572",
"0.5453246",
"0.5448193",
"0.54172593",
"0.5367389",
"0.5333646",
"0.53286713",
"0.5318852",
"0.53166044",
"0.5292661",
"0.52460515",
"0.52444065",
"0.523319",
"0.5216469",
"0.520917",
"0.5204353",
"0.51959944"
] | 0.6623508 | 0 |
Builds a network from a config file | def build_network(config):
network_cfg = config['network']
network_name = network_cfg['name']
network_params = list(inspect.signature(eval(network_name).__init__).parameters)[1:]
args = [f'{param}={network_cfg[param]}' for param in network_params if network_cfg.get(param)]
try:
model = eval('{}({})'.format(network_name, ', '.join(args)))
except:
raise ValueError('Can\'t load network.')
return model.to(device='cuda') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _build_network(self):\n pass",
"def create_net(args):\n\n # Load config file for this experiment\n xinfo = yaml.load(open(args.exp)) # experiment info\n\n # copy config to run directory\n assert osp.isdir(args.cache_dir), 'Working directory not found: ' + args.cache_dir\n # output config file\n yaml.dump(xinfo, open(args.exp_config_path, 'w'),\n default_flow_style=False)\n\n # Load dataset config file\n dcfg_path = osp.join(args.data_config_path, xinfo['INPUT']['DATASET'])\n dinfo = yaml.load(open(dcfg_path)) # dataset info\n data_dir = dinfo['ROOT']\n\n layout = xinfo['INPUT']['LAYOUT']\n inps = [s.strip() for l in layout for s in l.split(',')]\n outs = [s.strip() for s in xinfo['REFINE']['TARGETS'].split(',')]\n\n supports = ['seg', 'flow', 'norm', 'rgb', 'depth']\n\n nets = {}\n for split in ['train', 'test']:\n net_inps = []\n net_outs = []\n for inp in inps:\n match = re.search('^(gt|pr)({})'.format('|'.join(supports)), inp)\n assert match is not None, 'Error in config INPUT-LAYOUT: ' + inp\n\n modality = match.group(2)\n nchannels = dinfo[modality]['n']\n path = osp.join(data_dir, dinfo[modality][match.group(1) + '-' + split])\n\n assert osp.exists(path), 'File not found: ' + path\n net_inps.append((inp, path, nchannels))\n\n for out in outs:\n # TODO: read target type: zero couplings, tight, loose couplings\n match = re.search('({})'.format('|'.join(supports)), out)\n assert match is not None, 'Error in config REFINE-TARGET: '+ out\n\n modality = match.group(1)\n nchannels = dinfo[modality]['n']\n path = osp.join(data_dir, dinfo[modality]['gt-' + split])\n\n assert osp.exists(path), 'File not found: ' + path\n net_outs.append((out, path, nchannels))\n\n loss_params = dict()\n mapping = None\n if 'mapping' in dinfo['seg']:\n idx = dinfo['seg']['mapping']\n mapping = dict(zip(idx, xrange(len(idx))))\n\n if split == 'train':\n\n # if the class weights is not in the dataset config file\n if 'gt-train-weights' not in dinfo['seg']:\n print 'Generating median frequency balancing weights.'\n (weights, mapping) = gcw.get_mfb(osp.join(dinfo['ROOT'], dinfo['seg']['gt-train']),\n dinfo['seg']['ignore_label'],\n mapping)\n # save back to dataset config\n dinfo['seg']['gt-train-weights'] = weights\n yaml.dump(dinfo, open(dcfg_path, 'w'), default_flow_style=False)\n else:\n weights = dinfo['seg']['gt-train-weights']\n # update data\n # update loss parameter\n ignore_label = dinfo['seg']['ignore_label']\n ignore_label = mapping[ignore_label] if mapping is not None else ignore_label\n loss_params['loss_param'] = {\n 'ignore_label': ignore_label,\n 'class_weighting': weights\n }\n\n # generate net prototxt\n loader = dinfo['NAME'] + '_loader'\n net_proto = arch.create_net(net_inps, net_outs, split, loader, layout, mapping, **loss_params)\n\n # output to file\n path = osp.join(args.cache_dir, getattr(args, 'exp_{}_path'.format(split)))\n open(path, 'w').write(str(net_proto))\n nets[split] = net_proto\n\n return nets",
"def buildNetwork(self):\n\n # create the network node for our module\n self.networkNode = cmds.createNode(\"network\", name=self.modName)\n\n # create attributes\n self.addAttributes()\n\n return self.networkNode",
"def load_network(file_name):\n with open(file_name) as file:\n data = json.load(file)\n\n cost_fn = getattr(sys.modules[__name__], data[\"cost_func\"])\n act_fn = getattr(sys.modules[__name__], data[\"act_func\"])\n metric = getattr(sys.modules[__name__], data[\"metric\"])\n\n network = Network([1, 1], act_func=act_fn, cost_func=cost_fn, metric=metric)\n network.layers_num = data[\"layers_num\"]\n network.weights = [np.array(w) for w in data[\"weights\"]]\n network.biases = [np.array(b) for b in data[\"biases\"]]\n\n return network",
"def __init__(self, netlist_file):\n with open(netlist_file, 'r') as f:\n self.netlist = _parse_netlist(f)\n self.G = _create_graph(self.netlist)",
"def network_config(self):\n\n if self._network_config:\n return self._network_config\n\n interfaces = self.metadata.get('interfaces')\n\n if not interfaces:\n raise Exception(\"Unable to get meta-data from server....\")\n\n # Convert Vultr network configuration to cloudinit.net format\n\n # Example JSON:\n # [\n # {\n # \"ipv4\": {\n # \"additional\": [\n # {\n # \"address\": \"192.0.2.3\",\n # \"netmask\": \"255.255.255.0\"\n # }\n # ],\n # \"address\": \"192.0.2.2\",\n # \"gateway\": \"192.0.2.1\",\n # \"netmask\": \"255.255.255.0\"\n # },\n # \"ipv6\": {\n # \"additional\": [\n # {\n # \"network\": \"2001:0db8:0:2::\",\n # \"prefix\": \"64\"\n # }\n # ],\n # \"address\": \"2001:0db8:0:1:5428:d5ff:fe28:1910\",\n # \"network\": \"2001:0db8:0:1::\",\n # \"prefix\": \"64\"\n # },\n # \"mac\": \"00:00:00:00:00:00\",\n # \"network-type\": \"public\"\n # },\n # ......\n # ]\n\n nic_configs = []\n macs_to_nics = cloudnet.get_interfaces_by_mac()\n LOG.debug(\"nic mapping: %s\", macs_to_nics)\n\n config = []\n for vultr_ip_dict in interfaces:\n mac = vultr_ip_dict[\"mac\"]\n\n if mac not in macs_to_nics:\n raise ValueError(\"Did not find network interface on system \"\n \"with mac '%s'. Cannot apply configuration: %s\"\n % (mac_address, nic))\n if_name = macs_to_nics[mac] # if_name = string 'eth0', ...\n if_config= {\n 'type': 'physical',\n 'mac_address': mac,\n 'name': if_name,\n 'subnets': [{\n 'type': 'dhcp',\n 'control': 'auto',\n }\n ]\n }\n config.append(if_config)\n\n LOG.debug(\"nic '%s' configuration: %s\", if_name, if_config)\n\n LOG.debug(\"added dns servers: %s\", self.dns_servers)\n config.append({'type': 'nameserver', 'address': self.dns_servers})\n\n return {'version': 1, 'config': config}",
"def build_net(model_file, weight_file):\n if not os.path.exists(model_file):\n raise ValueError('cannot find model file: {}'.format(model_file))\n if not os.path.exists(weight_file):\n raise ValueError('cannot find weight file: {}'.format(weight_file))\n\n net = caffe.Net(model_file, weight_file, caffe.TEST)\n return net",
"def load_networks(self, start=False):\n logging.debug(\"%s load_networks entered\" % self)\n # networks = self.infra['networks']\n all_containers = cf.list_containers()\n if self.container_name in all_containers:\n logging.info(\"found existing container, checking for network configuration\")\n mycontainer = cf.get_container(self.container_name)\n try:\n index = mycontainer.get_object(\"index.json\")\n mconf = json.loads(index.fetch())\n for network in mconf['networks'].keys():\n logging.info(\"loading %s from file\" % network)\n new_network = Network(self, network)\n if mconf['networks'][network].has_key(\"uuid\"):\n uuid = mconf['networks'][network][\"uuid\"]\n # print \"YYY: \", uuid\n new_network.load(uuid, start=start)\n self.networks[network] = new_network\n except Exception, e:\n # print \"ALJKALDFDKSJFLSKJDf\"\n logging.warn(e.message)\n import traceback\n logging.debug(traceback.print_exc())\n \n # check if they exist...\n # for net in networks.keys():\n # # create the network object\n # new_net = Network(self, net) \n # ",
"def get_network(network: str, config):\n using_spatial = False # If true input is fed as patches.\n using_attention = False\n patch_return_size = 1\n\n if network == 'cohen':\n model = CohenMLP(seq_len=config.seq_len)\n elif network == 'oksuz_rnn':\n model = OksuzRNN(config.gru, input_size=config.rnn_input_size, hidden_size=config.rnn_hidden_size,\n seq_len=config.seq_len, num_layers=config.rnn_num_layers,\n bidirectional=config.rnn_bidirectional)\n elif network == 'hoppe':\n spatial_pooling = None if config.spatial_pooling.lower() == 'none' else config.spatial_pooling.lower()\n using_spatial = True if spatial_pooling is not None else False\n model = Hoppe(config.gru, input_size=config.rnn_input_size, hidden_size=config.rnn_hidden_size,\n seq_len=config.seq_len, num_layers=config.rnn_num_layers,\n bidirectional=config.rnn_bidirectional, spatial_pooling=spatial_pooling,\n patch_size=config.patch_size)\n elif network == 'rnn_attention':\n using_attention = True\n model = RNNAttention(input_size=config.rnn_input_size, hidden_size=config.rnn_hidden_size,\n batch_size=config.batch_size, seq_len=config.seq_len,\n num_layers=config.rnn_num_layers, bidirectional=config.rnn_bidirectional)\n elif network == 'song':\n using_attention=True\n model = Song(seq_len=config.seq_len)\n elif network == 'soyak':\n using_spatial = True\n patch_return_size = config.patch_size - 2\n model = Soyak(patch_size=config.patch_size, seq_len=config.seq_len)\n elif network == 'patch_size':\n using_spatial = True\n model = PatchSizeTest(seq_len=config.seq_len, patch_size=config.patch_size)\n elif network == 'balsiger':\n using_spatial = True\n model = Balsiger(seq_len=config.seq_len, patch_size=config.patch_size)\n elif network == 'rca_unet':\n using_spatial = True\n patch_return_size = config.patch_size\n using_attention = config.rcab_attention\n model = RCAUNet(seq_len=config.seq_len, patch_size=config.patch_size,\n temporal_features=config.num_temporal_features, attention=config.rcab_attention)\n elif network == 'r2plus1d':\n using_spatial = True\n using_attention = True if config.non_local_level > 0 else False\n model = R2Plus1D(patch_size=config.patch_size, seq_len=config.seq_len, factorise=config.factorise,\n dimensionality_reduction_level=config.dimensionality_reduction_level,\n non_local_level=config.non_local_level)\n elif network == 'r1d':\n model = R1D(seq_len=config.seq_len)\n else:\n import sys # Should not be able to reach here as we provide a choice.\n print(\"Invalid network. Exiting...\")\n sys.exit(1)\n\n return model, using_spatial, using_attention, patch_return_size",
"def build(config):",
"def load_network_config(config_path):\n return load_json_file(config_path)",
"def build(self, config):\n nets = OrderedDict()\n\n nets['shared'] = NeuralNet(self.tensor_in, config['net_g']['shared'],\n name='shared')\n\n nets['pitch_time_private'] = [\n NeuralNet(nets['shared'].tensor_out,\n config['net_g']['pitch_time_private'],\n name='pt_'+str(idx))\n for idx in range(config['num_track'])\n ]\n\n nets['time_pitch_private'] = [\n NeuralNet(nets['shared'].tensor_out,\n config['net_g']['time_pitch_private'],\n name='tp_'+str(idx))\n for idx in range(config['num_track'])\n ]\n\n nets['merged_private'] = [\n NeuralNet(tf.concat([nets['pitch_time_private'][idx].tensor_out,\n nets['time_pitch_private'][idx].tensor_out],\n -1),\n config['net_g']['merged_private'],\n name='merged_'+str(idx))\n for idx in range(config['num_track'])\n ]\n\n nets['refiner_private'] = [\n NeuralNet(nets['merged_private'][idx].tensor_out,\n config['net_r']['private'],\n slope_tensor=self.slope_tensor,\n name='refiner_private'+str(idx))\n for idx in range(config['num_track'])\n ]\n\n return (tf.concat([nn.tensor_out for nn in nets['private']], -1), nets,\n tf.concat([nn.layers[-1].preactivated\n for nn in nets['private']], -1))",
"def build_graph_network(config, is_training=False):\n if not isinstance(config, graph_network_pb2.GraphNetwork):\n raise ValueError('Config has to be an instance of GraphNetwork proto.')\n\n network_oneof = config.WhichOneof('graph_network_oneof')\n if not network_oneof in _MODELS:\n raise ValueError('Invalid model %s!' % network_oneof)\n\n return _MODELS[network_oneof](getattr(config, network_oneof),\n is_training=is_training)",
"def build_graph(file_name):\n graph = MyGraph()\n with open(file_name, 'r') as fin:\n line = fin.readline().replace('\\n', '')\n while line != \"\":\n vals = line.split(':')\n graph.add_node(vals[0], pos=(int(vals[1]),int(vals[2])))\n line = fin.readline().replace('\\n', '')\n dest = fin.readline().replace('\\n','').split('\\t')\n line = fin.readline().replace('\\n', '')\n edges = []\n while line != '':\n node_info = line.split('\\t')\n src = node_info[0]\n for node in range(1,len(node_info)):\n if node_info[node] != '':\n if (dest[node],src) not in edges:\n edges.append((src,dest[node], node_info[node]))\n line = fin.readline().replace('\\n','')\n for edge in edges:\n graph.add_edge(edge[0], edge[1], weight=int(edge[2]))\n\n return graph",
"def load_network(fpath):\n\twith open(fpath, \"rb\") as f:\n\t\tnetwork = pickle.load(f)\n\treturn network",
"def import_network(file_name, NetworkClass):\r\n file = open(file_name, 'br')\r\n data_pickle = file.read()\r\n file.close()\r\n net = NetworkClass()\r\n net.__dict__ = pickle.loads(data_pickle)\r\n return net",
"def readNetworkFile(self, networkFileName):\n try:\n with open(networkFileName, \"r\") as networkFile:\n fileLines = networkFile.read().splitlines()\n \n # Set default parameters for metadata, then read\n self.numNodes = None\n self.numLinks = None\n self.numZones = None\n self.firstThroughNode = 0\n metadata = utils.readMetadata(fileLines) \n\n try:\n self.numNodes = int(metadata['NUMBER OF NODES'])\n self.numLinks = int(metadata['NUMBER OF LINKS'])\n if self.numZones != None:\n if self.numZones != int(metadata['NUMBER OF ZONES']):\n print(\"Error: Number of zones does not match in network/demand files.\")\n raise utils.BadFileFormatException\n else:\n self.numZones = int(metadata['NUMBER OF ZONES'])\n self.firstThroughNode = int(metadata['FIRST THRU NODE'])\n except KeyError: # KeyError\n print(\"Warning: Not all metadata present, error checking will be limited and code will proceed as though all nodes are through nodes.\")\n self.tollFactor = float(metadata.setdefault('TOLL FACTOR', 0))\n self.distanceFactor = float(metadata.setdefault('DISTANCE FACTOR', 0))\n \n for line in fileLines[metadata['END OF METADATA']:]:\n # Ignore comments and blank lines\n line = line.strip()\n commentPos = line.find(\"~\")\n if commentPos >= 0: # strip comments\n line = line[:commentPos]\n \n if len(line) == 0:\n continue \n \n data = line.split() \n if len(data) < 11 or data[10] != ';' :\n print(\"Link data line not formatted properly:\\n '%s'\" % line)\n raise utils.BadFileFormatException\n \n # Create link \n linkID = '(' + str(data[0]).strip() + \",\" + str(data[1]).strip() + ')'\n\n self.link[linkID] = Link(self,\n int(data[0]), int(data[1]), # head and tail\n float(data[2]), # capacity\n float(data[3]), # length\n float(data[4]), # free-flow time \n float(data[5]), # BPR alpha\n float(data[6]), # BPR beta\n float(data[7]), # Speed limit\n float(data[8]), # Toll\n data[9]) # Link type\n \n # Create nodes if necessary\n if data[0] not in self.node: # tail\n self.node[int(data[0])] = Node(True if int(data[0]) <= self.numZones else False)\n if data[1] not in self.node: # head\n self.node[int(data[1])] = Node(True if int(data[1]) <= self.numZones else False)\n \n except IOError:\n# print(\"\\nError reading network file %s\" % networkFile)\n traceback.print_exc(file=sys.stdout)",
"def load(self, name=\"\"):\n\n self.constructed = True\n if name == \"\":\n name = \"/home/unai/Escritorio/MultiNetwork/model/model\"\n\n network_descriptors = {\"Generic\": GenericDescriptor, \"Decoder\": DecoderDescriptor, \"Discrete\": DiscreteDescriptor, \"Convolution\": ConvolutionDescriptor}\n\n if not os.path.isfile(name):\n print(\"Error at loading the model\")\n return None\n\n f = open(name, \"r+\")\n\n lines = f.readlines()\n\n i = 0\n while lines[i] != \"\\n\": # Each component is stored in a line\n ident, n_inp, kind, n_hidden, layers, init, act, cond_rand, taking, producing, depth, reachable, belows = lines[i][:-1].split(\"_\")\n kwargs = {}\n if int(ident[1:]) > self.last_net:\n self.last_net = int(ident[1:])\n\n self.reachable[ident] = reachable.split(\",\")\n self.comps_below[ident] = belows.split(\",\")\n\n if \"onv\" in kind: # Not working right now\n filters, sizes, layers, strides = layers.split(\"*\")\n sizes = sizes.split(\",\")\n s = np.array([[int(sz) for sz in szs.split(\"/\")] for szs in sizes])\n desc = network_descriptors[kind](int(inp), int(outp), int(n_inp), layers.split(\",\"), filters.split(\",\"), [int(x) for x in strides.split(\",\")], s, [int(x) for x in act.split(\",\")], [int(x) for x in init.split(\",\")], kwargs)\n else:\n if len(kwargs) > 0: # Not working right now\n kwargs = kwargs.split(\"-\")\n kwargs[0] = [int(x) for x in kwargs[0].split(\".\") if len(x) > 0]\n kwargs[1] = [int(x) for x in kwargs[1].split(\".\") if len(x) > 0]\n if len(cond_rand) > 0:\n cond_rand = cond_rand.split(\"-\")\n cond_rand[0] = [int(x) for x in cond_rand[0].split(\",\") if len(x) > 0]\n cond_rand[1] = [int(x) for x in cond_rand[1].split(\",\") if len(x) > 0]\n kwargs[\"conds\"] = cond_rand\n desc = network_descriptors[kind](int(taking.split(\",\")[0]), int(producing.split(\",\")[0]), int(n_inp), int(n_hidden), [int(x) for x in layers.split(\",\") if x != \"-1\"], init_functions[[int(x) for x in init.split(\",\") if x != \"-1\"]],\n act_functions[[int(x) for x in act.split(\",\") if x != \"-1\"]], **kwargs)\n\n # print(\"ident\", ident, \"n_inp\", n_inp, \"kind\", kind, \"inp\", inp, \"outp\", outp, \"layers\", layers, \"init\", init, \"act\", act, \"taking\", taking, \"producing\", producing, \"depth\", depth, \"kwargs\", kwargs)\n net = NetworkComp(desc, InOut(size=int(taking.split(\",\")[0]), data_type=taking.split(\",\")[1]), InOut(data_type=producing.split(\",\")[1], size=int(producing.split(\",\")[0])), int(depth))\n\n self.add_net(net, ident)\n i += 1\n\n i += 1\n\n while lines[i] != \"\\n\": # Inputs\n\n ident, size, kind, depth = lines[i].split(\"_\")\n\n self.inputs[ident] = ModelComponent(None, InOut(size=int(size), data_type=kind), int(depth))\n i += 1\n\n i += 1\n\n while lines[i] != \"\\n\": # Outputs\n\n ident, size, kind, depth, belows = lines[i].split(\"_\")\n\n self.outputs[ident] = ModelComponent(InOut(size=int(size), data_type=kind), None, int(depth))\n self.comps_below[ident] = belows.split(\",\")\n i += 1\n\n i += 1\n\n while i < len(lines): # Connections\n name, inp, outp, kind, size = lines[i].split(\"_\")\n\n if int(name[1:]) > self.last_con:\n self.last_con = int(name[1:])\n\n self.connections[name] = Connection(inp, outp, InOut(kind, int(size)), name)\n i += 1\n self.update_below()",
"def load_net(filepath):\n\twith open(filepath, 'r') as fh:\n\t\treturn load(file = fh)",
"def build_from_file(self, topology_file, topology_format):\n with open(topology_file) as infile:\n for line in infile:\n if line.startswith(\"#\"):\n continue\n else:\n if topology_format == 0:\n x = line.split(\"\\n\")[0].split(\"|\")\n as1 = int(x[0])\n as2 = int(x[1])\n relationship = int(x[2])\n else:\n x = line.split(\"\\n\")[0].split(\"\\t\")\n if x[2] == \"p2c\":\n as1 = int(x[0])\n as2 = int(x[1])\n relationship = -1\n elif x[2] == \"c2p\":\n as1 = int(x[1])\n as2 = int(x[0])\n relationship = -1\n elif x[2] == \"p2p\":\n as1 = int(x[1])\n as2 = int(x[0])\n relationship = 0\n else:\n continue\n\n if not self.has_edge(as1, as2):\n self.add_edge(as1, as2, relationship=relationship, as1=as1, as2=as2)",
"def build_config_parser(filename='GradientOneAuthConfig.txt'):\n cfg = ConfigParser(dict_type=dict)\n cfg.optionxform = str\n cfgfile = None\n try:\n cfgfile = find_file(filename)\n except IOError:\n raise ValueError(\"Could not find a {} file. Please download \"\n \"one for this machine.\".format(filename))\n try:\n cfg.read(cfgfile)\n except IOError:\n raise ValueError(\"Could not read the {} file. Please download a \"\n \"valid config file for this machine.\"\n .format(filename))\n return cfg",
"def build_net(self, nodes, links, output_network, from_geometry=True, debug=False):\n _nodes = nodes.copy()\n _links = links.copy()\n\n if from_geometry:\n _nodes[['x', 'y']] = _nodes['geometry'].apply(lambda g: pd.Series([g.coords[0][0], g.coords[0][1]]))\n _nodes.drop(['geometry'], axis=1, errors='ignore', inplace=True)\n\n pandasdbf.write_dbf(_nodes, self.environment + r'\\temp_nodes_to_dbf.dbf', pre_process=False)\n pandasdbf.write_dbf(_links, self.environment + r'\\temp_links_to_dbf.dbf', pre_process=False)\n\n script_text = r\"\"\"\n\n RUN PGM=NETWORK PRNFILE=\"%s\\temp_net.prn\"\n FILEO NETO = \"%s\"\n FILEI LINKI[1] = \"%s\"\n FILEI NODEI[1] = \"%s\"\n ENDRUN\n\n \"\"\" % (\n self.environment,\n output_network,\n self.environment + r'\\temp_links_to_dbf.dbf',\n self.environment + r'\\temp_nodes_to_dbf.dbf'\n )\n\n # creating a cube script\n script = open(self.environment + r'\\build_net.s', 'w', encoding='latin')\n script.write(script_text)\n script.close()\n\n # runs the script with voyager.exe\n options = \"\"\"/Start /CloseWhenDone /Minimize /NoSplash\"\"\" if not debug else \"\"\n cmd = 'voyager.exe \"' + self.environment + r'\\build_net.s\" ' + options\n print(cmd)\n os.system(cmd)",
"def build_net(self, n_dps=1, n_vlans=1,\n dp_links=None, host_links=None, host_vlans=None,\n vlan_options=None, dp_options=None, host_options=None,\n routers=None, stack_roots=None,\n include=None, include_optional=None,\n hw_dpid=None, lacp_trunk=False):\n if include is None:\n include = []\n if include_optional is None:\n include_optional = []\n self.NUM_DPS = n_dps\n self.dpids = [str(self.rand_dpid()) for _ in range(n_dps)]\n self.dpids[0] = self.dpid\n vlan_vids = {vlan: self.vlan_vid(vlan) for vlan in range(n_vlans)}\n self.topo = FaucetTopoGenerator(\n self.OVS_TYPE,\n self.ports_sock,\n self._test_name(),\n self.dpids,\n dp_links,\n host_links,\n host_vlans,\n vlan_vids,\n hw_dpid=self.hw_dpid,\n switch_map=self.switch_map,\n port_order=self.port_order,\n start_port=self.start_port\n )\n self.port_maps = {dpid: self.create_port_map(dpid) for dpid in self.dpids}\n self.port_map = self.port_maps[self.dpid]\n self.CONFIG = self.get_config(\n dpids=self.dpids,\n hw_dpid=hw_dpid,\n hardware=self.hardware,\n ofchannel_log=self.debug_log_path,\n n_vlans=n_vlans,\n host_links=host_links,\n host_vlans=host_vlans,\n stack_roots=stack_roots,\n include=include,\n include_optional=include_optional,\n acls=self.acls(),\n acl_in_dp=self.acl_in_dp(),\n lacp_trunk=lacp_trunk,\n vlan_options=vlan_options,\n dp_options=dp_options,\n routers=routers,\n host_options=host_options\n )\n self.n_vlans = n_vlans\n self.dp_links = dp_links\n self.host_links = host_links\n self.host_vlans = host_vlans\n self.stack_roots = stack_roots\n self.routers = routers\n self.dp_options = dp_options\n self.host_options = host_options\n self.vlan_options = vlan_options",
"def build_configuration() -> Config:\n logger.debug('Building configuration.')\n config = Config(roman_url=sanitize_url(get_prop('ROMAN_URL')),\n redis_url=get_prop('REDIS_URL'),\n redis_port=int(get_prop('REDIS_PORT')),\n redis_username=get_prop('REDIS_USERNAME', True),\n redis_password=get_prop('REDIS_PASSWORD', True),\n charon_url=sanitize_url(get_prop('CHARON_URL')))\n logger.debug(f'Used configuration: {config}')\n return config",
"def genConfig():\n\n cfg = open('/home/sevudan/Scripts/projects/topogen/result.cfg','w')\n template = getTemplate()\n G = topo.topology()\n gen_config_lo(G, cfg)\n # Get node from list nodes.\n for node in sorted(G.nodes):\n d = dict(G[node])\n hostname = node\n # Get attributes for node.\n peer = d.keys()\n for peer_node in peer:\n params = d.get(peer_node)\n conf = template.render(\n node=hostname,\n description = peer_node,\n ifd = params.get('ifd'),\n local_ifl = params.get('local_ifl'),\n peer_ifl = params.get('peer_ifl'),\n ifa = params.get('ip_address')\n )\n result = '{}{}'.format(conf,'\\n')\n cfg.write(result)\n cfg.close()",
"def config() -> 'bittensor.Config':\n parser = argparse.ArgumentParser()\n parser.add_argument('--neuron.config', type=str, help='If set, defaults are overridden by passed file.')\n parser.add_argument('--neuron.modality', type=int, help='''Miner network modality. TEXT=0, IMAGE=1. Currently only allowed TEXT''', default=0)\n parser.add_argument('--neuron.use_upnpc', action='store_true', help='''Turns on port forwarding on your router using upnpc.''', default=False)\n parser.add_argument('--neuron.use_tensorboard', action='store_true', help='Turn on bittensor logging to tensorboard', default=True)\n parser.add_argument('--neuron.learning_rate', type=float, help='Training initial learning rate.', default=3e-2)\n parser.add_argument('--neuron.weight_decay', type=float, help='nucleus parameter weight decay.', default=0.25)\n parser.add_argument('--neuron.clip_gradients', type=float, help='Implement gradient clipping to avoid exploding loss on smaller architectures.', default=1.0)\n parser.add_argument('--neuron.n_epochs', type=int, help='Number of training epochs.', default=sys.maxsize )\n parser.add_argument('--neuron.epoch_length', type=int, help='Iterations of training per epoch', default=500)\n parser.add_argument('--neuron.batch_size_train', type=int, help='Training batch size.', default=2)\n parser.add_argument('--neuron.reload', action='store_true', help='''Reload training from previous trial run.''', default=False )\n parser.add_argument('--neuron.restart_on_failure', action='store_true', help='''Restart miner on unknown error.''', default=False)\n parser.add_argument('--neuron.compute_remote_gradients', action='store_true', help='''Does the neuron compute and return gradients from backward queries.''', default=False)\n parser.add_argument('--neuron.accumulate_remote_gradients', action='store_true', help='''Does the neuron accumulate remote gradients from backward queries.''', default=False)\n parser.add_argument('--neuron.name', type=str, help='Trials for this miner go in miner.root / (wallet_cold - wallet_hot) / miner.name ', default='gpt2_exodus')\n parser.add_argument('--neuron.device', type=str, help='Neuron default training device cpu/cuda', default=(\"cuda\" if torch.cuda.is_available() else \"cpu\"))\n bittensor.logging.add_args( parser )\n bittensor.wallet.add_args( parser )\n bittensor.subtensor.add_args( parser )\n bittensor.metagraph.add_args( parser )\n bittensor.dataloader.add_args( parser )\n bittensor.dendrite.add_args( parser )\n bittensor.axon.add_args( parser )\n GPT2Nucleus.add_args( parser )\n SGMOERouter.add_args( parser )\n \n config_file_path = vars(parser.parse_known_args()[0])['neuron.config']\n if config_file_path:\n #loads config_file and updates defaults\n config_file_path = os.path.expanduser(config_file_path)\n \n try:\n with open(config_file_path) as f:\n params_config = yaml.safe_load(f) \n print('Config File Detected at {} updating defaults'.format(config_file_path))\n parser.set_defaults(**params_config)\n \n except Exception as e:\n print('Error in loading: {} using default parser settings'.format(e))\n\n return bittensor.config( parser )",
"def _read_network_file(in_name, in_format=\"\", directed=False):\n\n if in_format == 'edges':\n if directed:\n g = nx.read_edgelist(in_name, create_using=nx.DiGraph())\n else:\n g = nx.read_edgelist(in_name, data=False)\n elif in_format == 'gefx':\n g = nx.read_gexf(in_name)\n elif in_format == 'gml':\n g = nx.read_gml(in_name)\n elif in_format == 'graphML' or in_format == 'graphml':\n g = nx.read_graphml(in_name)\n nodesInfo = g.nodes(data=True)\n if len(nx.get_node_attributes(g,\"label\"))>0:\n node2Label = {nodeid: data[\"label\"].replace(\" \",\"_\") for (nodeid, data) in nodesInfo}\n g = nx.relabel_nodes(g, node2Label, copy=False)\n elif in_format == 'pajek':\n g = nx.read_pajek(in_name)\n elif in_format == 'ncol':\n g = nx.read_edgelist(in_name)\n else:\n raise Exception(\"UNKNOWN FORMAT \" + in_format)\n return g",
"def load_network(self):\t\t\r\n\t\tself.dqn.load_network(self.path)",
"def build_configs():",
"def convert_network_configuration(config, dns_servers):\n\n def _get_subnet_part(pcfg, nameservers=None):\n subpart = {'type': 'static',\n 'control': 'auto',\n 'address': pcfg.get('ip_address'),\n 'gateway': pcfg.get('gateway')}\n\n if nameservers:\n subpart['dns_nameservers'] = nameservers\n\n if \":\" in pcfg.get('ip_address'):\n subpart['address'] = \"{0}/{1}\".format(pcfg.get('ip_address'),\n pcfg.get('cidr'))\n else:\n subpart['netmask'] = pcfg.get('netmask')\n\n return subpart\n\n all_nics = []\n for k in ('public', 'private'):\n if k in config:\n all_nics.extend(config[k])\n\n macs_to_nics = cloudnet.get_interfaces_by_mac()\n nic_configs = []\n\n for nic in all_nics:\n\n mac_address = nic.get('mac')\n sysfs_name = macs_to_nics.get(mac_address)\n nic_type = nic.get('type', 'unknown')\n # Note: the entry 'public' above contains a list, but\n # the list will only ever have one nic inside it per digital ocean.\n # If it ever had more than one nic, then this code would\n # assign all 'public' the same name.\n if_name = NIC_MAP.get(nic_type, sysfs_name)\n\n LOG.debug(\"mapped %s interface to %s, assigning name of %s\",\n mac_address, sysfs_name, if_name)\n\n ncfg = {'type': 'physical',\n 'mac_address': mac_address,\n 'name': if_name}\n\n subnets = []\n for netdef in ('ipv4', 'ipv6', 'anchor_ipv4', 'anchor_ipv6'):\n raw_subnet = nic.get(netdef, None)\n if not raw_subnet:\n continue\n\n sub_part = _get_subnet_part(raw_subnet)\n if nic_type == 'public' and 'anchor' not in netdef:\n # add DNS resolvers to the public interfaces only\n sub_part = _get_subnet_part(raw_subnet, dns_servers)\n else:\n # remove the gateway any non-public interfaces\n if 'gateway' in sub_part:\n del sub_part['gateway']\n\n subnets.append(sub_part)\n\n ncfg['subnets'] = subnets\n nic_configs.append(ncfg)\n LOG.debug(\"nic '%s' configuration: %s\", if_name, ncfg)\n\n return {'version': 1, 'config': nic_configs}"
] | [
"0.71432847",
"0.6280527",
"0.6222136",
"0.62148905",
"0.62123084",
"0.6184715",
"0.61805207",
"0.60832465",
"0.60693824",
"0.6053631",
"0.6041396",
"0.6024714",
"0.6021795",
"0.6015814",
"0.5989596",
"0.597385",
"0.596626",
"0.583157",
"0.58275205",
"0.5820853",
"0.5814393",
"0.5805685",
"0.58041537",
"0.5797638",
"0.5784804",
"0.57799035",
"0.57796305",
"0.57602847",
"0.5740101",
"0.5737727"
] | 0.7545299 | 0 |
For the given installer conditions, verify the dependencies for every condition that is referenced anywhere in specs or source. | def test_verify_all_dependencies(self):
for condition in self.all_references():
result = self.verify_dependencies(condition)
if result:
self.ill_defined[condition] = result
else:
self.well_defined.add(condition)
return self.ill_defined | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_verify_dependencies(self, cond_id, conditions):\n\n if not cond_id in conditions.get_keys():\n return 1\n else:\n result = self.verify_dependencies(cond_id)\n return result",
"def check_all(self, exe_paths=False):\n self.status_msg = \"\"\n for dep in self.dependencies:\n target_version = version.parse(\n self.dependency_dict[dep][\"version\"]\n )\n version_command = self.dependency_dict[dep][\"version_command\"]\n self.dependency_dict[dep][\"installed\"] = not self.force\n for binary in self.dependency_dict[dep][\"binaries\"]:\n if sh.which(binary, paths=SEARCH_PATHS) == None:\n self.dependency_dict[dep][\"installed\"] = False\n exe = binary\n ver_str = \"not installed.\"\n else:\n exe = sh.Command(binary, search_paths=SEARCH_PATHS)\n try:\n ver_out = exe(\n *version_command, _err_to_out=True\n ).rstrip(\"\\n\")\n if \"version_parser\" in self.dependency_dict[dep]:\n installed_version = version.parse(\n self.dependency_dict[dep][\"version_parser\"](\n ver_out\n )\n )\n else:\n installed_version = version.parse(\n default_version_splitter(ver_out)\n )\n except sh.ErrorReturnCode_1:\n installed_version = version.parse(\"0.0\")\n if installed_version == target_version:\n ver_str = (\n f\"at recommended version {installed_version}.\"\n )\n elif installed_version < target_version:\n ver_str = (\n f\"installed {installed_version} < target\"\n f\" {target_version}.\"\n )\n self.dependency_dict[dep][\"installed\"] = False\n elif installed_version > target_version:\n ver_str = (\n f\"installed {installed_version} exceeds target\"\n f\" {target_version}.\"\n )\n if exe_paths:\n self.status_msg += f\"{exe} {ver_str}\\n\"\n else:\n self.status_msg += f\"{binary} {ver_str}\\n\"\n self.versions_checked = True\n # Check that bin directory exists and is writable.\n if self.bin_path_exists:\n bin_path_state = \"exists, \"\n else:\n bin_path_state = \"doesn't exist, \"\n if self.install_path_writable:\n bin_path_state += \"writable, \"\n else:\n bin_path_state += \"not writable, \"\n all_required = all(\n [\n self.dependency_dict[d][\"installed\"]\n for d in self.dependency_dict\n if self.dependency_dict[d][\"required\"]\n ]\n )\n all_optional = all(\n [\n self.dependency_dict[d][\"installed\"]\n for d in self.dependency_dict\n if not self.dependency_dict[d][\"required\"]\n ]\n )\n if all_required:\n if all_optional:\n optional_str = \"and optional\"\n else:\n optional_str = \"\"\n self.status_msg += (\n f\"All required {optional_str} dependencies are installed.\\n\"\n )\n return all_required",
"def check(self, srcs, actual_deps):\r\n if self._check_missing_deps or self._check_missing_direct_deps or self._check_unnecessary_deps:\r\n missing_file_deps, missing_tgt_deps, missing_direct_tgt_deps = \\\r\n self._compute_missing_deps(srcs, actual_deps)\r\n\r\n buildroot = get_buildroot()\r\n def shorten(path): # Make the output easier to read.\r\n for prefix in [buildroot, self._context.ivy_home]:\r\n if path.startswith(prefix):\r\n return os.path.relpath(path, prefix)\r\n return path\r\n\r\n if self._check_missing_deps and (missing_file_deps or missing_tgt_deps):\r\n for (tgt_pair, evidence) in missing_tgt_deps:\r\n evidence_str = '\\n'.join([' %s uses %s' % (shorten(e[0]), shorten(e[1]))\r\n for e in evidence])\r\n self._context.log.error(\r\n 'Missing BUILD dependency %s -> %s because:\\n%s'\r\n % (tgt_pair[0].address.reference(), tgt_pair[1].address.reference(), evidence_str))\r\n for (src_tgt, dep) in missing_file_deps:\r\n self._context.log.error('Missing BUILD dependency %s -> %s'\r\n % (src_tgt.address.reference(), shorten(dep)))\r\n if self._check_missing_deps == 'fatal':\r\n raise TaskError('Missing deps.')\r\n\r\n if self._check_missing_direct_deps:\r\n for (tgt_pair, evidence) in missing_direct_tgt_deps:\r\n evidence_str = '\\n'.join([' %s uses %s' % (shorten(e[0]), shorten(e[1]))\r\n for e in evidence])\r\n self._context.log.warn('Missing direct BUILD dependency %s -> %s because:\\n%s' %\r\n (tgt_pair[0].address, tgt_pair[1].address, evidence_str))\r\n if self._check_missing_direct_deps == 'fatal':\r\n raise TaskError('Missing direct deps.')\r\n\r\n if self._check_unnecessary_deps:\r\n raise TaskError('Unnecessary dep warnings not implemented yet.')",
"def checkRequiredDependencies(self):\n \n # skip dependency check for downloading only\n if( self.downloadOnly ):\n return\n\n # hard dependencies\n for req in self.reqmodules:\n if( self.parent.module(req) == None ):\n # check if there is an auto detected module\n if( self.parent.module(req, True) == None ):\n self.abort( self.name + \" requires \" + req \\\n + \" and it wasn't found in your config file!!\" )\n else:\n # use auto detected module\n self.parent.use( self.parent.module(req, True) )\n self.parent.module( req ).init()\n\n print self.name + \": auto-detected \" + req + \" version \" + self.parent.module( req ).version\n \n # build only dependencies\n if( self.mode == \"install\" ):\n mods = self.reqmodules_buildonly + self.reqmodules_external\n for req in mods:\n if( self.parent.module(req) == None ):\n # check if there is an auto detected module\n if( self.parent.module(req, True) == None ):\n self.abort( req + \" not found in your config file!! \" + self.name \\\n + \" cannot be built without \" + req )\n else:\n # use auto detected module\n self.parent.use( self.parent.module(req, True) )\n self.parent.module( req ).init()\n\n print \" - \" + self.name + \": auto-detected \" + req + \" version \" + self.parent.module( req ).version",
"def test_check_dependencies_with_found(self):\n self.spy_on(check_install, op=kgb.SpyOpMatchAny([\n {\n 'args': (['cm', 'version'],),\n 'op': kgb.SpyOpReturn(True),\n },\n ]))\n\n client = self.build_client(setup=False)\n client.check_dependencies()\n\n self.assertSpyCallCount(check_install, 1)\n self.assertSpyCalledWith(check_install, ['cm', 'version'])",
"def dependency_check(dependency_set=CORE, exit_on_failure=True):\n verify_python_version()\n \n disable_warnings()\n\n platform = get_current_platform()\n\n #\n # Check for missing python modules\n #\n failed_deps = []\n pip_distributions = pip.get_installed_distributions()\n \n for w3af_req in platform.PIP_PACKAGES[dependency_set]:\n for dist in pip_distributions:\n if w3af_req.package_name.lower() == dist.project_name.lower():\n\n w3af_req_version = str(Version(w3af_req.package_version))\n dist_version = str(dist.version)\n\n if w3af_req_version == dist_version:\n # It's installed and the version matches!\n break\n else:\n failed_deps.append(w3af_req)\n\n #\n # Check for missing operating system packages\n #\n missing_os_packages = []\n for os_package in platform.SYSTEM_PACKAGES[dependency_set]:\n if not platform.os_package_is_installed(os_package):\n missing_os_packages.append(os_package)\n \n os_packages = list(set(missing_os_packages))\n\n # All installed?\n if not failed_deps and not os_packages:\n # False means: do not exit()\n enable_warnings()\n return False\n\n generate_requirements_txt(failed_deps)\n script_path = generate_helper_script(platform.PKG_MANAGER_CMD, os_packages,\n platform.PIP_CMD, failed_deps)\n\n #\n # Report the missing system packages\n #\n msg = ('w3af\\'s requirements are not met, one or more third-party'\n ' libraries need to be installed.\\n\\n')\n \n if os_packages:\n missing_pkgs = ' '.join(os_packages)\n \n msg += ('On %s systems please install the following operating'\n ' system packages before running the pip installer:\\n'\n ' %s %s\\n')\n print(msg % (platform.SYSTEM_NAME, platform.PKG_MANAGER_CMD,\n missing_pkgs))\n \n #\n # Report all missing python modules\n # \n if failed_deps:\n # pylint: disable=E1101\n msg = ('Your python installation needs the following modules'\n ' to run w3af:\\n')\n msg += ' ' + ' '.join([fdep.module_name for fdep in failed_deps])\n print(msg)\n print('\\n')\n # pylint: enable=E1101\n \n #\n # Report missing pip packages\n #\n not_git_pkgs = [fdep for fdep in failed_deps if not fdep.is_git]\n git_pkgs = [fdep.git_src for fdep in failed_deps if fdep.is_git]\n \n msg = ('After installing any missing operating system packages, use'\n ' pip to install the remaining modules:\\n')\n \n if not_git_pkgs:\n cmd = generate_pip_install_non_git(platform.PIP_CMD, not_git_pkgs)\n msg += ' %s\\n' % cmd\n \n if git_pkgs:\n for missing_git_pkg in git_pkgs:\n msg += ' %s\\n' % generate_pip_install_git(platform.PIP_CMD,\n missing_git_pkg)\n \n print(msg)\n \n msg = 'A script with these commands has been created for you at %s'\n print(msg % script_path)\n \n enable_warnings()\n platform.after_hook()\n \n if exit_on_failure:\n sys.exit(1)\n else:\n return True",
"def check_requirements(config=None):\n for dependency, module_requirements in (\n requirements(config, include_conditional=False).items()):\n for module_requirement in module_requirements:\n if \">=\" in module_requirement:\n module_name, required_version = module_requirement.split(\">=\")\n version_test = \">=\"\n elif \"==\" in module_requirement:\n module_name, required_version = module_requirement.split(\"==\")\n version_test = \"==\"\n else:\n module_name = module_requirement\n version_test = None\n\n try:\n module = __import__(module_name)\n except ImportError:\n logging.exception(\n \"Can't import %r which is part of %r\",\n module_name, dependency\n )\n raise MissingRequirementError(\n \"Can't import %r which is part of %r\"\n % (module_name, dependency), module_name, dependency\n )\n version = getattr(module, \"__version__\", None)\n file_path = getattr(module, \"__file__\", None)\n logger.info(\n \"Using %r version %r from %r to satisfy %r\",\n module_name, version, file_path, dependency\n )\n\n if version_test == \">=\":\n if version is None:\n raise MissingRequirementError(\n \"Version of %r isn't set as __version__ of module %r\"\n % (dependency, module_name), module_name, dependency\n )\n if LooseVersion(version) < LooseVersion(required_version):\n raise MissingRequirementError(\n \"Version of %r in %r is too old. %r < %r\"\n % (dependency, file_path, version, required_version),\n module_name, dependency\n )\n elif version_test == \"==\":\n if version is None:\n raise MissingRequirementError(\n \"Version of %r isn't set as __version__ of module %r\"\n % (dependency, module_name), module_name, dependency\n )\n if LooseVersion(version) != LooseVersion(required_version):\n raise MissingRequirementError(\n \"Unexpected version of %r in %r. %r != %r\"\n % (dependency, file_path, version, required_version),\n module_name, dependency\n )",
"def check_dependencies(target_binary, target_platform, target_architecture, target_type):\n check_architecture(target_architecture)\n check_platform(target_platform)",
"def check_dependencies():\n\n # Check for python version\n print(\"Python location : {}\".format(sys.executable))\n print(\"Python version : {}\".format(sys.version))\n if sys.version_info[0] < 3:\n warnings.warn(\n \"WARNING : Using python 2. This Python version is no longer maintained. Use at your own risk.\"\n )\n\n # Check FSL installation\n try:\n print(f\"Your fsl directory is located here: {os.environ['FSLDIR']}\")\n except KeyError:\n raise AssertionError(\n \"You do not have FSL installed! See installation instructions here: https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/FslInstallation\"\n )\n\n # Check AFNI installation\n try:\n print(\n f\"Your AFNI directory is located here: {subprocess.check_output('which afni', shell=True, universal_newlines=True)}\"\n )\n except subprocess.CalledProcessError:\n raise AssertionError(\n \"You do not have AFNI installed! See installation instructions here: https://afni.nimh.nih.gov/pub/dist/doc/htmldoc/background_install/main_toc.html\"\n )",
"def verify_dependencies(self):\n dependencies = {\n \"manufacturers\": [\n {\"name\": \"VMware\", \"slug\": \"vmware\"},\n ],\n \"platforms\": [\n {\"name\": \"VMware ESXi\", \"slug\": \"vmware-esxi\"},\n {\"name\": \"Windows\", \"slug\": \"windows\"},\n {\"name\": \"Linux\", \"slug\": \"linux\"},\n ],\n \"sites\": [{\n \"name\": \"vCenter\",\n \"slug\": \"vcenter\",\n \"comments\": \"A default virtual site created to house objects \"\n \"that have been synced from vCenter.\",\n \"tags\": [\"Synced\", \"vCenter\"]\n }],\n \"cluster_types\": [\n {\"name\": \"VMware ESXi\", \"slug\": \"vmware-esxi\"}\n ],\n \"device_roles\": [\n {\n \"name\": \"Server\",\n \"slug\": \"server\",\n \"color\": \"9e9e9e\",\n \"vm_role\": True\n }],\n \"tags\": [\n {\n \"name\": \"Orphaned\",\n \"slug\": \"orphaned\",\n \"color\": \"607d8b\",\n \"comments\": \"This applies to objects that have become \"\n \"orphaned. The source system which has \"\n \"previously provided the object no longer \"\n \"states it exists.{}\".format(\n \" An object with the 'Orphaned' tag will \"\n \"remain in this state until it ages out \"\n \"and is automatically removed.\"\n ) if settings.NB_PRUNE_ENABLED else \"\"\n },\n {\n \"name\": self.vc_tag,\n \"slug\": format_slug(self.vc_tag),\n \"comments\": \"Objects synced from vCenter host \"\n \"{}. Be careful not to modify the name or \"\n \"slug.\".format(self.vc_tag)\n }]\n }\n # For each dependency of each type verify object exists\n log.info(\"Verifying all prerequisite objects exist in NetBox.\")\n for dep_type in dependencies:\n log.debug(\n \"Checking NetBox has necessary %s objects.\", dep_type[:-1]\n )\n for dep in dependencies[dep_type]:\n self.obj_exists(nb_obj_type=dep_type, vc_data=dep)\n log.info(\"Finished verifying prerequisites.\")",
"def check_deps(self):\n\t\tshutit_global.shutit_global_object.yield_to_draw()\n\t\tcfg = self.cfg\n\t\tself.log('PHASE: dependencies', level=logging.DEBUG)\n\t\tself.pause_point('\\nNow checking for dependencies between modules', print_input=False, level=3)\n\t\t# Get modules we're going to build\n\t\tto_build = [\n\t\t\tself.shutit_map[module_id] for module_id in self.shutit_map\n\t\t\tif module_id in cfg and cfg[module_id]['shutit.core.module.build']\n\t\t]\n\t\t# Add any deps we may need by extending to_build and altering cfg\n\t\tfor module in to_build:\n\t\t\tself.resolve_dependencies(to_build, module)\n\n\t\t# Dep checking\n\t\tdef err_checker(errs, triples):\n\t\t\t\"\"\"Collate error information.\n\t\t\t\"\"\"\n\t\t\tnew_triples = []\n\t\t\tfor err, triple in zip(errs, triples):\n\t\t\t\tif not err:\n\t\t\t\t\tnew_triples.append(triple)\n\t\t\t\t\tcontinue\n\t\t\t\tfound_errs.append(err)\n\t\t\treturn new_triples\n\n\t\tfound_errs = []\n\t\ttriples = []\n\t\tfor depender in to_build:\n\t\t\tfor dependee_id in depender.depends_on:\n\t\t\t\ttriples.append((depender, self.shutit_map.get(dependee_id), dependee_id))\n\n\t\ttriples = err_checker([ self.check_dependee_exists(depender, dependee, dependee_id) for depender, dependee, dependee_id in triples ], triples)\n\t\ttriples = err_checker([ self.check_dependee_build(depender, dependee, dependee_id) for depender, dependee, dependee_id in triples ], triples)\n\t\ttriples = err_checker([ check_dependee_order(depender, dependee, dependee_id) for depender, dependee, dependee_id in triples ], triples)\n\n\t\tif found_errs:\n\t\t\treturn [(err,) for err in found_errs]\n\n\t\tself.log('Modules configured to be built (in order) are: ', level=logging.DEBUG)\n\t\tfor module_id in self.module_ids():\n\t\t\tmodule = self.shutit_map[module_id]\n\t\t\tif cfg[module_id]['shutit.core.module.build']:\n\t\t\t\tself.log(module_id + ' ' + str(module.run_order), level=logging.DEBUG)\n\t\tself.log('\\n', level=logging.DEBUG)\n\n\t\treturn []",
"def check_requirements(self):\n # first, separate plugins based on those with and without dependeices.\n remaining = set()\n loaded = set()\n\n for k, v in self.modules.items():\n if v.requirements:\n remaining.add(v)\n else:\n loaded.add(k)\n self.module_call_order.append(v)\n\n for r in remaining:\n # first we check to make sure that all dependencies are satisfied.\n if not self.dependencies_satisfied(r):\n raise Exception(f\"Oops! Module {r} is not satisfied! It desires: {r.requirements}\")\n\n # now confident that all versions check out, arrange the plugins into a suitable load order.\n # no reason to do anything fancy without requirements though.\n if not remaining:\n return\n\n while True:\n new_remaining = remaining.copy()\n for m in remaining:\n if loaded.issuperset({r for r in m.requirements.keys()}):\n new_remaining.remove(m)\n loaded.add(m.name)\n self.module_call_order.append(m)\n if len(new_remaining) < len(remaining):\n # this is good.. we made progress!\n remaining = new_remaining\n if not remaining:\n # hooray! No more plugins to process\n break\n else:\n # this is bad. we are not making progress.\n raise Exception(\"dependency load order is not progressing!\")",
"def check_dependencies(work_dir, fits_dir, fitsbase):\n # Print to screen what processing steps have been selected\n print \"The following processing steps have been selected:\\n\"\n if params.do_rfifind:\n print \" - PRESTO rfifind (RFI mitigation tools)\"\n if params.do_prepsub:\n print \" - PRESTO prepsubband (dedispersion)\"\n if params.do_candsearch:\n print \" - PRESTO acceleration search and candidate sifting\"\n if params.do_presto_sp:\n print \" - PRESTO singlepulse search (singlepulse.py)\"\n # Print to screen what processing steps are being skipped\n print \"\\nThe following processing steps are being skipped:\\n\"\n if params.do_rfifind == 0:\n print \" - PRESTO rfifind (RFI mitigation tools)\"\n if params.do_prepsub == 0:\n print \" - PRESTO prepsubband (dedispersion)\"\n if params.do_candsearch == 0:\n print \" - PRESTO acceleration search and candidate sifting\"\n if params.do_presto_sp == 0:\n print \" - PRESTO singlepulse search (singlepulse.py)\"\n print \"\\nChecking dependencies...\\n\"\n # There must be at least one .fits file in the fits directory\n fl = glob(fits_dir + '/%s*.fits' %fitsbase)\n if len(fl):\n print \" Found %d file(s) in %s:\\n\" %(len(fl), fits_dir)\n for i in fl:\n print \" %s\\n\" %(i.split('/')[-1])\n else:\n print \" No %s*.fits files found in %s !\\n Exiting...\\n\" %(fitsbase, fits_dir)\n sys.exit(0)\n # If skipping the RFIFIND step in processing but want to do\n # processing steps further down the line, then there must be a\n # rfi_products folder in the results directory with a .mask file\n # in it\n if params.do_rfifind == 0 and params.use_mask and \\\n (params.do_prepsub or params.do_candsearch or params.do_presto_sp):\n mlist = glob(work_dir + '/rfi_products/*.mask')\n if len(mlist):\n print \" Using RFI .mask:\\n %s\\n\" %(mlist[0])\n else:\n print \" No RFI .mask found in %s/rfi_products!\\n Exiting...\\n\"\\\n %(work_dir)\n sys.exit(0)\n # If skipping the PREPSUBBAND step in processing but want to\n # do processing steps further down the line, then there must be\n # de-dispersed time series files in the results directory of\n # the form basename*DM*.dat and basename*DM*.inf\n if params.do_prepsub == 0 and (params.do_candsearch or \n params.do_presto_sp):\n dats = glob(work_dir + '/*DM*dat')\n infs = glob(work_dir + '/*DM*inf')\n if not (len(dats) and len(infs)):\n print \" No .dat and/or .inf files in %s!\\n Exiting...\\n\" %(work_dir)\n sys.exit(0)\n # If we haven't exited by now, then things should be good\n print \"\\nLooks good...\\n\\n\"\n # Pause for a few seconds so you can actually read the output\n time.sleep(5)",
"def check(self):\n for req in self.requirements:\n if not self.COMPARISON_OPERATORS[req.operator](\n packaging.version.parse(get_installed_version(req.package)),\n packaging.version.parse(req.version),\n ):\n fail(f\"The cluster requires {req.package}{req.operator}{req.version}\")",
"def verify_dependencies(self, cond_id):\n undefined_paths = set()\n self._verify_dependencies(cond_id, undefined_paths, tuple())\n return undefined_paths",
"def checkOptionalDependencies(self):\n \n # skip dependency check for downloading only\n if( self.downloadOnly ):\n return\n\n # soft dependencies\n failed = []\n for opt in self.optmodules:\n mod = self.parent.module(opt)\n if( mod == None ):\n failed.append(opt)\n \n # remove soft dependencies that were not found\n self.buildWithout(failed)",
"def checkDeps( self ):\n\n # skip dependency check for downloading only\n if( self.downloadOnly ):\n return True\n\n # skip dependency check if package is going to be installed\n if( self.mode == \"install\" ):\n return True\n\n log.debug( 'Checking dependencies of %s', self.name )\n \n file = self.realPath() + \"/.dependencies\"\n \n r = True\n\n # if file doesn't exist return True\n if( not os.path.exists( file )):\n return True\n\n # open dependencies file\n f = open( file )\n filedeplist = {}\n for line in f.readlines():\n line = line.strip()\n if( (not line.startswith(os.linesep)) and (not line.startswith(\"#\")) \\\n and (len(line) > 0 )):\n tokens = line.split(\":\")\n filedeplist[ tokens[0] ] = tokens[1]\n f.close()\n\n log.debug( 'Dependencies read from file: %s', filedeplist )\n\n # get actual dependecies\n deplist={}\n self.getDepList(deplist)\n del deplist[self.name]\n\n log.debug( 'Dependencies found in current cfg file: %s', deplist )\n \n # compare dependencies\n for k, v in filedeplist.iteritems():\n if( deplist.has_key( k )):\n if( deplist[k] != v ):\n if( os.path.basename(deplist[k]) != os.path.basename(v) ):\n if( r ):\n print \"*** WARNING: ***\\n***\\tFollowing dependencies from \" + self.name + \" located at [ \" \\\n + self.realPath() + \" ] failed:\\n***\"\n print \"***\\t * \" + k + \" \" + os.path.basename(v) + \" differs from version \" \\\n + os.path.basename(deplist[k]) + \" defined in your config file..\"\n r = False\n else:\n if( r ): #just print this once\n print \"*** WARNING: ***\\n***\\tFollowing dependencies from \" + self.name + \" located at [ \" + self.realPath() \\\n + \" ] failed:\\n***\"\n print \"***\\t * \" + k + \" not found in your config file!!\"\n r = False\n \n\n if( not r ):\n print \"***\"\n if( self.useLink ):\n print \"***\\t\" + self.name + \" is in \\\"link\\\" mode, if you want to rebuild it with the new dependencies set it to \\\"use\\\" mode...\"\n r = True\n else:\n if( not self.parent.noAutomaticRebuilds ):\n print \"***\\t * \" + self.name + \" changed to \\\"install\\\" mode and rebuild flag set to True...\"\n self.mode = \"install\"\n self.rebuild = True\n self.preCheckDeps()\n print \"***\\n***\\tUpdating dependency tree ( modules that depend on \" + self.name + \" need also to be rebuilt )...\\n***\"\n self.updateDepTree([])\n print \"***\\n***\\tif you do NOT want to rebuild this module(s) just answer \\\"no\\\" later on in the installation process,\\n\" \\\n + \"***\\tor set the global flag ilcsoft.noAutomaticRebuilds=True in your config file...\"\n else:\n print \"***\\n***\\tglobal flag ilcsoft.noAutomaticRebuilds is set to True, nothing will be done...\\n***\"\n return r",
"def check_requirements():\n if not os.path.exists(REQUIREMENTS):\n sys.exit(\n ansi.error() + ' %s is missing. Please check it in.' % ansi.underline(REQUIREMENTS)\n )\n\n with open(REQUIREMENTS, 'r', encoding='utf-8') as f:\n dependencies = f.readlines()\n\n vcs = [d for d in dependencies if re.match(r'^(-e )?(git|svn|hg|bzr).*', d)]\n\n dependencies = list(set(dependencies) - set(vcs))\n\n missing = []\n try:\n pkg_resources.require(dependencies)\n except (\n pkg_resources.ContextualVersionConflict,\n pkg_resources.DistributionNotFound,\n pkg_resources.VersionConflict\n ) as error:\n missing.append(str(error))\n except pkg_resources.RequirementParseError:\n pass\n\n if missing:\n missing = ' missing requirement:\\n ' + os.linesep.join(missing)\n if '--env-checked' in sys.argv:\n sys.exit(ansi.error() + missing + '\\nRequirement installation failure, please check for errors in:\\n $ lore install\\n')\n else:\n print(ansi.warning() + missing)\n import lore.__main__\n lore.__main__.install_requirements(None)\n reboot('--env-checked')",
"def check_dependencies(cls):\n\n missing = []\n for name in cls.DEPENDENCIES:\n try:\n import_module(name)\n except ModuleNotFoundError:\n missing.append(name)\n\n if any(missing):\n msg = ('The sup3r stitching module depends on the following '\n 'special dependencies that were not found in the active '\n 'environment: {}'.format(missing))\n logger.error(msg)\n raise ModuleNotFoundError(msg)",
"def _sufficient_deps(cls, deps):\n if cls.MODEL_PACKAGE is None:\n return True\n else:\n for d in deps.conda:\n if cls.MODEL_PACKAGE in d:\n return True\n for d in deps.pip:\n if cls.MODEL_PACKAGE in d:\n return True\n return False",
"def requirements():\n print('Verifying basic requirements met')\n # python version 3+ is required\n if sys.version_info[0] < 3:\n print('This program requires Python 3')\n print('Exiting')\n exit(1)\n # you must provide a device list or device file\n if device_file == \"\" and devices == [\"\"]:\n print('You need to either specify the devices (-de) or specify a file with a list of devices one per line (-df)')\n print('No upgrades were performed')\n sys.exit(1)\n if device_file != \"\" and devices != [\"\"]:\n print('You need to either specify the devices (-de) or specify a file with a list of devices one per line (-df)')\n print('No upgrades were performed')\n sys.exit(1)\n if not partition:\n print('You need to specify a partition (-pa) for upgrade')\n sys.exit(1)\n if not upgrade_file:\n print('You must specify a local file to use for upgrade')\n sys.exit(1)",
"def _verify_dependencies(self, cond_id, undefined_paths, current_path):\n\n # Exception for izpack conditions:\n if cond_id in self.conditions.properties[WHITE_LIST]:\n return True\n\n # Short-circuit on well-defined conditions:\n if cond_id in self.well_defined:\n return True\n\n # Short-circuit ill-defined conditions:\n if cond_id in list(self.ill_defined.keys()):\n current_path = current_path + ((cond_id, 'ill-defined condition'),)\n undefined_paths.add(current_path)\n return False\n\n # Cycle checking:\n tup = (cond_id, 'condition')\n if tup in current_path:\n current_path += ((cond_id, 'cyclic condition reference'),)\n undefined_paths.add(current_path)\n return False\n\n # Check for undefined condition.\n if not cond_id in self.conditions.get_keys():\n tup = (cond_id, 'undefined condition')\n current_path += (tup,)\n undefined_paths.add(current_path)\n return False\n\n current_path += (tup,)\n condition = self.conditions.container[cond_id]\n condition_type = condition['type']\n\n if condition_type in list(self.condition_tests.keys()) and not \\\n self.condition_tests[condition_type](self, condition, undefined_paths, current_path):\n return False\n\n self.well_defined.add(cond_id)\n return True",
"def check_requirements():\n\n # Which programs are reqired?\n required_programs = ['virtualbox', 'vagrant']\n\n # Make sure the required programs are installed.\n for program in required_programs:\n\n # What's the path to the executable?\n try:\n subprocess.check_output(['which', program])\n except subprocess.CalledProcessError:\n message = \"Please install \" + program + \" before proceeding.\"\n Utilities.log(message)\n exit(1)",
"def check_dependencies():\n check_python_version()\n\n dependencies = [\"sqlite3\"]\n\n for dependency in dependencies:\n try:\n __import__(dependency)\n except ImportError as e:\n raise CuckooStartupError(\"Unable to import \\\"%s\\\"\" % dependency)\n\n return True",
"def _check_conditional_dependency(obj, condition, package, severity, msg=None):\n if condition:\n if msg is None:\n msg = (\n f\"The specific parameter values of {obj.__class__.__name__}'s \"\n f\"class instance require `{package}` installed. Please run: \"\n f\"`pip install {package}` to \"\n f\"install the `{package}` package. \"\n )\n try:\n _check_soft_dependencies(package, severity=severity, obj=obj)\n except ModuleNotFoundError as e:\n raise ModuleNotFoundError(msg) from e",
"def testCheckDependencies(self):\n dependencies_file = self._GetTestFilePath(['dependencies.ini'])\n self._SkipIfPathNotExists(dependencies_file)\n\n dependency_helper = dependencies.DependencyHelper(\n dependencies_file=dependencies_file)\n\n dependency_helper.CheckDependencies(verbose_output=False)",
"def postCheckDeps(self):\n if( self.mode == \"install\" ):\n\n # check for make\n if( not isinPath( \"make\" )):\n self.abort( \"make not found on your system!!\" )\n\n # check for tee\n if( not isinPath( \"tee\" )):\n self.abort( \"tee not found on your system!!\" )",
"def preCheckDeps(self):\n \n # add cmake dependency\n if( self.mode == \"install\" and self.hasCMakeBuildSupport ):\n self.addExternalDependency( [\"CMake\" ] )\n if self.name != \"LCIO\":\n self.addExternalDependency( [\"ILCUTIL\" ] )",
"def checkDependencies(check=True):\n modules = []\n f = open(CONST_REQUIREMENTS_FILE)\n for line in f:\n if line.find('#'):\n modules.append([line[:line.index('=')], (line[line.index('=')+2:]).strip()])\n f.close()\n\n for module in modules:\n try:\n __import__(module[0])\n except ImportError: \n if query_user_bool(\"Missing module %s.\" \\\n \" Do you wish to install it?\" % module[0]):\n subprocess.call([\"pip2\", \"install\", \"%s==%s\" %\n (module[0], module[1])])\n \n else:\n return False\n return True",
"def test_check_dependencies_with_missing(self):\n self.spy_on(check_install, op=kgb.SpyOpReturn(False))\n\n client = self.build_client(setup=False)\n\n message = \"Command line tools ('cm') are missing.\"\n\n with self.assertRaisesMessage(SCMClientDependencyError, message):\n client.check_dependencies()\n\n self.assertSpyCallCount(check_install, 1)\n self.assertSpyCalledWith(check_install, ['cm', 'version'])"
] | [
"0.68579435",
"0.6556382",
"0.6347779",
"0.63326347",
"0.62767583",
"0.62427205",
"0.61748946",
"0.6174221",
"0.6137063",
"0.60748786",
"0.6053641",
"0.5956393",
"0.588417",
"0.5882532",
"0.58643204",
"0.5854203",
"0.5835256",
"0.58289266",
"0.5825942",
"0.57865596",
"0.57855487",
"0.5742168",
"0.5716644",
"0.57165724",
"0.57164556",
"0.56999207",
"0.5691669",
"0.5658087",
"0.56579465",
"0.56486046"
] | 0.6852399 | 1 |
Verifies that the given condition id is defined, and that its dependencies and their transitive dependencies are all defined and valid. | def test_verify_dependencies(self, cond_id, conditions):
if not cond_id in conditions.get_keys():
return 1
else:
result = self.verify_dependencies(cond_id)
return result | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _verify_dependencies(self, cond_id, undefined_paths, current_path):\n\n # Exception for izpack conditions:\n if cond_id in self.conditions.properties[WHITE_LIST]:\n return True\n\n # Short-circuit on well-defined conditions:\n if cond_id in self.well_defined:\n return True\n\n # Short-circuit ill-defined conditions:\n if cond_id in list(self.ill_defined.keys()):\n current_path = current_path + ((cond_id, 'ill-defined condition'),)\n undefined_paths.add(current_path)\n return False\n\n # Cycle checking:\n tup = (cond_id, 'condition')\n if tup in current_path:\n current_path += ((cond_id, 'cyclic condition reference'),)\n undefined_paths.add(current_path)\n return False\n\n # Check for undefined condition.\n if not cond_id in self.conditions.get_keys():\n tup = (cond_id, 'undefined condition')\n current_path += (tup,)\n undefined_paths.add(current_path)\n return False\n\n current_path += (tup,)\n condition = self.conditions.container[cond_id]\n condition_type = condition['type']\n\n if condition_type in list(self.condition_tests.keys()) and not \\\n self.condition_tests[condition_type](self, condition, undefined_paths, current_path):\n return False\n\n self.well_defined.add(cond_id)\n return True",
"def verify_dependencies(self, cond_id):\n undefined_paths = set()\n self._verify_dependencies(cond_id, undefined_paths, tuple())\n return undefined_paths",
"def test_verify_all_dependencies(self):\n\n for condition in self.all_references():\n result = self.verify_dependencies(condition)\n\n if result:\n self.ill_defined[condition] = result\n else:\n self.well_defined.add(condition)\n\n return self.ill_defined",
"def _check_conditional_dependency(obj, condition, package, severity, msg=None):\n if condition:\n if msg is None:\n msg = (\n f\"The specific parameter values of {obj.__class__.__name__}'s \"\n f\"class instance require `{package}` installed. Please run: \"\n f\"`pip install {package}` to \"\n f\"install the `{package}` package. \"\n )\n try:\n _check_soft_dependencies(package, severity=severity, obj=obj)\n except ModuleNotFoundError as e:\n raise ModuleNotFoundError(msg) from e",
"def check_control_dependency(\n self,\n context: SlicingContext,\n unique_instr: UniqueInstruction,\n code_object_id: int,\n ) -> bool:\n control_dependency = False\n\n if not unique_instr.is_cond_branch():\n return False\n\n code_object: CodeObjectMetaData = self._known_code_objects[code_object_id]\n cdg: ControlDependenceGraph = code_object.cdg\n curr_node = self.get_node(unique_instr.node_id, cdg)\n assert curr_node, \"Invalid node id\"\n successors = cdg.get_successors(curr_node)\n\n instr_ctrl_deps_copy = context.instr_ctrl_deps.copy()\n\n # Check if any instruction on S_C is control dependent on current instruction\n # If so: include current instruction in the slice, remove all instructions\n # control dependent on current instruction\n for instr in context.instr_ctrl_deps:\n instr_node = self.get_node(instr.node_id, cdg)\n if instr_node in successors:\n instr_ctrl_deps_copy.remove(instr)\n control_dependency = True\n context.instr_ctrl_deps = instr_ctrl_deps_copy\n\n return control_dependency",
"def test_are_dependency_packs_valid(self, current_file, id_set, answer):\n validator = get_validator(current_file)\n assert validator.are_dependency_packs_valid(id_set) is answer",
"def testConditionChecking(self):\n\n state = State.from_problem(self.prob)\n \n drive = self.dom.get_action(\"drive\")\n with drive.instantiate([\"agent\", \"tru1\", \"apt1\"], self.prob):\n self.assert_(state.is_satisfied(drive.precondition))\n\n with drive.instantiate([\"agent\", \"tru1\", \"apt2\"], self.prob):\n self.assertFalse(state.is_satisfied(drive.precondition))",
"def testConditionReasons(self):\n \n state = State.from_problem(self.prob)\n\n relevantVars = []\n drive = self.dom.get_action(\"drive\")\n with drive.instantiate([\"agent\", \"tru1\", \"apt1\"], self.prob):\n self.assert_(state.is_satisfied(drive.precondition, relevantVars))\n\n relevantVars = set(relevantVars)\n \n s1 = StateVariable(self.prob.functions[\"city-of\"][0], [self.prob[\"pos1\"]])\n s2 = StateVariable(self.prob.functions[\"city-of\"][0], [self.prob[\"apt1\"]])\n s3 = StateVariable(self.prob.functions[\"location-of\"][0], [self.prob[\"tru1\"]])\n \n self.assertEqual(len(relevantVars), 3)\n self.assert_(s1 in relevantVars)\n self.assert_(s2 in relevantVars)\n self.assert_(s3 in relevantVars)",
"def check_dependencies():\n\n vars_valid = check_variables(\n AirflowVars.PROJECT_ID, AirflowVars.TERRAFORM_ORGANIZATION, AirflowVars.VM_DAGS_WATCH_LIST\n )\n conns_valid = check_connections(AirflowConns.TERRAFORM)\n\n if not vars_valid or not conns_valid:\n raise AirflowException(\"Required variables or connections are missing\")",
"def check_deps(self):\n\t\tshutit_global.shutit_global_object.yield_to_draw()\n\t\tcfg = self.cfg\n\t\tself.log('PHASE: dependencies', level=logging.DEBUG)\n\t\tself.pause_point('\\nNow checking for dependencies between modules', print_input=False, level=3)\n\t\t# Get modules we're going to build\n\t\tto_build = [\n\t\t\tself.shutit_map[module_id] for module_id in self.shutit_map\n\t\t\tif module_id in cfg and cfg[module_id]['shutit.core.module.build']\n\t\t]\n\t\t# Add any deps we may need by extending to_build and altering cfg\n\t\tfor module in to_build:\n\t\t\tself.resolve_dependencies(to_build, module)\n\n\t\t# Dep checking\n\t\tdef err_checker(errs, triples):\n\t\t\t\"\"\"Collate error information.\n\t\t\t\"\"\"\n\t\t\tnew_triples = []\n\t\t\tfor err, triple in zip(errs, triples):\n\t\t\t\tif not err:\n\t\t\t\t\tnew_triples.append(triple)\n\t\t\t\t\tcontinue\n\t\t\t\tfound_errs.append(err)\n\t\t\treturn new_triples\n\n\t\tfound_errs = []\n\t\ttriples = []\n\t\tfor depender in to_build:\n\t\t\tfor dependee_id in depender.depends_on:\n\t\t\t\ttriples.append((depender, self.shutit_map.get(dependee_id), dependee_id))\n\n\t\ttriples = err_checker([ self.check_dependee_exists(depender, dependee, dependee_id) for depender, dependee, dependee_id in triples ], triples)\n\t\ttriples = err_checker([ self.check_dependee_build(depender, dependee, dependee_id) for depender, dependee, dependee_id in triples ], triples)\n\t\ttriples = err_checker([ check_dependee_order(depender, dependee, dependee_id) for depender, dependee, dependee_id in triples ], triples)\n\n\t\tif found_errs:\n\t\t\treturn [(err,) for err in found_errs]\n\n\t\tself.log('Modules configured to be built (in order) are: ', level=logging.DEBUG)\n\t\tfor module_id in self.module_ids():\n\t\t\tmodule = self.shutit_map[module_id]\n\t\t\tif cfg[module_id]['shutit.core.module.build']:\n\t\t\t\tself.log(module_id + ' ' + str(module.run_order), level=logging.DEBUG)\n\t\tself.log('\\n', level=logging.DEBUG)\n\n\t\treturn []",
"def test_check_job_dependencies_prior_dependency_has_errors(database):\n sess = database.session\n sub = SubmissionFactory(submission_id=1)\n job = JobFactory(submission_id=sub.submission_id, job_status_id=JOB_STATUS_DICT['finished'],\n job_type_id=JOB_TYPE_DICT['csv_record_validation'], file_type_id=FILE_TYPE_DICT['award'],\n number_of_errors=3)\n job_2 = JobFactory(submission_id=sub.submission_id, job_status_id=JOB_STATUS_DICT['waiting'],\n job_type_id=JOB_TYPE_DICT['csv_record_validation'], file_type_id=FILE_TYPE_DICT['award'])\n sess.add_all([sub, job, job_2])\n sess.commit()\n\n # Job 1 finished, it is a prerequisite for job 2 (waiting) but it has errors\n job_dep = JobDependency(job_id=job_2.job_id, prerequisite_id=job.job_id)\n sess.add(job_dep)\n sess.commit()\n\n check_job_dependencies(job.job_id)\n\n assert job_2.job_status_id == JOB_STATUS_DICT['waiting']",
"def consistency_checker(model,universals,existentials):\n universal_set=set(universals)\n existential_set=set(existentials)\n #Additionally to the universal and existential variables the model may\n #contain additional auxiliary variables -- e.g. for setting default values.\n #We consider these variables such as the existential variables.\n auxiliary_variables_in_model={abs(l) for clause in model for l in clause \n if (not abs(l) in universal_set) and (not abs(l) in existential_set)}\n existential_set = existential_set.union(auxiliary_variables_in_model)\n result, certificate = checkModelQBF(model, universal_set, existential_set)\n return result",
"def condition_details(\n self,\n condition_id: str,\n params: Optional[Dict] = None,\n headers: Optional[Dict] = None,\n ) -> ConditionDetails:\n method = self._get_method(\"condition_details\")\n method = method.format(**{\"id\": condition_id})\n\n return self.call_api_get(method=method, params=params, headers=headers)",
"def valid_dependency(self, dep):\r\n return True",
"def check_requirements(config=None):\n for dependency, module_requirements in (\n requirements(config, include_conditional=False).items()):\n for module_requirement in module_requirements:\n if \">=\" in module_requirement:\n module_name, required_version = module_requirement.split(\">=\")\n version_test = \">=\"\n elif \"==\" in module_requirement:\n module_name, required_version = module_requirement.split(\"==\")\n version_test = \"==\"\n else:\n module_name = module_requirement\n version_test = None\n\n try:\n module = __import__(module_name)\n except ImportError:\n logging.exception(\n \"Can't import %r which is part of %r\",\n module_name, dependency\n )\n raise MissingRequirementError(\n \"Can't import %r which is part of %r\"\n % (module_name, dependency), module_name, dependency\n )\n version = getattr(module, \"__version__\", None)\n file_path = getattr(module, \"__file__\", None)\n logger.info(\n \"Using %r version %r from %r to satisfy %r\",\n module_name, version, file_path, dependency\n )\n\n if version_test == \">=\":\n if version is None:\n raise MissingRequirementError(\n \"Version of %r isn't set as __version__ of module %r\"\n % (dependency, module_name), module_name, dependency\n )\n if LooseVersion(version) < LooseVersion(required_version):\n raise MissingRequirementError(\n \"Version of %r in %r is too old. %r < %r\"\n % (dependency, file_path, version, required_version),\n module_name, dependency\n )\n elif version_test == \"==\":\n if version is None:\n raise MissingRequirementError(\n \"Version of %r isn't set as __version__ of module %r\"\n % (dependency, module_name), module_name, dependency\n )\n if LooseVersion(version) != LooseVersion(required_version):\n raise MissingRequirementError(\n \"Unexpected version of %r in %r. %r != %r\"\n % (dependency, file_path, version, required_version),\n module_name, dependency\n )",
"def check_explicit_data_dependency( # noqa: C901\n self,\n context: SlicingContext,\n unique_instr: UniqueInstruction,\n traced_instr: ExecutedInstruction | None,\n ) -> tuple[bool, set[str]]:\n complete_cover = False\n partial_cover = False\n attribute_creation_uses = set()\n\n if not unique_instr.is_def():\n return False, set()\n\n # Check variable definitions\n if isinstance(traced_instr, ExecutedMemoryInstruction):\n complete_cover = self._check_variables(context, traced_instr)\n\n # When an object, of which certain used attributes are taken from,\n # is created, the slicer has to look for the definition of normal variables\n # instead of these attributes, since they are defined as variables and not\n # as attributes on class/module level.\n if traced_instr.arg_address and traced_instr.object_creation:\n attribute_uses = set()\n for use in context.attr_uses:\n if use.startswith(hex(traced_instr.arg_address)) and len(use) > len(\n hex(traced_instr.arg_address)\n ):\n complete_cover = True\n attribute_uses.add(use)\n attribute_creation_uses.add(\"_\".join(use.split(\"_\")[1:]))\n for use in attribute_uses:\n context.attr_uses.remove(use)\n\n # Check for address dependencies\n if traced_instr.is_mutable_type and traced_instr.object_creation:\n # Note that the definition of an object here means the\n # creation of the object.\n address_dependency = self._check_scope_for_def(\n context.var_uses_addresses,\n hex(traced_instr.arg_address),\n None,\n None,\n )\n if address_dependency:\n complete_cover = True\n\n # Check for the attributes which were converted to variables\n # (explained in the previous construct)\n if traced_instr.argument in context.attribute_variables:\n complete_cover = True\n context.attribute_variables.remove(str(traced_instr.argument))\n\n if isinstance(traced_instr, ExecutedAttributeInstruction):\n # check attribute defs\n if traced_instr.combined_attr in context.attr_uses:\n complete_cover = True\n context.attr_uses.remove(traced_instr.combined_attr)\n # Partial cover: modification of attribute of\n # object in search for definition\n if hex(traced_instr.src_address) in context.var_uses_addresses:\n partial_cover = True\n\n return (complete_cover or partial_cover), attribute_creation_uses",
"def dependencies_satisfied(self, module) -> bool:\n for k, v in module.requirements.items():\n if k not in self.modules:\n return False\n found_ver = self.modules[k].version\n if isinstance(v, str):\n return found_ver == v\n elif isinstance(v, dict):\n if \"eq\" in v and (found_ver != v[\"eq\"]):\n return False\n if \"min\" in v and (found_ver < v[\"min\"]):\n return False\n if \"max\" in v and (found_ver > v[\"max\"]):\n return False\n else:\n return True\n return True",
"def check_condition(self):\n\n\t\traw_context = {\n\t\t\t'folk': self.folk\n\t\t}\n\n\t\tstatus, param = self.execute(self.mission_grid, 'condition', self.pending_mission.kingdom, raw_context)\n\t\treturn status",
"def test_check_model_dependencies_complete(dependency_testing_model, complete_env) -> None:\n assert check_model_dependencies(\n model_cls=dependency_testing_model,\n environment=complete_env,\n raise_for_missing=False,\n )",
"def validate_dependencies(self, session, entry):",
"def satisfy(self):\n self.stub.Satisfy(\n depend_pb2.DependSatisfyRequest(depend=self.data), timeout=Cuebot.Timeout)",
"def test_are_integrations_in_dependency_packs(self, current_file, id_set, answer):\n validator = get_validator(current_file)\n assert validator.are_integrations_in_dependency_packs(id_set) is answer",
"def check_program_validity(self):\n for fact in self._facts:\n if fact not in self._predicates:\n raise Exception(\"Invalid fact, no IDB defined: \" + fact)\n for clause in self._facts[fact]:\n self.check_clause_validity(clause)\n\n for rule in self._rules:\n if rule not in self._predicates:\n raise Exception(\"Invalid rule, no IDB defined: \" + rule)\n for clause in self._rules[rule]:\n self.check_clause_validity(clause.head)\n for body_clause in clause.body:\n self.check_clause_validity(body_clause)",
"def mempool_assert_my_coin_id(condition: ConditionWithArgs, unspent: CoinRecord) -> Optional[Err]:\n if unspent.coin.name() != condition.vars[0]:\n return Err.ASSERT_MY_COIN_ID_FAILED\n return None",
"def match_conditions(id_dict, conditions, aircraft_database_df):\n q = '&'.join([ '(' + k + '==' + f'\"{str(v)}\"' + ')' for (k,v) in id_dict.items() ])\n entry = aircraft_database_df.query(q)\n if entry.empty:\n return False\n else:\n for (k, v) in conditions.items():\n if not all(entry[k].str.match(v, na=False)):\n return False\n return True",
"def _check_family(self):\n for (s, (b, c)), (cond, ref) in families.items():\n if s != self.SYMBOL or len(b) != self._.d:\n continue\n vars = tuple(set(sum(map(variables, b + c), ())))\n sols = _solve([SR(l) == r for l, r\n in zip(self._.b[:-1] + self._.c[1:], b + c)],\n vars)\n if any(checkConditions(cond, sol) for sol in sols\n if is_integral(sol)):\n raise InfeasibleError(refs=ref)",
"def condition(*args, delete: bool=True, dependency: Union[AnyStr, List[AnyStr]]=\"\", initialize:\n bool=True, script: AnyStr=\"\", state: bool=True, q=True, query=True, e=True,\n edit=True, **kwargs)->Union[None, Any]:\n pass",
"def check_status(self, id):\n raise NotImplementedError()",
"def are_all_deps_fulfilled(self, package: 'Package', only_make_check: bool = False,\n only_depends: bool = False, print_reason: bool = False) -> bool:\n\n for dep in package.relevant_deps(only_make_check=only_make_check, only_depends=only_depends):\n if not self.provided_by(dep):\n if print_reason:\n aurman_note(\n \"Dependency {} of package {} is not fulfilled\".format(\n Colors.BOLD(Colors.LIGHT_MAGENTA(dep)),\n Colors.BOLD(Colors.LIGHT_MAGENTA(package.name))\n )\n )\n return False\n else:\n return True",
"def dependencies_are_met(\n self,\n parent_job: Optional['Job'] = None,\n pipeline: Optional['Pipeline'] = None,\n exclude_job_id: Optional[str] = None,\n ) -> bool:\n connection = pipeline if pipeline is not None else self.connection\n\n if pipeline is not None:\n connection.watch(*[self.key_for(dependency_id) for dependency_id in self._dependency_ids])\n\n dependencies_ids = {_id.decode() for _id in connection.smembers(self.dependencies_key)}\n\n if exclude_job_id:\n dependencies_ids.discard(exclude_job_id)\n if parent_job and parent_job.id == exclude_job_id:\n parent_job = None\n\n if parent_job:\n # If parent job is canceled, treat dependency as failed\n # If parent job is not finished, we should only continue\n # if this job allows parent job to fail\n dependencies_ids.discard(parent_job.id)\n if parent_job.get_status() == JobStatus.CANCELED:\n return False\n elif parent_job._status == JobStatus.FAILED and not self.allow_dependency_failures:\n return False\n\n # If the only dependency is parent job, dependency has been met\n if not dependencies_ids:\n return True\n\n with connection.pipeline() as pipeline:\n for key in dependencies_ids:\n pipeline.hget(self.key_for(key), 'status')\n\n dependencies_statuses = pipeline.execute()\n\n allowed_statuses = [JobStatus.FINISHED]\n if self.allow_dependency_failures:\n allowed_statuses.append(JobStatus.FAILED)\n\n return all(status.decode() in allowed_statuses for status in dependencies_statuses if status)"
] | [
"0.7233518",
"0.71392715",
"0.60746115",
"0.5732978",
"0.5654439",
"0.5454553",
"0.5448558",
"0.5441111",
"0.52943414",
"0.52780235",
"0.5251414",
"0.5139405",
"0.51285505",
"0.51267743",
"0.51252174",
"0.5048138",
"0.50332785",
"0.5028958",
"0.49913806",
"0.4958716",
"0.49567008",
"0.49434626",
"0.4937245",
"0.4930527",
"0.49145195",
"0.49110186",
"0.48902562",
"0.48675665",
"0.48236367",
"0.48172572"
] | 0.78313273 | 0 |
Given the soup for a condition, test that its dependencies are validly defined. | def _verify_dependencies(self, cond_id, undefined_paths, current_path):
# Exception for izpack conditions:
if cond_id in self.conditions.properties[WHITE_LIST]:
return True
# Short-circuit on well-defined conditions:
if cond_id in self.well_defined:
return True
# Short-circuit ill-defined conditions:
if cond_id in list(self.ill_defined.keys()):
current_path = current_path + ((cond_id, 'ill-defined condition'),)
undefined_paths.add(current_path)
return False
# Cycle checking:
tup = (cond_id, 'condition')
if tup in current_path:
current_path += ((cond_id, 'cyclic condition reference'),)
undefined_paths.add(current_path)
return False
# Check for undefined condition.
if not cond_id in self.conditions.get_keys():
tup = (cond_id, 'undefined condition')
current_path += (tup,)
undefined_paths.add(current_path)
return False
current_path += (tup,)
condition = self.conditions.container[cond_id]
condition_type = condition['type']
if condition_type in list(self.condition_tests.keys()) and not \
self.condition_tests[condition_type](self, condition, undefined_paths, current_path):
return False
self.well_defined.add(cond_id)
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def testConditionChecking(self):\n\n state = State.from_problem(self.prob)\n \n drive = self.dom.get_action(\"drive\")\n with drive.instantiate([\"agent\", \"tru1\", \"apt1\"], self.prob):\n self.assert_(state.is_satisfied(drive.precondition))\n\n with drive.instantiate([\"agent\", \"tru1\", \"apt2\"], self.prob):\n self.assertFalse(state.is_satisfied(drive.precondition))",
"def testConditionReasons(self):\n \n state = State.from_problem(self.prob)\n\n relevantVars = []\n drive = self.dom.get_action(\"drive\")\n with drive.instantiate([\"agent\", \"tru1\", \"apt1\"], self.prob):\n self.assert_(state.is_satisfied(drive.precondition, relevantVars))\n\n relevantVars = set(relevantVars)\n \n s1 = StateVariable(self.prob.functions[\"city-of\"][0], [self.prob[\"pos1\"]])\n s2 = StateVariable(self.prob.functions[\"city-of\"][0], [self.prob[\"apt1\"]])\n s3 = StateVariable(self.prob.functions[\"location-of\"][0], [self.prob[\"tru1\"]])\n \n self.assertEqual(len(relevantVars), 3)\n self.assert_(s1 in relevantVars)\n self.assert_(s2 in relevantVars)\n self.assert_(s3 in relevantVars)",
"def test_verify_dependencies(self, cond_id, conditions):\n\n if not cond_id in conditions.get_keys():\n return 1\n else:\n result = self.verify_dependencies(cond_id)\n return result",
"def _check_conditional_dependency(obj, condition, package, severity, msg=None):\n if condition:\n if msg is None:\n msg = (\n f\"The specific parameter values of {obj.__class__.__name__}'s \"\n f\"class instance require `{package}` installed. Please run: \"\n f\"`pip install {package}` to \"\n f\"install the `{package}` package. \"\n )\n try:\n _check_soft_dependencies(package, severity=severity, obj=obj)\n except ModuleNotFoundError as e:\n raise ModuleNotFoundError(msg) from e",
"def valid_dependency(self, dep):\r\n return True",
"def _alert_condition(self, soup: BeautifulSoup) -> bool:\n raise NotImplementedError()",
"def dependencies_satisfied(self, module) -> bool:\n for k, v in module.requirements.items():\n if k not in self.modules:\n return False\n found_ver = self.modules[k].version\n if isinstance(v, str):\n return found_ver == v\n elif isinstance(v, dict):\n if \"eq\" in v and (found_ver != v[\"eq\"]):\n return False\n if \"min\" in v and (found_ver < v[\"min\"]):\n return False\n if \"max\" in v and (found_ver > v[\"max\"]):\n return False\n else:\n return True\n return True",
"def test_condition_syntax(self):\n\n base = abs_path('./specs/')\n ps = Parser(base+'script3-1.py', base)\n\n spec = {'constraints': [{'block': 'A', 'condition': 'B=b1'}]}\n with self.assertRaises(ParseError):\n read_wrapper(spec, ps)\n\n spec = {'constraints': [{'block': 'A', 'condition': 'B b1'}]}\n with self.assertRaises(ParseError):\n read_wrapper(spec, ps)\n\n spec = {'constraints': [{'block': 'A', 'condition': 'B == 2.5'}]}\n read_wrapper(spec, ps)",
"def satisfied(self, fields, field, values):\n\n requires = field.get(\"requires\", [])\n\n if isinstance(requires, str):\n requires = [requires]\n\n for require in requires:\n if require not in fields or not fields[require].validate(store=False):\n return False\n\n if \"condition\" in field and self.env.from_string(field[\"condition\"]).render(**values) != \"True\":\n return False\n\n return True",
"def test_available(self):\n feature_guard = _make_requires(True, \"Error text\")\n results = []\n\n @feature_guard\n def inner():\n results.append(True)\n return True\n\n assert inner() is True\n assert [True] == results",
"def test_empty_condition(self):\n assert_that(Condition.is_valid(''), equal_to(True))",
"def conditions():\n pass",
"def test_verify_all_dependencies(self):\n\n for condition in self.all_references():\n result = self.verify_dependencies(condition)\n\n if result:\n self.ill_defined[condition] = result\n else:\n self.well_defined.add(condition)\n\n return self.ill_defined",
"def check_requirements(self): # pylint: disable=no-self-use\n self.is_skipped = False",
"def check_condition(self):\n\n\t\traw_context = {\n\t\t\t'folk': self.folk\n\t\t}\n\n\t\tstatus, param = self.execute(self.mission_grid, 'condition', self.pending_mission.kingdom, raw_context)\n\t\treturn status",
"def check_requirements(config=None):\n for dependency, module_requirements in (\n requirements(config, include_conditional=False).items()):\n for module_requirement in module_requirements:\n if \">=\" in module_requirement:\n module_name, required_version = module_requirement.split(\">=\")\n version_test = \">=\"\n elif \"==\" in module_requirement:\n module_name, required_version = module_requirement.split(\"==\")\n version_test = \"==\"\n else:\n module_name = module_requirement\n version_test = None\n\n try:\n module = __import__(module_name)\n except ImportError:\n logging.exception(\n \"Can't import %r which is part of %r\",\n module_name, dependency\n )\n raise MissingRequirementError(\n \"Can't import %r which is part of %r\"\n % (module_name, dependency), module_name, dependency\n )\n version = getattr(module, \"__version__\", None)\n file_path = getattr(module, \"__file__\", None)\n logger.info(\n \"Using %r version %r from %r to satisfy %r\",\n module_name, version, file_path, dependency\n )\n\n if version_test == \">=\":\n if version is None:\n raise MissingRequirementError(\n \"Version of %r isn't set as __version__ of module %r\"\n % (dependency, module_name), module_name, dependency\n )\n if LooseVersion(version) < LooseVersion(required_version):\n raise MissingRequirementError(\n \"Version of %r in %r is too old. %r < %r\"\n % (dependency, file_path, version, required_version),\n module_name, dependency\n )\n elif version_test == \"==\":\n if version is None:\n raise MissingRequirementError(\n \"Version of %r isn't set as __version__ of module %r\"\n % (dependency, module_name), module_name, dependency\n )\n if LooseVersion(version) != LooseVersion(required_version):\n raise MissingRequirementError(\n \"Unexpected version of %r in %r. %r != %r\"\n % (dependency, file_path, version, required_version),\n module_name, dependency\n )",
"def test_AnvilResearchStudy_Condition(anvil_research_studies_with_observations):\n _validate_ResearchStudy_Condition(anvil_research_studies_with_observations)",
"def _isCondition(self, column):\n\n for _, values in self._model.parameters.items():\n if column in values['depends']:\n return True\n return False",
"def test_are_dependency_packs_valid(self, current_file, id_set, answer):\n validator = get_validator(current_file)\n assert validator.are_dependency_packs_valid(id_set) is answer",
"def test_zt_requirement(self):\n test_passes = False\n try:\n self.parser.extract_zt(\"FOOBAR\")\n test_passes = False\n except:\n test_passes = True\n self.assertTrue(test_passes)",
"def condition_forward_checking(csp, var) :\n return False",
"def condition_forward_checking(csp, var) :\n return False",
"def check_condition(self, comment):\n if comment.id in self.touched_comment_ids:\n return False, None\n # First check for keywords in comment, for now we don't care about formatting after the keyword\n has_keyword = self.check_word_in_list_in_string(self.keywords, comment.body)\n if not has_keyword:\n return False, None\n # Next we check if we have states or abbreviations\n abbrevs = self.check_comment_for_dictionary_keys_and_values(comment, self.states)\n if len(abbrevs) < 1:\n return False, None\n if str(comment.author) == self.bot_name:\n return False, None\n for reply in comment.replies:\n if str(reply.author) == self.bot_name:\n return False, None\n\n return True, abbrevs",
"def testConditionBuilderErrors(self):\n\t\ttests = [r'2elem==1', r'elem=<3', r'elem == 0xXY']\n\n\t\tfor test in tests:\n\t\t\tself.assertRaises(ParserError, parseSearchCondition, test)",
"def testConditionalEffects(self):\n \n action = Parser.parse_as(cond_load.split(\"\\n\"), Action, self.domain)\n\n self.assert_(isinstance(action.effect, ConditionalEffect))\n self.assert_(isinstance(action.effect.condition, conditions.LiteralCondition))\n self.assert_(isinstance(action.effect.effect, SimpleEffect))",
"def is_valid(self):\n return not self.missing_from_disk and not self.missing_dependency",
"def __satisfies_necessary_and_sufficient_conditions(g):\n # Condition 0: at least 1 Edge\n if g.get_E() == 0:\n return False\n # Condition 1: indegree(v) == outdegree(v) for every vertex\n for v in range(g.get_V()):\n if g.outdegree() != g.indegree(v):\n return False\n # Condition 2: graph is connected, ignoring isolated vertices\n h = Graph(g.get_V())\n for v in range(g.get_V()):\n for w in g.adj_vertices(v):\n h.add_edge(v, w)\n # check that all non-isolated vertices are connected\n s = DirectedEulerianCycle.__non_isolated_vertex(g)\n bfs = BreadthFirstPaths(h, s)\n for v in range(g.get_V()):\n if h.degree(v) > 0 and not bfs.has_path_to(v):\n return False\n return True",
"def are_all_deps_fulfilled(self, package: 'Package', only_make_check: bool = False,\n only_depends: bool = False, print_reason: bool = False) -> bool:\n\n for dep in package.relevant_deps(only_make_check=only_make_check, only_depends=only_depends):\n if not self.provided_by(dep):\n if print_reason:\n aurman_note(\n \"Dependency {} of package {} is not fulfilled\".format(\n Colors.BOLD(Colors.LIGHT_MAGENTA(dep)),\n Colors.BOLD(Colors.LIGHT_MAGENTA(package.name))\n )\n )\n return False\n else:\n return True",
"def _sufficient_deps(cls, deps):\n if cls.MODEL_PACKAGE is None:\n return True\n else:\n for d in deps.conda:\n if cls.MODEL_PACKAGE in d:\n return True\n for d in deps.pip:\n if cls.MODEL_PACKAGE in d:\n return True\n return False",
"def test_check_job_dependencies_prior_dependency_has_errors(database):\n sess = database.session\n sub = SubmissionFactory(submission_id=1)\n job = JobFactory(submission_id=sub.submission_id, job_status_id=JOB_STATUS_DICT['finished'],\n job_type_id=JOB_TYPE_DICT['csv_record_validation'], file_type_id=FILE_TYPE_DICT['award'],\n number_of_errors=3)\n job_2 = JobFactory(submission_id=sub.submission_id, job_status_id=JOB_STATUS_DICT['waiting'],\n job_type_id=JOB_TYPE_DICT['csv_record_validation'], file_type_id=FILE_TYPE_DICT['award'])\n sess.add_all([sub, job, job_2])\n sess.commit()\n\n # Job 1 finished, it is a prerequisite for job 2 (waiting) but it has errors\n job_dep = JobDependency(job_id=job_2.job_id, prerequisite_id=job.job_id)\n sess.add(job_dep)\n sess.commit()\n\n check_job_dependencies(job.job_id)\n\n assert job_2.job_status_id == JOB_STATUS_DICT['waiting']"
] | [
"0.6394948",
"0.6064821",
"0.6038692",
"0.60127705",
"0.5969924",
"0.5887158",
"0.5801091",
"0.5776576",
"0.57205003",
"0.56385154",
"0.5614725",
"0.56072754",
"0.5539833",
"0.5475634",
"0.54365665",
"0.54158586",
"0.54152584",
"0.53983265",
"0.5347427",
"0.5322336",
"0.53194696",
"0.53194696",
"0.53169954",
"0.530525",
"0.52991325",
"0.529741",
"0.52888894",
"0.52828056",
"0.52557445",
"0.52420264"
] | 0.63148606 | 1 |
Tests if a 'variable' type condition is correctly defined. | def test_variable(self, condition, undefined_paths, current_path):
var = str(condition.find('name').text)
if not var in self.variables.get_keys() and self.fail_on_undefined_vars:
current_path += ((var, 'undefined variable'),)
undefined_paths.add(current_path)
return False
else:
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _validate_variable(self, variable):\n if variable is not None:\n # test type\n if not self.validate_type(variable):\n return False\n\n return True",
"def isvar(var):\n return _coconut_tail_call(isinstance, var, (Const, Var))",
"def _check_variable_definition(variable_name, variable_attrs):\n\n # Variable name must be type str\n if type(variable_name) != str:\n raise TypeError(\"Invalid variable name: \"+str(variable_name)+\" (must be string)\")\n\n # todo - add more tests to check validity of variable definition",
"def _is_op_defined(t_vars) -> bool:\n return all(t_var.name.startswith(\"Variable\") for t_var in t_vars)",
"def _assigns_typevar(node: nodes.NodeNG | None) -> bool:\n if isinstance(node, astroid.Call):\n inferred = utils.safe_infer(node.func)\n if (\n isinstance(inferred, astroid.ClassDef)\n and inferred.qname() in TYPE_VAR_QNAME\n ):\n return True\n return False",
"def _is_typevar(typeval: Type) -> bool:\n return isinstance(typeval, TypeVar) # type: ignore",
"def is_variable(item):\n return len(item) > 0 and item[0].upper() == item[0]",
"def _check_typevar(self, name: str, node: nodes.AssignName) -> None:\n if isinstance(node.parent, nodes.Assign):\n keywords = node.assign_type().value.keywords\n args = node.assign_type().value.args\n elif isinstance(node.parent, nodes.Tuple):\n keywords = (\n node.assign_type().value.elts[node.parent.elts.index(node)].keywords\n )\n args = node.assign_type().value.elts[node.parent.elts.index(node)].args\n\n variance = TypeVarVariance.invariant\n name_arg = None\n for kw in keywords:\n if variance == TypeVarVariance.double_variant:\n pass\n elif kw.arg == \"covariant\" and kw.value.value:\n variance = (\n TypeVarVariance.covariant\n if variance != TypeVarVariance.contravariant\n else TypeVarVariance.double_variant\n )\n elif kw.arg == \"contravariant\" and kw.value.value:\n variance = (\n TypeVarVariance.contravariant\n if variance != TypeVarVariance.covariant\n else TypeVarVariance.double_variant\n )\n\n if kw.arg == \"name\" and isinstance(kw.value, nodes.Const):\n name_arg = kw.value.value\n\n if name_arg is None and args and isinstance(args[0], nodes.Const):\n name_arg = args[0].value\n\n if variance == TypeVarVariance.double_variant:\n self.add_message(\n \"typevar-double-variance\",\n node=node,\n confidence=interfaces.INFERENCE,\n )\n self.add_message(\n \"typevar-name-incorrect-variance\",\n node=node,\n args=(\"\",),\n confidence=interfaces.INFERENCE,\n )\n elif variance == TypeVarVariance.covariant and not name.endswith(\"_co\"):\n suggest_name = f\"{re.sub('_contra$', '', name)}_co\"\n self.add_message(\n \"typevar-name-incorrect-variance\",\n node=node,\n args=(f'. \"{name}\" is covariant, use \"{suggest_name}\" instead'),\n confidence=interfaces.INFERENCE,\n )\n elif variance == TypeVarVariance.contravariant and not name.endswith(\"_contra\"):\n suggest_name = f\"{re.sub('_co$', '', name)}_contra\"\n self.add_message(\n \"typevar-name-incorrect-variance\",\n node=node,\n args=(f'. \"{name}\" is contravariant, use \"{suggest_name}\" instead'),\n confidence=interfaces.INFERENCE,\n )\n elif variance == TypeVarVariance.invariant and (\n name.endswith(\"_co\") or name.endswith(\"_contra\")\n ):\n suggest_name = re.sub(\"_contra$|_co$\", \"\", name)\n self.add_message(\n \"typevar-name-incorrect-variance\",\n node=node,\n args=(f'. \"{name}\" is invariant, use \"{suggest_name}\" instead'),\n confidence=interfaces.INFERENCE,\n )\n\n if name_arg is not None and name_arg != name:\n self.add_message(\n \"typevar-name-mismatch\",\n node=node,\n args=(name_arg, name),\n confidence=interfaces.INFERENCE,\n )",
"def has_value(var) :\n return var != None",
"def _should_use_varname_value(self, value):\n if isinstance(value, KnownValue):\n return type(value.val) in six.integer_types\n elif (\n type(value) is TypedValue\n ): # Only replace exactly TypedValue(int), not subtypes\n return value.typ in six.integer_types\n else:\n return value is UNRESOLVED_VALUE",
"def check_input_type(var, type_name):\n\n type_options = [\"int\", \"float\", \"Date\", \"Region\"]\n if type_name == type_options[0]:\n if int(var):\n return True\n else:\n return False\n elif type_name == type_options[1]:\n if float(var):\n return True\n else:\n return False\n elif type_name == type_options[2]:\n if datetime.date.fromisoformat(var):\n return True\n else:\n return False\n elif type_name == type_options[3]:\n valid_regions = [\"NW\", \"SW\", \"MN\", \"MS\", \"NE\", \"SE\"]\n is_valid = False\n for region in valid_regions:\n if var == region:\n is_valid = True\n return is_valid\n else:\n Exception(\"This type doesn't exist in the checker!\")",
"def test_exists(self, condition, undefined_paths, current_path):\n var = str(condition.find('variable').text)\n if not var in self.variables.get_keys() and self.fail_on_undefined_vars:\n current_path += ((var, 'undefined variable'),)\n undefined_paths.add(current_path)\n return False\n else:\n return True",
"def typematch(variable, expectedtype):\n\n # Return the result\n return isinstance(variable, expectedtype)",
"def is_tvar(x):\n return type(x) is T.TensorVariable",
"def _is_var_declaration(fortress, filename, start):\n v = \"var \"\n return fortress.source_code[filename][start : start + len(v)] == v",
"def has_type_var(annotation) -> bool:\n return any(\n is_type_var(arg) or has_type_var(arg)\n for arg in getattr(annotation, \"__args__\", [])\n )",
"def _isintvar(self, index):\n return 251 <= self._typlist[index] <= 253",
"def is_variable(arg: Expr) -> bool:\n return str(arg)[0].islower()",
"def _isintvar(self, index):\n return 65528 <= self._typlist[index] <= 65530",
"def id_is_variable(self):\n return not self.defined",
"def isVariableDefined(self, varID):\n \n for i in range(self.varList.count()):\n item = self.varList.item(i)\n if varID == item.getVariable().id and not item.isQuickplotItem():\n return True\n return False",
"def check_variable_line(self, line):\n self.E_str = \"check_variable_line\"\n line, any_vars = self.find_vars_in_str(line)\n words = [i for i in line.split('=') if i]\n words = self.fix_words(words)\n\n if len(words) < 2:\n self.print_error(\"The syntax for declaring variables is: \"\n + \"'<name> = <value>'\")",
"def validVarConstructType(self,vartype):\r\n indArray = vartype.find('[]')\r\n if indArray>0:\r\n thisType = vartype[0:indArray]\r\n isArray = True\r\n else:\r\n thisType = vartype\r\n isArray = False\r\n \r\n if thisType in ('rng','range'):\r\n type = 'range'\r\n elif thisType in ('rate'):\r\n type = 'rate'\r\n elif thisType in ('amt','amount'):\r\n type = 'amount'\r\n elif thisType in ('minamt','minamount'):\r\n type = 'minamount'\r\n elif thisType in ('bool'):\r\n type = 'bool'\r\n else:\r\n print 'variable type must be range, rate, amount, minamount, bool (or abbreviated forms)'\r\n return False, ''\r\n \r\n return True, type, isArray",
"def has_variable(self, col: str, name: str) -> bool:\n if self.scope is None:\n raise ValueError(\"Can't access variables on unbound modules\")\n return self.scope.has_variable(col, name)",
"def is_type_var(annotation) -> bool:\n\n return isinstance(annotation, typing.TypeVar) # type:ignore",
"def has_variable(self, name):\n return name in self._variables",
"def varIsValid(self, var):\n if len(var.getAxisList()) != 1:\n return False\n if self.myParent.numValues() != self.myParent.numValues():\n return False\n return True",
"def validVarConstruct(self,thisvar):\r\n validLength = self.validVarConstructLength(thisvar)\r\n if not validLength:\r\n return False, '', '', False\r\n validName, varName = self.validVarConstructName(thisvar[0])\r\n if not validName:\r\n return False, '', '', False \r\n validType, varType, varArray = self.validVarConstructType(thisvar[1])\r\n if not validType:\r\n return False, '', '', False\r\n \r\n return True, varName, varType, varArray",
"def has_var(self, var_name: str) -> bool:\n is_var = var_name.startswith('var$')\n\n class Scopes:\n Global = self.global_variables\n Task = self.task_variables\n Local = self.local_variables\n\n scope = Scopes.Local\n\n if is_var:\n var_name = var_name[3:]\n if var_name.startswith('$$$'):\n scope = Scopes.Global\n var_name = var_name[3:]\n elif var_name.startswith('$$'):\n scope = Scopes.Task\n var_name = var_name[2:]\n elif var_name.startswith('$'):\n scope = Scopes.Local\n var_name = var_name[1:]\n\n return var_name in scope.vars",
"def condition_forward_checking(csp, var) :\n return False"
] | [
"0.71227634",
"0.6907246",
"0.6733456",
"0.65082616",
"0.64736354",
"0.645199",
"0.64293915",
"0.63758117",
"0.6359905",
"0.63548344",
"0.63531965",
"0.63153076",
"0.6288625",
"0.62509894",
"0.62427473",
"0.62259036",
"0.6190489",
"0.6148015",
"0.6144128",
"0.6127952",
"0.6122063",
"0.61206627",
"0.61156344",
"0.59590524",
"0.59487283",
"0.59395254",
"0.59152746",
"0.5911148",
"0.59111047",
"0.5899859"
] | 0.71669835 | 0 |
Unzips a list of tuples, x. | def unzip(self, x):
if (len(x)>0):
return list(zip(*x))
else:
return x, list() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def unzip(pairs):\n return tuple(zip(*pairs))",
"def unzip(zipped):\n return zip(*zipped)",
"def unzip(seq):\n return zip(*seq)",
"def unzip(seq: Iterable) -> tuple:\n seq = iter(seq)\n # check how many iterators we need\n try:\n first = tuple(next(seq))\n except StopIteration:\n return tuple()\n # and create them\n niters = len(first)\n seqs = itertools.tee(itertoolz.cons(first, seq), niters)\n return tuple(itertools.starmap(itertoolz.pluck, enumerate(seqs)))",
"def unzip(seq):\n seq = iter(seq)\n # check how many iterators we need\n try:\n first = tuple(next(seq))\n except StopIteration:\n return tuple()\n # and create them\n niters = len(first)\n seqs = tee(cons(first, seq), niters)\n return tuple(starmap(pluck, enumerate(seqs)))",
"def lzip(*args):\n return list(zip(*args))",
"def unzip(i, iterable):\n return [x[i] for x in iterable]",
"def unzip(ls, nout):\n out = list(zip(*ls))\n if not out:\n out = [()] * nout\n return out",
"def unzip(iterable: Iterable[Tuple[Any, ...]]) -> Tuple[Iterator[Any], ...]:\n first, iterator = _common.peek(iter(iterable))\n if first is None:\n return ()\n tees = itertools.tee(iterator, len(first))\n return (map(operator.itemgetter(i), tee) for i, tee in enumerate(tees))",
"def unzip3(self) -> Tuple[List, List, List]:\n lista, listb, listc = [], [], []\n for a, b, c in self.array:\n lista.append(a)\n listb.append(b)\n listc.append(c)\n return lista, listb, listc",
"def unzip2(self) -> Tuple[List, List]:\n lista, listb = [], []\n for a, b in self.array:\n lista.append(a)\n listb.append(b)\n return lista, listb",
"def x_unzip(xid=None):\n\t_loadconfig()\n\tnavimport.conf.print_zips()\n\n\txrec = None\n\tif xid == None:\n\t\txrec = _prompt_xid(\"No to unzip >\")\n\telse:\n\t\txrec = navimport.conf.get_xplane_zip_info(0)\n\n\tif xrec != None:\n\t\tprint xrec\n\n\t\ts = \"unzip \"\n\t\ts += \" -d \" + navimport.conf.work_dir(\"/xplane_unzipped/%s\" % xrec['zip_dir'])\n\t\ts += \" \"\n\t\ts += navimport.conf.work_dir(\"/xplane_zips/%s\" % xrec['file_name'])\n\t\tlocal(s)",
"def unzip_lst(lst):\n unzipped = list(zip(*lst))\n unzipped_lsts = [list(tp) for tp in unzipped]\n return unzipped_lsts",
"def unzip_finite(\n iterable: Iterable[Tuple[Any, ...]],\n) -> Tuple[Iterator[Any], ...]:\n for zipped in zip(*iterable):\n yield zipped",
"def _unpack_tuple(x):\n if len(x) == 1:\n return x[0]\n else:\n return x",
"def _unpack_tuple(x):\n if len(x) == 1:\n return x[0]\n else:\n return x",
"def unzip_batch(batch):\n unzip = [[i for i, j in batch],\n [j for i, j in batch]]\n return np.transpose(unzip[0]), np.transpose(unzip[1])",
"def myzip(*iterables):\n result_list = []\n for i in range(len(iterables[0])): \n lst = []\n for k in iterables:\n lst.append(k[i])\n result_list.append(tuple(lst))\n return result_list",
"def zip() -> List:\n pass",
"def Unpack(Items):\n \n import numpy as np\n \n nda = np.array(Items)\n \n if len(nda.shape) < 2:\n msg = \"The input argument is a list of length 1. There is nothing to\" \\\n + \" unpack.\"\n \n raise Exception(msg)\n \n dim = nda.shape[1]\n \n # Initialise unpacked lists:\n X = []\n Y = []\n Z = []\n \n if dim == 2:\n # Unpack tuple and store in X, Y:\n for x, y in Items:\n X.append(x)\n Y.append(y)\n \n return X, Y\n \n elif dim == 3:\n # Unpack tuple and store in X, Y, Z:\n for x, y, z in Items:\n X.append(x)\n Y.append(y)\n Z.append(z)\n \n return X, Y, Z\n \n else:\n msg = f\"The input argument has dim = {dim}. Only dim = 2 or dim = 3 \"\\\n + \"is allowed.\"\n \n raise Exception(msg)",
"def easy_unpack(elements: Tuple[int]) -> Tuple[int]:\n\n return itemgetter(0, 2, -2)(elements)",
"def decompose_atom_list(atom_list):\n transpose = list(zip(*atom_list))\n if len(transpose) == 4:\n elements = np.array(transpose[0])\n array_a = np.array(transpose[1]).reshape(-1, 1)\n array_b = np.array(transpose[2]).reshape(-1, 1)\n array_c = np.array(transpose[3]).reshape(-1, 1)\n array_ab = np.concatenate((array_a, array_b), axis=1)\n coordinates = np.concatenate((array_ab, array_c), axis=1)\n return elements, coordinates\n elif len(transpose) == 5:\n elements = np.array(transpose[0])\n atom_ids = np.array(transpose[1])\n array_a = np.array(transpose[2]).reshape(-1, 1)\n array_b = np.array(transpose[3]).reshape(-1, 1)\n array_c = np.array(transpose[4]).reshape(-1, 1)\n array_ab = np.concatenate((array_a, array_b), axis=1)\n coordinates = np.concatenate((array_ab, array_c), axis=1)\n return elements, atom_ids, coordinates\n else:\n raise _FunctionError(\n \"The decompose_atom_list() function accepts only list of lists \"\n \" with only 4 or 5 items per sublist.\")",
"def unflatten(self, x):\n dims = [c.flat_dim for c in self.spaces]\n flat_x = np.split(x, np.cumsum(dims)[:-1])\n return tuple(c.unflatten(xi) for c, xi in zip(self.spaces, flat_x))",
"def from_tuples(cls, tuples):\n x, y = zip(*tuples)\n return cls(x, y)",
"async def azip(*aiters):\n anext_tuple = tuple([_.__aiter__() for _ in aiters])\n while True:\n try:\n next_tuple = tuple([await _.__anext__() for _ in anext_tuple])\n except StopAsyncIteration:\n break\n yield next_tuple",
"def transpose(lst):\n return list(zip(*lst))",
"def vstack (tup ):\n\tl = len(tup[0])\n\tfor j in tup:\n\t\tif l!=len(j):\n\t\t\tprint \"error: dimensions don't match\"\n\t\t\treturn\n\tm = [];\n\tfor i in range(0,len(tup)):\n\t\tm.extend(zip(*tup[i]))\n\treturn zip(*m)",
"def unzip_data():\n zip_ref = zipfile.ZipFile(data_zip, 'r')\n zip_ref.extractall('')\n zip_ref.close()",
"def unzip(input_file, output_file):\n output_file = validator.validate_unzip(input_file, output_file)\n process = subprocess.Popen([PBWT_BIN, 'unzip', input_file, output_file],\n stdout=subprocess.PIPE)\n process_results(str(process.communicate()[0]))",
"def vec2tuple(x):\n return (x.x, x.y, x.z)"
] | [
"0.75591034",
"0.6889838",
"0.68766",
"0.6743751",
"0.6709593",
"0.6456803",
"0.63671273",
"0.62731713",
"0.6267873",
"0.6260321",
"0.6234096",
"0.6152076",
"0.60384727",
"0.60337454",
"0.5970998",
"0.5970998",
"0.5949191",
"0.5846596",
"0.5812543",
"0.5775136",
"0.57129973",
"0.5703603",
"0.5631889",
"0.54450405",
"0.53926265",
"0.5385803",
"0.53790486",
"0.53475875",
"0.532716",
"0.5304803"
] | 0.79986495 | 0 |
returns True if `obj` is changed or deleted on the database | def is_changed(obj):
revision_field = get_version_fieldname(obj)
version = get_revision_of_object(obj)
return not obj.__class__.objects.filter(**{obj._meta.pk.name: obj.pk,
revision_field: version}).exists() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_live(self, obj):\n most_appropriate_object = get_appropriate_object_from_model(self.model)\n if most_appropriate_object == obj:\n return True\n return False",
"def has_change_permission(self, request, obj=None):\n if obj is not None:\n return False\n return super().has_change_permission(request, obj)",
"def isModelDirty(self):\n \n pass",
"def exists(self, obj):\n return False",
"def hasChanged(self):\r\n if self.is_updated:\r\n self.is_updated = False\r\n return True\r\n else:\r\n return False\r\n\r\n # if not self.hasBeenUpdatedOnce:\r\n # self.hasBeenUpdatedOnce = True\r\n # return True\r\n # else:\r\n # if BLENDER_MODE == 'BPY':\r\n # # for e in dir(self.obj): print(e)\r\n # # print(self.obj, self.obj.name, self.obj.is_updated, self.obj.is_updated_data)\r\n # # return self.obj.is_updated # DOESN't UPDATE A THING!\r\n # # return True\r\n # return self.is_updated\r\n\r\n # return False # no update in BGE mode\r",
"def bool(self, obj):\n return True",
"def bool(self, obj):\n return True",
"def has_update_permissions(self, obj):\n return True",
"def has_object_permission(self, request, view, obj):\n if request.method == \"GET\":\n return self.model_admin_config.has_view_permission(self, request, obj=obj)\n if request.method == \"PUT\":\n return self.model_admin_config.has_change_permission(self, request, obj=obj)\n if request.method == \"DELETE\":\n return self.model_admin_config.has_delete_permission(self, request, obj=obj)",
"def _is_sqlalchemy_object(obj):\n # TODO: better way?\n return hasattr(obj, \"_sa_instance_state\")",
"def equals(self, obj: object) -> bool:\n ...",
"def update_if_not_modified(self, obj, fields, upsert=False):\n spec = state(obj).original_document\n self.update(obj.__class__, spec, fields, upsert=upsert)\n err = self.impl.db.command(dict(getlasterror=1))\n return bool(err['n'] and err['updatedExisting'])",
"def has_change_permission(self, request, obj=None):\n has_class_permission = super(EntryAdmin, self).has_change_permission(request, obj)\n if not has_class_permission:\n return False\n if obj is not None and not request.user.is_superuser and request.user.id != obj.author.id:\n return False\n return True",
"def is_editable(obj, request):\n if hasattr(obj, \"is_editable\"):\n return obj.is_editable(request)\n else:\n perm = obj._meta.app_label + \".\" + obj._meta.get_change_permission()\n return request.user.is_authenticated() and request.user.has_perm(perm)",
"def has_change_permission(self, request, obj=None):\n return False",
"def _changeable_fields(self, request, obj):\n return not obj or not self.is_readonly(request, obj)",
"def has_object_permission(self, request, view, obj):\n usuario_request = request.user\n usuario_a_modificar = obj\n\n return usuario_request != usuario_a_modificar",
"def _objectDeleted(self, obj):\n pass",
"def has_change_permission(self, request, obj=None) -> bool:\n permission = super().has_change_permission(request, obj)\n\n if obj is not None:\n permission &= (obj.owner == request.user) or request.user.is_superuser\n\n return permission",
"def isDirty(*args, connection: bool=True, datablock: bool=True, **kwargs)->bool:\n pass",
"def _can_update(self):\r\n if not self._is_persisted: return False\r\n pks = self._primary_keys.keys()\r\n return all([not self._values[k].changed for k in self._primary_keys])",
"def isDirty(self):\n\t#@DEBUG christophe have to fix denoising optionnal issue prior to set isDirty() to True\n return False",
"def has_object_permission(self, request, view, obj):\n if request.method in permissions.SAFE_METHODS:\n return True\n # When the user make a request It will check that is on Safe methods, so it return true if the user is \n # trying to update is own profile or return false. And also it will return the obj.id == request.user.id\n return obj.id == request.user.id",
"def is_dirty(self):\n return self.dirty",
"def is_dirty(self):\n return self.dirty",
"def is_deleted(self):\n if self.deleted:\n return True\n return False",
"def __contains__(self, obj):\n if isinstance(obj, self):\n query = self.where(**obj.data).select()\n result = query.execute()\n if result.count:\n return True\n return False",
"def changed(self):\n if self.exists():\n return self.current_content != self.content\n else:\n return True",
"def should_save(self):\n return self.modified",
"def isdirty(self):\n\n return not not self._olddata"
] | [
"0.6930333",
"0.6525286",
"0.64501333",
"0.64336836",
"0.6431397",
"0.64131296",
"0.64131296",
"0.6349598",
"0.63452303",
"0.63142246",
"0.63139683",
"0.6300048",
"0.62984204",
"0.62824523",
"0.6207841",
"0.616058",
"0.61573696",
"0.6101484",
"0.60884404",
"0.60836864",
"0.6051057",
"0.6023432",
"0.6001744",
"0.5986488",
"0.5986488",
"0.59842175",
"0.59692633",
"0.59689844",
"0.5959626",
"0.59452575"
] | 0.80351144 | 0 |
Redefine the root graph for universe. It omits edges whose labels are in omit_edge_labels and also does not store references for nodes they point at. This is used mostly to get rid of uniquely identifying nodes. | def re_root(self, omit_edge_label: List[str]):
self.leaves = {node for node in self.nodes
if any([edge.label in omit_edge_label for edge in self.edges_to(node)])}
root_nodes = self.nodes - self.leaves
root_edges = {edge for edge in self.edges if edge.node_to in root_nodes and edge.node_from in root_nodes}
self.root_graph = Mask(root_nodes, root_edges, self) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def reset_graph(self):\n self.nodes = {}\n self.add_node(self.initial_state)\n self.add_node(self.final_state)",
"def reset_graph(self):\n raise NotImplementedError",
"def reset_graph(self):\n self.graph = OrderedDict()",
"def _restoreGraph(self):\n\n # self.tempG = self.g.copy()\n\n if nx.is_directed(self.g):\n self.tempG = nx.DiGraph(self.g)\n else:\n self.tempG = nx.Graph(self.g)\n self.deletedEdges = []\n self.deletedNodes = []",
"def strip_useless_attributes(self):\n graph_dict = self.graph.graph\n if \"node\" in graph_dict and \"label\" in graph_dict[\"node\"]:\n graph_dict[\"node\"].pop(\"label\")\n if \"graph\" in graph_dict:\n graph_dict.pop(\"graph\")",
"def ResetGraph(self):\n self.nodes = []\n self.edges = []\n self.connections = []\n Node.resetNodeCount()\n Edge.resetEdgesCount()",
"def clear(self):\n \n self.node_set.clear()\n self.prefix.clear()\n self.suffix.clear()\n self.num_node = 0\n self.edges = 0",
"def unifyPreviewNodes(self):\n\n self.leaves.update(self.forced)\n self.forced = set()",
"def restore(self):\n self.nodes.restore()",
"def __init__(self, nodes: Set[Node], edges: Set[Edge]):\n super().__init__()\n self.nodes = nodes\n for edge in edges:\n self.add_edge(edge)\n self.leaves = {node for node in self.nodes if self.degree_out(node) == 0}\n root_nodes = self.nodes - self.leaves\n root_edges = {edge for edge in self.edges if edge.node_to not in self.leaves}\n self.root_graph = Mask(root_nodes, root_edges, self)",
"def clear_registered_nodes(self):\n self.__nodes.clear()\n self.__names.clear()\n self.__aliases.clear()",
"def reset():\n\n globals()[\"currentGraph\"] = CompositionGraph()",
"def clear(self):\r\n self.nodes = collections.defaultdict(list)\r\n self.nodes_mapping = collections.defaultdict(list)\r\n self.edges = 0\r\n #self.children_length={}\r\n self.parents_length = collections.defaultdict(lambda : collections.defaultdict(int))",
"def _Restore(self) -> None:\n self._SetNodes(self._nodes)",
"def root_replace(self,node):\r\n self.feature_index = node.feature_index\r\n self.threshold = node.threshold\r\n self.label = node.label\r\n self.left = node.left\r\n self.right = node.right\r\n self.substitute = node.substitute\r\n if node.left is not None and node.right is not None:\r\n node.left.parents.remove(node) if node in node.left.parents else node.left.parents\r\n node.left.parents.append(self) if self not in node.left.parents else node.left.parents\r\n node.right.parents.remove(node) if node in node.right.parents else node.right.parents\r\n node.right.parents.append(self) if self not in node.right.parents else node.right.parents",
"def reset(self):\n self.G = nx.Graph()\n self.form.plot_canvas.plot(self.G)",
"def complete_graph(self):\n root_nodes = set()\n\n for name, a_block in self.wf['action'].items():\n\n a_block['name'] = name\n\n for n in a_block.get('needs', []):\n if not self.wf['action'][n].get('next', None):\n self.wf['action'][n]['next'] = set()\n self.wf['action'][n]['next'].add(name)\n\n if not a_block.get('needs', None):\n root_nodes.add(name)\n\n self.wf['root'] = root_nodes",
"def reset_edges(self):\n\n # Ensure original edges are stored in cache, otherwise nothing to do.\n if self._modified_edges is None or self._weighted_modified_edges is None:\n return\n\n # Restore the former value from cache\n self.adj_matrices = {**self.adj_matrices, **self._modified_edges}\n self.degree_weighted_matrices = {**self.degree_weighted_matrices, **self._weighted_modified_edges}\n self.in_degree = {**self.in_degree, **self._orig_in_degree}\n self.out_degree = {**self.out_degree, **self._orig_out_degree}\n\n # Reset the edge and degree cache\n self._modified_edges = None\n self._weighted_modified_edges = None\n self._orig_in_degree = dict()\n self._orig_out_degree = dict()",
"def update_node2edge(self):\n self.node2edge = {e.child : e for e in self.edge}\n childrenset = set(self.node2edge.keys())\n rootset = set(e.parent for e in self.edge).difference(childrenset)\n if len(rootset) > 1:\n raise Warning(\"there should be a single root: \" + str(rootset))\n if len(rootset) == 0:\n raise Exception(\"there should be at least one root!\")\n self.root = rootset.pop()",
"def reset_edges(self):\n super().reset_edges()\n\n # If we're in default state, notheing to rest\n if self._modified_weighted_adj_matrices is None:\n return\n\n # Degrees are reset, so we need to reset the original weight scaling\n if self.scale_weights and not self.scaling_skipped:\n self._scale_weights_to_degree()\n self._generate_weighted_adj_matrices()\n else:\n # No weight scaling so just load prev values from cache\n self.weighted_adj_matrices = {**self.weighted_adj_matrices, **self._modified_weighted_adj_matrices}\n self._modified_weighted_adj_matrices = None",
"def start_new_graph(self):\n self.nodes = {}\n self.reset_graph()",
"def reset_tree(self):\n self.root = None\n self.action = None\n self.dist_probability = None",
"def disown(self):\r\n for apply_node in self.apply_nodes:\r\n del apply_node.fgraph\r\n del apply_node.deps\r\n for variable in self.variables:\r\n del variable.fgraph\r\n del variable.clients\r\n self.apply_nodes = set()\r\n self.variables = set()\r\n self.inputs = None\r\n self.outputs = None",
"def to_undirected_graph(self):\n visited = set() \n G = Graph.Graph()\n \n for node in self.node_set:\n \n if node not in visited:\n visited.add(node)\n for i in self.suffix[node]:\n G.add_edge(node, i)\n \n return G",
"def reset(self):\n\t\tself.graph = OrderedDict()\n\t\tself.bottoms = OrderedDict()\n\t\tself.output_shape = OrderedDict()\n\t\tself.cur_tensor = None\n\t\tself.cur_id = None\n\t\tself.tmp_list = []\n\t\tself.log_init()",
"def clear(self):\n self._nodes = { }\n self._arcs = set()",
"def __root(T: \"Graph\"):\n T_copy = T.copy()\n\n # Leaves are removed from the copy untill 1 or 2 vertices remain\n while len(T_copy.vertices) > 2:\n vertices_to_remove = []\n for v in T_copy.vertices:\n if v.degree == 1:\n vertices_to_remove.append(v)\n for v in vertices_to_remove:\n T_copy.del_vertex(v)\n\n root_labels = []\n for v in T_copy.vertices:\n root_labels.append(v.label)\n\n # From the original tree, the roots are returned\n T_root = []\n for v in T.vertices:\n if v.label in root_labels:\n T_root.append(v)\n\n return T_root",
"def reset(self):\n self.edges = None\n self.chi = None\n self.k = None\n self.n_bins = None\n self.classes = None\n self.n_params = None",
"def clear_nastran(self):\n self.eid_map = {}\n self.nid_map = {}\n self.eid_to_nid_map = {}\n self.element_ids = None\n self.node_ids = None",
"def clear(self):\n self.root = _NGramMapNode()\n self.size_freqs = dict()\n self.ele_freqs = dict()"
] | [
"0.66198105",
"0.6407392",
"0.61415255",
"0.61093223",
"0.6048665",
"0.60347766",
"0.60298306",
"0.5988843",
"0.5949874",
"0.5943684",
"0.5910985",
"0.5874265",
"0.58275336",
"0.57994217",
"0.5747671",
"0.57045126",
"0.57044613",
"0.565234",
"0.56324726",
"0.5624728",
"0.56222004",
"0.56187963",
"0.5585501",
"0.55618304",
"0.5561007",
"0.55475235",
"0.5544825",
"0.55439115",
"0.55436563",
"0.54859316"
] | 0.7510522 | 0 |
Initializes the VNIStatsTableEntrySchema object attributes. | def __init__(self, py_dict=None):
super(VNIStatsTableEntrySchema, self).__init__()
self.update_arp = None
self.query_arp = None
if py_dict is not None:
self.get_object_from_py_dict(py_dict) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def InitStats(ss):\n\n ss.SumSSE = 0\n ss.SumAvgSSE = 0\n ss.SumCosDiff = 0\n ss.SumErr = 0\n ss.FirstZero = -1\n ss.NZero = 0\n\n ss.TrlErr = 0\n ss.TrlSSE = 0\n ss.TrlAvgSSE = 0\n ss.EpcSSE = 0\n ss.EpcAvgSSE = 0\n ss.EpcPctErr = 0\n ss.EpcCosDiff = 0",
"def _init_table(self, table: \"Table\"):\n if not self.columns:\n self.columns = table.columns\n self._data = table.data",
"def __init__(self):\n _snap.TTableRow_swiginit(self, _snap.new_TTableRow())",
"def __init__(self, schema=None):\n self._dict = {}\n self.schema = schema",
"def __init__(self, *args):\n _snap.TTable_swiginit(self, _snap.new_TTable(*args))",
"def _setup_stats(self) -> None:\n\n # Save statistics\n self.mass = np.array([0])\n self.mass_balance = np.array([0])\n self.mass_balance_trend = np.array([0])",
"def __init__(self, *args):\n _table.Table_swiginit(self, _table.new_Table(*args))",
"def __init__(self, metrics, schema, table, nid):\n\n self.id = nid\n self.metrics = metrics\n self.schema = schema\n self.table = table\n self.batch_size = 20\n self.__init_metrics()",
"def __init__(self):\n self.table = {}",
"def _initialize_table(self):\n self.table = gtk.Table()\n self.table.set_col_spacings(8)\n self.table.set_row_spacings(3)\n self.window.add(self.table)\n self._view_schedule()\n self.table.show()",
"def setup(self):\n self.table = prettytable.PrettyTable()\n self.table.field_names = self.titles\n if self.convert_columns:\n self.rows = self.convert_columns_to_rows(self.rows)\n if self.colour:\n self.colour = self.convert_columns_to_rows(self.colour)",
"def __init__(self, schema=None):\n self.schema = schema or {}",
"def __init__(self):\n self.buckets = 1009\n self.table = [{} for _ in range(self.buckets)]",
"def __init__(self, *args):\n _snap.Schema_swiginit(self, _snap.new_Schema(*args))",
"def __init__(self):\n super(ObjectSchema, self).__init__()\n self.is_allow_undefined = False",
"def __init__(self):\n self.title = None\n self.table = pd.DataFrame()\n self.column_widths = None;",
"def _init(self):\n self._nfields = 0\n self._converted = {}\n self._heapoffset = 0\n self._heapsize = 0\n self._col_weakrefs = weakref.WeakSet()\n self._coldefs = None\n self._gap = 0\n self._uint = False",
"def setup_table(self):\n\n self.setup.create_basic_table_in_dev()\n self.setup.insert_random_records_into_dev()",
"def __init__(self, *args, **kwargs):\n \n self.dense = True\n\n # Create table\n super().__init__(*args, **kwargs)",
"def __init__(self, PTableV):\n _snap.TTableIterator_swiginit(self, _snap.new_TTableIterator(PTableV))",
"def init_blank(self, T):\n self.headings = []\n self.table = {}\n self.rowcount = 0\n for e in T.entries:\n self.headings.append(e.name)\n self.table[e.name] = []",
"def process_table_init(self):\n logging.debug(\"Processing table initialization, %d entries\",\n len(self.table_initialization))\n\n for init_entry in self.table_initialization:\n for table_name, entry_desc in init_entry.items():\n self.air_table[table_name].add_entry(\n table_entry.description_to_entry(entry_desc))",
"def _initialise_sufficient_statistics(self):\n stats = super()._initialise_sufficient_statistics()\n\n stats['B'] = {\n 'numer': [\n np.zeros((self.n_states, self.n_features[i]))\n for i in range(self.n_emissions)\n ],\n 'denom': [\n np.zeros((self.n_states, self.n_features[i]))\n for i in range(self.n_emissions)\n ],\n }\n\n return stats",
"def __init__(self, table_name, cursor=None, schema=None):\n self.name = table_name\n self.tablespace_name = None\n self.table_type = None\n self.columns = {}\n self.indexes = {}\n self.constraints = {}\n self.triggers = {}\n if schema:\n self.schema = schema\n else:\n self.schema = None\n if cursor:\n self._get_table(cursor)",
"def __init__(self):\n self.table = {}\n self.ls = []",
"def setup_table(self):\n self.interface.start_transaction()\n self.interface.drop_table(_history_table)\n self.interface.drop_table(_history_stats_table)\n self.interface.create_table(_history_table)\n self.interface.create_index('index1', _history_table, [_history_table['timestamp']])\n self.interface.create_table(_history_stats_table)\n self.interface.create_index('index2', _history_stats_table, [_history_stats_table['benchmark']])\n self.interface.commit_transaction()",
"def __init__(self, schema ):\n self.schema = schema",
"def _get_table_schema(self):\n\n return {\n 'AttributeDefinitions': [\n {\n 'AttributeName': self._key_field.name,\n 'AttributeType': self._key_field.data_type\n }\n ],\n 'TableName': self.table_name,\n 'KeySchema': [\n {\n 'AttributeName': self._key_field.name,\n 'KeyType': 'HASH'\n }\n ],\n 'ProvisionedThroughput': {\n 'ReadCapacityUnits': self.read_capacity_units,\n 'WriteCapacityUnits': self.write_capacity_units\n }\n }",
"def _initialize(self):\n query_table = self._cursor.execute(f\"\"\"\n SELECT name\n FROM sqlite_master\n WHERE type='table' AND name='{self._table_name}';\"\"\")\n\n if not query_table.fetchone():\n self._cursor.execute(f\"\"\"\n CREATE TABLE {self._table_name} (\n id char(36),\n term TEXT,\n timestamp BIGINT\n );\"\"\")\n\n self._cursor.execute(f\"\"\"\n CREATE INDEX index_timestamp\n ON {self._table_name} (timestamp);\"\"\")\n\n self._conn.commit()",
"def init_widget(self):\n super(QtViewTable, self).init_widget()\n d = self.declaration\n self.set_table_model(d.table_model)\n self.set_orientation(d.orientation)"
] | [
"0.6132577",
"0.61266005",
"0.582939",
"0.56516737",
"0.56412935",
"0.5606859",
"0.5601135",
"0.55859405",
"0.5571048",
"0.555038",
"0.5544171",
"0.54862404",
"0.54842335",
"0.54761785",
"0.5465003",
"0.5443442",
"0.5419218",
"0.5413731",
"0.54091996",
"0.539648",
"0.5395933",
"0.538987",
"0.5384708",
"0.5361659",
"0.53546315",
"0.5330871",
"0.53015184",
"0.52919835",
"0.52830005",
"0.52745175"
] | 0.70146513 | 0 |
Search through a table and return the first [row, column] pair whose value is None. | def find_unassigned_table_cell(table):
for row in range(len(table)):
for column in range(len(table[row])):
if table[row][column] is None:
return row, column
return row, column | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_first_element(cls, d):\n\n t = np.where(d[:, 2] > 0)[0]\n if len(t):\n return d[t[0], 0], d[t[0], 1], t[0]\n return None, None, None",
"def firstEmptyCell(board):\r\n for i in range(9):\r\n for j in range(9):\r\n if board[i][j] == 0:\r\n return (i, j) # row, col\r\n return None",
"def get_first_selection(table, column_name):\n def replace(entry):\n if pd.isnull(entry):\n return None\n else:\n return re.sub(r',.*', '', entry)\n assert (isinstance(table, Table)), \"Input not a supported type.\"\n column = table.apply(replace, column_name)\n return table.append_column(column_name, column)",
"def _find_empty_cell(self):\n\n for r, row in enumerate(self._board):\n for c, cell in enumerate(row):\n if cell is None:\n return r, c",
"def get_next_empty_cell(self):\n for row in range(len(self.grid)):\n for col in range(len(self.grid[0])):\n if self.grid[row][col] == 0:\n return (row, col)\n return None",
"def find_next_empty_cell(grid):\n for i, row in enumerate(grid):\n for j, col in enumerate(row):\n if col == 0:\n return (i, j)\n return None",
"def find_empty(bo):\n for i in range(len(bo)):\n for j in range(len(bo[0])):\n if bo[i][j] == 0:\n return (i, j)\n\n return None",
"def first_valid_index(self) -> Optional[Union[Scalar, Tuple[Scalar, ...]]]:\n data_spark_columns = self._internal.data_spark_columns\n\n if len(data_spark_columns) == 0:\n return None\n\n cond = reduce(lambda x, y: x & y, map(lambda x: x.isNotNull(), data_spark_columns))\n\n with sql_conf({SPARK_CONF_ARROW_ENABLED: False}):\n # Disable Arrow to keep row ordering.\n first_valid_row = (\n self._internal.spark_frame.filter(cond)\n .select(self._internal.index_spark_columns)\n .limit(1)\n .toPandas()\n )\n\n # For Empty Series or DataFrame, returns None.\n if len(first_valid_row) == 0:\n return None\n\n first_valid_row = first_valid_row.iloc[0]\n if len(first_valid_row) == 1:\n return first_valid_row.iloc[0]\n else:\n return tuple(first_valid_row)",
"def find_empty(grid):\n for i in range(LEN_GRID):\n for j in range(LEN_GRID):\n if grid[i][j] == 0:\n return (i, j) # row, col\n return None",
"def not_null(table_rows, col_name_list=[], col_num_list=[]):\n keys = col_name_list\n rst = True\n lst = []\n if(not keys): #key == [] or key == None\n keys = [table_rows[0].keys[x] for x in col_num_list]\n\n row_num = 0\n for row in table_rows:\n for key in keys:\n if(row.kv[key].strip() == \"\"):\n rst = False\n lst.append(\"(col:{0},row:{1})\".format(\n key, row_num\n ))\n row_num += 1\n return rst,\",\".join(lst)",
"def fetch_first(self, tablename):\n\n query = 'select * from ' + tablename + \" ASC LIMIT 1\"\n try:\n self.__cur.execute(query)\n except Exception as e:\n self.__conn.rollback()\n raise e\n fetcheddata = self.__cur.fetchall()\n if fetcheddata:\n fetcheddata = fetcheddata[0]\n fetcheddata = self.__helper._functions__rowtodict([fetcheddata])\n return fetcheddata[0]\n return None",
"def find_empty(board):\n for ii in range(len(board)):\n for jj in range(len(board[ii])):\n if board[ii][jj] == 0:\n print('Empty: ', (jj , ii)) # column, row\n return jj, ii # column, row\n return None",
"def get_first(self):\n return self.A[1][0] if self.n > 0 else None",
"def find_first_free_cell(board, picked_column):\n for row in reversed(range(len(board))):\n if board[row][picked_column] == 0:\n return row",
"def find_empty_col(slots):\n index = 0\n for i in list(zip(*list(slots.values())[::])):\n if sum([1 for j in list(i) if j]) == 0:\n return index\n index += 1\n return 6",
"def get_val_or_null(map, row, col):\n value = map.get(row, col)\n if map.mtype == \"CELL\" and value == CNULL:\n value = FNULL\n return value",
"def _first_non_none_value(items: Iterable[tuple[bool | None, str] | None]) -> tuple[bool, str]:\n for item_opt in items:\n if item_opt is not None:\n item, reason = item_opt\n if item is not None:\n return item, reason\n return False, \"default\"",
"def _get_none(self, x, y):\n try:\n return self[x, y]\n except ArrayError:\n return None",
"def row_by_value(idl_, table, column, match, default=_NO_DEFAULT):\n tab = idl_.tables[table]\n for r in tab.rows.values():\n if getattr(r, column) == match:\n return r\n if default is not _NO_DEFAULT:\n return default\n raise None",
"def searchColumnHeadings(self, table: Table):\n lista = []\n if table:\n for col in table.columns:\n lista.append(col.name)\n return lista\n return None",
"def find_index(row):\n value = row[index]\n if value in seen:\n return seen[value]\n for row_ in merged.iter_dicts(True):\n if row_[index] == value:\n seen[value] = row_[\"index\"]\n return row_[\"index\"]\n return None",
"def find_first_non_nan(array):\n for index, value in enumerate(array):\n if not np.isnan(value):\n return index",
"def is_match(cells):\n if len(cells) == 1 and \"-\" not in cells:\n return list(cells)[0]\n return None",
"def find(self, value):\n for row in range(self.getHeight()):\n for column in range(self.getWidth()):\n if self[row][column] == value:\n return (row, column)\n return None",
"def find_empty(self):\n num_rows = len(self.board)\n num_cols = len(self.board[0])\n\n for i in range(num_rows):\n for j in range(num_cols):\n if self.board[i][j] == 0:\n return (i, j)",
"def first_value(self):\n if not self.is_empty():\n return self.data[self.head]\n return None",
"def get_column(col_to_search, value_to_match, col_to_get, table, db_file):\n \n try:\n conn, c = connect_to_db(db_file) \n c.execute('SELECT {cg} FROM {t} WHERE {col}=\"{value}\"'.format(t=safe(table), \n cg=safe(col_to_get), col=safe(col_to_search), value=safe(value_to_match)))\n column = c.fetchone()\n conn.close()\n return column\n except Exception as e:\n print(\"Error when trying to fetch row in table\", table, \"in database file\", db_file)\n print(e)\n return None",
"def _FollowedByEmpty(row, index):\n return not any(row[index + 1:])",
"def _get_next_element(cls, d, idx):\n t = np.where(d[:, 2] > 0)[0]\n t = t[t > idx]\n if len(t):\n return d[t[0], 0], d[t[0], 1], t[0]\n return None, None, None",
"def find_empty(game_board):\n for row in range(len(game_board)):\n for col in range(len(game_board[row])):\n if len(game_board[row][col]) == 2:\n return row, col\n for row in range(len(game_board)):\n for col in range(len(game_board[row])):\n if len(game_board[row][col]) >= 3:\n return row, col\n\n return None"
] | [
"0.64960337",
"0.634782",
"0.633694",
"0.6331864",
"0.6245917",
"0.6233299",
"0.60606706",
"0.60600805",
"0.5962007",
"0.5913134",
"0.5856047",
"0.5801559",
"0.57643837",
"0.57631135",
"0.5742628",
"0.5713552",
"0.56185967",
"0.5603105",
"0.559049",
"0.55511755",
"0.54848987",
"0.5481309",
"0.54790294",
"0.5473898",
"0.54591984",
"0.54591745",
"0.54491323",
"0.5446869",
"0.5445198",
"0.54064846"
] | 0.7773614 | 0 |
Create mock input block. | def fixture_input_block():
return Mock() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def mockRawInput(mock):\n original_raw_input = __builtin__.raw_input\n __builtin__.raw_input = lambda _: mock\n yield\n __builtin__.raw_input = original_raw_input",
"def get_input_mock(inputs=None): # Use this mock if a contest requires interactive input.\n stdin_mock = MagicMock()\n stdin_mock.side_effect = inputs # You can emulate standard input lines by using this list.\n return stdin_mock",
"def fixture_output_block():\n return Mock()",
"def make_an_xblock(cls, **kw):\n course_id = SlashSeparatedCourseKey('foo', 'bar', 'baz')\n runtime = Mock(course_id=course_id)\n scope_ids = Mock()\n field_data = DictFieldData(kw)\n xblock = AdaptiveNumericInput(runtime, field_data, scope_ids)\n xblock.xmodule_runtime = runtime\n return xblock",
"def fake_input(inputs):\n it = iter(inputs)\n def mock_input(prompt=''):\n try:\n return next(it)\n except StopIteration as e:\n raise EOFError('No more inputs given') from e\n\n return patch('builtins.input', mock_input)",
"def test_raw_input_ex(input_output):\n with mock.patch.object(builtins, 'input', lambda _: input_output):\n assert GC.raw_input_ex() == input_output",
"def parse_input_mocked_metadata(monkeypatch):\n\n def _parse_input(text, cwd=None):\n \"\"\"The parser fixture accepts a blackbird string to parse\"\"\"\n text = \"name mockname\\nversion 1.0\\n\" + text\n lexer = blackbirdLexer(antlr4.InputStream(text))\n stream = antlr4.CommonTokenStream(lexer)\n parser = blackbirdParser(stream)\n\n tree = parser.start()\n\n bb = BlackbirdListener(cwd=cwd)\n walker = antlr4.ParseTreeWalker()\n walker.walk(bb, tree)\n\n return bb.program\n\n return _parse_input",
"def mock_inputs(inputs):\n def inner(test_func):\n @wraps(test_func)\n def wrapped(*args):\n replier = Replier(inputs)\n old_input = createsuperuser.input\n createsuperuser.input = replier\n createtenant.raw_input = replier\n\n old_getpass = createsuperuser.getpass\n createsuperuser.getpass = GetPass(replier)\n\n try:\n test_func(*args)\n finally:\n createsuperuser.input = old_input\n createtenant.raw_input = raw_input\n createsuperuser.getpass = old_getpass\n return wrapped\n return inner",
"def make_block(self, in_size, out_size, **kwargs):\n raise NotImplementedError(\"Abstract\")",
"async def test_is_block_momentary_input(mock_block_device, monkeypatch) -> None:\n monkeypatch.setattr(mock_block_device.blocks[DEVICE_BLOCK_ID], \"type\", \"relay\")\n\n monkeypatch.setitem(mock_block_device.settings, \"mode\", \"roller\")\n monkeypatch.setitem(\n mock_block_device.settings, \"rollers\", [{\"button_type\": \"detached\"}]\n )\n assert (\n is_block_momentary_input(\n mock_block_device.settings,\n mock_block_device.blocks[DEVICE_BLOCK_ID],\n )\n is False\n )\n assert (\n is_block_momentary_input(\n mock_block_device.settings, mock_block_device.blocks[DEVICE_BLOCK_ID], True\n )\n is True\n )\n\n monkeypatch.setitem(mock_block_device.settings, \"mode\", \"relay\")\n monkeypatch.setitem(mock_block_device.settings[\"device\"], \"type\", \"SHSW-L\")\n assert (\n is_block_momentary_input(\n mock_block_device.settings, mock_block_device.blocks[DEVICE_BLOCK_ID], True\n )\n is False\n )\n\n monkeypatch.delitem(mock_block_device.settings, \"relays\")\n monkeypatch.delitem(mock_block_device.settings, \"rollers\")\n assert (\n is_block_momentary_input(\n mock_block_device.settings,\n mock_block_device.blocks[DEVICE_BLOCK_ID],\n )\n is False\n )\n\n monkeypatch.setitem(mock_block_device.settings[\"device\"], \"type\", \"SHBTN-2\")\n\n assert (\n is_block_momentary_input(\n mock_block_device.settings,\n mock_block_device.blocks[DEVICE_BLOCK_ID],\n )\n is True\n )",
"def part1_test_input():\n return \"\"\"\"\"\"",
"def test(self, parent, block):\r\n pass",
"def test_prompt_setInput_default_valid(self):\n self.prompt.setInput()\n\n self.assertEquals(\n self.prompt._instr,\n '/dev/tty'\n )\n\n with mock.patch('__builtin__.raw_input', return_value='mocked input') as mockinput:\n result = self.prompt._prompt({}, {\n 'say': 'test',\n 'ask': 'varname'\n })\n\n self.assertEquals(result['ansible_facts']['varname'], 'mocked input')",
"def __init__(self, meta_model, input_spec):\r\n\r\n # Check if the input specifications are correct\r\n RM.check_if_type(input_spec, list, 'The input specifications')\r\n for i in range(len(input_spec)):\r\n RM.check_if_type(input_spec[i], str, 'Input specification %x' % i)\r\n RM.check_if_poss_input_spec(input_spec[i], i)\r\n\r\n InputDecorator.__init__(self, meta_model)\r\n\r\n self.input_spec = input_spec",
"def create_input_element(self, **kwargs):\r\n return None",
"def mock_workflow():\n\n workflow = DockerBuildWorkflow(\"mock:default_built\", source=SOURCE)\n workflow.source = StubSource()\n builder = StubInsideBuilder().for_workflow(workflow)\n builder.set_df_path('/mock-path')\n base_image_name = ImageName.parse(\"mock:tag\")\n builder.parent_images[ImageName.parse(\"mock:base\")] = base_image_name\n builder.base_image = base_image_name\n builder.tasker = flexmock()\n workflow.builder = flexmock(builder)\n\n return workflow",
"def open_input(name=None):\n return Input(name)",
"def dummy_input(request, tmpdir_factory):\n # Init variables\n\n input_path = tmpdir_factory.mktemp(\"input_data\").join(\"datasink_test_s3.txt\")\n\n # Create input file\n input_path.write_binary(b\"ABCD1234\")\n\n # Return path\n return str(input_path)",
"def part2_test_input():\n return \"\"\"\"\"\"",
"def sample_input(self, loader, is_test=False):\n pass",
"def test_exit_on_input_signal_error(input_block, kwargs):\n input_block.side_effect = Exception()\n with pytest.raises(SystemExit) as exc_info:\n create_flow(**kwargs)\n assert exc_info.value.code == 1",
"def test_prompt_setInput_stringio_valid(self):\n instr = StringIO.StringIO()\n self.prompt.setInput(instr)\n\n self.assertEquals(instr, self.prompt._instr)\n self.assertEquals(instr.getvalue(), \"\")\n\n with mock.patch('__builtin__.raw_input', return_value='mocked input') as mockinput:\n result = self.prompt._prompt({}, {\n 'say': 'test',\n 'ask': 'varname'\n })\n\n self.assertEquals(result['ansible_facts']['varname'], 'mocked input')",
"def test0_init(self):\n print(\"\\nTest 0: Initialization\")\n builder = StaticBuilder()\n in1_name = builder.addInput(10)\n in1 = builder.input_nodes[in1_name]\n \n print('Node keys in builder:', list(builder.input_nodes.keys()))\n self.assertEqual(in1.label, 0, \"The label has not been assigned correctly\")\n self.assertEqual(builder.num_nodes, 1, \"The number of nodes has not been \"\n \"assigned correctly\")\n self.assertEqual(in1.num_declared_outputs, 0, \"The number of outputs of \"\n \"the InputNode has not been assigned correctly\")\n self.assertEqual(in1.num_declared_inputs, 0, \"The number of outputs of \"\n \"the InputNode has not been assigned correctly\")",
"def MockArbitraryBuffer( filetype ):\n\n # Arbitrary, but valid, single buffer open.\n current_buffer = VimBuffer( os.path.realpath( 'TEST_BUFFER' ),\n filetype = filetype )\n\n with MockVimBuffers( [ current_buffer ], [ current_buffer ] ):\n yield",
"def test_default_creation_2():\n actual = os.path.join('.', 'test_files', 'rc_test_default.input')\n times = list(range(0, 30, 5))\n params = {\"names\": ['V'],\n \"values\": [\n [1],\n [0],\n [-1],\n [0],\n [1]\n ]\n }\n input_creator = InputCreator(None, times, params)\n f_out = input_creator.default_creation_2()\n with open(actual) as f_actual:\n actual_content = f_actual.read()\n\n content = f_out.getvalue()\n\n assert_equal(content, actual_content)",
"def createMemoryBlock(self, name: unicode, start: ghidra.program.model.address.Address, input: java.io.InputStream, length: long, overlay: bool) -> ghidra.program.model.mem.MemoryBlock:\n ...",
"def test_dummy():\n dummyblock = DummyBlockNode(\n name=\"None\",\n parameters=(),\n ancestor=None,\n dirty=False,\n filepath=\"/some/random/path\"\n )\n dummydirective = DummyDirectiveNode(\n name=\"Name\",\n ancestor=None,\n filepath=\"/another/path\"\n )\n dummycomment = DummyCommentNode(\n comment=\"Comment\",\n ancestor=dummyblock,\n filepath=\"/some/file\"\n )",
"def test_execute_job_with_inline_input_values(self):\n cwl = {\n \"cwlVersion\": \"v1.0\",\n \"class\": \"CommandLineTool\",\n \"baseCommand\": [\"python3\", \"script.py\"],\n \"inputs\": {\n \"stringInput\": \"string\",\n \"integerInput\": \"int\",\n \"doubleInput\": \"float\",\n \"stringArrayInput\": {\"type\": {\"type\": \"array\", \"items\": \"string\"}},\n \"integerArrayInput\": {\"type\": {\"type\": \"array\", \"items\": \"int\"}},\n \"floatArrayInput\": {\"type\": {\"type\": \"array\", \"items\": \"float\"}},\n \"measureStringInput\": \"string\",\n \"measureIntegerInput\": \"int\",\n \"measureFloatInput\": \"float\",\n \"measureFileInput\": \"File\"\n },\n \"requirements\": {\n CWL_REQUIREMENT_APP_DOCKER: {\n \"dockerPull\": \"python:3.7-alpine\"\n },\n CWL_REQUIREMENT_INIT_WORKDIR: {\n \"listing\": [\n {\n \"entryname\": \"script.py\",\n \"entry\": cleandoc(\"\"\"\n import json\n import os\n import ast\n input = $(inputs)\n try:\n for key, value in input.items():\n if isinstance(value, dict):\n path_ = value.get(\"path\")\n if path_ and os.path.exists(path_):\n with open (path_, \"r\") as file_:\n file_data = file_.read()\n input[key] = ast.literal_eval(file_data.upper())\n json.dump(input, open(\"./tmp.txt\", \"w\"))\n except Exception as exc:\n print(exc)\n raise\n \"\"\")\n }\n ]\n }\n },\n \"outputs\": [{\"id\": \"output_test\", \"type\": \"File\", \"outputBinding\": {\"glob\": \"tmp.txt\"}}],\n }\n body = {\n \"processDescription\": {\n \"process\": {\n \"id\": self._testMethodName,\n \"title\": \"some title\",\n \"abstract\": \"this is a test\",\n },\n },\n \"deploymentProfileName\": \"http://www.opengis.net/profiles/eoc/wpsApplication\",\n \"executionUnit\": [{\"unit\": cwl}],\n }\n try:\n desc, _ = self.deploy_process(body, describe_schema=\"OLD\")\n except colander.Invalid:\n self.fail(\"Test\")\n\n assert desc[\"process\"] is not None\n\n with contextlib.ExitStack() as stack_exec:\n for mock_exec in mocked_execute_process():\n stack_exec.enter_context(mock_exec)\n tmp_file = stack_exec.enter_context(tempfile.NamedTemporaryFile(mode=\"w\", suffix=\".json\")) # noqa\n tmp_file.write(json.dumps({\"value\": {\"ref\": 1, \"measurement\": 10.3, \"uom\": \"m\"}}))\n tmp_file.seek(0)\n\n exec_body = {\n \"mode\": EXECUTE_MODE_ASYNC,\n \"response\": EXECUTE_RESPONSE_DOCUMENT,\n \"inputs\": {\n \"stringInput\": \"string_test\",\n \"integerInput\": 10,\n \"doubleInput\": 3.14159,\n \"stringArrayInput\": [\"1\", \"2\", \"3\", \"4\", \"5\", \"6\"],\n \"integerArrayInput\": [1, 2, 3, 4, 5, 6],\n \"floatArrayInput\": [1.45, 2.65, 3.5322, 4.86, 5.57, 6.02],\n \"measureStringInput\": {\n \"value\": \"this is a test\"\n },\n \"measureIntegerInput\": {\n \"value\": 45\n },\n \"measureFloatInput\": {\n \"value\": 10.2\n },\n \"measureFileInput\": {\n \"href\": \"file://{}\".format(tmp_file.name)\n }\n },\n \"outputs\": [\n {\"id\": \"output_test\", \"type\": \"File\"},\n ]\n }\n\n proc_url = \"/processes/{}/jobs\".format(self._testMethodName)\n resp = mocked_sub_requests(self.app, \"post_json\", proc_url, timeout=5,\n data=exec_body, headers=self.json_headers, only_local=True)\n assert resp.status_code in [200, 201], \"Failed with: [{}]\\nReason:\\n{}\".format(resp.status_code, resp.json)\n status_url = resp.json.get(\"location\")\n\n results = self.monitor_job(status_url)\n\n job_output_path = results.get(\"output_test\")[\"href\"].split(self.settings[\"weaver.wps_output_path\"])[-1]\n tmp_file = \"{}/{}\".format(self.settings[\"weaver.wps_output_dir\"], job_output_path)\n\n try:\n with 
open(tmp_file, \"r\") as f:\n processed_values = json.load(f)\n except FileNotFoundError:\n self.fail(\"Output file [{}] was not found where it was expected to resume test\".format(tmp_file))\n except Exception as exception:\n self.fail(\"An error occurred during the reading of the file: {}\".format(exception))\n assert processed_values[\"stringInput\"] == \"string_test\"\n assert processed_values[\"integerInput\"] == 10\n assert processed_values[\"doubleInput\"] == 3.14159\n assert processed_values[\"stringArrayInput\"] == [\"1\", \"2\", \"3\", \"4\", \"5\", \"6\"]\n assert processed_values[\"integerArrayInput\"] == [1, 2, 3, 4, 5, 6]\n assert processed_values[\"floatArrayInput\"] == [1.45, 2.65, 3.5322, 4.86, 5.57, 6.02]\n assert processed_values[\"measureStringInput\"] == \"this is a test\"\n assert processed_values[\"measureIntegerInput\"] == 45\n assert processed_values[\"measureFloatInput\"] == 10.2\n assert processed_values[\"measureFileInput\"] == {\"VALUE\": {\"REF\": 1, \"MEASUREMENT\": 10.3, \"UOM\": \"M\"}}",
"async def test_get_block_input_triggers(mock_block_device, monkeypatch) -> None:\n monkeypatch.setattr(\n mock_block_device.blocks[DEVICE_BLOCK_ID],\n \"sensor_ids\",\n {\"inputEvent\": \"S\", \"inputEventCnt\": 0},\n )\n monkeypatch.setitem(\n mock_block_device.settings, \"rollers\", [{\"button_type\": \"detached\"}]\n )\n assert set(\n get_block_input_triggers(\n mock_block_device, mock_block_device.blocks[DEVICE_BLOCK_ID]\n )\n ) == {(\"long\", \"button\"), (\"single\", \"button\")}\n\n monkeypatch.setitem(mock_block_device.settings[\"device\"], \"type\", \"SHBTN-1\")\n assert set(\n get_block_input_triggers(\n mock_block_device, mock_block_device.blocks[DEVICE_BLOCK_ID]\n )\n ) == {\n (\"long\", \"button\"),\n (\"double\", \"button\"),\n (\"single\", \"button\"),\n (\"triple\", \"button\"),\n }\n\n monkeypatch.setitem(mock_block_device.settings[\"device\"], \"type\", \"SHIX3-1\")\n assert set(\n get_block_input_triggers(\n mock_block_device, mock_block_device.blocks[DEVICE_BLOCK_ID]\n )\n ) == {\n (\"long_single\", \"button\"),\n (\"single_long\", \"button\"),\n (\"triple\", \"button\"),\n (\"long\", \"button\"),\n (\"single\", \"button\"),\n (\"double\", \"button\"),\n }",
"def dummy_code_block() -> CodeBlock:\n return make_dummy_code_block()"
] | [
"0.6843587",
"0.66973007",
"0.6653255",
"0.63940287",
"0.6144296",
"0.6105641",
"0.5916343",
"0.58348626",
"0.57676345",
"0.56919837",
"0.55923426",
"0.55832005",
"0.55725527",
"0.5560723",
"0.5554768",
"0.55407476",
"0.5519871",
"0.5497402",
"0.54970086",
"0.5474238",
"0.5459816",
"0.54584813",
"0.5456826",
"0.5443481",
"0.5430159",
"0.54167366",
"0.54028976",
"0.53781635",
"0.53760946",
"0.53734905"
] | 0.8625998 | 0 |
Create mock output block. | def fixture_output_block():
return Mock() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fixture_input_block():\n return Mock()",
"def test_03_out(self, mock_stdout):\n msg = udocker.Msg(udocker.Msg.MSG)\n msg.out(\"111\", \"222\", \"333\", 444, ('555'))\n self.assertEqual(\"111 222 333 444 555\\n\", mock_stdout.getvalue())\n sys.stdout = STDOUT\n sys.stderr = STDERR",
"def test_block_default_output(self):\n\n @output('out1', default=True)\n @output('out2')\n class MyBlock(Block):\n pass\n\n blk = MyBlock()\n self.configure_block(blk, {})\n sig1, sig2, sig3 = (\n Signal({'sig': 1}),\n Signal({'sig': 2}),\n Signal({'sig': 3}),\n )\n blk.notify_signals([sig1], 'out1')\n self.assert_last_signal_notified(sig1, 'out1')\n blk.notify_signals([sig2], 'out2')\n self.assert_last_signal_notified(sig2, 'out2')\n # Notify the last signal without any output ID, make sure the actual\n # default terminal of the block is used\n blk.notify_signals([sig3])\n self.assert_last_signal_notified(sig3, 'out1')",
"def setUp(self):\n self._output = io.StringIO()",
"def _generate_output(self):\n raise NotImplementedError()",
"def __init__(self, output_mediator_object):\n super(TestOutputModule, self).__init__(output_mediator_object)\n self.events = []\n self.macb_groups = []",
"def _populate_output(self):\n pass",
"def setUp(self):\n\t\tself.output = self.switchstdout()",
"def mock_output(self, command, _rc, output, _err):\n\n self.mocks[command] = (_rc, output, _err)",
"def simulate_block():\n return '''\n```sh\n# In build directory\n./simulate\n```'''",
"def test_export_custom(self): # pylint: disable=no-self-use\n mock_record_str = Mock(str)\n\n def formatter(record): # pylint: disable=unused-argument\n return mock_record_str\n\n mock_stdout = Mock()\n exporter = ConsoleLogExporter(out=mock_stdout, formatter=formatter)\n log_data = LogData(\n log_record=LogRecord(),\n instrumentation_scope=InstrumentationScope(\n \"first_name\", \"first_version\"\n ),\n )\n exporter.export([log_data])\n mock_stdout.write.assert_called_once_with(mock_record_str)",
"def test_create(self):\n _help = \"[Usage: create <class name>]\\n\"\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd(\"help create\")\n self.assertEqual(f.getvalue(), _help)",
"def test_dump(\n mock_hvac_client_read, mock_dump, localhost_client, gen_input_config, gen_processed_config, gen_vault_response_kv1\n):\n mock_hvac_client_read.return_value = gen_vault_response_kv1()\n\n localhost_client.dump(gen_input_config(), \"out.json\")\n\n mock_hvac_client_read.assert_called_with(gen_input_config()[\"vault_secrets\"][\"acme.user\"])\n mock_dump.assert_called_with(gen_processed_config(), \"out.json\")",
"def create_outputs(block):\n return [nbbase.NotebookNode(output)\n for output in json.loads(block['content'])]",
"def test(self, parent, block):\r\n pass",
"def test_default_output(self):\n env = pike.Environment()\n output = pike.Graph('output')\n output.sink = pike.noop()\n with patch.object(output, 'run') as run:\n run.return_value = []\n env.set_default_output(output)\n with pike.Graph('g') as graph:\n pike.glob('.', '*')\n env.add(graph)\n env.run_all()\n run.assert_called_with([])",
"def test_exit_on_output_cb_error(output_block, kwargs):\n output_block.side_effect = Exception()\n with pytest.raises(SystemExit) as exc_info:\n create_flow(**kwargs)\n assert exc_info.value.code == 1",
"def test_create_stream_handler(self, mock_handler: MagicMock):\n\n instance = mock_handler.return_value\n instance.setFormatter = MagicMock()\n instance.setLevel = MagicMock()\n\n handler = create_stream_handler(self.level, self.format)\n\n mock_handler.assert_called_with(stdout)\n instance.setFormatter.assert_called_with(self.format)\n instance.setLevel.assert_called_with(self.level)\n\n self.assertIsNotNone(handler)",
"def __init__(self):\n super(MockOutputModule, self).__init__()\n self.hostname = None\n self.port = None",
"def setUp(self):\n super(BlockBookingsReportTests, self).setUp()\n self.user1 = baker.make_recipe('booking.user')\n self.user2 = baker.make_recipe('booking.user')\n\n self.event_type = baker.make_recipe('booking.event_type_PC')\n\n self.user1_active_block = baker.make_recipe(\n 'booking.block_5', user=self.user1,\n start_date=timezone.now() - timedelta(10),\n block_type__event_type=self.event_type,\n paid=True\n )\n self.user2_active_block = baker.make_recipe(\n 'booking.block_5', user=self.user2,\n start_date=timezone.now() - timedelta(10),\n block_type__event_type=self.event_type, paid=True\n )\n\n user1_bookings_on_block = baker.make_recipe(\n 'booking.booking',\n user=self.user1,\n event__event_type=self.event_type,\n block=self.user1_active_block,\n date_booked=timezone.now() - timedelta(8),\n _quantity=2\n )\n self.user1_booking_not_on_block = baker.make_recipe(\n 'booking.booking',\n user=self.user1,\n event__event_type=self.event_type,\n date_booked=timezone.now() - timedelta(8)\n )\n user1_booking_old = baker.make_recipe(\n 'booking.booking',\n user=self.user1,\n event__event_type=self.event_type,\n date_booked=timezone.now() - timedelta(12)\n )\n user1_booking_free = baker.make_recipe(\n 'booking.booking',\n user=self.user1,\n event__event_type=self.event_type,\n free_class=True,\n date_booked=timezone.now() - timedelta(8)\n )\n\n # redirect stdout so we can test it\n self.output = StringIO()\n self.saved_stdout = sys.stdout\n sys.stdout = self.output",
"def write_output(self):",
"def create_output(self, messages):",
"def testNewOutputModule(self):\n manager.OutputManager.RegisterOutput(TestOutput)\n\n output_module = manager.OutputManager.NewOutputModule('test_output')\n self.assertIsInstance(output_module, TestOutput)\n\n with self.assertRaises(ValueError):\n manager.OutputManager.NewOutputModule(1)\n\n with self.assertRaises(KeyError):\n manager.OutputManager.NewOutputModule('bogus')\n\n manager.OutputManager.DeregisterOutput(TestOutput)",
"def generateStandardMock(monkeypatch, return_value_output, return_value_error, return_code, type=\"gpt\"):\n mock_popen = mock.MagicMock()\n mock_popen.communicate.return_value = (return_value_output, return_value_error)\n mock_popen.returncode = return_code\n def popen_constructor(*args, **kargs):\n return mock_popen\n def mock_table_type(*args, **kargs):\n return type\n monkeypatch.setattr(\"subprocess.Popen\", popen_constructor)\n if type != None:\n monkeypatch.setattr(\"weresync.device.DeviceManager.get_partition_table_type\", mock_table_type)",
"def GetTestWrapper(self):\n return ''",
"def test_updated_display1(self):\n capturedOutput = io.StringIO()\n sys.stdout = capturedOutput\n r1 = Rectangle(2, 3, 2, 2)\n r1.display()\n sys.stdout = sys.__stdout__\n desired = '\\n\\n ##\\n ##\\n ##\\n'\n self.assertEqual(capturedOutput.getvalue(), desired)",
"def setUp(self):\n self.actualstdout = sys.stdout\n sys.stdout = StringIO.StringIO()",
"def test_generateconfig(self):\n args = mock.Mock()\n args.debug = None\n args.generateconfig = True\n args.config = None\n expected_text = ('Sample configuration file written to sample_config.json\\n'\n \"Replicate the site JSON for each site.\\n\"\n \" Valid values for use_https and local are 'True' and 'False'\\n\"\n \" One site must have local set to 'True'\\n\"\n 'Replicate the export JSON for each exported contract.\\n')\n with mock.patch('sys.stdout', new=StringIO()) as fake_out:\n execute_tool(args)\n self.assertEqual(fake_out.getvalue(), expected_text)",
"def test_format_start_new_output(self):\n # Setup params and mock result of parking lot execution\n start_new_cmd = \"create_parking_lot\"\n result = 5\n\n # Verify formatting is correct\n success, output = self.controller.format_start_new_output(result)\n self.assertTrue(success)\n self.assertEqual(output, \"Created a parking lot with {} slots\".format(result))",
"def test_html_output(self):\n pass"
] | [
"0.64789915",
"0.6219317",
"0.6141315",
"0.61250573",
"0.5885682",
"0.58214647",
"0.5781268",
"0.5775005",
"0.5772843",
"0.57672167",
"0.57131493",
"0.5694267",
"0.5624239",
"0.56047845",
"0.5593248",
"0.55418766",
"0.5537944",
"0.5513371",
"0.5504529",
"0.5503281",
"0.54982126",
"0.54970914",
"0.5494901",
"0.54899496",
"0.54666483",
"0.5453526",
"0.54494965",
"0.5445999",
"0.54419893",
"0.5434951"
] | 0.85911703 | 0 |
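The two fixture records above both return a bare Mock(); as a hedged illustration of how such fixtures are typically consumed, here is a minimal pytest-style sketch (the read/write method names and the test body are hypothetical, not taken from the dataset):

```python
from unittest.mock import Mock

import pytest


@pytest.fixture(name="input_block")
def fixture_input_block():
    # Bare mock standing in for a real input block.
    return Mock()


@pytest.fixture(name="output_block")
def fixture_output_block():
    # Bare mock standing in for a real output block.
    return Mock()


def test_payload_is_forwarded(input_block, output_block):
    # Hypothetical wiring: read from the input mock, write to the output mock,
    # then assert on the recorded call.
    input_block.read.return_value = b"payload"
    output_block.write(input_block.read())
    output_block.write.assert_called_once_with(b"payload")
```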
Calculates the hexadecimal value of a user-entered base-10 integer. As long as the user-entered base-10 value is greater than 0, the function stores the remainder modulo 16 in a deque (used as a stack) and uses a dictionary to map remainders 10-15 to their hex letters. Outputs the stored digits as the hexadecimal value at the end. | def hex_calc(value):
hex_dict = { # Dictionary for hex values over 9
10: "A",
11: "B",
12: "C",
13: "D",
14: "E",
15: "F"
}
    hex_stack = deque()  # Deque used as a stack to hold the hexadecimal digits
while value > 0:
remainder = value % 16
if remainder > 9:
remainder = hex_dict[remainder]
hex_stack.append(remainder)
else:
hex_stack.append(remainder)
value = value // 16
print("Hexadecimal Value: ", end="")
while hex_stack:
print(hex_stack.pop(), end="") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def decimal_to_hexadecimal(number):\n if number >= 1 and number <= 10: #if the positive integer is less than 10, its binary form is itself\n print(number)\n else:\n \"\"\"\n divide number by 16, take the reminder and start again until the result is 0\n \"\"\"\n new_number = []\n while number > 0:\n new_number.append(int(number%16))\n number = number // 16\n if number == 10: #for number greater than 10, the integer will be represented as hexadecimal element\n number == \"A\"\n elif number == 11:\n number == \"B\"\n elif number == 12:\n number == \"C\"\n elif number == 13:\n number == \"D\"\n elif number == 14:\n number == \"E\"\n elif number == 15:\n number == \"F\"\n print(str(new_number))",
"def int2hex(n: int) -> str:",
"def baseConverter(number, base):\n\n digits = \"0123456789ABCDEF\"\n\n remainders = Stack()\n\n while number > 0:\n rem = number % base\n remainders.push(rem)\n number = number // base\n\n result = \"\"\n\n while not remainders.isEmpty():\n popped = remainders.pop()\n digit = digits[popped]\n result += str(digit)\n return result",
"def test_int_to_hex():\n hex_values = ['61', '62', '63', '64', '65', '66', '67', '68', '69', '6a', '6b', '6c', '6d', '6e', '6f',\n '70', '71', '72', '73', '74', '75', '76', '77', '78', '79', '7a', '7b', '7c', '7d', '7e', '7f']\n index = 0\n for x in range(97, 123):\n assert pi_finder.int_to_hex(x, hex_dict) == hex_values[index]\n index += 1",
"def fn(c):\n ans = 0\n for k in range(1, 16): \n ans = min(ans, k*16+k, key=lambda x: abs(x - int(c, 16)))\n return hex(ans)[2:].zfill(2)",
"def padded_dec2base(n, q, base):\n convertstring = \"0123456789ABCDEF\"\n if n < base:\n return convertstring[n].zfill(q)\n else:\n return (dec2base(n // base, base) + convertstring[n % base]).zfill(q)",
"def convert_base(num, n):\r\n new_num_string = ''\r\n current = num\r\n while current != 0:\r\n remainder = current % n\r\n if remainder > 9:\r\n remainder_string = HEX_CHARS[remainder]\r\n elif remainder >= 36:\r\n remainder_string = '('+str(remainder)+')'\r\n else:\r\n remainder_string = str(remainder)\r\n new_num_string = remainder_string+new_num_string\r\n current = current//n\r\n return new_num_string",
"def int_to_hex(n):\r\n #return \"0x%X\" % n\r\n return hex(n)",
"def conv_hex(num):\n\n if num < 10:\n return str(num)\n if num == 10:\n return 'A'\n if num == 11:\n return 'B'\n if num == 12:\n return 'C'\n if num == 13:\n return 'D'\n if num == 14:\n return 'E'\n if num == 15:\n return 'F'",
"def base_converter(decimal_number, base):\n digits = \"0123456789ABCDEF\"\n quotient_stack = Stack()\n reminder = decimal_number\n while reminder > 0:\n quotient = reminder % base\n quotient_stack.push(quotient)\n reminder = reminder // base\n\n new_string = \"\"\n while not quotient_stack.is_empty():\n new_string = new_string + digits[quotient_stack.pop()]\n return new_string",
"def hx(i):\n a = hex(i)[2:]\n if len(a)<2: a = ''.join(['0',a])\n return a",
"def convert_dec(integer, base):\n digits = '0123456789ABCDEFGHIJKLMNOP'\n s = Stack()\n while integer:\n s.push(digits[integer%base])\n integer //= base\n b = ''\n while not s.is_empty():\n b += str(s.pop())\n return b",
"def convertebase10basen(basedest, numero):\n ret = \"\"\n while True:\n digit = numero%basedest\n ret = ret + DIGITOS[digit]\n numero = numero // basedest\n if numero == 0:\n break\n return ret[::-1]",
"def to_hex(value: int, length: int = -1) -> str:\n\n power_index = -2\n\n result = \"\"\n\n while power_index == -2 or power_index >= 0:\n\n if value == 0:\n if power_index == -2:\n result = \"0\"\n break\n else:\n while power_index >= 0:\n result += \"0\"\n power_index -= 1\n break\n\n # Find largest power of 16\n power = 0\n for power in range(0, value):\n if pow(16, power) > value:\n power = power - 1\n break\n\n # Find multiplier\n multiplier = 0\n for multiplier in range(1, 16):\n if multiplier * pow(16, power) > value:\n multiplier = multiplier - 1\n break\n\n if power_index == -2:\n power_index = power\n\n while power_index > power:\n result += \"0\"\n power_index -= 1\n\n # Power index = power\n if multiplier < 10:\n result += str(multiplier)\n else:\n if multiplier == 10:\n result += \"A\"\n elif multiplier == 11:\n result += \"B\"\n elif multiplier == 12:\n result += \"C\"\n elif multiplier == 13:\n result += \"D\"\n elif multiplier == 14:\n result += \"E\"\n elif multiplier == 15:\n result += \"F\"\n else:\n print(\"Serious error, multiplier cannot be above 15 with base 16\")\n\n value = value - multiplier * pow(16, power)\n power_index -= 1\n\n if length > len(result):\n while length > len(result):\n result = \"0\" + result\n\n return result",
"def hackerrank_Python_String_print_formatted_decimal_octal_hex_binary():\n def print_formatted(number):\n # your code goes here\n\n padw = len(bin(number).lstrip(\"0b\"))\n for i in range(1, number+1):\n print(str(i).rjust(padw) + \" \" \\\n + str(oct(i).lstrip(\"0\")).rjust(padw) + \" \" \\\n + str(hex(i).lstrip(\"0x\").upper()).rjust(padw) + \" \" \\\n + str(bin(i).lstrip(\"0b\").rjust(padw)))\n\n print_formatted(20)\n # 1 1 1 1\n # 2 2 2 10\n # 3 3 3 11\n # 4 4 4 100 ...",
"def binary_calc(value):\r\n binary_stack = deque() # Queue to hold binary representation\r\n\r\n while value > 0:\r\n remainder = value % 2\r\n binary_stack.append(remainder) # Add binary digit to queue\r\n value = value // 2\r\n\r\n print(\"Binary Value: \", end=\"\")\r\n while binary_stack:\r\n print(binary_stack.pop(), end=\"\")",
"def encode(record: int) -> str:\r\n result = ''\r\n queue = record\r\n while queue:\r\n remainder = queue % BASE\r\n queue = floor(queue / BASE)\r\n result = CODEX[remainder] + result\r\n return result",
"def phred(q):\n n = int(q * 30 + 33)\n if n == 43:\n n += 1\n if n == 58:\n n += 1\n return chr(n)",
"def baseEncode(number, base=36):\n if base == 10:\n return str(number)\n if not isinstance(number, int):\n raise TypeError('number must be an integer')\n alphabet='0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'\n if base > 62 or base <=1:\n print(\"base should be between 2 and 62\")\n return None\n sign = \"\"\n if number < 0:\n sign = \"-\"\n number = -number\n alphabet = alphabet[:base+1]\n if 0 <= number and number <base:\n return sign+alphabet[number]\n numberbase=\"\"\n while number != 0:\n number, i = divmod(number, base)\n numberbase = alphabet[i] + numberbase\n return sign+numberbase",
"def create_hex(num):\n\n # Leverage method 2 outlined here: https://www.wikihow.com/Convert-from-Decimal-to-Hexadecimal\n\n hexadecimal = ''\n while num >= 16:\n remainder = num % 16\n num = num // 16\n # Convert the remainder to hex & append to hexadecimal string\n hexadecimal = conv_hex(remainder) + hexadecimal\n # Convert the final quotient to hex & append to hexadecimal string\n hexadecimal = conv_hex(num) + hexadecimal\n\n return hexadecimal",
"def base10toN(num, base):\n\n converted_string, modstring = \"\", \"\"\n currentnum = num\n if not 1 < base < 37:\n raise ValueError(\"base must be between 2 and 36\")\n if not num:\n return '0'\n while currentnum:\n mod = currentnum % base\n currentnum = currentnum // base\n converted_string = chr(48 + mod + 7*(mod > 10)) + converted_string\n return converted_string",
"def decimal_to_base(n, base):\n\n chars = \"0123456789ABCDEF\"\n stack = []\n is_negative = False\n\n if n < 0:\n n = abs(n)\n is_negative = True\n\n while n > 0:\n remainder = n % base\n stack.append(remainder)\n n = n // base\n\n result = \"\"\n\n while stack:\n result = result + chars[stack.pop()]\n\n if is_negative:\n return \"-\"+result\n else:\n return result",
"def int_to_hex(num):\n return hex(num)",
"def base10toN(num, base):\n\n converted_string, modstring = \"\", \"\"\n\n currentnum = num\n\n if not 1 < base < 37:\n raise ValueError(\"base must be between 2 and 36\")\n\n if not num:\n return '0'\n\n while currentnum:\n mod = currentnum % base\n currentnum = currentnum // base\n converted_string = chr(48 + mod + 7*(mod > 10)) + converted_string\n\n return converted_string",
"def myHash(string, base=91, mod=1000000321):\n value = 0\n for pos, elem in enumerate(string[::-1]): # считаем значение полинома\n value += ord(elem) * base**pos # в последней задаче сделано с помощью массива (динамика)\n return value % mod",
"def _base32_to_hex(base32):\n ALPHABET = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ234567'\n x = 0\n for digit in str(base32.upper().strip(' ')):\n x = x * len(ALPHABET) + ALPHABET.index(digit)\n return hex(x).lstrip('0x').rstrip('L').upper()",
"def base_repr(i, base):\n\n assert i>=0 and base>=2\n \n if i==0:\n return ['0']\n\n if base<=10:\n return _small_base(i, base)\n\n assert base<=36\n return _large_base(i, base)",
"def baseconvert(num, base):\n\n digits = \"0123456789abcdefghijklmnopqrstuvwxyz\"\n\n try:\n num = int(num)\n base = int(base)\n except ValueError:\n return \"\"\n\n if num < 0 or base < 2 or base > 36:\n return \"\"\n\n num_string = \"\"\n while 1:\n remainder = num % base\n num_string = digits[remainder] + num_string\n num = num / base\n if num == 0:\n break\n\n return num_string",
"def main():\n test = 'abc'\n digest = uint_test(test)\n print([hex(x) for x in digest])\n\n test = ''\n digest = uint_test(test)\n print([hex(x) for x in digest])\n\n test = 'abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq'\n digest = uint_test(test)\n print([hex(x) for x in digest])\n\n test = \"\"\"abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijkl\n mnhijklmnoijklmnopjklmnopqklmnopqrlmnopqrsmnopqrstnopqrstu\"\"\"\n digest = uint_test(test)\n print([hex(x) for x in digest])",
"def ToBase(b, n):\r\n d = []\r\n while n:\r\n d.append(n % b)\r\n n //= b\r\n d.reverse() \r\n return int(''.join(map(str, d)))"
] | [
"0.6767114",
"0.62019885",
"0.6037745",
"0.59984386",
"0.5930948",
"0.5927391",
"0.5909252",
"0.5885277",
"0.5724089",
"0.5716753",
"0.56764406",
"0.5669568",
"0.56636024",
"0.56621295",
"0.56443244",
"0.56175005",
"0.56158185",
"0.5566997",
"0.5562639",
"0.5524696",
"0.5519877",
"0.55101705",
"0.5504585",
"0.5492735",
"0.5489267",
"0.54771477",
"0.545486",
"0.5427079",
"0.5364618",
"0.53522897"
] | 0.72924364 | 0 |
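As a hedged companion to the record above, here is a minimal, self-contained sketch of the same repeated-division-by-16 idea that returns the hex string instead of printing it (the name to_hex_string is illustrative, not part of the dataset):

```python
from collections import deque


def to_hex_string(value: int) -> str:
    """Convert a non-negative base-10 integer to its hexadecimal digit string."""
    digits = "0123456789ABCDEF"
    stack = deque()  # deque used as a LIFO stack of hex digits
    while value > 0:
        stack.append(digits[value % 16])  # least-significant digit first
        value //= 16
    # Pop from the right so the most-significant digit comes out first.
    return "".join(stack.pop() for _ in range(len(stack))) or "0"


assert to_hex_string(255) == "FF"
assert to_hex_string(26) == "1A"
```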
Calculates the binary value of a user-entered base-10 integer. As long as the user-entered base-10 value is greater than 0, the function stores the remainder modulo 2 in a deque (used as a stack). Outputs the stored digits as the binary value at the end. | def binary_calc(value):
    binary_stack = deque()  # Deque used as a stack to hold the binary digits
while value > 0:
remainder = value % 2
        binary_stack.append(remainder)  # Push binary digit onto the stack
value = value // 2
print("Binary Value: ", end="")
while binary_stack:
print(binary_stack.pop(), end="") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def dec_to_bin(num, base):\n\n # Divide number by base and keep track of remainder in a stack.\n # What is one of the key indicators that a stack should be used?\n # Reversability\n # The reversal property signals that a stack is likely the appropriate\n # data structure for solving the problem.\n\n result = \"\"\n remstack = Stack()\n digits = \"0123456789ABCDEF\"\n\n while num > 0:\n rem = num % base\n num = num // base\n remstack.push(rem)\n\n while not remstack.is_empty():\n result += digits[remstack.pop()]\n\n return result",
"def binary(num):\n binary = \"\"\n \n while num > 0:\n bit = num%2\n binary = str(bit) + binary # on rajoute le bit au nombre en binaire mais à la fin parce que comme ça ça inverse l'ordre\n num = num//2\n\n return binary",
"def ToBase(b, n):\r\n d = []\r\n while n:\r\n d.append(n % b)\r\n n //= b\r\n d.reverse() \r\n return int(''.join(map(str, d)))",
"def decimal_to_binary(number):\n if number == 1: #if the positive integer is less than 2, its binary form is itself\n print(number)\n else:\n new_number = []\n while number > 0:\n new_number.append(int(number%2))\n number = number // 2\n print(str(new_number))",
"def binary(n):\n # handles negative numbers\n negative = False\n if n < 0:\n negative = True\n n = abs(n)\n\n # divide n by 2 while n != 0, append remainder of division to array\n number = []\n while n != 0:\n number.append(n % 2)\n n //= 2\n\n # return binary number as integer\n bin_number = 0\n mult = 10 ** (len(number) - 1)\n n_len = len(number)\n for i in range(n_len - 1, -1, -1):\n bin_number += (number[i] * mult)\n mult //= 10\n return bin_number if not negative else -bin_number",
"def binary(n):\n a=str(n)\n bin=\"\"\n while n>=1:\n bin+=str(int(n%2))\n n=n//2\n bin=bin[len(bin)-1:-0:-1]+bin[0]\n for ele in bin:\n if ele!=0:\n index=bin.find(ele)\n break\n return bin",
"def decimalToBinary(num):\r\n if num > 1:\r\n decimalToBinary(num // 2)\r\n print(num % 2, end='')",
"def int_to_base(n: int, b: int) -> List[int]:\n if n == 0:\n return [0]\n digits = []\n while n:\n digits.append(int(n % b))\n n //= b\n return digits[::-1]",
"def dec_to_bin(num):\n\n count = 0\n out = \"\"\n\n if num == 0 or num == 1:\n return num\n\n while (num > 1):\n rem = num % 2\n num = num / 2\n out = str(rem * 10**count) + out\n count += 1\n \n out = str(num) + out \n return out",
"def convertebase10basen(basedest, numero):\n ret = \"\"\n while True:\n digit = numero%basedest\n ret = ret + DIGITOS[digit]\n numero = numero // basedest\n if numero == 0:\n break\n return ret[::-1]",
"def to_byte( n, bytesize):\n assert(bytesize>1) ## this coder does base 3, 7, 15,...\n assert (n>=0)\n B = (1<<bytesize) - 1\n answer=\"\"\n while n>0 :\n rem = n % B\n answer=dec_to_bin(rem,bytesize)+answer\n# print n,B,rem,answer\n n = n/B\n pass\n answer=answer+\"1\"*bytesize\n return answer",
"def decimal_to_binary(num):\n binary_res = \"\"\n while num >= 1:\n binary_char = num % BINARY_BASE\n num = math.floor(num / BINARY_BASE)\n binary_res += str(binary_char)\n if len(binary_res) < REGISTER_SIZE:\n binary_res += \"0\" * (REGISTER_SIZE - len(binary_res))\n return binary_res[::-1]",
"def dec_to_bin(n, digits):\n if(n<0) :\n sys.stderr.write( \"warning, negative n not expected\\n\")\n pass\n i = digits-1\n ans = \"\"\n while i >= 0 :\n b = (((1<<i)&n)>0) \n i -= 1\n ans = ans + str(int(b))\n return ans",
"def get_base_2(n):\n return str(bin(int(n))).removeprefix('0b')",
"def baseConverter(number, base):\n\n digits = \"0123456789ABCDEF\"\n\n remainders = Stack()\n\n while number > 0:\n rem = number % base\n remainders.push(rem)\n number = number // base\n\n result = \"\"\n\n while not remainders.isEmpty():\n popped = remainders.pop()\n digit = digits[popped]\n result += str(digit)\n return result",
"def int_to_binary(x, n=64):\n return format(x, 'b').zfill(n)",
"def int2bin(n: int) -> str:",
"def _get_binary(value, bits):\n\n # http://www.daniweb.com/code/snippet216539.html\n return ''.join([str((value >> y) & 1) for y in range(bits - 1, -1, -1)])",
"def get_user_number_input(message: str) -> (int, bin):\n print(message)\n return_int = None\n return_bin = None\n while (not isinstance(return_int, int) or\n len(return_bin) > MAX_BIT_LENGTH):\n user_input = input(\"=> \")\n try:\n return_int = int(user_input)\n return_bin = bin(return_int)\n except ValueError:\n return_int = None\n return_bin = None\n print(\"Got {} ({})\\n\".format(return_int, return_bin))\n return (return_int, return_bin)",
"async def intbin(self, ctx, *, input_int = None):\n if input_int == None:\n await ctx.send(\"Usage: `{}intbin [input_int]`\".format(ctx.prefix))\n return\n try:\n input_int = int(input_int)\n except Exception:\n await ctx.send(\"Input must be an integer.\")\n return\n\n await ctx.send(\"{:08b}\".format(input_int))",
"def makeBinary(self):\r\n\t\tls = 5.12 #limite superior\r\n\t\tli = -5.12 #limite inferior\r\n\t\tt = 14 # total de binarios\r\n\t\t\r\n\t\tcadena_bits = \"\"\r\n\t\tfor i in self.values:\r\n\t\t\tentero = (int) ( ( ( i - li ) * ( 2 ** t ) ) / ( ls - li ) )\r\n\t\t\t#print entero\r\n\t\t\tcadena_bits += \"{0:b}\".format(entero).zfill(14)\r\n\t\t\t\r\n\t\tself.cadenaBits = cadena_bits\r\n\t\treturn cadena_bits",
"def recursive_decode(integers, bits=16):\n\n new = []\n power = 2 ** (bits - 1)\n cutoff = [power - 1, 0 - power]\n index = 0\n while index < len(integers):\n value = 0\n while integers[index] in cutoff:\n value += integers[index]\n index += 1\n if integers[index] == 0: break\n value += integers[index]\n index += 1\n new.append(value)\n return new",
"def int2bin(i):\n if i == 0: return \"0\"\n s = ''\n while i:\n if i & 1 == 1:\n s = \"1\" + s\n else:\n s = \"0\" + s\n i /= 2\n return s",
"def addition_mod(a, b, nbr):\n bina = [int(x) for x in bin(a)[2:]]\n binb = [int(x) for x in bin(b)[2:]]\n binn = [int(x) for x in bin(nbr)[2:]]\n #print(binn)\n while len(bina) >= len(binb):\n binb = [0]+binb\n while len(bina) < len(binb)-1:\n bina = [0]+bina\n while len(binn) < len(bina):\n binn = [0]+binn\n while len(binn) > len(bina):\n bina = [0]+bina\n binb = [0]+binb\n binn.reverse()\n bina.reverse()\n binb.reverse()\n #print(bina, binb, binn)\n n = len(bina)+len(binb)+len(binn)\n na = len(bina)\n nab = len(bina)+len(binb)\n q = QuantumRegister(n+2, 'q')\n circ = QuantumCircuit(q)\n for i in range(na):\n if bina[i]:\n circ.x(q[i])\n for i in range(len(binb)):\n if binb[i]:\n circ.x(q[na+i])\n for i in range(len(binn)):\n if binn[i]:\n circ.x(q[nab+i])\n addmod(circ, q, # A, B, lost, last, N, lost2, binn):\n [q[i] for i in range(len(bina))],\n [q[i+na] for i in range(len(binb)-1)],\n q[n],\n q[na+len(binb)-1],\n [q[i+nab] for i in range(len(binn))],\n q[n+1],\n binn)\n circ_m = measure(circ, q, [i for i in range(na,nab)])\n return circ_m",
"def algorithm(n_str,b_int):\n\n #print(\"In algo\")\n k_int = len(n_str)\n y_str = ''.join(sorted(n_str))\n x_str = y_str[::-1]\n #print(\"X: %s, Y: %s\"%(x_str, y_str))\n\n z_base10 = int(x_str,b_int) - int(y_str,b_int)\n #print(\"algo: z_base10: %d\"% z_base10)\n z_str = get_str_in_base_b(z_base10,b_int)\n #print(\"z_str computed: %s \\n\"% z_str)\n\n while len(z_str) < k_int:\n z_str = '0'+z_str\n\n #print(\"After Append 0: z_str: %s \\n\" % z_str)\n return z_str",
"def binarify(num):\n if num<=0: return '0'\n digits = []",
"def num_to_binary(n):\n if n == 0:\n return ''\n elif n % 2 == 1:\n return num_to_binary(n // 2) + '1'\n else:\n return num_to_binary(n // 2) + '0'",
"def convert_dec(integer, base):\n digits = '0123456789ABCDEFGHIJKLMNOP'\n s = Stack()\n while integer:\n s.push(digits[integer%base])\n integer //= base\n b = ''\n while not s.is_empty():\n b += str(s.pop())\n return b",
"async def intbin(self, ctx, *, input_int = None):\n\t\tif input_int == None:\n\t\t\tawait ctx.send(\"Usage: `{}intbin [input_int]`\".format(ctx.prefix))\n\t\t\treturn\n\t\ttry:\n\t\t\tinput_int = int(input_int)\n\t\texcept Exception:\n\t\t\tawait ctx.send(\"Input must be an integer.\")\n\t\t\treturn\n\n\t\tawait ctx.send(\"{:08b}\".format(input_int))",
"def encode(record: int) -> str:\r\n result = ''\r\n queue = record\r\n while queue:\r\n remainder = queue % BASE\r\n queue = floor(queue / BASE)\r\n result = CODEX[remainder] + result\r\n return result"
] | [
"0.6342449",
"0.6292792",
"0.6045751",
"0.59172094",
"0.5907794",
"0.58624756",
"0.5839806",
"0.58302724",
"0.57647806",
"0.57232666",
"0.5678209",
"0.5669591",
"0.5661115",
"0.5660052",
"0.5625247",
"0.5621955",
"0.5618219",
"0.5616829",
"0.5614025",
"0.55944234",
"0.55932754",
"0.5591821",
"0.55670446",
"0.5562088",
"0.55550456",
"0.55397916",
"0.55234456",
"0.551605",
"0.5515939",
"0.5511529"
] | 0.7517937 | 0 |
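The binary record follows the same pattern with base 2; a compact hedged sketch using divmod (the name to_binary_string is illustrative, not part of the dataset):

```python
from collections import deque


def to_binary_string(value: int) -> str:
    """Convert a non-negative base-10 integer to its binary digit string."""
    stack = deque()
    while value > 0:
        value, remainder = divmod(value, 2)
        stack.append(str(remainder))  # least-significant bit first
    return "".join(stack.pop() for _ in range(len(stack))) or "0"


assert to_binary_string(26) == "11010"
assert to_binary_string(0) == "0"
```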
Starting point for the program. Asks the user for a positive base-10 decimal integer and calls the binary_calc and hex_calc functions for computation. The program will loop, asking the user for a new number as long as they do not enter the string "quit". | def setup():
value = input("Enter a positive decimal integer (\"quit\" to stop): ")
while value.lower() != "quit":
binary_calc(int(value)) # Calls converter function on inputted value
print("\n")
hex_calc(int(value)) # Calls converter function on inputted value
value = input(
"\nEnter a positive decimal integer (\"quit\" to stop): ") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def run():\n reset_calc()\n finish = False\n printCurrent()\n while not finish:\n printMenu()\n\n m = input().strip()\n if (m == 'x'):\n finish = True\n elif (m == '+'):\n m = input(\"Give nominator:\")\n n = input(\"Give denominator:\")\n try:\n calc_add (int(m), int(n))\n printCurrent()\n except ValueError:\n print (\"Enter integers for m, n, with not null n\")\n elif (m=='c'):\n reset_calc()\n printCurrent()\n elif (m=='u'):\n undo()\n printCurrent()\n else:\n print (\"Invalid command\")\n\n print (\"By!!!\")",
"def main():\n welcome_message()\n continue_program = True\n num_calculations = 0\n # all the calculation options in the program\n calculation_options = [\"addition\", \"subtraction\", \"division\",\n \"multiplication\", \"exponents\", \"circle area\",\n \"cube area\",\n \"repeat words\", \"inequalities\", \"in-range\",\n \"stop program\"]\n\n while continue_program:\n print(\"Enter the option number of the calculation you would like to \"\n \"perform: \")\n # prints the calculations options list as a numbered list\n for calculation in calculation_options:\n print(calculation_options.index(calculation) + 1, \". \",\n calculation, sep=\"\")\n\n while True:\n try:\n user_input = int(input(\"\"))\n break\n except ValueError:\n print(\n \"That was not a valid input. Please enter a whole number \"\n \"between 1 and 11.\")\n\n if user_input in range(1, 12):\n if user_input == 1: # addition\n run_addition = True\n while run_addition:\n try:\n user_num1 = float(input(\"Enter the first number: \"))\n user_num2 = float(input(\"Enter the second number: \"))\n addition(user_num1, user_num2)\n run_addition = False\n except ValueError:\n print(\"That was not a valid input.\")\n\n elif user_input == 2: # subtraction\n run_subtraction = True\n while run_subtraction:\n try:\n user_num1 = float(input(\"Enter the first number: \"))\n user_num2 = float(input(\"Enter the second number: \"))\n print(\"The difference is \",\n subtraction(user_num1, user_num2), \".\\n\", sep=\"\")\n run_subtraction = False\n except ValueError:\n print(\"That was not a valid input.\")\n\n elif user_input == 3: # division\n run_division = True\n while run_division:\n try:\n user_num1 = float(input(\"Enter a number: \"))\n user_num2 = float(input(\"Enter a second number: \"))\n division(user_num1, user_num2)\n run_division = False\n except ValueError:\n print(\"That was not a valid input.\")\n\n elif user_input == 4: # multiplication\n run_multiplication = True\n while run_multiplication:\n try:\n user_num1 = float(input(\"Enter a number: \"))\n user_num2 = float(input(\"Enter a second number: \"))\n print(\"The product is \",\n multiplication(user_num1, user_num2), \".\\n\",\n sep=\"\")\n except ValueError:\n print(\"That was not a valid input.\")\n\n elif user_input == 5: # calculates num1 to the num2 power\n run_exponents = True\n while run_exponents:\n try:\n user_num1 = float(input(\"Enter a number: \"))\n user_num2 = float(input(\"Enter a second number: \"))\n print(user_num1, \" to the \", user_num2, \" power is \",\n exponents(user_num1, user_num2), \".\\n\", sep=\"\")\n run_exponents = False\n except ValueError:\n print(\"That was not a valid input.\")\n\n elif user_input == 6: # circle area\n run_circle_area = True\n while run_circle_area:\n try:\n user_radius = float(input(\"Enter a radius: \"))\n print(\"The area is \", circle_area(user_radius), \".\\n\",\n sep=\"\")\n run_circle_area = False\n except ValueError:\n print(\"That was not a valid input.\")\n\n elif user_input == 7: # cube area\n run_cube_area = True\n while run_cube_area:\n try:\n user_length = float(\n input(\"Enter the length of one side of the cube\"))\n print(\"The area of the cube is \",\n cube_area(user_length), \".\\n\", sep=\"\")\n run_cube_area = False\n except ValueError:\n print(\"That was not a valid input.\")\n\n elif user_input == 8: # repeats given word a certain number of\n # times\n run_repeat = True\n while run_repeat:\n try:\n user_word = input(\n \"Enter the word you want to repeat: \")\n repeat = int(\n input(\"How many times do you 
want to repeat it: \"))\n print(user_word * repeat, \"\\n\")\n run_repeat = False\n except ValueError:\n print(\"That was not a valid input.\")\n\n elif user_input == 9: # whether num1 <,>, or = num2\n run_inequalities = True\n while run_inequalities:\n try:\n user_num1 = float(input(\"Enter a number: \"))\n user_num2 = float(input(\"Enter a second number: \"))\n inequalities(user_num1, user_num2)\n run_inequalities = False\n except ValueError:\n print(\"That was not a valid input.\")\n\n elif user_input == 10: # whether a number is in a certain range\n run_range = True\n while run_range:\n try:\n user_num = float(input(\"Enter a number: \"))\n user_start_range = float(\n input(\"What number does the range start at? \"))\n user_end_range = float(\n input(\"What number does the range end at? \"))\n range_function(user_num, user_start_range,\n user_end_range)\n run_range = False\n except ValueError:\n print(\"That was not a valid input.\")\n\n elif user_input == 11: # prints number of calculations performed\n # ran and stops running\n print(\"You ran the program\", num_calculations, \"times.\")\n continue_program = False\n\n else:\n print(\"That was not an option. Please select an option from \"\n \"1 to 11.\")\n\n if user_input not in range(1, 12):\n print(\n \"That was not an option. \"\n \"Please select an option from 1 to 11.\")\n\n num_calculations += 1 # keeps count of the number of calculations\n # performed",
"def main():\r\n num = enter_num()\r\n if num is not None:\r\n num_lst = mk_num_lst(num)\r\n dec = convert(num_lst)\r\n print(\"decimal value of BASE 36 number\", num, \"is\", dec)\r\n else:\r\n print(\"user terminated program\")",
"def calculator (menuWindow):\n\n #This procedure accepts the parameter subProgram which will tell it which conversion function\n #to call. These functions will then return a value to outputUpdate and\n #set resultText to the appropriate message\"\"\"\n def popUP(message):\n pop = tk.Tk()\n pop.title(\"Error\")\n #resultText.set(message)\n tk.Label(pop, text=message).pack()\n pop.mainloop()\n\n def outputUpdate(subProgram):\n #Selection block that will run the appropriate function based upon\n #the button the user pushes\n #It first obtains the entered value in the input box\n number = baseNumber.get()\n if subProgram == 1:\n value = hex_bin()\n if value != \"Must only contain numbers and letters in the Hex set\\n\" \\\n \"0,1,2,3,4,5,6,7,8,9,a,b,c,d,e,f\":\n resultText.set(\"The binary for this number is: \" + str(value)[2:].upper())\n else:\n popUP(value)\n\n elif subProgram == 2:\n #The function is run within a variable to that the returned\n #value is stored and usable\n value = hex_dec()\n if value != \"Must only contain numbers and letters in the Hex set\\n\" \\\n \"0,1,2,3,4,5,6,7,8,9,a,b,c,d,e,f\":\n resultText.set(\"The decimal for this number is: \" + str(value).upper())\n else:\n popUP(value)\n elif subProgram == 3:\n value = dec_hex()\n #using the is digit method to see if the returned value is a number.\n #If the value is a number the user has entered a valid decimal value\n if value != \"Must only enter whole numbers e.g. 1, 10, 14\":\n resultText.set(\"The decimal for this number is: \" + str(value).upper())\n else:\n #If the user did not enter a valid decimal value\n #The function will have returned an appropriate error message\n popUP(value)\n elif subProgram == 4:\n value = dec_bin()\n test = value.replace(\" \",\"\")\n if test.isalpha():\n popUP(value)\n else:\n #string slicing used to remove the leading 0b from the binary value\n resultText.set(\"The binary value of \" + str(number) + \" is \" + str(value)[2:])\n elif subProgram == 5:\n value = bin_dec()\n if value != \"Must enter a valid binary number i.e. only containint 1 or 0\":\n resultText.set(\"The decimal value of \" + str(number) + \" is \" + str(value))\n else:\n popUP(value)\n else:\n value = bin_hex()\n if value != \"Must enter a valid binary number i.e. 
only containint 1 or 0\":\n resultText.set(\"The hexadecimal value of \" + str(number) + \" is \" + str(value)[2:].upper())\n else:\n popUP(value)\n\n def hex_bin():\n #This makes use of the hex_dec function to get the decimal value of the hex number\n #This means I don't have to re-write code\n number = hex_dec()\n try:\n binValue = bin(number)\n #Returning the value to the output function\n return binValue\n except:\n return \"Must only contain numbers and letters in the Hex set\\n\" \\\n \"0,1,2,3,4,5,6,7,8,9,a,b,c,d,e,f\"\n\n def hex_dec():\n #Establish a dictionary to store the hex value of each position\n number = baseNumber.get()\n try:\n value = int(number,16)\n return value\n except:\n value = \"Must only contain numbers and letters in the Hex set\\n\" \\\n \"0,1,2,3,4,5,6,7,8,9,a,b,c,d,e,f\"\n return value\n\n def dec_hex():\n #As before this is getting the entered value\n number = baseNumber.get()\n\n if number.isdigit():\n\n #Converting the input to an integer so that we can use it in calculations\n number = int(number)\n #Making use of the inbuilt hex function that returns the hex value of a decimal\n hexConvert = hex(number)\n #hex() returns this with a leading 0x\n #I have used string slicing to remove the elements I do not want\n hexConvert = hexConvert[2:]\n #As with the other functions this returns the numerical value\n else:\n hexConvert = \"Must only enter whole numbers e.g. 1, 10, 14\"\n return hexConvert\n\n '''Completed Not Commented'''\n def dec_bin():\n #Retrieving the value entered by the user to the GUI\n number = baseNumber.get()\n #Selection statement testing if the value etered was a digit\n if number.isdigit():\n #If a digit is entered the conversion is carried out\n number = bin(int(number))\n else:\n #If the user enters a non-digit, the error message is returned\n number = \"Must enter a valid digit\"\n return number\n\n def bin_hex():\n #the bin_dec() function is called to obtain a decimal value for conversion\n decValue = bin_dec()\n #Error checking takes place in an attempt to carry out the conversion\n try:\n #the hex and int functions are used to convert the returned decValue\n #If no error is caused the conversion is carried out and returned\n hexVal = hex(int(decValue))\n return hexVal\n except:\n #Any errors are caught and returned to the output procedure\n return \"Must enter a valid binary number i.e. only containint 1 or 0\"\n\n def bin_dec():\n #The entered number is retrieved and stored in a variable for use\n number = baseNumber.get()\n #Error checking to stop the program crashing\n try:\n #Attempt to convert the entered value into an int with base 2\n #If no error is caused the value is returned\n value = int(number , 2)\n return value\n except:\n #If an error occurs the error is caught and the appropriate message\n #returned to the output function\n return \"Must enter a valid binary number i.e. 
only containint 1 or 0\"\n\n #Procedure to convert the text the user enters in the entry box to upper case\n def caps(event):\n entryText.set(entryText.get().upper())\n\n def close():\n\n root.destroy()\n menu()\n\n #Setting the tk environment to start the GUI\n menuWindow.destroy()\n root = tk.Tk()\n '''I have set up different frames to allow for different grid layouts'''\n #Setting the title that will appear at the top of the window\n root.title(\"BinHexDec Calculator\")\n #Creating a frame that will hold the top text of the window\n titleFrame = tk.Frame(root, width=400, height=50)\n titleFrame.pack()\n #Creating a frame that will hold the entry widget\n entryFrame = tk.Frame(root, width=400, height=200)\n entryFrame.pack()\n resultFrame = tk.Frame(root, width=400, height=200)\n resultFrame.pack()\n buttonFrame = tk.Frame(root, width=400, height=200)\n buttonFrame.pack()\n menuFrame = tk.Frame(root, width=400, height=200)\n menuFrame.pack()\n #Creating a label to display text on the screen\n title = tk.Label(titleFrame, text=\"BinHexDec Converter\").pack()\n entryText = tk.Label(entryFrame, text=\"Enter the number to convert and select the conversion below\").grid(row=0, columnspan=3)\n #Creatingan entry widget that will allow the user to enter a value\n entryText = tk.StringVar()\n baseNumber = tk.Entry(entryFrame, textvariable=entryText)\n baseNumber.grid(row=1, column=1)\n baseNumber.bind(\"<KeyRelease>\",caps)\n\n #Initialising a variable as a \"string variable\" this allows me\n #to change this value dynamically within the program\n resultText = tk.StringVar()\n #This creates a label that will display whatever is in resultText\n #To create this dynamic label I don't set it with a text, it has a textVariable\n displayResult = tk.Label(resultFrame, textvariable=resultText).grid(row=0, column=1)\n resultText.set(\"The result of the calculation will appear here\")\n\n #Here I am creating a series of buttons.\n #These will all run the outputUpdate procedure\n #So that the correct function is run a value is passed into outputUpdate\n hexBinBtn = tk.Button(buttonFrame, text=\"Hex to Bin\", command= lambda: outputUpdate(1)).grid(row=0,column=0)\n hexDecBtn = tk.Button(buttonFrame, text=\"Hex to Dec\", command= lambda: outputUpdate(2)).grid(row=0,column=1)\n decHexBtn = tk.Button(buttonFrame, text=\"Dec to Hex\", command= lambda: outputUpdate(3)).grid(row=0,column=2)\n decBinBtn = tk.Button(buttonFrame, text=\"Dec to Bin\", command= lambda: outputUpdate(4)).grid(row=0,column=3)\n binDecBtn = tk.Button(buttonFrame, text=\"Bin to Dec\", command= lambda: outputUpdate(5)).grid(row=1,column=1)\n binHexBtn = tk.Button(buttonFrame, text=\"Bin to Hex\", command = lambda: outputUpdate(6)).grid(row=1,column=2)\n\n closeBtn = tk.Button(menuFrame, text = \"Return to Menu\", command = close).grid(row=2,column=2)\n #This initialises the window and keeps it running constantly\n root.mainloop()",
"def main():\n\n\twhile True:\n\t\tseleccion = input(\"selecciona:\\n\\t1) c1 a decimal\\n\\t2) c2 a decimal\\n\\t3) decimal a c1\\n\\t4) decimal a c2\\n\\t5) salir\\nOpcion: \")\n\t\tif int(seleccion) == 1:\n\t\t\tnumb = input(\"Introduce el numero en c_1: \")\n\t\t\tprint (decimal(numb,True))\n\t\telif int(seleccion) == 2:\n\t\t\tnumb = input(\"Introduce el numero en c_2: \")\n\t\t\tprint (decimal(numb,False))\n\t\telif int(seleccion) == 3:\n\t\t\tnumb = input(\"Introduce el numero en decimal: \")\n\t\t\tprint (stringer(comp1(numb)))\n\t\telif int(seleccion) == 4:\n\t\t\tnumb = input(\"Introduce el numero en decimal: \")\n\t\t\tprint (stringer(comp2(numb)))\n\t\telif int(seleccion) == 5:\n\t\t\tprint (\"Saliendo.\")\n\t\t\texit();\n\t\t\tbreak",
"def start_repl():\n print(\"BitCalc v0.1 - a visual calculator for bitwise expressions\")\n print(\"Use Ctrl+C to quit.\\n\")\n parser = Parser()\n\n while True:\n try:\n expr = input(\">>> \")\n if len(expr.strip()) == 0:\n continue\n\n parser.parse(expr)\n print(\"\")\n print(str(parser.tree))\n print(parser.result)\n print(\"\")\n except ParserError as e:\n print(e)\n except KeyboardInterrupt:\n print(\"\")\n raise SystemExit(0)",
"def _main_():\n while True:\n num = input(\"Please enter a number or done: \")\n if num == \"done\":\n print(bold(lightgreen(\"Thank You!\")))\n break\n else:\n try:\n num = int(num)\n if num < 0:\n num = abs(num)\n if num < 100:\n print(f\"Your number is negative {tens_text(num)}\")\n elif num < 1000:\n print(f\"Your number is negative {hundreds_text(num)}\")\n elif num == 0:\n print(\"Your number is zero\")\n elif num < 100:\n print(f\"Your number is {tens_text(num)}\")\n elif num < 1000:\n print(f\"Your number is {hundreds_text(num)}\")\n except Exception:\n print(info(bold(\"Not a valid input, try again\")))",
"def cli():\n print_help()\n while True:\n formula = input('Please enter formula (or type \"exit\"):\\n')\n if formula == \"exit\":\n return\n elif formula == \"help\":\n print_help()\n break\n try:\n print(\";\" + \"=\"*80)\n print(check_formula_and_create_assembly_code(formula))\n print(\";\" + \"=\"*80)\n except Exception as e:\n print(bcolors.FAIL, e, bcolors.ENDC)",
"def main(destination_base, max_number, decimal_number):\n if 2 <= destination_base <= 9:\n if 0 <= decimal_number <= max_number:\n converted_number = base_conversion(destination_base, decimal_number)\n print(f\"the converted number is: {converted_number}\")\n else:\n print(\"invalid input for base 10 number\")\n else:\n print(\"invalid input for destination base\")",
"def basic_calculator():\r\n\r\n num1 = input(\"Enter first number: \") # taking input\r\n\r\n # handling the exception of typecasting the value of 'num1' to float\r\n try:\r\n num1 = float(num1)\r\n except ValueError:\r\n print(\"Error: Input numeric values.\\nTry Again!\")\r\n exit()\r\n\r\n num2 = input(\"Enter second number: \") # taking input\r\n\r\n # handling the exception of typecasting the value of 'num2' to float\r\n try:\r\n num2 = float(num2)\r\n except ValueError:\r\n print(\"Error: Input numeric values.\\nTry Again!\")\r\n exit()\r\n\r\n # Asking user for the operation\r\n print(\"Select the operation:\")\r\n print(\"Type:\")\r\n print(\"1 for Addition\\n2 for Subtraction\\n3 for Multiplication\\n4 for Division\\n5 for Integer Division\\n6 for Power\")\r\n choice = input(\"Enter your choice: \")\r\n\r\n result = 0.0\r\n\r\n # Performing the operation and providing the result\r\n if choice == '1':\r\n result = num1 + num2\r\n elif choice == '2':\r\n result = num1 - num2\r\n elif choice == '3':\r\n result = num1 * num2\r\n elif choice == '4':\r\n result = num1 / num2\r\n elif choice == '5':\r\n result = num1 // num2\r\n elif choice == '6':\r\n result = num1 ** num2\r\n else:\r\n print(\"Wrong Input! Try Again.\")\r\n exit()\r\n\r\n print(f'\\nThe result is: {result}')",
"def main(sample=sample, log=log, in_file=input_file, out_file=output_file):\n message = \"Program {} launched.\".format(sys.argv[0])\n append_logfile(message, log)\n\n if debug: print(\"Program is starting...\")\n\n # Call functions here...\n print(\"{} is executing...\".format(_program_))\n\n print(messages(0))\n print(messages(1))\n input(\"Type return to continue...\")\n print(messages(2))\n input(\"Type return to continue...\")\n print(messages(3))\n input(\"Type return to continue...\")\n print(messages(4))\n print(\"For an example of a decimal value as an exact binary value.\")\n print(\"The fraction 5/8 in decimal is 0.625\")\n print(\"This is 6/10 + 2/100 + 5/1000 = {}\"\n .format(6 / 10 + 2 / 100 + 5 / 1000))\n print(\"Or... 625/1000 = {}\".format(625 / 1000))\n print()\n print(\"The fraction 5/8 in binary is 0.101\")\n print(\"This is 1/2 + 0/4 + 1/8 = 0.101 in binary.\\n\"\n \"Equivalent to: 4/8 + 0/8 + 1/8 = 5/8. Equalling {} in decimal.\"\n .format(1 / 2 + 0 / 4 + 1 / 8))\n\n input(\"\\nType return to continue...\")\n print()\n print(\"As an example of a decimal that doesn't have an exact binary value\")\n print(\"1/10 = 0.1 in decimal.\")\n print(\"Python performs the division of 1 by 10 and stores this as a\\n\"\n \"Binary64 floating point value. When Python is required to display\\n\"\n \"this stored binary value then conversion to decimal and rounding\\n\"\n \"is performed to display 0.1\")\n print(\"1/10 = {}\".format(1 / 10))\n print()\n\n print(\"However on some occasions the slight descrepancies between binary\\n\"\n \"values and their displayed decimal values may be observed...\")\n print(\"0.1 + 0.1 = {}\".format(0.1 + 0.1))\n print(\"0.1 + 0.1 + 0.1 = {}\".format(0.1 + 0.1 + 0.1))\n print(\"0.1 + 0.1 + 0.1 + 0.1 = {}\".format(0.1 + 0.1 + 0.1 + 0.1))\n print(\"0.1 + 0.1 + 0.1 + 0.1 + 0.1 = {}\"\n .format(0.1 + 0.1 + 0.1 + 0.1 + 0.1))\n print(\"0.1 + 0.1 + 0.1 + 0.1 + 0.1 + 0.1 = {}\"\n .format(0.1 + 0.1 + 0.1 + 0.1 + 0.1 + 0.1))\n print(\"0.1 + 0.1 + 0.1 + 0.1 + 0.1 + 0.1 + 0.1 = {}\"\n .format(0.1 + 0.1 + 0.1 + 0.1 + 0.1 + 0.1 + 0.1))\n print(\"0.1 + 0.1 + 0.1 + 0.1 + 0.1 + 0.1 + 0.1 + 0.1 = {}\"\n .format(0.1 + 0.1 + 0.1 + 0.1 + 0.1 + 0.1 + 0.1 + 0.1))\n print(\"0.1 + 0.1 + 0.1 + 0.1 + 0.1 + 0.1 + 0.1 + 0.1 + 0.1 = {}\"\n .format(0.1 + 0.1 + 0.1 + 0.1 + 0.1 + 0.1 + 0.1 + 0.1 + 0.1))\n print(\"0.1 + 0.1 + 0.1 + 0.1 + 0.1 + 0.1 + 0.1 + 0.1 + 0.1 + 0.1 = {}\"\n .format(0.1 + 0.1 + 0.1 + 0.1 + 0.1 + 0.1 + 0.1 + 0.1 + 0.1 + 0.1))\n print()\n print(\"Notice that the rounding algorithm used in displaying the decimal\\n\"\n \"value may not always return a result of just one decimal point.\")\n\n input(\"\\nType return to continue...\")\n print()\n print(messages(5))\n\n input(\"\\nType return to continue...\")\n print()\n print(messages(6))\n input(\"\\nType return to continue...\")\n print()\n # Precision of 1/10th in binary\n # 1/10 in binary form repeats as 0011 sequence. 
0.0001100110011...\n print(\"1/10 converted to binary with varying levels of precision...\")\n print()\n for precision in range(4, 61, 8):\n total = 0.0\n bin_string = \"0.0\"\n for exponent in range(precision):\n if exponent % 4 == 0 or exponent % 4 == 1:\n total = total + 0 / 2**(exponent + 2)\n bin_string = bin_string + \"0\"\n if exponent % 4 == 2 or exponent % 4 == 3:\n total = total + 1 / 2**(exponent + 2)\n bin_string = bin_string + \"1\"\n print(\"{: >2d} binary bits: {}\".format(precision + 1, bin_string))\n print(\"Returned in decimal form: {: <19}\"\n .format(total))\n\n input(\"\\nType return to continue...\")\n print()\n print(\"Excessively large floats return inf for infinity.\")\n\n print(\"Maximum positive float until positive infinity...\")\n print(\"1.7976931348623156e+308 is {}\".format(1.7976931348623157e+308))\n print(\"1.7976931348623157e+308 is {}\".format(1.7976931348623157e+308))\n print(\"1.7976931348623158e+308 is {}\".format(1.7976931348623158e+308))\n print(\"1.7976931348623159e+308 is {}\".format(1.7976931348623159e+308))\n print(\"1.7976931348623160e+308 is {}\".format(1.7976931348623160e+308))\n print(\"1.7976931348623161e+308 is {}\".format(1.7976931348623161e+308))\n\n print()\n print(\"Excessively large integer overflows the float() function\")\n print(\"Maximum positive integer **308 to float overload...\")\n try:\n print(\"float(1 * 10**308) is {}\".format(float(1 * 10**308)))\n except OverflowError as e:\n print(\"float(1 * 10**308) is OverflowError: {}\".format(e))\n\n try:\n print(\"float(2 * 10**308) is {}\".format(float(2 * 10**308)))\n except OverflowError as e:\n print(\"float(2 * 10**308) is OverflowError: {}\".format(e))\n\n print()\n print(\"Maximum positive integer **307 to float overload...\")\n\n try:\n print(\"float(16 * 10**307) is {}\".format(float(16 * 10**307)))\n except OverflowError as e:\n print(\"float(16 * 10**307) is OverflowError: {}\".format(e))\n try:\n print(\"float(17 * 10**307) is {}\".format(float(17 * 10**307)))\n except OverflowError as e:\n print(\"float(17 * 10**307) is OverflowError: {}\".format(e))\n try:\n print(\"float(18 * 10**307) is {}\".format(float(18 * 10**307)))\n except OverflowError as e:\n print(\"float(18 * 10**307) is OverflowError: {}\".format(e))\n try:\n print(\"float(19 * 10**307) is {}\".format(float(19 * 10**307)))\n except OverflowError as e:\n print(\"float(19 * 10**307) is OverflowError: {}\".format(e))\n\n append_logfile(\"Completed Floating-Point\")\n\n if debug: print(\"Program is finished.\")\n append_logfile(\"Program {} finished.\".format(_program_))\n input(\"\\nPress Enter key to end program.\")\n sys.exit()\n # ===== end of main function =====",
"def task_086b():\n print(task_086b.__doc__.strip())\n positive_integer_input = input('Enter positive integer: ')\n\n if not positive_integer_input:\n print('You entered no characters.')\n elif is_positive_integer(positive_integer_input):\n digits_sum = get_sum_of_digits(positive_integer_input)\n print(f'The sum of all digits in {positive_integer_input} is {digits_sum}')\n else:\n print(f'\"{positive_integer_input}\" is not positive integer.')",
"def user_input_module():\r\n\r\n fcheck = \"no\"\r\n scheck = \"no\"\r\n last_check = \"no\"\r\n \r\n\r\n while last_check == \"no\" :\r\n while fcheck == \"no\" :\r\n fniput = input(\"Enter first number: \")\r\n if check_for_integer(fniput) == False: \r\n print(\"In order to add, the data type must be integer. So, please re-check and enter.\")\r\n else:\r\n fnumber = int(fniput)\r\n if fnumber > -1 and fnumber < 256 :\r\n fcheck = \"yes\"\r\n else:\r\n print(\"As we are using 8 bit adder, please bear in mind that only numbers between 0-255 is acceptable. So, please re-check and enter.\")\r\n while scheck == \"no\" :\r\n sinput = input(\"Enter second number: \")\r\n if check_for_integer(sinput) == False:\r\n print(\"In order to add, the data type must be integer. So, please re-check and enter.\")\r\n else:\r\n snumber = int(sinput)\r\n if snumber > -1 and snumber < 256 :\r\n scheck = \"yes\"\r\n else:\r\n print(\"As we are using 8 bit adder, please bear in mind that only numbers between 0-255 is acceptable. So, please re-check and enter.\")\r\n if (fnumber + snumber) > 255 :\r\n print(\"The sum of the two numbers inputted is greater than 255 which is not possible as we are using 8-bit adder. So, please re-check and enter\")\r\n fcheck = \"no\"\r\n scheck = \"no\"\r\n\r\n else:\r\n last_check = \"yes\"\r\n return[fnumber,snumber] # A list containing the inputted numbers is returned\r",
"def get_input():\n numb = int(input(\"Enter a number 1-10 \"))\n while True:\n if numb > 0 and numb < 10:\n return(numb)\n else:\n return(\"Please enter a value 1-10\")",
"def main():\n while True:\n # ysb\n vals = input().split(\" \")\n print (convert(vals[0], vals[1], float(vals[2])))",
"def main():\n user_input_name()\n user_input_age()\n choose_unit()\n user_input_weight()\n user_input_height()\n bmi_calculator()\n bmi_categories()\n restart_calculator()",
"def calculator():\r\n print(logo)\r\n num1 = float(input(\"Enter your first number: \"))\r\n for operand in calc_operand:\r\n print(operand)\r\n\r\n user_continue = False\r\n while not user_continue:\r\n calc_operation = input(\"Enter the operation: \")\r\n num2 = float(input(\"Enter your next number: \"))\r\n call_func = calc_operand[calc_operation]\r\n answer = call_func(num1, num2)\r\n print(f\"{num1} {calc_operation} {num2} = {answer}\")\r\n user_selection = input(\r\n f\"Type 'y' to continue calculation with {answer} or 'n' to start new one: \")\r\n\r\n if user_selection == \"y\":\r\n num1 = answer\r\n elif user_selection == \"n\":\r\n user_continue = True\r\n calculator()\r\n else:\r\n print(\"Invalid option. Please select valid input\")\r\n calculator()",
"def main():\n print(\"Choose your desired operator:\")\n print(\"1 to calculate hypotenuse \\n\"\n \"2 to add \\n\"\n \"3 to subtract \\n\"\n \"4 to multiply \\n\"\n \"5 to divide\")\n\n user_input = input(\"your choice: \")\n\n # check if input is an int from 1 to 5.\n while not user_input.isnumeric() or int(user_input) > 5 or int(user_input) < 1:\n print(\"\\ninvalid choice\")\n user_input = input(\"your choice: \")\n choice = int(user_input)\n\n a = float(input(\"enter first number: \"))\n b = float(input(\"enter second number: \"))\n\n # switch case using dictionary\n switcher = {\n 1: hypotenuse.calculate_hypotenuse(a, b),\n 2: sum(a, b),\n 3: subtract(a, b),\n 4: multiply(a, b),\n 5: divide(a, b)\n }\n answer = switcher.get(choice, \"invalid\")\n print(\"answer: {0}\".format(round(answer, 2)))",
"def main():\n user_input = user_input_state()\n check_user_input(user_input)",
"def main(ch):\n try:\n # Here is the search and launch of the selected function\n if ch == '1':\n result = fact(int(input(\"Factorial for \")))\n if ch == '2':\n result = exp2(float(input(\"Square exponention for \"))) \n if ch == '3':\n result = exp3(float(input(\"Cube exponention for \")))\n if ch == '4':\n result = root2(float(input(\"Square root for \")))\n if ch == '5':\n result = root3(float(input(\"Cube root for \")))\n if ch == '6':\n a = float(input(\"Enter a base for this logarithm: \"))\n b = float(input(\"Enter b in this logarithm: \"))\n result = log(a,b)\n if ch == '7':\n b = float(input(\"Enter b in this logarithm: \"))\n result = lg(b)\n if ch == '8':\n b = float(input(\"Enter b in this logarithm: \"))\n result = ln(b)\n # Here is output result\n print(\"Result:\",result)\n except ArithmeticError:\n print(\"Incorrect a values\")",
"def main_f():\n ph_number = read_number()\n if ph_number == -1:\n print('Incorrect number, try again')\n return\n res_l = find_let(ph_number, 0)\n output_result(res_l)",
"def Demo():\n print(\"Users input:\", GetInteger())\n print(\"Users input:\", GetInteger(lowerbound=-3, upperbound=10))\n input(\"Please press <Enter> to exit the demo.\")",
"def calculator():\n print(art.logo)\n # Changed 'int' to 'float' to do calculation for floating numbers as well\n num1 = float(input(\"Enter the first number : \"))\n end_calculation = False\n\n while not end_calculation:\n list_operators()\n operator = input(\"Pick an operation : \")\n num2 = float(input(\"Enter the next number : \"))\n calculation_fun = operations[operator]\n answer = round(calculation_fun(num1, num2), 2)\n print(f\"{num1} {operator} {num2} = {answer}\")\n\n wish_to_continue = input(\"Type 'Y' to Continue or Type 'N' to Exit : \").lower()\n if wish_to_continue == \"y\":\n num1 = answer\n else:\n # clear()\n end_calculation = True\n # recursive function call to restart the calculation freshly when user doesn't want to continue\n calculator()",
"def main_method():\r\n choice = 0\r\n precision = 0\r\n # loop to display menu and validate user's input\r\n while choice != 6:\r\n display_menu()\r\n choice = input(\"Enter choice(1-6):\")\r\n print(\"\\n\")\r\n\r\n # validate choice before casting to integer\r\n if choice.isdigit():\r\n choice = int(choice)\r\n\r\n if choice == 1:\r\n length, has_upper, has_lower, has_numbers, has_special_char, \\\r\n is_all_no = 0, \" \", \" \", \" \", \" \", False\r\n\r\n print(\"-- Generating Password --\")\r\n\r\n # Prompt user for password attribute's\r\n # And validate input\r\n while length < 10 or has_upper not in valid_statement or \\\r\n has_lower not in valid_statement or \\\r\n has_numbers not in valid_statement or \\\r\n has_special_char not in valid_statement or is_all_no:\r\n\r\n print(\r\n \"Length MUST be a number 10 or greater | ALL questions are \"\r\n \"'yes' or 'no' | At LEAST 1 yes required:\")\r\n length = input(\"Enter length of password (minimum 10):\")\r\n\r\n # Validate length is digit before casting to int\r\n if length.isdigit():\r\n length = int(length)\r\n else:\r\n length = 0\r\n\r\n # Prompt user for password complexity\r\n has_upper = input(\"Should password contain uppercase?\")\r\n has_lower = input(\"Should password contain lowercase?\")\r\n has_numbers = input(\"Should password contain numbers?\")\r\n has_special_char = input(\"Should password contain special characters?\")\r\n print(\"\\n\")\r\n\r\n # Boolean check if all answers are no\r\n # This would mean no characters to make password\r\n is_all_no = has_upper in no and has_lower in no and has_numbers in no \\\r\n and has_special_char in no\r\n\r\n # Data is valid so generate password\r\n choice_1(length, has_upper, has_lower, has_numbers, has_special_char)\r\n elif choice == 2:\r\n print(\"-- Calculate a Percentage --\")\r\n\r\n # Prompt user for numerator, denominator and decimal precision\r\n # NOTE: Validate numerator and denominator and precision are integers\r\n # NOTE: Validate denominator is NOT 0\r\n\r\n numerator, denominator, precision = 0, 0, 0\r\n while True:\r\n print(\"Only whole numbers accepted! 
| decimal precision must be positive!\")\r\n numerator = input(\"What is the numerator?\")\r\n denominator = input(\"What is the denominator?\")\r\n precision = input(\"How many decimal precision needed?\")\r\n print(\"\\n\")\r\n\r\n if numerator[0] == \"-\":\r\n numerator_sign = -1\r\n numerator = numerator[1:]\r\n else:\r\n numerator_sign = 1\r\n\r\n if denominator[0] == \"-\":\r\n denominator_sign = -1\r\n denominator = denominator[1:]\r\n else:\r\n denominator_sign = 1\r\n\r\n if numerator.isdigit() and denominator.isdigit() and \\\r\n precision.isdigit() and denominator != \"0\":\r\n numerator = int(numerator) * numerator_sign\r\n denominator = int(denominator) * denominator_sign\r\n precision = int(precision)\r\n break\r\n\r\n choice_2(numerator, denominator, precision)\r\n elif choice == 3:\r\n choice_3()\r\n elif choice == 4:\r\n print(\"-- Calculate Leg of a Triangle --\")\r\n\r\n side_ac, side_cb, angle_acb, precision = 0, 0, 0, 0\r\n # Prompt user for side AC\r\n # Prompt user for side CB\r\n # Prompt user for angle <ACB\r\n\r\n while True:\r\n print(\"All input must be a positive whole number!\")\r\n side_ac = input(\"Enter length for side AC:\")\r\n side_cb = input(\"Enter length for side CB:\")\r\n angle_acb = input(\"Enter angle for <ACB:\")\r\n precision = input(\"How many decimal precision needed?\")\r\n\r\n # Validate data entered are integers\r\n if side_ac.isdigit() and side_cb.isdigit() and angle_acb.isdigit() \\\r\n and precision.isdigit():\r\n side_ac = int(side_ac)\r\n side_cb = int(side_cb)\r\n angle_acb = int(angle_acb)\r\n precision = int(precision)\r\n break\r\n choice_4(side_ac, side_cb, angle_acb, precision)\r\n elif choice == 5:\r\n print(\"-- Volume of Right Circular Cylinder --\")\r\n\r\n radius, height, precision = 0, 0, 0\r\n\r\n while True:\r\n radius = input(\"Enter radius of cylinder:\")\r\n height = input(\"Enter height of cylinder:\")\r\n precision = input(\"Enter decimal precision for answer:\")\r\n\r\n if radius.isdigit() and height.isdigit() and precision.isdigit():\r\n radius = int(radius)\r\n height = int(height)\r\n precision = int(precision)\r\n break\r\n\r\n choice_5(radius, height, precision)\r\n elif choice == 6:\r\n print(\"Exiting program.\")\r\n else:\r\n print(\"Invalid choice. Must be a number (1 to 6)\")",
"def main():\n # initial user input\n inputs = [\"Dummy variable\"]\n terminal = Terminal()\n\n while(inputs[0] != \"exit\"):\n cwd = os.getcwd()\n inputs = user_input()\n\n if inputs[0] in terminal.dic:\n command = inputs[0]\n terminal.dic[command]()\n elif inputs[0] in terminal.dic_args_1:\n if len(inputs) == 2:\n command, arg = inputs\n terminal.dic_args_1[command](arg)",
"def main():\n hexToBinaryTable = {'0': '0000', '1': '0001', '2': '0010', '3': '0011', '4': '0100', '5': '0101', '6': '0110',\n '7': '0111', '8': '1000', '9': '1001', 'A': '1010', 'B': '1011', 'C': '1100', 'D': '1101',\n 'E': '1110', 'F': '1111'}\n print(hailStone(43))\n\n print(hexToBinary('ABC12', hexToBinaryTable))",
"def main():\n print()\n number = input(\"Enter the number to be converted (whole numbers only, < 4000): \")\n\n if float(number) >= 4000 or float(number) <= 0:\n print(\"That number is out of range!\")\n exit()\n print()\n print(\"{} is the same as {}\".format(number, convert_to_numerals(int(number))))\n print()",
"def inputZip() -> int:\n while True:\n try:\n return int(input(\"Enter your zipcode for concerts near you: \"))\n except ValueError:\n print(\"Input only accepts numbers.\")",
"def fibonacci_numbers():\n print(\"Problem: Fibonacci numbers\")\n\n n = int(input())\n\n result = fib(n)\n print(result)",
"def menu(total):\n print(\"What would you like to do?\")\n print(\"01. Addition\\n\"\n \"02. Subtraction\\n\"\n \"03. Multiplication\\n\"\n \"04. Division\\n\\n\"\n \"05. Trig\")\n if not total:\n print(\"06. Stats\\n\\n\"\n \"07. Create a database\\n\"\n \"08. Graph(data points)\\n\"\n \"09. Graph Coordinate Pairs\\n\"\n \"10. Graph Equation\")\n else:\n print(\"\\n0. Clear\"\n \"\\nTotal is:\", total)\n\n return int_input()"
] | [
"0.65855235",
"0.65636045",
"0.6491302",
"0.64777714",
"0.6328509",
"0.6162278",
"0.6159335",
"0.61103714",
"0.6071295",
"0.59733236",
"0.59465903",
"0.5880005",
"0.57767516",
"0.5758527",
"0.5752787",
"0.5737361",
"0.56870824",
"0.5684198",
"0.5615961",
"0.5587042",
"0.55698204",
"0.55693233",
"0.5538702",
"0.5525894",
"0.55109656",
"0.5463238",
"0.5406982",
"0.5376769",
"0.53755116",
"0.5365777"
] | 0.8094763 | 0 |
FPA object setup, whatever that is. The only info from the problem is that it is a silicon detector, and we have a graph of quantum efficiency vs wavelength. Based on the graph, the quantum efficiency at 1.06 um is ~50%. | def setup_fpa():
# it is a silicon detector. Based on the graph, the quantum efficiency
# at 1.06 um is ~50%.
fpa = {}
fpa["quantum_efficiency"] = 0.5
return fpa | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self):\n\n # self.threshold = 3.\n self.gamma_min = 3\n self.gamma_max = 12\n self.n_samples = 40\n # self.do_plots = False\n # self.do_albedo = True\n # self.verbose = True\n\n self.nbands = 7\n self.bu = np.array([0.004, 0.015, 0.003, 0.004, 0.013, 0.010, 0.006])\n\n # Determine 250 or 500 meters product\n # self.resolution = 500\n\n # self.pixelWidth = 500\n # self.pixelHeight = 500",
"def __init__(self, average_disparity, frame_down_factor, mem_down_factor,\n fovea_shape, frame_shape, values,\n verbose=False, memory_length=1, max_n_foveas=1, **bp_args):\n self.verbose = verbose\n self.use_uncertainty = False\n self.n_past_fovea = 0\n\n# self.frame_down_factor = frame_down_factor\n self.mem_down_factor = mem_down_factor\n self.frame_step = 2**frame_down_factor\n self.mem_step = 2**mem_down_factor #step size for uncertainty and importance calculations (pixels)\n\n self.average_disparity = downsample(\n average_disparity, down_factor=mem_down_factor)\n self.frame_shape = frame_shape\n self.fovea_shape = fovea_shape\n self.memory_shape = self.average_disparity.shape\n\n self.values = values\n self.max_n_foveas = max_n_foveas\n\n # self.params = {\n # 'data_weight': 0.16145115747533928, 'disc_max': 294.1504935618425,\n # 'data_max': 32.024780646200725, 'laplacian_ksize': 3} # original hyperopt\n # self.params = {\n # 'data_weight': 0.15109941436798274, 'disc_max': 44.43671813879002,\n # 'data_max': 68.407170602610137, 'laplacian_ksize': 5} # hyperopt on 100 images\n # self.params = {\n # 'data_weight': 0.2715404479972163, 'disc_max': 2.603682635476145,\n # 'data_max': 156312.43116792402, 'laplacian_ksize': 3} # Bryan's hyperopt on 250 images\n # self.params = {\n # 'data_weight': 1.2, 'disc_max': 924.0,\n # 'data_max': 189.0, 'laplacian_ksize': 5} # random\n # self.params = {\n # 'data_weight': 0.16145115747533928, 'disc_max': 294.1504935618425,\n # 'data_max': 32.024780646200725, 'laplacian_ksize': 3} # coarse\n self.params = {\n 'data_exp': 1.09821084614, 'data_max': 112.191597317,\n 'data_weight': 0.0139569211273, 'disc_max': 12.1301410452,\n 'laplacian_ksize': 3, 'smooth': 1.84510833504e-07}\n # self.params = {\n # 'data_exp': 14.2348581842, 'data_max': 79101007093.4,\n # 'data_weight': 0.000102496570364, 'disc_max': 4.93508276126,\n # 'laplacian_ksize': 5, 'laplacian_scale': 0.38937704644,\n # 'smooth': 0.00146126755993} # optimized for frame_down: 1, mem_down: 2, fovea_levels: 1\n\n self.params.update(bp_args)\n\n self.disparity_memory = DisparityMemory(self.memory_shape, n=memory_length)\n self.uncertainty_memory = DisparityMemory(self.memory_shape, n=memory_length)\n self.fovea_memory = DisparityMemory(frame_shape, fovea_shape=fovea_shape, n=self.n_past_fovea)\n\n self._uc = UnusuallyClose(self.average_disparity)",
"def __init__(self, folder):\n print \"folder passed is \", folder\n self.folder = folder\n self.geometry = gf.geometry(self.folder)\n self.elements = gf.dictionary_set()\n self.area = np.zeros(shape = (8))\n self.Vol = (self.geometry.properties['span_number']*(self.geometry.properties['span_width']*\n self.geometry.properties['span_height'] + self.geometry.properties['cover_height']\n *self.geometry.properties['span_width']/2))\n self.F = np.zeros(shape = (8, 8))\n of.view_factor(self.geometry, self.F, self.area, self.Vol)\n tran = [self.geometry.properties['tra_cover_out'],0.0,0.0,\n self.geometry.properties['tra_sidewall_out'],\n self.geometry.properties['tra_cover_in'],\n self.geometry.properties['tra_sidewall_in'],0.0,0.0]\n emi = [self.geometry.properties['emi_cover_out'],1.0,1.0,\n self.geometry.properties['emi_sidewall_out'],\n self.geometry.properties['emi_cover_in'],\n self.geometry.properties['emi_sidewall_in'],1.0,1.0] \n self.tr, self.em, self.re = of.optictal_prop(tran,emi)\n if ((self.tr + self.em).any() > 1.0):\n print \"error in optical properties\"\n self.T = np.zeros(shape = (2,10))\n self.RH = np.zeros(shape = (2,10))\n # 8 inside,9 outside \n self.qcond = np.zeros(shape = (2,8))\n self.qconv = np.zeros(shape = (2,8))\n self.qrad = np.zeros(shape = (2,8))\n self.j = np.zeros(shape = (2,8))\n self.g = np.zeros(shape = (2,8))\n self.alpha = np.zeros(shape = (2,8))\n deltaT = 300\n RH_in = 0.6\n fg.set_initial_conditions(self.geometry.properties['t_air_inside'],\n 278,\n RH_in,self.T,self.RH , self.geometry.properties['t_air'],self.g,\n self.geometry.properties['sky_temp'])\n self.T, self.j, self.g, self.alpha, self.qrad, self.qconv = fg.solver_T(self.T,self.qrad,self.qconv,self.alpha,self.j,self.g,self.em,self.tr,\n self.geometry.properties['wind_speed'],\n self.F,self.geometry.properties['heat_flux'],1,1.0,self.area,\n self.geometry.properties['rho'],self.geometry.properties['cp'],\n self.Vol,self.geometry.properties['degree_window'],deltaT)",
"def __init__(self, options, imgs, frq_sim_guess, otf=None,\n wiener_parameter=1, fbounds=(0.01, 1), fbounds_shift=(0.01, 1),\n use_wicker=True, normalize_histograms=True, background_counts=100,\n do_global_phase_correction=True, determine_amplitudes=False, find_frq_first=True,\n default_to_guess_on_bad_phase_fit=True, max_phase_err=20*np.pi/180,\n default_to_guess_on_low_mcnr=True, min_mcnr=1,\n size_near_fo_to_remove=0,\n phases_guess=None, mod_depths_guess=None, pspec_params_guess=None,\n use_fixed_phase=False, use_fixed_frq=False, use_fixed_mod_depths=False,\n plot_diagnostics=True, interactive_plotting=False, save_dir=None, figsize=(20, 10)):\n # #############################################\n # saving information\n # #############################################\n self.save_dir = save_dir\n self.hold_figs_open = False\n self.figsize = figsize\n\n if self.save_dir is not None:\n self.log_file = open(os.path.join(self.save_dir, \"sim_log.txt\"), 'w')\n else:\n self.log_file = None\n\n # #############################################\n # setup plotting\n # #############################################\n if not interactive_plotting:\n plt.ioff()\n plt.switch_backend(\"agg\")\n\n # #############################################\n # analysis settings\n # #############################################\n self.wiener_parameter = wiener_parameter\n self.use_wicker = use_wicker\n self.global_phase_correction = do_global_phase_correction\n self.normalize_histograms = normalize_histograms\n self.size_near_fo_to_remove = size_near_fo_to_remove\n self.default_to_guess_on_bad_phase_fit = default_to_guess_on_bad_phase_fit\n self.max_phase_error = max_phase_err\n self.default_to_guess_on_low_mcnr = default_to_guess_on_low_mcnr\n self.min_mcnr = min_mcnr\n self.determine_amplitudes = determine_amplitudes\n self.use_fixed_phase = use_fixed_phase\n self.use_fixed_frq = use_fixed_frq\n self.use_fixed_mod_depths = use_fixed_mod_depths\n self.find_frq_first = find_frq_first\n self.plot_diagnostics = plot_diagnostics\n\n # #############################################\n # images\n # #############################################\n self.background_counts = background_counts\n self.imgs = imgs.astype(np.float64)\n self.nangles, self.nphases, self.ny, self.nx = imgs.shape\n \n # #############################################\n # get basic parameters\n # #############################################\n self.dx = options['pixel_size']\n self.dy = options['pixel_size']\n self.na = options['na']\n self.wavelength = options['wavelength']\n\n self.fmax = 1 / (0.5 * self.wavelength / self.na)\n self.fbounds = fbounds\n self.fbounds_shift = fbounds_shift\n\n self.frqs_guess = frq_sim_guess\n self.phases_guess = phases_guess\n self.mod_depths_guess = mod_depths_guess\n self.power_spectrum_params_guess = pspec_params_guess\n\n # #############################################\n # get frequency data and OTF\n # #############################################\n self.fx = tools.get_fft_frqs(self.nx, self.dx)\n self.fy = tools.get_fft_frqs(self.ny, self.dy)\n\n if otf is None:\n otf = psf.circ_aperture_otf(self.fx[None, :], self.fy[:, None], self.na, self.wavelength)\n self.otf = otf\n\n # #############################################\n # print current time\n # #############################################\n now = datetime.datetime.now()\n\n self.print_tee(\"####################################################################################\", self.log_file)\n self.print_tee(\"%d/%02d/%02d %02d:%02d:%02d\" % (now.year, now.month, 
now.day, now.hour, now.minute, now.second), self.log_file)\n self.print_tee(\"####################################################################################\", self.log_file)\n\n # #############################################\n # normalize histograms for input images\n # #############################################\n if self.normalize_histograms:\n tstart = time.process_time()\n\n for ii in range(self.nangles):\n for jj in range(1, self.nphases):\n self.imgs[ii, jj] = match_histograms(self.imgs[ii, jj], self.imgs[ii, 0])\n\n tend = time.process_time()\n self.print_tee(\"Normalizing histograms took %0.2fs\" % (tend - tstart), self.log_file)\n\n # #############################################\n # remove background\n # #############################################\n self.imgs = self.imgs - self.background_counts\n self.imgs[self.imgs <= 0] = 1e-12\n\n # #############################################\n # Fourier transform SIM images\n # #############################################\n tstart = time.process_time()\n\n self.imgs_ft = np.zeros((self.nangles, self.nphases, self.ny, self.nx), dtype=np.complex)\n for jj in range(self.nangles):\n for kk in range(self.nphases):\n # use periodic/smooth decomposition instead of traditional apodization\n img_to_xform, _ = psd.periodic_smooth_decomp(self.imgs[jj, kk])\n self.imgs_ft[jj, kk] = fft.fftshift(fft.fft2(fft.ifftshift(img_to_xform)))\n\n tend = time.process_time()\n\n self.print_tee(\"FT images took %0.2fs\" % (tend - tstart), self.log_file)\n\n # #############################################\n # get widefield image\n # #############################################\n tstart = time.process_time()\n\n self.widefield = get_widefield(self.imgs)\n wf_to_xform, _ = psd.periodic_smooth_decomp(self.widefield)\n self.widefield_ft = fft.fftshift(fft.fft2(fft.ifftshift(wf_to_xform)))\n\n tend = time.process_time()\n self.print_tee(\"Computing widefield image took %0.2fs\" % (tend - tstart), self.log_file)\n\n # #############################################\n # get optically sectioned image\n # #############################################\n tstart = time.process_time()\n\n sim_os = np.zeros((self.nangles, self.imgs.shape[-2], self.imgs.shape[-1]))\n for ii in range(self.nangles):\n sim_os[ii] = sim_optical_section(self.imgs[ii])\n # todo: maybe want to weight by power/mod depth?\n self.imgs_os = np.mean(sim_os, axis=0)\n\n tend = time.process_time()\n self.print_tee(\"Computing OS image took %0.2fs\" % (tend - tstart), self.log_file)",
"def main():\n print(\"Program version: 1.5\")\n StartTime = datetime.now()\n args = parseArguments()\n\n verbose = args.verbose\n images = args.images\n ignore_warnings = args.ignore_warnings\n if(args.silent):\n verbose = False\n images = False\n ignore_warnings = True\n\n if(args.images):\n plt.ioff()\n\n if(args.ignore_warnings):\n warnings.simplefilter('ignore', UserWarning)\n\n #sample header keywords\n # OBJECT = 'P016+03_P1_JKdeep' / Original target\n # RA = ' 01:06:37.759' / 01:06:37.7 RA (J2000) pointing\n # DEC = ' 03:32:36.096' / 03:32:36.0 DEC (J2000) pointing\n # EQUINOX = 2000. / Standard FK5 (years)\n # RADECSYS= 'FK5 ' / Coordinate reference frame\n # CRVAL1 = 16.65733 / 01:06:37.7, RA at ref pixel\n # CRVAL2 = 3.54336 / 03:32:36.0, DEC at ref pixel\n # CRPIX1 = 447. /Ref pixel in X\n # CRPIX2 = 452. / Ref pixel in Y\n # CDELT1 = -8.0000000000000E-5 / SS arcsec per pixel in RA\n # CDELT2 = 8.00000000000003E-5 / SS arcsec per pixel in DEC\n # CTYPE1 = 'RA---TAN' / pixel coordinate system\n # CTYPE2 = 'DEC--TAN' / pixel coordinate system\n # PC1_1 = 0.000000 / Translation matrix element\n # PC1_2 = 1.000000 / Translation matrix element\n # PC2_1 = -1.000000 / Translation matrix element\n # PC2_2 = 0.000000 / Translation matrix element\n\n fits_image_filenames = args.input\n\n #if directory given search for appropriate fits files\n\n if(os.path.isdir(fits_image_filenames[0])):\n print(\"detected a directory. Will search for fits files in it\")\n path = fits_image_filenames[0]\n fits_image_filenames = []\n for file in os.listdir(path):\n if file.endswith(\".fits\") and \"_astro\" not in file:\n fits_image_filenames.append(path+\"/\"+file)\n print(fits_image_filenames)\n\n multiple = False\n if(len(fits_image_filenames)>1):\n multiple = True\n not_converged = []\n converged_counter = 0\n for fits_image_filename in fits_image_filenames:\n\n result,_ = astrometry_script(fits_image_filename, catalog=args.catalog, rotation_scaling=0, xy_transformation=args.xy_transformation, fine_transformation=args.fine_transformation,\n images=images, vignette=args.vignette,vignette_rectangular=args.vignette_rectangular, cutouts=args.cutout, ra=args.ra, dec=args.dec, projection_ra=args.projection_ra, projection_dec=args.projection_dec, verbose=verbose, save_images=args.save_images, ignore_header_rot=args.ignore_header_rot, radius = args.radius, save_bad_result=args.save_bad_result, silent =args.silent, sigma_threshold_for_source_detection= args.sigma_threshold_for_source_detection, high_res=args.high_resolution, hdul_idx=args.hdul_idx, filename_for_sources=args.filename_for_sources, FWHM=args.seeing)\n\n if((not result) and args.rotation_scaling):\n print(\"Did not converge. 
Will try again with full rotation and scaling\")\n result, _ = astrometry_script(fits_image_filename, catalog=args.catalog, rotation_scaling=args.rotation_scaling, xy_transformation=args.xy_transformation, fine_transformation=args.fine_transformation,\n images=images, vignette=args.vignette,vignette_rectangular=args.vignette_rectangular, cutouts=args.cutout, ra=args.ra, dec=args.dec, projection_ra=args.projection_ra, projection_dec=args.projection_dec, verbose=verbose, save_images=args.save_images, ignore_header_rot=args.ignore_header_rot, radius = args.radius, save_bad_result=args.save_bad_result, silent=args.silent, sigma_threshold_for_source_detection=args.sigma_threshold_for_source_detection, high_res=args.high_resolution, hdul_idx=args.hdul_idx, filename_for_sources=args.filename_for_sources, FWHM=args.seeing)\n\n if(result):\n print(\"Astrometry was determined to be good.\")\n converged_counter = converged_counter+1\n else:\n print(\"Astrometry was determined to be bad.\")\n not_converged.append(fits_image_filename)\n if(args.save_bad_result):\n print(\"Result was saved anyway\")\n else:\n print(\"Result was not saved.\")\n # print(\"\")\n # print(\">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\")\n # print(\"> Astrometry for {} \".format(fits_image_filename))\n #\n # with fits.open(fits_image_filename) as hdul:\n # #print(hdul.info())\n # if(args.verbose):\n # print(\"if image is not at first position in the fits file the program will break later on\")\n # #print(hdul[0].header)\n #\n # hdu = hdul[0]\n # #hdu.verify('fix')\n # hdr = hdu.header\n #\n #\n # image_or = hdul[0].data.astype(float)\n # median = np.nanmedian(image_or)\n # image_or[np.isnan(image_or)]=median\n # image = image_or - median\n #\n # observation = find_sources(image, args.vignette)\n # #print(observation)\n #\n # positions = (observation['xcenter'], observation['ycenter'])\n # apertures = CircularAperture(positions, r=4.)\n #\n #\n # #world coordinates\n # print(\">Info found in the file -- (CRVAl: position of central pixel (CRPIX) on the sky)\")\n # print(WCS(hdr))\n #\n # hdr[\"NAXIS1\"] = image.shape[0]\n # hdr[\"NAXIS2\"] = image.shape[1]\n #\n # #wcsprm = Wcsprm(hdr.tostring().encode('utf-8')) #everything else gave me errors with python 3, seemed to make problems with pc conversios, so i wwitched to the form below\n # wcsprm = WCS(hdr).wcs\n # wcsprm_original = WCS(hdr).wcs\n # if(args.verbose):\n # print(WCS(wcsprm.to_header()))\n # wcsprm, fov_radius, INCREASE_FOV_FLAG, PIXSCALE_UNCLEAR = read_additional_info_from_header(wcsprm, hdr, args.ra, args.dec, args.projection_ra, args.projection_dec)\n # if(args.verbose):\n # print(WCS(wcsprm.to_header()))\n #\n # #print(wcsprm)\n # #wcsprm.pc = [[2, 0],[0,1]]\n #\n #\n # #Possibly usefull examples of how to use wcsprm:\n # #print(wcsprm.set())\n # #print(wcsprm.get_pc())\n # #pc = wcsprm.get_pc()\n # #print(np.linalg.det(pc))\n # #print(wcsprm.get_cdelt())\n # #wcs.fix()\n # #print(wcsprm.print_contents())\n # #print(repr(hdr.update(wcsprm.to_header().encode('utf-8')))) #not working\n #\n # #hdu.verify(\"fix\")\n # #print(repr(hdr))\n # #wcs.wcs_pix2world(pixcrd, 1)\n # #wcs.wcs_world2pix(world, 1)\n # #wcs.wcs.crpix = [-234.75, 8.3393]\n # # wcs.wcs.cdelt = np.array([-0.066667, 0.066667])\n # # wcs.wcs.crval = [0, -90]\n # # wcs.wcs.ctype = [\"RA---AIR\", \"DEC--AIR\"]\n # # wcs.wcs.set_pv([(2, 1, 45.0)])\n # # For historical compatibility, three alternate specifications of the linear transformations\n # # are available in wcslib. 
The canonical PCi_ja with CDELTia, CDi_ja, and the deprecated CROTAia\n # # keywords. Although the latter may not formally co-exist with PCi_ja,\n # # the approach here is simply to ignore them if given in conjunction with PCi_ja.\n # # has_pc, has_cd and has_crota can be used to determine which of these alternatives are present in the header.\n # # These alternate specifications of the linear transformation matrix are translated immediately to PCi_ja by set\n # # and are nowhere visible to the lower-level routines. In particular, set resets cdelt to unity if CDi_ja is present\n # # (and no PCi_ja). If no CROTAia is associated with the latitude axis, set reverts to a unity PCi_ja matrix.\n #\n #\n #\n #\n #\n # #get rough coordinates\n # #print(hdr[\"RA\"])\n # #coord = SkyCoord(hdr[\"RA\"], hdr[\"DEC\"], unit=(u.hourangle, u.deg), frame=\"icrs\")\n # coord = SkyCoord(wcsprm.crval[0], wcsprm.crval[1], unit=(u.deg, u.deg), frame=\"icrs\")\n # if(not PIXSCALE_UNCLEAR):\n # if(wcsprm.crpix[0] < 0 or wcsprm.crpix[1] < 0 or wcsprm.crpix[0] > image.shape[0] or wcsprm.crpix[1] > image.shape[1] ):\n # print(\"central value outside of the image, moving it to the center\")\n # coord_radec = wcsprm.p2s([[image.shape[0]/2, image.shape[1]/2]], 0)[\"world\"][0]\n # coord = SkyCoord(coord_radec[0], coord_radec[1], unit=(u.deg, u.deg), frame=\"icrs\")\n # #print(wcsprm)\n #\n #\n #\n # #better: put in nice wrapper! with repeated tries and maybe try synchron!\n # print(\">Dowloading catalog data\")\n # radius = u.Quantity(fov_radius, u.arcmin)#will prob need more\n # catalog_data = query.get_data(coord, radius, args.catalog)\n # #reference = reference.query(\"mag <20\")\n # max_sources = 500\n # if(INCREASE_FOV_FLAG):\n # max_sources= max_sources*2.25 #1.5 times the radius, so 2.25 the area\n # if(catalog_data.shape[0]>max_sources):\n # catalog_data = catalog_data.nsmallest(400, \"mag\")\n #\n # if(args.catalog == \"GAIA\" and catalog_data.shape[0] < 5):\n # print(\"GAIA seems to not have enough objects, will enhance with PS1\")\n # catalog_data2 = query.get_data(coord, radius, \"PS\")\n # catalog_data = pd.concat([catalog_data, catalog_data2])\n # #apertures_catalog = CircularAperture(wcs.wcs_world2pix(catalog_data[[\"ra\", \"dec\"]], 1), r=5.)\n # print(\"Now we have a total of {} sources. Keep in mind that there might be duplicates now since we combined 2 catalogs\".format(catalog_data.shape[0]))\n # elif(args.catalog == \"PS\" and (catalog_data is None or catalog_data.shape[0] < 5)):\n # print(\"We seem to be outside the PS footprint, enhance with GAIA data\")\n # catalog_data2 = query.get_data(coord, radius, \"GAIA\")\n # catalog_data = pd.concat([catalog_data, catalog_data2])\n # #apertures_catalog = CircularAperture(wcs.wcs_world2pix(catalog_data[[\"ra\", \"dec\"]], 1), r=5.)\n # print(\"Now we have a total of {} sources. 
Keep in mind that there might be duplicates now since we combined 2 catalogs\".format(catalog_data.shape[0]))\n #\n # #remove duplicates in catalog?\n #\n # apertures_catalog = CircularAperture(wcsprm.s2p(catalog_data[[\"ra\", \"dec\"]], 1)['pixcrd'], r=5.)\n #\n #\n # #plotting what we have, I keep it in the detector field, world coordinates are more painfull to plot\n # if(args.images):\n # fig = plt.figure()\n # fig.canvas.set_window_title('Input for {}'.format(fits_image_filename))\n # plt.xlabel(\"pixel x direction\")\n # plt.ylabel(\"pixel y direction\")\n # plt.title(\"Input - red: catalog sources, blue: detected sources in img\")\n # plt.imshow(image,cmap='Greys', origin='lower', norm=LogNorm())\n # apertures.plot(color='blue', lw=1.5, alpha=0.5)\n # apertures_catalog.plot(color='red', lw=1.5, alpha=0.5)\n #\n # plt.xlim(-200,image.shape[0]+200)\n # plt.ylim(-200,image.shape[1]+200)\n # if(args.save_images):\n # name_parts = fits_image_filename.rsplit('.', 1)\n # plt.savefig(name_parts[0]+\"_image_before.pdf\")\n #\n # ###tranforming to match the sources\n # print(\"---------------------------------\")\n # print(\">Finding the transformation\")\n # if(args.rotation_scaling):\n # print(\"Finding scaling and rotation\")\n # wcsprm = register.get_scaling_and_rotation(observation, catalog_data, wcsprm, scale_guessed=PIXSCALE_UNCLEAR, verbose=args.verbose)\n # if(args.xy_transformation):\n # print(\"Finding offset\")\n # wcsprm,_,_ = register.offset_with_orientation(observation, catalog_data, wcsprm, fast=False , INCREASE_FOV_FLAG=INCREASE_FOV_FLAG, verbose= args.verbose)\n #\n # #correct subpixel error\n # obs_x, obs_y, cat_x, cat_y, distances = register.find_matches(observation, catalog_data, wcsprm, threshold=3)\n # rms = np.sqrt(np.mean(np.square(distances)))\n # best_score = len(obs_x)/(rms+10) #start with current best score\n # fine_transformation = False\n # if(args.fine_transformation):\n # for i in [2,3,5,8,10,6,4, 20,2,1,0.5]:\n # wcsprm_new, score = register.fine_transformation(observation, catalog_data, wcsprm, threshold=i)\n # if(score> best_score):\n # wcsprm = wcsprm_new\n # best_score = score\n # fine_transformation = True\n # if not fine_transformation:\n # print(\"Fine transformation did not improve result so will be discarded.\")\n # else:\n # print(\"Fine transformation applied to improve result\")\n # #register.calculate_rms(observation, catalog_data,wcs)\n #\n # #make wcsprim more physical by moving scaling to cdelt, out of the pc matrix\n # wcs =WCS(wcsprm.to_header())\n # if(args.verbose):\n # print(wcs)\n #\n # from astropy.wcs import utils\n # scales = utils.proj_plane_pixel_scales(wcs)\n # print(scales)\n # cdelt = wcsprm.get_cdelt()\n # print(cdelt)\n # scale_ratio = scales/cdelt\n # #print(scale_ratio)\n # pc = np.array(wcsprm.get_pc())\n # pc[0,0] = pc[0,0]/scale_ratio[0]\n # pc[1,0] = pc[1,0]/scale_ratio[1]\n # pc[0,1] = pc[0,1]/scale_ratio[0]\n # pc[1,1] = pc[1,1]/scale_ratio[1]\n # wcsprm.pc = pc\n # wcsprm.cdelt = scales\n # if(args.verbose):\n # print(\"moved scaling info to CDelt\")\n # print(WCS(wcsprm.to_header()))\n #\n # #WCS difference before and after\n # print(\"> Compared to the input the Wcs was changed by: \")\n # scales_original = utils.proj_plane_pixel_scales(WCS(hdr))\n # print(\"WCS got scaled by {} in x direction and {} in y direction\".format(scales[0]/scales_original[0], scales[1]/scales_original[1]))\n # #sources:\n # #https://math.stackexchange.com/questions/2113634/comparing-two-rotation-matrices\n # 
#https://stackoverflow.com/questions/2827393/angles-between-two-n-dimensional-vectors-in-python/13849249#13849249\n # def unit_vector(vector):\n # \"\"\" Returns the unit vector of the vector. \"\"\"\n # return vector / max(np.linalg.norm(vector), 1e-10)\n # def matrix_angle( B, A ):\n # \"\"\" comment cos between vectors or matrices \"\"\"\n # Aflat = A.reshape(-1)\n # Aflat = unit_vector(Aflat)\n # Bflat = B.reshape(-1)\n # Bflat = unit_vector(Bflat)\n # #return np.arccos((np.dot( Aflat, Bflat ) / max( np.linalg.norm(Aflat) * np.linalg.norm(Bflat), 1e-10 )))\n # return np.arccos(np.clip(np.dot(Aflat, Bflat), -1.0, 1.0))\n # #print(matrix_angle(wcsprm.get_pc(), wcsprm_original.get_pc()) /2/np.pi*360)\n # rotation_angle = matrix_angle(wcsprm.get_pc(), wcsprm_original.get_pc()) /2/np.pi*360\n # if((wcsprm.get_pc() @ wcsprm_original.get_pc() )[0,1] > 0):\n # text = \"counterclockwise\"\n # else:\n # text = \"clockwise\"\n # print(\"Rotation of WCS by an angle of {} deg \".format(rotation_angle)+text)\n # old_central_pixel = wcsprm_original.s2p([wcsprm.crval], 0)[\"pixcrd\"][0]\n # print(\"x offset: {} px, y offset: {} px \".format(wcsprm.crpix[0]- old_central_pixel[0], wcsprm.crpix[1]- old_central_pixel[1]))\n #\n #\n # #check final figure\n # if(args.images):\n # fig = plt.figure()\n # fig.canvas.set_window_title('Result for {}'.format(fits_image_filename))\n # plt.xlabel(\"pixel x direction\")\n # plt.ylabel(\"pixel y direction\")\n # plt.title(\"Result - red: catalog sources, blue: detected sources in img\")\n # plt.imshow(image,cmap='Greys', origin='lower', norm=LogNorm())\n # apertures.plot(color='blue', lw=1.5, alpha=0.5)\n # #apertures_catalog = CircularAperture(wcs.wcs_world2pix(catalog_data[[\"ra\", \"dec\"]], 1), r=5.)\n # apertures_catalog = CircularAperture(wcsprm.s2p(catalog_data[[\"ra\", \"dec\"]], 1)['pixcrd'], r=5.)\n #\n # apertures_catalog.plot(color='red', lw=1.5, alpha=0.5)\n # if(args.save_images):\n # name_parts = fits_image_filename.rsplit('.', 1)\n # plt.savefig(name_parts[0]+\"_image_after.pdf\")\n #\n # print(\"--- Evaluate how good the transformation is ----\")\n # register.calculate_rms(observation, catalog_data,wcsprm)\n #\n #\n # #updating file\n # write_wcs_to_hdr(fits_image_filename, wcsprm)\n #\n #\n # print(\"overall time taken\")\n # print(datetime.now()-StartTime)\n # if(args.images):\n # plt.show()\n if(multiple):\n print(\">> Final report:\")\n print(\"Processed {} files, {} of them did converge. The following files failed:\".format(len(fits_image_filenames), converged_counter))\n print(not_converged)\n print(\"-- finished --\")",
"def main():\n\n #\n # Generate waveform\n #\n\n print 'generating waveoform...'\n waveform = pmns_utils.Waveform('shen_135135_lessvisc')\n\n # Pick some extrinsic parameters\n ext_params = ExtParams(distance=1, ra=0.0, dec=0.0, polarization=0.0,\n inclination=0.0, phase=0.0, geocent_peak_time=0.0+5.0)\n\n # Construct the time series for these params\n waveform.make_wf_timeseries(theta=ext_params.inclination,\n phi=ext_params.phase)\n\n #\n # Generate IFO data\n #\n det1_data = DetData(waveform=waveform, ext_params=ext_params)\n\n from scipy import signal\n import pylab as pl\n\n pl.figure()\n pl.plot(det1_data.td_response.sample_times,det1_data.td_response.data)\n pl.plot(det1_data.td_signal.sample_times,det1_data.td_signal.data)\n\n pl.figure()\n f,p = signal.welch(det1_data.td_response.data, fs=1./det1_data.delta_t,\n nperseg=512)\n pl.loglog(f,np.sqrt(p))\n\n f,p = signal.welch(det1_data.td_signal.data, fs=1./det1_data.delta_t,\n nperseg=512)\n pl.loglog(f,np.sqrt(p))\n pl.ylim(1e-25,1e-21)\n pl.show()",
"def __init__(self, MRIObj, pRFModelObj = None, FAModelObj = None,\n pRF_data = [], FA_data = [],\n prf_dm = [], max_ecc_ext = 5.5,\n pysub = 'hcp_999999', flatmap_height = 2048, full_figsize = (12, 8)):\n\n # set data object to use later on\n self.MRIObj = MRIObj\n\n # Load pRF and model object\n self.pRFModelObj = pRFModelObj\n self.FAModelObj = FAModelObj\n\n ## data to be plotted \n self.pRF_data = pRF_data\n self.FA_data = FA_data\n\n ## figure settings\n self.flatmap_height = flatmap_height\n self.full_figsize = full_figsize\n self.images = {}\n \n ## create pycortex vars\n self.mask, extents = cortex.quickflat.utils.get_flatmask(pysub, height = self.flatmap_height)\n self.vc = cortex.quickflat.utils._make_vertex_cache(pysub, height = self.flatmap_height)\n\n self.mask_index = np.zeros(self.mask.shape)\n self.mask_index[self.mask] = np.arange(self.mask.sum())\n\n # set prf dm\n self.prf_dm = prf_dm\n\n ## set grid of possible points in downsampled space\n self.point_grid_2D = np.array(np.meshgrid(np.linspace(-1, 1, prf_dm.shape[0]) * max_ecc_ext,\n np.linspace(1, -1, prf_dm.shape[0]) * max_ecc_ext))",
"def __init__(self):\n self.eps = 1e-5\n self.use_global_stats = True\n self.workspace = 512\n self.units = (3, 4, 23, 3) # use for 101\n self.filter_list = [256, 512, 1024, 2048]",
"def setup(self):\n super().setup()\n\n # prepare scratch directory\n unix.mkdir(PATH.ORTHO)\n\n # get data file names from solver\n solver = sys.modules['seisflows_solver']\n\n nevt = PAR.NEVT # number of encoded sources\n ntpss = PAR.NTPSS # number of timesteps after steady state\n dt = PAR.DT # total number of timesteps\n nrec = PAR.NREC # number of stations\n # ntrace = len(solver.data_filenames)\n freq_min = float(PAR.FREQ_MIN) # minimium frequency of interest\n freq_max = float(PAR.FREQ_MAX) # maximium frequency of interest\n \n #create a mask on relevant frequencies\n freq_full = fftfreq(ntpss, dt) # full frequency compunent\n freq_thresh = 1 / (ntpss * dt) / 200 # threshold for frequency alignment\n freq_idx = np.squeeze(np.where((freq_min <= (freq_full)) & ((freq_full) < freq_max - freq_thresh))) # frequency band of interest\n freq = freq_full[freq_idx] # index of frequencies within the frequency band\n nfreq = len(freq_idx) # number of frequency within the frequency band\n print('Number of frequencies considered: ' +str(nfreq)+' / '+str(len(freq_full)))\n\n # converts time data to Fourier domain\n sff_obs = np.zeros((nfreq, nevt), dtype=complex) # fourier transform of observed source time function\n ft_obs = np.zeros((nfreq, nevt, nrec), dtype=complex) # TODO ntrace fourier transform of observed seismogram\n\n for isrc in range(nevt):\n source_name = solver.source_names_all[isrc] # name of source\n stf_file = solver.stf_files_all[isrc] # name of source file\n with open(stf_file) as f:\n lines = f.readlines()\n stf_obs = []\n for line in lines:\n stf_obs.append(float(line.split()[1]))\n\n sff_obs[:, isrc] = fft(stf_obs, n=ntpss)[freq_idx]\n # for itrace in range(ntrace):\n # trace = self.reader(PATH.DATA + '/' + source_name, solver.data_filenames[itrace])\n # for irec in range(nrec):\n # ft_obs[:, isrc, irec, itrace] = fft(trace[irec].data, n=ntpss)[freq_idx]\n for irec in range(nrec):\n trace = self.reader(PATH.DATA + '/' + source_name, solver.data_filenames[0])\n ft_obs[:, isrc, irec] = fft(trace[irec].data, n=ntpss)[freq_idx]\n \n self.save('freq_idx', freq_idx)\n self.save('freq', freq)\n self.save('sff_obs', sff_obs)\n self.save('ft_obs', ft_obs)",
"def __init__(self):\n self.__deviceselected__ = \"SR-DMS4AP{LOCALBUMP}DEV:Sel-SP\"\n self.__source__ = \"SR-DMS4AP{LOCALBUMP}S-SP\"\n self.__plane__ = \"SR-DMS4AP{LOCALBUMP}PLANE-SP\"\n #self.__xshift__ = \"SR-DMS4AP{LOCALBUMP}SHIFT:X-SP\"\n #self.__yshift__ = \"SR-DMS4AP{LOCALBUMP}SHIFT:Y-SP\"\n #self.__xangle__ = \"SR-DMS4AP{LOCALBUMP}ANGLE:X-SP\"\n #self.__yangle__ = \"SR-DMS4AP{LOCALBUMP}ANGLE:Y-SP\"\n self.__shift__ = \"SR-DMS4AP{LOCALBUMP}SHIFT-SP\"\n self.__angle__ = \"SR-DMS4AP{LOCALBUMP}ANGLE-SP\"\n # with all offsets\n self.__anglerb__ = \"SR-DMS4AP{LOCALBUMP}ANGLE-I\"\n self.__positionrb__ = \"SR-DMS4AP{LOCALBUMP}POS-I\"\n # with BBA offset only\n self.__anglerb0__ = \"SR-DMS4AP{LOCALBUMP}ANGLE:BBA-I\"\n self.__positionrb0__ = \"SR-DMS4AP{LOCALBUMP}POS:BBA-I\"\n\n self.__bpmposition__ = \"SR-DMS4AP{LOCALBUMP:BPM}Pos-I\"\n self.__bpmorbitx__ = \"SR-DMS4AP{LOCALBUMP:BPM}ORB:X-I\"\n self.__bpmorbity__ = \"SR-DMS4AP{LOCALBUMP:BPM}ORB:Y-I\"\n self.__bpmorbitx0__ = \"SR-DMS4AP{LOCALBUMP:BPM}ORB:X0-I\"\n self.__bpmorbity0__ = \"SR-DMS4AP{LOCALBUMP:BPM}ORB:Y0-I\"\n\n self.__correctorposition__ = \"SR-DMS4AP{LOCALBUMP:COR}Pos-I\"\n self.__hcorrectorcurrent__ = \"SR-DMS4AP{LOCALBUMP:HCOR}PS-SP\"\n self.__hcorrectordiff__ = \"SR-DMS4AP{LOCALBUMP:HCOR}PS:Delta-SP\"\n self.__vcorrectorcurrent__ = \"SR-DMS4AP{LOCALBUMP:VCOR}PS-SP\"\n self.__vcorrectordiff__ = \"SR-DMS4AP{LOCALBUMP:VCOR}PS:Delta-SP\"\n\n self.__undo__ = \"SR-DMS4AP{LOCALBUMP}Enbl:Undo-Cmd\"\n self.__apply__ = \"SR-DMS4AP{LOCALBUMP}Enbl-Cmd\"\n self.__status__ = \"SR-DMS4AP{LOCALBUMP}TS-I\"\n self.__idposinfo__ = \"SR-DMS4AP{LOCALBUMP}S-I\"\n self.__srcposition__ = \"SR-DMS4AP{LOCALBUMP}SRC-SP\"",
"def _radioPointingSetup( aperture, ants ): \n if aperture == RX1MM:\n apString = \"Aperture1mm\"\n elif aperture == RX3MM:\n apString = \"Aperture3mm\"\n elif aperture == RX1CM:\n apString = \"Aperture1cm\"\n else:\n raise Exception, \"Invalid aperture.\"\n\n ants = makeAntList(ants)\n mpList = []\n for ant in ants:\n prefix = \"Control.Antenna%d.%s.PointingConstants.\" % (ant, apString)\n mpNames = [ prefix + mp for mp in [\"azOffset\", \"elOffset\", \"sag\"] ]\n mpList.append( mpNames ) \n return queryMpValues( mpList )",
"def __init__(self,x=0.1,E=10.0, mpar={}, topchem='He', topden=1.78e-4, botchem='Sr50Cl100H110493.721O55246.86', botden=1.0032, element='Sr', line='Ka1', vslit= 0.04, detlen=10.5, qoff=0.0, yscale=1,int_bg=0, Rc=0, sur_den=0,ion_depth=0):\n if type(x)==list:\n self.x=np.array(x)\n else:\n self.x=x\n self.E=E\n self.__mpar__ = mpar\n self.topchem = topchem\n self.topden = topden\n self.botchem = botchem\n self.botden = botden\n self.element = element\n self.line = line\n self.vslit = vslit\n self.detlen = detlen\n self.qoff = qoff\n self.yscale = yscale\n self.int_bg = int_bg\n self.Rc = Rc\n self.sur_den = sur_den\n self.ion_depth = ion_depth\n elelist = xdb.atomic_symbols\n linelist = list(xdb.xray_lines(98).keys())\n self.choices={'element':elelist,'line': linelist} #If there are choices available for any fixed parameters\n self.output_params = {}\n self.init_params()\n self.__fit__=False\n self.__avoganum__ = scipy.constants.Avogadro\n self.__eleradius__ = scipy.constants.physical_constants['classical electron radius'][0]*1e10 #classic electron radius in \\AA",
"def doParametersOfInterest(self):\n \n self.modelBuilder.doVar('expr::cosW(\"0.87681811112\",)')\n self.modelBuilder.doVar('expr::sinW(\"0.48082221247\",)')\n self.modelBuilder.doVar('expr::mZ(\"91.2\",)')\n self.modelBuilder.doVar('expr::Lambda1(\"100.0\",)')\n self.modelBuilder.doVar('expr::e2(\"0.0917\",)')\n self.modelBuilder.doVar('expr::gs2(\"1.533\",)')\n\n # EFT Higgs basis couplings\n\n self.modelBuilder.doVar('cZ[0,-1,1]') \n self.modelBuilder.doVar(\"cZZ[0,-2,2]\") \n self.modelBuilder.doVar(\"cZZt[0,-2,2]\") \n self.modelBuilder.doVar(\"cZB[0,-6,6]\") \n\n poi='cZ,cZZ,cZZt,cZB'\n\n # Amplitude couplings from EFT couplings \n\n self.modelBuilder.doVar('expr::a1(\"@0+1\",cZ)') # (\"2*(@0+1)\",cZ) in AN/Paper but a1 = 1 for signal model and width calculation\n self.modelBuilder.doVar('expr::a2(\"-1*@0*(@1/(2*pow(@2,2)*pow(@3,2)))\",cZZ,e2,sinW,cosW)')\n self.modelBuilder.doVar('expr::a3(\"-1*@0*(@1/(2*pow(@2,2)*pow(@3,2)))\",cZZt,e2,sinW,cosW)')\n self.modelBuilder.doVar('expr::k1(\"@0*(@1*pow(@2,2)/(pow(@3,2)*pow(@4,2)))\",cZB,e2,Lambda1,sinW,mZ)')\n self.modelBuilder.doVar('expr::k1L1(\"@0/pow(@1,2)\",k1,Lambda1)')\n\n ###### gamma_H ########\n\n # SMEFT relationships for VV couplings (Expressed using amplitude couplings)\n\n self.modelBuilder.doVar('expr::kappa(\"1.0\",)')\n self.modelBuilder.doVar('expr::kappa_tilde(\"0.0\",)') \n\n self.modelBuilder.doVar('expr::a1_WW(\"@0\",a1)')\n self.modelBuilder.doVar('expr::a2_WW(\"@0*@0*@1\",cosW,a2)')\n self.modelBuilder.doVar('expr::a3_WW(\"@0*@0*@1\",cosW,a3)')\n self.modelBuilder.doVar('expr::k1_WW(\"(@2 / (@0*@0 - @1*@1) - 2*@1*@1*@3*@4*@4 /(@5*@5*(@0*@0 - @1*@1)))\",cosW,sinW,k1,a2,Lambda1,mZ)')\n self.modelBuilder.doVar('expr::k2_k1(\"2*@0*@1*@2/(@0*@0 - @1*@1)\",cosW,sinW,k1)')\n self.modelBuilder.doVar('expr::k2_a2(\"-2*@0*@1*@3*@4*@4/((@2*@2)*(@0*@0 - @1*@1))\",cosW,sinW,mZ,a2,Lambda1)')\n self.modelBuilder.doVar('expr::k2(\"@0 + @1\",k2_k1,k2_a2)')\n\n # Determine gamma_H from VV couplings\n\n zz_expr = '\"4*(@0*@0/4. + 0.1695*@3*@3 + 0.09076*@1*@1 + 0.03809*@2*@2 + 0.8095*@0*@3/2. + 0.5046*@0*@1/2. + 0.2092*@1*@3 + 0.1023*@4*@4 + 0.1901*@0*@4/2. + 0.07429*@3*@4 + 0.04710*@1*@4) \",a1,a2,a3,k1,k2'\n ww_expr = '\"4*(@0*@0/4. + 0.1320*@3*@3 + 0.1944*@1*@1 + 0.08075*@2*@2 + 0.7204*@0*@3/2. + 0.7437*@0*@1/2. + 0.2774*@3*@1) \",a1_WW,a2_WW,a3_WW,k1_WW'\n zgamma_expr = '\"4*(1.118600*@0*@0/4. +0.0035*@1*@1 - 0.125010*@0*@1/2. + 0.000003*@1*@1 - 0.00018*@1*@1 + 0.003100*@0*@1/2. +0.00126*@2*@2 + 0.000005*@2*@2 -0.00047*@2*@2)\",a1_WW,kappa,kappa_tilde'\n gg_expr = '\"(1.1068*@0*@0 + 0.0082*@0*@0 - 0.1150*@0*@0 + 2.5717*@1*@1 + 0.0091*@1*@1 - 0.1982*@1*@1)\",kappa,kappa_tilde'\n bb_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n cc_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n tautau_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n mumu_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n gmgm_expr = '\"4*(1.6054*@0*@0/4. + 0.07312*@1*@1 - 0.6854*@0*@1/2. + 0.00002*@1*@1 - 0.0018*@1*@1 + 0.0085*@0*@1/2. 
+ 0.1699*@2*@2 + 0.00002*@2*@2 - 0.0031*@2*@2)\",a1_WW,kappa,kappa_tilde'\n \n self.modelBuilder.doVar('expr::R_WW('+str(ww_expr)+')')\n self.modelBuilder.doVar('expr::R_ZZ('+str(zz_expr)+')')\n self.modelBuilder.doVar('expr::R_Zgamma('+str(zgamma_expr)+')')\n self.modelBuilder.doVar('expr::R_gg('+str(gg_expr)+')')\n self.modelBuilder.doVar('expr::R_bb('+str(bb_expr)+')')\n self.modelBuilder.doVar('expr::R_cc('+str(cc_expr)+')')\n self.modelBuilder.doVar('expr::R_tautau('+str(tautau_expr)+')')\n self.modelBuilder.doVar('expr::R_mumu('+str(mumu_expr)+')')\n self.modelBuilder.doVar('expr:R_gammagamma('+str(gmgm_expr)+')')\n\n self.modelBuilder.doVar('expr::gammaH(\"(0.5824*@0 + 0.2137*@1 + 0.08187*@2 + 0.06272*@3 + 0.02891*@4 + 0.02619*@5 + 0.002270*@6 + 0.001533*@7 + 0.0002176*@8 )/0.9998\",R_bb,R_WW,R_gg,R_tautau,R_cc,R_ZZ,R_gammagamma,R_Zgamma,R_mumu)') \n\n ###########################\n\n self.g1V = GetCoupTerms(1,1,1,-0.0001,\"1V\") # Compensate for scaling of k1 templates \n self.g2V = GetCoupTerms(1,1,1,-0.0001,\"2V\") \n \n self.modelBuilder.doVar(\"expr::g2V_1(\\\"\"+str(self.g2V[0])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T1(\\\"((pow(@0,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_1)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T1_Neg(\\\"-1*((pow(@0,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_1)\") \n self.modelBuilder.doVar(\"expr::g2V_2(\\\"\"+str(self.g2V[1])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T2(\\\"((pow(@0,3)*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_2)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T2_Neg(\\\"-1*((pow(@0,3)*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_2)\") \n self.modelBuilder.doVar(\"expr::g2V_3(\\\"\"+str(self.g2V[2])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T3(\\\"((pow(@0,2)*pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_3)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T3_Neg(\\\"-1*((pow(@0,2)*pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_3)\") \n self.modelBuilder.doVar(\"expr::g2V_4(\\\"\"+str(self.g2V[3])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T4(\\\"((@0*pow(@1,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_4)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T4_Neg(\\\"-1*((@0*pow(@1,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_4)\") \n self.modelBuilder.doVar(\"expr::g2V_5(\\\"\"+str(self.g2V[4])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T5(\\\"((pow(@1,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_5)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T5_Neg(\\\"-1*((pow(@1,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_5)\") \n self.modelBuilder.doVar(\"expr::g2V_6(\\\"\"+str(self.g2V[5])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T6(\\\"((pow(@0,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_6)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T6_Neg(\\\"-1*((pow(@0,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_6)\") \n self.modelBuilder.doVar(\"expr::g2V_7(\\\"\"+str(self.g2V[6])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T7(\\\"((pow(@0,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_7)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T7_Neg(\\\"-1*((pow(@0,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_7)\") \n self.modelBuilder.doVar(\"expr::g2V_8(\\\"\"+str(self.g2V[7])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T8(\\\"((@0*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_8)\") \n 
self.modelBuilder.factory_(\"expr::scale_Ewk_T8_Neg(\\\"-1*((@0*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_8)\") \n self.modelBuilder.doVar(\"expr::g2V_9(\\\"\"+str(self.g2V[8])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T9(\\\"((pow(@2,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_9)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T9_Neg(\\\"-1*((pow(@2,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_9)\") \n self.modelBuilder.doVar(\"expr::g2V_10(\\\"\"+str(self.g2V[9])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T10(\\\"((pow(@0,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_10)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T10_Neg(\\\"-1*((pow(@0,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_10)\") \n self.modelBuilder.doVar(\"expr::g2V_11(\\\"\"+str(self.g2V[10])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T11(\\\"((pow(@0,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_11)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T11_Neg(\\\"-1*((pow(@0,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_11)\") \n self.modelBuilder.doVar(\"expr::g2V_12(\\\"\"+str(self.g2V[11])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T12(\\\"((@0*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_12)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T12_Neg(\\\"-1*((@0*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_12)\") \n self.modelBuilder.doVar(\"expr::g2V_13(\\\"\"+str(self.g2V[12])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T13(\\\"((pow(@3,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_13)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T13_Neg(\\\"-1*((pow(@3,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_13)\") \n self.modelBuilder.doVar(\"expr::g2V_14(\\\"\"+str(self.g2V[13])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T14(\\\"((pow(@1,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_14)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T14_Neg(\\\"-1*((pow(@1,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_14)\") \n self.modelBuilder.doVar(\"expr::g2V_15(\\\"\"+str(self.g2V[14])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T15(\\\"((pow(@1,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_15)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T15_Neg(\\\"-1*((pow(@1,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_15)\") \n self.modelBuilder.doVar(\"expr::g2V_16(\\\"\"+str(self.g2V[15])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T16(\\\"((@1*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_16)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T16_Neg(\\\"-1*((@1*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_16)\") \n self.modelBuilder.doVar(\"expr::g2V_17(\\\"\"+str(self.g2V[16])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T17(\\\"((pow(@1,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_17)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T17_Neg(\\\"-1*((pow(@1,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_17)\") \n self.modelBuilder.doVar(\"expr::g2V_18(\\\"\"+str(self.g2V[17])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T18(\\\"((pow(@1,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_18)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T18_Neg(\\\"-1*((pow(@1,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_18)\") \n self.modelBuilder.doVar(\"expr::g2V_19(\\\"\"+str(self.g2V[18])+\"\\\",)\") \n 
self.modelBuilder.factory_(\"expr::scale_Ewk_T19(\\\"((@1*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_19)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T19_Neg(\\\"-1*((@1*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_19)\") \n self.modelBuilder.doVar(\"expr::g2V_20(\\\"\"+str(self.g2V[19])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T20(\\\"((pow(@2,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_20)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T20_Neg(\\\"-1*((pow(@2,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_20)\") \n self.modelBuilder.doVar(\"expr::g2V_21(\\\"\"+str(self.g2V[20])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T21(\\\"((pow(@2,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_21)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T21_Neg(\\\"-1*((pow(@2,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_21)\") \n self.modelBuilder.doVar(\"expr::g2V_22(\\\"\"+str(self.g2V[21])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T22(\\\"((@2*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_22)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T22_Neg(\\\"-1*((@2*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_22)\") \n self.modelBuilder.doVar(\"expr::g2V_23(\\\"\"+str(self.g2V[22])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T23(\\\"((@0*@1*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_23)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T23_Neg(\\\"-1*((@0*@1*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_23)\") \n self.modelBuilder.doVar(\"expr::g2V_24(\\\"\"+str(self.g2V[23])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T24(\\\"((@0*pow(@1,2)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_24)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T24_Neg(\\\"-1*((@0*pow(@1,2)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_24)\") \n self.modelBuilder.doVar(\"expr::g2V_25(\\\"\"+str(self.g2V[24])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T25(\\\"((pow(@0,2)*@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_25)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T25_Neg(\\\"-1*((pow(@0,2)*@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_25)\") \n self.modelBuilder.doVar(\"expr::g2V_26(\\\"\"+str(self.g2V[25])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T26(\\\"((@0*@1*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_26)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T26_Neg(\\\"-1*((@0*@1*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_26)\") \n self.modelBuilder.doVar(\"expr::g2V_27(\\\"\"+str(self.g2V[26])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T27(\\\"((@0*pow(@1,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_27)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T27_Neg(\\\"-1*((@0*pow(@1,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_27)\") \n self.modelBuilder.doVar(\"expr::g2V_28(\\\"\"+str(self.g2V[27])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T28(\\\"((pow(@0,2)*@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_28)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T28_Neg(\\\"-1*((pow(@0,2)*@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_28)\") \n self.modelBuilder.doVar(\"expr::g2V_29(\\\"\"+str(self.g2V[28])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T29(\\\"((@0*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_29)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T29_Neg(\\\"-1*((@0*@2*pow(@3,2))/@4)*@5\\\", a1, a2, 
a3, k1L1, gammaH, g2V_29)\") \n self.modelBuilder.doVar(\"expr::g2V_30(\\\"\"+str(self.g2V[29])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T30(\\\"((@0*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_30)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T30_Neg(\\\"-1*((@0*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_30)\") \n self.modelBuilder.doVar(\"expr::g2V_31(\\\"\"+str(self.g2V[30])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T31(\\\"((pow(@0,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_31)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T31_Neg(\\\"-1*((pow(@0,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_31)\") \n self.modelBuilder.doVar(\"expr::g2V_32(\\\"\"+str(self.g2V[31])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T32(\\\"((@1*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_32)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T32_Neg(\\\"-1*((@1*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_32)\") \n self.modelBuilder.doVar(\"expr::g2V_33(\\\"\"+str(self.g2V[32])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T33(\\\"((@1*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_33)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T33_Neg(\\\"-1*((@1*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_33)\") \n self.modelBuilder.doVar(\"expr::g2V_34(\\\"\"+str(self.g2V[33])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T34(\\\"((pow(@1,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_34)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T34_Neg(\\\"-1*((pow(@1,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_34)\") \n self.modelBuilder.doVar(\"expr::g2V_35(\\\"\"+str(self.g2V[34])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T35(\\\"((@0*@1*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_35)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T35_Neg(\\\"-1*((@0*@1*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_35)\") \n \n self.modelBuilder.doVar(\"expr::g1V_1(\\\"\"+str(self.g1V[0])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T1(\\\"((pow(@0,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_1)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T1_Neg(\\\"-1*((pow(@0,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_1)\") \n self.modelBuilder.doVar(\"expr::g1V_2(\\\"\"+str(self.g1V[1])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T2(\\\"((@0*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_2)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T2_Neg(\\\"-1*((@0*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_2)\") \n self.modelBuilder.doVar(\"expr::g1V_3(\\\"\"+str(self.g1V[2])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T3(\\\"((pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_3)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T3_Neg(\\\"-1*((pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_3)\") \n self.modelBuilder.doVar(\"expr::g1V_4(\\\"\"+str(self.g1V[3])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T4(\\\"((@0*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_4)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T4_Neg(\\\"-1*((@0*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_4)\") \n self.modelBuilder.doVar(\"expr::g1V_5(\\\"\"+str(self.g1V[4])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T5(\\\"((pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_5)\") \n 
self.modelBuilder.factory_(\"expr::scale_ggH_T5_Neg(\\\"-1*((pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_5)\") \n self.modelBuilder.doVar(\"expr::g1V_6(\\\"\"+str(self.g1V[5])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T6(\\\"((@0*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_6)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T6_Neg(\\\"-1*((@0*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_6)\") \n self.modelBuilder.doVar(\"expr::g1V_7(\\\"\"+str(self.g1V[6])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T7(\\\"((pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_7)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T7_Neg(\\\"-1*((pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_7)\") \n self.modelBuilder.doVar(\"expr::g1V_8(\\\"\"+str(self.g1V[7])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T8(\\\"((@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_8)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T8_Neg(\\\"-1*((@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_8)\") \n self.modelBuilder.doVar(\"expr::g1V_9(\\\"\"+str(self.g1V[8])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T9(\\\"((@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_9)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T9_Neg(\\\"-1*((@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_9)\") \n self.modelBuilder.doVar(\"expr::g1V_10(\\\"\"+str(self.g1V[9])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T10(\\\"((@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_10)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T10_Neg(\\\"-1*((@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_10)\") \n \n self.modelBuilder.doSet(\"POI\",poi)",
"def _fp_setup2(self):\n # TODO: right now it's hard to implement this required stage",
"def __init__(self, flagTrackShape = 0):\n\n \"\"\" Nos interesa que el planner tenga una pista algo mas reducida de la real\n para conservar algo de robustez y no salirnos de la pista en el primer segundo. \"\"\"\n \n ### is HW is the half width of vehicle dimension + some saftey factor?\n ### what is slack??\n \n # HW = rospy.get_param(\"halfWidth\")+0.1\n HW = 0.4\n # print (\"HW\",HW)\n # if flagTrackShape == 0:\n # selectedTrack = rospy.get_param(\"trackShape\") # comentado para el testeo del planner\n # # selectedTrack = \"L_shape\"\n # else:\n # selectedTrack = \"oval\"\n\n selectedTrack = \"L_shape\"\n print (\"track selected\",selectedTrack)\n if selectedTrack == \"3110\":\n self.halfWidth = 0.6\n self.slack = 0.15\n spec = np.array([[60 * 0.03, 0],\n [80 * 0.03, +80 * 0.03 * 2 / np.pi],\n [20 * 0.03, 0],\n [80 * 0.03, +80 * 0.03 * 2 / np.pi],\n [40 * 0.03, -40 * 0.03 * 10 / np.pi],\n [60 * 0.03, +60 * 0.03 * 5 / np.pi],\n [40 * 0.03, -40 * 0.03 * 10 / np.pi],\n [80 * 0.03, +80 * 0.03 * 2 / np.pi],\n [20 * 0.03, 0],\n [80 * 0.03, +80 * 0.03 * 2 / np.pi],\n [80 * 0.03, 0]])\n\n elif selectedTrack == \"oval\":\n self.halfWidth = HW\n self.slack = 0.15\n spec = np.array([[1.0, 0],\n [4.5, 4.5 / np.pi],\n [2.0, 0],\n [4.5, 4.5 / np.pi],\n [1.0, 0]])\n\n # elif selectedTrack == \"L_shape\":\n # self.halfWidth = HW\n # self.slack = 0.01\n # lengthCurve = 4.5\n # spec = np.array([[1.0, 0],\n # [lengthCurve, lengthCurve / np.pi],\n # # Note s = 1 * np.pi / 2 and r = -1 ---> Angle spanned = np.pi / 2\n # [lengthCurve/2,-lengthCurve / np.pi ],\n # [lengthCurve, lengthCurve / np.pi],\n # [lengthCurve / np.pi *2, 0],\n # [lengthCurve/2, lengthCurve / np.pi]])\n\n elif selectedTrack == \"L_shape_n\":\n self.halfWidth = HW\n self.slack = 0.01\n lengthCurve = 4.5\n spec = np.array([[1.0, 0],\n [lengthCurve, lengthCurve / np.pi],\n [lengthCurve/2,-lengthCurve / np.pi ],\n [lengthCurve, lengthCurve / np.pi],\n [lengthCurve / np.pi *2, 0],\n [lengthCurve/2, lengthCurve / np.pi]])\n\n elif selectedTrack == \"L_shape_IDIADA\":\n self.halfWidth = HW\n self.slack = 6*0.45\n lengthCurve = 10*4.5\n spec = np.array([[1.0, 0],\n [lengthCurve, lengthCurve / np.pi],\n # Note s = 1 * np.pi / 2 and r = -1 ---> Angle spanned = np.pi / 2\n [lengthCurve/2,-lengthCurve / np.pi ],\n [lengthCurve, lengthCurve / np.pi],\n [lengthCurve / np.pi *2, 0],\n [lengthCurve/2, lengthCurve / np.pi]])\n\n elif selectedTrack == \"L_shape\":\n # elif selectedTrack == \"SLAM_shape1\":\n self.halfWidth = 0.4\n self.slack = 0.01\n lengthCurve = 1.5*(np.pi/2)\n spec = np.array([[2.5,0],\n [2*lengthCurve,(lengthCurve*2)/np.pi],\n [lengthCurve,-(lengthCurve*2) / np.pi],\n [1.0,0],\n [lengthCurve,lengthCurve*2/np.pi],\n [2.0,0],\n [lengthCurve,(lengthCurve*2)/np.pi],\n [4.0,0],\n [lengthCurve,(lengthCurve*2)/np.pi],\n [2.6,0]])\n\n\n elif selectedTrack == \"8_track\":\n self.halfWidth = 0.4\n self.slack = 0.15\n lengthCurve = 1.5*(np.pi/2)\n spec = np.array([[0.5,0],\n [lengthCurve,(lengthCurve*2)/np.pi],\n [1.0,0],\n [lengthCurve,-(lengthCurve*2) / np.pi],\n [lengthCurve,lengthCurve*2/np.pi],\n [lengthCurve,lengthCurve*2/np.pi],\n [1.0,0],\n [lengthCurve,(lengthCurve*2)/np.pi],\n [lengthCurve,-(lengthCurve*2)/np.pi],\n [lengthCurve,(lengthCurve*2)/np.pi],\n [1.0,0],\n [lengthCurve,lengthCurve*2/np.pi]])\n\n\n\n # Now given the above segments we compute the (x, y) points of the track and the angle of the tangent vector (psi) at\n # these points. For each segment we compute the (x, y, psi) coordinate at the last point of the segment. 
Furthermore,\n # we compute also the cumulative s at the starting point of the segment at signed curvature\n # PointAndTangent = [x, y, psi, cumulative s, segment length, signed curvature]\n\n ### what is cumulative s and signed curvature.?\n\n PointAndTangent = np.zeros((spec.shape[0] + 1, 6))\n for i in range(0, spec.shape[0]):\n if spec[i, 1] == 0.0: # If the current segment is a straight line\n l = spec[i, 0] # Length of the segments\n if i == 0:\n ang = 0 # Angle of the tangent vector at the starting point of the segment\n x = 0 + l * np.cos(ang) # x coordinate of the last point of the segment\n y = 0 + l * np.sin(ang) # y coordinate of the last point of the segment\n else:\n ang = PointAndTangent[i - 1, 2] # Angle of the tangent vector at the starting point of the segment\n x = PointAndTangent[i-1, 0] + l * np.cos(ang) # x coordinate of the last point of the segment\n y = PointAndTangent[i-1, 1] + l * np.sin(ang) # y coordinate of the last point of the segment\n psi = ang # Angle of the tangent vector at the last point of the segment\n\n # # With the above information create the new line\n # if i == 0:\n # NewLine = np.array([x, y, psi, PointAndTangent[i, 3], l, 0])\n # else:\n # NewLine = np.array([x, y, psi, PointAndTangent[i, 3] + PointAndTangent[i, 4], l, 0])\n #\n # PointAndTangent[i + 1, :] = NewLine # Write the new info\n\n if i == 0:\n NewLine = np.array([x, y, psi, PointAndTangent[i, 3], l, 0])\n else:\n NewLine = np.array([x, y, psi, PointAndTangent[i-1, 3] + PointAndTangent[i-1, 4], l, 0])\n\n PointAndTangent[i, :] = NewLine # Write the new info\n else:\n l = spec[i, 0] # Length of the segment\n r = spec[i, 1] # Radius of curvature\n\n\n if r >= 0:\n direction = 1\n else:\n direction = -1\n\n if i == 0:\n ang = 0 # Angle of the tangent vector at the\n # starting point of the segment\n CenterX = 0 \\\n + np.abs(r) * np.cos(ang + direction * np.pi / 2) # x coordinate center of circle\n CenterY = 0 \\\n + np.abs(r) * np.sin(ang + direction * np.pi / 2) # y coordinate center of circle\n else:\n ang = PointAndTangent[i - 1, 2] # Angle of the tangent vector at the\n # starting point of the segment\n CenterX = PointAndTangent[i-1, 0] \\\n + np.abs(r) * np.cos(ang + direction * np.pi / 2) # x coordinate center of circle\n CenterY = PointAndTangent[i-1, 1] \\\n + np.abs(r) * np.sin(ang + direction * np.pi / 2) # y coordinate center of circle\n\n spanAng = l / np.abs(r) # Angle spanned by the circle\n psi = wrap(ang + spanAng * np.sign(r)) # Angle of the tangent vector at the last point of the segment\n\n angleNormal = wrap((direction * np.pi / 2 + ang))\n angle = -(np.pi - np.abs(angleNormal)) * (sign(angleNormal))\n x = CenterX + np.abs(r) * np.cos(\n angle + direction * spanAng) # x coordinate of the last point of the segment\n y = CenterY + np.abs(r) * np.sin(\n angle + direction * spanAng) # y coordinate of the last point of the segment\n\n # With the above information create the new line\n # plt.plot(CenterX, CenterY, 'bo')\n # plt.plot(x, y, 'ro')\n\n # if i == 0:\n # NewLine = np.array([x, y, psi, PointAndTangent[i, 3], l, 1 / r])\n # else:\n # NewLine = np.array([x, y, psi, PointAndTangent[i, 3] + PointAndTangent[i, 4], l, 1 / r])\n #\n # PointAndTangent[i + 1, :] = NewLine # Write the new info\n\n if i == 0:\n NewLine = np.array([x, y, psi, PointAndTangent[i, 3], l, 1 / r])\n else:\n NewLine = np.array([x, y, psi, PointAndTangent[i-1, 3] + PointAndTangent[i-1, 4], l, 1 / r])\n\n PointAndTangent[i, :] = NewLine # Write the new info\n # plt.plot(x, y, 'or')\n\n # Now update 
info on last point\n # xs = PointAndTangent[PointAndTangent.shape[0] - 2, 0]\n # ys = PointAndTangent[PointAndTangent.shape[0] - 2, 1]\n # xf = PointAndTangent[0, 0]\n # yf = PointAndTangent[0, 1]\n # psif = PointAndTangent[PointAndTangent.shape[0] - 2, 2]\n #\n # # plt.plot(xf, yf, 'or')\n # # plt.show()\n # l = np.sqrt((xf - xs) ** 2 + (yf - ys) ** 2)\n #\n # NewLine = np.array([xf, yf, psif, PointAndTangent[PointAndTangent.shape[0] - 2, 3] + PointAndTangent[\n # PointAndTangent.shape[0] - 2, 4], l, 0])\n # PointAndTangent[-1, :] = NewLine\n\n\n xs = PointAndTangent[-2, 0]\n ys = PointAndTangent[-2, 1]\n xf = 0\n yf = 0\n psif = 0\n\n # plt.plot(xf, yf, 'or')\n # plt.show()\n l = np.sqrt((xf - xs) ** 2 + (yf - ys) ** 2)\n\n NewLine = np.array([xf, yf, psif, PointAndTangent[-2, 3] + PointAndTangent[-2, 4], l, 0])\n PointAndTangent[-1, :] = NewLine\n\n self.PointAndTangent = PointAndTangent\n self.TrackLength = PointAndTangent[-1, 3] + PointAndTangent[-1, 4]",
"def __init__(self,cosmology, mass_function, halo_physics, kh_vector, mass_bins, volume, kh_min=0, pt_type = 'EFT', pade_resum = True, smooth_density = True, IR_resum = True, npoints = 1000, verb=False):\n\n # Write attributes, if they're of the correct type\n if isinstance(cosmology, Cosmology):\n self.cosmology = cosmology\n else:\n raise TypeError('cosmology input must be an instance of the Cosmology class!')\n if isinstance(mass_function, MassFunction):\n self.mass_function = mass_function\n else:\n raise TypeError('mass_function input must be an instance of the MassFunction class!')\n if isinstance(halo_physics, HaloPhysics):\n self.halo_physics = halo_physics\n else:\n raise TypeError('halo_physics input must be an instance of the HaloPhysics class!')\n\n # Write useful attributes\n self.kh_vector = kh_vector\n self.kh_min = kh_min\n self.mass_bins = mass_bins\n self.N_bins = len(mass_bins)-1\n self.N_k = len(self.kh_vector)\n self.volume = volume\n self.verb = verb\n self.pt_type = pt_type\n self.pade_resum = pade_resum\n self.smooth_density = smooth_density\n self.IR_resum = IR_resum\n self.npoints = npoints\n\n # Generate a power spectrum class with this k-vector\n self.halo_model = HaloModel(cosmology, mass_function, halo_physics, kh_vector, kh_min,verb=self.verb)\n\n # Copy in the MassIntegrals class\n self.mass_integrals = self.halo_model.mass_integrals\n\n if self.cosmology.use_neutrinos:\n if self.verb:\n print(\"Note: massive neutrinos are not implemented in full, so we assume CDM+baryon power spectra here.\")\n print(\"(This will creates only a (subdominant) percent-level error for typical neutrino masses.)\")\n\n # Run some checks\n assert self.mass_bins[0]>=np.power(10.,self.mass_integrals.min_logM_h), 'Minimum bin must be above MassIntegral limit!'\n assert self.mass_bins[-1]<=np.power(10.,self.mass_integrals.max_logM_h), 'Maximum bin must be below MassIntegral limit!'\n\n # Compute linear power for the k-vector\n self.linear_power = self.cosmology.compute_linear_power(self.kh_vector,self.kh_min).copy()",
"def _basic_setup(self):\n\n if not self.label.isalnum():\n raise ValueError(\n f\"Label '{self.label}' is not alphanumeric,\"\n \" which is incompatible with the SFTv3 naming specification\"\n \" ( https://dcc.ligo.org/T040164-v2/public ).\"\n \" Please avoid underscores, hyphens etc.\"\n )\n if len(self.label) > 60:\n raise ValueError(\n f\"Label {self.label} is too long to comply with SFT naming rules\"\n f\" ({len(self.label)}>60).\"\n )\n\n os.makedirs(self.outdir, exist_ok=True)\n self.config_file_name = os.path.join(self.outdir, self.label + \".cff\")\n self.theta = np.array([self.phi, self.F0, self.F1, self.F2])\n\n if self.h0 and np.any(\n [getattr(self, k, None) is None for k in self.required_signal_parameters]\n ):\n raise ValueError(\n \"If h0>0, also need all of ({:s})\".format(\n \",\".join(self.required_signal_parameters)\n )\n )\n\n incompatible_with_TS = [\"tstart\", \"duration\", \"noiseSFTs\"]\n TS_required_options = [\"Tsft\"]\n no_noiseSFTs_options = [\"tstart\", \"duration\", \"Tsft\", \"detectors\"]\n\n if getattr(self, \"timestamps\", None) is not None:\n if np.any(\n [getattr(self, k, None) is not None for k in incompatible_with_TS]\n ):\n raise ValueError(\n \"timestamps option is incompatible with\"\n f\" ({','.join(incompatible_with_TS)}).\"\n )\n if np.any([getattr(self, k, None) is None for k in TS_required_options]):\n raise ValueError(\n \"With timestamps option, need also all of\"\n f\" ({','.join(TS_required_options)}).\"\n )\n self._get_setup_from_timestamps()\n elif self.noiseSFTs is not None:\n logger.info(\n \"noiseSFTs is not None: Inferring tstart, duration, Tsft. \"\n \"Input tstart and duration will be treated as SFT constraints \"\n \"using lalpulsar.SFTConstraints; Tsft will be checked for \"\n \"internal consistency accross input SFTs.\"\n )\n self._get_setup_from_noiseSFTs()\n elif np.any([getattr(self, k, None) is None for k in no_noiseSFTs_options]):\n raise ValueError(\n \"Need either noiseSFTs, timestamps or all of ({:s}).\".format(\n \",\".join(no_noiseSFTs_options)\n )\n )\n else:\n self._get_setup_from_tstart_duration()\n\n self.sftfilenames = [os.path.join(self.outdir, fn) for fn in self.sftfilenames]\n self.sftfilepath = \";\".join(self.sftfilenames)\n\n if self.tref is None:\n self.tref = self.tstart\n\n if getattr(self, \"SFTWindowBeta\", None):\n raise ValueError(\n \"Option 'SFTWindowBeta' is defunct, please use 'SFTWindowParam'.\"\n )\n if getattr(self, \"SFTWindowType\", None):\n try:\n lal.CheckNamedWindow(\n self.SFTWindowType, self.SFTWindowParam is not None\n )\n except RuntimeError:\n raise ValueError(\n \"XLAL error on checking SFT window options.\"\n f\" Likely either SFTWindowType={self.SFTWindowType} is not a recognised window name,\"\n \" or it requires also setting an SFTWindowParam.\"\n )",
"def __init__(self):\n\n # Filter parameters\n self.p_bp_filter = [2.0, 16.0, 2]\n self.s_bp_filter = [2.0, 12.0, 2]\n\n # Onset window parameters\n self.p_onset_win = [0.2, 1.0]\n self.s_onset_win = [0.2, 1.0]\n\n # Traveltime lookup table decimation factor\n self.decimate = [1, 1, 1]\n\n # Time step for continuous compute in detect\n self.time_step = 120.\n\n # Data sampling rate\n self.sampling_rate = 50\n\n # Centred onset function override -- None means it will be\n # automatically set in detect() and locate()\n self.onset_centred = None\n\n # Pick related parameters\n self.pick_threshold = 1.0\n self.picking_mode = \"Gaussian\"\n self.fraction_tt = 0.1\n\n # Marginal window\n self.marginal_window = 2.\n\n # Default pre-pad for compute\n self.pre_pad = None\n\n # Number of cores to perform detect/locate on\n self.n_cores = 1\n\n # Toggle whether to incrementally write .scanmseed in detect()\n self.continuous_scanmseed_write = False\n\n # Plotting toggles\n self.plot_event_summary = True\n self.plot_station_traces = False\n self.plot_coal_video = False\n\n # Saving toggles\n self.write_4d_coal_grid = False\n self.write_cut_waveforms = False\n self.cut_waveform_format = \"MSEED\"\n self.pre_cut = None\n self.post_cut = None\n\n # xy files for plotting\n self.xy_files = None",
"def __init__(self):\n\n self.Cp_air0 = config_earth.earth_properties['Cp_air0']\n self.Rsp_air = config_earth.earth_properties['Rsp_air']\n\n self.d = config_earth.balloon_properties['d']\n self.vol = math.pi*4/3*pow((self.d/2),3) #volume m^3\n self.surfArea = math.pi*self.d*self.d #m^2\n self.cs_area = math.pi*self.d*self.d/4.0 #m^2\n\n #self.emissEnv = config_earth.balloon_properties['emissEnv']\n self.areaDensityEnv = config_earth.balloon_properties['areaDensityEnv']\n self.mp = config_earth.balloon_properties['mp']\n self.mdot = 0\n self.massEnv = config_earth.balloon_properties['mEnv']\n self.Upsilon = config_earth.balloon_properties['Upsilon']\n\n self.vent = config_earth.simulation['vent']\n self.coord = config_earth.simulation['start_coord']\n self.t = config_earth.simulation['start_time']\n self.lat = math.radians(self.coord['lat'])\n self.Ls = self.t.timetuple().tm_yday\n self.min_alt = config_earth.simulation['min_alt']\n\n self.vm_coeff = .1 #virtual mass coefficient\n self.k = self.massEnv*config_earth.balloon_properties['cp'] #thermal mass coefficient\n\n self.dt = config_earth.dt",
"def __init__(self):\r\n self.label = \"Step 2: FEMA BFE\"\r\n self.description = \"This tool takes the FEMA area of interest clip output from Step 1 and converts the \" \\\r\n \"feature class into a base flood elevation raster dataset based on the attribute \" \\\r\n \"'STATIC_BFE'. This raster dataset is then reclassified to remove areas that do not \" \\\r\n \"undergo flooding. NOTE: FEMA BFE raster dataset that is created has a pixel size of 30.\"\r\n self.canRunInBackground = False",
"def __init__(self, encut, magmom, ldaul, Uparam, Jparam, name=\"DFTCL_settings\"):\n\n cl_settings = {\"ISPIN\": 2, \"MAGMOM\": magmom, \"SAXIS\": None, \"LSORBIT\": None, \"LNONCOLLINEAR\": None}\n dftu_settings = {\"LDAU\": \".TRUE.\", \"LDAUU\": Uparam, \"LDATYPE\": 2, \"LDAUL\": ldaul, \"LDAUJ\": Jparam , \"LMAXMIMX\": 4}\n InputParameters.__init__(self, name=name, magnetic_settings=cl_settings, hubbard_settings=dftu_settings)\n self.update_electronic_settings(\"encut\", encut)",
"def __init__(self, filePrefix=\"\", fileDirectory=\"\", parameterIdentifier=\"\", CLfitOrder=1,\n CDfitOrder=2, CmfitOrder=1, weightedFit=False, plotFit=False):\n\n # Airfoil instance with characteristics read from files\n self.airfoil = None\n\n # Polars read from file\n self.polars = []\n\n # Point pairs (Pr, AOA) as well as CL CD and Cm at each point\n self.points = []\n\n self.valuesCL = []\n self.valuesCD = []\n self.valuesCm = []\n\n # A list of Parameter values\n self.Prs = []\n\n # AOA, CL, CD, Cm at each Pr\n self.AOAs_Pr = []\n self.CLs_Pr = []\n self.CDs_Pr = []\n self.Cms_Pr = []\n\n # Fit characteristics\n self.CLfitOrder = CLfitOrder\n self.CDfitOrder = CDfitOrder\n self.CmfitOrder = CmfitOrder\n self.weightedFit = weightedFit\n self.plotFit = plotFit\n\n # Polyfit coeffs at each Pr w.r.t. angle\n self.CLfit_Pr = []\n self.CDfit_Pr = []\n self.Cmfit_Pr = []\n\n # Keep some handy flags\n self.importedPolars = False\n self.createdCPolyfitTables = False\n\n if filePrefix is not \"\" and fileDirectory is not \"\":\n print(\"\\nReading from xflr5 files...\")\n try:\n # Try getting polars from file\n self.xflr5AirplanePolarReader(filePrefix, fileDirectory, parameterIdentifier)\n self.importedPolars = True\n\n except:\n self.importedPolars = False\n print(\"Read unsuccessful!\")\n\n if self.importedPolars:\n print(\"Read successful!\")\n print(\"\\nCreating polynomial fits for coefficients...\")\n try:\n # Create lookup tables for CL, CD and Cm\n self.CreateCoefficientPolyfitTables()\n self.createdCPolyfitTables = True\n\n except:\n print(\"Fit unsuccessful!\")\n self.createdCPolyfitTables = False\n\n if self.createdCPolyfitTables:\n print(\"Fit successful!\")\n try:\n self.PlotPolyFit()\n\n except:\n print(\"Plot unsuccessful\")",
"def __init__(self):\n ProcessingUnit.__init__(self)\n print(\" [ START ] init - Metodo Simulator Reader\")\n\n self.isConfig = False\n self.basicHeaderObj = BasicHeader(LOCALTIME)\n self.systemHeaderObj = SystemHeader()\n self.radarControllerHeaderObj = RadarControllerHeader()\n self.processingHeaderObj = ProcessingHeader()\n self.profileIndex = 2**32-1\n self.dataOut = Voltage()\n #code0 = numpy.array([1,1,1,0,1,1,0,1,1,1,1,0,0,0,1,0,1,1,1,0,1,1,0,1,0,0,0,1,1,1,0,1])\n code0 = numpy.array([1,1,1,-1,1,1,-1,1,1,1,1,-1,-1,-1,1,-1,1,1,1,-1,1,1,-1,1,-1,-1,-1,1,1,1,-1,1])\n #code1 = numpy.array([1,1,1,0,1,1,0,1,1,1,1,0,0,0,1,0,0,0,0,1,0,0,1,0,1,1,1,0,0,0,1,0])\n code1 = numpy.array([1,1,1,-1,1,1,-1,1,1,1,1,-1,-1,-1,1,-1,-1,-1,-1,1,-1,-1,1,-1,1,1,1,-1,-1,-1,1,-1])\n #self.Dyn_snCode = numpy.array([code0,code1])\n self.Dyn_snCode = None",
"def __init__(self, path: str, rmf_path: str, arf_path: str, b_path: str,\n central_coord: Quantity, inn_rad: Quantity, out_rad: Quantity, obs_id: str, instrument: str,\n grouped: bool, min_counts: int, min_sn: float, over_sample: int, stdout_str: str,\n stderr_str: str, gen_cmd: str, region: bool = False, b_rmf_path: str = '', b_arf_path: str = ''):\n super().__init__(path, obs_id, instrument, stdout_str, stderr_str, gen_cmd)\n self._prod_type = \"spectrum\"\n\n if os.path.exists(rmf_path):\n self._rmf = rmf_path\n else:\n self._rmf = ''\n self._usable = False\n self._why_unusable.append(\"RMFPathDoesNotExist\")\n\n if os.path.exists(arf_path):\n self._arf = arf_path\n else:\n self._arf = ''\n self._usable = False\n self._why_unusable.append(\"ARFPathDoesNotExist\")\n\n if os.path.exists(b_path):\n self._back_spec = b_path\n else:\n self._back_spec = ''\n self._usable = False\n self._why_unusable.append(\"BackSpecPathDoesNotExist\")\n\n if b_rmf_path != '' and os.path.exists(b_rmf_path):\n self._back_rmf = b_rmf_path\n elif b_rmf_path == '':\n self._back_rmf = None\n else:\n self._back_rmf = ''\n self._usable = False\n self._why_unusable.append(\"BackRMFPathDoesNotExist\")\n\n if b_arf_path != '' and os.path.exists(b_arf_path):\n self._back_arf = b_arf_path\n elif b_arf_path == '':\n self._back_arf = None\n else:\n self._back_arf = ''\n self._usable = False\n self._why_unusable.append(\"BackARFPathDoesNotExist\")\n\n # Storing the central coordinate of this spectrum\n self._central_coord = central_coord\n\n # Storing the region information\n self._inner_rad = inn_rad\n self._outer_rad = out_rad\n # And also the shape of the region\n if self._inner_rad.isscalar:\n self._shape = 'circular'\n else:\n self._shape = 'elliptical'\n\n # If this spectrum has just been generated by XGA then we'll set the headers, otherwise its\n # too slow and must be avoided. 
I am assuming here that the gen_cmd will be \"\" if the object\n # hasn't just been generated - which is true of XGA's behaviour\n if gen_cmd != \"\":\n try:\n self._update_spec_headers(\"main\")\n self._update_spec_headers(\"back\")\n except OSError as err:\n self._usable = False\n self._why_unusable.append(\"FITSIOOSError\")\n\n self._exp = None\n self._plot_data = {}\n self._luminosities = {}\n self._count_rate = {}\n\n # This is specifically for fakeit runs (for cntrate - lum conversions) on the ARF/RMF\n # associated with this Spectrum\n self._conv_factors = {}\n\n # This set of properties describe the configuration of evselect/specgroup during generation\n self._grouped = grouped\n self._min_counts = min_counts\n self._min_sn = min_sn\n if self._grouped and self._min_counts is not None:\n self._grouped_on = 'counts'\n elif self._grouped and self._min_sn is not None:\n self._grouped_on = 'signal to noise'\n else:\n self._grouped_on = None\n\n # Not to do with grouping, but this states the level of oversampling requested from evselect\n self._over_sample = over_sample\n\n # This describes whether this spectrum was generated directly from a region present in a region file\n self._region = region\n\n # Here we generate the storage key for this object, its just convenient to do it in here\n # Sets up the extra part of the storage key name depending on if grouping is enabled\n if grouped and min_counts is not None:\n extra_name = \"_mincnt{}\".format(min_counts)\n elif grouped and min_sn is not None:\n extra_name = \"_minsn{}\".format(min_sn)\n else:\n extra_name = ''\n\n # And if it was oversampled during generation then we need to include that as well\n if over_sample is not None:\n extra_name += \"_ovsamp{ov}\".format(ov=over_sample)\n\n spec_storage_name = \"ra{ra}_dec{dec}_ri{ri}_ro{ro}_grp{gr}\"\n if not self._region and self.inner_rad.isscalar:\n spec_storage_name = spec_storage_name.format(ra=self.central_coord[0].value,\n dec=self.central_coord[1].value,\n ri=self._inner_rad.value, ro=self._outer_rad.value,\n gr=grouped)\n elif not self._region and not self._inner_rad.isscalar:\n inn_rad_str = 'and'.join(self._inner_rad.value.astype(str))\n out_rad_str = 'and'.join(self._outer_rad.value.astype(str))\n spec_storage_name = spec_storage_name.format(ra=self.central_coord[0].value,\n dec=self.central_coord[1].value, ri=inn_rad_str,\n ro=out_rad_str, gr=grouped)\n else:\n spec_storage_name = \"region_grp{gr}\".format(gr=grouped)\n\n spec_storage_name += extra_name\n # And we save the completed key to an attribute\n self._storage_key = spec_storage_name\n\n # This attribute is set via the property, ONLY if this spectrum is considered to be a member of a set\n # of annular spectra. It describes which position in the set this spectrum has\n self._ann_ident = None\n # This holds a unique random identifier for the set itself, and again will only be set from outside\n self._set_ident = None",
"def __init__(self, osi, fy, fu, e_mod, e_mod_sh, eps_sh, eps_ult, buck=None, cm_fatigue=None, mp_curve=None):\n self.osi = osi\n self.fy = float(fy)\n self.fu = float(fu)\n self.e_mod = float(e_mod)\n self.e_mod_sh = float(e_mod_sh)\n self.eps_sh = float(eps_sh)\n self.eps_ult = float(eps_ult)\n if buck is None:\n self.buck_pms = []\n else:\n self.buck_pms = [] # TODO:\n if cm_fatigue is None:\n self.cm_fatigue = None\n self.cm_params = []\n else:\n self.cm_fatigue = cm_fatigue\n self.cm_params = ['-CMFatigue', cm_fatigue['cf'], cm_fatigue['alpha'], cm_fatigue['cd']]\n if mp_curve is None:\n self.mp_curve = None\n self.mp_params = []\n else:\n self.mp_curve = mp_curve\n r1 = self.mp_curve.setdefault('r1', 0.333)\n r2 = self.mp_curve.setdefault('r2', 18)\n r3 = self.mp_curve.setdefault('r3', 4)\n self.mp_params = ['-MPCurveParams', r1, r2, r3]\n\n if osi is not None:\n osi.n_mat += 1\n self._tag = osi.n_mat\n self._parameters = [self.op_type, self._tag, self.fy, self.fu, self.e_mod, self.e_mod_sh, self.eps_sh,\n self.eps_ult, *self.buck_pms, *self.cm_params, *self.mp_params]\n if osi is None:\n self.built = 0\n if osi is not None:\n self.to_process(osi)",
"def __init__(self):\r\n\r\n #480p 2.39:1 720x302\r\n #2048x2048 is more than 7.3GB of vRAM for the Master DISC model\r\n\r\n #Loading the preprocessed data\r\n preprocessVars = Preprocess()\r\n\r\n #The training and display of the trained models\r\n self.modelTrain = train.Train(preprocessVars)\r\n self.disp = display.Display(preprocessVars)",
"def __init__(self, runway_type):\n self.primary_surface_length = 200\n self.primary_surface_width = 0\n self.approach_surface_extendedwidth = 0\n self.first_section_length = 0\n self.first_section_slope = 0\n self.second_section_length = 0\n self.second_section_slope = 0\n self.horizontal_surface_height = 150\n self.horizontal_surface_radius = 0\n self.conical_surface_slope = 20\n self.conical_surface_offset = 4000\n self.transitional_surface_slope = 7\n \n # The runway types listed in the documentation for FAA FAR 77 do not \n # match what appears when you actually run the tool in ArcMap.\n # These regular expressions should match either version. \n if re.match(\"Visual\\s*(?:Runway)?\\s*Visual\\sApproach\", runway_type, re.I):\n self.primary_surface_width = 500\n self.approach_surface_extendedwidth = 1500\n self.first_section_length = 5000\n self.first_section_slope = 20\n self.horizontal_surface_radius = 5000\n elif re.match(\"Utility\\s*(?:Runway)?\\s*Visual Approach\", runway_type, re.I):\n self.primary_surface_width = 250\n self.approach_surface_extendedwidth = 1250\n self.first_section_length = 5000\n self.first_section_slope = 20\n self.horizontal_surface_radius = 5000\n elif re.match(\"Utility\\s*(?:Runway)?\\s*Non[\\s\\-]*Precision Instrument Approach\", runway_type, re.I):\n self.primary_surface_width = 500\n self.approach_surface_extendedwidth = 2000\n self.first_section_length = 5000\n self.first_section_slope = 20\n self.horizontal_surface_radius = 5000\n elif re.match(\"Precision Instrument\\s*(?:Runway)?\", runway_type, re.I):\n self.primary_surface_width = 1000\n self.approach_surface_extendedwidth = 16000\n self.first_section_length = 10000\n self.first_section_slope = 50\n self.second_section_length = 40000\n self.second_section_slope = 40\n self.horizontal_surface_radius = 10000\n elif re.match(\"Non Precision Instrument\\s*(?:Runway)?\\s*(?:(?:High)|(?:Greater)) Visibility\", runway_type, re.I):\n self.primary_surface_width = 500\n self.approach_surface_extendedwidth = 3500\n self.first_section_length = 10000\n self.first_section_slope = 34\n self.horizontal_surface_radius = 10000\n elif re.match(\"Non Precision Instrument\\s*(?:Runway)\\s*Approach Low Visibility\", runway_type, re.I):\n self.primary_surface_width = 1000\n self.approach_surface_extendedwidth = 4000\n self.first_section_length = 10000\n self.first_section_slope = 34\n self.horizontal_surface_radius = 10000",
"def __init__(self,l,options):\n #### Setup options\n self.options = options\n # For execution\n self.shots = 1000 if options.get('shots') == None\\\n else options.get('shots')\n self.seed = options.get('seed')\n if self.seed != None:\n from qiskit.aqua import aqua_globals\n aqua_globals.random_seed = self.seed\n self.prnt = options.get('print')\n self.ancilla_measure = options.get('ancilla') if options.get('ancilla') != None else False\n\n self.ibmq = False\n if options.get('ibmq') == True:\n print('Running on real quantum computer')\n self.ibmq = True\n self.backend = options['backend']\n from qiskit.tools.monitor import job_monitor\n self.monitor = job_monitor\n from attributes import get_measurement_fitter\n self.meas_fitter = get_measurement_fitter(l,\n self.backend,\n None,\n self.shots)\n \n else:\n # For Backend\n if options.get('backend') == None:\n self.options['backend'] = 'qasm_simulator' \n self.backend = qk.Aer.get_backend(options['backend'])\n # For noise model, coupling map and basis gates\n self.noise_model, self.coupling_map, self.basis_gates = None,None,None\n self.meas_fitter = None\n if options.get('device') != None:\n device = QuantumComputer(options.get('device'))\n if options.get('noise_model') != None:\n self.noise_model = device.noise_model\n # Create error mitigation fitter\n if options.get('meas_fit') in [None,True]:\n from attributes import get_measurement_fitter\n self.meas_fitter = get_measurement_fitter(l,\n self.backend,\n device,\n self.shots)\n if options.get('coupling_map') != None:\n self.coupling_map = device.coupling_map\n if options.get('basis_gates') != None:\n self.basis_gates = device.basis_gates\n # Qubit layout, virtual to physical\n self.layout = options.get('layout')\n # Optimization level\n self.optimization_level= 1 if options.get('optimization_level')==None else options['optimization_level']\n\n # GPU accelerated\n if options.get('gpu'):\n from qiskit_qcgpu_provider import QCGPUProvider\n Provider = QCGPUProvider()\n self.backend = Provider.get_backend(options['backend'])",
"def __init__(self, filt,\n objname=\"obj\",\n src=\"A0V\",\n chooseholes=None,\n affine2d=None,\n bandpass=None,\n **kwargs):\n self.chooseholes = chooseholes\n self.objname = objname\n self.filt = filt\n\n # 12 waves in f430 - data analysis:\n self.lam_bin = {\"F277W\": 50, \"F380M\": 20, \"F430M\": 40, \"F480M\": 30}\n\n # use 150 for 3 waves ax f430m; nominal values\n self.lam_c = {\"F277W\": 2.77e-6, # central wavelength (SI)\n \"F380M\": 3.8e-6,\n \"F430M\": 4.28521033106325E-06,\n \"F480M\": 4.8e-6}\n self.lam_w = {\"F277W\": 0.2, \"F380M\": 0.1, \"F430M\": 0.0436, \"F480M\": 0.08} # fractional filter width\n\n self.throughput = utils.tophatfilter(self.lam_c[self.filt], self.lam_w[self.filt], npoints=11)\n\n # update nominal filter parameters with those of the filter read in and used in the analysis...\n # Weighted mean wavelength in meters, etc, etc \"central wavelength\" for the filter:\n from scipy.integrate import simps\n\n thru_st = np.stack(self.throughput, axis=1)\n thru_st_0 = thru_st[0, :]\n thru_st_1 = thru_st[1, :]\n\n num = (thru_st_0 * thru_st_1).sum()\n den = thru_st[0, :].sum()\n self.lam_c[self.filt] = num / den\n\n area = simps(thru_st_0, thru_st_1)\n ew = area / thru_st_0.max() # equivalent width\n\n beta = ew / self.lam_c[self.filt] # fractional bandpass\n self.lam_w[self.filt] = beta\n\n if bandpass is not None:\n bandpass = np.array(bandpass) # type simplification\n wt = bandpass[:, 0]\n wl = bandpass[:, 1]\n cw = (wl * wt).sum() / wt.sum() # Weighted mean wavelength in meters \"central wavelength\"\n area = simps(wt, wl)\n ew = area / wt.max() # equivalent width\n beta = ew / cw # fractional bandpass\n self.lam_c = {\"F277W\": cw, \"F380M\": cw, \"F430M\": cw, \"F480M\": cw, }\n self.lam_w = {\"F277W\": beta, \"F380M\": beta, \"F430M\": beta, \"F480M\": beta}\n self.throughput = bandpass\n\n self.wls = [self.throughput, ]\n # Wavelength info for NIRISS bands F277W, F380M, F430M, or F480M\n self.wavextension = ([self.lam_c[self.filt], ], [self.lam_w[self.filt], ])\n self.nwav = 1\n\n # only one NRM on JWST:\n self.telname = \"NIRISS\"\n self.instrument = \"NIRISS\"\n self.arrname = \"jwst_g7s6c\"\n self.holeshape = \"hex\"\n self.mask = NRM_mask_definitions(maskname=self.arrname, chooseholes=chooseholes, holeshape=self.holeshape)\n # save affine deformation of pupil object or create a no-deformation object.\n # We apply this when sampling the PSF, not to the pupil geometry.\n # This will set a default Ideal or a measured rotation, for example,\n # and include pixel scale changes due to pupil distortion.\n # Separating detector tilt pixel scale effects from pupil distortion effects is\n # yet to be determined... see comments in Affine class definition.\n if affine2d is None:\n self.affine2d = utils.Affine2d(mx=1.0, my=1.0,\n sx=0.0, sy=0.0,\n xo=0.0, yo=0.0, name=\"Ideal\")\n else:\n self.affine2d = affine2d\n\n # finding centroid from phase slope only considered cv_phase data\n # when cv_abs data exceeds this cvsupport_threshold.\n # Absolute value of cv data normalized to unity maximum\n # for the threshold application.\n # Data reduction gurus: tweak the threshold value with experience...\n # Gurus: tweak cvsupport with use...\n self.cvsupport_threshold = {\"F277W\": 0.02, \"F380M\": 0.02, \"F430M\": 0.02, \"F480M\": 0.02}\n self.threshold = self.cvsupport_threshold[filt]",
"def __init__(self, osi, fy, fu, e_mod, e_mod_sh, eps_sh, eps_ult, lsr, alpha=1.0, cm_fatigue=None, mp_curve=None):\n self.osi = osi\n self.fy = float(fy)\n self.fu = float(fu)\n self.e_mod = float(e_mod)\n self.e_mod_sh = float(e_mod_sh)\n self.eps_sh = float(eps_sh)\n self.eps_ult = float(eps_ult)\n self.lsr = float(lsr)\n self.alpha = float(alpha)\n if cm_fatigue is None:\n self.cm_fatigue = None\n self.cm_params = []\n else:\n self.cm_fatigue = cm_fatigue\n self.cm_params = ['-CMFatigue', cm_fatigue['cf'], cm_fatigue['alpha'], cm_fatigue['cd']]\n if mp_curve is None:\n self.mp_curve = None\n self.mp_params = []\n else:\n self.mp_curve = mp_curve\n r1 = self.mp_curve.setdefault('r1', 0.333)\n r2 = self.mp_curve.setdefault('r2', 18)\n r3 = self.mp_curve.setdefault('r3', 4)\n self.mp_params = ['-MPCurveParams', r1, r2, r3]\n\n if osi is not None:\n osi.n_mat += 1\n self._tag = osi.n_mat\n self._parameters = [self.op_type, self._tag, self.fy, self.fu, self.e_mod, self.e_mod_sh, self.eps_sh, self.eps_ult, '-DMBuck', self.lsr, self.alpha, *self.cm_params, *self.mp_params]\n if osi is None:\n self.built = 0\n if osi is not None:\n self.to_process(osi)"
] | [
"0.65070575",
"0.6292009",
"0.62877405",
"0.60567147",
"0.6045674",
"0.6028957",
"0.60077965",
"0.59778076",
"0.59438014",
"0.5908957",
"0.58956885",
"0.5890363",
"0.5871577",
"0.5847138",
"0.583425",
"0.5827184",
"0.5803118",
"0.58006674",
"0.57921225",
"0.5785037",
"0.5782426",
"0.5781964",
"0.57684785",
"0.57615155",
"0.57488304",
"0.5737472",
"0.5736693",
"0.5735288",
"0.5732383",
"0.57310855"
] | 0.7589595 | 0 |
Converts RGB image to a dataframe where each row corresponds to single pixel. If sample_count is set then only random subset of rows is returned. | def image_to_colorspace(image: np.array, sample_count: int = None) -> DataFrame:
assert len(image.shape) == 3 and image.shape[2] == 3, "Image must be m x n x 3 dimensional RGB array"
if sample_count:
return DataFrame({'R':image[:,:,0].flatten(),'G':image[:,:, 1].flatten(),'B':image[:,:, 2].flatten()}).sample(n = sample_count, replace=False)
else:
return DataFrame({'R':image[:,:,0].flatten(),'G':image[:,:, 1].flatten(),'B':image[:,:, 2].flatten()}) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_sample_df(self, df, features, r):\n grouped = df.groupby('feature')\n df_sample = pd.DataFrame()\n for feature in features:\n group = grouped.get_group(feature)\n samples = group.sample(n=r)\n df_sample = df_sample.append(samples)\n return df_sample",
"def sample_rows(df, nrows):",
"def get_dummy_data(nrows=3, img_in_shape=(10, 10), img_out_shape=(10, 10)):\n # Choose from the following electrodes\n electrodes = ['A01', 'A2', 'A03', 'A3', 'A04', 'B01', 'B2']\n data = []\n for _ in range(nrows):\n img = np.random.rand(np.prod(img_in_shape)).reshape(img_in_shape)\n el = np.random.randint(len(electrodes))\n data.append({'electrode': electrodes[el], 'image': img,\n 'img_shape': img_out_shape})\n # Shuffle row indices\n idx = np.arange(nrows)\n np.random.shuffle(idx)\n # Create data frame\n X = pd.DataFrame(data, index=idx)\n return X",
"def random_sample_images(self, images, sample_size):\n\n #return sample(images, int(sample_size))\n return images.order_by('?')[:sample_size]",
"def make_original_dataframe(base_path=\"images\", sample_folders=20):\r\n img_dirs = os.listdir(base_path)\r\n\r\n if sample_folders > len(img_dirs):\r\n raise ValueError(\r\n f\"Wrong number of samples {sample_folders} for number of folders {len(img_dirs)}\")\r\n\r\n np.random.seed(42)\r\n\r\n images_folder = np.random.choice(img_dirs, sample_folders)\r\n\r\n images_path = []\r\n images_size = []\r\n\r\n for folder in images_folder:\r\n image_folder = os.path.join(base_path, folder)\r\n\r\n for img_name in os.listdir(image_folder):\r\n img_path = os.path.join(image_folder, img_name).replace(\"\\\\\", \"/\")\r\n images_path.append(img_path)\r\n img = Image.open(img_path)\r\n images_size.append(img.size)\r\n\r\n dataframe = pd.DataFrame(pd.Series(images_path).to_frame(\r\n \"path\")).join(pd.Series(images_size).to_frame(\"size\"))\r\n\r\n dataframe[[\"size_x\", \"size_y\"]] = pd.DataFrame(\r\n dataframe[\"size\"].tolist(), index=dataframe.index)\r\n dataframe.drop([\"size\"], axis=1, inplace=True)\r\n\r\n return dataframe[(dataframe[\"size_x\"] > 100) & (dataframe[\"size_y\"] > 100)]",
"def sample(self, num_rows=1):\n raise NotImplementedError",
"def sample_images(images, n=1000):\n ix = np.random.choice(np.arange(len(images)), size=n, replace=False)\n sample = labels.loc[ix, [\"img_name\", \"breed\"]]\n assert len(sample) == n\n return sample",
"def rvs(\n self,\n n_samples=1,\n random_state: Union[\n int, np.random.RandomState, np.random.Generator, None\n ] = None,\n ):\n\n rng = get_random_generator(random_state)\n\n columns = []\n\n for dim in self.dimensions:\n index_array = rng.uniform(size=n_samples)\n columns.append(dim.sample(index_array))\n\n # Transpose\n rows = []\n for i in range(n_samples):\n r = []\n for j in range(self.n_dims):\n r.append(columns[j][i])\n\n rows.append(r)\n\n return rows",
"def sample_data_grid8(num_samples=0):\n db, imsize = utils.load_grid8()\n\n return sample_data(db, imsize, num_samples)",
"def random_sample(df, batch_size):\n sample = df.sample(n=batch_size)\n #print(sample)\n video_ids = list(sample.video_id.values.astype(str))\n labels = list(sample.label.values)\n\n return video_ids, labels",
"def _color_sample(img: np.ndarray, p: float = 0.05) -> np.ndarray:\n # combine the X and Y dimension into one, only keep the channels dimension\n ravelled = img.reshape(-1, 3)\n # for 5%, take every 20th value, for 10% every 10th, etc...\n every_nth = int(1 / p)\n return ravelled[::every_nth, :]",
"def get_dataframe_sample(dataframe: pd.DataFrame, fraction: float) -> pd.DataFrame:\n df = dataframe.sample(frac = fraction)\n return df.reset_index(drop=True)",
"def sample(num_dims, num_samples):\n samples = np.random.rand(num_samples, num_dims)\n ### TODO: Update with a uniform sampling plan to fill space\n return samples",
"def sample(self, sample_shape=torch.Size()):\n with torch.no_grad():\n return self.rsample(sample_shape=sample_shape)",
"def sample(self):\n sampleIndices = self.random_state.choice(len(self.X), int(len(self.X)*self.sample_ratio), replace=False)\n\n return self.X[sampleIndices]\n pass",
"def random_dataframe(pool, shape, columns):\n return pd.DataFrame(\n np.random.randint(pool[0], pool[1], size=shape), columns=columns\n )",
"def random_grid(self, width, height, count, units='native'):\n\n if not self.src:\n raise RuntimeError('source not set or failed to open')\n\n if units == 'pixels':\n dims = width * self.src.res[0], height * self.src.res[1]\n elif units == 'native':\n dims = width, height\n else:\n raise ValueError('units must be \"native\" or \"pixels\"')\n\n gdf = grid.random_grid(*self.src.bounds, *dims, count)\n gdf.crs = self.src.crs\n return gdf",
"def random_sample(\n df: DataFrame,\n shape: tuple, \n name: str,\n n_samples: int,\n frames: int =30, \n box: int =60,\n id_col: str ='ID', \n time_col: str ='t', \n array_order: Iterable[str] =('t', 'x', 'y', 'z'), \n scale: Iterable[int] =(1, 1, 1, 4), \n non_tzyx_col: Union[Iterable[str], str, None] = None,\n seed: Union[int, None] =None,\n weights: Union[str, None] =None,\n max_lost_prop: Union[float, None] =None,\n **kwargs\n ):\n #array = single_zarr(image_path)\n #shape = array.shape\n _frames = np.floor_divide(frames, 2)\n _box = np.floor_divide(box, 2)\n # this is the image shape scaled to the data\n scaled_shape = [s * scale[i] # scaled shape in image order\n for i, s in enumerate(shape)]\n # curried function. Add important info for later\n _add_info = _add_sample_info(\n id_col, \n time_col,\n _frames, \n max_lost_prop)\n if seed is None:\n seed = np.random.randint(0, 100_000)\n # initalise the sample dict \n # Why? The function is recursive (if max_lost_prop < 1.0)\n # initialising within isnt an option.\n # and mutable defaults are just silly\n sample = {}\n sample = _sample(\n sample, \n df, \n n_samples, \n seed, \n array_order, \n shape, \n name, \n weights, \n _add_info\n )\n # sample scale is the scale that brings the data to the image\n sample = _estimate_bounding_boxes(\n sample, \n shape,\n id_col, \n time_col, \n _frames, \n _box,\n array_order, \n non_tzyx_col, \n scale\n )\n sample['info'] = _tracks_df(df, sample, id_col, time_col, array_order)\n return sample",
"def get_sample(df,n):\n idxs = sorted(np.random.permutation(len(df))[:n])\n return df.iloc[idxs].copy()",
"def test_sample_random_state(self):\n # Setup\n instance = GaussianMultivariate(GaussianUnivariate, random_seed=0)\n data = pd.DataFrame([\n {'A': 25, 'B': 75, 'C': 100},\n {'A': 30, 'B': 60, 'C': 250},\n {'A': 10, 'B': 65, 'C': 350},\n {'A': 20, 'B': 80, 'C': 150},\n {'A': 25, 'B': 70, 'C': 500}\n ])\n instance.fit(data)\n\n expected_result = pd.DataFrame(\n np.array([\n [25.19031668, 61.96527251, 543.43595269],\n [31.50262306, 49.70971698, 429.06537124],\n [20.31636799, 64.3492326, 384.27561823],\n [25.00302427, 72.06019812, 415.85215123],\n [23.07525773, 66.70901743, 390.8226672]\n ]),\n columns=['A', 'B', 'C']\n )\n\n # Run\n result = instance.sample(5)\n\n # Check\n pd.testing.assert_frame_equal(result, expected_result, check_less_precise=True)",
"def sample(self, count):\n batch = deepcopy(random.sample(self.buffer, count))\n batch = [np.array(arr) for arr in zip(*batch)]\n\n return batch",
"def uniform_subsampling(dataframe, num_point, random_seed=0):\n if not isinstance(dataframe, pd.core.frame.DataFrame):\n raise NotImplementedError()\n\n df = dataframe.sample(n=num_point, random_state=random_seed).reset_index(drop=True)\n\n return df",
"def sample(self, sample_size):\n m = self.a.shape[0]\n ss = min(m, sample_size)\n indices = np.arange(m)\n random_indices = _random_batch(indices, ss)\n return _build_prediction_Report(self.name, self.x, self.y, self.a, random_indices)",
"def jpg_to_df(image):\n width, height = image.size\n df = pd.DataFrame(np.vstack(np.array(image)[:,:,:3]), columns=('R', 'G', 'B'))\n\n # Associate the height and width for each pixel. \n df['height'] = np.repeat(np.arange(height), width)\n df['width'] = np.tile(np.arange(width), height)\n return df",
"def sample_data(db, imsize, num_samples=0):\n # load data\n im_data = db['im_data']\n value_data = db['value_data']\n states = db['state_xy_data']\n label_data = db['label_data']\n\n # created a sampler\n grid_sampler = GridDataSampler(im_data, value_data, imsize,\n states, label_data)\n print (\"[MESSAGE] Create a sampler\")\n\n data_collector = []\n value_collector = []\n start_pos_collector = []\n pos_traj_collector = []\n goal_pos_collector = []\n\n idx = 0\n while grid_sampler.grid_available and idx < num_samples:\n grid, value, start_pos_list, pos_traj, goal_pos = grid_sampler.next()\n if len(start_pos_list) < 8:\n print (\"[MESSAGE] THE %i-TH GRID SAMPLED. %i PATH FOUND.\" %\n (idx, len(start_pos_list)))\n data_collector.append(grid)\n value_collector.append(value)\n start_pos_collector.append(start_pos_list)\n pos_traj_collector.append(pos_traj)\n goal_pos_collector.append(goal_pos)\n idx += 1\n\n data_collector = np.asarray(data_collector, dtype=np.uint8)\n value_collector = np.asarray(value_collector, dtype=np.uint8)\n\n if idx < num_samples:\n print (\"[MESSAGE] %i samples collected.\" % (idx+1))\n return (data_collector, value_collector, start_pos_collector,\n pos_traj_collector, goal_pos_collector)",
"def get_sample():\n global counter\n counter = counter + 1 \n \n # capture frames from the camera;\n \n frame=picamera.array.PiRGBArray(camera)\n camera.capture(frame, 'bgr', use_video_port=True)\n\t# grab the raw NumPy array representing the image, then initialize the timestamp\n\t# and occupied/unoccupied text\n image = frame.array\n preprocessed_image_buffer=preprocess(image)\n\t# show the frame\n cv2.imshow(\"Frame0\", image)\n key = cv2.waitKey(1) & 0xFF\n return preprocessed_image_buffer",
"def sample_image(n_row, batches_done):\n # Sample noise\n z = Variable(Tensor(np.random.normal(0, 1, (n_row ** 2, opt.latent_dim))))\n gen_imgs = decoder(z)\n save_image(\n gen_imgs.data, \"images/%d.png\" % batches_done, nrow=n_row, normalize=True\n )",
"def build_dataframe(input_path, img_input_shape, conform_shape=False):\r\n number_classes = os.listdir(path=input_path)\r\n \r\n image_array = []\r\n class_label = []\r\n \r\n for folder in number_classes:\r\n image_array.extend([cv2.imread(os.path.join(input_path,folder,x)) for\r\n x in os.listdir(os.path.join(input_path,folder))\r\n if '.jpeg' in x or '.jpg' in x])\r\n class_label.extend([folder for x in os.listdir(os.path.join(input_path,folder)) if\r\n '.jpeg' in x or '.jpg' in x])\r\n \r\n if conform_shape:\r\n if max([len(img) for img in image_array]) > img_input_shape[0]:\r\n image_array = [pp.resize_image(x,img_input_shape[0]) for x in image_array]\r\n\r\n # Ensure shape matches exactly \r\n for i,img in enumerate(image_array):\r\n shape_delta = img_input_shape[0] - img.shape[0]\r\n if shape_delta > 0:\r\n new_row = np.random.randint(0,255,[shape_delta,img_input_shape[1],img_input_shape[2]],dtype='uint8')\r\n image_array[i] = np.vstack([image_array[i],new_row])\r\n \r\n elif shape_delta < 0:\r\n image_array[i] = image_array[i][:img_input_shape[0],:,:]\r\n \r\n # Ensure type is uint8 for HOG & Surf\r\n image_array = [x.astype('uint8') for x in image_array]\r\n \r\n return np.array(image_array), np.array(class_label)",
"def sample_frames(frame_dir, fps, visualize_sample_rate):\n visualize_every_x_frames = visualize_sample_rate * int(fps)\n sampled_frames = np.empty((0, 3, IMG_DIM, IMG_DIM), dtype=np.float32) # B, C, H, W\n i = 0\n for file in sorted(os.listdir(frame_dir)):\n if i % visualize_every_x_frames == 0:\n img = skimage.img_as_float(skimage.io.imread(os.path.join(frame_dir, file))).astype(np.float32)\n img = skimage.transform.resize(img, (IMG_DIM, IMG_DIM)) # H, W, C\n img = img.swapaxes(1, 2).swapaxes(0, 1) # C, H, W\n sampled_frames = np.append(sampled_frames, np.array([img]), axis=0)\n i += 1\n logger.debug(\"total number of frames: {}\".format(i))\n return sampled_frames",
"def train_test_samples(df):\n\n from math import floor\n\n shuffled_df = df.reindex(np.random.permutation(df.index))\n\n seventy_five_percent = int(floor(len(shuffled_df) * 0.75))\n train_df = shuffled_df.iloc[:seventy_five_percent, ]\n test_df = shuffled_df.iloc[seventy_five_percent:, ]\n\n return train_df, test_df"
] | [
"0.6141033",
"0.603381",
"0.60076314",
"0.5756627",
"0.56922126",
"0.5570779",
"0.5568138",
"0.555071",
"0.5497111",
"0.546993",
"0.5440586",
"0.5375774",
"0.5369649",
"0.53203547",
"0.53200895",
"0.53108627",
"0.52745223",
"0.5240417",
"0.5232344",
"0.5230368",
"0.5151618",
"0.5125613",
"0.5122846",
"0.5119534",
"0.51175886",
"0.51155835",
"0.5102521",
"0.51005405",
"0.5092247",
"0.5089993"
] | 0.68630415 | 0 |
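A minimal usage sketch for the image_to_colorspace record above, assuming NumPy and pandas are importable as np and DataFrame; the function body follows the record (lightly condensed), and the random 4 x 5 image and sample size are purely illustrative:

import numpy as np
from pandas import DataFrame

def image_to_colorspace(image: np.ndarray, sample_count: int = None) -> DataFrame:
    # One row per pixel: flatten each RGB channel into its own column.
    assert len(image.shape) == 3 and image.shape[2] == 3, "Image must be m x n x 3 dimensional RGB array"
    cspace = DataFrame({'R': image[:, :, 0].flatten(),
                        'G': image[:, :, 1].flatten(),
                        'B': image[:, :, 2].flatten()})
    if sample_count:
        return cspace.sample(n=sample_count, replace=False)
    return cspace

rng = np.random.default_rng(0)                       # illustrative image, values are placeholders
img = rng.integers(0, 256, size=(4, 5, 3))
full = image_to_colorspace(img)                      # all 20 pixels
subset = image_to_colorspace(img, sample_count=10)   # 10 randomly sampled pixels
print(full.shape, subset.shape)                      # (20, 3) (10, 3)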
Converts colorspace vector back to RGB image. Colorspace can have extra columns | def colorspace_to_image(cspace: DataFrame, m: int, n: int) -> np.array:
assert isinstance(cspace, DataFrame), "Colorspace must be a dataframe"
assert len(cspace) == m * n, 'Image dimensions must match'
assert all(np.isin(['R', 'G', 'B'], cspace.columns)), "Colorspace must contain RGB columns"
result = np.empty([m, n, 3])
result[:, :, 0] = cspace['R'].values.reshape(m, n)
result[:, :, 1] = cspace['G'].values.reshape(m, n)
result[:, :, 2] = cspace['B'].values.reshape(m, n)
return result | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def features_colorspace(X, colorspace):\n if colorspace == \"BGR\":\n return X\n\n X = np.array(X)\n\n if colorspace == \"HLS\":\n cs = cv2.COLOR_BGR2HLS\n elif colorspace == \"HSV\":\n cs = cv2.COLOR_BGR2HSV\n elif colorspace == \"LUV\":\n cs = cv2.COLOR_BGR2LUV\n elif colorspace == \"YUV\":\n cs = cv2.COLOR_BGR2YUV\n elif colorspace == \"YCrCb\":\n cs = cv2.COLOR_BGR2YCrCb\n else:\n raise ValueError(colorspace)\n\n for i in range(X.shape[0]):\n X[i,...] = cv2.cvtColor(X[i,...], cs)\n\n return X",
"def ycbcr_to_rgb(image: torch.Tensor) -> torch.Tensor:\n y: torch.Tensor = image[..., 0, :, :]\n cb: torch.Tensor = image[..., 1, :, :]\n cr: torch.Tensor = image[..., 2, :, :]\n\n delta: float = 0.5\n cb_shifted: torch.Tensor = cb - delta\n cr_shifted: torch.Tensor = cr - delta\n\n r: torch.Tensor = y + 1.403 * cr_shifted\n g: torch.Tensor = y - 0.714 * cr_shifted - 0.344 * cb_shifted\n b: torch.Tensor = y + 1.773 * cb_shifted\n return torch.stack([r, g, b], -3)",
"def rgb_reconstruction(lumaspace: DataFrame) -> DataFrame:\n \n assert isinstance(lumaspace, DataFrame), \"Colorspace must be a dataframe\"\n assert all(np.isin(['luma', 'rR', 'rG', 'rB'], lumaspace.columns)), \"Lumaspace must contain RGB columns\" \n return (lumaspace\n .assign(R = lambda df: df['luma'] * df['rR'])\n .assign(G = lambda df: df['luma'] * df['rG'])\n .assign(B = lambda df: df['luma'] * df['rB']))",
"def red_channel(img):\n\n red = np.zeros(img.shape,dtype=float)\n\n red[:,:,2] = np.copy(img[:,:,2])\n\n return red",
"def convert_color(image, color_space):\n out_image = None\n if color_space != 'RGB':\n if color_space == 'HSV':\n out_image = cv2.cvtColor(image.astype(np.float32), cv2.COLOR_RGB2HSV)\n elif color_space == 'LUV':\n out_image = cv2.cvtColor(image.astype(np.float32), cv2.COLOR_RGB2LUV)\n elif color_space == 'HLS':\n out_image = cv2.cvtColor(image.astype(np.float32), cv2.COLOR_RGB2HLS)\n elif color_space == 'YUV':\n out_image = cv2.cvtColor(image.astype(np.float32), cv2.COLOR_RGB2YUV)\n elif color_space == 'YCrCb':\n out_image = cv2.cvtColor(image.astype(np.float32), cv2.COLOR_RGB2YCrCb)\n else:\n out_image = np.copy(image)\n return out_image",
"def luma(cspace: np.array) -> np.array:\n \n assert isinstance(cspace, DataFrame), \"Colorspace must be a dataframe\"\n assert all(np.isin(['R', 'G', 'B'], cspace.columns)), \"Colorspace must contain RGB columns\"\n return 0.2989 * cspace['R'] + 0.5870 * cspace['G'] + 0.1140 * cspace['B']",
"def rgb_image(self):\n z3 = self.z[:,:,newaxis]\n return z3 * self.c",
"def to_image_space(data):\n return np.swapaxes(np.flip(data, 1), 0, 1)",
"def rgb_processing(rgb_img, center, scale, rot=0):\n rgb_img = crop(rgb_img, center, scale, \n [constants.IMG_RES, constants.IMG_RES], rot=rot)\n # (3,224,224),float,[0,1]\n rgb_img = np.transpose(rgb_img.astype('float32'),(2,0,1))/255.0\n return rgb_img",
"def convert_grayscale_to_rgb(x: np.ndarray) -> np.ndarray:\n return np.stack((x, ) * 3, axis=-1)",
"def convert_color(image, color_space='RGB'):\n color_space = color_space.lower()\n if color_space != 'rgb':\n if color_space == 'hsv':\n color_transformation = cv2.COLOR_BGR2HSV\n elif color_space == 'luv':\n color_transformation = cv2.COLOR_BGR2LUV\n elif color_space == 'hls':\n color_transformation = cv2.COLOR_BGR2HLS\n elif color_space == 'yuv':\n color_transformation = cv2.COLOR_BGR2YUV\n elif color_space == 'ycrcb':\n color_transformation = cv2.COLOR_BGR2YCrCb\n else:\n raise ValueError('Invalid value %s for color_space parameters. Valid color spaces are: RGB, HSV, LUV, '\n 'HLS, YUV, YCrCb' % color_space)\n\n return cv2.cvtColor(image, color_transformation)\n else:\n return image",
"def bgr_to_rgb(ims):\n out = []\n for im in ims:\n out.append(im[:,:,::-1])\n return out",
"def convert_rgb_cmyk(rcol, gcol, bcol):\n if (rcol == 0) and (gcol == 0) and (bcol == 0):\n # black\n return 0, 0, 0, 1\n\n kcol = 1-max(rcol, gcol, bcol)\n ccol = (1-rcol-kcol)/(1-kcol)\n mcol = (1-gcol-kcol)/(1-kcol)\n ycol = (1-bcol-kcol)/(1-kcol)\n\n return ccol, mcol, ycol, kcol",
"def grey_to_rgb_imitation(img):\n return np.repeat(img[...,np.newaxis], 3, -1)",
"def _preprocess(self, image):\n\n # Scale from [0, 255] to [0, 1] and BGR to RGB \n return (image / 255.0)[:, :, ::-1]",
"def format_data(img_path, size):\n img_color = cv2.imread(img_path)\n img_color = img_color[:, :, ::-1]\n img_color = cv2.resize(img_color, (size, size), interpolation=cv2.INTER_AREA)\n img_color = img_color.reshape((1, size, size, 3))\\\n #.transpose(0, 3, 1, 2)\n\n return img_color",
"def RGB_to_RGB(RGB,\n input_colourspace,\n output_colourspace,\n chromatic_adaptation_transform='CAT02',\n apply_decoding_cctf=False,\n apply_encoding_cctf=False):\n\n if apply_decoding_cctf:\n RGB = input_colourspace.decoding_cctf(RGB)\n\n M = RGB_to_RGB_matrix(input_colourspace, output_colourspace,\n chromatic_adaptation_transform)\n\n RGB = dot_vector(M, RGB)\n\n if apply_encoding_cctf:\n RGB = output_colourspace.encoding_cctf(RGB)\n\n return RGB",
"def colorPaletteToRGB(image_data,color_table): \n color_table_array = numpy.array([ord(c) for c in color_table])\n n_colors = color_table_array.size / 3\n color_table_array = color_table_array.reshape((n_colors,3))\n channels = [color_table_array[image_data,i] for i in range(3)]\n return channels",
"def lab_to_rgb(image: tf.Tensor) -> tf.Tensor:\n xyz = lab_to_xyz(image)\n rgb_image = xyz_to_rgb(xyz)\n return rgb_image",
"def generate_normalized_rgb(self):\n \n r,g,b=(Numeric.zeros(256),Numeric.zeros(256),Numeric.zeros(256))\n for i in Numeric.arange(256):\n r_,g_,b_=self.colfct(i/255.0) # these are from [0,1]\n r[i],g[i],b[i]=int(255*r_),int(255*g_),int(255*b_)\n return r/256.0,g/256.0,b/256.0",
"def convert_rgb_hsv(rcol, gcol, bcol):\n\n mxi = max(rcol, gcol, bcol)\n mni = min(rcol, gcol, bcol)\n\n d_f = mxi-mni\n if mxi == mni:\n hcol = 0\n elif mxi == rcol:\n hcol = (60 * ((gcol-bcol)/d_f) + 360) % 360\n elif mxi == gcol:\n hcol = (60 * ((bcol-rcol)/d_f) + 120) % 360\n elif mxi == bcol:\n hcol = (60 * ((rcol-gcol)/d_f) + 240) % 360\n if mxi == 0:\n scol = 0\n else:\n scol = d_f/mxi\n vcol = mxi\n return hcol, scol, vcol",
"def yuv_to_rgb(img_yuv):\n\n y = img_yuv[..., 0]\n u = img_yuv[..., 1]\n v = img_yuv[..., 2]\n\n r = y + 1.14 * v\n g = y - 0.396 * u - 0.581 * v\n b = y + 2.029 * u\n\n img_rgb = np.stack((r, g, b), axis=2)\n img_rgb = np.clip(img_rgb, 0, 1)\n return img_rgb",
"def xyz_to_rgb(image: tf.Tensor) -> tf.Tensor:\n x, y, z = tf.unstack(image, axis=-1)\n var_x = x / 100\n var_y = y / 100\n var_z = z / 100\n\n var_r = var_x * 3.2406 + var_y * -1.5372 + var_z * -0.4986\n var_g = var_x * -0.9689 + var_y * 1.8758 + var_z * 0.0415\n var_b = var_x * 0.0557 + var_y * -0.2040 + var_z * 1.0570\n\n var_r = tf.where(var_r > 0.0031308,\n 1.055 * tf.pow(var_r, (1 / 2.4)) - 0.055,\n 12.92 * var_r)\n var_g = tf.where(var_g > 0.0031308,\n 1.055 * tf.pow(var_g, (1 / 2.4)) - 0.055,\n 12.92 * var_g)\n var_b = tf.where(var_b > 0.0031308,\n 1.055 * tf.pow(var_b, (1 / 2.4)) - 0.055,\n 12.92 * var_b)\n r = var_r * 255\n g = var_g * 255\n b = var_b * 255\n rgb_image = tf.cast(tf.stack([r, g, b], axis=-1), tf.uint8)\n return rgb_image",
"def get_rgb(self, img, r, g, b):\r\n\r\n # Get specific bands of hyperspectral image\r\n red_channel = img[:, :, r]\r\n green_channel = img[:, :, g]\r\n blue_channel = img[:, :, b]\r\n\r\n img = np.stack((red_channel, green_channel, blue_channel), axis=2)\r\n img = img.astype('float32')\r\n return img",
"def _rgb(x, y, z):\n rgb = np.array([x, y, z]).T\n rgb -= rgb.min(0)\n rgb /= np.maximum(rgb.max(0), 1e-16) # avoid div by zero\n return rgb",
"def to_image(x):\n x = denorm(x.data.cpu())\n ndarr = x.mul(255).clamp(0, 255).byte().permute(1, 2, 0).cpu().numpy()\n im = ndarr\n return im",
"def _colored_img_to_arr(image, verbose=False):\n height, width = image.size\n arr = np.array(image.getdata())\n arr = arr.reshape(3, height, width)\n r = arr[0]\n g = arr[1]\n b = arr[2]\n return r, g, b",
"def to_rgb(im):\n w, h = im.shape\n ret = np.empty((w, h, 3), dtype=np.uint8)\n ret[:, :, 2] = ret[:, :, 1] = ret[:, :, 0] = im\n return ret",
"def recreate_image(x):\n reverse_mean = [-0.485, -0.456, -0.406]\n reverse_std = [1/0.229, 1/0.224, 1/0.225]\n in_channel = x.shape[-1]\n recreated_im = copy.copy(x) # C, H, W\n if in_channel == 3:\n for c in range(in_channel):\n recreated_im[:, :, c] /= reverse_std[c]\n recreated_im[:, :, c] -= reverse_mean[c]\n elif in_channel == 1:\n recreated_im[:, :, 0] /= reverse_std[1]\n recreated_im[:, :, 0] -= reverse_mean[1]\n recreated_im[recreated_im > 1] = 1\n recreated_im[recreated_im < 0] = 0\n recreated_im = np.round(recreated_im * 255)\n\n recreated_im = np.uint8(recreated_im) # H, W, C\n return recreated_im",
"def x_redim(self, x):\n x[0:4] *= self.r_scale\n return x"
] | [
"0.6144378",
"0.60548836",
"0.6034356",
"0.5993296",
"0.5984861",
"0.5929783",
"0.58545554",
"0.58436054",
"0.5751462",
"0.573934",
"0.5706569",
"0.5676334",
"0.567407",
"0.5654705",
"0.56331134",
"0.5625428",
"0.5621792",
"0.5558728",
"0.5558652",
"0.5552761",
"0.5528596",
"0.5509052",
"0.55081743",
"0.550799",
"0.5504384",
"0.5500919",
"0.5490245",
"0.547427",
"0.5462447",
"0.54551286"
] | 0.6287839 | 0 |
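A matching round-trip sketch for the colorspace_to_image record above, under the same NumPy/pandas import assumptions; the 2 x 3 dataframe is illustrative, and any extra columns beyond R, G, B would simply be ignored:

import numpy as np
from pandas import DataFrame

def colorspace_to_image(cspace: DataFrame, m: int, n: int) -> np.ndarray:
    # Reshape each RGB column back into an m x n plane of the output image.
    assert isinstance(cspace, DataFrame), "Colorspace must be a dataframe"
    assert len(cspace) == m * n, "Image dimensions must match"
    assert all(np.isin(['R', 'G', 'B'], cspace.columns)), "Colorspace must contain RGB columns"
    result = np.empty([m, n, 3])
    result[:, :, 0] = cspace['R'].values.reshape(m, n)
    result[:, :, 1] = cspace['G'].values.reshape(m, n)
    result[:, :, 2] = cspace['B'].values.reshape(m, n)
    return result

m, n = 2, 3
cspace = DataFrame({'R': np.arange(6), 'G': np.arange(6) + 10, 'B': np.arange(6) + 20})
img = colorspace_to_image(cspace, m, n)   # shape (2, 3, 3)
print(img.shape)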